Lines Matching +full:t +full:- +full:head (drivers/net/ethernet/meta/fbnic/fbnic_txrx.c)

1 // SPDX-License-Identifier: GPL-2.0
26 #define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
30 unsigned long csr_base = (unsigned long)ring->doorbell; in fbnic_ring_csr_base()
32 csr_base &= ~(FBNIC_QUEUE_STRIDE * sizeof(u32) - 1); in fbnic_ring_csr_base()
52 * fbnic_ts40_to_ns() - convert descriptor timestamp to PHC time
70 s = u64_stats_fetch_begin(&fbn->time_seq); in fbnic_ts40_to_ns()
71 offset = READ_ONCE(fbn->time_offset); in fbnic_ts40_to_ns()
72 } while (u64_stats_fetch_retry(&fbn->time_seq, s)); in fbnic_ts40_to_ns()
74 high = READ_ONCE(fbn->time_high); in fbnic_ts40_to_ns()
79 /* Compare bits 32-39 between periodic reads and ts40, in fbnic_ts40_to_ns()
85 if (ts_top < (u8)high && (u8)high - ts_top > U8_MAX / 2) in fbnic_ts40_to_ns()
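
fbnic_ts40_to_ns() widens a 40-bit descriptor timestamp into a 64-bit nanosecond value: bits 39..0 come from the descriptor, bits 63..40 from a periodically sampled copy of the full clock, and the overlapping byte (bits 32..39) detects a wrap in between. A minimal standalone model of that widening, assuming time_high caches bits 63..32 of the periodic read as the surrounding lines suggest (offset handling omitted):

#include <stdint.h>

static uint64_t ts40_widen(uint64_t ts40, uint32_t high)
{
	/* Bits 63..40 come from the periodic read, bits 39..0 from ts40 */
	uint64_t ns = (uint64_t)(high >> 8) << 40 | ts40;
	uint8_t ts_top = ts40 >> 32;

	/* If the stamp's copy of bits 32..39 is far below the periodic
	 * read's copy, the 40-bit counter wrapped after that read, so
	 * account for one full 40-bit period.
	 */
	if (ts_top < (uint8_t)high && (uint8_t)high - ts_top > UINT8_MAX / 2)
		ns += 1ULL << 40;

	return ns;
}
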
93 return (ring->head - ring->tail - 1) & ring->size_mask; in fbnic_desc_unused()
98 return (ring->tail - ring->head) & ring->size_mask; in fbnic_desc_used()
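
fbnic_desc_unused()/fbnic_desc_used() rely on the ring having a power-of-2 size: unsigned subtraction plus a mask handles wraparound for free, and one slot is sacrificed so head == tail can only mean "empty", never "full". A standalone model:

#include <assert.h>
#include <stdint.h>

struct ring { uint32_t head, tail, size_mask; };

static uint32_t desc_unused(const struct ring *r)
{
	return (r->head - r->tail - 1) & r->size_mask;
}

static uint32_t desc_used(const struct ring *r)
{
	return (r->tail - r->head) & r->size_mask;
}

int main(void)
{
	/* 8-slot ring: producer (tail) wrapped to 1, consumer (head) at 6 */
	struct ring r = { .head = 6, .tail = 1, .size_mask = 7 };

	assert(desc_used(&r) == 3);	/* entries 6, 7, 0 are in flight */
	assert(desc_unused(&r) == 4);	/* 7 usable slots minus 3 used */
	return 0;
}
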
104 return netdev_get_tx_queue(dev, ring->q_idx); in txring_txq()
122 struct netdev_queue *dev_queue = txring_txq(skb->dev, ring); in fbnic_tx_sent_queue()
123 unsigned int bytecount = FBNIC_XMIT_CB(skb)->bytecount; in fbnic_tx_sent_queue()
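
fbnic_tx_sent_queue() feeds frame byte counts into BQL (byte queue limits); the matching completion-side call is netdev_tx_completed_queue() in the Tx clean path. A kernel-style sketch of how the two halves pair up (the example_* helpers are hypothetical, only the netdev_* calls are real APIs):

#include <linux/netdevice.h>

/* On the xmit path, __netdev_tx_sent_queue() folds in xmit_more, so
 * its return value doubles as "ring the doorbell now". On the clean
 * path, netdev_tx_completed_queue() feeds the freed packet/byte
 * counts back so BQL can resize its in-flight budget and wake the
 * queue.
 */
static bool example_sent(struct netdev_queue *txq, struct sk_buff *skb)
{
	return __netdev_tx_sent_queue(txq, skb->len, netdev_xmit_more());
}

static void example_completed(struct netdev_queue *txq,
			      unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(txq, pkts, bytes);
}
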
162 if (!unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) in fbnic_tx_tstamp()
165 fbn = netdev_priv(skb->dev); in fbnic_tx_tstamp()
166 if (fbn->hwtstamp_config.tx_type == HWTSTAMP_TX_OFF) in fbnic_tx_tstamp()
169 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in fbnic_tx_tstamp()
170 FBNIC_XMIT_CB(skb)->flags |= FBNIC_XMIT_CB_TS; in fbnic_tx_tstamp()
171 FBNIC_XMIT_CB(skb)->hw_head = -1; in fbnic_tx_tstamp()
184 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) in fbnic_tx_offloads()
188 l3len = skb_checksum_start(skb) - skb_network_header(skb); in fbnic_tx_offloads()
191 skb->csum_offset / 2)); in fbnic_tx_offloads()
205 if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) in fbnic_rx_csum()
209 skb->ip_summed = CHECKSUM_UNNECESSARY; in fbnic_rx_csum()
213 skb->ip_summed = CHECKSUM_COMPLETE; in fbnic_rx_csum()
214 skb->csum = (__force __wsum)csum; in fbnic_rx_csum()
221 struct device *dev = skb->dev->dev.parent; in fbnic_tx_map()
222 unsigned int tail = ring->tail, first; in fbnic_tx_map()
228 ring->tx_buf[tail] = skb; in fbnic_tx_map()
231 tail &= ring->size_mask; in fbnic_tx_map()
235 data_len = skb->data_len; in fbnic_tx_map()
240 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); in fbnic_tx_map()
242 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in fbnic_tx_map()
243 twd = &ring->desc[tail]; in fbnic_tx_map()
254 tail &= ring->size_mask; in fbnic_tx_map()
260 data_len -= size; in fbnic_tx_map()
270 FBNIC_XMIT_CB(skb)->desc_count = ((twd - meta) + 1) & ring->size_mask; in fbnic_tx_map()
272 ring->tail = tail; in fbnic_tx_map()
278 fbnic_maybe_stop_tx(skb->dev, ring, FBNIC_MAX_SKB_DESC); in fbnic_tx_map()
286 writel(tail, ring->doorbell); in fbnic_tx_map()
292 netdev_err(skb->dev, "TX DMA map failed\n"); in fbnic_tx_map()
295 tail--; in fbnic_tx_map()
296 tail &= ring->size_mask; in fbnic_tx_map()
297 twd = &ring->desc[tail]; in fbnic_tx_map()
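
Lines 292-297 are the tail end of the standard DMA map-failure pattern: mappings are committed slot by slot, and on failure the tail index walks backwards to the frame's first descriptor, releasing each slot. A standalone model of the unwind, where map_one()/unmap_one() are hypothetical stand-ins for the dma_map_single()/dma_unmap_*() calls in the real driver:

static int map_frame(unsigned int first, unsigned int count,
		     unsigned int size_mask,
		     int (*map_one)(unsigned int idx),
		     void (*unmap_one)(unsigned int idx))
{
	unsigned int tail = first;

	while (count--) {
		if (map_one(tail))
			goto dma_error;
		tail = (tail + 1) & size_mask;
	}
	return 0;

dma_error:
	/* Walk back to the first descriptor, releasing as we go */
	while (tail != first) {
		tail = (tail - 1) & size_mask;
		unmap_one(tail);
	}
	return -1;
}
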
312 __le64 *meta = &ring->desc[ring->tail]; in fbnic_xmit_frame_ring()
321 * + 7 desc gap to keep tail from touching head in fbnic_xmit_frame_ring()
324 desc_needed = skb_shinfo(skb)->nr_frags + 10; in fbnic_xmit_frame_ring()
325 if (fbnic_maybe_stop_tx(skb->dev, ring, desc_needed)) in fbnic_xmit_frame_ring()
331 FBNIC_XMIT_CB(skb)->bytecount = skb->len; in fbnic_xmit_frame_ring()
332 FBNIC_XMIT_CB(skb)->desc_count = 0; in fbnic_xmit_frame_ring()
345 u64_stats_update_begin(&ring->stats.syncp); in fbnic_xmit_frame_ring()
346 ring->stats.dropped++; in fbnic_xmit_frame_ring()
347 u64_stats_update_end(&ring->stats.syncp); in fbnic_xmit_frame_ring()
354 unsigned int q_map = skb->queue_mapping; in fbnic_xmit_frame()
356 return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]); in fbnic_xmit_frame()
365 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) in fbnic_features_check()
369 l3len = skb_checksum_start(skb) - skb_network_header(skb); in fbnic_features_check()
375 if ((l2len | l3len | skb->csum_offset) % 2 || in fbnic_features_check()
378 !FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2)) in fbnic_features_check()
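
The "% 2" test and the "/ 2" packing exist because the Tx work descriptor stores header lengths and the checksum offset in 2-byte units inside narrow fields; FIELD_FIT() additionally rejects values that would overflow FBNIC_TWD_CSUM_OFFSET_MASK. A standalone model of the gate (the 5-bit field width is illustrative, not the hardware's real size):

#include <stdbool.h>

#define EXAMPLE_CSUM_OFFSET_BITS 5	/* illustrative field width */

static bool csum_offload_ok(unsigned int l2len, unsigned int l3len,
			    unsigned int csum_offset)
{
	if ((l2len | l3len | csum_offset) % 2)
		return false;	/* not expressible in 2-byte units */

	if (csum_offset / 2 >= (1u << EXAMPLE_CSUM_OFFSET_BITS))
		return false;	/* would overflow the descriptor field */

	return true;
}
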
389 unsigned int head = ring->head; in fbnic_clean_twq0() local
393 clean_desc = (hw_head - head) & ring->size_mask; in fbnic_clean_twq0()
396 struct sk_buff *skb = ring->tx_buf[head]; in fbnic_clean_twq0()
399 desc_cnt = FBNIC_XMIT_CB(skb)->desc_count; in fbnic_clean_twq0()
403 if (unlikely(FBNIC_XMIT_CB(skb)->flags & FBNIC_XMIT_CB_TS)) { in fbnic_clean_twq0()
404 FBNIC_XMIT_CB(skb)->hw_head = hw_head; in fbnic_clean_twq0()
410 ring->tx_buf[head] = NULL; in fbnic_clean_twq0()
412 clean_desc -= desc_cnt; in fbnic_clean_twq0()
414 while (!(ring->desc[head] & FBNIC_TWD_TYPE(AL))) { in fbnic_clean_twq0()
415 head++; in fbnic_clean_twq0()
416 head &= ring->size_mask; in fbnic_clean_twq0()
417 desc_cnt--; in fbnic_clean_twq0()
420 fbnic_unmap_single_twd(nv->dev, &ring->desc[head]); in fbnic_clean_twq0()
421 head++; in fbnic_clean_twq0()
422 head &= ring->size_mask; in fbnic_clean_twq0()
423 desc_cnt--; in fbnic_clean_twq0()
425 while (desc_cnt--) { in fbnic_clean_twq0()
426 fbnic_unmap_page_twd(nv->dev, &ring->desc[head]); in fbnic_clean_twq0()
427 head++; in fbnic_clean_twq0()
428 head &= ring->size_mask; in fbnic_clean_twq0()
431 total_bytes += FBNIC_XMIT_CB(skb)->bytecount; in fbnic_clean_twq0()
440 ring->head = head; in fbnic_clean_twq0()
442 txq = txring_txq(nv->napi.dev, ring); in fbnic_clean_twq0()
445 u64_stats_update_begin(&ring->stats.syncp); in fbnic_clean_twq0()
446 ring->stats.dropped += total_packets; in fbnic_clean_twq0()
447 ring->stats.ts_lost += ts_lost; in fbnic_clean_twq0()
448 u64_stats_update_end(&ring->stats.syncp); in fbnic_clean_twq0()
454 u64_stats_update_begin(&ring->stats.syncp); in fbnic_clean_twq0()
455 ring->stats.bytes += total_bytes; in fbnic_clean_twq0()
456 ring->stats.packets += total_packets; in fbnic_clean_twq0()
457 u64_stats_update_end(&ring->stats.syncp); in fbnic_clean_twq0()
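
The begin/end pairs bracketing every counter update are the writer half of the u64_stats seqcount; the reader half (visible in fbnic_ts40_to_ns() above and used again by the stats aggregation further down) retries until it sees a tear-free snapshot, which is what makes 64-bit counters safe on 32-bit machines. A kernel-style sketch with a hypothetical stats struct:

#include <linux/u64_stats_sync.h>

struct example_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void example_update(struct example_stats *s, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&s->syncp);
	s->packets += pkts;
	s->bytes += bytes;
	u64_stats_update_end(&s->syncp);
}

static void example_fetch(struct example_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}
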
471 int head; in fbnic_clean_tsq() local
474 head = (*ts_head < 0) ? ring->head : *ts_head; in fbnic_clean_tsq()
479 if (head == ring->tail) { in fbnic_clean_tsq()
481 netdev_err(nv->napi.dev, in fbnic_clean_tsq()
486 skb = ring->tx_buf[head]; in fbnic_clean_tsq()
487 desc_cnt = FBNIC_XMIT_CB(skb)->desc_count; in fbnic_clean_tsq()
489 head += desc_cnt; in fbnic_clean_tsq()
490 head &= ring->size_mask; in fbnic_clean_tsq()
491 } while (!(FBNIC_XMIT_CB(skb)->flags & FBNIC_XMIT_CB_TS)); in fbnic_clean_tsq()
493 fbn = netdev_priv(nv->napi.dev); in fbnic_clean_tsq()
499 *ts_head = head; in fbnic_clean_tsq()
501 FBNIC_XMIT_CB(skb)->flags &= ~FBNIC_XMIT_CB_TS; in fbnic_clean_tsq()
503 head = FBNIC_XMIT_CB(skb)->hw_head; in fbnic_clean_tsq()
504 if (head >= 0) in fbnic_clean_tsq()
505 *head0 = head; in fbnic_clean_tsq()
509 u64_stats_update_begin(&ring->stats.syncp); in fbnic_clean_tsq()
510 ring->stats.ts_packets++; in fbnic_clean_tsq()
511 u64_stats_update_end(&ring->stats.syncp); in fbnic_clean_tsq()
517 struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; in fbnic_page_pool_init()
520 rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX; in fbnic_page_pool_init()
521 rx_buf->page = page; in fbnic_page_pool_init()
527 struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; in fbnic_page_pool_get()
529 rx_buf->pagecnt_bias--; in fbnic_page_pool_get()
531 return rx_buf->page; in fbnic_page_pool_get()
537 struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; in fbnic_page_pool_drain()
538 struct page *page = rx_buf->page; in fbnic_page_pool_drain()
540 if (!page_pool_unref_page(page, rx_buf->pagecnt_bias)) in fbnic_page_pool_drain()
541 page_pool_put_unrefed_page(nv->page_pool, page, -1, !!budget); in fbnic_page_pool_drain()
543 rx_buf->page = NULL; in fbnic_page_pool_drain()
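
The pagecnt_bias scheme prepays a large batch of page references at init, spends them with a plain decrement on the hot path, and reconciles against the page pool's real reference count only at drain time. A standalone model of the idea (EXAMPLE_BIAS_MAX is illustrative):

#include <stdatomic.h>
#include <stdbool.h>

#define EXAMPLE_BIAS_MAX (1u << 30)	/* illustrative budget */

struct example_buf {
	atomic_uint refs;	/* stands in for the page's pp ref count */
	unsigned int bias;	/* local, lock-free budget */
};

static void example_init(struct example_buf *b)
{
	atomic_init(&b->refs, EXAMPLE_BIAS_MAX);
	b->bias = EXAMPLE_BIAS_MAX;
}

static void example_get(struct example_buf *b)
{
	b->bias--;		/* hot path: no atomic op at all */
}

/* Settle the unspent budget in one atomic; true means the buffer is
 * truly unreferenced and may be recycled, mirroring the
 * page_pool_unref_page() check above.
 */
static bool example_drain(struct example_buf *b)
{
	return atomic_fetch_sub(&b->refs, b->bias) == b->bias;
}
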
550 fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0); in fbnic_clean_twq()
552 fbnic_clean_twq0(nv, napi_budget, &qt->sub0, true, ts_head); in fbnic_clean_twq()
559 struct fbnic_ring *cmpl = &qt->cmpl; in fbnic_clean_tcq()
560 s32 head0 = -1, ts_head = -1; in fbnic_clean_tcq()
562 u32 head = cmpl->head; in fbnic_clean_tcq() local
564 done = (head & (cmpl->size_mask + 1)) ? 0 : cpu_to_le64(FBNIC_TCD_DONE); in fbnic_clean_tcq()
565 raw_tcd = &cmpl->desc[head & cmpl->size_mask]; in fbnic_clean_tcq()
589 fbnic_clean_tsq(nv, &qt->sub0, tcd, &ts_head, &head0); in fbnic_clean_tcq()
596 head++; in fbnic_clean_tcq()
597 if (!(head & cmpl->size_mask)) { in fbnic_clean_tcq()
599 raw_tcd = &cmpl->desc[0]; in fbnic_clean_tcq()
603 /* Record the current head/tail of the queue */ in fbnic_clean_tcq()
604 if (cmpl->head != head) { in fbnic_clean_tcq()
605 cmpl->head = head; in fbnic_clean_tcq()
606 writel(head & cmpl->size_mask, cmpl->doorbell); in fbnic_clean_tcq()
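
Note that head deliberately counts one bit past size_mask: that wrap bit selects which DONE polarity is expected on the current lap, so completions left over from the previous pass are never mistaken for new ones, and it is masked off again in the doorbell write above. A standalone model, assuming the hardware inverts the DONE bit it writes on each pass (the usual scheme, consistent with the expressions shown):

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_DONE	(1ull << 63)	/* illustrative bit position */

/* The wrap bit of "head" (size_mask + 1) tells us which DONE value
 * the hardware writes on this lap; a zeroed or stale entry always
 * carries the other value.
 */
static bool example_entry_fresh(uint64_t desc, uint32_t head,
				uint32_t size_mask)
{
	uint64_t fresh = (head & (size_mask + 1)) ? 0 : EXAMPLE_DONE;

	return (desc & EXAMPLE_DONE) == fresh;
}
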
616 unsigned int head = ring->head; in fbnic_clean_bdq() local
618 if (head == hw_head) in fbnic_clean_bdq()
622 fbnic_page_pool_drain(ring, head, nv, napi_budget); in fbnic_clean_bdq()
624 head++; in fbnic_clean_bdq()
625 head &= ring->size_mask; in fbnic_clean_bdq()
626 } while (head != hw_head); in fbnic_clean_bdq()
628 ring->head = head; in fbnic_clean_bdq()
633 __le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT]; in fbnic_bd_prep()
649 } while (--i); in fbnic_bd_prep()
655 unsigned int i = bdq->tail; in fbnic_fill_bdq()
663 page = page_pool_dev_alloc_pages(nv->page_pool); in fbnic_fill_bdq()
671 i &= bdq->size_mask; in fbnic_fill_bdq()
673 count--; in fbnic_fill_bdq()
676 if (bdq->tail != i) { in fbnic_fill_bdq()
677 bdq->tail = i; in fbnic_fill_bdq()
682 writel(i, bdq->doorbell); in fbnic_fill_bdq()
696 return pg_off - FBNIC_RX_HROOM; in fbnic_hdr_pg_start()
706 return ALIGN(pg_off, 128) - FBNIC_RX_HROOM; in fbnic_hdr_pg_end()
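
fbnic_hdr_pg_start()/fbnic_hdr_pg_end() pack header buffers into a page at 128-byte granularity: the end of one buffer is wherever the next buffer would begin, minus that next buffer's headroom. A standalone model with illustrative room sizes (not fbnic's real FBNIC_RX_HROOM/FBNIC_RX_TROOM values):

#define EXAMPLE_HROOM	64u	/* illustrative */
#define EXAMPLE_TROOM	32u	/* illustrative */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned int example_hdr_pg_end(unsigned int pg_off, unsigned int len)
{
	/* Find where the next header buffer would start... */
	pg_off += len + EXAMPLE_TROOM + EXAMPLE_HROOM;

	/* ...then back off its headroom to get this buffer's end */
	return ALIGN_UP(pg_off, 128) - EXAMPLE_HROOM;
}
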
715 struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx); in fbnic_pkt_prepare()
721 WARN_ON_ONCE(pkt->buff.data_hard_start); in fbnic_pkt_prepare()
723 /* Short-cut the end calculation if we know page is fully consumed */ in fbnic_pkt_prepare()
728 headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD; in fbnic_pkt_prepare()
729 frame_sz = hdr_pg_end - hdr_pg_start; in fbnic_pkt_prepare()
730 xdp_init_buff(&pkt->buff, frame_sz, NULL); in fbnic_pkt_prepare()
735 dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page), in fbnic_pkt_prepare()
742 xdp_prepare_buff(&pkt->buff, hdr_start, headroom, in fbnic_pkt_prepare()
743 len - FBNIC_RX_PAD, true); in fbnic_pkt_prepare()
745 pkt->data_truesize = 0; in fbnic_pkt_prepare()
746 pkt->data_len = 0; in fbnic_pkt_prepare()
747 pkt->nr_frags = 0; in fbnic_pkt_prepare()
757 struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx); in fbnic_add_rx_frag()
762 FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128); in fbnic_add_rx_frag()
768 dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page), in fbnic_add_rx_frag()
772 shinfo = xdp_get_shared_info_from_buff(&pkt->buff); in fbnic_add_rx_frag()
775 pkt->data_truesize += truesize; in fbnic_add_rx_frag()
777 __skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len); in fbnic_add_rx_frag()
780 pkt->data_len += len; in fbnic_add_rx_frag()
790 if (!pkt->buff.data_hard_start) in fbnic_put_pkt_buff()
793 shinfo = xdp_get_shared_info_from_buff(&pkt->buff); in fbnic_put_pkt_buff()
794 nr_frags = pkt->nr_frags; in fbnic_put_pkt_buff()
796 while (nr_frags--) { in fbnic_put_pkt_buff()
797 page = skb_frag_page(&shinfo->frags[nr_frags]); in fbnic_put_pkt_buff()
798 page_pool_put_full_page(nv->page_pool, page, !!budget); in fbnic_put_pkt_buff()
801 page = virt_to_page(pkt->buff.data_hard_start); in fbnic_put_pkt_buff()
802 page_pool_put_full_page(nv->page_pool, page, !!budget); in fbnic_put_pkt_buff()
808 unsigned int nr_frags = pkt->nr_frags; in fbnic_build_skb()
813 truesize = xdp_data_hard_end(&pkt->buff) + FBNIC_RX_TROOM - in fbnic_build_skb()
814 pkt->buff.data_hard_start; in fbnic_build_skb()
817 skb = napi_build_skb(pkt->buff.data_hard_start, truesize); in fbnic_build_skb()
822 skb_reserve(skb, pkt->buff.data - pkt->buff.data_hard_start); in fbnic_build_skb()
823 __skb_put(skb, pkt->buff.data_end - pkt->buff.data); in fbnic_build_skb()
826 skb_metadata_set(skb, pkt->buff.data - pkt->buff.data_meta); in fbnic_build_skb()
830 /* Verify that shared info didn't move */ in fbnic_build_skb()
831 shinfo = xdp_get_shared_info_from_buff(&pkt->buff); in fbnic_build_skb()
834 skb->truesize += pkt->data_truesize; in fbnic_build_skb()
835 skb->data_len += pkt->data_len; in fbnic_build_skb()
836 shinfo->nr_frags = nr_frags; in fbnic_build_skb()
837 skb->len += pkt->data_len; in fbnic_build_skb()
843 skb->protocol = eth_type_trans(skb, nv->napi.dev); in fbnic_build_skb()
846 if (pkt->hwtstamp) in fbnic_build_skb()
847 skb_hwtstamps(skb)->hwtstamp = pkt->hwtstamp; in fbnic_build_skb()
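
fbnic_build_skb() converts the accumulated xdp_buff into an skb without copying: the skb is built around the existing buffer and the offsets recorded in the xdp_buff are replayed onto it. A kernel-style sketch of just that core (fragment, metadata, and timestamp handling from the real function omitted; example_build_skb is hypothetical, the called APIs are real):

#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/xdp.h>

static struct sk_buff *example_build_skb(struct napi_struct *napi,
					 struct xdp_buff *buff,
					 unsigned int truesize)
{
	struct sk_buff *skb;

	skb = napi_build_skb(buff->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* Replay the headroom and length recorded in the xdp_buff */
	skb_reserve(skb, buff->data - buff->data_hard_start);
	__skb_put(skb, buff->data_end - buff->data);

	skb->protocol = eth_type_trans(skb, napi->dev);

	return skb;
}
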
868 fbn = netdev_priv(nv->napi.dev); in fbnic_rx_tstamp()
873 pkt->hwtstamp = ns_to_ktime(ns); in fbnic_rx_tstamp()
880 struct net_device *netdev = nv->napi.dev; in fbnic_populate_skb_fields()
881 struct fbnic_ring *rcq = &qt->cmpl; in fbnic_populate_skb_fields()
885 if (netdev->features & NETIF_F_RXHASH) in fbnic_populate_skb_fields()
890 skb_record_rx_queue(skb, rcq->q_idx); in fbnic_populate_skb_fields()
902 struct fbnic_ring *rcq = &qt->cmpl; in fbnic_clean_rcq()
904 s32 head0 = -1, head1 = -1; in fbnic_clean_rcq()
906 u32 head = rcq->head; in fbnic_clean_rcq() local
908 done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0; in fbnic_clean_rcq()
909 raw_rcd = &rcq->desc[head & rcq->size_mask]; in fbnic_clean_rcq()
910 pkt = rcq->pkt; in fbnic_clean_rcq()
914 struct sk_buff *skb = ERR_PTR(-EINVAL); in fbnic_clean_rcq()
953 bytes += skb->len; in fbnic_clean_rcq()
955 napi_gro_receive(&nv->napi, skb); in fbnic_clean_rcq()
961 pkt->buff.data_hard_start = NULL; in fbnic_clean_rcq()
967 head++; in fbnic_clean_rcq()
968 if (!(head & rcq->size_mask)) { in fbnic_clean_rcq()
970 raw_rcd = &rcq->desc[0]; in fbnic_clean_rcq()
974 u64_stats_update_begin(&rcq->stats.syncp); in fbnic_clean_rcq()
975 rcq->stats.packets += packets; in fbnic_clean_rcq()
976 rcq->stats.bytes += bytes; in fbnic_clean_rcq()
977 /* Re-add ethernet header length (removed in fbnic_build_skb) */ in fbnic_clean_rcq()
978 rcq->stats.bytes += ETH_HLEN * packets; in fbnic_clean_rcq()
979 rcq->stats.dropped += dropped; in fbnic_clean_rcq()
980 u64_stats_update_end(&rcq->stats.syncp); in fbnic_clean_rcq()
984 fbnic_clean_bdq(nv, budget, &qt->sub0, head0); in fbnic_clean_rcq()
985 fbnic_fill_bdq(nv, &qt->sub0); in fbnic_clean_rcq()
988 fbnic_clean_bdq(nv, budget, &qt->sub1, head1); in fbnic_clean_rcq()
989 fbnic_fill_bdq(nv, &qt->sub1); in fbnic_clean_rcq()
991 /* Record the current head/tail of the queue */ in fbnic_clean_rcq()
992 if (rcq->head != head) { in fbnic_clean_rcq()
993 rcq->head = head; in fbnic_clean_rcq()
994 writel(head & rcq->size_mask, rcq->doorbell); in fbnic_clean_rcq()
1002 struct fbnic_dev *fbd = nv->fbd; in fbnic_nv_irq_disable()
1003 u32 v_idx = nv->v_idx; in fbnic_nv_irq_disable()
1010 struct fbnic_dev *fbd = nv->fbd; in fbnic_nv_irq_rearm()
1011 u32 v_idx = nv->v_idx; in fbnic_nv_irq_rearm()
1024 for (i = 0; i < nv->txt_count; i++) in fbnic_poll()
1025 fbnic_clean_tcq(nv, &nv->qt[i], budget); in fbnic_poll()
1027 for (j = 0; j < nv->rxt_count; j++, i++) in fbnic_poll()
1028 work_done += fbnic_clean_rcq(nv, &nv->qt[i], budget); in fbnic_poll()
1043 napi_schedule_irqoff(&nv->napi); in fbnic_msix_clean_rings()
1051 struct fbnic_queue_stats *stats = &rxr->stats; in fbnic_aggregate_ring_rx_counters()
1054 fbn->rx_stats.bytes += stats->bytes; in fbnic_aggregate_ring_rx_counters()
1055 fbn->rx_stats.packets += stats->packets; in fbnic_aggregate_ring_rx_counters()
1056 fbn->rx_stats.dropped += stats->dropped; in fbnic_aggregate_ring_rx_counters()
1062 struct fbnic_queue_stats *stats = &txr->stats; in fbnic_aggregate_ring_tx_counters()
1065 fbn->tx_stats.bytes += stats->bytes; in fbnic_aggregate_ring_tx_counters()
1066 fbn->tx_stats.packets += stats->packets; in fbnic_aggregate_ring_tx_counters()
1067 fbn->tx_stats.dropped += stats->dropped; in fbnic_aggregate_ring_tx_counters()
1068 fbn->tx_stats.ts_lost += stats->ts_lost; in fbnic_aggregate_ring_tx_counters()
1069 fbn->tx_stats.ts_packets += stats->ts_packets; in fbnic_aggregate_ring_tx_counters()
1075 if (!(txr->flags & FBNIC_RING_F_STATS)) in fbnic_remove_tx_ring()
1081 WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr); in fbnic_remove_tx_ring()
1082 fbn->tx[txr->q_idx] = NULL; in fbnic_remove_tx_ring()
1088 if (!(rxr->flags & FBNIC_RING_F_STATS)) in fbnic_remove_rx_ring()
1094 WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr); in fbnic_remove_rx_ring()
1095 fbn->rx[rxr->q_idx] = NULL; in fbnic_remove_rx_ring()
1101 struct fbnic_dev *fbd = nv->fbd; in fbnic_free_napi_vector()
1104 for (i = 0; i < nv->txt_count; i++) { in fbnic_free_napi_vector()
1105 fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0); in fbnic_free_napi_vector()
1106 fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl); in fbnic_free_napi_vector()
1109 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_free_napi_vector()
1110 fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0); in fbnic_free_napi_vector()
1111 fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1); in fbnic_free_napi_vector()
1112 fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl); in fbnic_free_napi_vector()
1116 page_pool_destroy(nv->page_pool); in fbnic_free_napi_vector()
1117 netif_napi_del(&nv->napi); in fbnic_free_napi_vector()
1118 fbn->napi[fbnic_napi_idx(nv)] = NULL; in fbnic_free_napi_vector()
1126 for (i = 0; i < fbn->num_napi; i++) in fbnic_free_napi_vectors()
1127 if (fbn->napi[i]) in fbnic_free_napi_vectors()
1128 fbnic_free_napi_vector(fbn, fbn->napi[i]); in fbnic_free_napi_vectors()
1140 .pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count, in fbnic_alloc_nv_page_pool()
1142 .dev = nv->dev, in fbnic_alloc_nv_page_pool()
1149 /* Page pool cannot exceed a size of 32768. This doesn't limit the in fbnic_alloc_nv_page_pool()
1165 nv->page_pool = pp; in fbnic_alloc_nv_page_pool()
1173 u64_stats_init(&ring->stats.syncp); in fbnic_ring_init()
1174 ring->doorbell = doorbell; in fbnic_ring_init()
1175 ring->q_idx = q_idx; in fbnic_ring_init()
1176 ring->flags = flags; in fbnic_ring_init()
1185 u32 __iomem *uc_addr = fbd->uc_addr0; in fbnic_alloc_napi_vector()
1193 return -EINVAL; in fbnic_alloc_napi_vector()
1197 return -EIO; in fbnic_alloc_napi_vector()
1202 return -ENOMEM; in fbnic_alloc_napi_vector()
1205 nv->txt_count = txt_count; in fbnic_alloc_napi_vector()
1206 nv->rxt_count = rxt_count; in fbnic_alloc_napi_vector()
1208 /* Provide pointer back to fbnic and MSI-X vectors */ in fbnic_alloc_napi_vector()
1209 nv->fbd = fbd; in fbnic_alloc_napi_vector()
1210 nv->v_idx = v_idx; in fbnic_alloc_napi_vector()
1213 fbn->napi[fbnic_napi_idx(nv)] = nv; in fbnic_alloc_napi_vector()
1214 netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll); in fbnic_alloc_napi_vector()
1217 netif_napi_set_irq(&nv->napi, in fbnic_alloc_napi_vector()
1218 pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx)); in fbnic_alloc_napi_vector()
1221 nv->dev = fbd->dev; in fbnic_alloc_napi_vector()
1236 qt = nv->qt; in fbnic_alloc_napi_vector()
1246 fbnic_ring_init(&qt->sub0, db, txq_idx, flags); in fbnic_alloc_napi_vector()
1247 fbn->tx[txq_idx] = &qt->sub0; in fbnic_alloc_napi_vector()
1248 txq_count--; in fbnic_alloc_napi_vector()
1250 fbnic_ring_init(&qt->sub0, db, 0, in fbnic_alloc_napi_vector()
1256 fbnic_ring_init(&qt->cmpl, db, 0, 0); in fbnic_alloc_napi_vector()
1259 txt_count--; in fbnic_alloc_napi_vector()
1269 fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX); in fbnic_alloc_napi_vector()
1273 fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX); in fbnic_alloc_napi_vector()
1277 fbnic_ring_init(&qt->cmpl, db, rxq_idx, FBNIC_RING_F_STATS); in fbnic_alloc_napi_vector()
1278 fbn->rx[rxq_idx] = &qt->cmpl; in fbnic_alloc_napi_vector()
1281 rxt_count--; in fbnic_alloc_napi_vector()
1291 page_pool_destroy(nv->page_pool); in fbnic_alloc_napi_vector()
1293 netif_napi_del(&nv->napi); in fbnic_alloc_napi_vector()
1294 fbn->napi[fbnic_napi_idx(nv)] = NULL; in fbnic_alloc_napi_vector()
1302 unsigned int num_tx = fbn->num_tx_queues; in fbnic_alloc_napi_vectors()
1303 unsigned int num_rx = fbn->num_rx_queues; in fbnic_alloc_napi_vectors()
1304 unsigned int num_napi = fbn->num_napi; in fbnic_alloc_napi_vectors()
1305 struct fbnic_dev *fbd = fbn->fbd; in fbnic_alloc_napi_vectors()
1318 num_tx--; in fbnic_alloc_napi_vectors()
1327 int tqpv = DIV_ROUND_UP(num_tx, num_napi - txq_idx); in fbnic_alloc_napi_vectors()
1328 int rqpv = DIV_ROUND_UP(num_rx, num_napi - rxq_idx); in fbnic_alloc_napi_vectors()
1336 num_tx -= tqpv; in fbnic_alloc_napi_vectors()
1339 num_rx -= rqpv; in fbnic_alloc_napi_vectors()
1350 return -ENOMEM; in fbnic_alloc_napi_vectors()
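
The DIV_ROUND_UP pair in fbnic_alloc_napi_vectors() redistributes the queues still unassigned over the vectors still unserved at every step, which keeps the per-vector counts within one of each other. A standalone walk-through:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* 5 queues over 3 vectors assign 2, 2, 1: dividing what is left by
 * the vectors remaining never lets one vector run more than one
 * queue ahead of another.
 */
int main(void)
{
	unsigned int num_q = 5, num_vec = 3;
	unsigned int v;

	for (v = 0; v < num_vec; v++) {
		unsigned int qpv = DIV_ROUND_UP(num_q, num_vec - v);

		printf("vector %u: %u queue(s)\n", v, qpv);
		num_q -= qpv;
	}
	return 0;
}
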
1356 kvfree(ring->buffer); in fbnic_free_ring_resources()
1357 ring->buffer = NULL; in fbnic_free_ring_resources()
1360 if (!ring->size) in fbnic_free_ring_resources()
1363 dma_free_coherent(dev, ring->size, ring->desc, ring->dma); in fbnic_free_ring_resources()
1364 ring->size_mask = 0; in fbnic_free_ring_resources()
1365 ring->size = 0; in fbnic_free_ring_resources()
1371 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_tx_ring_desc()
1375 size = ALIGN(array_size(sizeof(*txr->desc), fbn->txq_size), 4096); in fbnic_alloc_tx_ring_desc()
1377 txr->desc = dma_alloc_coherent(dev, size, &txr->dma, in fbnic_alloc_tx_ring_desc()
1379 if (!txr->desc) in fbnic_alloc_tx_ring_desc()
1380 return -ENOMEM; in fbnic_alloc_tx_ring_desc()
1382 /* txq_size should be a power of 2, so mask is just that -1 */ in fbnic_alloc_tx_ring_desc()
1383 txr->size_mask = fbn->txq_size - 1; in fbnic_alloc_tx_ring_desc()
1384 txr->size = size; in fbnic_alloc_tx_ring_desc()
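
The allocation above encodes two conventions at once: a power-of-2 entry count turns modulo into "index & mask", and rounding the byte size up to 4096 keeps the coherent DMA region page-aligned. A standalone model of the sizing:

#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

static size_t example_ring_bytes(size_t desc_size, uint32_t entries,
				 uint32_t *size_mask)
{
	*size_mask = entries - 1;	/* valid only for powers of 2 */

	return EXAMPLE_ALIGN(desc_size * entries, 4096);
}
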
1391 size_t size = array_size(sizeof(*txr->tx_buf), txr->size_mask + 1); in fbnic_alloc_tx_ring_buffer()
1393 txr->tx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN); in fbnic_alloc_tx_ring_buffer()
1395 return txr->tx_buf ? 0 : -ENOMEM; in fbnic_alloc_tx_ring_buffer()
1401 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_tx_ring_resources()
1404 if (txr->flags & FBNIC_RING_F_DISABLED) in fbnic_alloc_tx_ring_resources()
1411 if (!(txr->flags & FBNIC_RING_F_CTX)) in fbnic_alloc_tx_ring_resources()
1428 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_rx_ring_desc()
1429 size_t desc_size = sizeof(*rxr->desc); in fbnic_alloc_rx_ring_desc()
1433 switch (rxr->doorbell - fbnic_ring_csr_base(rxr)) { in fbnic_alloc_rx_ring_desc()
1435 rxq_size = fbn->hpq_size / FBNIC_BD_FRAG_COUNT; in fbnic_alloc_rx_ring_desc()
1439 rxq_size = fbn->ppq_size / FBNIC_BD_FRAG_COUNT; in fbnic_alloc_rx_ring_desc()
1443 rxq_size = fbn->rcq_size; in fbnic_alloc_rx_ring_desc()
1446 return -EINVAL; in fbnic_alloc_rx_ring_desc()
1452 rxr->desc = dma_alloc_coherent(dev, size, &rxr->dma, in fbnic_alloc_rx_ring_desc()
1454 if (!rxr->desc) in fbnic_alloc_rx_ring_desc()
1455 return -ENOMEM; in fbnic_alloc_rx_ring_desc()
1457 /* rxq_size should be a power of 2, so mask is just that -1 */ in fbnic_alloc_rx_ring_desc()
1458 rxr->size_mask = rxq_size - 1; in fbnic_alloc_rx_ring_desc()
1459 rxr->size = size; in fbnic_alloc_rx_ring_desc()
1466 size_t size = array_size(sizeof(*rxr->rx_buf), rxr->size_mask + 1); in fbnic_alloc_rx_ring_buffer()
1468 if (rxr->flags & FBNIC_RING_F_CTX) in fbnic_alloc_rx_ring_buffer()
1469 size = sizeof(*rxr->rx_buf) * (rxr->size_mask + 1); in fbnic_alloc_rx_ring_buffer()
1471 size = sizeof(*rxr->pkt); in fbnic_alloc_rx_ring_buffer()
1473 rxr->rx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN); in fbnic_alloc_rx_ring_buffer()
1475 return rxr->rx_buf ? 0 : -ENOMEM; in fbnic_alloc_rx_ring_buffer()
1481 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_rx_ring_resources()
1502 struct device *dev = fbn->netdev->dev.parent; in fbnic_free_qt_resources()
1504 fbnic_free_ring_resources(dev, &qt->cmpl); in fbnic_free_qt_resources()
1505 fbnic_free_ring_resources(dev, &qt->sub1); in fbnic_free_qt_resources()
1506 fbnic_free_ring_resources(dev, &qt->sub0); in fbnic_free_qt_resources()
1512 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_tx_qt_resources()
1515 err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub0); in fbnic_alloc_tx_qt_resources()
1519 err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl); in fbnic_alloc_tx_qt_resources()
1526 fbnic_free_ring_resources(dev, &qt->sub0); in fbnic_alloc_tx_qt_resources()
1533 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_rx_qt_resources()
1536 err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0); in fbnic_alloc_rx_qt_resources()
1540 err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1); in fbnic_alloc_rx_qt_resources()
1544 err = fbnic_alloc_rx_ring_resources(fbn, &qt->cmpl); in fbnic_alloc_rx_qt_resources()
1551 fbnic_free_ring_resources(dev, &qt->sub1); in fbnic_alloc_rx_qt_resources()
1553 fbnic_free_ring_resources(dev, &qt->sub0); in fbnic_alloc_rx_qt_resources()
1563 for (i = 0; i < nv->txt_count; i++) in fbnic_free_nv_resources()
1564 fbnic_free_qt_resources(fbn, &nv->qt[i]); in fbnic_free_nv_resources()
1566 for (j = 0; j < nv->rxt_count; j++, i++) in fbnic_free_nv_resources()
1567 fbnic_free_qt_resources(fbn, &nv->qt[i]); in fbnic_free_nv_resources()
1576 for (i = 0; i < nv->txt_count; i++) { in fbnic_alloc_nv_resources()
1577 err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]); in fbnic_alloc_nv_resources()
1583 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_alloc_nv_resources()
1584 err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]); in fbnic_alloc_nv_resources()
1592 while (i--) in fbnic_alloc_nv_resources()
1593 fbnic_free_qt_resources(fbn, &nv->qt[i]); in fbnic_alloc_nv_resources()
1601 for (i = 0; i < fbn->num_napi; i++) in fbnic_free_resources()
1602 fbnic_free_nv_resources(fbn, fbn->napi[i]); in fbnic_free_resources()
1607 int i, err = -ENODEV; in fbnic_alloc_resources()
1609 for (i = 0; i < fbn->num_napi; i++) { in fbnic_alloc_resources()
1610 err = fbnic_alloc_nv_resources(fbn, fbn->napi[i]); in fbnic_alloc_resources()
1618 while (i--) in fbnic_alloc_resources()
1619 fbnic_free_nv_resources(fbn, fbn->napi[i]); in fbnic_alloc_resources()
1629 for (i = 0; i < nv->txt_count; i++) { in fbnic_set_netif_napi()
1630 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_set_netif_napi()
1632 netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, in fbnic_set_netif_napi()
1633 NETDEV_QUEUE_TYPE_TX, &nv->napi); in fbnic_set_netif_napi()
1637 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_set_netif_napi()
1638 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_set_netif_napi()
1640 netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, in fbnic_set_netif_napi()
1641 NETDEV_QUEUE_TYPE_RX, &nv->napi); in fbnic_set_netif_napi()
1650 for (i = 0; i < nv->txt_count; i++) { in fbnic_reset_netif_napi()
1651 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_reset_netif_napi()
1653 netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, in fbnic_reset_netif_napi()
1658 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_reset_netif_napi()
1659 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_reset_netif_napi()
1661 netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, in fbnic_reset_netif_napi()
1670 err = netif_set_real_num_queues(fbn->netdev, fbn->num_tx_queues, in fbnic_set_netif_queues()
1671 fbn->num_rx_queues); in fbnic_set_netif_queues()
1675 for (i = 0; i < fbn->num_napi; i++) in fbnic_set_netif_queues()
1676 fbnic_set_netif_napi(fbn->napi[i]); in fbnic_set_netif_queues()
1685 for (i = 0; i < fbn->num_napi; i++) in fbnic_reset_netif_queues()
1686 fbnic_reset_netif_napi(fbn->napi[i]); in fbnic_reset_netif_queues()
1723 for (i = 0; i < fbn->num_napi; i++) { in fbnic_napi_disable()
1724 napi_disable(&fbn->napi[i]->napi); in fbnic_napi_disable()
1726 fbnic_nv_irq_disable(fbn->napi[i]); in fbnic_napi_disable()
1732 struct fbnic_dev *fbd = fbn->fbd; in fbnic_disable()
1733 int i, j, t; in fbnic_disable() local
1735 for (i = 0; i < fbn->num_napi; i++) { in fbnic_disable()
1736 struct fbnic_napi_vector *nv = fbn->napi[i]; in fbnic_disable()
1739 for (t = 0; t < nv->txt_count; t++) { in fbnic_disable()
1740 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_disable()
1742 fbnic_disable_twq0(&qt->sub0); in fbnic_disable()
1743 fbnic_disable_tcq(&qt->cmpl); in fbnic_disable()
1747 for (j = 0; j < nv->rxt_count; j++, t++) { in fbnic_disable()
1748 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_disable()
1750 fbnic_disable_bdq(&qt->sub0, &qt->sub1); in fbnic_disable()
1751 fbnic_disable_rcq(&qt->cmpl); in fbnic_disable()
1760 netdev_warn(fbd->netdev, "triggering Tx flush\n"); in fbnic_tx_flush()
1797 netdev_err(fbd->netdev, "error waiting for %s idle %d\n", dir, err); in fbnic_idle_dump()
1800 netdev_err(fbd->netdev, "0x%04x: %08x\n", in fbnic_idle_dump()
1822 if (err == -ETIMEDOUT) { in fbnic_wait_all_queues_idle()
1846 for (i = 0; i < fbn->num_napi; i++) { in fbnic_flush()
1847 struct fbnic_napi_vector *nv = fbn->napi[i]; in fbnic_flush()
1848 int j, t; in fbnic_flush() local
1851 for (t = 0; t < nv->txt_count; t++) { in fbnic_flush()
1852 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_flush()
1856 fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail); in fbnic_flush()
1859 memset(qt->cmpl.desc, 0, qt->cmpl.size); in fbnic_flush()
1862 if (qt->sub0.flags & FBNIC_RING_F_DISABLED) in fbnic_flush()
1866 tx_queue = netdev_get_tx_queue(nv->napi.dev, in fbnic_flush()
1867 qt->sub0.q_idx); in fbnic_flush()
1872 for (j = 0; j < nv->rxt_count; j++, t++) { in fbnic_flush()
1873 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_flush()
1876 fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail); in fbnic_flush()
1877 fbnic_clean_bdq(nv, 0, &qt->sub1, qt->sub1.tail); in fbnic_flush()
1880 memset(qt->cmpl.desc, 0, qt->cmpl.size); in fbnic_flush()
1882 fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0); in fbnic_flush()
1883 qt->cmpl.pkt->buff.data_hard_start = NULL; in fbnic_flush()
1892 for (i = 0; i < fbn->num_napi; i++) { in fbnic_fill()
1893 struct fbnic_napi_vector *nv = fbn->napi[i]; in fbnic_fill()
1894 int j, t; in fbnic_fill() local
1899 for (j = 0, t = nv->txt_count; j < nv->rxt_count; j++, t++) { in fbnic_fill()
1900 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_fill()
1903 fbnic_fill_bdq(nv, &qt->sub0); in fbnic_fill()
1904 fbnic_fill_bdq(nv, &qt->sub1); in fbnic_fill()
1911 u32 log_size = fls(twq->size_mask); in fbnic_enable_twq0()
1913 if (!twq->size_mask) in fbnic_enable_twq0()
1916 /* Reset head/tail */ in fbnic_enable_twq0()
1918 twq->tail = 0; in fbnic_enable_twq0()
1919 twq->head = 0; in fbnic_enable_twq0()
1922 fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAL, lower_32_bits(twq->dma)); in fbnic_enable_twq0()
1923 fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAH, upper_32_bits(twq->dma)); in fbnic_enable_twq0()
1934 u32 log_size = fls(tcq->size_mask); in fbnic_enable_tcq()
1936 if (!tcq->size_mask) in fbnic_enable_tcq()
1939 /* Reset head/tail */ in fbnic_enable_tcq()
1941 tcq->tail = 0; in fbnic_enable_tcq()
1942 tcq->head = 0; in fbnic_enable_tcq()
1945 fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAL, lower_32_bits(tcq->dma)); in fbnic_enable_tcq()
1946 fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAH, upper_32_bits(tcq->dma)); in fbnic_enable_tcq()
1952 fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_CTL, nv->v_idx); in fbnic_enable_tcq()
1953 fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_THRESHOLD, tcq->size_mask / 2); in fbnic_enable_tcq()
1965 /* Reset head/tail */ in fbnic_enable_bdq()
1967 ppq->tail = 0; in fbnic_enable_bdq()
1968 ppq->head = 0; in fbnic_enable_bdq()
1969 hpq->tail = 0; in fbnic_enable_bdq()
1970 hpq->head = 0; in fbnic_enable_bdq()
1972 log_size = fls(hpq->size_mask); in fbnic_enable_bdq()
1975 fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAL, lower_32_bits(hpq->dma)); in fbnic_enable_bdq()
1976 fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAH, upper_32_bits(hpq->dma)); in fbnic_enable_bdq()
1981 if (!ppq->size_mask) in fbnic_enable_bdq()
1984 log_size = fls(ppq->size_mask); in fbnic_enable_bdq()
1990 fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAL, lower_32_bits(ppq->dma)); in fbnic_enable_bdq()
1991 fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAH, upper_32_bits(ppq->dma)); in fbnic_enable_bdq()
2016 u32 log_size = fls(rcq->size_mask); in fbnic_enable_rcq()
2030 /* Reset head/tail */ in fbnic_enable_rcq()
2032 rcq->head = 0; in fbnic_enable_rcq()
2033 rcq->tail = 0; in fbnic_enable_rcq()
2036 fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAL, lower_32_bits(rcq->dma)); in fbnic_enable_rcq()
2037 fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAH, upper_32_bits(rcq->dma)); in fbnic_enable_rcq()
2043 fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx); in fbnic_enable_rcq()
2044 fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2); in fbnic_enable_rcq()
2053 struct fbnic_dev *fbd = fbn->fbd; in fbnic_enable()
2056 for (i = 0; i < fbn->num_napi; i++) { in fbnic_enable()
2057 struct fbnic_napi_vector *nv = fbn->napi[i]; in fbnic_enable()
2058 int j, t; in fbnic_enable() local
2061 for (t = 0; t < nv->txt_count; t++) { in fbnic_enable()
2062 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_enable()
2064 fbnic_enable_twq0(&qt->sub0); in fbnic_enable()
2065 fbnic_enable_tcq(nv, &qt->cmpl); in fbnic_enable()
2069 for (j = 0; j < nv->rxt_count; j++, t++) { in fbnic_enable()
2070 struct fbnic_q_triad *qt = &nv->qt[t]; in fbnic_enable()
2072 fbnic_enable_bdq(&qt->sub0, &qt->sub1); in fbnic_enable()
2073 fbnic_config_drop_mode_rcq(nv, &qt->cmpl); in fbnic_enable()
2074 fbnic_enable_rcq(nv, &qt->cmpl); in fbnic_enable()
2083 struct fbnic_dev *fbd = nv->fbd; in fbnic_nv_irq_enable()
2088 fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val); in fbnic_nv_irq_enable()
2094 struct fbnic_dev *fbd = fbn->fbd; in fbnic_napi_enable()
2097 for (i = 0; i < fbn->num_napi; i++) { in fbnic_napi_enable()
2098 struct fbnic_napi_vector *nv = fbn->napi[i]; in fbnic_napi_enable()
2100 napi_enable(&nv->napi); in fbnic_napi_enable()
2107 irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32); in fbnic_napi_enable()
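
The rearm bitmap packs 32 vectors per u32 word, so vector N lives at bit N % 32 of word N / 32 (BIT() is the kernel's shift macro). A standalone model of that indexing:

#include <stdint.h>

static void example_irq_set(uint32_t *irqs, unsigned int v_idx)
{
	irqs[v_idx / 32] |= UINT32_C(1) << (v_idx % 32);
}
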
2127 struct fbnic_dev *fbd = fbn->fbd; in fbnic_napi_depletion_check()
2128 int i, j, t; in fbnic_napi_depletion_check() local
2130 for (i = 0; i < fbn->num_napi; i++) { in fbnic_napi_depletion_check()
2131 struct fbnic_napi_vector *nv = fbn->napi[i]; in fbnic_napi_depletion_check()
2134 for (t = nv->txt_count, j = 0; j < nv->rxt_count; j++, t++) { in fbnic_napi_depletion_check()
2138 if (fbnic_desc_used(&nv->qt[t].sub0) < 4 || in fbnic_napi_depletion_check()
2139 fbnic_desc_used(&nv->qt[t].sub1) < 4) in fbnic_napi_depletion_check()
2140 irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32); in fbnic_napi_depletion_check()