Lines Matching +full:disable +full:- +full:eop

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2024 Intel Corporation. */
5 Copyright (c) 2006 - 2007 Myricom, Inc. for some LRO specific code
42 "Copyright (c) 2009 - 2024 Intel Corporation.";
58 /* ixgbevf_pci_tbl - PCI Device ID Table
88 static int debug = -1;
96 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && in ixgbevf_service_event_schedule()
97 !test_bit(__IXGBEVF_REMOVING, &adapter->state) && in ixgbevf_service_event_schedule()
98 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)) in ixgbevf_service_event_schedule()
99 queue_work(ixgbevf_wq, &adapter->service_task); in ixgbevf_service_event_schedule()
104 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)); in ixgbevf_service_event_complete()
108 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); in ixgbevf_service_event_complete()
121 struct ixgbevf_adapter *adapter = hw->back; in ixgbevf_remove_adapter()
123 if (!hw->hw_addr) in ixgbevf_remove_adapter()
125 hw->hw_addr = NULL; in ixgbevf_remove_adapter()
126 dev_err(&adapter->pdev->dev, "Adapter removed\n"); in ixgbevf_remove_adapter()
127 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) in ixgbevf_remove_adapter()
152 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); in ixgbevf_read_reg()
164 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
166 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
174 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_ivar()
176 if (direction == -1) { in ixgbevf_set_ivar()
196 return ring->stats.packets; in ixgbevf_get_tx_completed()
201 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev); in ixgbevf_get_tx_pending()
202 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_get_tx_pending()
204 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx)); in ixgbevf_get_tx_pending()
205 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx)); in ixgbevf_get_tx_pending()
209 tail - head : (tail + ring->count - head); in ixgbevf_get_tx_pending()
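
A minimal userspace sketch of the pending-descriptor arithmetic above, assuming head and tail are the values read back from the VFTDH/VFTDT registers; the ring size and sample values below are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Pending descriptors between a hardware head and a software tail,
 * handling the case where the tail has wrapped past the end of the
 * ring, mirroring the expression in ixgbevf_get_tx_pending().
 */
static uint32_t tx_pending(uint32_t head, uint32_t tail, uint32_t ring_count)
{
	if (head == tail)
		return 0;
	return (head < tail) ? tail - head : tail + ring_count - head;
}

int main(void)
{
	printf("%u\n", (unsigned)tx_pending(10, 50, 512));   /* no wrap: 40 */
	printf("%u\n", (unsigned)tx_pending(500, 20, 512));  /* wrapped: 32 */
	return 0;
}
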
217 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in ixgbevf_check_tx_hang()
230 &tx_ring->state); in ixgbevf_check_tx_hang()
233 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); in ixgbevf_check_tx_hang()
236 tx_ring->tx_stats.tx_done_old = tx_done; in ixgbevf_check_tx_hang()
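
The hang check above compares the completed-packet count against the value saved on the previous pass. A hedged sketch of that state machine follows, with the armed flag and pending count as plain variables rather than ring state bits; the exact trigger condition is elided in this excerpt, so the arm-then-declare flow here is an assumption based on the surrounding code.

#include <stdbool.h>
#include <stdio.h>

struct hang_state {
	unsigned int tx_done_old;   /* completed count seen last pass */
	bool armed;                 /* first stall already observed   */
};

/* Report a hang only when the queue makes no progress on two consecutive
 * checks while work is still pending.
 */
static bool check_tx_hang(struct hang_state *s, unsigned int tx_done,
			  unsigned int tx_pending)
{
	if (tx_done != s->tx_done_old || !tx_pending) {
		/* progress was made (or nothing pending): disarm */
		s->tx_done_old = tx_done;
		s->armed = false;
		return false;
	}
	if (s->armed)
		return true;    /* stalled twice in a row: hung */
	s->armed = true;        /* arm and give it one more interval */
	return false;
}

int main(void)
{
	struct hang_state s = { 0 };

	printf("%d\n", check_tx_hang(&s, 5, 3)); /* progress: 0   */
	printf("%d\n", check_tx_hang(&s, 5, 3)); /* stall, arm: 0 */
	printf("%d\n", check_tx_hang(&s, 5, 3)); /* still stuck: 1 */
	return 0;
}
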
244 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { in ixgbevf_tx_timeout_reset()
245 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); in ixgbevf_tx_timeout_reset()
251 * ixgbevf_tx_timeout - Respond to a Tx Hang
263 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
271 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_clean_tx_irq()
275 unsigned int budget = tx_ring->count / 2; in ixgbevf_clean_tx_irq()
276 unsigned int i = tx_ring->next_to_clean; in ixgbevf_clean_tx_irq()
278 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) in ixgbevf_clean_tx_irq()
281 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_clean_tx_irq()
283 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
286 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; in ixgbevf_clean_tx_irq()
296 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) in ixgbevf_clean_tx_irq()
300 tx_buffer->next_to_watch = NULL; in ixgbevf_clean_tx_irq()
303 total_bytes += tx_buffer->bytecount; in ixgbevf_clean_tx_irq()
304 total_packets += tx_buffer->gso_segs; in ixgbevf_clean_tx_irq()
305 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) in ixgbevf_clean_tx_irq()
310 page_frag_free(tx_buffer->data); in ixgbevf_clean_tx_irq()
312 napi_consume_skb(tx_buffer->skb, napi_budget); in ixgbevf_clean_tx_irq()
315 dma_unmap_single(tx_ring->dev, in ixgbevf_clean_tx_irq()
329 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
330 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_irq()
336 dma_unmap_page(tx_ring->dev, in ixgbevf_clean_tx_irq()
349 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
350 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_irq()
358 budget--; in ixgbevf_clean_tx_irq()
361 i += tx_ring->count; in ixgbevf_clean_tx_irq()
362 tx_ring->next_to_clean = i; in ixgbevf_clean_tx_irq()
363 u64_stats_update_begin(&tx_ring->syncp); in ixgbevf_clean_tx_irq()
364 tx_ring->stats.bytes += total_bytes; in ixgbevf_clean_tx_irq()
365 tx_ring->stats.packets += total_packets; in ixgbevf_clean_tx_irq()
366 u64_stats_update_end(&tx_ring->syncp); in ixgbevf_clean_tx_irq()
367 q_vector->tx.total_bytes += total_bytes; in ixgbevf_clean_tx_irq()
368 q_vector->tx.total_packets += total_packets; in ixgbevf_clean_tx_irq()
369 adapter->tx_ipsec += total_ipsec; in ixgbevf_clean_tx_irq()
372 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_clean_tx_irq()
375 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch; in ixgbevf_clean_tx_irq()
384 " eop_desc->wb.status <%x>\n" in ixgbevf_clean_tx_irq()
388 tx_ring->queue_index, in ixgbevf_clean_tx_irq()
389 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)), in ixgbevf_clean_tx_irq()
390 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)), in ixgbevf_clean_tx_irq()
391 tx_ring->next_to_use, i, in ixgbevf_clean_tx_irq()
392 eop_desc, (eop_desc ? eop_desc->wb.status : 0), in ixgbevf_clean_tx_irq()
393 tx_ring->tx_buffer_info[i].time_stamp, jiffies); in ixgbevf_clean_tx_irq()
396 netif_stop_subqueue(tx_ring->netdev, in ixgbevf_clean_tx_irq()
397 tx_ring->queue_index); in ixgbevf_clean_tx_irq()
409 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in ixgbevf_clean_tx_irq()
416 if (__netif_subqueue_stopped(tx_ring->netdev, in ixgbevf_clean_tx_irq()
417 tx_ring->queue_index) && in ixgbevf_clean_tx_irq()
418 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { in ixgbevf_clean_tx_irq()
419 netif_wake_subqueue(tx_ring->netdev, in ixgbevf_clean_tx_irq()
420 tx_ring->queue_index); in ixgbevf_clean_tx_irq()
421 ++tx_ring->tx_stats.restart_queue; in ixgbevf_clean_tx_irq()
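
The cleanup loop above biases the running index by -count ("i -= tx_ring->count") so the wrap test is a simple zero check instead of a compare against the ring size. A minimal sketch of that indexing pattern over a plain array; the ring size, starting slot and budget are arbitrary.

#include <stdio.h>

#define RING_COUNT 8

int main(void)
{
	int entries[RING_COUNT];
	unsigned int next_to_clean = 5;          /* arbitrary starting slot */
	int i = (int)next_to_clean - RING_COUNT; /* bias below zero          */
	int *entry = &entries[next_to_clean];
	unsigned int budget = 6;

	while (budget--) {
		printf("processing slot %d\n", (int)(entry - entries));
		entry++;
		i++;
		if (!i) {               /* walked off the end: wrap */
			i -= RING_COUNT;
			entry = entries;
		}
	}
	/* convert back to a normal ring index, as the driver does at the end */
	printf("next_to_clean = %d\n", i + RING_COUNT);
	return 0;
}
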
429 * ixgbevf_rx_skb - Helper function to determine proper Rx method
436 napi_gro_receive(&q_vector->napi, skb); in ixgbevf_rx_skb()
451 if (!(ring->netdev->features & NETIF_F_RXHASH)) in ixgbevf_rx_hash()
454 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & in ixgbevf_rx_hash()
460 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), in ixgbevf_rx_hash()
466 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
478 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in ixgbevf_rx_checksum()
484 ring->rx_stats.csum_err++; in ixgbevf_rx_checksum()
492 ring->rx_stats.csum_err++; in ixgbevf_rx_checksum()
497 skb->ip_summed = CHECKSUM_UNNECESSARY; in ixgbevf_rx_checksum()
501 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
503 * @rx_desc: pointer to the EOP Rx descriptor
518 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); in ixgbevf_process_skb_fields()
519 unsigned long *active_vlans = netdev_priv(rx_ring->netdev); in ixgbevf_process_skb_fields()
528 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ixgbevf_process_skb_fields()
537 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbevf_get_rx_buffer()
538 prefetchw(rx_buffer->page); in ixgbevf_get_rx_buffer()
541 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_get_rx_buffer()
542 rx_buffer->dma, in ixgbevf_get_rx_buffer()
543 rx_buffer->page_offset, in ixgbevf_get_rx_buffer()
547 rx_buffer->pagecnt_bias--; in ixgbevf_get_rx_buffer()
564 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbevf_put_rx_buffer()
568 __page_frag_cache_drain(rx_buffer->page, in ixgbevf_put_rx_buffer()
569 rx_buffer->pagecnt_bias); in ixgbevf_put_rx_buffer()
573 rx_buffer->page = NULL; in ixgbevf_put_rx_buffer()
577 * ixgbevf_is_non_eop - process handling of non-EOP buffers
581 * This function updates next to clean. If the buffer is an EOP buffer
584 * that this is in fact a non-EOP buffer.
589 u32 ntc = rx_ring->next_to_clean + 1; in ixgbevf_is_non_eop()
592 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbevf_is_non_eop()
593 rx_ring->next_to_clean = ntc; in ixgbevf_is_non_eop()
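
A hedged sketch of the next_to_clean advance with wrap used by ixgbevf_is_non_eop(); the EOP status flag below is an illustrative stand-in, since the descriptor status test itself is not shown in this excerpt.

#include <stdbool.h>
#include <stdio.h>

#define RING_COUNT 4
#define STAT_EOP   0x2u    /* illustrative stand-in for the EOP status bit */

struct rx_ring { unsigned int next_to_clean; };

/* Advance next_to_clean (wrapping at the ring size) and report whether the
 * just-consumed descriptor was NOT the end of the packet.
 */
static bool is_non_eop(struct rx_ring *ring, unsigned int status)
{
	unsigned int ntc = ring->next_to_clean + 1;

	ntc = (ntc < RING_COUNT) ? ntc : 0;
	ring->next_to_clean = ntc;

	return !(status & STAT_EOP);
}

int main(void)
{
	struct rx_ring ring = { .next_to_clean = 3 };

	printf("non-eop=%d ntc=%u\n", is_non_eop(&ring, 0), ring.next_to_clean);
	printf("non-eop=%d ntc=%u\n", is_non_eop(&ring, STAT_EOP),
	       ring.next_to_clean);
	return 0;
}
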
611 struct page *page = bi->page; in ixgbevf_alloc_mapped_page()
621 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbevf_alloc_mapped_page()
626 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in ixgbevf_alloc_mapped_page()
633 if (dma_mapping_error(rx_ring->dev, dma)) { in ixgbevf_alloc_mapped_page()
636 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbevf_alloc_mapped_page()
640 bi->dma = dma; in ixgbevf_alloc_mapped_page()
641 bi->page = page; in ixgbevf_alloc_mapped_page()
642 bi->page_offset = ixgbevf_rx_offset(rx_ring); in ixgbevf_alloc_mapped_page()
643 bi->pagecnt_bias = 1; in ixgbevf_alloc_mapped_page()
644 rx_ring->rx_stats.alloc_rx_page++; in ixgbevf_alloc_mapped_page()
650 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
659 unsigned int i = rx_ring->next_to_use; in ixgbevf_alloc_rx_buffers()
662 if (!cleaned_count || !rx_ring->netdev) in ixgbevf_alloc_rx_buffers()
666 bi = &rx_ring->rx_buffer_info[i]; in ixgbevf_alloc_rx_buffers()
667 i -= rx_ring->count; in ixgbevf_alloc_rx_buffers()
674 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ixgbevf_alloc_rx_buffers()
675 bi->page_offset, in ixgbevf_alloc_rx_buffers()
680 * because each write-back erases this info. in ixgbevf_alloc_rx_buffers()
682 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ixgbevf_alloc_rx_buffers()
689 bi = rx_ring->rx_buffer_info; in ixgbevf_alloc_rx_buffers()
690 i -= rx_ring->count; in ixgbevf_alloc_rx_buffers()
694 rx_desc->wb.upper.length = 0; in ixgbevf_alloc_rx_buffers()
696 cleaned_count--; in ixgbevf_alloc_rx_buffers()
699 i += rx_ring->count; in ixgbevf_alloc_rx_buffers()
701 if (rx_ring->next_to_use != i) { in ixgbevf_alloc_rx_buffers()
703 rx_ring->next_to_use = i; in ixgbevf_alloc_rx_buffers()
706 rx_ring->next_to_alloc = i; in ixgbevf_alloc_rx_buffers()
710 * applicable for weak-ordered memory model archs, in ixgbevf_alloc_rx_buffers()
711 * such as IA-64). in ixgbevf_alloc_rx_buffers()
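
The comment above is about ordering descriptor writes before the tail bump; the barrier and tail-register write themselves are elided in this excerpt. Below is a userspace analogue of that publish pattern using C11 atomics, where the atomic tail variable stands in for the tail register and the fake addresses stand in for DMA addresses.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 16

static uint64_t desc_ring[RING_COUNT];   /* stands in for the Rx descriptors */
static _Atomic unsigned int tail;        /* stands in for the tail register  */

static void publish_buffers(unsigned int first, unsigned int n)
{
	for (unsigned int k = 0; k < n; k++)
		desc_ring[(first + k) % RING_COUNT] = 0x1000 + k; /* fake addr */

	/* Make the descriptor writes visible before the new tail value is,
	 * the same ordering the driver enforces before letting hardware see
	 * the new descriptors.
	 */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&tail, (first + n) % RING_COUNT,
			      memory_order_relaxed);
}

int main(void)
{
	publish_buffers(0, 8);
	printf("tail now %u\n", atomic_load(&tail));
	return 0;
}
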
719 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
721 * @rx_desc: pointer to the EOP Rx descriptor
743 struct net_device *netdev = rx_ring->netdev; in ixgbevf_cleanup_headers()
745 if (!(netdev->features & NETIF_F_RXALL)) { in ixgbevf_cleanup_headers()
759 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
769 u16 nta = rx_ring->next_to_alloc; in ixgbevf_reuse_rx_page()
771 new_buff = &rx_ring->rx_buffer_info[nta]; in ixgbevf_reuse_rx_page()
775 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ixgbevf_reuse_rx_page()
778 new_buff->page = old_buff->page; in ixgbevf_reuse_rx_page()
779 new_buff->dma = old_buff->dma; in ixgbevf_reuse_rx_page()
780 new_buff->page_offset = old_buff->page_offset; in ixgbevf_reuse_rx_page()
781 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in ixgbevf_reuse_rx_page()
786 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in ixgbevf_can_reuse_rx_page()
787 struct page *page = rx_buffer->page; in ixgbevf_can_reuse_rx_page()
789 /* avoid re-using remote and pfmemalloc pages */ in ixgbevf_can_reuse_rx_page()
795 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) in ixgbevf_can_reuse_rx_page()
799 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048) in ixgbevf_can_reuse_rx_page()
801 if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET) in ixgbevf_can_reuse_rx_page()
812 rx_buffer->pagecnt_bias = USHRT_MAX; in ixgbevf_can_reuse_rx_page()
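
A hedged userspace sketch of the page-reuse accounting above: the driver keeps a local bias so it can hand references to the stack without touching the atomic page refcount on every frame, and tops the bias back up to USHRT_MAX once it runs low. The plain integer refcount, the refill trigger and the refill amount below are assumptions for illustration; only the ownership test and the USHRT_MAX reset are visible in this excerpt.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	int refcount;                  /* stands in for the page refcount */
};

struct rx_buffer {
	struct fake_page *page;
	unsigned short pagecnt_bias;   /* references the driver still owns */
};

/* The page can be recycled only if nobody but the driver holds it:
 * outstanding references = refcount - bias must be at most 1.
 */
static bool can_reuse(struct rx_buffer *b)
{
	if (b->page->refcount - b->pagecnt_bias > 1)
		return false;

	/* Refill the bias in one refcount bump instead of one per frame. */
	if (b->pagecnt_bias == 1) {
		b->page->refcount += USHRT_MAX - 1;
		b->pagecnt_bias = USHRT_MAX;
	}
	return true;
}

int main(void)
{
	struct fake_page page = { .refcount = 1 };
	struct rx_buffer buf = { .page = &page, .pagecnt_bias = 1 };

	printf("reusable: %d (bias now %u)\n", can_reuse(&buf),
	       (unsigned)buf.pagecnt_bias);
	return 0;
}
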
819 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
825 * This function will add the data contained in rx_buffer->page to the skb.
839 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, in ixgbevf_add_rx_frag()
840 rx_buffer->page_offset, size, truesize); in ixgbevf_add_rx_frag()
842 rx_buffer->page_offset ^= truesize; in ixgbevf_add_rx_frag()
844 rx_buffer->page_offset += truesize; in ixgbevf_add_rx_frag()
854 unsigned int size = xdp->data_end - xdp->data; in ixgbevf_construct_skb()
858 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - in ixgbevf_construct_skb()
859 xdp->data_hard_start); in ixgbevf_construct_skb()
865 net_prefetch(xdp->data); in ixgbevf_construct_skb()
867 /* Note, we get here by enabling legacy-rx via: in ixgbevf_construct_skb()
869 * ethtool --set-priv-flags <dev> legacy-rx on in ixgbevf_construct_skb()
872 * opposed to having legacy-rx off, where we process XDP in ixgbevf_construct_skb()
876 * xdp->data_meta will always point to xdp->data, since in ixgbevf_construct_skb()
878 * changed in future for legacy-rx mode on, then lets also in ixgbevf_construct_skb()
879 * add xdp->data_meta handling here. in ixgbevf_construct_skb()
883 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE); in ixgbevf_construct_skb()
890 headlen = eth_get_headlen(skb->dev, xdp->data, in ixgbevf_construct_skb()
894 memcpy(__skb_put(skb, headlen), xdp->data, in ixgbevf_construct_skb()
898 size -= headlen; in ixgbevf_construct_skb()
900 skb_add_rx_frag(skb, 0, rx_buffer->page, in ixgbevf_construct_skb()
901 (xdp->data + headlen) - in ixgbevf_construct_skb()
902 page_address(rx_buffer->page), in ixgbevf_construct_skb()
905 rx_buffer->page_offset ^= truesize; in ixgbevf_construct_skb()
907 rx_buffer->page_offset += truesize; in ixgbevf_construct_skb()
910 rx_buffer->pagecnt_bias++; in ixgbevf_construct_skb()
919 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_irq_enable_queues()
929 unsigned int metasize = xdp->data - xdp->data_meta; in ixgbevf_build_skb()
934 SKB_DATA_ALIGN(xdp->data_end - in ixgbevf_build_skb()
935 xdp->data_hard_start); in ixgbevf_build_skb()
939 /* Prefetch first cache line of first page. If xdp->data_meta in ixgbevf_build_skb()
940 * is unused, this points to xdp->data, otherwise, we likely in ixgbevf_build_skb()
944 net_prefetch(xdp->data_meta); in ixgbevf_build_skb()
947 skb = napi_build_skb(xdp->data_hard_start, truesize); in ixgbevf_build_skb()
952 skb_reserve(skb, xdp->data - xdp->data_hard_start); in ixgbevf_build_skb()
953 __skb_put(skb, xdp->data_end - xdp->data); in ixgbevf_build_skb()
959 rx_buffer->page_offset ^= truesize; in ixgbevf_build_skb()
961 rx_buffer->page_offset += truesize; in ixgbevf_build_skb()
980 len = xdp->data_end - xdp->data; in ixgbevf_xmit_xdp_ring()
985 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); in ixgbevf_xmit_xdp_ring()
986 if (dma_mapping_error(ring->dev, dma)) in ixgbevf_xmit_xdp_ring()
990 i = ring->next_to_use; in ixgbevf_xmit_xdp_ring()
991 tx_buffer = &ring->tx_buffer_info[i]; in ixgbevf_xmit_xdp_ring()
995 tx_buffer->data = xdp->data; in ixgbevf_xmit_xdp_ring()
996 tx_buffer->bytecount = len; in ixgbevf_xmit_xdp_ring()
997 tx_buffer->gso_segs = 1; in ixgbevf_xmit_xdp_ring()
998 tx_buffer->protocol = 0; in ixgbevf_xmit_xdp_ring()
1003 if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) { in ixgbevf_xmit_xdp_ring()
1006 set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); in ixgbevf_xmit_xdp_ring()
1009 context_desc->vlan_macip_lens = in ixgbevf_xmit_xdp_ring()
1011 context_desc->fceof_saidx = 0; in ixgbevf_xmit_xdp_ring()
1012 context_desc->type_tucmd_mlhl = in ixgbevf_xmit_xdp_ring()
1015 context_desc->mss_l4len_idx = 0; in ixgbevf_xmit_xdp_ring()
1027 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbevf_xmit_xdp_ring()
1029 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in ixgbevf_xmit_xdp_ring()
1030 tx_desc->read.olinfo_status = in ixgbevf_xmit_xdp_ring()
1039 if (i == ring->count) in ixgbevf_xmit_xdp_ring()
1042 tx_buffer->next_to_watch = tx_desc; in ixgbevf_xmit_xdp_ring()
1043 ring->next_to_use = i; in ixgbevf_xmit_xdp_ring()
1057 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbevf_run_xdp()
1067 xdp_ring = adapter->xdp_ring[rx_ring->queue_index]; in ixgbevf_run_xdp()
1073 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in ixgbevf_run_xdp()
1077 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbevf_run_xdp()
1093 truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in ixgbevf_rx_frame_truesize()
1110 rx_buffer->page_offset ^= truesize; in ixgbevf_rx_buffer_flip()
1112 rx_buffer->page_offset += truesize; in ixgbevf_rx_buffer_flip()
1121 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_clean_rx_irq()
1123 struct sk_buff *skb = rx_ring->skb; in ixgbevf_clean_rx_irq()
1132 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); in ixgbevf_clean_rx_irq()
1145 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbevf_clean_rx_irq()
1146 size = le16_to_cpu(rx_desc->wb.upper.length); in ixgbevf_clean_rx_irq()
1163 hard_start = page_address(rx_buffer->page) + in ixgbevf_clean_rx_irq()
1164 rx_buffer->page_offset - offset; in ixgbevf_clean_rx_irq()
1179 rx_buffer->pagecnt_bias++; in ixgbevf_clean_rx_irq()
1195 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbevf_clean_rx_irq()
1196 rx_buffer->pagecnt_bias++; in ixgbevf_clean_rx_irq()
1203 /* fetch next buffer in frame if non-eop */ in ixgbevf_clean_rx_irq()
1214 total_rx_bytes += skb->len; in ixgbevf_clean_rx_irq()
1219 if ((skb->pkt_type == PACKET_BROADCAST || in ixgbevf_clean_rx_irq()
1220 skb->pkt_type == PACKET_MULTICAST) && in ixgbevf_clean_rx_irq()
1221 ether_addr_equal(rx_ring->netdev->dev_addr, in ixgbevf_clean_rx_irq()
1222 eth_hdr(skb)->h_source)) { in ixgbevf_clean_rx_irq()
1240 rx_ring->skb = skb; in ixgbevf_clean_rx_irq()
1244 adapter->xdp_ring[rx_ring->queue_index]; in ixgbevf_clean_rx_irq()
1250 ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use); in ixgbevf_clean_rx_irq()
1253 u64_stats_update_begin(&rx_ring->syncp); in ixgbevf_clean_rx_irq()
1254 rx_ring->stats.packets += total_rx_packets; in ixgbevf_clean_rx_irq()
1255 rx_ring->stats.bytes += total_rx_bytes; in ixgbevf_clean_rx_irq()
1256 u64_stats_update_end(&rx_ring->syncp); in ixgbevf_clean_rx_irq()
1257 q_vector->rx.total_packets += total_rx_packets; in ixgbevf_clean_rx_irq()
1258 q_vector->rx.total_bytes += total_rx_bytes; in ixgbevf_clean_rx_irq()
1264  * ixgbevf_poll - NAPI polling callback
1275 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_poll()
1280 ixgbevf_for_each_ring(ring, q_vector->tx) { in ixgbevf_poll()
1291 if (q_vector->rx.count > 1) in ixgbevf_poll()
1292 per_ring_budget = max(budget/q_vector->rx.count, 1); in ixgbevf_poll()
1296 ixgbevf_for_each_ring(ring, q_vector->rx) { in ixgbevf_poll()
1308 /* Exit the polling mode, but don't re-enable interrupts if stack might in ixgbevf_poll()
1309 * poll us due to busy-polling in ixgbevf_poll()
1312 if (adapter->rx_itr_setting == 1) in ixgbevf_poll()
1314 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && in ixgbevf_poll()
1315 !test_bit(__IXGBEVF_REMOVING, &adapter->state)) in ixgbevf_poll()
1317 BIT(q_vector->v_idx)); in ixgbevf_poll()
1320 return min(work_done, budget - 1); in ixgbevf_poll()
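
A small sketch of the per-ring budget split above: the NAPI budget is divided across the Rx rings on the vector (never below one), and at most budget - 1 is reported so a fully used budget is not mistaken for "all work done". Ring counts and per-ring work below are arbitrary.

#include <stdio.h>

static int max_int(int a, int b) { return a > b ? a : b; }
static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int budget = 64;
	int rx_ring_count = 3;          /* rings attached to this vector */
	int per_ring_budget;
	int work_done = 0;

	/* Share the budget across rings, floor of 1 per ring. */
	per_ring_budget = (rx_ring_count > 1)
			? max_int(budget / rx_ring_count, 1)
			: budget;

	for (int ring = 0; ring < rx_ring_count; ring++)
		work_done += per_ring_budget;   /* pretend each ring used it all */

	printf("per_ring_budget=%d reported=%d\n",
	       per_ring_budget, min_int(work_done, budget - 1));
	return 0;
}
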
1324 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
1329 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_write_eitr()
1330 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_write_eitr()
1331 int v_idx = q_vector->v_idx; in ixgbevf_write_eitr()
1332 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; in ixgbevf_write_eitr()
1343 * ixgbevf_configure_msix - Configure MSI-X hardware
1346 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
1354 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_configure_msix()
1355 adapter->eims_enable_mask = 0; in ixgbevf_configure_msix()
1363 q_vector = adapter->q_vector[v_idx]; in ixgbevf_configure_msix()
1365 ixgbevf_for_each_ring(ring, q_vector->rx) in ixgbevf_configure_msix()
1366 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); in ixgbevf_configure_msix()
1368 ixgbevf_for_each_ring(ring, q_vector->tx) in ixgbevf_configure_msix()
1369 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); in ixgbevf_configure_msix()
1371 if (q_vector->tx.ring && !q_vector->rx.ring) { in ixgbevf_configure_msix()
1373 if (adapter->tx_itr_setting == 1) in ixgbevf_configure_msix()
1374 q_vector->itr = IXGBE_12K_ITR; in ixgbevf_configure_msix()
1376 q_vector->itr = adapter->tx_itr_setting; in ixgbevf_configure_msix()
1379 if (adapter->rx_itr_setting == 1) in ixgbevf_configure_msix()
1380 q_vector->itr = IXGBE_20K_ITR; in ixgbevf_configure_msix()
1382 q_vector->itr = adapter->rx_itr_setting; in ixgbevf_configure_msix()
1386 adapter->eims_enable_mask |= BIT(v_idx); in ixgbevf_configure_msix()
1391 ixgbevf_set_ivar(adapter, -1, 1, v_idx); in ixgbevf_configure_msix()
1393 adapter->eims_other = BIT(v_idx); in ixgbevf_configure_msix()
1394 adapter->eims_enable_mask |= adapter->eims_other; in ixgbevf_configure_msix()
1405 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
1420 int bytes = ring_container->total_bytes; in ixgbevf_update_itr()
1421 int packets = ring_container->total_packets; in ixgbevf_update_itr()
1424 u8 itr_setting = ring_container->itr; in ixgbevf_update_itr()
1430 * 0-20MB/s lowest (100000 ints/s) in ixgbevf_update_itr()
1431 * 20-100MB/s low (20000 ints/s) in ixgbevf_update_itr()
1432 * 100-1249MB/s bulk (12000 ints/s) in ixgbevf_update_itr()
1435 timepassed_us = q_vector->itr >> 2; in ixgbevf_update_itr()
1459 ring_container->total_bytes = 0; in ixgbevf_update_itr()
1460 ring_container->total_packets = 0; in ixgbevf_update_itr()
1463 ring_container->itr = itr_setting; in ixgbevf_update_itr()
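
The comment above maps observed receive throughput to a target interrupt rate. A hedged sketch of that classification using the MB/s bands from the comment; the byte-per-interval thresholds the driver actually compares against are not shown in this excerpt.

#include <stdio.h>

enum itr_latency { LOWEST_LATENCY, LOW_LATENCY, BULK_LATENCY };

/* Throughput bands from the ixgbevf_update_itr() comment:
 *   0-20 MB/s     -> lowest (100000 ints/s)
 *   20-100 MB/s   -> low    (20000 ints/s)
 *   100-1249 MB/s -> bulk   (12000 ints/s)
 */
static enum itr_latency classify(unsigned int mbytes_per_sec)
{
	if (mbytes_per_sec <= 20)
		return LOWEST_LATENCY;
	if (mbytes_per_sec <= 100)
		return LOW_LATENCY;
	return BULK_LATENCY;
}

static unsigned int target_ints_per_sec(enum itr_latency l)
{
	switch (l) {
	case LOWEST_LATENCY: return 100000;
	case LOW_LATENCY:    return 20000;
	default:             return 12000;
	}
}

int main(void)
{
	unsigned int samples[] = { 5, 60, 800 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%u MB/s -> %u ints/s\n", samples[i],
		       target_ints_per_sec(classify(samples[i])));
	return 0;
}
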
1468 u32 new_itr = q_vector->itr; in ixgbevf_set_itr()
1471 ixgbevf_update_itr(q_vector, &q_vector->tx); in ixgbevf_set_itr()
1472 ixgbevf_update_itr(q_vector, &q_vector->rx); in ixgbevf_set_itr()
1474 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); in ixgbevf_set_itr()
1491 if (new_itr != q_vector->itr) { in ixgbevf_set_itr()
1493 new_itr = (10 * new_itr * q_vector->itr) / in ixgbevf_set_itr()
1494 ((9 * new_itr) + q_vector->itr); in ixgbevf_set_itr()
1497 q_vector->itr = new_itr; in ixgbevf_set_itr()
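
When the computed ITR differs from the current one, the change is damped with the weighted average shown above. A worked sketch of that formula: with old = 200 and new = 1000 it yields (10*1000*200)/(9*1000 + 200) = 2000000/9200 = 217, so the ITR creeps toward the new value instead of jumping.

#include <stdio.h>

/* Damped ITR update: new = (10 * new * old) / (9 * new + old) */
static unsigned int damp_itr(unsigned int new_itr, unsigned int old_itr)
{
	return (10 * new_itr * old_itr) / (9 * new_itr + old_itr);
}

int main(void)
{
	unsigned int itr = 200;         /* current value              */
	unsigned int wanted = 1000;     /* value the heuristic wants  */

	for (int step = 0; step < 5; step++) {
		itr = damp_itr(wanted, itr);
		printf("step %d: itr = %u\n", step, itr);
	}
	return 0;
}
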
1506 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_msix_other()
1508 hw->mac.get_link_status = 1; in ixgbevf_msix_other()
1512 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); in ixgbevf_msix_other()
1518 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
1527 if (q_vector->rx.ring || q_vector->tx.ring) in ixgbevf_msix_clean_rings()
1528 napi_schedule_irqoff(&q_vector->napi); in ixgbevf_msix_clean_rings()
1534 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1537 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1542 struct net_device *netdev = adapter->netdev; in ixgbevf_request_msix_irqs()
1543 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_request_msix_irqs()
1548 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; in ixgbevf_request_msix_irqs()
1549 struct msix_entry *entry = &adapter->msix_entries[vector]; in ixgbevf_request_msix_irqs()
1551 if (q_vector->tx.ring && q_vector->rx.ring) { in ixgbevf_request_msix_irqs()
1552 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbevf_request_msix_irqs()
1553 "%s-TxRx-%u", netdev->name, ri++); in ixgbevf_request_msix_irqs()
1555 } else if (q_vector->rx.ring) { in ixgbevf_request_msix_irqs()
1556 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbevf_request_msix_irqs()
1557 "%s-rx-%u", netdev->name, ri++); in ixgbevf_request_msix_irqs()
1558 } else if (q_vector->tx.ring) { in ixgbevf_request_msix_irqs()
1559 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbevf_request_msix_irqs()
1560 "%s-tx-%u", netdev->name, ti++); in ixgbevf_request_msix_irqs()
1565 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, in ixgbevf_request_msix_irqs()
1566 q_vector->name, q_vector); in ixgbevf_request_msix_irqs()
1568 hw_dbg(&adapter->hw, in ixgbevf_request_msix_irqs()
1575 err = request_irq(adapter->msix_entries[vector].vector, in ixgbevf_request_msix_irqs()
1576 &ixgbevf_msix_other, 0, netdev->name, adapter); in ixgbevf_request_msix_irqs()
1578 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", in ixgbevf_request_msix_irqs()
1587 vector--; in ixgbevf_request_msix_irqs()
1588 free_irq(adapter->msix_entries[vector].vector, in ixgbevf_request_msix_irqs()
1589 adapter->q_vector[vector]); in ixgbevf_request_msix_irqs()
1591 /* This failure is non-recoverable - it indicates the system is in ixgbevf_request_msix_irqs()
1601 adapter->num_msix_vectors = 0; in ixgbevf_request_msix_irqs()
1606 * ixgbevf_request_irq - initialize interrupts
1617 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); in ixgbevf_request_irq()
1626 if (!adapter->msix_entries) in ixgbevf_free_irq()
1629 q_vectors = adapter->num_msix_vectors; in ixgbevf_free_irq()
1630 i = q_vectors - 1; in ixgbevf_free_irq()
1632 free_irq(adapter->msix_entries[i].vector, adapter); in ixgbevf_free_irq()
1633 i--; in ixgbevf_free_irq()
1635 for (; i >= 0; i--) { in ixgbevf_free_irq()
1637 if (!adapter->q_vector[i]->rx.ring && in ixgbevf_free_irq()
1638 !adapter->q_vector[i]->tx.ring) in ixgbevf_free_irq()
1641 free_irq(adapter->msix_entries[i].vector, in ixgbevf_free_irq()
1642 adapter->q_vector[i]); in ixgbevf_free_irq()
1647 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1652 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_irq_disable()
1661 for (i = 0; i < adapter->num_msix_vectors; i++) in ixgbevf_irq_disable()
1662 synchronize_irq(adapter->msix_entries[i].vector); in ixgbevf_irq_disable()
1666 * ixgbevf_irq_enable - Enable default interrupt generation settings
1671 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_irq_enable()
1673 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); in ixgbevf_irq_enable()
1674 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); in ixgbevf_irq_enable()
1675 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); in ixgbevf_irq_enable()
1679 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1688 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_tx_ring()
1689 u64 tdba = ring->dma; in ixgbevf_configure_tx_ring()
1692 u8 reg_idx = ring->reg_idx; in ixgbevf_configure_tx_ring()
1694 /* disable queue to avoid issues while updating state */ in ixgbevf_configure_tx_ring()
1701 ring->count * sizeof(union ixgbe_adv_tx_desc)); in ixgbevf_configure_tx_ring()
1703 /* disable head writeback */ in ixgbevf_configure_tx_ring()
1715 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx); in ixgbevf_configure_tx_ring()
1718 ring->next_to_clean = 0; in ixgbevf_configure_tx_ring()
1719 ring->next_to_use = 0; in ixgbevf_configure_tx_ring()
1732 memset(ring->tx_buffer_info, 0, in ixgbevf_configure_tx_ring()
1733 sizeof(struct ixgbevf_tx_buffer) * ring->count); in ixgbevf_configure_tx_ring()
1735 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); in ixgbevf_configure_tx_ring()
1736 clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); in ixgbevf_configure_tx_ring()
1744 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); in ixgbevf_configure_tx_ring()
1750 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1760 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_configure_tx()
1761 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); in ixgbevf_configure_tx()
1762 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbevf_configure_tx()
1763 ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]); in ixgbevf_configure_tx()
1771 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_srrctl()
1788 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_setup_psrtype()
1795 if (adapter->num_rx_queues > 1) in ixgbevf_setup_psrtype()
1805 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_disable_rx_queue()
1808 u8 reg_idx = ring->reg_idx; in ixgbevf_disable_rx_queue()
1810 if (IXGBE_REMOVED(hw->hw_addr)) in ixgbevf_disable_rx_queue()
1818 /* the hardware may take up to 100us to really disable the Rx queue */ in ixgbevf_disable_rx_queue()
1822 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); in ixgbevf_disable_rx_queue()
1832 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_rx_desc_queue_enable()
1835 u8 reg_idx = ring->reg_idx; in ixgbevf_rx_desc_queue_enable()
1837 if (IXGBE_REMOVED(hw->hw_addr)) in ixgbevf_rx_desc_queue_enable()
1842 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); in ixgbevf_rx_desc_queue_enable()
1850 * ixgbevf_init_rss_key - Initialize adapter RSS key
1859 if (!adapter->rss_key) { in ixgbevf_init_rss_key()
1862 return -ENOMEM; in ixgbevf_init_rss_key()
1865 adapter->rss_key = rss_key; in ixgbevf_init_rss_key()
1873 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_setup_vfmrqc()
1875 u16 rss_i = adapter->num_rx_queues; in ixgbevf_setup_vfmrqc()
1880 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i)); in ixgbevf_setup_vfmrqc()
1886 adapter->rss_indir_tbl[i] = j; in ixgbevf_setup_vfmrqc()
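
A hedged sketch of the indirection-table fill above: each entry cycles through the active Rx queues round-robin, as the loop writing adapter->rss_indir_tbl[] does. The table size (64) and queue count below are assumptions for illustration; the driver's actual size constant is not shown in this excerpt.

#include <stdio.h>

#define RETA_SIZE 64            /* assumed table size for illustration */

int main(void)
{
	unsigned char reta[RETA_SIZE];
	unsigned int rss_i = 4;         /* number of active Rx queues */
	unsigned int i, j = 0;

	/* Fill the redirection table round-robin across the Rx queues. */
	for (i = 0; i < RETA_SIZE; i++) {
		if (j == rss_i)
			j = 0;
		reta[i] = j++;
	}

	for (i = 0; i < 16; i++)        /* print the first few entries */
		printf("%u ", reta[i]);
	printf("\n");
	return 0;
}
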
1909 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_rx_ring()
1911 u64 rdba = ring->dma; in ixgbevf_configure_rx_ring()
1913 u8 reg_idx = ring->reg_idx; in ixgbevf_configure_rx_ring()
1915 /* disable queue to avoid issues while updating state */ in ixgbevf_configure_rx_ring()
1922 ring->count * sizeof(union ixgbe_adv_rx_desc)); in ixgbevf_configure_rx_ring()
1937 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx); in ixgbevf_configure_rx_ring()
1940 memset(ring->rx_buffer_info, 0, in ixgbevf_configure_rx_ring()
1941 sizeof(struct ixgbevf_rx_buffer) * ring->count); in ixgbevf_configure_rx_ring()
1945 rx_desc->wb.upper.length = 0; in ixgbevf_configure_rx_ring()
1948 ring->next_to_clean = 0; in ixgbevf_configure_rx_ring()
1949 ring->next_to_use = 0; in ixgbevf_configure_rx_ring()
1950 ring->next_to_alloc = 0; in ixgbevf_configure_rx_ring()
1955 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) { in ixgbevf_configure_rx_ring()
1978 struct net_device *netdev = adapter->netdev; in ixgbevf_set_rx_buffer_len()
1979 unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; in ixgbevf_set_rx_buffer_len()
1985 if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX) in ixgbevf_set_rx_buffer_len()
1993 if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring)) in ixgbevf_set_rx_buffer_len()
2000 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
2007 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_rx()
2008 struct net_device *netdev = adapter->netdev; in ixgbevf_configure_rx()
2012 if (hw->mac.type >= ixgbe_mac_X550_vf) in ixgbevf_configure_rx()
2015 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_configure_rx()
2017 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); in ixgbevf_configure_rx()
2018 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_configure_rx()
2020 dev_err(&adapter->pdev->dev, in ixgbevf_configure_rx()
2021 "Failed to set MTU at %d\n", netdev->mtu); in ixgbevf_configure_rx()
2026 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_configure_rx()
2027 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; in ixgbevf_configure_rx()
2038 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_vlan_rx_add_vid()
2041 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_add_vid()
2044 err = hw->mac.ops.set_vfta(hw, vid, 0, true); in ixgbevf_vlan_rx_add_vid()
2046 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_add_vid()
2053 return -EIO; in ixgbevf_vlan_rx_add_vid()
2056 return -EACCES; in ixgbevf_vlan_rx_add_vid()
2059 set_bit(vid, adapter->active_vlans); in ixgbevf_vlan_rx_add_vid()
2068 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_vlan_rx_kill_vid()
2071 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_kill_vid()
2074 err = hw->mac.ops.set_vfta(hw, vid, 0, false); in ixgbevf_vlan_rx_kill_vid()
2076 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_kill_vid()
2081 clear_bit(vid, adapter->active_vlans); in ixgbevf_vlan_rx_kill_vid()
2090 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in ixgbevf_restore_vlan()
2091 ixgbevf_vlan_rx_add_vid(adapter->netdev, in ixgbevf_restore_vlan()
2098 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_write_uc_addr_list()
2105 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); in ixgbevf_write_uc_addr_list()
2112 hw->mac.ops.set_uc_addr(hw, 0, NULL); in ixgbevf_write_uc_addr_list()
2119 * ixgbevf_set_rx_mode - Multicast and unicast set
2130 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_rx_mode()
2131 unsigned int flags = netdev->flags; in ixgbevf_set_rx_mode()
2144 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_set_rx_mode()
2146 hw->mac.ops.update_xcast_mode(hw, xcast_mode); in ixgbevf_set_rx_mode()
2149 hw->mac.ops.update_mc_addr_list(hw, netdev); in ixgbevf_set_rx_mode()
2153 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_set_rx_mode()
2160 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_napi_enable_all()
2163 q_vector = adapter->q_vector[q_idx]; in ixgbevf_napi_enable_all()
2164 napi_enable(&q_vector->napi); in ixgbevf_napi_enable_all()
2172 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_napi_disable_all()
2175 q_vector = adapter->q_vector[q_idx]; in ixgbevf_napi_disable_all()
2176 napi_disable(&q_vector->napi); in ixgbevf_napi_disable_all()
2182 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_dcb()
2185 unsigned int num_rx_queues = adapter->num_rx_queues; in ixgbevf_configure_dcb()
2186 unsigned int num_tx_queues = adapter->num_tx_queues; in ixgbevf_configure_dcb()
2189 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_configure_dcb()
2194 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_configure_dcb()
2204 adapter->tx_ring[0]->reg_idx = def_q; in ixgbevf_configure_dcb()
2211 if ((adapter->num_rx_queues != num_rx_queues) || in ixgbevf_configure_dcb()
2212 (adapter->num_tx_queues != num_tx_queues)) { in ixgbevf_configure_dcb()
2214 hw->mbx.timeout = 0; in ixgbevf_configure_dcb()
2217 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state); in ixgbevf_configure_dcb()
2227 ixgbevf_set_rx_mode(adapter->netdev); in ixgbevf_configure()
2238 /* Only save pre-reset stats if there are some */ in ixgbevf_save_reset_stats()
2239 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { in ixgbevf_save_reset_stats()
2240 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - in ixgbevf_save_reset_stats()
2241 adapter->stats.base_vfgprc; in ixgbevf_save_reset_stats()
2242 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - in ixgbevf_save_reset_stats()
2243 adapter->stats.base_vfgptc; in ixgbevf_save_reset_stats()
2244 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - in ixgbevf_save_reset_stats()
2245 adapter->stats.base_vfgorc; in ixgbevf_save_reset_stats()
2246 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - in ixgbevf_save_reset_stats()
2247 adapter->stats.base_vfgotc; in ixgbevf_save_reset_stats()
2248 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - in ixgbevf_save_reset_stats()
2249 adapter->stats.base_vfmprc; in ixgbevf_save_reset_stats()
2255 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_init_last_counter_stats()
2257 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); in ixgbevf_init_last_counter_stats()
2258 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); in ixgbevf_init_last_counter_stats()
2259 adapter->stats.last_vfgorc |= in ixgbevf_init_last_counter_stats()
2261 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); in ixgbevf_init_last_counter_stats()
2262 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); in ixgbevf_init_last_counter_stats()
2263 adapter->stats.last_vfgotc |= in ixgbevf_init_last_counter_stats()
2265 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); in ixgbevf_init_last_counter_stats()
2267 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; in ixgbevf_init_last_counter_stats()
2268 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; in ixgbevf_init_last_counter_stats()
2269 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; in ixgbevf_init_last_counter_stats()
2270 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; in ixgbevf_init_last_counter_stats()
2271 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; in ixgbevf_init_last_counter_stats()
2276 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_negotiate_api()
2288 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_negotiate_api()
2291 err = hw->mac.ops.negotiate_api_version(hw, api[idx]); in ixgbevf_negotiate_api()
2297 if (hw->api_version >= ixgbe_mbox_api_15) { in ixgbevf_negotiate_api()
2298 hw->mbx.ops.init_params(hw); in ixgbevf_negotiate_api()
2299 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, in ixgbevf_negotiate_api()
2303 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_negotiate_api()
2308 struct net_device *netdev = adapter->netdev; in ixgbevf_up_complete()
2309 struct pci_dev *pdev = adapter->pdev; in ixgbevf_up_complete()
2310 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_up_complete()
2315 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_up_complete()
2317 if (is_valid_ether_addr(hw->mac.addr)) in ixgbevf_up_complete()
2318 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); in ixgbevf_up_complete()
2320 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); in ixgbevf_up_complete()
2322 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_up_complete()
2324 state = adapter->link_state; in ixgbevf_up_complete()
2325 hw->mac.ops.get_link_state(hw, &adapter->link_state); in ixgbevf_up_complete()
2326 if (state && state != adapter->link_state) in ixgbevf_up_complete()
2327 dev_info(&pdev->dev, "VF is administratively disabled\n"); in ixgbevf_up_complete()
2330 clear_bit(__IXGBEVF_DOWN, &adapter->state); in ixgbevf_up_complete()
2343 hw->mac.get_link_status = 1; in ixgbevf_up_complete()
2344 mod_timer(&adapter->service_timer, jiffies); in ixgbevf_up_complete()
2355 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2360 u16 i = rx_ring->next_to_clean; in ixgbevf_clean_rx_ring()
2363 if (rx_ring->skb) { in ixgbevf_clean_rx_ring()
2364 dev_kfree_skb(rx_ring->skb); in ixgbevf_clean_rx_ring()
2365 rx_ring->skb = NULL; in ixgbevf_clean_rx_ring()
2369 while (i != rx_ring->next_to_alloc) { in ixgbevf_clean_rx_ring()
2372 rx_buffer = &rx_ring->rx_buffer_info[i]; in ixgbevf_clean_rx_ring()
2377 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_clean_rx_ring()
2378 rx_buffer->dma, in ixgbevf_clean_rx_ring()
2379 rx_buffer->page_offset, in ixgbevf_clean_rx_ring()
2384 dma_unmap_page_attrs(rx_ring->dev, in ixgbevf_clean_rx_ring()
2385 rx_buffer->dma, in ixgbevf_clean_rx_ring()
2390 __page_frag_cache_drain(rx_buffer->page, in ixgbevf_clean_rx_ring()
2391 rx_buffer->pagecnt_bias); in ixgbevf_clean_rx_ring()
2394 if (i == rx_ring->count) in ixgbevf_clean_rx_ring()
2398 rx_ring->next_to_alloc = 0; in ixgbevf_clean_rx_ring()
2399 rx_ring->next_to_clean = 0; in ixgbevf_clean_rx_ring()
2400 rx_ring->next_to_use = 0; in ixgbevf_clean_rx_ring()
2404 * ixgbevf_clean_tx_ring - Free Tx Buffers
2409 u16 i = tx_ring->next_to_clean; in ixgbevf_clean_tx_ring()
2410 struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_clean_tx_ring()
2412 while (i != tx_ring->next_to_use) { in ixgbevf_clean_tx_ring()
2417 page_frag_free(tx_buffer->data); in ixgbevf_clean_tx_ring()
2419 dev_kfree_skb_any(tx_buffer->skb); in ixgbevf_clean_tx_ring()
2422 dma_unmap_single(tx_ring->dev, in ixgbevf_clean_tx_ring()
2428 eop_desc = tx_buffer->next_to_watch; in ixgbevf_clean_tx_ring()
2436 if (unlikely(i == tx_ring->count)) { in ixgbevf_clean_tx_ring()
2438 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_ring()
2444 dma_unmap_page(tx_ring->dev, in ixgbevf_clean_tx_ring()
2453 if (unlikely(i == tx_ring->count)) { in ixgbevf_clean_tx_ring()
2455 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_ring()
2460 tx_ring->next_to_use = 0; in ixgbevf_clean_tx_ring()
2461 tx_ring->next_to_clean = 0; in ixgbevf_clean_tx_ring()
2466 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2473 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_clean_all_rx_rings()
2474 ixgbevf_clean_rx_ring(adapter->rx_ring[i]); in ixgbevf_clean_all_rx_rings()
2478 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2485 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_clean_all_tx_rings()
2486 ixgbevf_clean_tx_ring(adapter->tx_ring[i]); in ixgbevf_clean_all_tx_rings()
2487 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbevf_clean_all_tx_rings()
2488 ixgbevf_clean_tx_ring(adapter->xdp_ring[i]); in ixgbevf_clean_all_tx_rings()
2493 struct net_device *netdev = adapter->netdev; in ixgbevf_down()
2494 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_down()
2498 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) in ixgbevf_down()
2501 /* disable all enabled Rx queues */ in ixgbevf_down()
2502 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_down()
2503 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); in ixgbevf_down()
2517 del_timer_sync(&adapter->service_timer); in ixgbevf_down()
2519 /* disable transmits in the hardware now that interrupts are off */ in ixgbevf_down()
2520 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbevf_down()
2521 u8 reg_idx = adapter->tx_ring[i]->reg_idx; in ixgbevf_down()
2527 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbevf_down()
2528 u8 reg_idx = adapter->xdp_ring[i]->reg_idx; in ixgbevf_down()
2534 if (!pci_channel_offline(adapter->pdev)) in ixgbevf_down()
2543 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_reinit_locked()
2547 pci_set_master(adapter->pdev); in ixgbevf_reinit_locked()
2550 clear_bit(__IXGBEVF_RESETTING, &adapter->state); in ixgbevf_reinit_locked()
2555 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_reset()
2556 struct net_device *netdev = adapter->netdev; in ixgbevf_reset()
2558 if (hw->mac.ops.reset_hw(hw)) { in ixgbevf_reset()
2561 hw->mac.ops.init_hw(hw); in ixgbevf_reset()
2565 if (is_valid_ether_addr(adapter->hw.mac.addr)) { in ixgbevf_reset()
2566 eth_hw_addr_set(netdev, adapter->hw.mac.addr); in ixgbevf_reset()
2567 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); in ixgbevf_reset()
2570 adapter->last_reset = jiffies; in ixgbevf_reset()
2589 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, in ixgbevf_acquire_msix_vectors()
2593 dev_err(&adapter->pdev->dev, in ixgbevf_acquire_msix_vectors()
2594 "Unable to allocate MSI-X interrupts\n"); in ixgbevf_acquire_msix_vectors()
2595 kfree(adapter->msix_entries); in ixgbevf_acquire_msix_vectors()
2596 adapter->msix_entries = NULL; in ixgbevf_acquire_msix_vectors()
2604 adapter->num_msix_vectors = vectors; in ixgbevf_acquire_msix_vectors()
2610 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2622 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_num_queues()
2628 adapter->num_rx_queues = 1; in ixgbevf_set_num_queues()
2629 adapter->num_tx_queues = 1; in ixgbevf_set_num_queues()
2630 adapter->num_xdp_queues = 0; in ixgbevf_set_num_queues()
2632 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_set_num_queues()
2637 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_set_num_queues()
2644 adapter->num_rx_queues = num_tcs; in ixgbevf_set_num_queues()
2648 switch (hw->api_version) { in ixgbevf_set_num_queues()
2654 if (adapter->xdp_prog && in ixgbevf_set_num_queues()
2655 hw->mac.max_tx_queues == rss) in ixgbevf_set_num_queues()
2658 adapter->num_rx_queues = rss; in ixgbevf_set_num_queues()
2659 adapter->num_tx_queues = rss; in ixgbevf_set_num_queues()
2660 adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0; in ixgbevf_set_num_queues()
2669 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2679 /* It's easy to be greedy for MSI-X vectors, but it really in ixgbevf_set_interrupt_capability()
2685 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); in ixgbevf_set_interrupt_capability()
2689 adapter->msix_entries = kcalloc(v_budget, in ixgbevf_set_interrupt_capability()
2691 if (!adapter->msix_entries) in ixgbevf_set_interrupt_capability()
2692 return -ENOMEM; in ixgbevf_set_interrupt_capability()
2695 adapter->msix_entries[vector].entry = vector; in ixgbevf_set_interrupt_capability()
2697 /* A failure in MSI-X entry allocation isn't fatal, but the VF driver in ixgbevf_set_interrupt_capability()
2699 * that we clean up the msix_entries pointer elsewhere. in ixgbevf_set_interrupt_capability()
2707 ring->next = head->ring; in ixgbevf_add_ring()
2708 head->ring = ring; in ixgbevf_add_ring()
2709 head->count++; in ixgbevf_add_ring()
2713 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
2723 * We allocate one q_vector. If allocation fails we return -ENOMEM.
2741 return -ENOMEM; in ixgbevf_alloc_q_vector()
2744 netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll); in ixgbevf_alloc_q_vector()
2747 adapter->q_vector[v_idx] = q_vector; in ixgbevf_alloc_q_vector()
2748 q_vector->adapter = adapter; in ixgbevf_alloc_q_vector()
2749 q_vector->v_idx = v_idx; in ixgbevf_alloc_q_vector()
2752 ring = q_vector->ring; in ixgbevf_alloc_q_vector()
2756 ring->dev = &adapter->pdev->dev; in ixgbevf_alloc_q_vector()
2757 ring->netdev = adapter->netdev; in ixgbevf_alloc_q_vector()
2760 ring->q_vector = q_vector; in ixgbevf_alloc_q_vector()
2763 ixgbevf_add_ring(ring, &q_vector->tx); in ixgbevf_alloc_q_vector()
2766 ring->count = adapter->tx_ring_count; in ixgbevf_alloc_q_vector()
2767 ring->queue_index = txr_idx; in ixgbevf_alloc_q_vector()
2768 ring->reg_idx = reg_idx; in ixgbevf_alloc_q_vector()
2771 adapter->tx_ring[txr_idx] = ring; in ixgbevf_alloc_q_vector()
2774 txr_count--; in ixgbevf_alloc_q_vector()
2784 ring->dev = &adapter->pdev->dev; in ixgbevf_alloc_q_vector()
2785 ring->netdev = adapter->netdev; in ixgbevf_alloc_q_vector()
2788 ring->q_vector = q_vector; in ixgbevf_alloc_q_vector()
2791 ixgbevf_add_ring(ring, &q_vector->tx); in ixgbevf_alloc_q_vector()
2794 ring->count = adapter->tx_ring_count; in ixgbevf_alloc_q_vector()
2795 ring->queue_index = xdp_idx; in ixgbevf_alloc_q_vector()
2796 ring->reg_idx = reg_idx; in ixgbevf_alloc_q_vector()
2800 adapter->xdp_ring[xdp_idx] = ring; in ixgbevf_alloc_q_vector()
2803 xdp_count--; in ixgbevf_alloc_q_vector()
2813 ring->dev = &adapter->pdev->dev; in ixgbevf_alloc_q_vector()
2814 ring->netdev = adapter->netdev; in ixgbevf_alloc_q_vector()
2817 ring->q_vector = q_vector; in ixgbevf_alloc_q_vector()
2820 ixgbevf_add_ring(ring, &q_vector->rx); in ixgbevf_alloc_q_vector()
2823 ring->count = adapter->rx_ring_count; in ixgbevf_alloc_q_vector()
2824 ring->queue_index = rxr_idx; in ixgbevf_alloc_q_vector()
2825 ring->reg_idx = rxr_idx; in ixgbevf_alloc_q_vector()
2828 adapter->rx_ring[rxr_idx] = ring; in ixgbevf_alloc_q_vector()
2831 rxr_count--; in ixgbevf_alloc_q_vector()
2842 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
2852 struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx]; in ixgbevf_free_q_vector()
2855 ixgbevf_for_each_ring(ring, q_vector->tx) { in ixgbevf_free_q_vector()
2857 adapter->xdp_ring[ring->queue_index] = NULL; in ixgbevf_free_q_vector()
2859 adapter->tx_ring[ring->queue_index] = NULL; in ixgbevf_free_q_vector()
2862 ixgbevf_for_each_ring(ring, q_vector->rx) in ixgbevf_free_q_vector()
2863 adapter->rx_ring[ring->queue_index] = NULL; in ixgbevf_free_q_vector()
2865 adapter->q_vector[v_idx] = NULL; in ixgbevf_free_q_vector()
2866 netif_napi_del(&q_vector->napi); in ixgbevf_free_q_vector()
2875 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2879 * return -ENOMEM.
2883 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_alloc_q_vectors()
2884 int rxr_remaining = adapter->num_rx_queues; in ixgbevf_alloc_q_vectors()
2885 int txr_remaining = adapter->num_tx_queues; in ixgbevf_alloc_q_vectors()
2886 int xdp_remaining = adapter->num_xdp_queues; in ixgbevf_alloc_q_vectors()
2891 for (; rxr_remaining; v_idx++, q_vectors--) { in ixgbevf_alloc_q_vectors()
2900 rxr_remaining -= rqpv; in ixgbevf_alloc_q_vectors()
2905 for (; q_vectors; v_idx++, q_vectors--) { in ixgbevf_alloc_q_vectors()
2919 rxr_remaining -= rqpv; in ixgbevf_alloc_q_vectors()
2921 txr_remaining -= tqpv; in ixgbevf_alloc_q_vectors()
2923 xdp_remaining -= xqpv; in ixgbevf_alloc_q_vectors()
2931 v_idx--; in ixgbevf_alloc_q_vectors()
2935 return -ENOMEM; in ixgbevf_alloc_q_vectors()
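
The allocation loop above spreads the remaining Rx, Tx and XDP rings evenly over the vectors that are left. A hedged sketch of that even split (ceiling division of the remainder over the remaining vectors), with made-up queue and vector counts; the driver's separate one-Rx-ring-per-vector fast path is omitted here.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 4;              /* MSI-X vectors left for queues */
	int rxr_remaining = 6;          /* Rx rings still unassigned     */
	int txr_remaining = 6;          /* Tx rings still unassigned     */

	for (int v_idx = 0; q_vectors; v_idx++, q_vectors--) {
		/* Give this vector its fair share of what is left. */
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);

		printf("vector %d: %d rx, %d tx\n", v_idx, rqpv, tqpv);

		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
	}
	return 0;
}
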
2939 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2948 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_free_q_vectors()
2951 q_vectors--; in ixgbevf_free_q_vectors()
2957 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2963 if (!adapter->msix_entries) in ixgbevf_reset_interrupt_capability()
2966 pci_disable_msix(adapter->pdev); in ixgbevf_reset_interrupt_capability()
2967 kfree(adapter->msix_entries); in ixgbevf_reset_interrupt_capability()
2968 adapter->msix_entries = NULL; in ixgbevf_reset_interrupt_capability()
2972 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2985 hw_dbg(&adapter->hw, in ixgbevf_init_interrupt_scheme()
2992 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); in ixgbevf_init_interrupt_scheme()
2996 …hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n… in ixgbevf_init_interrupt_scheme()
2997 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", in ixgbevf_init_interrupt_scheme()
2998 adapter->num_rx_queues, adapter->num_tx_queues, in ixgbevf_init_interrupt_scheme()
2999 adapter->num_xdp_queues); in ixgbevf_init_interrupt_scheme()
3001 set_bit(__IXGBEVF_DOWN, &adapter->state); in ixgbevf_init_interrupt_scheme()
3011 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
3015 * to pre-load conditions
3019 adapter->num_tx_queues = 0; in ixgbevf_clear_interrupt_scheme()
3020 adapter->num_xdp_queues = 0; in ixgbevf_clear_interrupt_scheme()
3021 adapter->num_rx_queues = 0; in ixgbevf_clear_interrupt_scheme()
3028 * ixgbevf_sw_init - Initialize general software structures
3037 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_sw_init()
3038 struct pci_dev *pdev = adapter->pdev; in ixgbevf_sw_init()
3039 struct net_device *netdev = adapter->netdev; in ixgbevf_sw_init()
3043 hw->vendor_id = pdev->vendor; in ixgbevf_sw_init()
3044 hw->device_id = pdev->device; in ixgbevf_sw_init()
3045 hw->revision_id = pdev->revision; in ixgbevf_sw_init()
3046 hw->subsystem_vendor_id = pdev->subsystem_vendor; in ixgbevf_sw_init()
3047 hw->subsystem_device_id = pdev->subsystem_device; in ixgbevf_sw_init()
3049 hw->mbx.ops.init_params(hw); in ixgbevf_sw_init()
3051 if (hw->mac.type >= ixgbe_mac_X550_vf) { in ixgbevf_sw_init()
3058 hw->mac.max_tx_queues = 2; in ixgbevf_sw_init()
3059 hw->mac.max_rx_queues = 2; in ixgbevf_sw_init()
3062 spin_lock_init(&adapter->mbx_lock); in ixgbevf_sw_init()
3064 err = hw->mac.ops.reset_hw(hw); in ixgbevf_sw_init()
3066 dev_info(&pdev->dev, in ixgbevf_sw_init()
3069 err = hw->mac.ops.init_hw(hw); in ixgbevf_sw_init()
3075 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); in ixgbevf_sw_init()
3077 dev_info(&pdev->dev, "Error reading MAC address\n"); in ixgbevf_sw_init()
3078 else if (is_zero_ether_addr(adapter->hw.mac.addr)) in ixgbevf_sw_init()
3079 dev_info(&pdev->dev, in ixgbevf_sw_init()
3081 eth_hw_addr_set(netdev, hw->mac.addr); in ixgbevf_sw_init()
3084 if (!is_valid_ether_addr(netdev->dev_addr)) { in ixgbevf_sw_init()
3085 dev_info(&pdev->dev, "Assigning random MAC address\n"); in ixgbevf_sw_init()
3087 ether_addr_copy(hw->mac.addr, netdev->dev_addr); in ixgbevf_sw_init()
3088 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr); in ixgbevf_sw_init()
3092 adapter->rx_itr_setting = 1; in ixgbevf_sw_init()
3093 adapter->tx_itr_setting = 1; in ixgbevf_sw_init()
3096 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; in ixgbevf_sw_init()
3097 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; in ixgbevf_sw_init()
3099 adapter->link_state = true; in ixgbevf_sw_init()
3101 set_bit(__IXGBEVF_DOWN, &adapter->state); in ixgbevf_sw_init()
3131 * ixgbevf_update_stats - Update the board statistics counters.
3136 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_update_stats()
3141 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_update_stats()
3142 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_update_stats()
3145 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, in ixgbevf_update_stats()
3146 adapter->stats.vfgprc); in ixgbevf_update_stats()
3147 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, in ixgbevf_update_stats()
3148 adapter->stats.vfgptc); in ixgbevf_update_stats()
3150 adapter->stats.last_vfgorc, in ixgbevf_update_stats()
3151 adapter->stats.vfgorc); in ixgbevf_update_stats()
3153 adapter->stats.last_vfgotc, in ixgbevf_update_stats()
3154 adapter->stats.vfgotc); in ixgbevf_update_stats()
3155 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, in ixgbevf_update_stats()
3156 adapter->stats.vfmprc); in ixgbevf_update_stats()
3158 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_update_stats()
3159 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; in ixgbevf_update_stats()
3161 hw_csum_rx_error += rx_ring->rx_stats.csum_err; in ixgbevf_update_stats()
3162 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; in ixgbevf_update_stats()
3163 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; in ixgbevf_update_stats()
3164 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; in ixgbevf_update_stats()
3167 adapter->hw_csum_rx_error = hw_csum_rx_error; in ixgbevf_update_stats()
3168 adapter->alloc_rx_page_failed = alloc_rx_page_failed; in ixgbevf_update_stats()
3169 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; in ixgbevf_update_stats()
3170 adapter->alloc_rx_page = alloc_rx_page; in ixgbevf_update_stats()
3174 * ixgbevf_service_timer - Timer Call-back
3183 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies); in ixgbevf_service_timer()
3190 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state)) in ixgbevf_reset_subtask()
3195 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_reset_subtask()
3196 test_bit(__IXGBEVF_REMOVING, &adapter->state) || in ixgbevf_reset_subtask()
3197 test_bit(__IXGBEVF_RESETTING, &adapter->state)) { in ixgbevf_reset_subtask()
3202 adapter->tx_timeout_count++; in ixgbevf_reset_subtask()
3209 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
3219 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_check_hang_subtask()
3224 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_check_hang_subtask()
3225 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_check_hang_subtask()
3229 if (netif_carrier_ok(adapter->netdev)) { in ixgbevf_check_hang_subtask()
3230 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_check_hang_subtask()
3231 set_check_for_tx_hang(adapter->tx_ring[i]); in ixgbevf_check_hang_subtask()
3232 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbevf_check_hang_subtask()
3233 set_check_for_tx_hang(adapter->xdp_ring[i]); in ixgbevf_check_hang_subtask()
3237 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { in ixgbevf_check_hang_subtask()
3238 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; in ixgbevf_check_hang_subtask()
3240 if (qv->rx.ring || qv->tx.ring) in ixgbevf_check_hang_subtask()
3249 * ixgbevf_watchdog_update_link - update the link status
3254 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_watchdog_update_link()
3255 u32 link_speed = adapter->link_speed; in ixgbevf_watchdog_update_link()
3256 bool link_up = adapter->link_up; in ixgbevf_watchdog_update_link()
3259 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_watchdog_update_link()
3261 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); in ixgbevf_watchdog_update_link()
3263 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_watchdog_update_link()
3266 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) { in ixgbevf_watchdog_update_link()
3267 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); in ixgbevf_watchdog_update_link()
3271 adapter->link_up = link_up; in ixgbevf_watchdog_update_link()
3272 adapter->link_speed = link_speed; in ixgbevf_watchdog_update_link()
3276 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
3282 struct net_device *netdev = adapter->netdev; in ixgbevf_watchdog_link_is_up()
3288 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n", in ixgbevf_watchdog_link_is_up()
3289 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? in ixgbevf_watchdog_link_is_up()
3291 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ? in ixgbevf_watchdog_link_is_up()
3293 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ? in ixgbevf_watchdog_link_is_up()
3301 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
3307 struct net_device *netdev = adapter->netdev; in ixgbevf_watchdog_link_is_down()
3309 adapter->link_speed = 0; in ixgbevf_watchdog_link_is_down()
3315 dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); in ixgbevf_watchdog_link_is_down()
3321 * ixgbevf_watchdog_subtask - worker thread to bring link up
3327 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_watchdog_subtask()
3328 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_watchdog_subtask()
3333 if (adapter->link_up && adapter->link_state) in ixgbevf_watchdog_subtask()
3342 * ixgbevf_service_task - manages and runs subtasks
3350 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_service_task()
3352 if (IXGBE_REMOVED(hw->hw_addr)) { in ixgbevf_service_task()
3353 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { in ixgbevf_service_task()
3370 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
3379 vfree(tx_ring->tx_buffer_info); in ixgbevf_free_tx_resources()
3380 tx_ring->tx_buffer_info = NULL; in ixgbevf_free_tx_resources()
3383 if (!tx_ring->desc) in ixgbevf_free_tx_resources()
3386 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, in ixgbevf_free_tx_resources()
3387 tx_ring->dma); in ixgbevf_free_tx_resources()
3389 tx_ring->desc = NULL; in ixgbevf_free_tx_resources()
3393 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
3402 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_free_all_tx_resources()
3403 if (adapter->tx_ring[i]->desc) in ixgbevf_free_all_tx_resources()
3404 ixgbevf_free_tx_resources(adapter->tx_ring[i]); in ixgbevf_free_all_tx_resources()
3405 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbevf_free_all_tx_resources()
3406 if (adapter->xdp_ring[i]->desc) in ixgbevf_free_all_tx_resources()
3407 ixgbevf_free_tx_resources(adapter->xdp_ring[i]); in ixgbevf_free_all_tx_resources()
3411 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
3418 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); in ixgbevf_setup_tx_resources()
3421 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; in ixgbevf_setup_tx_resources()
3422 tx_ring->tx_buffer_info = vmalloc(size); in ixgbevf_setup_tx_resources()
3423 if (!tx_ring->tx_buffer_info) in ixgbevf_setup_tx_resources()
3426 u64_stats_init(&tx_ring->syncp); in ixgbevf_setup_tx_resources()
3429 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); in ixgbevf_setup_tx_resources()
3430 tx_ring->size = ALIGN(tx_ring->size, 4096); in ixgbevf_setup_tx_resources()
3432 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size, in ixgbevf_setup_tx_resources()
3433 &tx_ring->dma, GFP_KERNEL); in ixgbevf_setup_tx_resources()
3434 if (!tx_ring->desc) in ixgbevf_setup_tx_resources()
3440 vfree(tx_ring->tx_buffer_info); in ixgbevf_setup_tx_resources()
3441 tx_ring->tx_buffer_info = NULL; in ixgbevf_setup_tx_resources()
3442 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n"); in ixgbevf_setup_tx_resources()
3443 return -ENOMEM; in ixgbevf_setup_tx_resources()
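/* Worked example (illustrative values): descriptors are 16 bytes each, so
 * the descriptor area is sized as count * 16 and then padded to a whole
 * page for the coherent DMA allocation:
 *    96 descriptors ->  1536 bytes -> ALIGN(1536, 4096) = 4096
 *   512 descriptors ->  8192 bytes -> ALIGN(8192, 4096) = 8192
 */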
3447 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
3460 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbevf_setup_all_tx_resources()
3461 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); in ixgbevf_setup_all_tx_resources()
3464 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); in ixgbevf_setup_all_tx_resources()
3468 for (j = 0; j < adapter->num_xdp_queues; j++) { in ixgbevf_setup_all_tx_resources()
3469 err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]); in ixgbevf_setup_all_tx_resources()
3472 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); in ixgbevf_setup_all_tx_resources()
3479 while (j--) in ixgbevf_setup_all_tx_resources()
3480 ixgbevf_free_tx_resources(adapter->xdp_ring[j]); in ixgbevf_setup_all_tx_resources()
3481 while (i--) in ixgbevf_setup_all_tx_resources()
3482 ixgbevf_free_tx_resources(adapter->tx_ring[i]); in ixgbevf_setup_all_tx_resources()
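/* Editorial sketch of the unwind-on-partial-failure pattern used above
 * (types and helpers below are hypothetical, not the driver's own): if
 * queue k fails to allocate, everything in [0, k) is torn down in reverse
 * order so no ring is left half-initialized.
 */
struct example_ring;
int  example_setup_one(struct example_ring *ring);
void example_free_one(struct example_ring *ring);

static int example_setup_all(struct example_ring **rings, int n)
{
	int i, err = 0;

	for (i = 0; i < n; i++) {
		err = example_setup_one(rings[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (i--)
		example_free_one(rings[i]);
	return err;
}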
3488 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3499 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; in ixgbevf_setup_rx_resources()
3500 rx_ring->rx_buffer_info = vmalloc(size); in ixgbevf_setup_rx_resources()
3501 if (!rx_ring->rx_buffer_info) in ixgbevf_setup_rx_resources()
3504 u64_stats_init(&rx_ring->syncp); in ixgbevf_setup_rx_resources()
3507 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); in ixgbevf_setup_rx_resources()
3508 rx_ring->size = ALIGN(rx_ring->size, 4096); in ixgbevf_setup_rx_resources()
3510 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size, in ixgbevf_setup_rx_resources()
3511 &rx_ring->dma, GFP_KERNEL); in ixgbevf_setup_rx_resources()
3513 if (!rx_ring->desc) in ixgbevf_setup_rx_resources()
3516 /* XDP RX-queue info */ in ixgbevf_setup_rx_resources()
3517 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, in ixgbevf_setup_rx_resources()
3518 rx_ring->queue_index, 0) < 0) in ixgbevf_setup_rx_resources()
3521 rx_ring->xdp_prog = adapter->xdp_prog; in ixgbevf_setup_rx_resources()
3525 vfree(rx_ring->rx_buffer_info); in ixgbevf_setup_rx_resources()
3526 rx_ring->rx_buffer_info = NULL; in ixgbevf_setup_rx_resources()
3527 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n"); in ixgbevf_setup_rx_resources()
3528 return -ENOMEM; in ixgbevf_setup_rx_resources()
3532 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3545 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_setup_all_rx_resources()
3546 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]); in ixgbevf_setup_all_rx_resources()
3549 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); in ixgbevf_setup_all_rx_resources()
3556 while (i--) in ixgbevf_setup_all_rx_resources()
3557 ixgbevf_free_rx_resources(adapter->rx_ring[i]); in ixgbevf_setup_all_rx_resources()
3562 * ixgbevf_free_rx_resources - Free Rx Resources
3571 rx_ring->xdp_prog = NULL; in ixgbevf_free_rx_resources()
3572 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ixgbevf_free_rx_resources()
3573 vfree(rx_ring->rx_buffer_info); in ixgbevf_free_rx_resources()
3574 rx_ring->rx_buffer_info = NULL; in ixgbevf_free_rx_resources()
3576 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, in ixgbevf_free_rx_resources()
3577 rx_ring->dma); in ixgbevf_free_rx_resources()
3579 rx_ring->desc = NULL; in ixgbevf_free_rx_resources()
3583 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3592 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_free_all_rx_resources()
3593 if (adapter->rx_ring[i]->desc) in ixgbevf_free_all_rx_resources()
3594 ixgbevf_free_rx_resources(adapter->rx_ring[i]); in ixgbevf_free_all_rx_resources()
3598 * ixgbevf_open - Called when a network interface is made active
3612 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_open()
3621 if (!adapter->num_msix_vectors) in ixgbevf_open()
3622 return -ENOMEM; in ixgbevf_open()
3624 if (hw->adapter_stopped) { in ixgbevf_open()
3629 if (hw->adapter_stopped) { in ixgbevf_open()
3631 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n"); in ixgbevf_open()
3637 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) in ixgbevf_open()
3638 return -EBUSY; in ixgbevf_open()
3659 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); in ixgbevf_open()
3663 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); in ixgbevf_open()
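/* Note (editorial): netif_set_real_num_tx_queues()/netif_set_real_num_rx_queues()
 * tell the stack how many of the allocated queues are actually usable after
 * MSI-X negotiation with the PF, so queue selection and RSS decisions match
 * the hardware configuration before the interface is brought up.
 */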
3685 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
3700 * ixgbevf_close - Disables a network interface
3705 * The close entry point is called when an interface is de-activated
3722 struct net_device *dev = adapter->netdev; in ixgbevf_queue_reset_subtask()
3725 &adapter->state)) in ixgbevf_queue_reset_subtask()
3729 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_queue_reset_subtask()
3730 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_queue_reset_subtask()
3756 u16 i = tx_ring->next_to_use; in ixgbevf_tx_ctxtdesc()
3761 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ixgbevf_tx_ctxtdesc()
3766 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); in ixgbevf_tx_ctxtdesc()
3767 context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); in ixgbevf_tx_ctxtdesc()
3768 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); in ixgbevf_tx_ctxtdesc()
3769 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); in ixgbevf_tx_ctxtdesc()
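/* Editorial sketch (plain C): the bump-and-wrap applied to next_to_use
 * above.  The producer index advances by one and folds back to slot 0 when
 * it walks off the end of the descriptor array, avoiding a modulo on the
 * hot path.
 */
static unsigned int ring_advance(unsigned int i, unsigned int count)
{
	i++;
	return (i < count) ? i : 0;
}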
3778 struct sk_buff *skb = first->skb; in ixgbevf_tso()
3792 if (skb->ip_summed != CHECKSUM_PARTIAL) in ixgbevf_tso()
3802 if (eth_p_mpls(first->protocol)) in ixgbevf_tso()
3812 if (ip.v4->version == 4) { in ixgbevf_tso()
3814 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in ixgbevf_tso()
3815 int len = csum_start - trans_start; in ixgbevf_tso()
3821 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? in ixgbevf_tso()
3826 ip.v4->tot_len = 0; in ixgbevf_tso()
3827 first->tx_flags |= IXGBE_TX_FLAGS_TSO | in ixgbevf_tso()
3831 ip.v6->payload_len = 0; in ixgbevf_tso()
3832 first->tx_flags |= IXGBE_TX_FLAGS_TSO | in ixgbevf_tso()
3837 l4_offset = l4.hdr - skb->data; in ixgbevf_tso()
3840 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in ixgbevf_tso()
3843 paylen = skb->len - l4_offset; in ixgbevf_tso()
3844 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); in ixgbevf_tso()
3847 first->gso_segs = skb_shinfo(skb)->gso_segs; in ixgbevf_tso()
3848 first->bytecount += (first->gso_segs - 1) * *hdr_len; in ixgbevf_tso()
3851 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; in ixgbevf_tso()
3852 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; in ixgbevf_tso()
3855 fceof_saidx |= itd->pfsa; in ixgbevf_tso()
3856 type_tucmd |= itd->flags | itd->trailer_len; in ixgbevf_tso()
3859 vlan_macip_lens = l4.hdr - ip.hdr; in ixgbevf_tso()
3860 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; in ixgbevf_tso()
3861 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; in ixgbevf_tso()
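/* Worked example (illustrative frame, standard Ethernet + IPv4 + TCP with
 * no options): l4_offset = 14 + 20 = 34 and *hdr_len = 20 + 34 = 54.  For a
 * 4434-byte skb, paylen = 4434 - 34 = 4400; with an MSS of 1460 the stack
 * reports gso_segs = 3, so bytecount grows by (3 - 1) * 54 = 108 bytes for
 * the headers the hardware will replicate on segments 2 and 3, and the
 * csum_replace_by_diff() call folds the 4400-byte length back out of the
 * TCP pseudo-header checksum so per-segment lengths can be inserted.
 */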
3873 struct sk_buff *skb = first->skb; in ixgbevf_tx_csum()
3878 if (skb->ip_summed != CHECKSUM_PARTIAL) in ixgbevf_tx_csum()
3881 switch (skb->csum_offset) { in ixgbevf_tx_csum()
3899 if (first->protocol == htons(ETH_P_IP)) in ixgbevf_tx_csum()
3903 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; in ixgbevf_tx_csum()
3904 vlan_macip_lens = skb_checksum_start_offset(skb) - in ixgbevf_tx_csum()
3909 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; in ixgbevf_tx_csum()
3911 fceof_saidx |= itd->pfsa; in ixgbevf_tx_csum()
3912 type_tucmd |= itd->flags | itd->trailer_len; in ixgbevf_tx_csum()
3962 tx_desc->read.olinfo_status = olinfo_status; in ixgbevf_tx_olinfo_status()
3969 struct sk_buff *skb = first->skb; in ixgbevf_tx_map()
3975 u32 tx_flags = first->tx_flags; in ixgbevf_tx_map()
3977 u16 i = tx_ring->next_to_use; in ixgbevf_tx_map()
3981 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); in ixgbevf_tx_map()
3984 data_len = skb->data_len; in ixgbevf_tx_map()
3986 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ixgbevf_tx_map()
3990 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in ixgbevf_tx_map()
3991 if (dma_mapping_error(tx_ring->dev, dma)) in ixgbevf_tx_map()
3998 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbevf_tx_map()
4001 tx_desc->read.cmd_type_len = in ixgbevf_tx_map()
4006 if (i == tx_ring->count) { in ixgbevf_tx_map()
4010 tx_desc->read.olinfo_status = 0; in ixgbevf_tx_map()
4013 size -= IXGBE_MAX_DATA_PER_TXD; in ixgbevf_tx_map()
4015 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbevf_tx_map()
4021 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); in ixgbevf_tx_map()
4025 if (i == tx_ring->count) { in ixgbevf_tx_map()
4029 tx_desc->read.olinfo_status = 0; in ixgbevf_tx_map()
4032 data_len -= size; in ixgbevf_tx_map()
4034 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ixgbevf_tx_map()
4037 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
4040 /* write last descriptor with RS and EOP bits */ in ixgbevf_tx_map()
4042 tx_desc->read.cmd_type_len = cmd_type; in ixgbevf_tx_map()
4045 first->time_stamp = jiffies; in ixgbevf_tx_map()
4050 * are new descriptors to fetch. (Only applicable for weak-ordered in ixgbevf_tx_map()
4051 * memory model archs, such as IA-64). in ixgbevf_tx_map()
4059 first->next_to_watch = tx_desc; in ixgbevf_tx_map()
4062 if (i == tx_ring->count) in ixgbevf_tx_map()
4065 tx_ring->next_to_use = i; in ixgbevf_tx_map()
4072 dev_err(tx_ring->dev, "TX DMA map failed\n"); in ixgbevf_tx_map()
4073 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
4078 dma_unmap_page(tx_ring->dev, in ixgbevf_tx_map()
4084 if (i-- == 0) in ixgbevf_tx_map()
4085 i += tx_ring->count; in ixgbevf_tx_map()
4086 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
4090 dma_unmap_single(tx_ring->dev, in ixgbevf_tx_map()
4096 dev_kfree_skb_any(tx_buffer->skb); in ixgbevf_tx_map()
4097 tx_buffer->skb = NULL; in ixgbevf_tx_map()
4099 tx_ring->next_to_use = i; in ixgbevf_tx_map()
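/* Editorial sketch (plain C): the reverse wrap used by the DMA-unmap unwind
 * above.  Relying on unsigned wrap-around, the index steps back one slot
 * and folds to count - 1 when it passes the start of the ring, which is
 * equivalent to (i + count - 1) % count without a divide.
 */
static unsigned int ring_step_back(unsigned int i, unsigned int count)
{
	if (i-- == 0)
		i += count;
	return i;
}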
4104 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbevf_maybe_stop_tx()
4115 return -EBUSY; in __ixgbevf_maybe_stop_tx()
4117 /* A reprieve! - use start_queue because it doesn't call schedule */ in __ixgbevf_maybe_stop_tx()
4118 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbevf_maybe_stop_tx()
4119 ++tx_ring->tx_stats.restart_queue; in __ixgbevf_maybe_stop_tx()
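/* Editorial sketch (simplified fields and a hypothetical desc_unused()
 * helper) of the flow-control pattern above: stop the subqueue first,
 * re-check free descriptors after a full barrier because the completion
 * path may have freed slots in the meantime, and restart immediately if it
 * did, counting that as a restart_queue event.
 */
static int example_maybe_stop_tx(struct example_tx_ring *ring, u16 needed)
{
	netif_stop_subqueue(ring->netdev, ring->queue_index);
	smp_mb();	/* order the stop against the re-check */
	if (desc_unused(ring) < needed)
		return -EBUSY;
	netif_start_subqueue(ring->netdev, ring->queue_index);
	ring->restart_queue++;
	return 0;
}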
4157 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { in ixgbevf_xmit_frame_ring()
4158 skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in ixgbevf_xmit_frame_ring()
4163 count += skb_shinfo(skb)->nr_frags; in ixgbevf_xmit_frame_ring()
4166 tx_ring->tx_stats.tx_busy++; in ixgbevf_xmit_frame_ring()
4171 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in ixgbevf_xmit_frame_ring()
4172 first->skb = skb; in ixgbevf_xmit_frame_ring()
4173 first->bytecount = skb->len; in ixgbevf_xmit_frame_ring()
4174 first->gso_segs = 1; in ixgbevf_xmit_frame_ring()
4183 first->tx_flags = tx_flags; in ixgbevf_xmit_frame_ring()
4184 first->protocol = vlan_get_protocol(skb); in ixgbevf_xmit_frame_ring()
4203 dev_kfree_skb_any(first->skb); in ixgbevf_xmit_frame_ring()
4204 first->skb = NULL; in ixgbevf_xmit_frame_ring()
4214 if (skb->len <= 0) { in ixgbevf_xmit_frame()
4222 if (skb->len < 17) { in ixgbevf_xmit_frame()
4225 skb->len = 17; in ixgbevf_xmit_frame()
4228 tx_ring = adapter->tx_ring[skb->queue_mapping]; in ixgbevf_xmit_frame()
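/* Note (editorial): the hardware's olinfo payload-length field requires a
 * minimum packet size of 17 bytes, so anything shorter is padded (and its
 * length fixed up) before being handed to the per-queue transmit ring.
 */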
4233 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
4242 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_mac()
4246 if (!is_valid_ether_addr(addr->sa_data)) in ixgbevf_set_mac()
4247 return -EADDRNOTAVAIL; in ixgbevf_set_mac()
4249 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_set_mac()
4251 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0); in ixgbevf_set_mac()
4253 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_set_mac()
4256 return -EPERM; in ixgbevf_set_mac()
4258 ether_addr_copy(hw->mac.addr, addr->sa_data); in ixgbevf_set_mac()
4259 ether_addr_copy(hw->mac.perm_addr, addr->sa_data); in ixgbevf_set_mac()
4260 eth_hw_addr_set(netdev, addr->sa_data); in ixgbevf_set_mac()
4266 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
4275 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_change_mtu()
4280 if (adapter->xdp_prog) { in ixgbevf_change_mtu()
4281 dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n"); in ixgbevf_change_mtu()
4282 return -EPERM; in ixgbevf_change_mtu()
4285 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_change_mtu()
4287 ret = hw->mac.ops.set_rlpml(hw, max_frame); in ixgbevf_change_mtu()
4288 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_change_mtu()
4290 return -EINVAL; in ixgbevf_change_mtu()
4293 netdev->mtu, new_mtu); in ixgbevf_change_mtu()
4296 WRITE_ONCE(netdev->mtu, new_mtu); in ixgbevf_change_mtu()
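/* Worked example (standard Ethernet framing): in this driver the receive
 * limit requested from the PF is derived as MTU + Ethernet header + FCS, so
 * an MTU of 1500 becomes a 1500 + 14 + 4 = 1518-byte set_rlpml request and
 * a 9000-byte jumbo MTU becomes 9018 bytes.
 */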
4328 adapter->hw.hw_addr = adapter->io_addr; in ixgbevf_resume()
4330 clear_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_resume()
4350 ixgbevf_suspend(&pdev->dev); in ixgbevf_shutdown()
4361 start = u64_stats_fetch_begin(&ring->syncp); in ixgbevf_get_tx_ring_stats()
4362 bytes = ring->stats.bytes; in ixgbevf_get_tx_ring_stats()
4363 packets = ring->stats.packets; in ixgbevf_get_tx_ring_stats()
4364 } while (u64_stats_fetch_retry(&ring->syncp, start)); in ixgbevf_get_tx_ring_stats()
4365 stats->tx_bytes += bytes; in ixgbevf_get_tx_ring_stats()
4366 stats->tx_packets += packets; in ixgbevf_get_tx_ring_stats()
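/* Note (editorial): u64_stats_fetch_begin()/u64_stats_fetch_retry() form a
 * seqcount read side.  On 32-bit kernels the 64-bit byte and packet counters
 * cannot be loaded atomically, so the loop retries whenever the transmit
 * path bumped the sequence mid-read, guaranteeing a consistent snapshot; on
 * 64-bit builds the helpers are effectively no-ops and the loop runs once.
 */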
4381 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; in ixgbevf_get_stats()
4384 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_get_stats()
4385 ring = adapter->rx_ring[i]; in ixgbevf_get_stats()
4387 start = u64_stats_fetch_begin(&ring->syncp); in ixgbevf_get_stats()
4388 bytes = ring->stats.bytes; in ixgbevf_get_stats()
4389 packets = ring->stats.packets; in ixgbevf_get_stats()
4390 } while (u64_stats_fetch_retry(&ring->syncp, start)); in ixgbevf_get_stats()
4391 stats->rx_bytes += bytes; in ixgbevf_get_stats()
4392 stats->rx_packets += packets; in ixgbevf_get_stats()
4395 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbevf_get_stats()
4396 ring = adapter->tx_ring[i]; in ixgbevf_get_stats()
4400 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbevf_get_stats()
4401 ring = adapter->xdp_ring[i]; in ixgbevf_get_stats()
4425 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); in ixgbevf_features_check()
4435 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) in ixgbevf_features_check()
4443 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; in ixgbevf_xdp_setup()
4448 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_xdp_setup()
4449 struct ixgbevf_ring *ring = adapter->rx_ring[i]; in ixgbevf_xdp_setup()
4452 return -EINVAL; in ixgbevf_xdp_setup()
4455 old_prog = xchg(&adapter->xdp_prog, prog); in ixgbevf_xdp_setup()
4472 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_xdp_setup()
4473 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); in ixgbevf_xdp_setup()
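/* Note (editorial): xchg() publishes the new XDP program atomically, first
 * on the adapter and then on every Rx ring, so the hot path always sees
 * either the old or the new program pointer, never a partial update; the
 * old program returned by the swap is dropped with bpf_prog_put() once it
 * has been unhooked from every ring.
 */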
4484 switch (xdp->command) { in ixgbevf_xdp()
4486 return ixgbevf_xdp_setup(dev, xdp->prog); in ixgbevf_xdp()
4488 return -EINVAL; in ixgbevf_xdp()
4510 dev->netdev_ops = &ixgbevf_netdev_ops; in ixgbevf_assign_netdev_ops()
4512 dev->watchdog_timeo = 5 * HZ; in ixgbevf_assign_netdev_ops()
4516 * ixgbevf_probe - Device Initialization Routine
4531 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; in ixgbevf_probe()
4539 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in ixgbevf_probe()
4541 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); in ixgbevf_probe()
4547 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); in ixgbevf_probe()
4556 err = -ENOMEM; in ixgbevf_probe()
4560 SET_NETDEV_DEV(netdev, &pdev->dev); in ixgbevf_probe()
4564 adapter->netdev = netdev; in ixgbevf_probe()
4565 adapter->pdev = pdev; in ixgbevf_probe()
4566 hw = &adapter->hw; in ixgbevf_probe()
4567 hw->back = adapter; in ixgbevf_probe()
4568 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in ixgbevf_probe()
4575 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), in ixgbevf_probe()
4577 adapter->io_addr = hw->hw_addr; in ixgbevf_probe()
4578 if (!hw->hw_addr) { in ixgbevf_probe()
4579 err = -EIO; in ixgbevf_probe()
4586 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); in ixgbevf_probe()
4587 hw->mac.type = ii->mac; in ixgbevf_probe()
4589 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy, in ixgbevf_probe()
4598 if (!is_valid_ether_addr(netdev->dev_addr)) { in ixgbevf_probe()
4600 err = -EIO; in ixgbevf_probe()
4604 netdev->hw_features = NETIF_F_SG | in ixgbevf_probe()
4618 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES; in ixgbevf_probe()
4619 netdev->hw_features |= NETIF_F_GSO_PARTIAL | in ixgbevf_probe()
4622 netdev->features = netdev->hw_features | NETIF_F_HIGHDMA; in ixgbevf_probe()
4624 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; in ixgbevf_probe()
4625 netdev->mpls_features |= NETIF_F_SG | in ixgbevf_probe()
4629 netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES; in ixgbevf_probe()
4630 netdev->hw_enc_features |= netdev->vlan_features; in ixgbevf_probe()
4633 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | in ixgbevf_probe()
4637 netdev->priv_flags |= IFF_UNICAST_FLT; in ixgbevf_probe()
4638 netdev->xdp_features = NETDEV_XDP_ACT_BASIC; in ixgbevf_probe()
4640 /* MTU range: 68 - 1504 or 9710 */ in ixgbevf_probe()
4641 netdev->min_mtu = ETH_MIN_MTU; in ixgbevf_probe()
4642 switch (adapter->hw.api_version) { in ixgbevf_probe()
4648 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - in ixgbevf_probe()
4652 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) in ixgbevf_probe()
4653 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - in ixgbevf_probe()
4656 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN; in ixgbevf_probe()
4660 if (IXGBE_REMOVED(hw->hw_addr)) { in ixgbevf_probe()
4661 err = -EIO; in ixgbevf_probe()
4665 timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0); in ixgbevf_probe()
4667 INIT_WORK(&adapter->service_task, ixgbevf_service_task); in ixgbevf_probe()
4668 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state); in ixgbevf_probe()
4669 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); in ixgbevf_probe()
4675 strcpy(netdev->name, "eth%d"); in ixgbevf_probe()
4688 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); in ixgbevf_probe()
4689 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); in ixgbevf_probe()
4691 switch (hw->mac.type) { in ixgbevf_probe()
4693 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n"); in ixgbevf_probe()
4696 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); in ixgbevf_probe()
4699 dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n"); in ixgbevf_probe()
4703 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); in ixgbevf_probe()
4713 iounmap(adapter->io_addr); in ixgbevf_probe()
4714 kfree(adapter->rss_key); in ixgbevf_probe()
4716 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_probe()
4728 * ixgbevf_remove - Device Removal Routine
4733 * Hot-Plug event, or because the driver is going to be removed from memory.
4747 set_bit(__IXGBEVF_REMOVING, &adapter->state); in ixgbevf_remove()
4748 cancel_work_sync(&adapter->service_task); in ixgbevf_remove()
4750 if (netdev->reg_state == NETREG_REGISTERED) in ixgbevf_remove()
4757 iounmap(adapter->io_addr); in ixgbevf_remove()
4760 hw_dbg(&adapter->hw, "Remove complete\n"); in ixgbevf_remove()
4762 kfree(adapter->rss_key); in ixgbevf_remove()
4763 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_remove()
4771 * ixgbevf_io_error_detected - called when PCI error is detected
4784 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) in ixgbevf_io_error_detected()
4798 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) in ixgbevf_io_error_detected()
4807 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
4810 * Restart the card from scratch, as if from a cold-boot. Implementation
4811 * resembles the first-half of the ixgbevf_resume routine.
4819 dev_err(&pdev->dev, in ixgbevf_io_slot_reset()
4820 "Cannot re-enable PCI device after reset.\n"); in ixgbevf_io_slot_reset()
4824 adapter->hw.hw_addr = adapter->io_addr; in ixgbevf_io_slot_reset()
4826 clear_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_io_slot_reset()
4835 * ixgbevf_io_resume - called when traffic can start flowing again.
4840 * Implementation resembles the second-half of the ixgbevf_resume routine.
4877 * ixgbevf_init_module - Driver Registration Routine
4891 return -ENOMEM; in ixgbevf_init_module()
4906 * ixgbevf_exit_module - Driver Exit Cleanup Routine
4922 * ixgbevf_get_hw_dev_name - return device name string
4928 struct ixgbevf_adapter *adapter = hw->back; in ixgbevf_get_hw_dev_name()
4930 return adapter->netdev->name; in ixgbevf_get_hw_dev_name()