Lines matching full:vi (search results over the Linux virtio-net driver, drivers/net/virtio_net.c; each hit shows the source line number, the matching code, and the enclosing function)

511 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
661 static int vq_type(struct virtnet_info *vi, int qid) in vq_type() argument
663 if (qid == vi->max_queue_pairs * 2) in vq_type()
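The two vq_type() hits above show only the control-queue check. A minimal sketch of the whole classifier, consistent with the virtqueue layout the rest of the file assumes (even vq index = RX, odd = TX, the last vq = control) and with the VIRTNET_Q_TYPE_* names visible in the stats hits further down; the even/odd branch is reconstructed, not shown by the search:

static int vq_type(struct virtnet_info *vi, int qid)
{
	/* the control virtqueue sits after all rx/tx pairs */
	if (qid == vi->max_queue_pairs * 2)
		return VIRTNET_Q_TYPE_CQ;

	/* odd vq indices are tx, even ones are rx */
	if (qid % 2)
		return VIRTNET_Q_TYPE_TX;

	return VIRTNET_Q_TYPE_RX;
}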
686 /* Find end of list, sew whole thing into vi->rq.pages. */ in give_pages()
705 static void virtnet_rq_free_buf(struct virtnet_info *vi, in virtnet_rq_free_buf() argument
708 if (vi->mergeable_rx_bufs) in virtnet_rq_free_buf()
710 else if (vi->big_packets) in virtnet_rq_free_buf()
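The three hits above give the dispatch of virtnet_rq_free_buf() but elide the branch bodies. A sketch of the likely bodies, inferred from the give_pages() comment just above and from the page-based buffer modes; the put_page()/give_pages() calls are an assumption, not shown by the search:

static void virtnet_rq_free_buf(struct virtnet_info *vi,
				struct receive_queue *rq, void *buf)
{
	if (vi->mergeable_rx_bufs)
		put_page(virt_to_head_page(buf));	/* page-fragment buffer */
	else if (vi->big_packets)
		give_pages(rq, buf);			/* return page chain to rq->pages */
	else
		put_page(virt_to_head_page(buf));	/* small-buffer page */
}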
716 static void enable_delayed_refill(struct virtnet_info *vi) in enable_delayed_refill() argument
718 spin_lock_bh(&vi->refill_lock); in enable_delayed_refill()
719 vi->refill_enabled = true; in enable_delayed_refill()
720 spin_unlock_bh(&vi->refill_lock); in enable_delayed_refill()
723 static void disable_delayed_refill(struct virtnet_info *vi) in disable_delayed_refill() argument
725 spin_lock_bh(&vi->refill_lock); in disable_delayed_refill()
726 vi->refill_enabled = false; in disable_delayed_refill()
727 spin_unlock_bh(&vi->refill_lock); in disable_delayed_refill()
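enable/disable_delayed_refill() only toggle a flag under refill_lock; the flag's purpose shows up at the scheduling site (see the virtnet_receive() hits at source lines 2914-2918 below). A hypothetical helper, mirroring those hits, to make the handshake explicit: close/teardown clears the flag and cancels the work, so no new refill work can slip in afterwards:

/* hypothetical helper, restating the pattern at 2914-2918 */
static void maybe_schedule_refill(struct virtnet_info *vi)
{
	spin_lock(&vi->refill_lock);
	if (vi->refill_enabled)
		schedule_delayed_work(&vi->refill, 0);
	spin_unlock(&vi->refill_lock);
}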
730 static void enable_rx_mode_work(struct virtnet_info *vi) in enable_rx_mode_work() argument
733 vi->rx_mode_work_enabled = true; in enable_rx_mode_work()
737 static void disable_rx_mode_work(struct virtnet_info *vi) in disable_rx_mode_work() argument
740 vi->rx_mode_work_enabled = false; in disable_rx_mode_work()
773 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done() local
774 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
783 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
820 static struct sk_buff *page_to_skb(struct virtnet_info *vi, in page_to_skb() argument
836 hdr_len = vi->hdr_len; in page_to_skb()
837 if (vi->mergeable_rx_bufs) in page_to_skb()
878 if (vi->mergeable_rx_bufs) { in page_to_skb()
921 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_unmap() local
927 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_unmap()
953 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_get_buf() local
956 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_get_buf()
967 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_init_one_sg() local
973 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_init_one_sg()
990 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_alloc() local
995 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_alloc()
1045 struct virtnet_info *vi = vq->vdev->priv; in virtnet_rq_unmap_free_buf() local
1049 rq = &vi->rq[i]; in virtnet_rq_unmap_free_buf()
1056 if (!vi->big_packets || vi->mergeable_rx_bufs) in virtnet_rq_unmap_free_buf()
1059 virtnet_rq_free_buf(vi, rq, buf); in virtnet_rq_unmap_free_buf()
1081 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) in is_xdp_raw_buffer_queue() argument
1083 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) in is_xdp_raw_buffer_queue()
1085 else if (q < vi->curr_queue_pairs) in is_xdp_raw_buffer_queue()
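The two conditions of is_xdp_raw_buffer_queue() appear above without their return values. The returns below are an assumption, but the only reading consistent with the callers (virtnet_xsk_xmit, virtnet_xdp_xmit, virtnet_poll_cleantx, virtnet_poll_tx), which use the result to skip queues reserved for XDP transmit:

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	/* queues below curr - xdp are ordinary skb tx queues */
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	/* the top xdp_queue_pairs queues carry raw XDP frames */
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;	/* index out of the active range */
}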
1091 static void check_sq_full_and_disable(struct virtnet_info *vi, in check_sq_full_and_disable() argument
1098 qnum = sq - vi->sq; in check_sq_full_and_disable()
1134 static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi, in buf_to_xdp() argument
1142 bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len; in buf_to_xdp()
1146 vi->dev->name, len, bufsize); in buf_to_xdp()
1147 DEV_STATS_INC(vi->dev, rx_length_errors); in buf_to_xdp()
1187 static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi, in virtnet_receive_xsk_small() argument
1239 static int xsk_append_merge_buffer(struct virtnet_info *vi, in xsk_append_merge_buffer() argument
1258 vi->dev->name, num_buf, in xsk_append_merge_buffer()
1259 virtio16_to_cpu(vi->vdev, in xsk_append_merge_buffer()
1261 DEV_STATS_INC(vi->dev, rx_length_errors); in xsk_append_merge_buffer()
1267 xdp = buf_to_xdp(vi, rq, buf, len); in xsk_append_merge_buffer()
1277 memcpy(buf, xdp->data - vi->hdr_len, len); in xsk_append_merge_buffer()
1296 xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats); in xsk_append_merge_buffer()
1300 static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi, in virtnet_receive_xsk_merge() argument
1310 hdr = xdp->data - vi->hdr_len; in virtnet_receive_xsk_merge()
1311 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in virtnet_receive_xsk_merge()
1327 if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) { in virtnet_receive_xsk_merge()
1351 static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq, in virtnet_receive_xsk_buf() argument
1356 struct net_device *dev = vi->dev; in virtnet_receive_xsk_buf()
1361 len -= vi->hdr_len; in virtnet_receive_xsk_buf()
1365 xdp = buf_to_xdp(vi, rq, buf, len); in virtnet_receive_xsk_buf()
1376 flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags; in virtnet_receive_xsk_buf()
1378 if (!vi->mergeable_rx_bufs) in virtnet_receive_xsk_buf()
1379 skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats); in virtnet_receive_xsk_buf()
1381 skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats); in virtnet_receive_xsk_buf()
1384 virtnet_receive_done(vi, rq, skb, flags); in virtnet_receive_xsk_buf()
1387 static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq, in virtnet_add_recvbuf_xsk() argument
1402 len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len; in virtnet_add_recvbuf_xsk()
1409 addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len; in virtnet_add_recvbuf_xsk()
1442 struct virtnet_info *vi; in virtnet_xsk_xmit_one() local
1445 vi = sq->vq->vdev->priv; in virtnet_xsk_xmit_one()
1451 sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len); in virtnet_xsk_xmit_one()
1494 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_xsk_xmit() local
1496 struct net_device *dev = vi->dev; in virtnet_xsk_xmit()
1503 __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats); in virtnet_xsk_xmit()
1510 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xsk_xmit()
1511 check_sq_full_and_disable(vi, vi->dev, sq); in virtnet_xsk_xmit()
1516 txq = netdev_get_tx_queue(vi->dev, sq - vi->sq); in virtnet_xsk_xmit()
1545 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xsk_wakeup() local
1551 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_wakeup()
1554 sq = &vi->sq[qid]; in virtnet_xsk_wakeup()
1571 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, in __virtnet_xdp_xmit_one() argument
1580 if (unlikely(xdpf->headroom < vi->hdr_len)) in __virtnet_xdp_xmit_one()
1595 xdpf->headroom -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1596 xdpf->data -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1599 memset(hdr, 0, vi->hdr_len); in __virtnet_xdp_xmit_one()
1600 xdpf->len += vi->hdr_len; in __virtnet_xdp_xmit_one()
1618 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1626 #define virtnet_xdp_get_sq(vi) ({ \ argument
1629 typeof(vi) v = (vi); \
1645 #define virtnet_xdp_put_sq(vi, q) { \ argument
1647 typeof(vi) v = (vi); \
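The virtnet_xdp_get_sq()/virtnet_xdp_put_sq() hits show only macro scaffolding (the typeof dance) plus the truncated comment at 1618. A simplified, function-style sketch of the selection idea: when there are more queue pairs than CPUs each CPU gets a dedicated XDP tx queue, otherwise queues are shared per-CPU modulo the count and need the txq lock. The exact index arithmetic and the txq lock/acquire details are assumptions:

static struct send_queue *xdp_get_sq_sketch(struct virtnet_info *vi)
{
	unsigned int qp;

	if (vi->curr_queue_pairs > nr_cpu_ids)
		/* dedicated per-CPU XDP tx queue, no txq lock required */
		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
		     smp_processor_id();
	else
		/* shared queue: the real macro also takes the txq lock */
		qp = smp_processor_id() % vi->curr_queue_pairs;

	return &vi->sq[qp];
}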
1659 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xdp_xmit() local
1661 struct receive_queue *rq = vi->rq; in virtnet_xdp_xmit()
1676 sq = virtnet_xdp_get_sq(vi); in virtnet_xdp_xmit()
1684 virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), in virtnet_xdp_xmit()
1690 if (__virtnet_xdp_xmit_one(vi, sq, xdpf)) in virtnet_xdp_xmit()
1696 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xdp_xmit()
1697 check_sq_full_and_disable(vi, dev, sq); in virtnet_xdp_xmit()
1712 virtnet_xdp_put_sq(vi, sq); in virtnet_xdp_xmit()
1785 static unsigned int virtnet_get_headroom(struct virtnet_info *vi) in virtnet_get_headroom() argument
1787 return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0; in virtnet_get_headroom()
1858 static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi, in receive_small_build_skb() argument
1869 headroom = vi->hdr_len + header_offset; in receive_small_build_skb()
1878 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len); in receive_small_build_skb()
1884 struct virtnet_info *vi, in receive_small_xdp() argument
1894 unsigned int headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1914 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { in receive_small_xdp()
1916 unsigned int tlen = len + vi->hdr_len; in receive_small_xdp()
1919 xdp_headroom = virtnet_get_headroom(vi); in receive_small_xdp()
1921 headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1936 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, in receive_small_xdp()
1975 struct virtnet_info *vi, in receive_small() argument
1991 len -= vi->hdr_len; in receive_small()
2001 if (unlikely(vi->xdp_enabled)) { in receive_small()
2007 skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf, in receive_small()
2016 skb = receive_small_build_skb(vi, xdp_headroom, buf, len); in receive_small()
2027 struct virtnet_info *vi, in receive_big() argument
2035 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); in receive_big()
2037 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_big()
2079 struct virtnet_info *vi, in build_skb_from_xdp_buff() argument
2122 struct virtnet_info *vi, in virtnet_build_xdp_buff_mrg() argument
2144 XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); in virtnet_build_xdp_buff_mrg()
2170 virtio16_to_cpu(vi->vdev, hdr->num_buffers)); in virtnet_build_xdp_buff_mrg()
2210 static void *mergeable_xdp_get_buf(struct virtnet_info *vi, in mergeable_xdp_get_buf() argument
2244 if (likely(headroom >= virtnet_get_headroom(vi) && in mergeable_xdp_get_buf()
2289 struct virtnet_info *vi, in receive_mergeable_xdp() argument
2299 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable_xdp()
2310 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page, in receive_mergeable_xdp()
2315 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz, in receive_mergeable_xdp()
2324 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); in receive_mergeable_xdp()
2392 struct virtnet_info *vi, in receive_mergeable() argument
2401 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable()
2411 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_mergeable()
2420 if (unlikely(vi->xdp_enabled)) { in receive_mergeable()
2426 head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx, in receive_mergeable()
2434 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom); in receive_mergeable()
2444 virtio16_to_cpu(vi->vdev, in receive_mergeable()
2512 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq, in virtnet_receive_done() argument
2516 struct net_device *dev = vi->dev; in virtnet_receive_done()
2519 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) in virtnet_receive_done()
2526 virtio_is_little_endian(vi->vdev))) { in virtnet_receive_done()
2546 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, in receive_buf() argument
2551 struct net_device *dev = vi->dev; in receive_buf()
2555 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { in receive_buf()
2558 virtnet_rq_free_buf(vi, rq, buf); in receive_buf()
2571 if (vi->mergeable_rx_bufs) in receive_buf()
2572 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, in receive_buf()
2574 else if (vi->big_packets) in receive_buf()
2575 skb = receive_big(dev, vi, rq, buf, len, stats); in receive_buf()
2577 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); in receive_buf()
2582 virtnet_receive_done(vi, rq, skb, flags); in receive_buf()
2590 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_small() argument
2594 unsigned int xdp_headroom = virtnet_get_headroom(vi); in add_recvbuf_small()
2596 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; in add_recvbuf_small()
2611 virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN); in add_recvbuf_small()
2622 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_big() argument
2629 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); in add_recvbuf_big()
2631 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */ in add_recvbuf_big()
2632 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { in add_recvbuf_big()
2655 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
2663 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, in add_recvbuf_big()
2675 struct virtnet_info *vi = rq->vq->vdev->priv; in get_mergeable_buf_len() local
2676 const size_t hdr_len = vi->hdr_len; in get_mergeable_buf_len()
2688 static int add_recvbuf_mergeable(struct virtnet_info *vi, in add_recvbuf_mergeable() argument
2692 unsigned int headroom = virtnet_get_headroom(vi); in add_recvbuf_mergeable()
2749 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, in try_fill_recv() argument
2755 err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp); in try_fill_recv()
2760 if (vi->mergeable_rx_bufs) in try_fill_recv()
2761 err = add_recvbuf_mergeable(vi, rq, gfp); in try_fill_recv()
2762 else if (vi->big_packets) in try_fill_recv()
2763 err = add_recvbuf_big(vi, rq, gfp); in try_fill_recv()
2765 err = add_recvbuf_small(vi, rq, gfp); in try_fill_recv()
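The try_fill_recv() hits above show the per-mode dispatch; the loop and virtqueue kick around it are elided. A trimmed sketch of the surrounding control flow, with the kick-statistics bookkeeping of the real function simplified away; treat the loop condition and return convention as assumptions:

static bool try_fill_recv_sketch(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	int err;

	if (rq->xsk_pool) {
		err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp);
		goto kick;
	}

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		if (err)
			break;			/* typically -ENOMEM or ring full */
	} while (rq->vq->num_free);

kick:
	virtqueue_kick(rq->vq);

	/* callers (see 2914 below) reschedule the refill work on failure */
	return err != -ENOMEM;
}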
2785 struct virtnet_info *vi = rvq->vdev->priv; in skb_recv_done() local
2786 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done()
2805 static void virtnet_napi_tx_enable(struct virtnet_info *vi, in virtnet_napi_tx_enable() argument
2815 if (!vi->affinity_hint_set) { in virtnet_napi_tx_enable()
2831 struct virtnet_info *vi = in refill_work() local
2836 for (i = 0; i < vi->curr_queue_pairs; i++) { in refill_work()
2837 struct receive_queue *rq = &vi->rq[i]; in refill_work()
2840 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); in refill_work()
2847 schedule_delayed_work(&vi->refill, HZ/2); in refill_work()
2851 static int virtnet_receive_xsk_bufs(struct virtnet_info *vi, in virtnet_receive_xsk_bufs() argument
2866 virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, stats); in virtnet_receive_xsk_bufs()
2873 static int virtnet_receive_packets(struct virtnet_info *vi, in virtnet_receive_packets() argument
2883 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_receive_packets()
2887 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, stats); in virtnet_receive_packets()
2893 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats); in virtnet_receive_packets()
2904 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive() local
2909 packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats); in virtnet_receive()
2911 packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats); in virtnet_receive()
2914 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { in virtnet_receive()
2915 spin_lock(&vi->refill_lock); in virtnet_receive()
2916 if (vi->refill_enabled) in virtnet_receive()
2917 schedule_delayed_work(&vi->refill, 0); in virtnet_receive()
2918 spin_unlock(&vi->refill_lock); in virtnet_receive()
2943 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll_cleantx() local
2945 struct send_queue *sq = &vi->sq[index]; in virtnet_poll_cleantx()
2946 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_cleantx()
2948 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
2975 static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq) in virtnet_rx_dim_update() argument
2998 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll() local
3020 virtnet_rx_dim_update(vi, rq); in virtnet_poll()
3024 sq = virtnet_xdp_get_sq(vi); in virtnet_poll()
3030 virtnet_xdp_put_sq(vi, sq); in virtnet_poll()
3036 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index) in virtnet_disable_queue_pair() argument
3038 virtnet_napi_tx_disable(&vi->sq[qp_index].napi); in virtnet_disable_queue_pair()
3039 napi_disable(&vi->rq[qp_index].napi); in virtnet_disable_queue_pair()
3040 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_disable_queue_pair()
3043 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) in virtnet_enable_queue_pair() argument
3045 struct net_device *dev = vi->dev; in virtnet_enable_queue_pair()
3048 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, in virtnet_enable_queue_pair()
3049 vi->rq[qp_index].napi.napi_id); in virtnet_enable_queue_pair()
3053 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, in virtnet_enable_queue_pair()
3058 virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); in virtnet_enable_queue_pair()
3059 virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); in virtnet_enable_queue_pair()
3064 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_enable_queue_pair()
3068 static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim) in virtnet_cancel_dim() argument
3070 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_cancel_dim()
3075 static void virtnet_update_settings(struct virtnet_info *vi) in virtnet_update_settings() argument
3080 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) in virtnet_update_settings()
3083 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); in virtnet_update_settings()
3086 vi->speed = speed; in virtnet_update_settings()
3088 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); in virtnet_update_settings()
3091 vi->duplex = duplex; in virtnet_update_settings()
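virtnet_update_settings() reads speed and duplex from virtio config space; the hits skip the validity checks between each read and its assignment. A sketch with the elided checks filled in; the ethtool_validate_speed/duplex calls are an assumption consistent with ethtool's expectation that raw config values are sanitized:

static void virtnet_update_settings(struct virtnet_info *vi)
{
	u32 speed;
	u8 duplex;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
		return;

	virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
	if (ethtool_validate_speed(speed))	/* assumed check */
		vi->speed = speed;

	virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
	if (ethtool_validate_duplex(duplex))	/* assumed check */
		vi->duplex = duplex;
}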
3096 struct virtnet_info *vi = netdev_priv(dev); in virtnet_open() local
3099 enable_delayed_refill(vi); in virtnet_open()
3101 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_open()
3102 if (i < vi->curr_queue_pairs) in virtnet_open()
3104 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
3105 schedule_delayed_work(&vi->refill, 0); in virtnet_open()
3107 err = virtnet_enable_queue_pair(vi, i); in virtnet_open()
3112 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_open()
3113 if (vi->status & VIRTIO_NET_S_LINK_UP) in virtnet_open()
3114 netif_carrier_on(vi->dev); in virtnet_open()
3115 virtio_config_driver_enable(vi->vdev); in virtnet_open()
3117 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_open()
3124 disable_delayed_refill(vi); in virtnet_open()
3125 cancel_delayed_work_sync(&vi->refill); in virtnet_open()
3128 virtnet_disable_queue_pair(vi, i); in virtnet_open()
3129 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_open()
3138 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_poll_tx() local
3144 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { in virtnet_poll_tx()
3150 txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_tx()
3200 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb() local
3202 unsigned hdr_len = vi->hdr_len; in xmit_skb()
3205 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); in xmit_skb()
3207 can_push = vi->any_header_sg && in xmit_skb()
3218 virtio_is_little_endian(vi->vdev), false, in xmit_skb()
3222 if (vi->mergeable_rx_bufs) in xmit_skb()
3247 struct virtnet_info *vi = netdev_priv(dev); in start_xmit() local
3249 struct send_queue *sq = &vi->sq[qnum]; in start_xmit()
3290 check_sq_full_and_disable(vi, dev, sq); in start_xmit()
3305 static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq) in virtnet_rx_pause() argument
3307 bool running = netif_running(vi->dev); in virtnet_rx_pause()
3311 virtnet_cancel_dim(vi, &rq->dim); in virtnet_rx_pause()
3315 static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq) in virtnet_rx_resume() argument
3317 bool running = netif_running(vi->dev); in virtnet_rx_resume()
3319 if (!try_fill_recv(vi, rq, GFP_KERNEL)) in virtnet_rx_resume()
3320 schedule_delayed_work(&vi->refill, 0); in virtnet_rx_resume()
3326 static int virtnet_rx_resize(struct virtnet_info *vi, in virtnet_rx_resize() argument
3331 qindex = rq - vi->rq; in virtnet_rx_resize()
3333 virtnet_rx_pause(vi, rq); in virtnet_rx_resize()
3337 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rx_resize()
3339 virtnet_rx_resume(vi, rq); in virtnet_rx_resize()
3343 static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq) in virtnet_tx_pause() argument
3345 bool running = netif_running(vi->dev); in virtnet_tx_pause()
3349 qindex = sq - vi->sq; in virtnet_tx_pause()
3354 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_pause()
3365 netif_stop_subqueue(vi->dev, qindex); in virtnet_tx_pause()
3370 static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq) in virtnet_tx_resume() argument
3372 bool running = netif_running(vi->dev); in virtnet_tx_resume()
3376 qindex = sq - vi->sq; in virtnet_tx_resume()
3378 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_resume()
3386 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi); in virtnet_tx_resume()
3389 static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq, in virtnet_tx_resize() argument
3394 qindex = sq - vi->sq; in virtnet_tx_resize()
3396 virtnet_tx_pause(vi, sq); in virtnet_tx_resize()
3401 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_tx_resize()
3403 virtnet_tx_resume(vi, sq); in virtnet_tx_resize()
3413 static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd, in virtnet_send_command_reply() argument
3423 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); in virtnet_send_command_reply()
3425 mutex_lock(&vi->cvq_lock); in virtnet_send_command_reply()
3426 vi->ctrl->status = ~0; in virtnet_send_command_reply()
3427 vi->ctrl->hdr.class = class; in virtnet_send_command_reply()
3428 vi->ctrl->hdr.cmd = cmd; in virtnet_send_command_reply()
3430 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); in virtnet_send_command_reply()
3437 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); in virtnet_send_command_reply()
3444 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC); in virtnet_send_command_reply()
3446 dev_warn(&vi->vdev->dev, in virtnet_send_command_reply()
3448 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
3452 if (unlikely(!virtqueue_kick(vi->cvq))) in virtnet_send_command_reply()
3458 while (!virtqueue_get_buf(vi->cvq, &tmp) && in virtnet_send_command_reply()
3459 !virtqueue_is_broken(vi->cvq)) { in virtnet_send_command_reply()
3465 ok = vi->ctrl->status == VIRTIO_NET_OK; in virtnet_send_command_reply()
3466 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
3470 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, in virtnet_send_command() argument
3473 return virtnet_send_command_reply(vi, class, cmd, out, NULL); in virtnet_send_command()
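The virtnet_send_command_reply() hits trace the whole control-virtqueue round trip; condensed here into one annotated sketch so the sequence reads in order. Error handling and the optional reply buffer are trimmed, and the sgs layout follows the hdr/out/stat entries visible in the hits above:

static bool cvq_cmd_sketch(struct virtnet_info *vi, u8 class, u8 cmd,
			   struct scatterlist *out)
{
	struct scatterlist hdr, stat, *sgs[4];
	unsigned int out_num = 0, in_num = 0, tmp;
	bool ok;

	mutex_lock(&vi->cvq_lock);
	vi->ctrl->status = ~0;			/* poisoned until the device writes it */
	vi->ctrl->hdr.class = class;
	vi->ctrl->hdr.cmd = cmd;

	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
	sgs[out_num++] = &hdr;			/* device-readable: command header */
	if (out)
		sgs[out_num++] = out;		/* device-readable: command payload */
	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
	sgs[out_num + in_num++] = &stat;	/* device-writable: status byte */

	if (virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC) < 0) {
		mutex_unlock(&vi->cvq_lock);
		return false;
	}
	virtqueue_kick(vi->cvq);

	/* busy-wait for completion, as the hits at 3458-3459 show */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
		cpu_relax();

	ok = vi->ctrl->status == VIRTIO_NET_OK;
	mutex_unlock(&vi->cvq_lock);
	return ok;
}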
3478 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_mac_address() local
3479 struct virtio_device *vdev = vi->vdev; in virtnet_set_mac_address()
3484 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_set_mac_address()
3497 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_set_mac_address()
3526 struct virtnet_info *vi = netdev_priv(dev); in virtnet_stats() local
3530 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_stats()
3532 struct receive_queue *rq = &vi->rq[i]; in virtnet_stats()
3533 struct send_queue *sq = &vi->sq[i]; in virtnet_stats()
3563 static void virtnet_ack_link_announce(struct virtnet_info *vi) in virtnet_ack_link_announce() argument
3565 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, in virtnet_ack_link_announce()
3567 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); in virtnet_ack_link_announce()
3570 static bool virtnet_commit_rss_command(struct virtnet_info *vi);
3572 static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pairs) in virtnet_rss_update_by_qpairs() argument
3577 for (; i < vi->rss_indir_table_size; ++i) { in virtnet_rss_update_by_qpairs()
3579 vi->rss.indirection_table[i] = cpu_to_le16(indir_val); in virtnet_rss_update_by_qpairs()
3581 vi->rss.max_tx_vq = cpu_to_le16(queue_pairs); in virtnet_rss_update_by_qpairs()
3584 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) in virtnet_set_queues() argument
3588 struct net_device *dev = vi->dev; in virtnet_set_queues()
3591 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) in virtnet_set_queues()
3601 if (vi->has_rss && !netif_is_rxfh_configured(dev)) { in virtnet_set_queues()
3602 memcpy(&old_rss, &vi->rss, sizeof(old_rss)); in virtnet_set_queues()
3603 if (rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size)) { in virtnet_set_queues()
3604 vi->rss.indirection_table = old_rss.indirection_table; in virtnet_set_queues()
3608 virtnet_rss_update_by_qpairs(vi, queue_pairs); in virtnet_set_queues()
3610 if (!virtnet_commit_rss_command(vi)) { in virtnet_set_queues()
3612 rss_indirection_table_free(&vi->rss); in virtnet_set_queues()
3613 memcpy(&vi->rss, &old_rss, sizeof(old_rss)); in virtnet_set_queues()
3627 mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); in virtnet_set_queues()
3630 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, in virtnet_set_queues()
3637 vi->curr_queue_pairs = queue_pairs; in virtnet_set_queues()
3640 schedule_delayed_work(&vi->refill, 0); in virtnet_set_queues()
3647 struct virtnet_info *vi = netdev_priv(dev); in virtnet_close() local
3651 disable_delayed_refill(vi); in virtnet_close()
3653 cancel_delayed_work_sync(&vi->refill); in virtnet_close()
3657 virtio_config_driver_disable(vi->vdev); in virtnet_close()
3661 cancel_work_sync(&vi->config_work); in virtnet_close()
3663 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_close()
3664 virtnet_disable_queue_pair(vi, i); in virtnet_close()
3665 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_close()
3675 struct virtnet_info *vi = in virtnet_rx_mode_work() local
3678 struct net_device *dev = vi->dev; in virtnet_rx_mode_work()
3688 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) in virtnet_rx_mode_work()
3702 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_rx_mode_work()
3710 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_rx_mode_work()
3732 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); in virtnet_rx_mode_work()
3743 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); in virtnet_rx_mode_work()
3753 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_rx_mode_work()
3764 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rx_mode() local
3766 if (vi->rx_mode_work_enabled) in virtnet_set_rx_mode()
3767 schedule_work(&vi->rx_mode_work); in virtnet_set_rx_mode()
3773 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_add_vid() local
3781 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_add_vid()
3784 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_add_vid()
3793 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_kill_vid() local
3801 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_kill_vid()
3804 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_kill_vid()
3810 static void virtnet_clean_affinity(struct virtnet_info *vi) in virtnet_clean_affinity() argument
3814 if (vi->affinity_hint_set) { in virtnet_clean_affinity()
3815 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_clean_affinity()
3816 virtqueue_set_affinity(vi->rq[i].vq, NULL); in virtnet_clean_affinity()
3817 virtqueue_set_affinity(vi->sq[i].vq, NULL); in virtnet_clean_affinity()
3820 vi->affinity_hint_set = false; in virtnet_clean_affinity()
3824 static void virtnet_set_affinity(struct virtnet_info *vi) in virtnet_set_affinity() argument
3834 virtnet_clean_affinity(vi); in virtnet_set_affinity()
3839 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); in virtnet_set_affinity()
3840 stragglers = num_cpu >= vi->curr_queue_pairs ? in virtnet_set_affinity()
3841 num_cpu % vi->curr_queue_pairs : in virtnet_set_affinity()
3845 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_set_affinity()
3853 virtqueue_set_affinity(vi->rq[i].vq, mask); in virtnet_set_affinity()
3854 virtqueue_set_affinity(vi->sq[i].vq, mask); in virtnet_set_affinity()
3855 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); in virtnet_set_affinity()
3859 vi->affinity_hint_set = true; in virtnet_set_affinity()
3865 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_online() local
3867 virtnet_set_affinity(vi); in virtnet_cpu_online()
3873 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_dead() local
3875 virtnet_set_affinity(vi); in virtnet_cpu_dead()
3881 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_down_prep() local
3884 virtnet_clean_affinity(vi); in virtnet_cpu_down_prep()
3890 static int virtnet_cpu_notif_add(struct virtnet_info *vi) in virtnet_cpu_notif_add() argument
3894 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
3898 &vi->node_dead); in virtnet_cpu_notif_add()
3901 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
3905 static void virtnet_cpu_notif_remove(struct virtnet_info *vi) in virtnet_cpu_notif_remove() argument
3907 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_remove()
3909 &vi->node_dead); in virtnet_cpu_notif_remove()
3912 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, in virtnet_send_ctrl_coal_vq_cmd() argument
3927 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, in virtnet_send_ctrl_coal_vq_cmd()
3935 static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi, in virtnet_send_rx_ctrl_coal_vq_cmd() argument
3941 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_ctrl_coal_vq_cmd()
3944 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), in virtnet_send_rx_ctrl_coal_vq_cmd()
3949 vi->rq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_rx_ctrl_coal_vq_cmd()
3950 vi->rq[queue].intr_coal.max_packets = max_packets; in virtnet_send_rx_ctrl_coal_vq_cmd()
3955 static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi, in virtnet_send_tx_ctrl_coal_vq_cmd() argument
3961 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_tx_ctrl_coal_vq_cmd()
3964 err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), in virtnet_send_tx_ctrl_coal_vq_cmd()
3969 vi->sq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_tx_ctrl_coal_vq_cmd()
3970 vi->sq[queue].intr_coal.max_packets = max_packets; in virtnet_send_tx_ctrl_coal_vq_cmd()
3980 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_ringparam() local
3982 ring->rx_max_pending = vi->rq[0].vq->num_max; in virtnet_get_ringparam()
3983 ring->tx_max_pending = vi->sq[0].vq->num_max; in virtnet_get_ringparam()
3984 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
3985 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
3993 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_ringparam() local
4002 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_set_ringparam()
4003 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_set_ringparam()
4009 if (ring->rx_pending > vi->rq[0].vq->num_max) in virtnet_set_ringparam()
4012 if (ring->tx_pending > vi->sq[0].vq->num_max) in virtnet_set_ringparam()
4015 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_set_ringparam()
4016 rq = vi->rq + i; in virtnet_set_ringparam()
4017 sq = vi->sq + i; in virtnet_set_ringparam()
4020 err = virtnet_tx_resize(vi, sq, ring->tx_pending); in virtnet_set_ringparam()
4029 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i, in virtnet_set_ringparam()
4030 vi->intr_coal_tx.max_usecs, in virtnet_set_ringparam()
4031 vi->intr_coal_tx.max_packets); in virtnet_set_ringparam()
4041 err = virtnet_rx_resize(vi, rq, ring->rx_pending); in virtnet_set_ringparam()
4046 mutex_lock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
4047 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i, in virtnet_set_ringparam()
4048 vi->intr_coal_rx.max_usecs, in virtnet_set_ringparam()
4049 vi->intr_coal_rx.max_packets); in virtnet_set_ringparam()
4050 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
4059 static bool virtnet_commit_rss_command(struct virtnet_info *vi) in virtnet_commit_rss_command() argument
4061 struct net_device *dev = vi->dev; in virtnet_commit_rss_command()
4069 sg_set_buf(&sgs[0], &vi->rss, sg_buf_size); in virtnet_commit_rss_command()
4071 if (vi->has_rss) { in virtnet_commit_rss_command()
4072 sg_buf_size = sizeof(uint16_t) * vi->rss_indir_table_size; in virtnet_commit_rss_command()
4073 sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size); in virtnet_commit_rss_command()
4075 sg_set_buf(&sgs[1], &vi->rss.hash_cfg_reserved, sizeof(uint16_t)); in virtnet_commit_rss_command()
4080 sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size); in virtnet_commit_rss_command()
4082 sg_buf_size = vi->rss_key_size; in virtnet_commit_rss_command()
4083 sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size); in virtnet_commit_rss_command()
4085 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, in virtnet_commit_rss_command()
4086 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG in virtnet_commit_rss_command()
4098 static void virtnet_init_default_rss(struct virtnet_info *vi) in virtnet_init_default_rss() argument
4100 vi->rss.hash_types = cpu_to_le32(vi->rss_hash_types_supported); in virtnet_init_default_rss()
4101 vi->rss_hash_types_saved = vi->rss_hash_types_supported; in virtnet_init_default_rss()
4102 vi->rss.indirection_table_mask = vi->rss_indir_table_size in virtnet_init_default_rss()
4103 ? cpu_to_le16(vi->rss_indir_table_size - 1) : 0; in virtnet_init_default_rss()
4104 vi->rss.unclassified_queue = 0; in virtnet_init_default_rss()
4106 virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs); in virtnet_init_default_rss()
4108 vi->rss.hash_key_length = vi->rss_key_size; in virtnet_init_default_rss()
4110 netdev_rss_key_fill(vi->rss.key, vi->rss_key_size); in virtnet_init_default_rss()
4113 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) in virtnet_get_hashflow() argument
4118 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { in virtnet_get_hashflow()
4121 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
4126 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { in virtnet_get_hashflow()
4129 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
4134 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { in virtnet_get_hashflow()
4137 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
4142 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { in virtnet_get_hashflow()
4145 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
4150 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) in virtnet_get_hashflow()
4155 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) in virtnet_get_hashflow()
4165 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info) in virtnet_set_hashflow() argument
4167 u32 new_hashtypes = vi->rss_hash_types_saved; in virtnet_set_hashflow()
4216 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) in virtnet_set_hashflow()
4219 if (new_hashtypes != vi->rss_hash_types_saved) { in virtnet_set_hashflow()
4220 vi->rss_hash_types_saved = new_hashtypes; in virtnet_set_hashflow()
4221 vi->rss.hash_types = cpu_to_le32(vi->rss_hash_types_saved); in virtnet_set_hashflow()
4222 if (vi->dev->features & NETIF_F_RXHASH) in virtnet_set_hashflow()
4223 return virtnet_commit_rss_command(vi); in virtnet_set_hashflow()
4232 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_drvinfo() local
4233 struct virtio_device *vdev = vi->vdev; in virtnet_get_drvinfo()
4245 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_channels() local
4255 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) in virtnet_set_channels()
4262 if (vi->rq[0].xdp_prog) in virtnet_set_channels()
4266 err = virtnet_set_queues(vi, queue_pairs); in virtnet_set_channels()
4271 virtnet_set_affinity(vi); in virtnet_set_channels()
4295 static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data) in virtnet_get_stats_string() argument
4305 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_get_stats_string()
4325 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_stats_string()
4332 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_stats_string()
4339 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_get_stats_string()
4359 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_stats_string()
4366 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_stats_string()
4373 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_get_stats_string()
4401 static void virtnet_stats_ctx_init(struct virtnet_info *vi, in virtnet_stats_ctx_init() argument
4416 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4422 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4428 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_stats_ctx_init()
4434 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4442 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4448 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_stats_ctx_init()
4454 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4460 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4472 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_stats_ctx_init()
4482 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4488 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4494 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4502 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4508 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4514 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4542 static void virtnet_fill_total_fields(struct virtnet_info *vi, in virtnet_fill_total_fields() argument
4553 first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx; in virtnet_fill_total_fields()
4557 stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
4561 stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
4564 static void virtnet_fill_stats_qstat(struct virtnet_info *vi, u32 qid, in virtnet_fill_stats_qstat() argument
4575 queue_type = vq_type(vi, qid); in virtnet_fill_stats_qstat()
4664 * @vi: virtio net info
4671 static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid, in virtnet_fill_stats() argument
4683 return virtnet_fill_stats_qstat(vi, qid, ctx, base, drv_stats, reply_type); in virtnet_fill_stats()
4689 queue_type = vq_type(vi, qid); in virtnet_fill_stats()
4696 offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2); in virtnet_fill_stats()
4798 static int __virtnet_get_hw_stats(struct virtnet_info *vi, in __virtnet_get_hw_stats() argument
4812 ok = virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS, in __virtnet_get_hw_stats()
4822 virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type); in __virtnet_get_hw_stats()
4828 static void virtnet_make_stat_req(struct virtnet_info *vi, in virtnet_make_stat_req() argument
4833 int qtype = vq_type(vi, qid); in virtnet_make_stat_req()
4847 static int virtnet_get_hw_stats(struct virtnet_info *vi, in virtnet_get_hw_stats() argument
4856 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) in virtnet_get_hw_stats()
4860 last_vq = vi->curr_queue_pairs * 2 - 1; in virtnet_get_hw_stats()
4872 qtype = vq_type(vi, i); in virtnet_get_hw_stats()
4896 virtnet_make_stat_req(vi, ctx, req, i, &j); in virtnet_get_hw_stats()
4899 virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j); in virtnet_get_hw_stats()
4901 ok = __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size); in virtnet_get_hw_stats()
4911 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_strings() local
4918 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p); in virtnet_get_strings()
4919 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p); in virtnet_get_strings()
4921 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p); in virtnet_get_strings()
4923 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
4924 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p); in virtnet_get_strings()
4926 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
4927 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p); in virtnet_get_strings()
4934 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_sset_count() local
4940 virtnet_stats_ctx_init(vi, &ctx, NULL, false); in virtnet_get_sset_count()
4945 vi->curr_queue_pairs * pair_count; in virtnet_get_sset_count()
4954 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_ethtool_stats() local
4959 virtnet_stats_ctx_init(vi, &ctx, data, false); in virtnet_get_ethtool_stats()
4960 if (virtnet_get_hw_stats(vi, &ctx, -1)) in virtnet_get_ethtool_stats()
4961 dev_warn(&vi->dev->dev, "Failed to get hw stats.\n"); in virtnet_get_ethtool_stats()
4963 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_ethtool_stats()
4964 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_ethtool_stats()
4965 struct send_queue *sq = &vi->sq[i]; in virtnet_get_ethtool_stats()
4970 virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0); in virtnet_get_ethtool_stats()
4976 virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0); in virtnet_get_ethtool_stats()
4980 virtnet_fill_total_fields(vi, &ctx); in virtnet_get_ethtool_stats()
4986 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_channels() local
4988 channels->combined_count = vi->curr_queue_pairs; in virtnet_get_channels()
4989 channels->max_combined = vi->max_queue_pairs; in virtnet_get_channels()
4999 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_link_ksettings() local
5002 &vi->speed, &vi->duplex); in virtnet_set_link_ksettings()
5008 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_link_ksettings() local
5010 cmd->base.speed = vi->speed; in virtnet_get_link_ksettings()
5011 cmd->base.duplex = vi->duplex; in virtnet_get_link_ksettings()
5017 static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi, in virtnet_send_tx_notf_coal_cmds() argument
5032 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, in virtnet_send_tx_notf_coal_cmds()
5037 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
5038 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
5039 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_tx_notf_coal_cmds()
5040 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
5041 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
5047 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, in virtnet_send_rx_notf_coal_cmds() argument
5055 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_notf_coal_cmds()
5058 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs || in virtnet_send_rx_notf_coal_cmds()
5059 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) in virtnet_send_rx_notf_coal_cmds()
5062 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
5063 vi->rx_dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
5064 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5065 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5066 vi->rq[i].dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
5067 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5076 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
5077 vi->rx_dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
5078 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5079 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5080 vi->rq[i].dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
5081 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5093 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, in virtnet_send_rx_notf_coal_cmds()
5098 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
5099 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
5100 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5101 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5102 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
5103 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
5104 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5110 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, in virtnet_send_notf_coal_cmds() argument
5115 err = virtnet_send_tx_notf_coal_cmds(vi, ec); in virtnet_send_notf_coal_cmds()
5119 err = virtnet_send_rx_notf_coal_cmds(vi, ec); in virtnet_send_notf_coal_cmds()
5126 static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi, in virtnet_send_rx_notf_coal_vq_cmds() argument
5135 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5136 cur_rx_dim = vi->rq[queue].dim_enabled; in virtnet_send_rx_notf_coal_vq_cmds()
5137 max_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_send_rx_notf_coal_vq_cmds()
5138 max_packets = vi->rq[queue].intr_coal.max_packets; in virtnet_send_rx_notf_coal_vq_cmds()
5142 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5147 vi->rq[queue].dim_enabled = true; in virtnet_send_rx_notf_coal_vq_cmds()
5148 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5153 vi->rq[queue].dim_enabled = false; in virtnet_send_rx_notf_coal_vq_cmds()
5158 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue, in virtnet_send_rx_notf_coal_vq_cmds()
5161 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5165 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi, in virtnet_send_notf_coal_vq_cmds() argument
5171 err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue); in virtnet_send_notf_coal_vq_cmds()
5175 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue, in virtnet_send_notf_coal_vq_cmds()
5189 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rx_dim_work() local
5190 struct net_device *dev = vi->dev; in virtnet_rx_dim_work()
5194 qnum = rq - vi->rq; in virtnet_rx_dim_work()
5203 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum, in virtnet_rx_dim_work()
5247 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_coalesce() local
5253 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { in virtnet_set_coalesce()
5255 vi->sq[queue_number].napi.weight, in virtnet_set_coalesce()
5261 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be in virtnet_set_coalesce()
5268 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) in virtnet_set_coalesce()
5269 ret = virtnet_send_notf_coal_cmds(vi, ec); in virtnet_set_coalesce()
5280 for (i = queue_number; i < vi->max_queue_pairs; i++) { in virtnet_set_coalesce()
5281 if (vi->sq[i].xsk_pool) in virtnet_set_coalesce()
5285 for (; queue_number < vi->max_queue_pairs; queue_number++) in virtnet_set_coalesce()
5286 vi->sq[queue_number].napi.weight = napi_weight; in virtnet_set_coalesce()
5297 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_coalesce() local
5299 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_get_coalesce()
5300 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; in virtnet_get_coalesce()
5301 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; in virtnet_get_coalesce()
5302 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; in virtnet_get_coalesce()
5303 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; in virtnet_get_coalesce()
5304 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled; in virtnet_get_coalesce()
5308 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
5319 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_per_queue_coalesce() local
5323 if (queue >= vi->max_queue_pairs) in virtnet_set_per_queue_coalesce()
5329 vi->sq[queue].napi.weight, in virtnet_set_per_queue_coalesce()
5334 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_set_per_queue_coalesce()
5335 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue); in virtnet_set_per_queue_coalesce()
5343 vi->sq[queue].napi.weight = napi_weight; in virtnet_set_per_queue_coalesce()
5352 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_per_queue_coalesce() local
5354 if (queue >= vi->max_queue_pairs) in virtnet_get_per_queue_coalesce()
5357 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_get_per_queue_coalesce()
5358 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5359 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5360 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5361 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5362 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5363 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; in virtnet_get_per_queue_coalesce()
5364 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5368 if (vi->sq[queue].napi.weight) in virtnet_get_per_queue_coalesce()
5377 struct virtnet_info *vi = netdev_priv(dev); in virtnet_init_settings() local
5379 vi->speed = SPEED_UNKNOWN; in virtnet_init_settings()
5380 vi->duplex = DUPLEX_UNKNOWN; in virtnet_init_settings()
5396 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_rxfh() local
5400 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_get_rxfh()
5401 rxfh->indir[i] = le16_to_cpu(vi->rss.indirection_table[i]); in virtnet_get_rxfh()
5405 memcpy(rxfh->key, vi->rss.key, vi->rss_key_size); in virtnet_get_rxfh()
5416 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rxfh() local
5425 if (!vi->has_rss) in virtnet_set_rxfh()
5428 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_set_rxfh()
5429 vi->rss.indirection_table[i] = cpu_to_le16(rxfh->indir[i]); in virtnet_set_rxfh()
5438 if (!vi->has_rss && !vi->has_rss_hash_report) in virtnet_set_rxfh()
5441 memcpy(vi->rss.key, rxfh->key, vi->rss_key_size); in virtnet_set_rxfh()
5446 virtnet_commit_rss_command(vi); in virtnet_set_rxfh()
5453 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_rxnfc() local
5458 info->data = vi->curr_queue_pairs; in virtnet_get_rxnfc()
5461 virtnet_get_hashflow(vi, info); in virtnet_get_rxnfc()
5472 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rxnfc() local
5477 if (!virtnet_set_hashflow(vi, info)) in virtnet_set_rxnfc()
5518 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_queue_stats_rx() local
5519 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_queue_stats_rx()
5522 virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true); in virtnet_get_queue_stats_rx()
5524 virtnet_get_hw_stats(vi, &ctx, i * 2); in virtnet_get_queue_stats_rx()
5525 virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0); in virtnet_get_queue_stats_rx()
5531 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_queue_stats_tx() local
5532 struct send_queue *sq = &vi->sq[i]; in virtnet_get_queue_stats_tx()
5535 virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true); in virtnet_get_queue_stats_tx()
5537 virtnet_get_hw_stats(vi, &ctx, i * 2 + 1); in virtnet_get_queue_stats_tx()
5538 virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0); in virtnet_get_queue_stats_tx()
5545 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_base_stats() local
5553 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_base_stats()
5558 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_base_stats()
5564 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_get_base_stats()
5571 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) in virtnet_get_base_stats()
5579 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_base_stats()
5584 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_get_base_stats()
5589 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_base_stats()
5596 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) in virtnet_get_base_stats()
5608 struct virtnet_info *vi = vdev->priv; in virtnet_freeze_down() local
5611 flush_work(&vi->config_work); in virtnet_freeze_down()
5612 disable_rx_mode_work(vi); in virtnet_freeze_down()
5613 flush_work(&vi->rx_mode_work); in virtnet_freeze_down()
5615 netif_tx_lock_bh(vi->dev); in virtnet_freeze_down()
5616 netif_device_detach(vi->dev); in virtnet_freeze_down()
5617 netif_tx_unlock_bh(vi->dev); in virtnet_freeze_down()
5618 if (netif_running(vi->dev)) in virtnet_freeze_down()
5619 virtnet_close(vi->dev); in virtnet_freeze_down()
5622 static int init_vqs(struct virtnet_info *vi);
5626 struct virtnet_info *vi = vdev->priv; in virtnet_restore_up() local
5629 err = init_vqs(vi); in virtnet_restore_up()
5635 enable_delayed_refill(vi); in virtnet_restore_up()
5636 enable_rx_mode_work(vi); in virtnet_restore_up()
5638 if (netif_running(vi->dev)) { in virtnet_restore_up()
5639 err = virtnet_open(vi->dev); in virtnet_restore_up()
5644 netif_tx_lock_bh(vi->dev); in virtnet_restore_up()
5645 netif_device_attach(vi->dev); in virtnet_restore_up()
5646 netif_tx_unlock_bh(vi->dev); in virtnet_restore_up()
5650 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) in virtnet_set_guest_offloads() argument
5659 *_offloads = cpu_to_virtio64(vi->vdev, offloads); in virtnet_set_guest_offloads()
5663 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, in virtnet_set_guest_offloads()
5665 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); in virtnet_set_guest_offloads()
5672 static int virtnet_clear_guest_offloads(struct virtnet_info *vi) in virtnet_clear_guest_offloads() argument
5676 if (!vi->guest_offloads) in virtnet_clear_guest_offloads()
5679 return virtnet_set_guest_offloads(vi, offloads); in virtnet_clear_guest_offloads()
5682 static int virtnet_restore_guest_offloads(struct virtnet_info *vi) in virtnet_restore_guest_offloads() argument
5684 u64 offloads = vi->guest_offloads; in virtnet_restore_guest_offloads()
5686 if (!vi->guest_offloads) in virtnet_restore_guest_offloads()
5689 return virtnet_set_guest_offloads(vi, offloads); in virtnet_restore_guest_offloads()
5692 static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq, in virtnet_rq_bind_xsk_pool() argument
5697 qindex = rq - vi->rq; in virtnet_rq_bind_xsk_pool()
5700 err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id); in virtnet_rq_bind_xsk_pool()
5712 virtnet_rx_pause(vi, rq); in virtnet_rq_bind_xsk_pool()
5716 netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rq_bind_xsk_pool()
5723 virtnet_rx_resume(vi, rq); in virtnet_rq_bind_xsk_pool()
5733 static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi, in virtnet_sq_bind_xsk_pool() argument
5739 qindex = sq - vi->sq; in virtnet_sq_bind_xsk_pool()
5741 virtnet_tx_pause(vi, sq); in virtnet_sq_bind_xsk_pool()
5746 netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_sq_bind_xsk_pool()
5752 virtnet_tx_resume(vi, sq); in virtnet_sq_bind_xsk_pool()
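
Both bind helpers wrap the actual queue reset in a pause/resume pair: traffic on the queue is stopped (virtnet_rx_pause()/virtnet_tx_pause()), the virtqueue is reset and re-bound to the new pool (or to NULL to unbind), and the queue is resumed whether or not the reset succeeded, with any error reported via netdev_err(). A sketch of the pattern with stub helpers (reset_and_bind() is hypothetical):

#include <stdio.h>

struct queue { int index; };

static void pause_q(struct queue *q)  { printf("pause q%d\n", q->index); }
static void resume_q(struct queue *q) { printf("resume q%d\n", q->index); }

/* Hypothetical reset; returns 0 on success. */
static int reset_and_bind(struct queue *q, void *pool)
{
        printf("reset q%d, pool=%p\n", q->index, pool);
        return 0;
}

static int bind_pool(struct queue *q, void *pool)
{
        int err;

        pause_q(q);
        err = reset_and_bind(q, pool);
        if (err)
                fprintf(stderr, "reset fail: queue %d err %d\n",
                        q->index, err);
        resume_q(q);            /* resume even on failure */
        return err;
}

int main(void)
{
        struct queue q = { .index = 0 };

        bind_pool(&q, (void *)0x1);     /* bind a pool */
        return bind_pool(&q, NULL);     /* unbind */
}
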
5761 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xsk_pool_enable() local
5768 if (vi->hdr_len > xsk_pool_get_headroom(pool)) in virtnet_xsk_pool_enable()
5774 if (vi->big_packets && !vi->mergeable_rx_bufs) in virtnet_xsk_pool_enable()
5777 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_enable()
5780 sq = &vi->sq[qid]; in virtnet_xsk_pool_enable()
5781 rq = &vi->rq[qid]; in virtnet_xsk_pool_enable()
5803 hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len, in virtnet_xsk_pool_enable()
5812 err = virtnet_rq_bind_xsk_pool(vi, rq, pool); in virtnet_xsk_pool_enable()
5816 err = virtnet_sq_bind_xsk_pool(vi, sq, pool); in virtnet_xsk_pool_enable()
5828 virtnet_rq_bind_xsk_pool(vi, rq, NULL); in virtnet_xsk_pool_enable()
5832 virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len, in virtnet_xsk_pool_enable()
5839 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xsk_pool_disable() local
5845 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_disable()
5848 sq = &vi->sq[qid]; in virtnet_xsk_pool_disable()
5849 rq = &vi->rq[qid]; in virtnet_xsk_pool_disable()
5853 err = virtnet_rq_bind_xsk_pool(vi, rq, NULL); in virtnet_xsk_pool_disable()
5854 err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL); in virtnet_xsk_pool_disable()
5859 vi->hdr_len, DMA_TO_DEVICE, 0); in virtnet_xsk_pool_disable()
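
virtnet_xsk_pool_enable() sets up in a fixed order (map the header DMA, bind the RX queue, bind the TX queue) and unwinds in exact reverse on failure: a failed TX bind unbinds the RX queue, then the header mapping is released. A goto-style unwind sketch of that shape, with stub steps (bind_sq() deliberately fails to exercise the unwind):

#include <stdio.h>

static int  map_hdr(void)   { puts("map hdr dma");   return 0; }
static void unmap_hdr(void) { puts("unmap hdr dma"); }
static int  bind_rq(void)   { puts("bind rq");       return 0; }
static void unbind_rq(void) { puts("unbind rq"); }
static int  bind_sq(void)   { puts("bind sq");       return -1; }

static int xsk_enable(void)
{
        int err;

        err = map_hdr();
        if (err)
                return err;
        err = bind_rq();
        if (err)
                goto err_rq;
        err = bind_sq();
        if (err)
                goto err_sq;
        return 0;

err_sq:
        unbind_rq();            /* undo in reverse order of setup */
err_rq:
        unmap_hdr();
        return err;
}

int main(void) { return xsk_enable() ? 1 : 0; }
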
5880 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xdp_set() local
5885 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) in virtnet_xdp_set()
5886 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_xdp_set()
5887 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_xdp_set()
5888 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_xdp_set()
5889 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_xdp_set()
5890 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || in virtnet_xdp_set()
5891 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || in virtnet_xdp_set()
5892 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { in virtnet_xdp_set()
5897 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { in virtnet_xdp_set()
5908 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; in virtnet_xdp_set()
5913 if (curr_qp + xdp_qp > vi->max_queue_pairs) { in virtnet_xdp_set()
5915 curr_qp + xdp_qp, vi->max_queue_pairs); in virtnet_xdp_set()
5919 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); in virtnet_xdp_set()
5924 bpf_prog_add(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
5928 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5929 napi_disable(&vi->rq[i].napi); in virtnet_xdp_set()
5930 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_xdp_set()
5935 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5936 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
5938 virtnet_restore_guest_offloads(vi); in virtnet_xdp_set()
5943 err = virtnet_set_queues(vi, curr_qp + xdp_qp); in virtnet_xdp_set()
5947 vi->xdp_queue_pairs = xdp_qp; in virtnet_xdp_set()
5950 vi->xdp_enabled = true; in virtnet_xdp_set()
5951 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5952 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
5954 virtnet_clear_guest_offloads(vi); in virtnet_xdp_set()
5960 vi->xdp_enabled = false; in virtnet_xdp_set()
5963 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5967 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
5968 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
5969 &vi->sq[i].napi); in virtnet_xdp_set()
5977 virtnet_clear_guest_offloads(vi); in virtnet_xdp_set()
5978 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
5979 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); in virtnet_xdp_set()
5983 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5984 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
5985 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
5986 &vi->sq[i].napi); in virtnet_xdp_set()
5990 bpf_prog_sub(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
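
The queue-pair arithmetic in virtnet_xdp_set() reserves extra queue pairs so XDP transmissions get their own TX queues: curr_qp is the number of pairs currently in use minus any already reserved for XDP, and the attach is rejected when curr_qp + xdp_qp would exceed max_queue_pairs (the driver sizes xdp_qp elsewhere). This also matches is_xdp_raw_buffer_queue() near the top of this listing, which treats the top xdp_queue_pairs queues as XDP-only. A worked sketch of the check with hypothetical numbers:

#include <stdio.h>

int main(void)
{
        /* Hypothetical device: 4 queue pairs, all currently in use. */
        int max_queue_pairs  = 4;
        int curr_queue_pairs = 4;
        int xdp_queue_pairs  = 0;       /* none reserved yet */

        int xdp_qp  = 4;                /* pairs the XDP attach wants */
        int curr_qp = curr_queue_pairs - xdp_queue_pairs;

        if (curr_qp + xdp_qp > max_queue_pairs)
                printf("reject: need %d pairs, device has %d\n",
                       curr_qp + xdp_qp, max_queue_pairs);
        /* With curr_queue_pairs lowered to 2, 2 + 2 <= 4 would succeed. */
        return 0;
}
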
6009 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_phys_port_name() local
6012 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_get_phys_port_name()
6025 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_features() local
6030 if (vi->xdp_enabled) in virtnet_set_features()
6034 offloads = vi->guest_offloads_capable; in virtnet_set_features()
6036 offloads = vi->guest_offloads_capable & in virtnet_set_features()
6039 err = virtnet_set_guest_offloads(vi, offloads); in virtnet_set_features()
6042 vi->guest_offloads = offloads; in virtnet_set_features()
6047 vi->rss.hash_types = cpu_to_le32(vi->rss_hash_types_saved); in virtnet_set_features()
6049 vi->rss.hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE); in virtnet_set_features()
6051 if (!virtnet_commit_rss_command(vi)) in virtnet_set_features()
6073 static int virtnet_init_irq_moder(struct virtnet_info *vi) in virtnet_init_irq_moder() argument
6080 ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags, in virtnet_init_irq_moder()
6087 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_init_irq_moder()
6088 net_dim_setting(vi->dev, &vi->rq[i].dim, false); in virtnet_init_irq_moder()
6093 static void virtnet_free_irq_moder(struct virtnet_info *vi) in virtnet_free_irq_moder() argument
6095 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_free_irq_moder()
6099 net_dim_free_irq_moder(vi->dev); in virtnet_free_irq_moder()
6124 struct virtnet_info *vi = in virtnet_config_changed_work() local
6128 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, in virtnet_config_changed_work()
6133 netdev_notify_peers(vi->dev); in virtnet_config_changed_work()
6134 virtnet_ack_link_announce(vi); in virtnet_config_changed_work()
6140 if (vi->status == v) in virtnet_config_changed_work()
6143 vi->status = v; in virtnet_config_changed_work()
6145 if (vi->status & VIRTIO_NET_S_LINK_UP) { in virtnet_config_changed_work()
6146 virtnet_update_settings(vi); in virtnet_config_changed_work()
6147 netif_carrier_on(vi->dev); in virtnet_config_changed_work()
6148 netif_tx_wake_all_queues(vi->dev); in virtnet_config_changed_work()
6150 netif_carrier_off(vi->dev); in virtnet_config_changed_work()
6151 netif_tx_stop_all_queues(vi->dev); in virtnet_config_changed_work()
6157 struct virtnet_info *vi = vdev->priv; in virtnet_config_changed() local
6159 schedule_work(&vi->config_work); in virtnet_config_changed()
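
virtnet_config_changed_work() runs from the config_work item that virtnet_config_changed() schedules; it re-reads the status via VIRTIO_NET_F_STATUS, acks a link announce if one was requested, and acts only when the cached vi->status actually changed, toggling the carrier and all TX queues together. A sketch of that edge-triggered update, with a hypothetical status bit:

#include <stdio.h>

#define LINK_UP 1

static int cached_status;

static void config_changed(int new_status)
{
        if (cached_status == new_status)
                return;                 /* no edge, nothing to do */
        cached_status = new_status;

        if (cached_status & LINK_UP)
                puts("carrier on, wake all tx queues");
        else
                puts("carrier off, stop all tx queues");
}

int main(void)
{
        config_changed(LINK_UP);        /* link comes up */
        config_changed(LINK_UP);        /* duplicate event: ignored */
        config_changed(0);              /* link drops */
        return 0;
}
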
6162 static void virtnet_free_queues(struct virtnet_info *vi) in virtnet_free_queues() argument
6166 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_free_queues()
6167 __netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
6168 __netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
6172 * we need to respect an RCU grace period before freeing vi->rq in virtnet_free_queues()
6176 kfree(vi->rq); in virtnet_free_queues()
6177 kfree(vi->sq); in virtnet_free_queues()
6178 kfree(vi->ctrl); in virtnet_free_queues()
6181 static void _free_receive_bufs(struct virtnet_info *vi) in _free_receive_bufs() argument
6186 for (i = 0; i < vi->max_queue_pairs; i++) { in _free_receive_bufs()
6187 while (vi->rq[i].pages) in _free_receive_bufs()
6188 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in _free_receive_bufs()
6190 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); in _free_receive_bufs()
6191 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); in _free_receive_bufs()
6197 static void free_receive_bufs(struct virtnet_info *vi) in free_receive_bufs() argument
6200 _free_receive_bufs(vi); in free_receive_bufs()
6204 static void free_receive_page_frags(struct virtnet_info *vi) in free_receive_page_frags() argument
6207 for (i = 0; i < vi->max_queue_pairs; i++) in free_receive_page_frags()
6208 if (vi->rq[i].alloc_frag.page) { in free_receive_page_frags()
6209 if (vi->rq[i].last_dma) in free_receive_page_frags()
6210 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); in free_receive_page_frags()
6211 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
6217 struct virtnet_info *vi = vq->vdev->priv; in virtnet_sq_free_unused_buf() local
6221 sq = &vi->sq[i]; in virtnet_sq_free_unused_buf()
6241 struct virtnet_info *vi = vq->vdev->priv; in virtnet_sq_free_unused_buf_done() local
6244 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); in virtnet_sq_free_unused_buf_done()
6247 static void free_unused_bufs(struct virtnet_info *vi) in free_unused_bufs() argument
6252 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6253 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
6259 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6260 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
6268 static void virtnet_del_vqs(struct virtnet_info *vi) in virtnet_del_vqs() argument
6270 struct virtio_device *vdev = vi->vdev; in virtnet_del_vqs()
6272 virtnet_clean_affinity(vi); in virtnet_del_vqs()
6276 virtnet_free_queues(vi); in virtnet_del_vqs()
6283 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) in mergeable_min_buf_len() argument
6285 const unsigned int hdr_len = vi->hdr_len; in mergeable_min_buf_len()
6287 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; in mergeable_min_buf_len()
6295 static int virtnet_find_vqs(struct virtnet_info *vi) in virtnet_find_vqs() argument
6308 total_vqs = vi->max_queue_pairs * 2 + in virtnet_find_vqs()
6309 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); in virtnet_find_vqs()
6318 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_find_vqs()
6327 if (vi->has_cvq) { in virtnet_find_vqs()
6332 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6335 sprintf(vi->rq[i].name, "input.%u", i); in virtnet_find_vqs()
6336 sprintf(vi->sq[i].name, "output.%u", i); in virtnet_find_vqs()
6337 vqs_info[rxq2vq(i)].name = vi->rq[i].name; in virtnet_find_vqs()
6338 vqs_info[txq2vq(i)].name = vi->sq[i].name; in virtnet_find_vqs()
6343 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL); in virtnet_find_vqs()
6347 if (vi->has_cvq) { in virtnet_find_vqs()
6348 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
6349 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) in virtnet_find_vqs()
6350 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in virtnet_find_vqs()
6353 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6354 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
6355 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); in virtnet_find_vqs()
6356 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
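
The virtqueue layout assumed by virtnet_find_vqs() (and by vq_type() near the top of this listing) interleaves RX and TX: vq 2*i is the receive queue of pair i and vq 2*i + 1 its transmit queue, the pairing behind the driver's rxq2vq()/txq2vq() helpers, and when VIRTIO_NET_F_CTRL_VQ was negotiated the control queue takes the final slot, total_vqs - 1. A sketch of the index math:

#include <stdio.h>

static int rxq2vq(int q) { return q * 2; }
static int txq2vq(int q) { return q * 2 + 1; }

int main(void)
{
        int max_queue_pairs = 2, has_cvq = 1;
        int total_vqs = max_queue_pairs * 2 + has_cvq;

        for (int i = 0; i < max_queue_pairs; i++)
                printf("pair %d: rx=vq%d tx=vq%d\n",
                       i, rxq2vq(i), txq2vq(i));
        if (has_cvq)
                printf("cvq=vq%d\n", total_vqs - 1);
        return 0;
}
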
6372 static int virtnet_alloc_queues(struct virtnet_info *vi) in virtnet_alloc_queues() argument
6376 if (vi->has_cvq) { in virtnet_alloc_queues()
6377 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); in virtnet_alloc_queues()
6378 if (!vi->ctrl) in virtnet_alloc_queues()
6381 vi->ctrl = NULL; in virtnet_alloc_queues()
6383 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); in virtnet_alloc_queues()
6384 if (!vi->sq) in virtnet_alloc_queues()
6386 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); in virtnet_alloc_queues()
6387 if (!vi->rq) in virtnet_alloc_queues()
6390 INIT_DELAYED_WORK(&vi->refill, refill_work); in virtnet_alloc_queues()
6391 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_alloc_queues()
6392 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
6393 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
6395 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, in virtnet_alloc_queues()
6399 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
6400 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); in virtnet_alloc_queues()
6401 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
6403 u64_stats_init(&vi->rq[i].stats.syncp); in virtnet_alloc_queues()
6404 u64_stats_init(&vi->sq[i].stats.syncp); in virtnet_alloc_queues()
6405 mutex_init(&vi->rq[i].dim_lock); in virtnet_alloc_queues()
6411 kfree(vi->sq); in virtnet_alloc_queues()
6413 kfree(vi->ctrl); in virtnet_alloc_queues()
6418 static int init_vqs(struct virtnet_info *vi) in init_vqs() argument
6423 ret = virtnet_alloc_queues(vi); in init_vqs()
6427 ret = virtnet_find_vqs(vi); in init_vqs()
6432 virtnet_set_affinity(vi); in init_vqs()
6438 virtnet_free_queues(vi); in init_vqs()
6447 struct virtnet_info *vi = netdev_priv(queue->dev); in mergeable_rx_buffer_size_show() local
6449 unsigned int headroom = virtnet_get_headroom(vi); in mergeable_rx_buffer_size_show()
6453 BUG_ON(queue_index >= vi->max_queue_pairs); in mergeable_rx_buffer_size_show()
6454 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
6456 get_mergeable_buf_len(&vi->rq[queue_index], avg, in mergeable_rx_buffer_size_show()
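
mergeable_rx_buffer_size_show() exposes a per-queue buffer size derived from mrg_avg_pkt_len, an exponentially weighted moving average of received packet lengths, clamped from below by the queue's min_buf_len. A sketch of EWMA-driven sizing (the 1/64 weight and the clamp bounds here are assumptions for illustration, not the driver's exact parameters):

#include <stdio.h>

static unsigned long ewma;

static void ewma_add(unsigned long sample)
{
        /* Hypothetical weight 1/64: new = old * 63/64 + sample * 1/64. */
        ewma = ewma ? (ewma * 63 + sample) / 64 : sample;
}

static unsigned long buf_len(unsigned long min_buf_len, unsigned long max)
{
        unsigned long len = ewma;

        if (len < min_buf_len)
                len = min_buf_len;
        if (len > max)
                len = max;
        return len;
}

int main(void)
{
        ewma_add(1500);
        ewma_add(64);
        ewma_add(9000);
        printf("buf len = %lu\n", buf_len(256, 4096));
        return 0;
}
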
6547 static bool virtnet_check_guest_gso(const struct virtnet_info *vi) in virtnet_check_guest_gso() argument
6549 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_check_guest_gso()
6550 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_check_guest_gso()
6551 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_check_guest_gso()
6552 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_check_guest_gso()
6553 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) && in virtnet_check_guest_gso()
6554 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6)); in virtnet_check_guest_gso()
6557 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu) in virtnet_set_big_packets() argument
6559 bool guest_gso = virtnet_check_guest_gso(vi); in virtnet_set_big_packets()
6566 vi->big_packets = true; in virtnet_set_big_packets()
6567 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE); in virtnet_set_big_packets()
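
virtnet_set_big_packets() sizes the big-packet receive chain: with guest GSO negotiated the driver must be able to receive a maximal GSO frame (MAX_SKB_FRAGS page fragments), otherwise it only needs enough PAGE_SIZE fragments to cover the MTU, i.e. DIV_ROUND_UP(mtu, PAGE_SIZE). A worked sketch of that arithmetic (the constants below are typical defaults, not guaranteed on every configuration):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE     4096
#define MAX_SKB_FRAGS 17        /* typical kernel default */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int num_skbfrags(bool guest_gso, int mtu)
{
        return guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
}

int main(void)
{
        printf("gso: %u frags\n", num_skbfrags(true, 1500));       /* 17 */
        printf("mtu 9000: %u frags\n", num_skbfrags(false, 9000)); /* 3  */
        return 0;
}
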
6591 struct virtnet_info *vi; in virtnet_xdp_rx_hash() local
6597 vi = netdev_priv(xdp->rxq->dev); in virtnet_xdp_rx_hash()
6598 hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len); in virtnet_xdp_rx_hash()
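
virtnet_xdp_rx_hash() recovers the virtio_net_hdr_v1_hash that the device wrote into the headroom immediately before the packet: the header starts at xdp->data - vi->hdr_len. A layout sketch (the struct below is simplified for illustration; the real virtio_net_hdr_v1_hash embeds the full v1 header before the hash fields):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr_v1_hash {            /* simplified stand-in */
        uint32_t hash_value;
        uint16_t hash_report;
        uint16_t padding;
};

int main(void)
{
        uint8_t buf[64];
        size_t hdr_len = sizeof(struct hdr_v1_hash);
        uint8_t *data = buf + hdr_len;  /* packet starts after the header */

        struct hdr_v1_hash h = { .hash_value = 0xdeadbeef,
                                 .hash_report = 1 };
        memcpy(buf, &h, hdr_len);       /* device-written header */

        /* What the driver reads back from the headroom. */
        struct hdr_v1_hash out;
        memcpy(&out, data - hdr_len, hdr_len);
        printf("hash=%#x report=%u\n",
               (unsigned)out.hash_value, (unsigned)out.hash_report);
        return 0;
}
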
6617 struct virtnet_info *vi; in virtnet_probe() local
6714 vi = netdev_priv(dev); in virtnet_probe()
6715 vi->dev = dev; in virtnet_probe()
6716 vi->vdev = vdev; in virtnet_probe()
6717 vdev->priv = vi; in virtnet_probe()
6719 INIT_WORK(&vi->config_work, virtnet_config_changed_work); in virtnet_probe()
6720 INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); in virtnet_probe()
6721 spin_lock_init(&vi->refill_lock); in virtnet_probe()
6724 vi->mergeable_rx_bufs = true; in virtnet_probe()
6729 vi->has_rss_hash_report = true; in virtnet_probe()
6732 vi->has_rss = true; in virtnet_probe()
6734 vi->rss_indir_table_size = in virtnet_probe()
6738 err = rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size); in virtnet_probe()
6742 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
6743 vi->rss_key_size = in virtnet_probe()
6745 if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) { in virtnet_probe()
6747 vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE); in virtnet_probe()
6752 vi->rss_hash_types_supported = in virtnet_probe()
6754 vi->rss_hash_types_supported &= in virtnet_probe()
6763 if (vi->has_rss_hash_report) in virtnet_probe()
6764 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash); in virtnet_probe()
6767 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); in virtnet_probe()
6769 vi->hdr_len = sizeof(struct virtio_net_hdr); in virtnet_probe()
6773 vi->any_header_sg = true; in virtnet_probe()
6776 vi->has_cvq = true; in virtnet_probe()
6778 mutex_init(&vi->cvq_lock); in virtnet_probe()
6799 virtnet_set_big_packets(vi, mtu); in virtnet_probe()
6801 if (vi->any_header_sg) in virtnet_probe()
6802 dev->needed_headroom = vi->hdr_len; in virtnet_probe()
6806 vi->curr_queue_pairs = max_queue_pairs; in virtnet_probe()
6808 vi->curr_queue_pairs = num_online_cpus(); in virtnet_probe()
6809 vi->max_queue_pairs = max_queue_pairs; in virtnet_probe()
6812 err = init_vqs(vi); in virtnet_probe()
6816 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_probe()
6817 vi->intr_coal_rx.max_usecs = 0; in virtnet_probe()
6818 vi->intr_coal_tx.max_usecs = 0; in virtnet_probe()
6819 vi->intr_coal_rx.max_packets = 0; in virtnet_probe()
6824 if (vi->sq[0].napi.weight) in virtnet_probe()
6825 vi->intr_coal_tx.max_packets = 1; in virtnet_probe()
6827 vi->intr_coal_tx.max_packets = 0; in virtnet_probe()
6830 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_probe()
6832 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_probe()
6833 if (vi->sq[i].napi.weight) in virtnet_probe()
6834 vi->sq[i].intr_coal.max_packets = 1; in virtnet_probe()
6836 err = virtnet_init_irq_moder(vi); in virtnet_probe()
6842 if (vi->mergeable_rx_bufs) in virtnet_probe()
6845 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
6846 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
6851 vi->failover = net_failover_create(vi->dev); in virtnet_probe()
6852 if (IS_ERR(vi->failover)) { in virtnet_probe()
6853 err = PTR_ERR(vi->failover); in virtnet_probe()
6858 if (vi->has_rss || vi->has_rss_hash_report) in virtnet_probe()
6859 virtnet_init_default_rss(vi); in virtnet_probe()
6861 enable_rx_mode_work(vi); in virtnet_probe()
6874 virtio_config_driver_disable(vi->vdev); in virtnet_probe()
6878 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
6879 if (!virtnet_commit_rss_command(vi)) { in virtnet_probe()
6882 vi->has_rss_hash_report = false; in virtnet_probe()
6883 vi->has_rss = false; in virtnet_probe()
6887 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_probe()
6894 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { in virtnet_probe()
6898 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_probe()
6907 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) { in virtnet_probe()
6921 if (!virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS, in virtnet_probe()
6931 vi->device_stats_cap = le64_to_cpu(v); in virtnet_probe()
6937 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_probe()
6938 virtnet_config_changed_work(&vi->config_work); in virtnet_probe()
6940 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_probe()
6941 virtnet_update_settings(vi); in virtnet_probe()
6946 if (virtio_has_feature(vi->vdev, guest_offloads[i])) in virtnet_probe()
6947 set_bit(guest_offloads[i], &vi->guest_offloads); in virtnet_probe()
6948 vi->guest_offloads_capable = vi->guest_offloads; in virtnet_probe()
6952 err = virtnet_cpu_notif_add(vi); in virtnet_probe()
6966 net_failover_destroy(vi->failover); in virtnet_probe()
6969 cancel_delayed_work_sync(&vi->refill); in virtnet_probe()
6970 free_receive_page_frags(vi); in virtnet_probe()
6971 virtnet_del_vqs(vi); in virtnet_probe()
6977 static void remove_vq_common(struct virtnet_info *vi) in remove_vq_common() argument
6981 virtio_reset_device(vi->vdev); in remove_vq_common()
6984 free_unused_bufs(vi); in remove_vq_common()
6990 for (i = 0; i < vi->max_queue_pairs; i++) in remove_vq_common()
6991 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); in remove_vq_common()
6993 free_receive_bufs(vi); in remove_vq_common()
6995 free_receive_page_frags(vi); in remove_vq_common()
6997 virtnet_del_vqs(vi); in remove_vq_common()
7002 struct virtnet_info *vi = vdev->priv; in virtnet_remove() local
7004 virtnet_cpu_notif_remove(vi); in virtnet_remove()
7007 flush_work(&vi->config_work); in virtnet_remove()
7008 disable_rx_mode_work(vi); in virtnet_remove()
7009 flush_work(&vi->rx_mode_work); in virtnet_remove()
7011 virtnet_free_irq_moder(vi); in virtnet_remove()
7013 unregister_netdev(vi->dev); in virtnet_remove()
7015 net_failover_destroy(vi->failover); in virtnet_remove()
7017 remove_vq_common(vi); in virtnet_remove()
7019 rss_indirection_table_free(&vi->rss); in virtnet_remove()
7021 free_netdev(vi->dev); in virtnet_remove()
7026 struct virtnet_info *vi = vdev->priv; in virtnet_freeze() local
7028 virtnet_cpu_notif_remove(vi); in virtnet_freeze()
7030 remove_vq_common(vi); in virtnet_freeze()
7037 struct virtnet_info *vi = vdev->priv; in virtnet_restore() local
7043 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_restore()
7045 err = virtnet_cpu_notif_add(vi); in virtnet_restore()
7048 remove_vq_common(vi); in virtnet_restore()