Lines Matching +full:dma +full:-poll +full:-cnt
1 // SPDX-License-Identifier: GPL-2.0-only
5 * virtio-net server in host kernel.
41 " 1 -Enable; 0 - Disable");
61 /* Lower device DMA failed */
63 /* Lower device DMA done */
65 /* Lower device DMA in progress */
113 /* last used idx for outstanding DMA zerocopy buffers */
115 /* For TX, first used idx for DMA done zerocopy buffers
135 struct vhost_poll poll[VHOST_NET_VQ_MAX]; member
152 if (rxq->tail != rxq->head) in vhost_net_buf_get_ptr()
153 return rxq->queue[rxq->head]; in vhost_net_buf_get_ptr()
160 return rxq->tail - rxq->head; in vhost_net_buf_get_size()
165 return rxq->tail == rxq->head; in vhost_net_buf_is_empty()
171 ++rxq->head; in vhost_net_buf_consume()
177 struct vhost_net_buf *rxq = &nvq->rxq; in vhost_net_buf_produce()
179 rxq->head = 0; in vhost_net_buf_produce()
180 rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue, in vhost_net_buf_produce()
182 return rxq->tail; in vhost_net_buf_produce()
187 struct vhost_net_buf *rxq = &nvq->rxq; in vhost_net_buf_unproduce()
189 if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) { in vhost_net_buf_unproduce()
190 ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head, in vhost_net_buf_unproduce()
193 rxq->head = rxq->tail = 0; in vhost_net_buf_unproduce()
202 return xdpf->len; in vhost_net_buf_peek_len()
210 struct vhost_net_buf *rxq = &nvq->rxq; in vhost_net_buf_peek()
224 rxq->head = rxq->tail = 0; in vhost_net_buf_init()
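The vhost_net_buf helpers above cache a batch of packet pointers pulled from the tap device's ptr_ring: produce refills queue[] with one ptr_ring_consume_batched() call, consume hands cached entries out one at a time by advancing head, and unproduce pushes unread entries back into the ring. A minimal userspace sketch of the same head/tail batching pattern follows; the names and the refill callback are illustrative, not the driver's API.

#include <stddef.h>

#define BATCH 64				/* local cache size, illustrative */

struct batch_buf {
	void *queue[BATCH];
	int head;				/* next entry to hand out */
	int tail;				/* one past the last cached entry */
};

/* Refill callback: copies up to n pointers out of some backing ring and
 * returns how many it copied. Hypothetical stand-in for
 * ptr_ring_consume_batched(). */
typedef int (*refill_fn)(void *ring, void **dst, int n);

static int buf_is_empty(const struct batch_buf *b)
{
	return b->tail == b->head;
}

/* Refill the cache with one batched call, like vhost_net_buf_produce(). */
static int buf_produce(struct batch_buf *b, void *ring, refill_fn refill)
{
	b->head = 0;
	b->tail = refill(ring, b->queue, BATCH);
	return b->tail;
}

/* Hand out one cached pointer, refilling first if the cache ran dry,
 * in the spirit of vhost_net_buf_peek()/vhost_net_buf_consume(). */
static void *buf_consume(struct batch_buf *b, void *ring, refill_fn refill)
{
	if (buf_is_empty(b) && !buf_produce(b, ring, refill))
		return NULL;
	return b->queue[b->head++];
}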
241 return ERR_PTR(-ENOMEM); in vhost_net_ubuf_alloc()
242 atomic_set(&ubufs->refcount, 1); in vhost_net_ubuf_alloc()
243 init_waitqueue_head(&ubufs->wait); in vhost_net_ubuf_alloc()
244 ubufs->vq = vq; in vhost_net_ubuf_alloc()
250 int r = atomic_sub_return(1, &ubufs->refcount); in vhost_net_ubuf_put()
252 wake_up(&ubufs->wait); in vhost_net_ubuf_put()
259 wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); in vhost_net_ubuf_put_and_wait()
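vhost_net_ubuf_alloc(), vhost_net_ubuf_put() and vhost_net_ubuf_put_and_wait() above are a plain "reference count plus waitqueue" lifetime scheme: the count starts at 1 for the owner, each in-flight zerocopy buffer holds a reference, put() drops one and wakes the waitqueue, and put_and_wait() sleeps until every outstanding reference is gone. A userspace sketch of the same teardown pattern with C11 atomics and a condition variable (all names are illustrative):

#include <pthread.h>
#include <stdatomic.h>

struct ubuf_ref {
	atomic_int refcount;			/* starts at 1 for the owner */
	pthread_mutex_t lock;
	pthread_cond_t drained;
};

static void ubuf_init(struct ubuf_ref *u)
{
	atomic_init(&u->refcount, 1);		/* owner's reference */
	pthread_mutex_init(&u->lock, NULL);
	pthread_cond_init(&u->drained, NULL);
}

static void ubuf_get(struct ubuf_ref *u)
{
	atomic_fetch_add(&u->refcount, 1);	/* one per in-flight buffer */
}

static int ubuf_put(struct ubuf_ref *u)
{
	int r = atomic_fetch_sub(&u->refcount, 1) - 1;

	if (r == 0) {				/* last reference: wake the waiter */
		pthread_mutex_lock(&u->lock);
		pthread_cond_broadcast(&u->drained);
		pthread_mutex_unlock(&u->lock);
	}
	return r;
}

/* Drop the owner's reference and wait for all in-flight buffers to finish,
 * in the spirit of vhost_net_ubuf_put_and_wait(). */
static void ubuf_put_and_wait(struct ubuf_ref *u)
{
	ubuf_put(u);
	pthread_mutex_lock(&u->lock);
	while (atomic_load(&u->refcount) != 0)
		pthread_cond_wait(&u->drained, &u->lock);
	pthread_mutex_unlock(&u->lock);
}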
273 kfree(n->vqs[i].ubuf_info); in vhost_net_clear_ubuf_info()
274 n->vqs[i].ubuf_info = NULL; in vhost_net_clear_ubuf_info()
287 n->vqs[i].ubuf_info = in vhost_net_set_ubuf_info()
289 sizeof(*n->vqs[i].ubuf_info), in vhost_net_set_ubuf_info()
291 if (!n->vqs[i].ubuf_info) in vhost_net_set_ubuf_info()
298 return -ENOMEM; in vhost_net_set_ubuf_info()
308 n->vqs[i].done_idx = 0; in vhost_net_vq_reset()
309 n->vqs[i].upend_idx = 0; in vhost_net_vq_reset()
310 n->vqs[i].ubufs = NULL; in vhost_net_vq_reset()
311 n->vqs[i].vhost_hlen = 0; in vhost_net_vq_reset()
312 n->vqs[i].sock_hlen = 0; in vhost_net_vq_reset()
313 vhost_net_buf_init(&n->vqs[i].rxq); in vhost_net_vq_reset()
320 ++net->tx_packets; in vhost_net_tx_packet()
321 if (net->tx_packets < 1024) in vhost_net_tx_packet()
323 net->tx_packets = 0; in vhost_net_tx_packet()
324 net->tx_zcopy_err = 0; in vhost_net_tx_packet()
329 ++net->tx_zcopy_err; in vhost_net_tx_err()
337 return !net->tx_flush && in vhost_net_tx_select_zcopy()
338 net->tx_packets / 64 >= net->tx_zcopy_err; in vhost_net_tx_select_zcopy()
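Together these counters implement the zerocopy on/off policy: both are reset once tx_packets reaches 1024, and vhost_net_tx_select_zcopy() keeps zerocopy enabled only while tx_zcopy_err stays at or below tx_packets / 64, i.e. while roughly fewer than one in 64 recent transmissions failed the zerocopy path. A self-contained sketch of that policy; the window and threshold mirror the constants visible above, everything else is illustrative:

#include <stdbool.h>

struct zcopy_stats {
	unsigned long tx_packets;	/* packets sent in the current window */
	unsigned long tx_zcopy_err;	/* zerocopy failures in the same window */
	bool tx_flush;			/* true while draining before teardown */
};

static void zcopy_account_packet(struct zcopy_stats *s)
{
	if (++s->tx_packets >= 1024) {	/* restart the sampling window */
		s->tx_packets = 0;
		s->tx_zcopy_err = 0;
	}
}

/* Mirror of the tx_packets / 64 >= tx_zcopy_err test: zerocopy stays on
 * only while the recent failure rate is below roughly 1/64. */
static bool zcopy_allowed(const struct zcopy_stats *s)
{
	return !s->tx_flush && s->tx_packets / 64 >= s->tx_zcopy_err;
}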
344 sock_flag(sock->sk, SOCK_ZEROCOPY); in vhost_sock_zcopy()
349 return sock_flag(sock->sk, SOCK_XDP); in vhost_sock_xdp()
352 /* In case DMA completions from the lower device driver arrive out of order for some reason.
354 * of used idx. Once the lower device has completed DMA contiguously, we will signal KVM
365 for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) { in vhost_zerocopy_signal_used()
366 if (vq->heads[i].len == VHOST_DMA_FAILED_LEN) in vhost_zerocopy_signal_used()
368 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) { in vhost_zerocopy_signal_used()
369 vq->heads[i].len = VHOST_DMA_CLEAR_LEN; in vhost_zerocopy_signal_used()
375 add = min(UIO_MAXIOV - nvq->done_idx, j); in vhost_zerocopy_signal_used()
376 vhost_add_used_and_signal_n(vq->dev, vq, in vhost_zerocopy_signal_used()
377 &vq->heads[nvq->done_idx], add); in vhost_zerocopy_signal_used()
378 nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV; in vhost_zerocopy_signal_used()
379 j -= add; in vhost_zerocopy_signal_used()
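Because completions may arrive out of order, vhost_zerocopy_signal_used() only advances done_idx across the longest contiguous run of completed entries starting at done_idx, and then reports that run to the guest in at most two chunks so a single report never crosses the wrap point of the UIO_MAXIOV-sized heads array. A standalone sketch of the same scan-and-split logic over a plain array (the types and the report callback are stand-ins):

#define RING_SIZE 1024			/* stands in for UIO_MAXIOV */

enum buf_state { IN_FLIGHT, DONE };

struct zc_ring {
	enum buf_state state[RING_SIZE];
	int done_idx;			/* first entry not yet reported */
	int upend_idx;			/* one past the last submitted entry */
};

/* Report a contiguous range [start, start + n) to the consumer;
 * stands in for vhost_add_used_and_signal_n(). */
static void report_used(int start, int n)
{
	(void)start;
	(void)n;
}

static void signal_used(struct zc_ring *r)
{
	int i, j = 0, add;

	/* Count the contiguous prefix of completed entries. */
	for (i = r->done_idx; i != r->upend_idx; i = (i + 1) % RING_SIZE) {
		if (r->state[i] != DONE)
			break;
		r->state[i] = IN_FLIGHT;	/* clear the slot for reuse */
		++j;
	}

	/* Hand the prefix to the consumer, splitting once at the wrap point. */
	while (j) {
		int space = RING_SIZE - r->done_idx;	/* entries before wrap */

		add = j < space ? j : space;
		report_used(r->done_idx, add);
		r->done_idx = (r->done_idx + add) % RING_SIZE;
		j -= add;
	}
}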
387 struct vhost_net_ubuf_ref *ubufs = ubuf->ctx; in vhost_zerocopy_complete()
388 struct vhost_virtqueue *vq = ubufs->vq; in vhost_zerocopy_complete()
389 int cnt; in vhost_zerocopy_complete() local
393 /* set len to mark this desc's buffers as done with DMA */ in vhost_zerocopy_complete()
394 vq->heads[ubuf->desc].len = success ? in vhost_zerocopy_complete()
396 cnt = vhost_net_ubuf_put(ubufs); in vhost_zerocopy_complete()
405 if (cnt <= 1 || !(cnt % 16)) in vhost_zerocopy_complete()
406 vhost_poll_queue(&vq->poll); in vhost_zerocopy_complete()
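The cnt test above throttles worker wakeups from the completion callback: the TX handler is re-queued only when the last outstanding reference is about to go away (cnt <= 1) or once every 16 completions, rather than on every completed packet. A trimmed sketch of that decision, assuming the same constants:

#include <stdbool.h>

/* Decide whether this completion should kick the transmit worker.
 * 'pending' is the reference count remaining after the completion,
 * mirroring the cnt returned by vhost_net_ubuf_put(). */
static bool should_kick_worker(int pending)
{
	return pending <= 1 || (pending % 16) == 0;
}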
431 struct vhost_poll *poll = n->poll + (nvq - n->vqs); in vhost_net_disable_vq() local
434 vhost_poll_stop(poll); in vhost_net_disable_vq()
442 struct vhost_poll *poll = n->poll + (nvq - n->vqs); in vhost_net_enable_vq() local
449 return vhost_poll_start(poll, sock->file); in vhost_net_enable_vq()
454 struct vhost_virtqueue *vq = &nvq->vq; in vhost_net_signal_used()
455 struct vhost_dev *dev = vq->dev; in vhost_net_signal_used()
457 if (!nvq->done_idx) in vhost_net_signal_used()
460 vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx); in vhost_net_signal_used()
461 nvq->done_idx = 0; in vhost_net_signal_used()
471 .num = nvq->batched_xdp, in vhost_tx_batch()
472 .ptr = nvq->xdp, in vhost_tx_batch()
476 if (nvq->batched_xdp == 0) in vhost_tx_batch()
479 msghdr->msg_control = &ctl; in vhost_tx_batch()
480 msghdr->msg_controllen = sizeof(ctl); in vhost_tx_batch()
481 err = sock->ops->sendmsg(sock, msghdr, 0); in vhost_tx_batch()
483 vq_err(&nvq->vq, "Fail to batch sending packets\n"); in vhost_tx_batch()
489 for (i = 0; i < nvq->batched_xdp; ++i) in vhost_tx_batch()
490 put_page(virt_to_head_page(nvq->xdp[i].data)); in vhost_tx_batch()
491 nvq->batched_xdp = 0; in vhost_tx_batch()
492 nvq->done_idx = 0; in vhost_tx_batch()
498 nvq->batched_xdp = 0; in vhost_tx_batch()
506 if (sock->ops->peek_len) in sock_has_rx_data()
507 return sock->ops->peek_len(sock); in sock_has_rx_data()
509 return skb_queue_empty(&sock->sk->sk_receive_queue); in sock_has_rx_data()
515 if (!vhost_vq_avail_empty(&net->dev, vq)) { in vhost_net_busy_poll_try_queue()
516 vhost_poll_queue(&vq->poll); in vhost_net_busy_poll_try_queue()
517 } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { in vhost_net_busy_poll_try_queue()
518 vhost_disable_notify(&net->dev, vq); in vhost_net_busy_poll_try_queue()
519 vhost_poll_queue(&vq->poll); in vhost_net_busy_poll_try_queue()
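vhost_net_busy_poll_try_queue() is the usual virtio re-check dance: if the avail ring already has work, queue the handler; otherwise re-arm guest notifications, and if vhost_enable_notify() reports that buffers arrived while notifications were off, disarm again and queue the handler so nothing is missed. A schematic sketch of that check / enable / re-check pattern (the ring and callbacks are stand-ins, not the driver's API):

#include <stdbool.h>

struct ring {				/* stand-in for a virtqueue's avail state */
	bool has_work;
	bool notify_enabled;
};

static bool ring_has_work(const struct ring *r)	{ return r->has_work; }
static void queue_handler(void)			{ /* kick the worker */ }

/* Re-enable notifications; returns true if work arrived while they were off,
 * mirroring the contract of vhost_enable_notify(). */
static bool enable_notify(struct ring *r)
{
	r->notify_enabled = true;
	return r->has_work;
}

static void disable_notify(struct ring *r)		{ r->notify_enabled = false; }

static void try_queue(struct ring *r)
{
	if (ring_has_work(r)) {
		queue_handler();
	} else if (enable_notify(r)) {
		/* A buffer was posted in the window before notifications were
		 * re-armed; handle it ourselves instead of waiting for a kick. */
		disable_notify(r);
		queue_handler();
	}
}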
538 if (!mutex_trylock(&vq->mutex)) in vhost_net_busy_poll()
541 vhost_disable_notify(&net->dev, vq); in vhost_net_busy_poll()
544 busyloop_timeout = poll_rx ? rvq->busyloop_timeout: in vhost_net_busy_poll()
545 tvq->busyloop_timeout; in vhost_net_busy_poll()
557 !vhost_vq_avail_empty(&net->dev, rvq)) || in vhost_net_busy_poll()
558 !vhost_vq_avail_empty(&net->dev, tvq)) in vhost_net_busy_poll()
569 vhost_enable_notify(&net->dev, rvq); in vhost_net_busy_poll()
571 mutex_unlock(&vq->mutex); in vhost_net_busy_poll()
579 struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX]; in vhost_net_tx_get_vq_desc()
580 struct vhost_virtqueue *rvq = &rnvq->vq; in vhost_net_tx_get_vq_desc()
581 struct vhost_virtqueue *tvq = &tnvq->vq; in vhost_net_tx_get_vq_desc()
583 int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov), in vhost_net_tx_get_vq_desc()
586 if (r == tvq->num && tvq->busyloop_timeout) { in vhost_net_tx_get_vq_desc()
595 r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov), in vhost_net_tx_get_vq_desc()
604 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; in vhost_exceeds_maxpend()
605 struct vhost_virtqueue *vq = &nvq->vq; in vhost_exceeds_maxpend()
607 return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV > in vhost_exceeds_maxpend()
608 min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2); in vhost_exceeds_maxpend()
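upend_idx and done_idx bound the window of zerocopy buffers handed to the lower device but not yet completed; since both indices wrap modulo UIO_MAXIOV (1024), the in-flight count is (upend_idx + UIO_MAXIOV - done_idx) % UIO_MAXIOV, and vhost_exceeds_maxpend() reports when that count goes past min(VHOST_MAX_PEND, vq->num >> 2), at most a quarter of the ring. A small worked example of the wraparound arithmetic (the ring size constant is illustrative):

#include <assert.h>

#define RING_ENTRIES 1024		/* stands in for UIO_MAXIOV */

/* Number of zerocopy buffers submitted but not yet completed, given that
 * both indices wrap around RING_ENTRIES. */
static unsigned int pending(unsigned int upend_idx, unsigned int done_idx)
{
	return (upend_idx + RING_ENTRIES - done_idx) % RING_ENTRIES;
}

int main(void)
{
	/* No wraparound: 8 buffers in flight. */
	assert(pending(12, 4) == 8);

	/* Wraparound: done_idx is near the end, upend_idx has wrapped. */
	assert(pending(4, 1020) == 8);

	return 0;
}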
615 size_t len = iov_length(vq->iov, out); in init_iov_iter()
617 iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len); in init_iov_iter()
629 struct vhost_virtqueue *vq = &nvq->vq; in get_tx_bufs()
634 if (ret < 0 || ret == vq->num) in get_tx_bufs()
640 return -EFAULT; in get_tx_bufs()
644 *len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out); in get_tx_bufs()
647 *len, nvq->vhost_hlen); in get_tx_bufs()
648 return -EFAULT; in get_tx_bufs()
657 !vhost_vq_avail_empty(vq->dev, vq); in tx_can_batch()
665 struct vhost_virtqueue *vq = &nvq->vq; in vhost_net_build_xdp()
666 struct vhost_net *net = container_of(vq->dev, struct vhost_net, in vhost_net_build_xdp()
670 struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp]; in vhost_net_build_xdp()
675 int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen); in vhost_net_build_xdp()
676 int sock_hlen = nvq->sock_hlen; in vhost_net_build_xdp()
681 if (unlikely(len < nvq->sock_hlen)) in vhost_net_build_xdp()
682 return -EFAULT; in vhost_net_build_xdp()
686 return -ENOSPC; in vhost_net_build_xdp()
689 buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL, in vhost_net_build_xdp()
692 return -ENOMEM; in vhost_net_build_xdp()
697 ret = -EFAULT; in vhost_net_build_xdp()
702 gso = &hdr->gso; in vhost_net_build_xdp()
707 if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && in vhost_net_build_xdp()
708 vhost16_to_cpu(vq, gso->csum_start) + in vhost_net_build_xdp()
709 vhost16_to_cpu(vq, gso->csum_offset) + 2 > in vhost_net_build_xdp()
710 vhost16_to_cpu(vq, gso->hdr_len)) { in vhost_net_build_xdp()
711 gso->hdr_len = cpu_to_vhost16(vq, in vhost_net_build_xdp()
712 vhost16_to_cpu(vq, gso->csum_start) + in vhost_net_build_xdp()
713 vhost16_to_cpu(vq, gso->csum_offset) + 2); in vhost_net_build_xdp()
715 if (vhost16_to_cpu(vq, gso->hdr_len) > len) { in vhost_net_build_xdp()
716 ret = -EINVAL; in vhost_net_build_xdp()
721 len -= sock_hlen; in vhost_net_build_xdp()
724 ret = -EFAULT; in vhost_net_build_xdp()
730 hdr->buflen = buflen; in vhost_net_build_xdp()
732 ++nvq->batched_xdp; in vhost_net_build_xdp()
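Before turning a guest buffer into an XDP frame, vhost_net_build_xdp() sanity-checks the guest-supplied virtio_net_hdr: if the checksum start/offset point past the advertised hdr_len, hdr_len is bumped to cover them, and a hdr_len that exceeds the actual packet length is rejected with -EINVAL. A simplified standalone sketch of that style of validation on a cut-down header struct; byte-order conversion and the driver's exact control flow are omitted:

#include <stddef.h>
#include <stdint.h>

#define F_NEEDS_CSUM 1			/* VIRTIO_NET_HDR_F_NEEDS_CSUM */

struct gso_hdr {			/* cut-down virtio_net_hdr */
	uint8_t flags;
	uint16_t hdr_len;
	uint16_t csum_start;
	uint16_t csum_offset;
};

/* Returns 0 if the header is consistent with a packet of pkt_len bytes,
 * -1 otherwise: fix up hdr_len to cover the checksum field, then reject
 * a header that claims more bytes than the packet actually has. */
static int validate_gso_hdr(struct gso_hdr *gso, size_t pkt_len)
{
	if ((gso->flags & F_NEEDS_CSUM) &&
	    gso->csum_start + gso->csum_offset + 2 > gso->hdr_len)
		gso->hdr_len = gso->csum_start + gso->csum_offset + 2;

	if (gso->hdr_len > pkt_len)
		return -1;

	return 0;
}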
743 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; in handle_tx_copy()
744 struct vhost_virtqueue *vq = &nvq->vq; in handle_tx_copy()
757 bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX); in handle_tx_copy()
762 if (nvq->done_idx == VHOST_NET_BATCH) in handle_tx_copy()
771 if (head == vq->num) { in handle_tx_copy()
773 vhost_poll_queue(&vq->poll); in handle_tx_copy()
774 } else if (unlikely(vhost_enable_notify(&net->dev, in handle_tx_copy()
776 vhost_disable_notify(&net->dev, vq); in handle_tx_copy()
791 } else if (unlikely(err != -ENOSPC)) { in handle_tx_copy()
811 err = sock->ops->sendmsg(sock, &msg, len); in handle_tx_copy()
813 if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) { in handle_tx_copy()
823 vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head); in handle_tx_copy()
824 vq->heads[nvq->done_idx].len = 0; in handle_tx_copy()
825 ++nvq->done_idx; in handle_tx_copy()
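In the copy path, completed descriptors are not reported one by one: each sent head is recorded locally in vq->heads[done_idx] with a zero length, and the whole batch is pushed to the used ring (see vhost_net_signal_used() above) once done_idx reaches VHOST_NET_BATCH or the run ends. A small sketch of that deferred-completion batching; the flush callback stands in for vhost_add_used_and_signal_n():

#define NET_BATCH 64			/* stands in for VHOST_NET_BATCH */

struct used_entry {
	unsigned int id;
	unsigned int len;
};

struct tx_batch {
	struct used_entry heads[NET_BATCH];
	int done_idx;			/* entries recorded but not yet flushed */
};

/* Push n completed entries to the consumer in one call; stands in for
 * vhost_add_used_and_signal_n(). */
static void flush_used(const struct used_entry *heads, int n)
{
	(void)heads;
	(void)n;
}

/* Record one transmitted descriptor; flush the batch when it fills up. */
static void record_tx(struct tx_batch *b, unsigned int head)
{
	b->heads[b->done_idx].id = head;
	b->heads[b->done_idx].len = 0;	/* TX buffers carry no used length */
	if (++b->done_idx == NET_BATCH) {
		flush_used(b->heads, b->done_idx);
		b->done_idx = 0;
	}
}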
833 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; in handle_tx_zerocopy()
834 struct vhost_virtqueue *vq = &nvq->vq; in handle_tx_zerocopy()
865 if (head == vq->num) { in handle_tx_zerocopy()
867 vhost_poll_queue(&vq->poll); in handle_tx_zerocopy()
868 } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_tx_zerocopy()
869 vhost_disable_notify(&net->dev, vq); in handle_tx_zerocopy()
881 ubuf = nvq->ubuf_info + nvq->upend_idx; in handle_tx_zerocopy()
882 vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head); in handle_tx_zerocopy()
883 vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; in handle_tx_zerocopy()
884 ubuf->ctx = nvq->ubufs; in handle_tx_zerocopy()
885 ubuf->desc = nvq->upend_idx; in handle_tx_zerocopy()
886 ubuf->ubuf.ops = &vhost_ubuf_ops; in handle_tx_zerocopy()
887 ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG; in handle_tx_zerocopy()
888 refcount_set(&ubuf->ubuf.refcnt, 1); in handle_tx_zerocopy()
891 ctl.ptr = &ubuf->ubuf; in handle_tx_zerocopy()
893 ubufs = nvq->ubufs; in handle_tx_zerocopy()
894 atomic_inc(&ubufs->refcount); in handle_tx_zerocopy()
895 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; in handle_tx_zerocopy()
908 err = sock->ops->sendmsg(sock, &msg, len); in handle_tx_zerocopy()
910 bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS; in handle_tx_zerocopy()
913 if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS) in handle_tx_zerocopy()
916 nvq->upend_idx = ((unsigned)nvq->upend_idx - 1) in handle_tx_zerocopy()
919 vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN; in handle_tx_zerocopy()
931 vhost_add_used_and_signal(&net->dev, vq, head, 0); in handle_tx_zerocopy()
938 /* Expects to be always run from workqueue - which acts as
939 * read-side critical section for our kind of RCU. */
942 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; in handle_tx()
943 struct vhost_virtqueue *vq = &nvq->vq; in handle_tx()
946 mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX); in handle_tx()
954 vhost_disable_notify(&net->dev, vq); in handle_tx()
963 mutex_unlock(&vq->mutex); in handle_tx()
972 if (rvq->rx_ring) in peek_head_len()
975 spin_lock_irqsave(&sk->sk_receive_queue.lock, flags); in peek_head_len()
976 head = skb_peek(&sk->sk_receive_queue); in peek_head_len()
978 len = head->len; in peek_head_len()
983 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags); in peek_head_len()
990 struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX]; in vhost_net_rx_peek_head_len()
991 struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX]; in vhost_net_rx_peek_head_len()
992 struct vhost_virtqueue *rvq = &rnvq->vq; in vhost_net_rx_peek_head_len()
993 struct vhost_virtqueue *tvq = &tnvq->vq; in vhost_net_rx_peek_head_len()
996 if (!len && rvq->busyloop_timeout) { in vhost_net_rx_peek_head_len()
1008 /* This is a multi-buffer version of vhost_get_vq_desc that works if
1010 * @vq - the relevant virtqueue
1011 * @datalen - data length we'll be reading
1012 * @iovcount - returned count of io vectors we fill
1013 * @log - vhost log
1014 * @log_num - log offset
1015 * @quota - headcount quota, 1 for big buffer
1038 r = -ENOBUFS; in get_rx_bufs()
1041 r = vhost_get_vq_desc(vq, vq->iov + seg, in get_rx_bufs()
1042 ARRAY_SIZE(vq->iov) - seg, &out, in get_rx_bufs()
1048 if (d == vq->num) { in get_rx_bufs()
1055 r = -EINVAL; in get_rx_bufs()
1063 len = iov_length(vq->iov + seg, in); in get_rx_bufs()
1065 datalen -= len; in get_rx_bufs()
1069 heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); in get_rx_bufs()
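get_rx_bufs() keeps pulling guest RX descriptors until it has gathered enough space for datalen bytes or runs into the headcount quota, recording each descriptor's id and length in heads[]; at loop exit datalen holds the non-positive overshoot, so the last entry's length is trimmed by adding datalen back. A standalone sketch of that gather-and-trim loop over plain arrays; the descriptor source is a fake and the driver's error paths are reduced to returning 0:

#include <stddef.h>

struct used_elem {
	unsigned int id;
	unsigned int len;
};

/* Fake descriptor source: buffer i offers sizes[i] bytes of space. */
struct desc_source {
	const int *sizes;
	unsigned int count;
	unsigned int next;
};

/* Gather enough buffers to hold datalen bytes, up to quota entries.
 * Returns the number of buffers used, or 0 on any failure. */
static unsigned int gather_rx_bufs(struct desc_source *src, int datalen,
				   struct used_elem *heads, unsigned int quota)
{
	unsigned int headcount = 0;
	int len = 0;

	while (datalen > 0 && headcount < quota) {
		if (src->next == src->count)
			return 0;		/* no buffer available yet */
		len = src->sizes[src->next];
		heads[headcount].id = src->next;
		heads[headcount].len = len;
		datalen -= len;
		++headcount;
		++src->next;
	}
	if (datalen > 0)
		return 0;			/* quota hit before the data fit */
	if (headcount)
		/* datalen is now <= 0: trim the last buffer to the bytes
		 * actually needed, as get_rx_bufs() does with len + datalen. */
		heads[headcount - 1].len = len + datalen;
	return headcount;
}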
1085 /* Expects to be always run from workqueue - which acts as
1086 * read-side critical section for our kind of RCU. */
1089 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX]; in handle_rx()
1090 struct vhost_virtqueue *vq = &nvq->vq; in handle_rx()
1116 mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX); in handle_rx()
1124 vhost_disable_notify(&net->dev, vq); in handle_rx()
1127 vhost_hlen = nvq->vhost_hlen; in handle_rx()
1128 sock_hlen = nvq->sock_hlen; in handle_rx()
1131 vq->log : NULL; in handle_rx()
1137 sock_len = vhost_net_rx_peek_head_len(net, sock->sk, in handle_rx()
1143 headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, in handle_rx()
1152 vhost_poll_queue(&vq->poll); in handle_rx()
1153 } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_rx()
1156 vhost_disable_notify(&net->dev, vq); in handle_rx()
1164 if (nvq->rx_ring) in handle_rx()
1165 msg.msg_control = vhost_net_buf_consume(&nvq->rxq); in handle_rx()
1168 iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1); in handle_rx()
1169 err = sock->ops->recvmsg(sock, &msg, in handle_rx()
1175 iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len); in handle_rx()
1183 err = sock->ops->recvmsg(sock, &msg, in handle_rx()
1199 "at addr %p\n", vq->iov->iov_base); in handle_rx()
1204 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF in handle_rx()
1218 nvq->done_idx += headcount; in handle_rx()
1219 if (nvq->done_idx > VHOST_NET_BATCH) in handle_rx()
1223 vq->iov, in); in handle_rx()
1228 vhost_poll_queue(&vq->poll); in handle_rx()
1233 mutex_unlock(&vq->mutex); in handle_rx()
1239 poll.work); in handle_tx_kick()
1240 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_tx_kick()
1248 poll.work); in handle_rx_kick()
1249 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_rx_kick()
1257 poll[VHOST_NET_VQ_TX].work); in handle_tx_net()
1264 poll[VHOST_NET_VQ_RX].work); in handle_rx_net()
1279 return -ENOMEM; in vhost_net_open()
1283 return -ENOMEM; in vhost_net_open()
1291 return -ENOMEM; in vhost_net_open()
1293 n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue; in vhost_net_open()
1300 return -ENOMEM; in vhost_net_open()
1302 n->vqs[VHOST_NET_VQ_TX].xdp = xdp; in vhost_net_open()
1304 dev = &n->dev; in vhost_net_open()
1305 vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; in vhost_net_open()
1306 vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq; in vhost_net_open()
1307 n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick; in vhost_net_open()
1308 n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick; in vhost_net_open()
1310 n->vqs[i].ubufs = NULL; in vhost_net_open()
1311 n->vqs[i].ubuf_info = NULL; in vhost_net_open()
1312 n->vqs[i].upend_idx = 0; in vhost_net_open()
1313 n->vqs[i].done_idx = 0; in vhost_net_open()
1314 n->vqs[i].batched_xdp = 0; in vhost_net_open()
1315 n->vqs[i].vhost_hlen = 0; in vhost_net_open()
1316 n->vqs[i].sock_hlen = 0; in vhost_net_open()
1317 n->vqs[i].rx_ring = NULL; in vhost_net_open()
1318 vhost_net_buf_init(&n->vqs[i].rxq); in vhost_net_open()
1325 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev, in vhost_net_open()
1327 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev, in vhost_net_open()
1330 f->private_data = n; in vhost_net_open()
1331 page_frag_cache_init(&n->pf_cache); in vhost_net_open()
1343 mutex_lock(&vq->mutex); in vhost_net_stop_vq()
1348 nvq->rx_ring = NULL; in vhost_net_stop_vq()
1349 mutex_unlock(&vq->mutex); in vhost_net_stop_vq()
1356 *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq); in vhost_net_stop()
1357 *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq); in vhost_net_stop()
1362 vhost_dev_flush(&n->dev); in vhost_net_flush()
1363 if (n->vqs[VHOST_NET_VQ_TX].ubufs) { in vhost_net_flush()
1364 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1365 n->tx_flush = true; in vhost_net_flush()
1366 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1368 vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); in vhost_net_flush()
1369 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1370 n->tx_flush = false; in vhost_net_flush()
1371 atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); in vhost_net_flush()
1372 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1378 struct vhost_net *n = f->private_data; in vhost_net_release()
1384 vhost_dev_stop(&n->dev); in vhost_net_release()
1385 vhost_dev_cleanup(&n->dev); in vhost_net_release()
1394 * since jobs can re-queue themselves. */ in vhost_net_release()
1396 kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue); in vhost_net_release()
1397 kfree(n->vqs[VHOST_NET_VQ_TX].xdp); in vhost_net_release()
1398 kfree(n->dev.vqs); in vhost_net_release()
1399 page_frag_cache_drain(&n->pf_cache); in vhost_net_release()
1410 return ERR_PTR(-ENOTSOCK); in get_raw_socket()
1413 if (sock->sk->sk_type != SOCK_RAW) { in get_raw_socket()
1414 r = -ESOCKTNOSUPPORT; in get_raw_socket()
1418 if (sock->sk->sk_family != AF_PACKET) { in get_raw_socket()
1419 r = -EPFNOSUPPORT; in get_raw_socket()
1448 return ERR_PTR(-EBADF); in get_tap_socket()
1463 if (fd == -1) in get_socket()
1471 return ERR_PTR(-ENOTSOCK); in get_socket()
1482 mutex_lock(&n->dev.mutex); in vhost_net_set_backend()
1483 r = vhost_dev_check_owner(&n->dev); in vhost_net_set_backend()
1488 r = -ENOBUFS; in vhost_net_set_backend()
1491 vq = &n->vqs[index].vq; in vhost_net_set_backend()
1492 nvq = &n->vqs[index]; in vhost_net_set_backend()
1493 mutex_lock(&vq->mutex); in vhost_net_set_backend()
1495 if (fd == -1) in vhost_net_set_backend()
1496 vhost_clear_msg(&n->dev); in vhost_net_set_backend()
1500 r = -EFAULT; in vhost_net_set_backend()
1530 nvq->rx_ring = get_tap_ptr_ring(sock->file); in vhost_net_set_backend()
1532 nvq->rx_ring = NULL; in vhost_net_set_backend()
1535 oldubufs = nvq->ubufs; in vhost_net_set_backend()
1536 nvq->ubufs = ubufs; in vhost_net_set_backend()
1538 n->tx_packets = 0; in vhost_net_set_backend()
1539 n->tx_zcopy_err = 0; in vhost_net_set_backend()
1540 n->tx_flush = false; in vhost_net_set_backend()
1543 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
1547 mutex_lock(&vq->mutex); in vhost_net_set_backend()
1549 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
1553 vhost_dev_flush(&n->dev); in vhost_net_set_backend()
1557 mutex_unlock(&n->dev.mutex); in vhost_net_set_backend()
1569 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
1571 mutex_unlock(&n->dev.mutex); in vhost_net_set_backend()
1582 mutex_lock(&n->dev.mutex); in vhost_net_reset_owner()
1583 err = vhost_dev_check_owner(&n->dev); in vhost_net_reset_owner()
1588 err = -ENOMEM; in vhost_net_reset_owner()
1593 vhost_dev_stop(&n->dev); in vhost_net_reset_owner()
1594 vhost_dev_reset_owner(&n->dev, umem); in vhost_net_reset_owner()
1597 mutex_unlock(&n->dev.mutex); in vhost_net_reset_owner()
1623 mutex_lock(&n->dev.mutex); in vhost_net_set_features()
1625 !vhost_log_access_ok(&n->dev)) in vhost_net_set_features()
1629 if (vhost_init_device_iotlb(&n->dev)) in vhost_net_set_features()
1634 mutex_lock(&n->vqs[i].vq.mutex); in vhost_net_set_features()
1635 n->vqs[i].vq.acked_features = features; in vhost_net_set_features()
1636 n->vqs[i].vhost_hlen = vhost_hlen; in vhost_net_set_features()
1637 n->vqs[i].sock_hlen = sock_hlen; in vhost_net_set_features()
1638 mutex_unlock(&n->vqs[i].vq.mutex); in vhost_net_set_features()
1640 mutex_unlock(&n->dev.mutex); in vhost_net_set_features()
1644 mutex_unlock(&n->dev.mutex); in vhost_net_set_features()
1645 return -EFAULT; in vhost_net_set_features()
1652 mutex_lock(&n->dev.mutex); in vhost_net_set_owner()
1653 if (vhost_dev_has_owner(&n->dev)) { in vhost_net_set_owner()
1654 r = -EBUSY; in vhost_net_set_owner()
1660 r = vhost_dev_set_owner(&n->dev); in vhost_net_set_owner()
1665 mutex_unlock(&n->dev.mutex); in vhost_net_set_owner()
1672 struct vhost_net *n = f->private_data; in vhost_net_ioctl()
1682 return -EFAULT; in vhost_net_ioctl()
1687 return -EFAULT; in vhost_net_ioctl()
1691 return -EFAULT; in vhost_net_ioctl()
1693 return -EOPNOTSUPP; in vhost_net_ioctl()
1698 return -EFAULT; in vhost_net_ioctl()
1702 return -EFAULT; in vhost_net_ioctl()
1704 return -EOPNOTSUPP; in vhost_net_ioctl()
1705 vhost_set_backend_features(&n->dev, features); in vhost_net_ioctl()
1712 mutex_lock(&n->dev.mutex); in vhost_net_ioctl()
1713 r = vhost_dev_ioctl(&n->dev, ioctl, argp); in vhost_net_ioctl()
1714 if (r == -ENOIOCTLCMD) in vhost_net_ioctl()
1715 r = vhost_vring_ioctl(&n->dev, ioctl, argp); in vhost_net_ioctl()
1718 mutex_unlock(&n->dev.mutex); in vhost_net_ioctl()
1725 struct file *file = iocb->ki_filp; in vhost_net_chr_read_iter()
1726 struct vhost_net *n = file->private_data; in vhost_net_chr_read_iter()
1727 struct vhost_dev *dev = &n->dev; in vhost_net_chr_read_iter()
1728 int noblock = file->f_flags & O_NONBLOCK; in vhost_net_chr_read_iter()
1736 struct file *file = iocb->ki_filp; in vhost_net_chr_write_iter()
1737 struct vhost_net *n = file->private_data; in vhost_net_chr_write_iter()
1738 struct vhost_dev *dev = &n->dev; in vhost_net_chr_write_iter()
1745 struct vhost_net *n = file->private_data; in vhost_net_chr_poll()
1746 struct vhost_dev *dev = &n->dev; in vhost_net_chr_poll()
1756 .poll = vhost_net_chr_poll,
1765 .name = "vhost-net",
1788 MODULE_ALIAS("devname:vhost-net");