Lines Matching +full:iommu +full:- +full:ctx

1 // SPDX-License-Identifier: GPL-2.0-or-later
13 #include <linux/dma-mapping.h>
22 dev_err(&(_vq)->vq.vdev->dev, \
23 "%s:"fmt, (_vq)->vq.name, ##args); \
29 if ((_vq)->in_use) \
31 (_vq)->vq.name, (_vq)->in_use); \
32 (_vq)->in_use = __LINE__; \
35 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
41 if ((_vq)->last_add_time_valid) \
43 (_vq)->last_add_time)) > 100); \
44 (_vq)->last_add_time = now; \
45 (_vq)->last_add_time_valid = true; \
49 if ((_vq)->last_add_time_valid) { \
51 (_vq)->last_add_time)) > 100); \
55 ((_vq)->last_add_time_valid = false)
59 dev_err(&_vq->vq.vdev->dev, \
60 "%s:"fmt, (_vq)->vq.name, ##args); \
61 (_vq)->broken = true; \
101 /* Last written value to avail->flags */
105 * Last written value to avail->idx in
110 /* Per-descriptor state. */
145 * Last written value to driver->flags in
150 /* Per-descriptor state. */
242 return (vq->indirect && total_sg > 1 && vq->vq.num_free); in virtqueue_use_indirect()
247 * quirk and bypass the IOMMU. If not there, just use the DMA API.
255 * that speaks virtio behind a physical IOMMU, we must use the DMA API
258 * On other systems, including SPARC and PPC64, virtio-pci devices are
259 * enumerated as though they are behind an IOMMU, but the virtio host
260 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
266 * TODO: install a per-device DMA ops structure that does the right thing
278 * In theory, it's possible to have a buggy QEMU-supplied in vring_use_dma_api()
279 * emulated Q35 IOMMU and Xen enabled at the same time. On in vring_use_dma_api()
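
The body of vring_use_dma_api() itself does not match the query; for context, a minimal sketch of the decision the comment above documents, assuming the upstream helpers virtio_has_dma_quirk() and xen_domain() (the exact body may differ across kernel versions):

static bool vring_use_dma_api(const struct virtio_device *vdev)
{
        /* VIRTIO_F_ACCESS_PLATFORM negotiated: the device promises to
         * honour the platform IOMMU, so always go through the DMA API. */
        if (!virtio_has_dma_quirk(vdev))
                return true;

        /* Legacy quirk set: we are left to guess. Xen guests still need
         * the DMA API for grant mappings even though the device itself
         * bypasses the IOMMU. */
        if (xen_domain())
                return true;

        return false;
}
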
294 return vring->use_dma_api && (extra->addr != DMA_MAPPING_ERROR); in vring_need_unmap_buffer()
302 max_segment_size = dma_max_mapping_size(vdev->dev.parent); in virtio_max_dma_size()
325 * have 64-bit phys_addr_t but 32-bit dma_addr_t in vring_alloc_queue()
326 * are certain non-highmem MIPS and x86 in vring_alloc_queue()
359 return vq->dma_dev; in vring_dma_dev()
373 *len = sg->length; in vring_map_one_sg()
375 if (!vq->use_dma_api) { in vring_map_one_sg()
381 kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction); in vring_map_one_sg()
392 sg_page(sg), sg->offset, sg->length, in vring_map_one_sg()
396 return -ENOMEM; in vring_map_one_sg()
405 if (!vq->use_dma_api) in vring_map_single()
415 if (!vq->use_dma_api) in vring_mapping_error()
423 vq->vq.num_free = num; in virtqueue_init()
425 if (vq->packed_ring) in virtqueue_init()
426 vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR); in virtqueue_init()
428 vq->last_used_idx = 0; in virtqueue_init()
430 vq->event_triggered = false; in virtqueue_init()
431 vq->num_added = 0; in virtqueue_init()
434 vq->in_use = false; in virtqueue_init()
435 vq->last_add_time_valid = false; in virtqueue_init()
441 * Split ring specific functions - *_split().
449 flags = extra->flags; in vring_unmap_one_split()
452 if (!vq->use_dma_api) in vring_unmap_one_split()
456 extra->addr, in vring_unmap_one_split()
457 extra->len, in vring_unmap_one_split()
465 extra->addr, in vring_unmap_one_split()
466 extra->len, in vring_unmap_one_split()
472 return extra->next; in vring_unmap_one_split()
514 desc[i].flags = cpu_to_virtio16(vq->vdev, flags); in virtqueue_add_desc_split()
515 desc[i].addr = cpu_to_virtio64(vq->vdev, addr); in virtqueue_add_desc_split()
516 desc[i].len = cpu_to_virtio32(vq->vdev, len); in virtqueue_add_desc_split()
524 desc[i].next = cpu_to_virtio16(vq->vdev, next); in virtqueue_add_desc_split()
535 void *ctx, in virtqueue_add_split() argument
550 BUG_ON(ctx && vq->indirect); in virtqueue_add_split()
552 if (unlikely(vq->broken)) { in virtqueue_add_split()
554 return -EIO; in virtqueue_add_split()
561 head = vq->free_head; in virtqueue_add_split()
567 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); in virtqueue_add_split()
579 desc = vq->split.vring.desc; in virtqueue_add_split()
580 extra = vq->split.desc_extra; in virtqueue_add_split()
585 if (unlikely(vq->vq.num_free < descs_used)) { in virtqueue_add_split()
586 pr_debug("Can't add buf len %i - avail = %i\n", in virtqueue_add_split()
587 descs_used, vq->vq.num_free); in virtqueue_add_split()
592 vq->notify(&vq->vq); in virtqueue_add_split()
596 return -ENOSPC; in virtqueue_add_split()
635 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); in virtqueue_add_split()
637 vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= in virtqueue_add_split()
648 virtqueue_add_desc_split(_vq, vq->split.vring.desc, in virtqueue_add_split()
649 vq->split.desc_extra, in virtqueue_add_split()
656 vq->vq.num_free -= descs_used; in virtqueue_add_split()
660 vq->free_head = vq->split.desc_extra[head].next; in virtqueue_add_split()
662 vq->free_head = i; in virtqueue_add_split()
665 vq->split.desc_state[head].data = data; in virtqueue_add_split()
667 vq->split.desc_state[head].indir_desc = desc; in virtqueue_add_split()
669 vq->split.desc_state[head].indir_desc = ctx; in virtqueue_add_split()
671 /* Put entry in available array (but don't update avail->idx until they in virtqueue_add_split()
673 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); in virtqueue_add_split()
674 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); in virtqueue_add_split()
678 virtio_wmb(vq->weak_barriers); in virtqueue_add_split()
679 vq->split.avail_idx_shadow++; in virtqueue_add_split()
680 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_add_split()
681 vq->split.avail_idx_shadow); in virtqueue_add_split()
682 vq->num_added++; in virtqueue_add_split()
689 if (unlikely(vq->num_added == (1 << 16) - 1)) in virtqueue_add_split()
713 return -ENOMEM; in virtqueue_add_split()
725 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_split()
727 old = vq->split.avail_idx_shadow - vq->num_added; in virtqueue_kick_prepare_split()
728 new = vq->split.avail_idx_shadow; in virtqueue_kick_prepare_split()
729 vq->num_added = 0; in virtqueue_kick_prepare_split()
734 if (vq->event) { in virtqueue_kick_prepare_split()
735 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, in virtqueue_kick_prepare_split()
736 vring_avail_event(&vq->split.vring)), in virtqueue_kick_prepare_split()
739 needs_kick = !(vq->split.vring.used->flags & in virtqueue_kick_prepare_split()
740 cpu_to_virtio16(_vq->vdev, in virtqueue_kick_prepare_split()
748 void **ctx) in detach_buf_split() argument
752 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); in detach_buf_split()
755 vq->split.desc_state[head].data = NULL; in detach_buf_split()
757 extra = vq->split.desc_extra; in detach_buf_split()
759 /* Put back on free list: unmap first-level descriptors and find end */ in detach_buf_split()
762 while (vq->split.vring.desc[i].flags & nextflag) { in detach_buf_split()
764 i = vq->split.desc_extra[i].next; in detach_buf_split()
765 vq->vq.num_free++; in detach_buf_split()
769 vq->split.desc_extra[i].next = vq->free_head; in detach_buf_split()
770 vq->free_head = head; in detach_buf_split()
773 vq->vq.num_free++; in detach_buf_split()
775 if (vq->indirect) { in detach_buf_split()
777 vq->split.desc_state[head].indir_desc; in detach_buf_split()
783 len = vq->split.desc_extra[head].len; in detach_buf_split()
785 BUG_ON(!(vq->split.desc_extra[head].flags & in detach_buf_split()
793 if (vq->use_dma_api) { in detach_buf_split()
799 vq->split.desc_state[head].indir_desc = NULL; in detach_buf_split()
800 } else if (ctx) { in detach_buf_split()
801 *ctx = vq->split.desc_state[head].indir_desc; in detach_buf_split()
807 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, in more_used_split()
808 vq->split.vring.used->idx); in more_used_split()
813 void **ctx) in virtqueue_get_buf_ctx_split() argument
822 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_split()
834 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_split()
836 last_used = (vq->last_used_idx & (vq->split.vring.num - 1)); in virtqueue_get_buf_ctx_split()
837 i = virtio32_to_cpu(_vq->vdev, in virtqueue_get_buf_ctx_split()
838 vq->split.vring.used->ring[last_used].id); in virtqueue_get_buf_ctx_split()
839 *len = virtio32_to_cpu(_vq->vdev, in virtqueue_get_buf_ctx_split()
840 vq->split.vring.used->ring[last_used].len); in virtqueue_get_buf_ctx_split()
842 if (unlikely(i >= vq->split.vring.num)) { in virtqueue_get_buf_ctx_split()
846 if (unlikely(!vq->split.desc_state[i].data)) { in virtqueue_get_buf_ctx_split()
852 ret = vq->split.desc_state[i].data; in virtqueue_get_buf_ctx_split()
853 detach_buf_split(vq, i, ctx); in virtqueue_get_buf_ctx_split()
854 vq->last_used_idx++; in virtqueue_get_buf_ctx_split()
858 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) in virtqueue_get_buf_ctx_split()
859 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_split()
860 &vring_used_event(&vq->split.vring), in virtqueue_get_buf_ctx_split()
861 cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); in virtqueue_get_buf_ctx_split()
873 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { in virtqueue_disable_cb_split()
874 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_disable_cb_split()
880 if (vq->event_triggered) in virtqueue_disable_cb_split()
883 if (vq->event) in virtqueue_disable_cb_split()
885 vring_used_event(&vq->split.vring) = 0x0; in virtqueue_disable_cb_split()
887 vq->split.vring.avail->flags = in virtqueue_disable_cb_split()
888 cpu_to_virtio16(_vq->vdev, in virtqueue_disable_cb_split()
889 vq->split.avail_flags_shadow); in virtqueue_disable_cb_split()
905 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_prepare_split()
906 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_prepare_split()
907 if (!vq->event) in virtqueue_enable_cb_prepare_split()
908 vq->split.vring.avail->flags = in virtqueue_enable_cb_prepare_split()
909 cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_prepare_split()
910 vq->split.avail_flags_shadow); in virtqueue_enable_cb_prepare_split()
912 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_prepare_split()
913 last_used_idx = vq->last_used_idx); in virtqueue_enable_cb_prepare_split()
922 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, in virtqueue_poll_split()
923 vq->split.vring.used->idx); in virtqueue_poll_split()
938 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_delayed_split()
939 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_delayed_split()
940 if (!vq->event) in virtqueue_enable_cb_delayed_split()
941 vq->split.vring.avail->flags = in virtqueue_enable_cb_delayed_split()
942 cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_delayed_split()
943 vq->split.avail_flags_shadow); in virtqueue_enable_cb_delayed_split()
946 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4; in virtqueue_enable_cb_delayed_split()
948 virtio_store_mb(vq->weak_barriers, in virtqueue_enable_cb_delayed_split()
949 &vring_used_event(&vq->split.vring), in virtqueue_enable_cb_delayed_split()
950 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); in virtqueue_enable_cb_delayed_split()
952 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx) in virtqueue_enable_cb_delayed_split()
953 - vq->last_used_idx) > bufs)) { in virtqueue_enable_cb_delayed_split()
970 for (i = 0; i < vq->split.vring.num; i++) { in virtqueue_detach_unused_buf_split()
971 if (!vq->split.desc_state[i].data) in virtqueue_detach_unused_buf_split()
974 buf = vq->split.desc_state[i].data; in virtqueue_detach_unused_buf_split()
976 vq->split.avail_idx_shadow--; in virtqueue_detach_unused_buf_split()
977 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_detach_unused_buf_split()
978 vq->split.avail_idx_shadow); in virtqueue_detach_unused_buf_split()
983 BUG_ON(vq->vq.num_free != vq->split.vring.num); in virtqueue_detach_unused_buf_split()
994 vdev = vq->vq.vdev; in virtqueue_vring_init_split()
996 vring_split->avail_flags_shadow = 0; in virtqueue_vring_init_split()
997 vring_split->avail_idx_shadow = 0; in virtqueue_vring_init_split()
1000 if (!vq->vq.callback) { in virtqueue_vring_init_split()
1001 vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_vring_init_split()
1002 if (!vq->event) in virtqueue_vring_init_split()
1003 vring_split->vring.avail->flags = cpu_to_virtio16(vdev, in virtqueue_vring_init_split()
1004 vring_split->avail_flags_shadow); in virtqueue_vring_init_split()
1012 num = vq->split.vring.num; in virtqueue_reinit_split()
1014 vq->split.vring.avail->flags = 0; in virtqueue_reinit_split()
1015 vq->split.vring.avail->idx = 0; in virtqueue_reinit_split()
1018 vq->split.vring.avail->ring[num] = 0; in virtqueue_reinit_split()
1020 vq->split.vring.used->flags = 0; in virtqueue_reinit_split()
1021 vq->split.vring.used->idx = 0; in virtqueue_reinit_split()
1024 *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0; in virtqueue_reinit_split()
1028 virtqueue_vring_init_split(&vq->split, vq); in virtqueue_reinit_split()
1034 vq->split = *vring_split; in virtqueue_vring_attach_split()
1037 vq->free_head = 0; in virtqueue_vring_attach_split()
1044 u32 num = vring_split->vring.num; in vring_alloc_state_extra_split()
1056 vring_split->desc_state = state; in vring_alloc_state_extra_split()
1057 vring_split->desc_extra = extra; in vring_alloc_state_extra_split()
1063 return -ENOMEM; in vring_alloc_state_extra_split()
1069 vring_free_queue(vdev, vring_split->queue_size_in_bytes, in vring_free_split()
1070 vring_split->vring.desc, in vring_free_split()
1071 vring_split->queue_dma_addr, in vring_free_split()
1074 kfree(vring_split->desc_state); in vring_free_split()
1075 kfree(vring_split->desc_extra); in vring_free_split()
1090 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); in vring_alloc_queue_split()
1091 return -EINVAL; in vring_alloc_queue_split()
1103 return -ENOMEM; in vring_alloc_queue_split()
1107 return -ENOMEM; in vring_alloc_queue_split()
1116 return -ENOMEM; in vring_alloc_queue_split()
1118 vring_init(&vring_split->vring, num, queue, vring_align); in vring_alloc_queue_split()
1120 vring_split->queue_dma_addr = dma_addr; in vring_alloc_queue_split()
1121 vring_split->queue_size_in_bytes = vring_size(num, vring_align); in vring_alloc_queue_split()
1123 vring_split->vring_align = vring_align; in vring_alloc_queue_split()
1124 vring_split->may_reduce_num = may_reduce_num; in vring_alloc_queue_split()
1146 vq->packed_ring = false; in __vring_new_virtqueue_split()
1147 vq->vq.callback = callback; in __vring_new_virtqueue_split()
1148 vq->vq.vdev = vdev; in __vring_new_virtqueue_split()
1149 vq->vq.name = name; in __vring_new_virtqueue_split()
1150 vq->vq.index = index; in __vring_new_virtqueue_split()
1151 vq->vq.reset = false; in __vring_new_virtqueue_split()
1152 vq->we_own_ring = false; in __vring_new_virtqueue_split()
1153 vq->notify = notify; in __vring_new_virtqueue_split()
1154 vq->weak_barriers = weak_barriers; in __vring_new_virtqueue_split()
1156 vq->broken = true; in __vring_new_virtqueue_split()
1158 vq->broken = false; in __vring_new_virtqueue_split()
1160 vq->dma_dev = dma_dev; in __vring_new_virtqueue_split()
1161 vq->use_dma_api = vring_use_dma_api(vdev); in __vring_new_virtqueue_split()
1163 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in __vring_new_virtqueue_split()
1165 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in __vring_new_virtqueue_split()
1168 vq->weak_barriers = false; in __vring_new_virtqueue_split()
1178 virtqueue_init(vq, vring_split->vring.num); in __vring_new_virtqueue_split()
1181 spin_lock(&vdev->vqs_list_lock); in __vring_new_virtqueue_split()
1182 list_add_tail(&vq->vq.list, &vdev->vqs); in __vring_new_virtqueue_split()
1183 spin_unlock(&vdev->vqs_list_lock); in __vring_new_virtqueue_split()
1184 return &vq->vq; in __vring_new_virtqueue_split()
1216 to_vvq(vq)->we_own_ring = true; in vring_create_virtqueue_split()
1225 struct virtio_device *vdev = _vq->vdev; in virtqueue_resize_split()
1229 vq->split.vring_align, in virtqueue_resize_split()
1230 vq->split.may_reduce_num, in virtqueue_resize_split()
1239 vring_free(&vq->vq); in virtqueue_resize_split()
1252 return -ENOMEM; in virtqueue_resize_split()
1257 * Packed ring specific functions - *_packed().
1266 return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR)); in packed_last_used()
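
VRING_PACKED_EVENT_F_WRAP_CTR is 15, so the mask above keeps only the low 15 bits: the used wrap counter is stored in the top bit of last_used_idx, next to the index. The companion accessor, sketched to match:

static bool packed_used_wrap_counter(u16 last_used_idx)
{
        return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}
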
1274 flags = extra->flags; in vring_unmap_extra_packed()
1277 if (!vq->use_dma_api) in vring_unmap_extra_packed()
1281 extra->addr, extra->len, in vring_unmap_extra_packed()
1289 extra->addr, extra->len, in vring_unmap_extra_packed()
1339 head = vq->packed.next_avail_idx; in virtqueue_add_indirect_packed()
1342 return -ENOMEM; in virtqueue_add_indirect_packed()
1346 if (unlikely(vq->vq.num_free < 1)) { in virtqueue_add_indirect_packed()
1347 pr_debug("Can't add buf len 1 - avail = 0\n"); in virtqueue_add_indirect_packed()
1350 return -ENOSPC; in virtqueue_add_indirect_packed()
1354 id = vq->free_head; in virtqueue_add_indirect_packed()
1355 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_indirect_packed()
1369 if (unlikely(vq->use_dma_api)) { in virtqueue_add_indirect_packed()
1386 vq->packed.vring.desc[head].addr = cpu_to_le64(addr); in virtqueue_add_indirect_packed()
1387 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg * in virtqueue_add_indirect_packed()
1389 vq->packed.vring.desc[head].id = cpu_to_le16(id); in virtqueue_add_indirect_packed()
1391 if (vq->use_dma_api) { in virtqueue_add_indirect_packed()
1392 vq->packed.desc_extra[id].addr = addr; in virtqueue_add_indirect_packed()
1393 vq->packed.desc_extra[id].len = total_sg * in virtqueue_add_indirect_packed()
1395 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1396 vq->packed.avail_used_flags; in virtqueue_add_indirect_packed()
1404 virtio_wmb(vq->weak_barriers); in virtqueue_add_indirect_packed()
1405 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1406 vq->packed.avail_used_flags); in virtqueue_add_indirect_packed()
1409 vq->vq.num_free -= 1; in virtqueue_add_indirect_packed()
1413 if (n >= vq->packed.vring.num) { in virtqueue_add_indirect_packed()
1415 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_indirect_packed()
1416 vq->packed.avail_used_flags ^= in virtqueue_add_indirect_packed()
1420 vq->packed.next_avail_idx = n; in virtqueue_add_indirect_packed()
1421 vq->free_head = vq->packed.desc_extra[id].next; in virtqueue_add_indirect_packed()
1424 vq->packed.desc_state[id].num = 1; in virtqueue_add_indirect_packed()
1425 vq->packed.desc_state[id].data = data; in virtqueue_add_indirect_packed()
1426 vq->packed.desc_state[id].indir_desc = desc; in virtqueue_add_indirect_packed()
1427 vq->packed.desc_state[id].last = id; in virtqueue_add_indirect_packed()
1429 vq->num_added += 1; in virtqueue_add_indirect_packed()
1445 return -ENOMEM; in virtqueue_add_indirect_packed()
1454 void *ctx, in virtqueue_add_packed() argument
1469 BUG_ON(ctx && vq->indirect); in virtqueue_add_packed()
1471 if (unlikely(vq->broken)) { in virtqueue_add_packed()
1473 return -EIO; in virtqueue_add_packed()
1483 if (err != -ENOMEM) { in virtqueue_add_packed()
1491 head = vq->packed.next_avail_idx; in virtqueue_add_packed()
1492 avail_used_flags = vq->packed.avail_used_flags; in virtqueue_add_packed()
1494 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect); in virtqueue_add_packed()
1496 desc = vq->packed.vring.desc; in virtqueue_add_packed()
1500 if (unlikely(vq->vq.num_free < descs_used)) { in virtqueue_add_packed()
1501 pr_debug("Can't add buf len %i - avail = %i\n", in virtqueue_add_packed()
1502 descs_used, vq->vq.num_free); in virtqueue_add_packed()
1504 return -ENOSPC; in virtqueue_add_packed()
1507 id = vq->free_head; in virtqueue_add_packed()
1508 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_packed()
1521 flags = cpu_to_le16(vq->packed.avail_used_flags | in virtqueue_add_packed()
1533 if (unlikely(vq->use_dma_api)) { in virtqueue_add_packed()
1534 vq->packed.desc_extra[curr].addr = premapped ? in virtqueue_add_packed()
1536 vq->packed.desc_extra[curr].len = len; in virtqueue_add_packed()
1537 vq->packed.desc_extra[curr].flags = in virtqueue_add_packed()
1541 curr = vq->packed.desc_extra[curr].next; in virtqueue_add_packed()
1543 if (unlikely(++i >= vq->packed.vring.num)) { in virtqueue_add_packed()
1545 vq->packed.avail_used_flags ^= in virtqueue_add_packed()
1553 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_packed()
1556 vq->vq.num_free -= descs_used; in virtqueue_add_packed()
1559 vq->packed.next_avail_idx = i; in virtqueue_add_packed()
1560 vq->free_head = curr; in virtqueue_add_packed()
1563 vq->packed.desc_state[id].num = descs_used; in virtqueue_add_packed()
1564 vq->packed.desc_state[id].data = data; in virtqueue_add_packed()
1565 vq->packed.desc_state[id].indir_desc = ctx; in virtqueue_add_packed()
1566 vq->packed.desc_state[id].last = prev; in virtqueue_add_packed()
1573 virtio_wmb(vq->weak_barriers); in virtqueue_add_packed()
1574 vq->packed.vring.desc[head].flags = head_flags; in virtqueue_add_packed()
1575 vq->num_added += descs_used; in virtqueue_add_packed()
1585 curr = vq->free_head; in virtqueue_add_packed()
1587 vq->packed.avail_used_flags = avail_used_flags; in virtqueue_add_packed()
1592 vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]); in virtqueue_add_packed()
1593 curr = vq->packed.desc_extra[curr].next; in virtqueue_add_packed()
1595 if (i >= vq->packed.vring.num) in virtqueue_add_packed()
1600 return -EIO; in virtqueue_add_packed()
1622 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_packed()
1624 old = vq->packed.next_avail_idx - vq->num_added; in virtqueue_kick_prepare_packed()
1625 new = vq->packed.next_avail_idx; in virtqueue_kick_prepare_packed()
1626 vq->num_added = 0; in virtqueue_kick_prepare_packed()
1628 snapshot.u32 = *(u32 *)vq->packed.vring.device; in virtqueue_kick_prepare_packed()
1643 if (wrap_counter != vq->packed.avail_wrap_counter) in virtqueue_kick_prepare_packed()
1644 event_idx -= vq->packed.vring.num; in virtqueue_kick_prepare_packed()
1653 unsigned int id, void **ctx) in detach_buf_packed() argument
1659 state = &vq->packed.desc_state[id]; in detach_buf_packed()
1662 state->data = NULL; in detach_buf_packed()
1664 vq->packed.desc_extra[state->last].next = vq->free_head; in detach_buf_packed()
1665 vq->free_head = id; in detach_buf_packed()
1666 vq->vq.num_free += state->num; in detach_buf_packed()
1668 if (unlikely(vq->use_dma_api)) { in detach_buf_packed()
1670 for (i = 0; i < state->num; i++) { in detach_buf_packed()
1672 &vq->packed.desc_extra[curr]); in detach_buf_packed()
1673 curr = vq->packed.desc_extra[curr].next; in detach_buf_packed()
1677 if (vq->indirect) { in detach_buf_packed()
1682 desc = state->indir_desc; in detach_buf_packed()
1686 if (vq->use_dma_api) { in detach_buf_packed()
1687 len = vq->packed.desc_extra[id].len; in detach_buf_packed()
1696 state->indir_desc = NULL; in detach_buf_packed()
1697 } else if (ctx) { in detach_buf_packed()
1698 *ctx = state->indir_desc; in detach_buf_packed()
1708 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags); in is_used_desc_packed()
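
Only the flags load of is_used_desc_packed() matches the query; the remainder of the test, sketched from the packed-ring rule that a descriptor is used once its AVAIL and USED bits both equal the wrap counter the driver expects:

        bool avail, used;

        avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
        used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

        return avail == used && used == used_wrap_counter;
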
1721 last_used_idx = READ_ONCE(vq->last_used_idx); in more_used_packed()
1729 void **ctx) in virtqueue_get_buf_ctx_packed() argument
1738 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_packed()
1750 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_packed()
1752 last_used_idx = READ_ONCE(vq->last_used_idx); in virtqueue_get_buf_ctx_packed()
1755 id = le16_to_cpu(vq->packed.vring.desc[last_used].id); in virtqueue_get_buf_ctx_packed()
1756 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len); in virtqueue_get_buf_ctx_packed()
1758 if (unlikely(id >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1762 if (unlikely(!vq->packed.desc_state[id].data)) { in virtqueue_get_buf_ctx_packed()
1768 ret = vq->packed.desc_state[id].data; in virtqueue_get_buf_ctx_packed()
1769 detach_buf_packed(vq, id, ctx); in virtqueue_get_buf_ctx_packed()
1771 last_used += vq->packed.desc_state[id].num; in virtqueue_get_buf_ctx_packed()
1772 if (unlikely(last_used >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1773 last_used -= vq->packed.vring.num; in virtqueue_get_buf_ctx_packed()
1778 WRITE_ONCE(vq->last_used_idx, last_used); in virtqueue_get_buf_ctx_packed()
1785 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) in virtqueue_get_buf_ctx_packed()
1786 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_packed()
1787 &vq->packed.vring.driver->off_wrap, in virtqueue_get_buf_ctx_packed()
1788 cpu_to_le16(vq->last_used_idx)); in virtqueue_get_buf_ctx_packed()
1800 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_disable_cb_packed()
1801 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; in virtqueue_disable_cb_packed()
1807 if (vq->event_triggered) in virtqueue_disable_cb_packed()
1810 vq->packed.vring.driver->flags = in virtqueue_disable_cb_packed()
1811 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_disable_cb_packed()
1826 if (vq->event) { in virtqueue_enable_cb_prepare_packed()
1827 vq->packed.vring.driver->off_wrap = in virtqueue_enable_cb_prepare_packed()
1828 cpu_to_le16(vq->last_used_idx); in virtqueue_enable_cb_prepare_packed()
1833 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_prepare_packed()
1836 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_prepare_packed()
1837 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_prepare_packed()
1840 vq->packed.vring.driver->flags = in virtqueue_enable_cb_prepare_packed()
1841 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_prepare_packed()
1845 return vq->last_used_idx; in virtqueue_enable_cb_prepare_packed()
1873 if (vq->event) { in virtqueue_enable_cb_delayed_packed()
1875 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4; in virtqueue_enable_cb_delayed_packed()
1876 last_used_idx = READ_ONCE(vq->last_used_idx); in virtqueue_enable_cb_delayed_packed()
1880 if (used_idx >= vq->packed.vring.num) { in virtqueue_enable_cb_delayed_packed()
1881 used_idx -= vq->packed.vring.num; in virtqueue_enable_cb_delayed_packed()
1885 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx | in virtqueue_enable_cb_delayed_packed()
1892 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1895 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_delayed_packed()
1896 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_delayed_packed()
1899 vq->packed.vring.driver->flags = in virtqueue_enable_cb_delayed_packed()
1900 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_delayed_packed()
1905 * before re-checking for more used buffers. in virtqueue_enable_cb_delayed_packed()
1907 virtio_mb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1909 last_used_idx = READ_ONCE(vq->last_used_idx); in virtqueue_enable_cb_delayed_packed()
1929 for (i = 0; i < vq->packed.vring.num; i++) { in virtqueue_detach_unused_buf_packed()
1930 if (!vq->packed.desc_state[i].data) in virtqueue_detach_unused_buf_packed()
1933 buf = vq->packed.desc_state[i].data; in virtqueue_detach_unused_buf_packed()
1939 BUG_ON(vq->vq.num_free != vq->packed.vring.num); in virtqueue_detach_unused_buf_packed()
1957 for (i = 0; i < num - 1; i++) in vring_alloc_desc_extra()
1967 if (vring_packed->vring.desc) in vring_free_packed()
1968 vring_free_queue(vdev, vring_packed->ring_size_in_bytes, in vring_free_packed()
1969 vring_packed->vring.desc, in vring_free_packed()
1970 vring_packed->ring_dma_addr, in vring_free_packed()
1973 if (vring_packed->vring.driver) in vring_free_packed()
1974 vring_free_queue(vdev, vring_packed->event_size_in_bytes, in vring_free_packed()
1975 vring_packed->vring.driver, in vring_free_packed()
1976 vring_packed->driver_event_dma_addr, in vring_free_packed()
1979 if (vring_packed->vring.device) in vring_free_packed()
1980 vring_free_queue(vdev, vring_packed->event_size_in_bytes, in vring_free_packed()
1981 vring_packed->vring.device, in vring_free_packed()
1982 vring_packed->device_event_dma_addr, in vring_free_packed()
1985 kfree(vring_packed->desc_state); in vring_free_packed()
1986 kfree(vring_packed->desc_extra); in vring_free_packed()
2007 vring_packed->vring.desc = ring; in vring_alloc_queue_packed()
2008 vring_packed->ring_dma_addr = ring_dma_addr; in vring_alloc_queue_packed()
2009 vring_packed->ring_size_in_bytes = ring_size_in_bytes; in vring_alloc_queue_packed()
2020 vring_packed->vring.driver = driver; in vring_alloc_queue_packed()
2021 vring_packed->event_size_in_bytes = event_size_in_bytes; in vring_alloc_queue_packed()
2022 vring_packed->driver_event_dma_addr = driver_event_dma_addr; in vring_alloc_queue_packed()
2031 vring_packed->vring.device = device; in vring_alloc_queue_packed()
2032 vring_packed->device_event_dma_addr = device_event_dma_addr; in vring_alloc_queue_packed()
2034 vring_packed->vring.num = num; in vring_alloc_queue_packed()
2040 return -ENOMEM; in vring_alloc_queue_packed()
2047 u32 num = vring_packed->vring.num; in vring_alloc_state_extra_packed()
2059 vring_packed->desc_state = state; in vring_alloc_state_extra_packed()
2060 vring_packed->desc_extra = extra; in vring_alloc_state_extra_packed()
2067 return -ENOMEM; in vring_alloc_state_extra_packed()
2073 vring_packed->next_avail_idx = 0; in virtqueue_vring_init_packed()
2074 vring_packed->avail_wrap_counter = 1; in virtqueue_vring_init_packed()
2075 vring_packed->event_flags_shadow = 0; in virtqueue_vring_init_packed()
2076 vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL; in virtqueue_vring_init_packed()
2080 vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; in virtqueue_vring_init_packed()
2081 vring_packed->vring.driver->flags = in virtqueue_vring_init_packed()
2082 cpu_to_le16(vring_packed->event_flags_shadow); in virtqueue_vring_init_packed()
2089 vq->packed = *vring_packed; in virtqueue_vring_attach_packed()
2092 vq->free_head = 0; in virtqueue_vring_attach_packed()
2097 memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes); in virtqueue_reinit_packed()
2098 memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes); in virtqueue_reinit_packed()
2101 memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes); in virtqueue_reinit_packed()
2103 virtqueue_init(vq, vq->packed.vring.num); in virtqueue_reinit_packed()
2104 virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback); in virtqueue_reinit_packed()
2124 vq->vq.callback = callback; in __vring_new_virtqueue_packed()
2125 vq->vq.vdev = vdev; in __vring_new_virtqueue_packed()
2126 vq->vq.name = name; in __vring_new_virtqueue_packed()
2127 vq->vq.index = index; in __vring_new_virtqueue_packed()
2128 vq->vq.reset = false; in __vring_new_virtqueue_packed()
2129 vq->we_own_ring = false; in __vring_new_virtqueue_packed()
2130 vq->notify = notify; in __vring_new_virtqueue_packed()
2131 vq->weak_barriers = weak_barriers; in __vring_new_virtqueue_packed()
2133 vq->broken = true; in __vring_new_virtqueue_packed()
2135 vq->broken = false; in __vring_new_virtqueue_packed()
2137 vq->packed_ring = true; in __vring_new_virtqueue_packed()
2138 vq->dma_dev = dma_dev; in __vring_new_virtqueue_packed()
2139 vq->use_dma_api = vring_use_dma_api(vdev); in __vring_new_virtqueue_packed()
2141 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in __vring_new_virtqueue_packed()
2143 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in __vring_new_virtqueue_packed()
2146 vq->weak_barriers = false; in __vring_new_virtqueue_packed()
2156 virtqueue_init(vq, vring_packed->vring.num); in __vring_new_virtqueue_packed()
2159 spin_lock(&vdev->vqs_list_lock); in __vring_new_virtqueue_packed()
2160 list_add_tail(&vq->vq.list, &vdev->vqs); in __vring_new_virtqueue_packed()
2161 spin_unlock(&vdev->vqs_list_lock); in __vring_new_virtqueue_packed()
2162 return &vq->vq; in __vring_new_virtqueue_packed()
2191 to_vvq(vq)->we_own_ring = true; in vring_create_virtqueue_packed()
2200 struct virtio_device *vdev = _vq->vdev; in virtqueue_resize_packed()
2210 vring_free(&vq->vq); in virtqueue_resize_packed()
2212 virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback); in virtqueue_resize_packed()
2223 return -ENOMEM; in virtqueue_resize_packed()
2230 struct virtio_device *vdev = vq->vq.vdev; in virtqueue_disable_and_recycle()
2234 if (!vq->we_own_ring) in virtqueue_disable_and_recycle()
2235 return -EPERM; in virtqueue_disable_and_recycle()
2237 if (!vdev->config->disable_vq_and_reset) in virtqueue_disable_and_recycle()
2238 return -ENOENT; in virtqueue_disable_and_recycle()
2240 if (!vdev->config->enable_vq_after_reset) in virtqueue_disable_and_recycle()
2241 return -ENOENT; in virtqueue_disable_and_recycle()
2243 err = vdev->config->disable_vq_and_reset(_vq); in virtqueue_disable_and_recycle()
2256 struct virtio_device *vdev = vq->vq.vdev; in virtqueue_enable_after_reset()
2258 if (vdev->config->enable_vq_after_reset(_vq)) in virtqueue_enable_after_reset()
2259 return -EBUSY; in virtqueue_enable_after_reset()
2274 void *ctx, in virtqueue_add() argument
2280 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, in virtqueue_add()
2281 out_sgs, in_sgs, data, ctx, premapped, gfp) : in virtqueue_add()
2283 out_sgs, in_sgs, data, ctx, premapped, gfp); in virtqueue_add()
2287 * virtqueue_add_sgs - expose buffers to other end
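
A minimal hypothetical caller, shaped like virtio_blk's request path (req, vq and the GFP choice are illustrative): one device-readable header and one device-writable status buffer, with req as the opaque token that virtqueue_get_buf() later hands back:

        struct scatterlist hdr, status, *sgs[2];
        int err;

        sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));           /* out: device reads */
        sg_init_one(&status, &req->status, sizeof(req->status));  /* in: device writes */
        sgs[0] = &hdr;
        sgs[1] = &status;

        err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
        if (err)
                return err;     /* e.g. -ENOSPC when the ring is full */
        virtqueue_kick(vq);
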
2322 * virtqueue_add_outbuf - expose output buffers to other end
2324 * @sg: scatterlist (must be well-formed and terminated!)
2344 * virtqueue_add_outbuf_premapped - expose output buffers to other end
2346 * @sg: scatterlist (must be well-formed and terminated!)
2367 * virtqueue_add_inbuf - expose input buffers to other end
2369 * @sg: scatterlist (must be well-formed and terminated!)
2389 * virtqueue_add_inbuf_ctx - expose input buffers to other end
2391 * @sg: scatterlist (must be well-formed and terminated!)
2394 * @ctx: extra context for the token
2405 void *ctx, in virtqueue_add_inbuf_ctx() argument
2408 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp); in virtqueue_add_inbuf_ctx()
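
The ctx token cannot be combined with indirect descriptors (see the BUG_ON(ctx && vq->indirect) at file lines 550 and 1469): it is only legal on rings created with context = true, and it is handed back by virtqueue_get_buf_ctx(). A hypothetical pairing, with meta_of() standing in for driver-private metadata:

        struct scatterlist sg;
        unsigned int len;
        void *meta, *data;
        int err;

        sg_init_one(&sg, buf, buf_len);

        /* Producer: the metadata rides along as the ctx token. */
        err = virtqueue_add_inbuf_ctx(vq, &sg, 1, buf, meta_of(buf), GFP_ATOMIC);
        if (!err)
                virtqueue_kick(vq);

        /* Consumer (later, e.g. in the vq callback): both come back together. */
        data = virtqueue_get_buf_ctx(vq, &len, &meta);
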
2413 * virtqueue_add_inbuf_premapped - expose input buffers to other end
2415 * @sg: scatterlist (must be well-formed and terminated!)
2418 * @ctx: extra context for the token
2430 void *ctx, in virtqueue_add_inbuf_premapped() argument
2433 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp); in virtqueue_add_inbuf_premapped()
2438 * virtqueue_dma_dev - get the dma dev
2447 if (vq->use_dma_api) in virtqueue_dma_dev()
2455 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
2469 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) : in virtqueue_kick_prepare()
2475 * virtqueue_notify - second half of split virtqueue_kick call.
2486 if (unlikely(vq->broken)) in virtqueue_notify()
2490 if (!vq->notify(_vq)) { in virtqueue_notify()
2491 vq->broken = true; in virtqueue_notify()
2499 * virtqueue_kick - update after add_buf
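
virtqueue_kick() is simply virtqueue_kick_prepare() plus virtqueue_notify(); the split exists so a driver can decide whether to notify while holding its own lock, but issue the (potentially vmexit-causing) notification outside it. A hypothetical pattern, with priv, sgs and req as stand-ins:

        unsigned long flags;
        bool kick;
        int err;

        spin_lock_irqsave(&priv->lock, flags);
        err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
        kick = virtqueue_kick_prepare(vq);
        spin_unlock_irqrestore(&priv->lock, flags);

        if (kick)
                virtqueue_notify(vq);   /* the expensive part runs unlocked */
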
2519 * virtqueue_get_buf_ctx - get the next used buffer
2522 * @ctx: extra context for the token
2536 void **ctx) in virtqueue_get_buf_ctx() argument
2540 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) : in virtqueue_get_buf_ctx()
2541 virtqueue_get_buf_ctx_split(_vq, len, ctx); in virtqueue_get_buf_ctx()
2551 * virtqueue_disable_cb - disable callbacks
2563 if (vq->packed_ring) in virtqueue_disable_cb()
2571 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
2574 * This re-enables callbacks; it returns current queue state
2586 if (vq->event_triggered) in virtqueue_enable_cb_prepare()
2587 vq->event_triggered = false; in virtqueue_enable_cb_prepare()
2589 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) : in virtqueue_enable_cb_prepare()
2595 * virtqueue_poll - query pending used buffers
2607 if (unlikely(vq->broken)) in virtqueue_poll()
2610 virtio_mb(vq->weak_barriers); in virtqueue_poll()
2611 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : in virtqueue_poll()
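
The opaque value from virtqueue_enable_cb_prepare() exists to be fed to virtqueue_poll(), closing the race between re-enabling callbacks and a buffer sneaking in. A hypothetical consumer (resume_polling() and priv are stand-ins):

        unsigned int opaque;

        opaque = virtqueue_enable_cb_prepare(vq);
        if (unlikely(virtqueue_poll(vq, opaque))) {
                /* A buffer arrived after the snapshot: undo and keep polling. */
                virtqueue_disable_cb(vq);
                resume_polling(priv);
        }
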
2617 * virtqueue_enable_cb - restart callbacks after disable_cb.
2620 * This re-enables callbacks; it returns "false" if there are pending
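
Together with virtqueue_disable_cb() and virtqueue_get_buf(), this return value yields the canonical race-free drain loop; a hypothetical completion handler (complete_request() is a stand-in):

        unsigned int len;
        void *req;

        do {
                virtqueue_disable_cb(vq);
                while ((req = virtqueue_get_buf(vq, &len)) != NULL)
                        complete_request(req, len);
        } while (!virtqueue_enable_cb(vq));
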
2636 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
2639 * This re-enables callbacks but hints to the other side to delay
2652 if (vq->event_triggered) in virtqueue_enable_cb_delayed()
2653 vq->event_triggered = false; in virtqueue_enable_cb_delayed()
2655 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : in virtqueue_enable_cb_delayed()
2661 * virtqueue_detach_unused_buf - detach first unused buffer
2672 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) : in virtqueue_detach_unused_buf()
2679 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq); in more_used()
2683 * vring_interrupt - notify a virtqueue on an interrupt
2699 if (unlikely(vq->broken)) { in vring_interrupt()
2701 dev_warn_once(&vq->vq.vdev->dev, in vring_interrupt()
2710 if (vq->event) in vring_interrupt()
2711 data_race(vq->event_triggered = true); in vring_interrupt()
2713 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); in vring_interrupt()
2714 if (vq->vq.callback) in vring_interrupt()
2715 vq->vq.callback(&vq->vq); in vring_interrupt()
2737 context, notify, callback, name, vdev->dev.parent); in vring_create_virtqueue()
2741 context, notify, callback, name, vdev->dev.parent); in vring_create_virtqueue()
2771 * virtqueue_resize - resize the vring of vq
2787 * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size.
2789 * -EBUSY: Failed to sync with device, vq may not work properly
2790 * -ENOENT: Transport or device not supported
2791 * -E2BIG/-EINVAL: num is invalid (zero, or larger than vq->vq.num_max)
2792 * -EPERM: Operation not permitted
2802 if (num > vq->vq.num_max) in virtqueue_resize()
2803 return -E2BIG; in virtqueue_resize()
2806 return -EINVAL; in virtqueue_resize()
2808 if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num) in virtqueue_resize()
2817 if (vq->packed_ring) in virtqueue_resize()
2827 * virtqueue_reset - detach and recycle all unused buffers
2837 * -EBUSY: Failed to sync with device, vq may not work properly
2838 * -ENOENT: Transport or device not supported
2839 * -EPERM: Operation not permitted
2854 if (vq->packed_ring) in virtqueue_reset()
2884 name, vdev->dev.parent); in vring_new_virtqueue()
2890 vdev->dev.parent); in vring_new_virtqueue()
2898 if (vq->we_own_ring) { in vring_free()
2899 if (vq->packed_ring) { in vring_free()
2900 vring_free_queue(vq->vq.vdev, in vring_free()
2901 vq->packed.ring_size_in_bytes, in vring_free()
2902 vq->packed.vring.desc, in vring_free()
2903 vq->packed.ring_dma_addr, in vring_free()
2906 vring_free_queue(vq->vq.vdev, in vring_free()
2907 vq->packed.event_size_in_bytes, in vring_free()
2908 vq->packed.vring.driver, in vring_free()
2909 vq->packed.driver_event_dma_addr, in vring_free()
2912 vring_free_queue(vq->vq.vdev, in vring_free()
2913 vq->packed.event_size_in_bytes, in vring_free()
2914 vq->packed.vring.device, in vring_free()
2915 vq->packed.device_event_dma_addr, in vring_free()
2918 kfree(vq->packed.desc_state); in vring_free()
2919 kfree(vq->packed.desc_extra); in vring_free()
2921 vring_free_queue(vq->vq.vdev, in vring_free()
2922 vq->split.queue_size_in_bytes, in vring_free()
2923 vq->split.vring.desc, in vring_free()
2924 vq->split.queue_dma_addr, in vring_free()
2928 if (!vq->packed_ring) { in vring_free()
2929 kfree(vq->split.desc_state); in vring_free()
2930 kfree(vq->split.desc_extra); in vring_free()
2938 spin_lock(&vq->vq.vdev->vqs_list_lock); in vring_del_virtqueue()
2939 list_del(&_vq->list); in vring_del_virtqueue()
2940 spin_unlock(&vq->vq.vdev->vqs_list_lock); in vring_del_virtqueue()
2953 if (vq->packed_ring) in vring_notification_data()
2954 next = (vq->packed.next_avail_idx & in vring_notification_data()
2955 ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) | in vring_notification_data()
2956 vq->packed.avail_wrap_counter << in vring_notification_data()
2959 next = vq->split.avail_idx_shadow; in vring_notification_data()
2961 return next << 16 | _vq->index; in vring_notification_data()
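
For reference, the assembled value follows the VIRTIO_F_NOTIFICATION_DATA layout:

        /*
         * next << 16 | _vq->index:
         *   bits  0..15  virtqueue index
         *   bits 16..30  next available ring index (avail_idx_shadow for
         *                split rings, next_avail_idx for packed rings)
         *   bit  31      avail wrap counter (packed rings only)
         */
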
2965 /* Manipulates transport-specific feature bits. */
2995 * virtqueue_get_vring_size - return the size of the virtqueue's vring
3006 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num; in virtqueue_get_vring_size()
3018 WRITE_ONCE(vq->broken, true); in __virtqueue_break()
3030 WRITE_ONCE(vq->broken, false); in __virtqueue_unbreak()
3038 return READ_ONCE(vq->broken); in virtqueue_is_broken()
3050 spin_lock(&dev->vqs_list_lock); in virtio_break_device()
3051 list_for_each_entry(_vq, &dev->vqs, list) { in virtio_break_device()
3055 WRITE_ONCE(vq->broken, true); in virtio_break_device()
3057 spin_unlock(&dev->vqs_list_lock); in virtio_break_device()
3064 * vq->broken. This should only be used in specific cases, e.g.
3072 spin_lock(&dev->vqs_list_lock); in __virtio_unbreak_device()
3073 list_for_each_entry(_vq, &dev->vqs, list) { in __virtio_unbreak_device()
3077 WRITE_ONCE(vq->broken, false); in __virtio_unbreak_device()
3079 spin_unlock(&dev->vqs_list_lock); in __virtio_unbreak_device()
3087 BUG_ON(!vq->we_own_ring); in virtqueue_get_desc_addr()
3089 if (vq->packed_ring) in virtqueue_get_desc_addr()
3090 return vq->packed.ring_dma_addr; in virtqueue_get_desc_addr()
3092 return vq->split.queue_dma_addr; in virtqueue_get_desc_addr()
3100 BUG_ON(!vq->we_own_ring); in virtqueue_get_avail_addr()
3102 if (vq->packed_ring) in virtqueue_get_avail_addr()
3103 return vq->packed.driver_event_dma_addr; in virtqueue_get_avail_addr()
3105 return vq->split.queue_dma_addr + in virtqueue_get_avail_addr()
3106 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc); in virtqueue_get_avail_addr()
3114 BUG_ON(!vq->we_own_ring); in virtqueue_get_used_addr()
3116 if (vq->packed_ring) in virtqueue_get_used_addr()
3117 return vq->packed.device_event_dma_addr; in virtqueue_get_used_addr()
3119 return vq->split.queue_dma_addr + in virtqueue_get_used_addr()
3120 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc); in virtqueue_get_used_addr()
3127 return &to_vvq(vq)->split.vring; in virtqueue_get_vring()
3132 * virtqueue_dma_map_single_attrs - map DMA for _vq
3140 * passed to this _vq when it is in pre-mapped mode.
3151 if (!vq->use_dma_api) { in virtqueue_dma_map_single_attrs()
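
A hypothetical premapped receive path tying this to virtqueue_add_inbuf_premapped() above (buf and size are stand-ins): the driver maps the buffer itself and stores the DMA address directly in the scatterlist, which the core consumes as-is in premapped mode:

        struct scatterlist sg;
        dma_addr_t addr;
        int err;

        addr = virtqueue_dma_map_single_attrs(vq, buf, size,
                                              DMA_FROM_DEVICE, 0);
        if (virtqueue_dma_mapping_error(vq, addr))
                return -ENOMEM;

        sg_init_table(&sg, 1);
        sg.dma_address = addr;  /* used verbatim; no second mapping */
        sg.length = size;

        err = virtqueue_add_inbuf_premapped(vq, &sg, 1, buf, NULL, GFP_ATOMIC);
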
3161 * virtqueue_dma_unmap_single_attrs - unmap DMA for _vq
3177 if (!vq->use_dma_api) in virtqueue_dma_unmap_single_attrs()
3185 * virtqueue_dma_mapping_error - check dma address
3195 if (!vq->use_dma_api) in virtqueue_dma_mapping_error()
3203 * virtqueue_dma_need_sync - check a dma address needs sync
3216 if (!vq->use_dma_api) in virtqueue_dma_need_sync()
3224 * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
3243 if (!vq->use_dma_api) in virtqueue_dma_sync_single_range_for_cpu()
3251 * virtqueue_dma_sync_single_range_for_device - dma sync for device
3269 if (!vq->use_dma_api) in virtqueue_dma_sync_single_range_for_device()