Lines matching "config" and "cond" in drivers/block/virtio_blk.c

1 // SPDX-License-Identifier: GPL-2.0-only
15 #include <linux/blk-mq.h>
58 * virtblk_remove() sets vblk->vdev to NULL.
60 * blk-mq, virtqueue processing, and sysfs attribute code paths are
61 * shut down before vblk->vdev is set to NULL and therefore do not need
73 /* Process context for config space updates */
76 /* Ida index - used to track minor number allocations. */
133 struct virtio_blk *vblk = hctx->queue->queuedata; in get_virtio_blk_vq()
134 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in get_virtio_blk_vq()
144 sg_init_one(&out_hdr, &vbr->out_hdr, sizeof(vbr->out_hdr)); in virtblk_add_req()
147 if (vbr->sg_table.nents) { in virtblk_add_req()
148 if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT)) in virtblk_add_req()
149 sgs[num_out++] = vbr->sg_table.sgl; in virtblk_add_req()
151 sgs[num_out + num_in++] = vbr->sg_table.sgl; in virtblk_add_req()
154 sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len); in virtblk_add_req()
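/*
 * Sketch (assumed continuation, not quoted from the file): the groups
 * built above are handed to the ring with virtqueue_add_sgs(). The
 * device-readable out_hdr goes first, the data scatterlist sits on the
 * out or in side depending on transfer direction, and the
 * device-writable status/in-header comes last.
 */
sgs[num_out + num_in++] = &in_hdr;
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);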
173 return -ENOMEM; in virtblk_setup_discard_write_zeroes_erase()
176 * Single max discard segment means multi-range discard isn't in virtblk_setup_discard_write_zeroes_erase()
181 if (queue_max_discard_segments(req->q) == 1) { in virtblk_setup_discard_write_zeroes_erase()
188 u64 sector = bio->bi_iter.bi_sector; in virtblk_setup_discard_write_zeroes_erase()
189 u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT; in virtblk_setup_discard_write_zeroes_erase()
200 bvec_set_virt(&req->special_vec, range, sizeof(*range) * segments); in virtblk_setup_discard_write_zeroes_erase()
201 req->rq_flags |= RQF_SPECIAL_PAYLOAD; in virtblk_setup_discard_write_zeroes_erase()
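/*
 * Illustrative sketch of the payload format: each element is a
 * little-endian struct virtio_blk_discard_write_zeroes
 * (include/uapi/linux/virtio_blk.h), filled per bio along these lines
 * (n is the per-bio index, flags the unmap flag):
 */
range[n].flags = cpu_to_le32(flags);
range[n].num_sectors = cpu_to_le32(num_sectors);
range[n].sector = cpu_to_le64(sector);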
209 sg_free_table_chained(&vbr->sg_table, in virtblk_unmap_data()
221 vbr->sg_table.sgl = vbr->sg; in virtblk_map_data()
222 err = sg_alloc_table_chained(&vbr->sg_table, in virtblk_map_data()
224 vbr->sg_table.sgl, in virtblk_map_data()
227 return -ENOMEM; in virtblk_map_data()
229 return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl); in virtblk_map_data()
234 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) in virtblk_cleanup_cmd()
235 kfree(bvec_virt(&req->special_vec)); in virtblk_cleanup_cmd()
242 size_t in_hdr_len = sizeof(vbr->in_hdr.status); in virtblk_setup_cmd()
251 vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req)); in virtblk_setup_cmd()
270 unmap = !(req->cmd_flags & REQ_NOUNMAP); in virtblk_setup_cmd()
290 in_hdr_len = sizeof(vbr->in_hdr.zone_append); in virtblk_setup_cmd()
310 /* Set fields for non-REQ_OP_DRV_IN request types */ in virtblk_setup_cmd()
311 vbr->in_hdr_len = in_hdr_len; in virtblk_setup_cmd()
312 vbr->out_hdr.type = cpu_to_virtio32(vdev, type); in virtblk_setup_cmd()
313 vbr->out_hdr.sector = cpu_to_virtio64(vdev, sector); in virtblk_setup_cmd()
326 * in-header. This helper fetches its value for all in-header formats
331 return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1); in virtblk_vbr_status()
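/*
 * Example (assumed layouts): a plain request's in-header is just the
 * one status byte, so in_hdr_len == 1 and the expression reads
 * in_hdr.status directly; a zone append's in-header also carries the
 * resulting sector, and the status remains its final byte.
 */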
338 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata; in virtblk_request_done()
344 req->__sector = virtio64_to_cpu(vblk->vdev, in virtblk_request_done()
345 vbr->in_hdr.zone_append.sector); in virtblk_request_done()
352 struct virtio_blk *vblk = vq->vdev->priv; in virtblk_done()
354 int qid = vq->index; in virtblk_done()
359 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtblk_done()
362 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { in virtblk_done()
365 if (likely(!blk_should_fake_timeout(req->q))) in virtblk_done()
373 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); in virtblk_done()
374 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtblk_done()
379 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_commit_rqs()
380 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in virtio_commit_rqs()
383 spin_lock_irq(&vq->lock); in virtio_commit_rqs()
384 kick = virtqueue_kick_prepare(vq->vq); in virtio_commit_rqs()
385 spin_unlock_irq(&vq->lock); in virtio_commit_rqs()
388 virtqueue_notify(vq->vq); in virtio_commit_rqs()
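/*
 * Note the split: virtqueue_kick_prepare() runs under the vq lock so
 * the avail-index update stays ordered, while the potentially costly
 * virtqueue_notify() VM exit is issued after the lock is dropped.
 */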
395 case -ENOSPC: in virtblk_fail_to_queue()
397 case -ENOMEM: in virtblk_fail_to_queue()
412 status = virtblk_setup_cmd(vblk->vdev, req, vbr); in virtblk_prep_rq()
418 return virtblk_fail_to_queue(req, -ENOMEM); in virtblk_prep_rq()
419 vbr->sg_table.nents = num; in virtblk_prep_rq()
429 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_queue_rq()
430 struct request *req = bd->rq; in virtio_queue_rq()
433 int qid = hctx->queue_num; in virtio_queue_rq()
442 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
443 err = virtblk_add_req(vblk->vqs[qid].vq, vbr); in virtio_queue_rq()
445 virtqueue_kick(vblk->vqs[qid].vq); in virtio_queue_rq()
446 /* Don't stop the queue if -ENOMEM: we may have failed to in virtio_queue_rq()
449 if (err == -ENOSPC) in virtio_queue_rq()
451 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
456 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) in virtio_queue_rq()
458 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
461 virtqueue_notify(vblk->vqs[qid].vq); in virtio_queue_rq()
467 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata; in virtblk_prep_rq_batch()
470 return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK; in virtblk_prep_rq_batch()
480 spin_lock_irqsave(&vq->lock, flags); in virtblk_add_req_batch()
486 err = virtblk_add_req(vq->vq, vbr); in virtblk_add_req_batch()
494 kick = virtqueue_kick_prepare(vq->vq); in virtblk_add_req_batch()
495 spin_unlock_irqrestore(&vq->lock, flags); in virtblk_add_req_batch()
498 virtqueue_notify(vq->vq); in virtblk_add_req_batch()
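/*
 * The batch path amortizes the cost per request: one lock round trip
 * queues every prepared request from the plug list, and a single
 * kick_prepare()/notify() pair replaces one notification per request.
 */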
509 struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx); in virtio_queue_rqs()
531 struct request_queue *q = vblk->disk->queue; in virtblk_alloc_report_buffer()
536 get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors)); in virtblk_alloc_report_buffer()
560 struct request_queue *q = vblk->disk->queue; in virtblk_submit_zone_report()
570 vbr->in_hdr_len = sizeof(vbr->in_hdr.status); in virtblk_submit_zone_report()
571 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT); in virtblk_submit_zone_report()
572 vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector); in virtblk_submit_zone_report()
579 err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status)); in virtblk_submit_zone_report()
591 zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start); in virtblk_parse_zone()
592 if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk)) in virtblk_parse_zone()
593 zone.len = vblk->zone_sectors; in virtblk_parse_zone()
595 zone.len = get_capacity(vblk->disk) - zone.start; in virtblk_parse_zone()
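/*
 * Worked example with illustrative numbers: zone_sectors = 524288
 * (256 MiB zones) on a 1000000000-sector disk leaves a final zone
 * starting at sector 999817216 with len = 182784 rather than a full
 * zone.
 */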
596 zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap); in virtblk_parse_zone()
597 zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp); in virtblk_parse_zone()
599 switch (entry->z_type) { in virtblk_parse_zone()
610 dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n", in virtblk_parse_zone()
611 zone.start, entry->z_type); in virtblk_parse_zone()
612 return -EIO; in virtblk_parse_zone()
615 switch (entry->z_state) { in virtblk_parse_zone()
617 zone.cond = BLK_ZONE_COND_EMPTY; in virtblk_parse_zone()
620 zone.cond = BLK_ZONE_COND_CLOSED; in virtblk_parse_zone()
623 zone.cond = BLK_ZONE_COND_FULL; in virtblk_parse_zone()
627 zone.cond = BLK_ZONE_COND_EXP_OPEN; in virtblk_parse_zone()
630 zone.cond = BLK_ZONE_COND_IMP_OPEN; in virtblk_parse_zone()
633 zone.cond = BLK_ZONE_COND_NOT_WP; in virtblk_parse_zone()
636 zone.cond = BLK_ZONE_COND_READONLY; in virtblk_parse_zone()
640 zone.cond = BLK_ZONE_COND_OFFLINE; in virtblk_parse_zone()
644 dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n", in virtblk_parse_zone()
645 zone.start, entry->z_state); in virtblk_parse_zone()
646 return -EIO; in virtblk_parse_zone()
660 struct virtio_blk *vblk = disk->private_data; in virtblk_report_zones()
667 if (WARN_ON_ONCE(!vblk->zone_sectors)) in virtblk_report_zones()
668 return -EOPNOTSUPP; in virtblk_report_zones()
672 return -ENOMEM; in virtblk_report_zones()
674 mutex_lock(&vblk->vdev_mutex); in virtblk_report_zones()
676 if (!vblk->vdev) { in virtblk_report_zones()
677 ret = -ENXIO; in virtblk_report_zones()
681 while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) { in virtblk_report_zones()
689 nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones), in virtblk_report_zones()
695 ret = virtblk_parse_zone(vblk, &report->zones[i], in virtblk_report_zones()
700 sector = virtio64_to_cpu(vblk->vdev, in virtblk_report_zones()
701 report->zones[i].z_start) + in virtblk_report_zones()
702 vblk->zone_sectors; in virtblk_report_zones()
710 ret = -EINVAL; in virtblk_report_zones()
712 mutex_unlock(&vblk->vdev_mutex); in virtblk_report_zones()
720 struct virtio_device *vdev = vblk->vdev; in virtblk_read_zoned_limits()
723 dev_dbg(&vdev->dev, "probing host-managed zoned device\n"); in virtblk_read_zoned_limits()
725 lim->features |= BLK_FEAT_ZONED; in virtblk_read_zoned_limits()
729 lim->max_open_zones = v; in virtblk_read_zoned_limits()
730 dev_dbg(&vdev->dev, "max open zones = %u\n", v); in virtblk_read_zoned_limits()
734 lim->max_active_zones = v; in virtblk_read_zoned_limits()
735 dev_dbg(&vdev->dev, "max active zones = %u\n", v); in virtblk_read_zoned_limits()
740 dev_warn(&vdev->dev, "zero write granularity reported\n"); in virtblk_read_zoned_limits()
741 return -ENODEV; in virtblk_read_zoned_limits()
743 lim->physical_block_size = wg; in virtblk_read_zoned_limits()
744 lim->io_min = wg; in virtblk_read_zoned_limits()
746 dev_dbg(&vdev->dev, "write granularity = %u\n", wg); in virtblk_read_zoned_limits()
753 &vblk->zone_sectors); in virtblk_read_zoned_limits()
754 if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) { in virtblk_read_zoned_limits()
755 dev_err(&vdev->dev, in virtblk_read_zoned_limits()
757 vblk->zone_sectors); in virtblk_read_zoned_limits()
758 return -ENODEV; in virtblk_read_zoned_limits()
760 lim->chunk_sectors = vblk->zone_sectors; in virtblk_read_zoned_limits()
761 dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors); in virtblk_read_zoned_limits()
764 dev_warn(&vblk->vdev->dev, in virtblk_read_zoned_limits()
766 lim->max_hw_discard_sectors = 0; in virtblk_read_zoned_limits()
772 dev_warn(&vdev->dev, "zero max_append_sectors reported\n"); in virtblk_read_zoned_limits()
773 return -ENODEV; in virtblk_read_zoned_limits()
776 dev_err(&vdev->dev, in virtblk_read_zoned_limits()
779 return -ENODEV; in virtblk_read_zoned_limits()
781 lim->max_hw_zone_append_sectors = v; in virtblk_read_zoned_limits()
782 dev_dbg(&vdev->dev, "max append sectors = %u\n", v); in virtblk_read_zoned_limits()
788 * Zoned block device support is not configured in this kernel, host-managed
795 dev_err(&vblk->vdev->dev, in virtblk_read_zoned_limits()
797 return -EOPNOTSUPP; in virtblk_read_zoned_limits()
805 struct virtio_blk *vblk = disk->private_data; in virtblk_get_id()
806 struct request_queue *q = vblk->disk->queue; in virtblk_get_id()
816 vbr->in_hdr_len = sizeof(vbr->in_hdr.status); in virtblk_get_id()
817 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID); in virtblk_get_id()
818 vbr->out_hdr.sector = 0; in virtblk_get_id()
825 err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status)); in virtblk_get_id()
834 struct virtio_blk *vblk = bd->bd_disk->private_data; in virtblk_getgeo()
837 mutex_lock(&vblk->vdev_mutex); in virtblk_getgeo()
839 if (!vblk->vdev) { in virtblk_getgeo()
840 ret = -ENXIO; in virtblk_getgeo()
844 /* see if the host passed in geometry config */ in virtblk_getgeo()
845 if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { in virtblk_getgeo()
846 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_getgeo()
847 geometry.cylinders, &geo->cylinders); in virtblk_getgeo()
848 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_getgeo()
849 geometry.heads, &geo->heads); in virtblk_getgeo()
850 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_getgeo()
851 geometry.sectors, &geo->sectors); in virtblk_getgeo()
854 geo->heads = 1 << 6; in virtblk_getgeo()
855 geo->sectors = 1 << 5; in virtblk_getgeo()
856 geo->cylinders = get_capacity(bd->bd_disk) >> 11; in virtblk_getgeo()
859 mutex_unlock(&vblk->vdev_mutex); in virtblk_getgeo()
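/*
 * The fallback is the classic fake geometry: 64 heads (1 << 6) times
 * 32 sectors (1 << 5) gives 2048 sectors per cylinder, hence
 * cylinders = capacity >> 11.
 */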
865 struct virtio_blk *vblk = disk->private_data; in virtblk_free_disk()
867 ida_free(&vd_index_ida, vblk->index); in virtblk_free_disk()
868 mutex_destroy(&vblk->vdev_mutex); in virtblk_free_disk()
903 if (err == -EIO) /* Unsupported? Make it empty. */ in serial_show()
914 struct virtio_device *vdev = vblk->vdev; in virtblk_update_capacity()
915 struct request_queue *q = vblk->disk->queue; in virtblk_update_capacity()
930 dev_notice(&vdev->dev, in virtblk_update_capacity()
931 "[%s] %s%llu %d-byte logical blocks (%s/%s)\n", in virtblk_update_capacity()
932 vblk->disk->disk_name, in virtblk_update_capacity()
939 set_capacity_and_notify(vblk->disk, capacity); in virtblk_update_capacity()
952 struct virtio_blk *vblk = vdev->priv; in virtblk_config_changed()
954 queue_work(virtblk_wq, &vblk->config_work); in virtblk_config_changed()
965 struct virtio_device *vdev = vblk->vdev; in init_vq()
975 dev_err(&vdev->dev, "MQ advertised but zero queues reported\n"); in init_vq()
976 return -EINVAL; in init_vq()
983 num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1); in init_vq()
985 vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs; in init_vq()
986 vblk->io_queues[HCTX_TYPE_READ] = 0; in init_vq()
987 vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs; in init_vq()
989 dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n", in init_vq()
990 vblk->io_queues[HCTX_TYPE_DEFAULT], in init_vq()
991 vblk->io_queues[HCTX_TYPE_READ], in init_vq()
992 vblk->io_queues[HCTX_TYPE_POLL]); in init_vq()
994 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); in init_vq()
995 if (!vblk->vqs) in init_vq()
996 return -ENOMEM; in init_vq()
1001 err = -ENOMEM; in init_vq()
1005 for (i = 0; i < num_vqs - num_poll_vqs; i++) { in init_vq()
1007 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i); in init_vq()
1008 vqs_info[i].name = vblk->vqs[i].name; in init_vq()
1012 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i); in init_vq()
1013 vqs_info[i].name = vblk->vqs[i].name; in init_vq()
1022 spin_lock_init(&vblk->vqs[i].lock); in init_vq()
1023 vblk->vqs[i].vq = vqs[i]; in init_vq()
1025 vblk->num_vqs = num_vqs; in init_vq()
1031 kfree(vblk->vqs); in init_vq()
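/*
 * Sketch of the naming split above (callback wiring assumed): the
 * interrupt-driven "req.N" vqs are registered with virtblk_done() as
 * their callback, while the "req_poll.N" vqs get no callback and are
 * only drained by virtblk_poll().
 */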
1041 const int base = 'z' - 'a' + 1; in virtblk_name_format()
1047 p = end - 1; in virtblk_name_format()
1052 return -EINVAL; in virtblk_name_format()
1053 *--p = 'a' + (index % unit); in virtblk_name_format()
1054 index = (index / unit) - 1; in virtblk_name_format()
1057 memmove(begin, p, end - p); in virtblk_name_format()
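/*
 * Bijective base-26 naming, worked examples: index 0 -> "vda",
 * 25 -> "vdz", 26 -> "vdaa", 701 -> "vdzz", 702 -> "vdaaa".
 */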
1091 struct virtio_blk *vblk = disk->private_data; in cache_type_store()
1092 struct virtio_device *vdev = vblk->vdev; in cache_type_store()
1096 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); in cache_type_store()
1103 lim = queue_limits_start_update(disk->queue); in cache_type_store()
1108 i = queue_limits_commit_update_frozen(disk->queue, &lim); in cache_type_store()
1118 struct virtio_blk *vblk = disk->private_data; in cache_type_show()
1119 u8 writeback = virtblk_get_cache_mode(vblk->vdev); in cache_type_show()
1138 struct virtio_blk *vblk = disk->private_data; in virtblk_attrs_are_visible()
1139 struct virtio_device *vdev = vblk->vdev; in virtblk_attrs_are_visible()
1145 return a->mode; in virtblk_attrs_are_visible()
1160 struct virtio_blk *vblk = set->driver_data; in virtblk_map_queues()
1163 for (i = 0, qoff = 0; i < set->nr_maps; i++) { in virtblk_map_queues()
1164 struct blk_mq_queue_map *map = &set->map[i]; in virtblk_map_queues()
1166 map->nr_queues = vblk->io_queues[i]; in virtblk_map_queues()
1167 map->queue_offset = qoff; in virtblk_map_queues()
1168 qoff += map->nr_queues; in virtblk_map_queues()
1170 if (map->nr_queues == 0) in virtblk_map_queues()
1179 blk_mq_map_queues(&set->map[i]); in virtblk_map_queues()
1181 blk_mq_map_hw_queues(&set->map[i], in virtblk_map_queues()
1182 &vblk->vdev->dev, 0); in virtblk_map_queues()
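/*
 * Poll queues have no interrupt and therefore no device-side affinity
 * to inherit, so they fall back to blk_mq_map_queues(); the default
 * and read maps can follow the device's IRQ affinity through
 * blk_mq_map_hw_queues().
 */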
1190 rq_list_for_each(&iob->req_list, req) { in virtblk_complete_batch()
1199 struct virtio_blk *vblk = hctx->queue->queuedata; in virtblk_poll()
1206 spin_lock_irqsave(&vq->lock, flags); in virtblk_poll()
1208 while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) { in virtblk_poll()
1220 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); in virtblk_poll()
1222 spin_unlock_irqrestore(&vq->lock, flags); in virtblk_poll()
1242 struct virtio_device *vdev = vblk->vdev; in virtblk_read_limits()
1261 sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2); in virtblk_read_limits()
1264 lim->max_segments = sg_elems; in virtblk_read_limits()
1267 lim->max_hw_sectors = UINT_MAX; in virtblk_read_limits()
1279 lim->max_segment_size = max_size; in virtblk_read_limits()
1284 &lim->logical_block_size); in virtblk_read_limits()
1291 lim->physical_block_size = in virtblk_read_limits()
1292 lim->logical_block_size * (1 << physical_block_exp); in virtblk_read_limits()
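/*
 * physical_block_exp is a power-of-two shift: for example, a 512-byte
 * logical block with physical_block_exp = 3 reports a 4096-byte
 * physical block.
 */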
1298 lim->alignment_offset = in virtblk_read_limits()
1299 lim->logical_block_size * alignment_offset; in virtblk_read_limits()
1305 lim->io_min = lim->logical_block_size * min_io_size; in virtblk_read_limits()
1311 lim->io_opt = lim->logical_block_size * opt_io_size; in virtblk_read_limits()
1319 lim->max_hw_discard_sectors = v ? v : UINT_MAX; in virtblk_read_limits()
1328 lim->max_write_zeroes_sectors = v ? v : UINT_MAX; in virtblk_read_limits()
1352 dev_err(&vdev->dev, in virtblk_read_limits()
1354 return -EINVAL; in virtblk_read_limits()
1366 dev_err(&vdev->dev, in virtblk_read_limits()
1368 return -EINVAL; in virtblk_read_limits()
1371 lim->max_secure_erase_sectors = v; in virtblk_read_limits()
1380 dev_err(&vdev->dev, in virtblk_read_limits()
1382 return -EINVAL; in virtblk_read_limits()
1392 * config are 0 and VIRTIO_BLK_F_SECURE_ERASE feature is not negotiated. in virtblk_read_limits()
1398 lim->max_discard_segments = in virtblk_read_limits()
1402 lim->discard_granularity = in virtblk_read_limits()
1405 lim->discard_granularity = lim->logical_block_size; in virtblk_read_limits()
1415 /* treat host-aware devices as non-zoned */ in virtblk_read_limits()
1423 dev_err(&vdev->dev, "unsupported zone model %d\n", model); in virtblk_read_limits()
1424 return -EINVAL; in virtblk_read_limits()
1441 if (!vdev->config->get) { in virtblk_probe()
1442 dev_err(&vdev->dev, "%s failure: config access disabled\n", in virtblk_probe()
1444 return -EINVAL; in virtblk_probe()
1448 minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL); in virtblk_probe()
1453 vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); in virtblk_probe()
1455 err = -ENOMEM; in virtblk_probe()
1459 mutex_init(&vblk->vdev_mutex); in virtblk_probe()
1461 vblk->vdev = vdev; in virtblk_probe()
1463 INIT_WORK(&vblk->config_work, virtblk_config_changed_work); in virtblk_probe()
1471 queue_depth = vblk->vqs[0].vq->num_free; in virtblk_probe()
1479 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); in virtblk_probe()
1480 vblk->tag_set.ops = &virtio_mq_ops; in virtblk_probe()
1481 vblk->tag_set.queue_depth = queue_depth; in virtblk_probe()
1482 vblk->tag_set.numa_node = NUMA_NO_NODE; in virtblk_probe()
1483 vblk->tag_set.cmd_size = in virtblk_probe()
1486 vblk->tag_set.driver_data = vblk; in virtblk_probe()
1487 vblk->tag_set.nr_hw_queues = vblk->num_vqs; in virtblk_probe()
1488 vblk->tag_set.nr_maps = 1; in virtblk_probe()
1489 if (vblk->io_queues[HCTX_TYPE_POLL]) in virtblk_probe()
1490 vblk->tag_set.nr_maps = 3; in virtblk_probe()
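/*
 * nr_maps = 3 allocates distinct queue maps for HCTX_TYPE_DEFAULT,
 * HCTX_TYPE_READ and HCTX_TYPE_POLL; without poll queues the single
 * default map is enough.
 */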
1492 err = blk_mq_alloc_tag_set(&vblk->tag_set); in virtblk_probe()
1503 vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk); in virtblk_probe()
1504 if (IS_ERR(vblk->disk)) { in virtblk_probe()
1505 err = PTR_ERR(vblk->disk); in virtblk_probe()
1509 virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); in virtblk_probe()
1511 vblk->disk->major = major; in virtblk_probe()
1512 vblk->disk->first_minor = index_to_minor(index); in virtblk_probe()
1513 vblk->disk->minors = 1 << PART_BITS; in virtblk_probe()
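/*
 * Assuming the file's usual PART_BITS = 4, each disk owns 16 minors:
 * disk index n starts at minor n << 4, leaving room for 15 partitions.
 */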
1514 vblk->disk->private_data = vblk; in virtblk_probe()
1515 vblk->disk->fops = &virtblk_fops; in virtblk_probe()
1516 vblk->index = index; in virtblk_probe()
1518 /* If disk is read-only in the host, the guest should obey */ in virtblk_probe()
1520 set_disk_ro(vblk->disk, 1); in virtblk_probe()
1531 err = blk_revalidate_disk_zones(vblk->disk); in virtblk_probe()
1536 err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups); in virtblk_probe()
1543 put_disk(vblk->disk); in virtblk_probe()
1545 blk_mq_free_tag_set(&vblk->tag_set); in virtblk_probe()
1547 vdev->config->del_vqs(vdev); in virtblk_probe()
1548 kfree(vblk->vqs); in virtblk_probe()
1559 struct virtio_blk *vblk = vdev->priv; in virtblk_remove()
1562 flush_work(&vblk->config_work); in virtblk_remove()
1564 del_gendisk(vblk->disk); in virtblk_remove()
1565 blk_mq_free_tag_set(&vblk->tag_set); in virtblk_remove()
1567 mutex_lock(&vblk->vdev_mutex); in virtblk_remove()
1572 /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */ in virtblk_remove()
1573 vblk->vdev = NULL; in virtblk_remove()
1575 vdev->config->del_vqs(vdev); in virtblk_remove()
1576 kfree(vblk->vqs); in virtblk_remove()
1578 mutex_unlock(&vblk->vdev_mutex); in virtblk_remove()
1580 put_disk(vblk->disk); in virtblk_remove()
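/*
 * Teardown order matters here: the gendisk and tag set go away first,
 * then vblk->vdev is cleared under vdev_mutex so that racing paths
 * such as virtblk_getgeo() and virtblk_report_zones() see NULL rather
 * than a dangling device pointer.
 */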
1585 struct virtio_blk *vblk = vdev->priv; in virtblk_freeze_priv()
1586 struct request_queue *q = vblk->disk->queue; in virtblk_freeze_priv()
1598 flush_work(&vblk->config_work); in virtblk_freeze_priv()
1600 vdev->config->del_vqs(vdev); in virtblk_freeze_priv()
1601 kfree(vblk->vqs); in virtblk_freeze_priv()
1608 struct virtio_blk *vblk = vdev->priv; in virtblk_restore_priv()
1611 ret = init_vq(vdev->priv); in virtblk_restore_priv()
1616 blk_mq_unquiesce_queue(vblk->disk->queue); in virtblk_restore_priv()
1686 virtblk_wq = alloc_workqueue("virtio-blk", 0, 0); in virtio_blk_init()
1688 return -ENOMEM; in virtio_blk_init()