// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe ZNS-ZBD command implementation.
 */

static inline u8 nvmet_zasl(unsigned int zone_append_sects)
{
        /*
         * Zone Append Size Limit (zasl) is expressed as a power of 2 value
         * with the minimum memory page size (i.e. 12) as unit.
         */
        return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
}
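
/*
 * Worked example (an illustration added here, not part of the original file),
 * assuming NVMET_MPSMIN_SHIFT is 12 (a 4 KiB minimum memory page): a device
 * limit of 256 sectors of 512 bytes (128 KiB) gives 256 >> (12 - 9) = 32
 * minimum-size pages, and ilog2(32) = 5, so a zasl of 5 advertises
 * 2^5 * 4 KiB = 128 KiB.
 */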

static int validate_conv_zones_cb(struct blk_zone *z,
                                  unsigned int i, void *data)
{
        if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return -EOPNOTSUPP;
        return 0;
}

/* in nvmet_bdev_zns_enable() */
        u8 zasl = nvmet_zasl(bdev_max_zone_append_sectors(ns->bdev));
        struct gendisk *bd_disk = ns->bdev->bd_disk;

        if (ns->subsys->zasl) {
                if (ns->subsys->zasl > zasl)
                        return false;
        } else {
                ns->subsys->zasl = zasl;
        }

        /*
         * Generic zoned block devices may have a smaller last zone which is
         * not supported by ZNS. Exclude zoned drives that have such smaller
         * last zones.
         */
        if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
                return false;

        /*
         * ZNS does not define a conventional zone type. Use report zones
         * to detect if the device has conventional zones and reject it if
         * it does.
         */
        ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
                                  validate_conv_zones_cb, NULL);

        ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

/* in nvmet_execute_identify_ctrl_zns() */
        u8 zasl = req->sq->ctrl->subsys->zasl;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        if (ctrl->ops->get_mdts)
                id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
        else
                id->zasl = zasl;
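
/*
 * Note (added for clarity, not in the original file): zasl and the value
 * returned by ctrl->ops->get_mdts() are both power-of-two exponents in units
 * of the minimum memory page size, so taking the smaller exponent caps the
 * advertised Zone Append Size Limit at the transport's maximum data transfer
 * size.
 */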

/* in nvmet_execute_identify_ns_zns() */
        if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                /* ... */
        }

        if (nvmet_ns_revalidate(req->ns)) {
                mutex_lock(&req->ns->subsys->lock);
                nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
                mutex_unlock(&req->ns->subsys->lock);
        }

        if (!bdev_is_zoned(req->ns->bdev)) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                /* ... */
        }

        zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
                                        req->ns->blksize_shift;
        id_zns->lbafe[0].zsze = cpu_to_le64(zsze);

        mor = bdev_max_open_zones(req->ns->bdev);
        if (!mor)
                mor = U32_MAX;
        else
                mor--;
        id_zns->mor = cpu_to_le32(mor);

        mar = bdev_max_active_zones(req->ns->bdev);
        if (!mar)
                mar = U32_MAX;
        else
                mar--;
        id_zns->mar = cpu_to_le32(mar);
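
/*
 * Illustration (not in the original file): MOR and MAR are 0's based values
 * in NVMe ZNS, with 0xffffffff meaning "no limit", while the block layer
 * reports plain counts with 0 meaning "no limit". A device where
 * bdev_max_open_zones() returns 128 is therefore advertised as mor = 127,
 * and a device returning 0 becomes mor = 0xffffffff.
 */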

/* in nvmet_bdev_validate_zone_mgmt_recv() */
        sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
        u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;

        if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
                req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
                return NVME_SC_LBA_RANGE | NVME_SC_DNR;
        }

        if (out_bufsize < sizeof(struct nvme_zone_report)) {
                req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
                req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        switch (req->cmd->zmr.pr) {
        /* ... accepted partial-report values ... */
        default:
                req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        switch (req->cmd->zmr.zrasf) {
        /* ... accepted zone-state filter values ... */
        default:
                req->error_loc =
                        offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }
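
/*
 * Worked example (not in the original file): NUMD is a 0's based count of
 * dwords, so out_bufsize = (numd + 1) << 2 converts it to bytes. A host that
 * sends numd = 1023 asks for a 1024-dword (4096-byte) report buffer.
 */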

static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
{
        struct nvmet_report_zone_data *rz = d;

        if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
            z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
                return 0;

        if (rz->nr_zones < rz->out_nr_zones) {
                struct nvme_zone_descriptor zdesc = { };
                u16 status;

                zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
                zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
                zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
                zdesc.za = z->reset ? 1 << 2 : 0;
                zdesc.zs = z->cond << 4;
                zdesc.zt = z->type;

                status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
                                           sizeof(zdesc));
                if (status)
                        return -EINVAL;

                rz->out_buf_offset += sizeof(zdesc);
        }

        rz->nr_zones++;

        return 0;
}

static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
        unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);

        return bdev_nr_zones(req->ns->bdev) - bdev_zone_no(req->ns->bdev, sect);
}

/* in get_nr_zones_from_buf() */
        return (bufsize - sizeof(struct nvme_zone_report)) /
                        sizeof(struct nvme_zone_descriptor);
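
/*
 * Worked example (not in the original file), assuming the spec-defined
 * 64-byte report header and 64-byte zone descriptors: a 4096-byte buffer
 * holds (4096 - 64) / 64 = 63 zone descriptors.
 */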

/* in nvmet_bdev_zone_zmgmt_recv_work() */
        struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
        sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
        u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
        struct nvmet_report_zone_data rz_data = {
                .out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
                /* leave the first nr_zones reported zones in the buffer */
                .out_buf_offset = sizeof(struct nvme_zone_report),
                .zrasf = req->cmd->zmr.zrasf,
                .nr_zones = 0,
                .req = req,
        };

        ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
                                  nvmet_bdev_report_zone_cb, &rz_data);

        /* a partial report counts only the descriptors actually returned */
        if (req->cmd->zmr.pr)
                rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);

void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
{
        INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
        queue_work(zbd_wq, &req->z.zmgmt_work);
}

/* in blkdev_zone_mgmt_errno_to_nvme_status() */
        case -EINVAL:
        case -EIO:
                return NVME_SC_ZONE_BOUNDARY_ERROR | NVME_SC_DNR;

static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
{
        struct nvmet_zone_mgmt_send_all_data *data = d;

        switch (zsa_req_op(data->req->cmd->zms.zsa)) {
        case REQ_OP_ZONE_OPEN:
                switch (z->cond) { /* ... skip zones the action cannot apply to */ }
                break;
        case REQ_OP_ZONE_CLOSE:
                switch (z->cond) { /* ... */ }
                break;
        case REQ_OP_ZONE_FINISH:
                switch (z->cond) { /* ... */ }
                break;
        default:
                return -EINVAL;
        }

        set_bit(i, data->zbitmap);
        return 0;
}

/* in nvmet_bdev_zone_mgmt_emulate_all() */
        struct block_device *bdev = req->ns->bdev;

        d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
                                 GFP_NOIO, bdev->bd_disk->node_id);
        if (!d.zbitmap) {
                ret = -ENOMEM;
                goto out;
        }

        /* Scan and build bitmap of the eligible zones */
        ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
        if (ret != nr_zones) {
                if (ret > 0)
                        ret = -EIO;
                goto out;
        }

        while (sector < bdev_nr_sectors(bdev)) {
                if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) {
                        bio = blk_next_bio(bio, bdev, 0,
                                zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
                                GFP_KERNEL);
                        bio->bi_iter.bi_sector = sector;
                        /* This may take a while, so be nice to others */
                        cond_resched();
                }
                sector += bdev_zone_sectors(bdev);
        }
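
/*
 * Note (added for clarity, not in the original file): the block layer only
 * offers a device-wide operation for zone reset, so a Zone Management Send
 * with the "select all" bit and an open, close or finish action is emulated:
 * report zones marks every zone whose condition allows the action in a
 * bitmap, and one zone-management bio is then issued per marked zone.
 */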

/* in nvmet_bdev_execute_zmgmt_send_all() */
        switch (zsa_req_op(req->cmd->zms.zsa)) {
        case REQ_OP_ZONE_RESET:
                ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
                                       get_capacity(req->ns->bdev->bd_disk));
                if (ret < 0)
                        return blkdev_zone_mgmt_errno_to_nvme_status(ret);
                break;
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
                return nvmet_bdev_zone_mgmt_emulate_all(req);
        default:
                req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

/* in nvmet_bdev_zmgmt_send_work() */
        struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
        sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
        enum req_op op = zsa_req_op(req->cmd->zms.zsa);
        struct block_device *bdev = req->ns->bdev;
        sector_t zone_sectors = bdev_zone_sectors(bdev);

        if (op == REQ_OP_LAST) {
                req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
                /* ... */
        }

        /* when the select all bit is set the slba field is ignored */
        if (req->cmd->zms.select_all) {
                status = nvmet_bdev_execute_zmgmt_send_all(req);
                goto out;
        }

        if (sect >= get_capacity(bdev->bd_disk)) {
                req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
                status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
                goto out;
        }

        if (sect & (zone_sectors - 1)) {
                req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out;
        }
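
/*
 * Illustration (not in the original file): the bitmask test relies on the
 * power-of-two zone size the Linux block layer requires. With 256 MiB zones
 * (zone_sectors = 524288), an slba that maps to sector 1572864 (the start of
 * zone 3) passes, while sector 1572872 is rejected because it is not the
 * first LBA of a zone.
 */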

void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
{
        INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
        queue_work(zbd_wq, &req->z.zmgmt_work);
}

static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
{
        struct nvmet_req *req = bio->bi_private;

        if (bio->bi_status == BLK_STS_OK) {
                req->cqe->result.u64 =
                        nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
        }

        nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
        nvmet_req_bio_put(req, bio);
}
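
/*
 * Note (added for clarity, not in the original file): Zone Append returns the
 * LBA at which the data was actually written in dwords 0 and 1 of the
 * completion entry. On successful completion of a REQ_OP_ZONE_APPEND bio,
 * bi_sector holds that write location, so it is converted back to an LBA and
 * stored in result.u64.
 */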

/* in nvmet_bdev_execute_zone_append() */
        sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
        const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;

        if (nvmet_rw_data_len(req) >
            bdev_max_zone_append_sectors(req->ns->bdev) << SECTOR_SHIFT) {
                req->error_loc = offsetof(struct nvme_rw_command, length);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out;
        }

        if (!req->sg_cnt) {
                nvmet_req_complete(req, 0);
                return;
        }

        if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
                req->error_loc = offsetof(struct nvme_rw_command, slba);
                status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
                goto out;
        }

        if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
                req->error_loc = offsetof(struct nvme_rw_command, slba);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out;
        }

        if (nvmet_use_inline_bvec(req)) {
                bio = &req->z.inline_bio;
                bio_init(bio, req->ns->bdev, req->inline_bvec,
                         ARRAY_SIZE(req->inline_bvec), opf);
        } else {
                bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);
        }

        bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
        bio->bi_iter.bi_sector = sect;
        bio->bi_private = req;
        if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
                bio->bi_opf |= REQ_FUA;

        for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
                unsigned int len = sg->length;

                if (bio_add_page(bio, sg_page(sg), len, sg->offset) != len) {
                        status = NVME_SC_INTERNAL;
                        goto out_put_bio;
                }
        }

u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        switch (cmd->common.opcode) {
        case nvme_cmd_zone_append:
                req->execute = nvmet_bdev_execute_zone_append;
                return 0;
        case nvme_cmd_zone_mgmt_recv:
                req->execute = nvmet_bdev_execute_zone_mgmt_recv;
                return 0;
        case nvme_cmd_zone_mgmt_send:
                req->execute = nvmet_bdev_execute_zone_mgmt_send;
                return 0;
        default:
                return nvmet_bdev_parse_io_cmd(req);
        }
}