Lines Matching full:zone
25 struct nullb_zone *zone) in null_init_zone_lock() argument
28 spin_lock_init(&zone->spinlock); in null_init_zone_lock()
30 mutex_init(&zone->mutex); in null_init_zone_lock()
34 struct nullb_zone *zone) in null_lock_zone() argument
37 spin_lock_irq(&zone->spinlock); in null_lock_zone()
39 mutex_lock(&zone->mutex); in null_lock_zone()
43 struct nullb_zone *zone) in null_unlock_zone() argument
46 spin_unlock_irq(&zone->spinlock); in null_unlock_zone()
48 mutex_unlock(&zone->mutex); in null_unlock_zone()
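The three helpers above switch between a spinlock and a mutex. A plausible reconstruction of their bodies, assuming (as the null_blk design suggests) that the deciding factor is dev->memory_backed, since memory-backed command processing can sleep and therefore cannot sit under a spinlock:

/* Sketch based on the matched lines; the dev->memory_backed test is an
 * assumption: memory-backed devices may sleep while handling a command,
 * so they need a mutex, otherwise an IRQ-disabling spinlock is enough.
 */
static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}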
55 struct nullb_zone *zone; in null_init_zoned_dev() local
64 pr_err("Zone size larger than device capacity\n"); in null_init_zoned_dev()
72 pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n", in null_init_zoned_dev()
78 * If a smaller zone capacity was requested, do not allow a smaller last in null_init_zoned_dev()
79 * zone at the same time as such zone configuration does not correspond in null_init_zoned_dev()
80 * to any real zoned device. in null_init_zoned_dev()
84 pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n"); in null_init_zoned_dev()
115 pr_info("zone_max_active limit disabled, limit >= zone count\n"); in null_init_zoned_dev()
125 pr_info("zone_max_open limit disabled, limit >= zone count\n"); in null_init_zoned_dev()
131 zone = &dev->zones[i]; in null_init_zoned_dev()
133 null_init_zone_lock(dev, zone); in null_init_zoned_dev()
134 zone->start = sector; in null_init_zoned_dev()
135 zone->len = dev->zone_size_sects; in null_init_zoned_dev()
136 zone->capacity = zone->len; in null_init_zoned_dev()
137 zone->wp = zone->start + zone->len; in null_init_zoned_dev()
138 zone->type = BLK_ZONE_TYPE_CONVENTIONAL; in null_init_zoned_dev()
139 zone->cond = BLK_ZONE_COND_NOT_WP; in null_init_zoned_dev()
145 zone = &dev->zones[i]; in null_init_zoned_dev()
147 null_init_zone_lock(dev, zone); in null_init_zoned_dev()
148 zone->start = sector; in null_init_zoned_dev()
149 if (zone->start + dev->zone_size_sects > dev_capacity_sects) in null_init_zoned_dev()
150 zone->len = dev_capacity_sects - zone->start; in null_init_zoned_dev()
152 zone->len = dev->zone_size_sects; in null_init_zoned_dev()
153 zone->capacity = in null_init_zoned_dev()
154 min_t(sector_t, zone->len, zone_capacity_sects); in null_init_zoned_dev()
155 zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ; in null_init_zoned_dev()
157 zone->cond = BLK_ZONE_COND_FULL; in null_init_zoned_dev()
158 zone->wp = zone->start + zone->capacity; in null_init_zoned_dev()
160 zone->cond = BLK_ZONE_COND_EMPTY; in null_init_zoned_dev()
161 zone->wp = zone->start; in null_init_zoned_dev()
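The first loop initializes the conventional zones (no write pointer, hence wp parked at the zone end and condition NOT_WP); the second initializes the sequential-write-required zones. The FULL/EMPTY branch closing the second loop presumably reads as below, where zone_full is an assumed configuration flag that starts every sequential zone in the FULL condition:

		if (dev->zone_full) {
			/* Assumed flag: bring the device up with all zones full. */
			zone->cond = BLK_ZONE_COND_FULL;
			zone->wp = zone->start + zone->capacity;
		} else {
			zone->cond = BLK_ZONE_COND_EMPTY;
			zone->wp = zone->start;
		}

		/* Advance to the next zone's start sector (assumed, not matched). */
		sector += dev->zone_size_sects;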
180 pr_info("%s: using %s zone append\n", in null_register_zoned_dev()
199 struct nullb_zone *zone; in null_report_zones() local
211 zone = &dev->zones[first_zone]; in null_report_zones()
212 for (i = 0; i < nr_zones; i++, zone++) { in null_report_zones()
214 * Stacked DM target drivers will remap the zone information by in null_report_zones()
215 * modifying the zone information passed to the report callback. in null_report_zones()
216 * So use a local copy to avoid corruption of the device zone in null_report_zones()
217 * array. in null_report_zones()
219 null_lock_zone(dev, zone); in null_report_zones()
220 blkz.start = zone->start; in null_report_zones()
221 blkz.len = zone->len; in null_report_zones()
222 blkz.wp = zone->wp; in null_report_zones()
223 blkz.type = zone->type; in null_report_zones()
224 blkz.cond = zone->cond; in null_report_zones()
225 blkz.capacity = zone->capacity; in null_report_zones()
226 null_unlock_zone(dev, zone); in null_report_zones()
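What follows the unlock is elided by the matching, but per the comment above, the locked local snapshot is what gets handed back to the caller. Presumably something like the following, where cb and data are the report_zones callback and its opaque argument (assumed parameter names):

		/* Hand the stack-local copy to the caller; it may remap it freely. */
		error = cb(&blkz, first_zone + i, data);
		if (error)
			return error;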
238 * with the target zone already locked.
244 struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)]; in null_zone_valid_read_len() local
248 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL || in null_zone_valid_read_len()
249 sector + nr_sectors <= zone->wp) in null_zone_valid_read_len()
252 if (sector > zone->wp) in null_zone_valid_read_len()
255 return (zone->wp - sector) << SECTOR_SHIFT; in null_zone_valid_read_len()
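A short worked example of this read clamping, with assumed values and 512-byte sectors:

	/*
	 * Worked example (assumed values), zone->wp == 1000, SECTOR_SHIFT == 9:
	 *  - read of 64 sectors at sector 900: 900 + 64 <= 1000, full length valid
	 *  - read of 64 sectors at sector 980: clamped to (1000 - 980) << 9 = 10240 bytes
	 *  - read at sector 1008: starts above the write pointer, 0 bytes valid
	 */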
260 struct nullb_zone *zone; in null_close_imp_open_zone() local
268 zone = &dev->zones[zno]; in null_close_imp_open_zone()
273 if (zone->cond == BLK_ZONE_COND_IMP_OPEN) { in null_close_imp_open_zone()
275 if (zone->wp == zone->start) { in null_close_imp_open_zone()
276 zone->cond = BLK_ZONE_COND_EMPTY; in null_close_imp_open_zone()
278 zone->cond = BLK_ZONE_COND_CLOSED; in null_close_imp_open_zone()
318 * This function matches the manage open zone resources function in the ZBC standard,
321 * The function determines if a zone can transition to implicit open or explicit open,
322 * while maintaining the max open zone (and max active zone) limit(s). It may close an
323 * implicit open zone in order to make additional zone resources available.
325 * ZBC states that an implicit open zone shall be closed only if there is not
326 * room within the open limit. However, with the addition of an active limit,
327 * it is not certain that closing an implicit open zone will allow a new zone
328 * to be opened, since we might already be at the active limit capacity.
331 struct nullb_zone *zone) in null_check_zone_resources() argument
335 switch (zone->cond) { in null_check_zone_resources()
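Only the switch statement of null_check_zone_resources() matches, so here is a hedged sketch of its body following the ZBC/ZNS rules in the comment above; null_check_active() and null_check_open() are assumed helpers enforcing the max active/open limits (the latter possibly by closing an implicitly open zone to free a resource):

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* Opening an empty zone consumes an active and an open resource. */
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		/* A closed zone is already active; it only needs an open resource. */
		return null_check_open(dev);
	default:
		/* Should never be called for other zone conditions. */
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}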
355 struct nullb_zone *zone = &dev->zones[zno]; in null_zone_write() local
358 trace_nullb_zone_op(cmd, zno, zone->cond); in null_zone_write()
360 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) { in null_zone_write()
366 null_lock_zone(dev, zone); in null_zone_write()
369 * Regular writes must be at the write pointer position. Zone append in null_zone_write()
370 * writes are automatically issued at the write pointer and the position in null_zone_write()
371 * returned using the request sector. Note that we do not check the zone in null_zone_write()
372 * condition because for FULL, READONLY and OFFLINE zones, the sector in null_zone_write()
373 * check against the zone write pointer will always result in failing in null_zone_write()
374 * the command. in null_zone_write()
378 zone->wp == NULL_ZONE_INVALID_WP) { in null_zone_write()
382 sector = zone->wp; in null_zone_write()
386 if (sector != zone->wp || in null_zone_write()
387 zone->wp + nr_sectors > zone->start + zone->capacity) { in null_zone_write()
392 if (zone->cond == BLK_ZONE_COND_CLOSED || in null_zone_write()
393 zone->cond == BLK_ZONE_COND_EMPTY) { in null_zone_write()
397 ret = null_check_zone_resources(dev, zone); in null_zone_write()
402 if (zone->cond == BLK_ZONE_COND_CLOSED) { in null_zone_write()
405 } else if (zone->cond == BLK_ZONE_COND_EMPTY) { in null_zone_write()
412 zone->cond = BLK_ZONE_COND_IMP_OPEN; in null_zone_write()
419 zone->wp += nr_sectors; in null_zone_write()
420 if (zone->wp == zone->start + zone->capacity) { in null_zone_write()
423 if (zone->cond == BLK_ZONE_COND_EXP_OPEN) in null_zone_write()
425 else if (zone->cond == BLK_ZONE_COND_IMP_OPEN) in null_zone_write()
429 zone->cond = BLK_ZONE_COND_FULL; in null_zone_write()
435 null_unlock_zone(dev, zone); in null_zone_write()
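A worked example of the write-pointer rules described in the comment above, with assumed values:

	/*
	 * Worked example (assumed values): start = 0, capacity = 256, wp = 128.
	 *  - REQ_OP_ZONE_APPEND: the submitted sector is ignored, the data is
	 *    written at sector = zone->wp = 128 and 128 is returned to the caller.
	 *  - REQ_OP_WRITE at sector 120: rejected, since 120 != zone->wp.
	 *  - A successful 8-sector write advances zone->wp to 136; once wp
	 *    reaches start + capacity (256) the zone transitions to
	 *    BLK_ZONE_COND_FULL and no longer counts as open or active.
	 */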
441 struct nullb_zone *zone) in null_open_zone() argument
445 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) in null_open_zone()
448 switch (zone->cond) { in null_open_zone()
464 switch (zone->cond) { in null_open_zone()
466 ret = null_check_zone_resources(dev, zone); in null_open_zone()
476 ret = null_check_zone_resources(dev, zone); in null_open_zone()
492 zone->cond = BLK_ZONE_COND_EXP_OPEN; in null_open_zone()
498 struct nullb_zone *zone) in null_close_zone() argument
500 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) in null_close_zone()
503 switch (zone->cond) { in null_close_zone()
519 switch (zone->cond) { in null_close_zone()
530 if (zone->wp > zone->start) in null_close_zone()
536 if (zone->wp == zone->start) in null_close_zone()
537 zone->cond = BLK_ZONE_COND_EMPTY; in null_close_zone()
539 zone->cond = BLK_ZONE_COND_CLOSED; in null_close_zone()
545 struct nullb_zone *zone) in null_finish_zone() argument
549 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) in null_finish_zone()
555 switch (zone->cond) { in null_finish_zone()
561 ret = null_check_zone_resources(dev, zone); in null_finish_zone()
574 ret = null_check_zone_resources(dev, zone); in null_finish_zone()
589 zone->cond = BLK_ZONE_COND_FULL; in null_finish_zone()
590 zone->wp = zone->start + zone->len; in null_finish_zone()
596 struct nullb_zone *zone) in null_reset_zone() argument
598 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) in null_reset_zone()
604 switch (zone->cond) { in null_reset_zone()
625 zone->cond = BLK_ZONE_COND_EMPTY; in null_reset_zone()
626 zone->wp = zone->start; in null_reset_zone()
629 return null_handle_discard(dev, zone->start, zone->len); in null_reset_zone()
639 struct nullb_zone *zone; in null_zone_mgmt() local
645 zone = &dev->zones[i]; in null_zone_mgmt()
646 null_lock_zone(dev, zone); in null_zone_mgmt()
647 if (zone->cond != BLK_ZONE_COND_EMPTY && in null_zone_mgmt()
648 zone->cond != BLK_ZONE_COND_READONLY && in null_zone_mgmt()
649 zone->cond != BLK_ZONE_COND_OFFLINE) { in null_zone_mgmt()
650 null_reset_zone(dev, zone); in null_zone_mgmt()
651 trace_nullb_zone_op(cmd, i, zone->cond); in null_zone_mgmt()
653 null_unlock_zone(dev, zone); in null_zone_mgmt()
659 zone = &dev->zones[zone_no]; in null_zone_mgmt()
661 null_lock_zone(dev, zone); in null_zone_mgmt()
663 if (zone->cond == BLK_ZONE_COND_READONLY || in null_zone_mgmt()
664 zone->cond == BLK_ZONE_COND_OFFLINE) { in null_zone_mgmt()
671 ret = null_reset_zone(dev, zone); in null_zone_mgmt()
674 ret = null_open_zone(dev, zone); in null_zone_mgmt()
677 ret = null_close_zone(dev, zone); in null_zone_mgmt()
680 ret = null_finish_zone(dev, zone); in null_zone_mgmt()
688 trace_nullb_zone_op(cmd, zone_no, zone->cond); in null_zone_mgmt()
691 null_unlock_zone(dev, zone); in null_zone_mgmt()
700 struct nullb_zone *zone; in null_process_zoned_cmd() local
716 zone = &dev->zones[null_zone_no(dev, sector)]; in null_process_zoned_cmd()
717 if (zone->cond == BLK_ZONE_COND_OFFLINE) in null_process_zoned_cmd()
720 null_lock_zone(dev, zone); in null_process_zoned_cmd()
722 null_unlock_zone(dev, zone); in null_process_zoned_cmd()
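Most of null_process_zoned_cmd() is elided by the matching, so here is a hedged sketch of how the matched pieces likely fit together; null_process_cmd() (the non-zoned command handler), the cmd->nq->dev dereference, and the enum req_op type are assumptions:

blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev = cmd->nq->dev;	/* assumed */
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors,
				       op == REQ_OP_ZONE_APPEND);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		/* Reads and other ops: fail offline zones, serialize the rest. */
		zone = &dev->zones[null_zone_no(dev, sector)];
		if (zone->cond == BLK_ZONE_COND_OFFLINE)
			return BLK_STS_IOERR;

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);	/* assumed */
		null_unlock_zone(dev, zone);
		return sts;
	}
}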
728 * Set a zone in the read-only or offline condition.
731 struct nullb_zone *zone, enum blk_zone_cond cond) in null_set_zone_cond() argument
737 null_lock_zone(dev, zone); in null_set_zone_cond()
743 * set the specified zone condition to the zones. Finish the zones in null_set_zone_cond()
744 * beforehand to free up zone resources. in null_set_zone_cond()
746 if (zone->cond == cond) { in null_set_zone_cond()
747 zone->cond = BLK_ZONE_COND_EMPTY; in null_set_zone_cond()
748 zone->wp = zone->start; in null_set_zone_cond()
750 null_handle_discard(dev, zone->start, zone->len); in null_set_zone_cond()
752 if (zone->cond != BLK_ZONE_COND_READONLY && in null_set_zone_cond()
753 zone->cond != BLK_ZONE_COND_OFFLINE) in null_set_zone_cond()
754 null_finish_zone(dev, zone); in null_set_zone_cond()
755 zone->cond = cond; in null_set_zone_cond()
756 zone->wp = NULL_ZONE_INVALID_WP; in null_set_zone_cond()
759 null_unlock_zone(dev, zone); in null_set_zone_cond()
763 * Identify a zone from the sector written to configfs file. Then set zone
764 * condition to the zone.
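This comment refers to the driver's failure-injection configfs attributes (zone_readonly and zone_offline in recent null_blk versions, assuming this snapshot matches): the sector written to the file is mapped to a zone index before null_set_zone_cond() is called. The mapping helper is not in the matches, but given that null_blk requires a power-of-two zone size it is presumably a simple shift:

static inline unsigned int null_zone_no(struct nullb_device *dev,
					sector_t sect)
{
	/* Assumed implementation: zone size in sectors is a power of two. */
	return sect >> ilog2(dev->zone_size_sects);
}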