// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_hw_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
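
/*
 * Illustrative sketch (hypothetical stacking-driver code, not part of this
 * file): a stacking driver such as DM or MD would typically reset its limits
 * with blk_set_stacking_limits() and then fold in each component device:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	queue_limits_stack_bdev(&lim, bdev, offset, "dm-example");
 *
 * The "dm-example" prefix and the single call are made up; a real driver
 * iterates over all component devices.  See blk_stack_limits() below for the
 * combination rules.
 */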

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
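
/*
 * Worked example (illustrative): with io_opt = 1 MiB and 4 KiB pages, the
 * above yields ra_pages = max(2 * 1048576 / 4096, VM_READAHEAD_PAGES) = 512
 * pages, i.e. 2 MiB of read-ahead; devices that do not report io_opt fall
 * back to the VM_READAHEAD_PAGES default.
 */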

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!(lim->features & BLK_FEAT_ZONED)) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	/*
	 * Given that active zones include open zones, the maximum number of
	 * open zones cannot be larger than the maximum number of active zones.
	 */
	if (lim->max_active_zones &&
	    lim->max_open_zones > lim->max_active_zones)
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	/*
	 * The Zone Append size is limited by the maximum I/O size and the zone
	 * size given that it can't span zones.
	 *
	 * If no max_hw_zone_append_sectors limit is provided, the block layer
	 * will emulate it; otherwise we're also bound by the hardware limit.
	 */
	lim->max_zone_append_sectors =
		min_not_zero(lim->max_hw_zone_append_sectors,
			min(lim->chunk_sectors, lim->max_hw_sectors));
	return 0;
}
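
/*
 * Worked example (illustrative): a zoned device with chunk_sectors = 524288
 * (256 MiB zones), max_hw_sectors = 1024 and no hardware Zone Append support
 * (max_hw_zone_append_sectors = 0) ends up with max_zone_append_sectors =
 * min(524288, 1024) = 1024, with Zone Append emulated by the block layer.
 */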

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
	struct blk_integrity *bi = &lim->integrity;

	if (!bi->tuple_size) {
		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
		    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
			pr_warn("invalid PI settings.\n");
			return -EINVAL;
		}
		bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
		return 0;
	}

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		pr_warn("integrity support disabled.\n");
		return -EINVAL;
	}

	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
		pr_warn("ref tag not supported without checksum.\n");
		return -EINVAL;
	}

	if (!bi->interval_exp)
		bi->interval_exp = ilog2(lim->logical_block_size);

	return 0;
}

/*
 * Returns the max guaranteed bytes which we can fit in a bio.
 *
 * We request that an atomic_write is an ITER_UBUF iov_iter (so a single
 * vector), so we assume that we can fit at least PAGE_SIZE in a segment,
 * apart from the first and last segments.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
	unsigned int length;

	length = min(max_segments, 2) * lim->logical_block_size;
	if (max_segments > 2)
		length += (max_segments - 2) * PAGE_SIZE;

	return length;
}
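
/*
 * Worked example (illustrative): with max_segments = 4 and 512 byte logical
 * blocks, the guarantee is 2 * 512 bytes for the first and last segments plus
 * 2 * PAGE_SIZE for the middle ones, i.e. 9 KiB with 4 KiB pages.
 */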

static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
					blk_queue_max_guaranteed_bio(lim));

	unit_limit = rounddown_pow_of_two(unit_limit);

	lim->atomic_write_max_sectors =
		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
			lim->max_hw_sectors);
	lim->atomic_write_unit_min =
		min(lim->atomic_write_hw_unit_min, unit_limit);
	lim->atomic_write_unit_max =
		min(lim->atomic_write_hw_unit_max, unit_limit);
	lim->atomic_write_boundary_sectors =
		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
	unsigned int boundary_sectors;

	if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	if (!lim->atomic_write_hw_max)
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
			 lim->atomic_write_hw_unit_max))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
			 lim->atomic_write_hw_max))
		goto unsupported;

	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

	if (boundary_sectors) {
		if (WARN_ON_ONCE(lim->atomic_write_hw_max >
				 lim->atomic_write_hw_boundary))
			goto unsupported;
		/*
		 * A feature of boundary support is that it disallows bios to
		 * be merged which would result in a merged request which
		 * crosses either a chunk sector or atomic write HW boundary,
		 * even though chunk sectors may be just set for performance.
		 * For simplicity, disallow atomic writes for a chunk sector
		 * which is non-zero and smaller than atomic write HW boundary.
		 * Furthermore, chunk sectors must be a multiple of atomic
		 * write HW boundary. Otherwise boundary support becomes
		 * complicated.
		 * Devices which do not conform to these rules can be dealt
		 * with if and when they show up.
		 */
		if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
			goto unsupported;

		/*
		 * The boundary size just needs to be a multiple of unit_max
		 * (and not necessarily a power-of-2), so this following check
		 * could be relaxed in future.
		 * Furthermore, if needed, unit_max could even be reduced so
		 * that it is compliant with a !power-of-2 boundary.
		 */
		if (!is_power_of_2(boundary_sectors))
			goto unsupported;
	}

	blk_atomic_writes_update_limits(lim);
	return;

unsupported:
	lim->atomic_write_max_sectors = 0;
	lim->atomic_write_boundary_sectors = 0;
	lim->atomic_write_unit_min = 0;
	lim->atomic_write_unit_max = 0;
}
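
/*
 * Worked example (illustrative): atomic_write_hw_unit_min = 4 KiB,
 * atomic_write_hw_unit_max = 64 KiB and atomic_write_hw_max = 64 KiB pass the
 * power-of-2 and ordering checks above; a non-zero boundary must then be a
 * power-of-2 of at least 64 KiB that evenly divides chunk_sectors (if set).
 */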

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;
	unsigned int logical_block_sectors;
	unsigned long seg_size;
	int err;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	else if (blk_validate_block_size(lim->logical_block_size)) {
		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
		return -EINVAL;
	}
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * The optimal I/O size may not be aligned to physical block size
	 * (because it may be limited by dma engines which have no clue about
	 * block size of the disks attached to them), so we round it down here.
	 */
	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			logical_block_sectors);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account. The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
			lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			logical_block_sectors);

	/*
	 * Random default for the maximum number of segments. Drivers should
	 * not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
		return -EINVAL;

	/*
	 * Stacking devices may have both a virtual boundary and a max segment
	 * size limit, so allow this setting now. Long-term the two might need
	 * to move out of the stacking limits, since we have immutable bvecs
	 * and lower layer bio splitting is supposed to handle the two
	 * correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override. Just like the I/O size we
		 * require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
			return -EINVAL;
	}

	/* setup min segment size for building new segment in fast path */
	if (lim->seg_boundary_mask > lim->max_segment_size - 1)
		seg_size = lim->max_segment_size;
	else
		seg_size = lim->seg_boundary_mask + 1;
	lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits. Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->flags &= ~BLK_FLAG_MISALIGNED;
	}

	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
		lim->features &= ~BLK_FEAT_FUA;

	blk_validate_atomic_write_limits(lim);

	err = blk_validate_integrity_limits(lim);
	if (err)
		return err;
	return blk_validate_zoned_limits(lim);
}
EXPORT_SYMBOL_GPL(blk_validate_limits);

/*
 * Set the default limits for a newly allocated queue. @lim contains the
 * initial limits set by the driver, which could be no limit in which case
 * all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q. The caller must have frozen the queue or
 * ensure that there are no outstanding I/Os by other means.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
{
	int error;

	error = blk_validate_limits(lim);
	if (error)
		goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (q->crypto_profile && lim->integrity.tag_size) {
		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
		error = -EINVAL;
		goto out_unlock;
	}
#endif

	q->limits = *lim;
	if (q->disk)
		blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);
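
/*
 * Illustrative usage sketch (hypothetical driver code, values made up):
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int error;
 *
 *	lim.max_hw_sectors = 2048;
 *	error = queue_limits_commit_update(q, &lim);
 *
 * queue_limits_start_update() takes q->limits_lock, which the commit drops
 * again; see queue_limits_commit_update_frozen() below for the variant that
 * also freezes the queue around the update.
 */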

/**
 * queue_limits_commit_update_frozen - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated with the new values by the caller to @q. Freezes the queue
 * before the update and unfreezes it after.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update_frozen(struct request_queue *q,
		struct queue_limits *lim)
{
	unsigned int memflags;
	int ret;

	memflags = blk_mq_freeze_queue(q);
	ret = queue_limits_commit_update(q, lim);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);

/**
 * queue_limits_set - apply queue limits to queue
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}
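
/*
 * Worked example (illustrative): on a device with 4 KiB physical blocks and
 * no device alignment offset, a partition starting at sector 63 sits 3584
 * bytes into a 4096 byte granule, so the above returns
 * (4096 + 0 - 3584) % 4096 = 512 bytes to the next aligned boundary.
 */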

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}
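
/*
 * Worked example (illustrative): with discard_granularity = 1 MiB (2048
 * sectors), discard_alignment = 0 and a partition starting at sector 2560,
 * offset = 2560 % 2048 = 512 sectors, so the above returns
 * (2048 + 0 - 512) % 2048 = 1536 sectors, i.e. 768 KiB to the next granule.
 */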

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/* Check if second and later bottom devices are compliant */
static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
				struct queue_limits *b)
{
	/* We're not going to support different boundary sizes... yet */
	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
		return false;

	/* Can't support this */
	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
		return false;

	/* Or this */
	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
		return false;

	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
				b->atomic_write_hw_max);
	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
				b->atomic_write_hw_unit_min);
	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
				b->atomic_write_hw_unit_max);
	return true;
}

/* Check for valid boundary of first bottom device */
static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
				struct queue_limits *b)
{
	/*
	 * Ensure atomic write boundary is aligned with chunk sectors. Stacked
	 * devices store chunk sectors in t->io_min.
	 */
	if (b->atomic_write_hw_boundary > t->io_min &&
	    b->atomic_write_hw_boundary % t->io_min)
		return false;
	if (t->io_min > b->atomic_write_hw_boundary &&
	    t->io_min % b->atomic_write_hw_boundary)
		return false;

	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
	return true;
}

/* Check stacking of first bottom device */
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
				struct queue_limits *b)
{
	if (b->atomic_write_hw_boundary &&
	    !blk_stack_atomic_writes_boundary_head(t, b))
		return false;

	if (t->io_min <= SECTOR_SIZE) {
		/* No chunk sectors, so use bottom device values directly */
		t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
		t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
		t->atomic_write_hw_max = b->atomic_write_hw_max;
		return true;
	}

	/*
	 * Find values for limits which work for chunk size.
	 * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
	 * size (t->io_min), as chunk size is not restricted to a power-of-2.
	 * So we need to find the highest power-of-2 which works for the chunk
	 * size.
	 * As an example scenario, we could have b->unit_max = 16K and
	 * t->io_min = 24K. For this case, reduce t->unit_max to a value
	 * aligned with both limits, i.e. 8K in this example.
	 */
	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
	while (t->io_min % t->atomic_write_hw_unit_max)
		t->atomic_write_hw_unit_max /= 2;

	t->atomic_write_hw_unit_min = min(b->atomic_write_hw_unit_min,
					t->atomic_write_hw_unit_max);
	t->atomic_write_hw_max = min(b->atomic_write_hw_max, t->io_min);

	return true;
}

static void blk_stack_atomic_writes_limits(struct queue_limits *t,
				struct queue_limits *b, sector_t start)
{
	if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	if (!b->atomic_write_hw_unit_min)
		goto unsupported;

	if (!blk_atomic_write_start_sect_aligned(start, b))
		goto unsupported;

	/*
	 * If atomic_write_hw_max is set, we have already stacked 1x bottom
	 * device, so check for compliance.
	 */
	if (t->atomic_write_hw_max) {
		if (!blk_stack_atomic_writes_tail(t, b))
			goto unsupported;
		return;
	}

	if (!blk_stack_atomic_writes_head(t, b))
		goto unsupported;
	return;

unsupported:
	t->atomic_write_hw_max = 0;
	t->atomic_write_hw_unit_max = 0;
	t->atomic_write_hw_unit_min = 0;
	t->atomic_write_hw_boundary = 0;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

	/*
	 * Some features need to be supported both by the stacking driver and
	 * all underlying devices.  The stacking driver sets these flags before
	 * stacking the limits, and this will clear the flags if any of the
	 * underlying devices does not support it.
	 */
	if (!(b->features & BLK_FEAT_NOWAIT))
		t->features &= ~BLK_FEAT_NOWAIT;
	if (!(b->features & BLK_FEAT_POLL))
		t->features &= ~BLK_FEAT_POLL;

	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_user_sectors = min_not_zero(t->max_user_sectors,
			b->max_user_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
					    b->max_hw_zone_append_sectors);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->flags |= BLK_FLAG_MISALIGNED;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	if (!(t->features & BLK_FEAT_ZONED)) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	blk_stack_atomic_writes_limits(t, b, start);

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
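
/*
 * Illustrative sketch (hypothetical stacking-driver code): combining two
 * component devices whose data starts at 'start0' and 'start1':
 *
 *	blk_set_stacking_limits(&t);
 *	if (blk_stack_limits(&t, &b0, start0) < 0 ||
 *	    blk_stack_limits(&t, &b1, start1) < 0)
 *		pr_warn("example: device misaligned\n");
 *
 * A negative return only flags misalignment; the combined limits in 't' are
 * still usable, with BLK_FLAG_MISALIGNED set.
 */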

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, bdev_limits(bdev),
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the target @t.
 * Stacking is possible if either:
 *
 *   a) @t does not have any integrity information stacked into it yet, or
 *   b) the integrity profile in @b is identical to the one in @t.
 *
 * If @b can be stacked into @t, return %true.  Else return %false and clear
 * the integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
		struct queue_limits *b)
{
	struct blk_integrity *ti = &t->integrity;
	struct blk_integrity *bi = &b->integrity;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return true;

	if (ti->flags & BLK_INTEGRITY_STACKED) {
		if (ti->tuple_size != bi->tuple_size)
			goto incompatible;
		if (ti->interval_exp != bi->interval_exp)
			goto incompatible;
		if (ti->tag_size != bi->tag_size)
			goto incompatible;
		if (ti->csum_type != bi->csum_type)
			goto incompatible;
		if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
		    (bi->flags & BLK_INTEGRITY_REF_TAG))
			goto incompatible;
	} else {
		ti->flags = BLK_INTEGRITY_STACKED;
		ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
			     (bi->flags & BLK_INTEGRITY_REF_TAG);
		ti->csum_type = bi->csum_type;
		ti->tuple_size = bi->tuple_size;
		ti->pi_offset = bi->pi_offset;
		ti->interval_exp = bi->interval_exp;
		ti->tag_size = bi->tag_size;
	}
	return true;

incompatible:
	memset(ti, 0, sizeof(*ti));
	return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q: the request queue for the device
 * @depth: queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.flags & BLK_FLAG_MISALIGNED)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);