/* Excerpts from block/blk-iolatency.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Block rq-qos base io controller
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual hardware.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window. This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to.
 *
 * ... number of IO's we're allowed to have in flight. This starts at
 * (u64)-1 down ...
 *
 *   total_time += min_lat_nsec - actual_io_completion
 */
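/*
 * Worked example (editor's illustration, not from the original source):
 * with a 20ms target (min_lat_nsec == 20 * NSEC_PER_MSEC), an IO issued
 * on behalf of a throttled group that actually completes in 12ms accrues
 * 20ms - 12ms = 8ms of artificial delay via the formula above; see
 * iolatency_record_time() below.
 */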
#include <linux/backing-dev.h>
#include <linux/blk-mq.h>

#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk-cgroup.h"
/*
 * ->enabled is the master enable switch gating the throttling logic and
 * inflight tracking. The number of cgroups which have iolat enabled is
 * tracked in ->enable_cnt, and ->enabled is flipped on/off accordingly
 * from ->enable_work with the request_queue frozen. For details, see
 * blkiolatency_enable_work_fn().
 */
struct iolatency_grp {
        struct blkg_policy_data pd;
        /* ... */
};
/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average. The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately. Note, windows only elapse with IO activity. Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
                                      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
        2045, // exp(1/600) - 600 samples
        2039, // exp(1/240) - 240 samples
        2031, // exp(1/120) - 120 samples
        2023, // exp(1/80) - 80 samples
        2014, // exp(1/60) - 60 samples
};
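/*
 * Editor's sketch (not part of the kernel source): one fixed-point EMA
 * step as described in the comment above, assuming FIXED_1 == 2048.
 * calc_load() in the kernel implements essentially this fold:
 */
static inline u64 ema_step_example(u64 lat_avg, u64 exp_factor, u64 new_sample)
{
        /* lat_avg' = (lat_avg * exp + new_sample * (2048 - exp)) / 2048 */
        return (lat_avg * exp_factor + new_sample * (2048 - exp_factor)) >> 11;
}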
static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
        return pd_to_blkg(&iolat->pd);
}
static inline void latency_stat_init(struct iolatency_grp *iolat,
                                     struct latency_stat *stat)
{
        if (iolat->ssd) {
                stat->ps.total = 0;
                stat->ps.missed = 0;
        } else
                blk_rq_stat_init(&stat->rqs);
}
static inline void latency_stat_sum(struct iolatency_grp *iolat,
                                    struct latency_stat *sum,
                                    struct latency_stat *stat)
{
        if (iolat->ssd) {
                sum->ps.total += stat->ps.total;
                sum->ps.missed += stat->ps.missed;
        } else
                blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}
static inline void latency_stat_record_time(struct iolatency_grp *iolat,
                                            u64 req_time)
{
        struct latency_stat *stat = get_cpu_ptr(iolat->stats);

        if (iolat->ssd) {
                if (req_time >= iolat->min_lat_nsec)
                        stat->ps.missed++;
                stat->ps.total++;
        } else
                blk_rq_stat_add(&stat->rqs, req_time);
        put_cpu_ptr(stat);
}
static inline bool latency_sum_ok(struct iolatency_grp *iolat,
                                  struct latency_stat *stat)
{
        if (iolat->ssd) {
                u64 thresh = div64_u64(stat->ps.total, 10);

                return stat->ps.missed < thresh;
        }
        return stat->rqs.mean <= iolat->min_lat_nsec;
}
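/*
 * Editor's note (illustration): on SSDs a window is "ok" when fewer than
 * one tenth of its IOs missed the target, e.g. with ps.total == 1000 the
 * window passes while ps.missed stays below 100. On rotational devices
 * the window's mean latency itself is compared against min_lat_nsec.
 */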
static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
                                       struct latency_stat *stat)
{
        if (iolat->ssd)
                return stat->ps.total;
        return stat->rqs.nr_samples;
}
static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
                                              struct latency_stat *stat)
{
        int exp_idx;

        if (iolat->ssd)
                return;
        /*
         * Because we are using this for IO time in ns, the values stored
         * are significantly larger than the FIXED_1 denominator (2048), so
         * rounding errors in calc_load() are negligible.
         */
        exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
                        div64_u64(iolat->cur_win_nsec,
                                  BLKIOLATENCY_EXP_BUCKET_SIZE));
        iolat->lat_avg = calc_load(iolat->lat_avg,
                                   iolatency_exp_factors[exp_idx],
                                   stat->rqs.mean);
}
static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
        atomic_dec(&rqw->inflight);
        wake_up(&rqw->wait);
}

static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
        struct iolatency_grp *iolat = private_data;

        return rq_wait_inc_below(rqw, iolat->max_depth);
}
static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
                                       struct iolatency_grp *iolat,
                                       bool issue_as_root,
                                       bool use_memdelay)
{
        struct rq_wait *rqw = &iolat->rq_wait;
        unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);

        if (use_delay)
                blkcg_schedule_throttle(rqos->disk, use_memdelay);
        /* take a slot directly when issuing as root or when dying */
        if (issue_as_root || fatal_signal_pending(current)) {
                atomic_inc(&rqw->inflight);
                return;
        }
        rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
}
static void scale_cookie_change(struct blk_iolatency *blkiolat,
                                struct child_latency_info *lat_info,
                                bool up)
{
        unsigned long qd = blkiolat->rqos.disk->queue->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = atomic_read(&lat_info->scale_cookie);
        unsigned long diff = 0;

        if (old < DEFAULT_SCALE_COOKIE)
                diff = DEFAULT_SCALE_COOKIE - old;

        if (up) {
                if (scale + old > DEFAULT_SCALE_COOKIE)
                        atomic_set(&lat_info->scale_cookie,
                                   DEFAULT_SCALE_COOKIE);
                else if (diff > qd)
                        atomic_inc(&lat_info->scale_cookie);
                else
                        atomic_add(scale, &lat_info->scale_cookie);
        } else {
                /* ... */
                if (diff > qd)
                        atomic_dec(&lat_info->scale_cookie);
                else
                        atomic_sub(scale, &lat_info->scale_cookie);
        }
}
static void scale_change(struct iolatency_grp *iolat, bool up)
{
        unsigned long qd = iolat->blkiolat->rqos.disk->queue->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = iolat->max_depth;

        if (old > qd)
                old = qd;

        if (up) {
                if (old < qd) {
                        old += scale;
                        old = min(old, qd);
                        iolat->max_depth = old;
                        wake_up_all(&iolat->rq_wait.wait);
                }
        } else {
                old >>= 1;
                iolat->max_depth = max(old, 1UL);
        }
}
static void check_scale_change(struct iolatency_grp *iolat)
{
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
        unsigned int cur_cookie;
        u64 scale_lat;
        int direction = 0;

        parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;
        cur_cookie = atomic_read(&lat_info->scale_cookie);
        scale_lat = READ_ONCE(lat_info->scale_lat);

        if (cur_cookie < our_cookie)
                direction = -1;
        else if (cur_cookie > our_cookie)
                direction = 1;
        else
                return;

        if (!atomic_try_cmpxchg(&iolat->scale_cookie, &our_cookie, cur_cookie)) {
                /* Somebody beat us to the punch, just bail. */
                return;
        }

        if (direction < 0 && iolat->min_lat_nsec) {
                u64 samples_thresh;

                if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
                        return;
                /* don't scale down on behalf of groups with few samples */
                samples_thresh = lat_info->nr_samples * 5;
                samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
                if (iolat->nr_samples <= samples_thresh)
                        return;
        }

        /* we're as low as we can go */
        if (iolat->max_depth == 1 && direction < 0) {
                blkcg_use_delay(lat_to_blkg(iolat));
                return;
        }

        /* back at the default cookie, unthrottle all the things */
        if (cur_cookie == DEFAULT_SCALE_COOKIE) {
                blkcg_clear_delay(lat_to_blkg(iolat));
                iolat->max_depth = UINT_MAX;
                wake_up_all(&iolat->rq_wait.wait);
                return;
        }

        scale_change(iolat, direction > 0);
}
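/*
 * Editor's note on the cookie mechanism (summary, not from the source):
 * each parent keeps child_lat.scale_cookie, initialized to
 * DEFAULT_SCALE_COOKIE, and every child caches its own copy in
 * ->scale_cookie. When a child finds its cached copy lagging the parent's
 * value, it scales its queue depth in the direction the cookie moved, so
 * one scaling event propagates a single step to all sibling groups.
 */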
static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        struct blkcg_gq *blkg = bio->bi_blkg;
        bool issue_as_root = bio_issue_as_root_blkg(bio);

        if (!blkiolat->enabled)
                return;

        while (blkg && blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg);

                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }

                check_scale_change(iolat);
                __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
                                           (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
                blkg = blkg->parent;
        }
        if (!timer_pending(&blkiolat->timer))
                mod_timer(&blkiolat->timer, jiffies + HZ);
}
static void iolatency_record_time(struct iolatency_grp *iolat,
                                  struct bio_issue *issue, u64 now,
                                  bool issue_as_root)
{
        u64 start = bio_issue_time(issue);
        u64 req_time;

        if (now <= start)
                return;
        req_time = now - start;

        /*
         * Don't count issue_as_root bio's in the cgroup's latency stats;
         * instead punish fast completions with artificial delay.
         */
        if (unlikely(issue_as_root && iolat->max_depth != UINT_MAX)) {
                u64 sub = iolat->min_lat_nsec;

                if (req_time < sub)
                        blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
                return;
        }

        latency_stat_record_time(iolat, req_time);
}
static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        struct latency_stat stat;
        unsigned long flags;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *s;

                s = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, s);
                latency_stat_init(iolat, s);
        }
        preempt_enable();

        parent = blkg_to_lat(blkg->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;

        iolat_update_total_lat_avg(iolat, &stat);

        /* everything is ok and we don't need to adjust the scale */
        if (latency_sum_ok(iolat, &stat) &&
            atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
                return;

        spin_lock_irqsave(&lat_info->lock, flags);

        latency_stat_sum(iolat, &iolat->cur_stat, &stat);
        lat_info->nr_samples -= iolat->nr_samples;
        lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
        iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

        if ((lat_info->last_scale_event >= now ||
             now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
                goto out;

        if (latency_sum_ok(iolat, &iolat->cur_stat) &&
            latency_sum_ok(iolat, &stat)) {
                if (latency_stat_samples(iolat, &iolat->cur_stat) <
                    BLKIOLATENCY_MIN_GOOD_SAMPLES)
                        goto out;
                if (lat_info->scale_grp == iolat) {
                        lat_info->last_scale_event = now;
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                }
        } else if (lat_info->scale_lat == 0 ||
                   lat_info->scale_lat >= iolat->min_lat_nsec) {
                lat_info->last_scale_event = now;
                if (!lat_info->scale_grp ||
                    lat_info->scale_lat > iolat->min_lat_nsec) {
                        WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
                        lat_info->scale_grp = iolat;
                }
                scale_cookie_change(iolat->blkiolat, lat_info, false);
        }
        latency_stat_init(iolat, &iolat->cur_stat);
out:
        spin_unlock_irqrestore(&lat_info->lock, flags);
}
static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
        struct blkcg_gq *blkg;
        struct rq_wait *rqw;
        struct iolatency_grp *iolat;
        u64 window_start;
        u64 now;
        bool issue_as_root = bio_issue_as_root_blkg(bio);
        int inflight = 0;

        blkg = bio->bi_blkg;
        if (!blkg)
                return;

        iolat = blkg_to_lat(bio->bi_blkg);
        if (!iolat)
                return;

        if (!iolat->blkiolat->enabled)
                return;

        now = blk_time_get_ns();
        while (blkg && blkg->parent) {
                iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }
                rqw = &iolat->rq_wait;

                inflight = atomic_dec_return(&rqw->inflight);
                WARN_ON_ONCE(inflight < 0);
                /* a bio that failed with BLK_STS_AGAIN was never submitted */
                if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
                        iolatency_record_time(iolat, &bio->bi_issue, now,
                                              issue_as_root);
                        window_start = atomic64_read(&iolat->window_start);
                        if (now > window_start &&
                            (now - window_start) >= iolat->cur_win_nsec) {
                                if (atomic64_try_cmpxchg(&iolat->window_start,
                                                         &window_start, now))
                                        iolatency_check_latencies(iolat, now);
                        }
                }
                wake_up(&rqw->wait);
                blkg = blkg->parent;
        }
}
static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

        timer_shutdown_sync(&blkiolat->timer);
        flush_work(&blkiolat->enable_work);
        blkcg_deactivate_policy(rqos->disk, &blkcg_policy_iolatency);
        kfree(blkiolat);
}
static void blkiolatency_timer_fn(struct timer_list *t)
{
        struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
        struct blkcg_gq *blkg;
        struct cgroup_subsys_state *pos_css;
        u64 now = blk_time_get_ns();

        rcu_read_lock();
        blkg_for_each_descendant_pre(blkg, pos_css,
                                     blkiolat->rqos.disk->queue->root_blkg) {
                struct iolatency_grp *iolat;
                struct child_latency_info *lat_info;
                unsigned long flags;
                u64 cookie;

                /*
                 * We could be exiting, don't access the pd unless we have a
                 * ref on the blkg.
                 */
                if (!blkg_tryget(blkg))
                        continue;

                iolat = blkg_to_lat(blkg);
                if (!iolat)
                        goto next;

                lat_info = &iolat->child_lat;
                cookie = atomic_read(&lat_info->scale_cookie);

                if (cookie >= DEFAULT_SCALE_COOKIE)
                        goto next;

                spin_lock_irqsave(&lat_info->lock, flags);
                if (lat_info->last_scale_event >= now)
                        goto next_lock;

                /* we scaled down but have no scale_grp; scale up and carry on */
                if (lat_info->scale_grp == NULL) {
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                        goto next_lock;
                }

                /* clear a stale scale_grp after 5 seconds of quiet */
                if (now - lat_info->last_scale_event >=
                    ((u64)NSEC_PER_SEC * 5))
                        lat_info->scale_grp = NULL;
next_lock:
                spin_unlock_irqrestore(&lat_info->lock, flags);
next:
                blkg_put(blkg);
        }
        rcu_read_unlock();
}
/**
 * blkiolatency_enable_work_fn - Enable or disable iolatency on the device
 * @work: enable_work of the blk_iolatency of interest
 *
 * iolatency needs to keep track of the number of in-flight IOs per cgroup. This
 * is relatively expensive, so when iolatency is not in use on any cgroup we
 * want to disable the in-flight tracking.
 *
 * We have to make sure that the counting is balanced - we don't want to leak
 * the in-flight counts by disabling accounting in the completion path while IOs
 * are still in flight. This is achieved by ensuring that no IO is in flight by
 * freezing the queue while flipping ->enabled. As this requires a sleepable
 * context, ->enabled flipping is punted to this work function.
 */
static void blkiolatency_enable_work_fn(struct work_struct *work)
{
        struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency,
                                                      enable_work);
        bool enabled;

        /*
         * There can only be one instance of this function running for @blkiolat
         * and it's guaranteed to be executed at least once after the latest
         * ->enable_cnt modification. Acting on the latest ->enable_cnt is
         * sufficient.
         *
         * Also, we know @blkiolat is safe to access as ->enable_work is flushed
         * in blkcg_iolatency_exit().
         */
        enabled = atomic_read(&blkiolat->enable_cnt);
        if (enabled != blkiolat->enabled) {
                unsigned int memflags;

                memflags = blk_mq_freeze_queue(blkiolat->rqos.disk->queue);
                blkiolat->enabled = enabled;
                blk_mq_unfreeze_queue(blkiolat->rqos.disk->queue, memflags);
        }
}
static int blk_iolatency_init(struct gendisk *disk)
{
        struct blk_iolatency *blkiolat;
        int ret;

        blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
        if (!blkiolat)
                return -ENOMEM;

        ret = rq_qos_add(&blkiolat->rqos, disk, RQ_QOS_LATENCY,
                         &blkcg_iolatency_ops);
        if (ret)
                goto err_free;
        ret = blkcg_activate_policy(disk, &blkcg_policy_iolatency);
        if (ret)
                goto err_qos_del;

        timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
        INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn);

        return 0;

err_qos_del:
        rq_qos_del(&blkiolat->rqos);
err_free:
        kfree(blkiolat);
        return ret;
}
static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
        struct iolatency_grp *iolat = blkg_to_lat(blkg);
        struct blk_iolatency *blkiolat = iolat->blkiolat;
        u64 oldval = iolat->min_lat_nsec;

        iolat->min_lat_nsec = val;
        iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
        iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
                                    BLKIOLATENCY_MAX_WIN_SIZE);

        if (!oldval && val) {
                if (atomic_inc_return(&blkiolat->enable_cnt) == 1)
                        schedule_work(&blkiolat->enable_work);
        }
        if (oldval && !val) {
                blkcg_clear_delay(blkg);
                if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
                        schedule_work(&blkiolat->enable_work);
        }
}
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
        if (blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
                struct child_latency_info *lat_info;

                if (!iolat)
                        return;

                lat_info = &iolat->child_lat;
                spin_lock(&lat_info->lock);
                atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
                lat_info->last_scale_event = 0;
                lat_info->scale_grp = NULL;
                lat_info->scale_lat = 0;
                spin_unlock(&lat_info->lock);
        }
}
static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
                                   size_t nbytes, loff_t off)
{
        /* ... parse the "MAJ:MIN target=<usec>" buffer via blkg_conf_*() ... */
        lockdep_assert_held(&ctx.bdev->bd_queue->rq_qos_mutex);
        if (!iolat_rq_qos(ctx.bdev->bd_queue))
                ret = blk_iolatency_init(ctx.bdev->bd_disk);
        /* ... on malformed input: */
        ret = -EINVAL;
        /* ... apply the new target and reset scaling if it changed: */
        oldval = iolat->min_lat_nsec;
        iolatency_set_min_lat_nsec(blkg, lat_val);
        if (oldval != iolat->min_lat_nsec)
                iolatency_clear_scaling(blkg);
        /* ... */
}
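/*
 * Editor's note: the file written here is the cgroup v2 io.latency
 * interface, e.g. (device numbers illustrative):
 *
 *   echo "8:16 target=2000" > /sys/fs/cgroup/<grp>/io.latency
 *
 * sets a 2ms latency target for device 8:16; target=0 clears the target
 * and disables throttling for that device.
 */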
static u64 iolatency_prfill_limit(struct seq_file *sf,
                                  struct blkg_policy_data *pd, int off)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname || !iolat->min_lat_nsec)
                return 0;
        seq_printf(sf, "%s target=%llu\n",
                   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
        return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          iolatency_prfill_limit,
                          &blkcg_policy_iolatency, seq_cft(sf)->private, false);
        return 0;
}
static void iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
{
        struct latency_stat stat;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *s;

                s = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, s);
        }
        preempt_enable();

        if (iolat->max_depth == UINT_MAX)
                seq_printf(s, " missed=%llu total=%llu depth=max",
                           (unsigned long long)stat.ps.missed,
                           (unsigned long long)stat.ps.total);
        else
                seq_printf(s, " missed=%llu total=%llu depth=%u",
                           (unsigned long long)stat.ps.missed,
                           (unsigned long long)stat.ps.total,
                           iolat->max_depth);
}
static void iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        unsigned long long avg_lat;
        unsigned long long cur_win;

        if (!blkcg_debug_stats)
                return;

        if (iolat->ssd)
                return iolatency_ssd_stat(iolat, s);

        avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
        cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
        if (iolat->max_depth == UINT_MAX)
                seq_printf(s, " depth=max avg_lat=%llu win=%llu",
                           avg_lat, cur_win);
        else
                seq_printf(s, " depth=%u avg_lat=%llu win=%llu",
                           iolat->max_depth, avg_lat, cur_win);
}
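/*
 * Editor's note: these fields are appended to the cgroup's io.stat line
 * when debug stats are enabled; avg_lat is reported in microseconds and
 * win in milliseconds, e.g. (values illustrative):
 *
 *   8:16 ... depth=32 avg_lat=412 win=100
 */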
static struct blkg_policy_data *iolatency_pd_alloc(struct gendisk *disk,
                                                   struct blkcg *blkcg, gfp_t gfp)
{
        struct iolatency_grp *iolat;

        iolat = kzalloc_node(sizeof(*iolat), gfp, disk->node_id);
        if (!iolat)
                return NULL;
        iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
                                          __alignof__(struct latency_stat), gfp);
        if (!iolat->stats) {
                kfree(iolat);
                return NULL;
        }
        return &iolat->pd;
}
static void iolatency_pd_init(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct rq_qos *rqos = iolat_rq_qos(blkg->q);
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        u64 now = blk_time_get_ns();
        int cpu;

        if (blk_queue_nonrot(blkg->q))
                iolat->ssd = true;
        else
                iolat->ssd = false;

        for_each_possible_cpu(cpu) {
                struct latency_stat *stat;

                stat = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_init(iolat, stat);
        }

        latency_stat_init(iolat, &iolat->cur_stat);
        rq_wait_init(&iolat->rq_wait);
        spin_lock_init(&iolat->child_lat.lock);
        iolat->max_depth = UINT_MAX;
        iolat->blkiolat = blkiolat;
        iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
        atomic64_set(&iolat->window_start, now);

        /*
         * We init things in list order, so the pd for the parent may not be
         * init'ed yet for whatever reason.
         */
        if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
                struct iolatency_grp *parent = blkg_to_lat(blkg->parent);

                atomic_set(&iolat->scale_cookie,
                           atomic_read(&parent->child_lat.scale_cookie));
        } else {
                atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
        }

        atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}
static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);

        iolatency_set_min_lat_nsec(blkg, 0);
        iolatency_clear_scaling(blkg);
}
static void iolatency_pd_free(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);

        free_percpu(iolat->stats);
        kfree(iolat);
}