Lines Matching full:rw

91 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)  in tg_bps_limit()  argument
98 return tg->bps[rw]; in tg_bps_limit()
101 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw) in tg_iops_limit() argument
108 return tg->iops[rw]; in tg_iops_limit()
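
These matches are from the block layer's throttling code (blk-throttle), where every per-direction field is a two-element array indexed by rw: READ is 0 and WRITE is 1, matching bio_data_dir(). A limit of U64_MAX or UINT_MAX encodes "no limit". Below is a minimal userspace model of the limit accessors; the struct and function names are hypothetical stand-ins, and the later sketches in this listing reuse these definitions.

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <limits.h>

#define READ  0
#define WRITE 1

struct tg_model {
	uint64_t bps[2];	/* bytes/s per direction, UINT64_MAX = no limit */
	unsigned int iops[2];	/* IOs/s per direction, UINT_MAX = no limit */
};

static uint64_t model_bps_limit(struct tg_model *tg, int rw)
{
	return tg->bps[rw];
}

static unsigned int model_iops_limit(struct tg_model *tg, int rw)
{
	return tg->iops[rw];
}
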
241 int rw; in throtl_pd_alloc() local
255 for (rw = READ; rw <= WRITE; rw++) { in throtl_pd_alloc()
256 throtl_qnode_init(&tg->qnode_on_self[rw], tg); in throtl_pd_alloc()
257 throtl_qnode_init(&tg->qnode_on_parent[rw], tg); in throtl_pd_alloc()
310 int rw; in tg_update_has_rules() local
312 for (rw = READ; rw <= WRITE; rw++) { in tg_update_has_rules()
313 tg->has_rules_iops[rw] = in tg_update_has_rules()
314 (parent_tg && parent_tg->has_rules_iops[rw]) || in tg_update_has_rules()
315 tg_iops_limit(tg, rw) != UINT_MAX; in tg_update_has_rules()
316 tg->has_rules_bps[rw] = in tg_update_has_rules()
317 (parent_tg && parent_tg->has_rules_bps[rw]) || in tg_update_has_rules()
318 tg_bps_limit(tg, rw) != U64_MAX; in tg_update_has_rules()
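
tg_update_has_rules propagates limits down the cgroup hierarchy: a group "has rules" in a direction if it sets a limit itself or if any ancestor does, which is what forces bios to be queued at every throttled level. A sketch of the same logic over a hypothetical parent pointer, reusing the model above:

struct tg_node {
	struct tg_node *parent;		/* NULL at the root */
	struct tg_model limits;
	bool has_rules_bps[2];
	bool has_rules_iops[2];
};

static void model_update_has_rules(struct tg_node *tg)
{
	struct tg_node *p = tg->parent;

	for (int rw = READ; rw <= WRITE; rw++) {
		tg->has_rules_iops[rw] =
			(p && p->has_rules_iops[rw]) ||
			model_iops_limit(&tg->limits, rw) != UINT_MAX;
		tg->has_rules_bps[rw] =
			(p && p->has_rules_bps[rw]) ||
			model_bps_limit(&tg->limits, rw) != UINT64_MAX;
	}
}
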
477 bool rw, unsigned long start) in throtl_start_new_slice_with_credit() argument
479 tg->bytes_disp[rw] = 0; in throtl_start_new_slice_with_credit()
480 tg->io_disp[rw] = 0; in throtl_start_new_slice_with_credit()
481 tg->carryover_bytes[rw] = 0; in throtl_start_new_slice_with_credit()
482 tg->carryover_ios[rw] = 0; in throtl_start_new_slice_with_credit()
490 if (time_after(start, tg->slice_start[rw])) in throtl_start_new_slice_with_credit()
491 tg->slice_start[rw] = start; in throtl_start_new_slice_with_credit()
493 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice_with_credit()
496 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice_with_credit()
497 tg->slice_end[rw], jiffies); in throtl_start_new_slice_with_credit()
500 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw, in throtl_start_new_slice() argument
503 tg->bytes_disp[rw] = 0; in throtl_start_new_slice()
504 tg->io_disp[rw] = 0; in throtl_start_new_slice()
505 tg->slice_start[rw] = jiffies; in throtl_start_new_slice()
506 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice()
508 tg->carryover_bytes[rw] = 0; in throtl_start_new_slice()
509 tg->carryover_ios[rw] = 0; in throtl_start_new_slice()
514 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice()
515 tg->slice_end[rw], jiffies); in throtl_start_new_slice()
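
Throttling is accounted in time slices. Starting a new slice zeroes the per-direction dispatch counters; throtl_start_new_slice takes a clear_carryover flag (the third argument visible in the tg_may_dispatch matches further down), while the _with_credit variant only ever moves slice_start forward, so time a bio already spent waiting lower in the hierarchy still counts as accrued budget. A userspace sketch, with jiffies and time_after() as stand-ins for the kernel's tick and time helpers; the clear_carryover guard is inferred from the call sites, since the listing elides the surrounding branches:

unsigned long jiffies;			/* stand-in for the kernel tick counter */
#define time_after(a, b)	((long)((b) - (a)) < 0)

struct slice {
	unsigned long start, end;	/* slice window in jiffies */
	uint64_t bytes_disp;		/* bytes dispatched in this slice */
	unsigned int io_disp;		/* IOs dispatched in this slice */
	long long carryover_bytes;	/* credit (+) or debt (-) across config changes */
	int carryover_ios;
};

/* Plain restart: budget begins accruing from "now". */
static void slice_start_new(struct slice *s, unsigned long len, bool clear_carryover)
{
	s->bytes_disp = 0;
	s->io_disp = 0;
	s->start = jiffies;
	s->end = jiffies + len;
	if (clear_carryover) {
		s->carryover_bytes = 0;
		s->carryover_ios = 0;
	}
}

/* Restart with credit: slice_start only moves forward, so time already
 * spent waiting (e.g. in a child group) still counts toward the budget. */
static void slice_start_with_credit(struct slice *s, unsigned long len,
				    unsigned long start)
{
	s->bytes_disp = 0;
	s->io_disp = 0;
	s->carryover_bytes = 0;
	s->carryover_ios = 0;
	if (time_after(start, s->start))	/* never move the start backward */
		s->start = start;
	s->end = jiffies + len;
}
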
518 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw, in throtl_set_slice_end() argument
521 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_set_slice_end()
524 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, in throtl_extend_slice() argument
527 throtl_set_slice_end(tg, rw, jiffy_end); in throtl_extend_slice()
530 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_extend_slice()
531 tg->slice_end[rw], jiffies); in throtl_extend_slice()
535 static bool throtl_slice_used(struct throtl_grp *tg, bool rw) in throtl_slice_used() argument
537 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) in throtl_slice_used()
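
A slice is "used up" once the current time falls outside [slice_start, slice_end], and extending a slice always rounds the new end up to a whole multiple of the slice length, so that trimming (below) can give back budget in whole slice units. Continuing the sketch with userspace versions of the kernel's time macros:

#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)
#define time_before(a, b)	((long)((a) - (b)) < 0)
#define time_in_range(a, b, c)	(time_after_eq(a, b) && time_after_eq(c, a))
#define roundup(x, y)		((((x) + (y) - 1) / (y)) * (y))

static bool slice_used(const struct slice *s)
{
	return !time_in_range(jiffies, s->start, s->end);
}

static void slice_set_end(struct slice *s, unsigned long jiffy_end,
			  unsigned long len)
{
	s->end = roundup(jiffy_end, len);	/* whole slice units */
}
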
579 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) in throtl_trim_slice() argument
585 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); in throtl_trim_slice()
592 if (throtl_slice_used(tg, rw)) in throtl_trim_slice()
602 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); in throtl_trim_slice()
604 time_elapsed = rounddown(jiffies - tg->slice_start[rw], in throtl_trim_slice()
619 bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw), in throtl_trim_slice()
621 tg->carryover_bytes[rw]; in throtl_trim_slice()
622 io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) + in throtl_trim_slice()
623 tg->carryover_ios[rw]; in throtl_trim_slice()
627 tg->carryover_bytes[rw] = 0; in throtl_trim_slice()
628 if ((long long)tg->bytes_disp[rw] >= bytes_trim) in throtl_trim_slice()
629 tg->bytes_disp[rw] -= bytes_trim; in throtl_trim_slice()
631 tg->bytes_disp[rw] = 0; in throtl_trim_slice()
633 tg->carryover_ios[rw] = 0; in throtl_trim_slice()
634 if ((int)tg->io_disp[rw] >= io_trim) in throtl_trim_slice()
635 tg->io_disp[rw] -= io_trim; in throtl_trim_slice()
637 tg->io_disp[rw] = 0; in throtl_trim_slice()
639 tg->slice_start[rw] += time_elapsed; in throtl_trim_slice()
643 rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice, in throtl_trim_slice()
644 bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw], in throtl_trim_slice()
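
throtl_trim_slice keeps a long-running slice anchored near the present: it rounds the elapsed time down to whole slice units, computes how many bytes and IOs that much time would have allowed (folding in any carryover), subtracts that from the dispatched counters, and advances slice_start. Without this, a group that idled for a while could burst far past its rate. A sketch under the assumption of finite limits (the kernel special-cases the "no limit" values and uses 64-bit division helpers):

#define HZ 1000UL		/* assume 1 kHz ticks for the model */
#define rounddown(x, y)	(((x) / (y)) * (y))

static uint64_t bytes_allowed(uint64_t bps, unsigned long elapsed)
{
	return bps * elapsed / HZ;
}

static unsigned int io_allowed(unsigned int iops, unsigned long elapsed)
{
	return (uint64_t)iops * elapsed / HZ;
}

static void slice_trim(struct slice *s, unsigned long len,
		       uint64_t bps, unsigned int iops)
{
	unsigned long elapsed;
	long long bytes_trim;
	int io_trim;

	if (slice_used(s))	/* expired slices get restarted, not trimmed */
		return;

	slice_set_end(s, jiffies + len, len);

	elapsed = rounddown(jiffies - s->start, len);
	if (!elapsed)		/* less than one whole slice: nothing to do */
		return;

	bytes_trim = bytes_allowed(bps, elapsed) + s->carryover_bytes;
	io_trim = io_allowed(iops, elapsed) + s->carryover_ios;
	if (bytes_trim <= 0 && io_trim <= 0)
		return;

	s->carryover_bytes = 0;
	if ((long long)s->bytes_disp >= bytes_trim)
		s->bytes_disp -= bytes_trim;
	else
		s->bytes_disp = 0;

	s->carryover_ios = 0;
	if ((int)s->io_disp >= io_trim)
		s->io_disp -= io_trim;
	else
		s->io_disp = 0;

	s->start += elapsed;	/* the window slides forward */
}
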
648 static void __tg_update_carryover(struct throtl_grp *tg, bool rw) in __tg_update_carryover() argument
650 unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw]; in __tg_update_carryover()
651 u64 bps_limit = tg_bps_limit(tg, rw); in __tg_update_carryover()
652 u32 iops_limit = tg_iops_limit(tg, rw); in __tg_update_carryover()
661 tg->carryover_bytes[rw] += in __tg_update_carryover()
663 tg->bytes_disp[rw]; in __tg_update_carryover()
665 tg->carryover_ios[rw] += in __tg_update_carryover()
667 tg->io_disp[rw]; in __tg_update_carryover()
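
__tg_update_carryover runs when limits are reconfigured: whatever the old limit would have allowed in the elapsed part of the slice, minus what was actually dispatched, is folded into carryover (positive means unused credit, negative means debt), so changing a limit neither forgives past overshoot nor double-charges it. In the model:

static void update_carryover(struct slice *s, uint64_t bps, unsigned int iops)
{
	unsigned long elapsed = jiffies - s->start;

	if (bps != UINT64_MAX)
		s->carryover_bytes +=
			(long long)(bytes_allowed(bps, elapsed) - s->bytes_disp);
	if (iops != UINT_MAX)
		s->carryover_ios +=
			(int)(io_allowed(iops, elapsed) - s->io_disp);
}
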
686 bool rw = bio_data_dir(bio); in tg_within_iops_limit() local
694 jiffy_elapsed = jiffies - tg->slice_start[rw]; in tg_within_iops_limit()
699 tg->carryover_ios[rw]; in tg_within_iops_limit()
700 if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed) in tg_within_iops_limit()
714 bool rw = bio_data_dir(bio); in tg_within_bps_limit() local
725 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_within_bps_limit()
733 tg->carryover_bytes[rw]; in tg_within_bps_limit()
734 if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed) in tg_within_bps_limit()
738 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed; in tg_within_bps_limit()
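
The two admission checks share one shape: round the elapsed slice time up (so a brand-new slice still grants one slice's worth of budget), add carryover, and compare the would-be dispatch total against the allowance. When a bio does not fit, tg_within_bps_limit additionally estimates how long the overshoot takes to drain at the configured rate. Approximate sketches with hypothetical names; the kernel's rounding of the wait differs in small details:

static unsigned long iops_wait(const struct slice *s, unsigned int iops,
			       unsigned long len)
{
	unsigned long raw = jiffies - s->start;
	unsigned long rnd = roundup(raw + 1, len);
	int allowed = io_allowed(iops, rnd) + s->carryover_ios;

	if (allowed > 0 && s->io_disp + 1 <= (unsigned int)allowed)
		return 0;
	return rnd - raw;	/* budget refreshes at the next slice boundary */
}

static unsigned long bps_wait(const struct slice *s, uint64_t bps,
			      unsigned long len, uint64_t bio_size)
{
	unsigned long raw = jiffies - s->start;
	unsigned long rnd = raw ? roundup(raw, len) : len;
	long long allowed = bytes_allowed(bps, rnd) + s->carryover_bytes;
	uint64_t extra, wait;

	if (allowed > 0 && s->bytes_disp + bio_size <= (uint64_t)allowed)
		return 0;

	extra = s->bytes_disp + bio_size - allowed;	/* overshoot */
	wait = extra * HZ / bps;
	if (!wait)
		wait = 1;
	return wait + (rnd - raw);	/* repay the round-up as well */
}
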
759 bool rw = bio_data_dir(bio); in tg_may_dispatch() local
761 u64 bps_limit = tg_bps_limit(tg, rw); in tg_may_dispatch()
762 u32 iops_limit = tg_iops_limit(tg, rw); in tg_may_dispatch()
770 BUG_ON(tg->service_queue.nr_queued[rw] && in tg_may_dispatch()
771 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); in tg_may_dispatch()
788 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw])) in tg_may_dispatch()
789 throtl_start_new_slice(tg, rw, true); in tg_may_dispatch()
791 if (time_before(tg->slice_end[rw], in tg_may_dispatch()
793 throtl_extend_slice(tg, rw, in tg_may_dispatch()
810 if (time_before(tg->slice_end[rw], jiffies + max_wait)) in tg_may_dispatch()
811 throtl_extend_slice(tg, rw, jiffies + max_wait); in tg_may_dispatch()
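
tg_may_dispatch combines the two checks: an idle group with nothing queued gets a fresh slice, an active one gets its slice extended to at least one slice length ahead, and a bio may go only when both budgets say yes; otherwise the slice is stretched to cover the longer of the two waits so it is still live when the dispatch timer fires. A sketch over the helpers above (the kernel's nr_queued guard on the restart is elided since this model has no queue):

static bool may_dispatch(struct slice *s, uint64_t bps, unsigned int iops,
			 unsigned long len, uint64_t bio_size,
			 unsigned long *wait)
{
	unsigned long bw, iw, max_wait;

	if (bps == UINT64_MAX && iops == UINT_MAX) {
		*wait = 0;		/* no limits in this direction */
		return true;
	}

	if (slice_used(s))
		slice_start_new(s, len, true);
	else if (time_before(s->end, jiffies + len))
		slice_set_end(s, jiffies + len, len);

	bw = bps_wait(s, bps, len, bio_size);
	iw = iops_wait(s, iops, len);
	if (!bw && !iw) {
		*wait = 0;
		return true;
	}

	max_wait = bw > iw ? bw : iw;
	*wait = max_wait;
	if (time_before(s->end, jiffies + max_wait))
		slice_set_end(s, jiffies + max_wait, len);
	return false;
}
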
818 bool rw = bio_data_dir(bio); in throtl_charge_bio() local
823 tg->bytes_disp[rw] += bio_size; in throtl_charge_bio()
824 tg->last_bytes_disp[rw] += bio_size; in throtl_charge_bio()
827 tg->io_disp[rw]++; in throtl_charge_bio()
828 tg->last_io_disp[rw]++; in throtl_charge_bio()
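
Charging is unconditional and happens when the bio is dispatched, not when it is admitted: a bio larger than the remaining budget still charges in full, which is what drives the counters past the allowance and makes the next bio wait. (The last_bytes_disp/last_io_disp counters seen above are extra bookkeeping, elided in this model.)

static void charge_bio(struct slice *s, uint64_t bio_size)
{
	s->bytes_disp += bio_size;	/* throtl_bio_data_size() in the kernel */
	s->io_disp++;
}
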
844 bool rw = bio_data_dir(bio); in throtl_add_bio_tg() local
847 qn = &tg->qnode_on_self[rw]; in throtl_add_bio_tg()
855 if (!sq->nr_queued[rw]) in throtl_add_bio_tg()
858 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); in throtl_add_bio_tg()
860 sq->nr_queued[rw]++; in throtl_add_bio_tg()
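
Queued bios are not kept in one flat list. Each group owns a qnode per direction (qnode_on_self for its own bios, qnode_on_parent for bios it forwards upward), bios queue FIFO inside their qnode, and the service queue holds a list of qnodes. That indirection is what lets dispatch rotate between source groups instead of draining one busy child first. A pointer-based model of the add path (the kernel uses list_heads):

struct bio_item { struct bio_item *next; };

struct qnode {
	struct qnode *next;		/* position in the service queue */
	struct bio_item *bios_head, *bios_tail;
	bool on_queue;			/* already linked into the service queue? */
};

struct svc_queue {
	struct qnode *head, *tail;	/* FIFO of qnodes, one list per direction */
	unsigned int nr_queued;
};

static void add_bio(struct svc_queue *sq, struct qnode *qn, struct bio_item *bio)
{
	bio->next = NULL;		/* append to this group's own FIFO */
	if (qn->bios_tail)
		qn->bios_tail->next = bio;
	else
		qn->bios_head = bio;
	qn->bios_tail = bio;

	if (!qn->on_queue) {		/* first pending bio: enqueue the qnode */
		qn->next = NULL;
		if (sq->tail)
			sq->tail->next = qn;
		else
			sq->head = qn;
		sq->tail = qn;
		qn->on_queue = true;
	}
	sq->nr_queued++;
}
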
891 struct throtl_grp *parent_tg, bool rw) in start_parent_slice_with_credit() argument
893 if (throtl_slice_used(parent_tg, rw)) { in start_parent_slice_with_credit()
894 throtl_start_new_slice_with_credit(parent_tg, rw, in start_parent_slice_with_credit()
895 child_tg->slice_start[rw]); in start_parent_slice_with_credit()
900 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) in tg_dispatch_one_bio() argument
914 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); in tg_dispatch_one_bio()
915 sq->nr_queued[rw]--; in tg_dispatch_one_bio()
927 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); in tg_dispatch_one_bio()
928 start_parent_slice_with_credit(tg, parent_tg, rw); in tg_dispatch_one_bio()
931 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], in tg_dispatch_one_bio()
932 &parent_sq->queued[rw]); in tg_dispatch_one_bio()
933 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
934 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
937 throtl_trim_slice(tg, rw); in tg_dispatch_one_bio()
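
tg_dispatch_one_bio is one rung of the hierarchy climb: the bio is popped from this group's queue, charged here, then either re-queued at the parent (through this group's qnode_on_parent, with the parent's slice started on credit from the child's slice_start so wait time is not paid twice) or, at the top level, handed off for issue. Afterward the slice is trimmed. The queue manipulation is elided in this sketch:

static void dispatch_one_bio(struct slice *child, struct slice *parent,
			     unsigned long len, uint64_t bio_size,
			     uint64_t bps, unsigned int iops)
{
	charge_bio(child, bio_size);

	/* re-queued upward: credit the parent with the child's start */
	if (parent && slice_used(parent))
		slice_start_with_credit(parent, len, child->start);

	slice_trim(child, len, bps, iops);	/* keep the window fresh */
}
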
1108 int rw; in blk_throtl_dispatch_work_fn() local
1113 for (rw = READ; rw <= WRITE; rw++) in blk_throtl_dispatch_work_fn()
1114 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL))) in blk_throtl_dispatch_work_fn()
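
Bios that clear every level land in the throtl_data service queue, and blk_throtl_dispatch_work_fn drains it from worker context, the READ list first and then WRITE, reissuing each bio. The pop side of the qnode model shows the fairness rotation: taking a bio from the front qnode moves that qnode to the tail if it still has bios, so the next pop serves a different source group.

static struct bio_item *pop_bio(struct svc_queue *sq)
{
	struct qnode *qn = sq->head;
	struct bio_item *bio;

	if (!qn)
		return NULL;

	bio = qn->bios_head;		/* oldest bio of the front qnode */
	qn->bios_head = bio->next;
	if (!qn->bios_head)
		qn->bios_tail = NULL;

	sq->head = qn->next;		/* detach the qnode from the front */
	if (!sq->head)
		sq->tail = NULL;
	qn->next = NULL;

	if (qn->bios_head) {		/* still busy: rotate to the tail */
		if (sq->tail)
			sq->tail->next = qn;
		else
			sq->head = qn;
		sq->tail = qn;
	} else {
		qn->on_queue = false;
	}

	sq->nr_queued--;
	return bio;
}
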
1617 static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw) in tg_within_limit() argument
1620 if (tg->service_queue.nr_queued[rw]) in tg_within_limit()
1626 static void tg_dispatch_in_debt(struct throtl_grp *tg, struct bio *bio, bool rw) in tg_dispatch_in_debt() argument
1629 tg->carryover_bytes[rw] -= throtl_bio_data_size(bio); in tg_dispatch_in_debt()
1630 tg->carryover_ios[rw]--; in tg_dispatch_in_debt()
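
tg_within_limit first refuses any bio when the direction already has queued bios, preserving FIFO order regardless of budget, and only then asks tg_may_dispatch. tg_dispatch_in_debt handles bios that must not be delayed: they are sent immediately and their cost is borrowed from future budget by driving carryover negative, so the group pays the debt off before normal bios flow again.

static void dispatch_in_debt(struct slice *s, uint64_t bio_size)
{
	s->carryover_bytes -= (long long)bio_size;	/* go now, pay later */
	s->carryover_ios--;
}
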
1640 bool rw = bio_data_dir(bio); in __blk_throtl_bio() local
1649 if (tg_within_limit(tg, bio, rw)) { in __blk_throtl_bio()
1664 throtl_trim_slice(tg, rw); in __blk_throtl_bio()
1672 tg_dispatch_in_debt(tg, bio, rw); in __blk_throtl_bio()
1683 qn = &tg->qnode_on_parent[rw]; in __blk_throtl_bio()
1694 rw == READ ? 'R' : 'W', in __blk_throtl_bio()
1695 tg->bytes_disp[rw], bio->bi_iter.bi_size, in __blk_throtl_bio()
1696 tg_bps_limit(tg, rw), in __blk_throtl_bio()
1697 tg->io_disp[rw], tg_iops_limit(tg, rw), in __blk_throtl_bio()
1700 td->nr_queued[rw]++; in __blk_throtl_bio()
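
__blk_throtl_bio ties it together for one level: dispatch and trim when within limit, go into debt when the bio cannot wait, otherwise queue the bio on the group and account it in td->nr_queued until the pending timer releases it. A model of that decision over the sketches above, returning whether the bio went through immediately:

static bool submit_bio_model(struct slice *s, unsigned long len,
			     uint64_t bps, unsigned int iops,
			     uint64_t bio_size, bool must_not_wait)
{
	unsigned long wait;

	if (may_dispatch(s, bps, iops, len, bio_size, &wait)) {
		charge_bio(s, bio_size);
		slice_trim(s, len, bps, iops);
		return true;
	}
	if (must_not_wait) {
		dispatch_in_debt(s, bio_size);
		return true;
	}
	return false;	/* queue the bio; a timer fires after "wait" jiffies */
}
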