// SPDX-License-Identifier: GPL-2.0
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <[email protected]>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup-rwstat.h"
#include "blk-stat.h"
#include "blk-throttle.h"

/* Max dispatch from a group in 1 round */
#define THROTL_GRP_QUANTUM 8

/* Total max dispatch from all groups in one round */
#define THROTL_QUANTUM 32

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
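
/*
 * In wall-clock terms these defaults are independent of CONFIG_HZ:
 * HZ / 10 jiffies is ~100ms, HZ / 50 is ~20ms, and a slice is never
 * stretched beyond ~1s (illustrative conversions, ignoring integer
 * division rounding).
 */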

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;

	bool track_bio_latency;
};

static void throtl_pending_timer_fn(struct timer_list *t);

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	return tg->bps[rw];
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;

	return tg->iops[rw];
}

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		blk_add_cgroup_trace_msg(__td->queue,			\
			&tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)
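
/*
 * Usage sketch for throtl_log() (mirrors the call sites below; output
 * only appears when blktrace message notes are enabled on the queue):
 *
 *	throtl_log(&tg->service_queue, "limit change rbps=%llu",
 *		   tg_bps_limit(tg, READ));
 */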

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* assume it's one sector */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;
	return bio->bi_iter.bi_size;
}

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}
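
/*
 * Example for throtl_pop_queued(): with qnodes A:{a1, a2} and B:{b1} on
 * @queued, successive pops yield a1, b1, a2.  A qnode that still has
 * bios is rotated to the tail, so multiple children share dispatch
 * slots round-robin (hypothetical bios for illustration).
 */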

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[READ]);
	INIT_LIST_HEAD(&sq->queued[WRITE]);
	sq->pending_tree = RB_ROOT_CACHED;
	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}

static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk,
		struct blkcg *blkcg, gfp_t gfp)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, disk->node_id);
	if (!tg)
		return NULL;

	if (blkg_rwstat_init(&tg->stat_bytes, gfp))
		goto err_free_tg;

	if (blkg_rwstat_init(&tg->stat_ios, gfp))
		goto err_exit_stat_bytes;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ] = U64_MAX;
	tg->bps[WRITE] = U64_MAX;
	tg->iops[READ] = UINT_MAX;
	tg->iops[WRITE] = UINT_MAX;

	return &tg->pd;

err_exit_stat_bytes:
	blkg_rwstat_exit(&tg->stat_bytes);
err_free_tg:
	kfree(tg);
	return NULL;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. if a 16M
	 * read_bps limit is set on a parent group, the combined bps of the
	 * parent group and its subtree groups can't exceed 16M for the
	 * device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}

/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	int rw;

	for (rw = READ; rw <= WRITE; rw++) {
		tg->has_rules_iops[rw] =
			(parent_tg && parent_tg->has_rules_iops[rw]) ||
			tg_iops_limit(tg, rw) != UINT_MAX;
		tg->has_rules_bps[rw] =
			(parent_tg && parent_tg->has_rules_bps[rw]) ||
			tg_bps_limit(tg, rw) != U64_MAX;
	}
}
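
/*
 * Illustration for tg_update_has_rules(): if a read bps limit is set on
 * cgroup "a", then descendants "a/b" and "a/b/c" get has_rules_bps[READ]
 * set as they come online even without limits of their own, so their
 * bios still take the throttling path (cgroup names are illustrative).
 */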

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	/*
	 * We don't want new groups to escape the limits of their
	 * ancestors.  Update has_rules[] after a new group is brought
	 * online.
	 */
	tg_update_has_rules(tg);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	blkg_rwstat_exit(&tg->stat_bytes);
	blkg_rwstat_exit(&tg->stat_ios);
	kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	struct rb_node *n;

	n = rb_first_cached(&parent_sq->pending_tree);
	WARN_ON_ONCE(!n);
	if (!n)
		return NULL;
	return rb_entry_tg(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	rb_erase_cached(n, &parent_sq->pending_tree);
	RB_CLEAR_NODE(n);
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	bool leftmost = true;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
			       leftmost);
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING)) {
		tg_service_queue_add(tg);
		tg->flags |= THROTL_TG_PENDING;
		tg->service_queue.parent_sq->nr_pending++;
	}
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING) {
		struct throtl_service_queue *parent_sq =
			tg->service_queue.parent_sq;

		throtl_rb_erase(&tg->rb_node, parent_sq);
		--parent_sq->nr_pending;
		tg->flags &= ~THROTL_TG_PENDING;
	}
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;

	/*
	 * Since the throttle limit can be adjusted dynamically, a sleep
	 * time calculated under the previous limit may become invalid.  A
	 * cgroup could be sleeping for a very long time with no other
	 * cgroups doing IO, so it would miss a limit change.  Cap the
	 * sleep so the cgroup notices the new limit reasonably soon.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either timer
 * is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}

static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
		bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->carryover_bytes[rw] = 0;
	tg->carryover_ios[rw] = 0;

	/*
	 * Previous slice has expired.  We must have trimmed it after the
	 * last bio dispatch, which means the bandwidth since the start of
	 * the last slice was never used.  Try to make use of that
	 * bandwidth by giving credit.
	 */
	if (time_after(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
					  bool clear_carryover)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	if (clear_carryover) {
		tg->carryover_bytes[rw] = 0;
		tg->carryover_ios[rw] = 0;
	}

	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	throtl_set_slice_end(tg, rw, jiffy_end);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

static unsigned int calculate_io_allowed(u32 iops_limit,
					 unsigned long jiffy_elapsed)
{
	unsigned int io_allowed;
	u64 tmp;

	/*
	 * jiffy_elapsed should not be a large value.  The minimum iops
	 * limit is 1, so at most the equivalent of 1 second can elapse
	 * before a dispatch is allowed, and by then the slice should have
	 * been trimmed.
	 */

	tmp = (u64)iops_limit * jiffy_elapsed;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	return io_allowed;
}
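
/*
 * Worked example for calculate_io_allowed(): with iops_limit = 1000 and
 * jiffy_elapsed = HZ / 10 (~100ms), tmp = 1000 * (HZ / 10) / HZ = 100,
 * i.e. up to 100 ios fit into the elapsed window.
 */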

static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
{
	/*
	 * Can result be wider than 64 bits?
	 * We check against 62, not 64, due to ilog2 truncation.
	 */
	if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62)
		return U64_MAX;
	return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
}
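
/*
 * Worked example for calculate_bytes_allowed(): with bps_limit =
 * 1048576 (1 MiB/s) and jiffy_elapsed = HZ / 2 (~500ms), the budget is
 * 1048576 * (HZ / 2) / HZ = 524288 bytes.
 */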

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long time_elapsed;
	long long bytes_trim;
	int io_trim;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed.  Don't try to trim the slice if it is already used up.
	 * A new slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched, so also adjust slice_end.  It might
	 * happen that the cgroup limit was initially very low, resulting
	 * in a high slice_end, and was bumped up later, letting bios
	 * dispatch sooner; slice_end then needs to be reduced.  A bogusly
	 * high slice_end is bad because it prevents a new slice from
	 * starting.
	 */
	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = rounddown(jiffies - tg->slice_start[rw],
				 tg->td->throtl_slice);
	/* Don't trim slice until at least 2 slices are used */
	if (time_elapsed < tg->td->throtl_slice * 2)
		return;

	/*
	 * The bio submission time may be a few jiffies more than the
	 * expected waiting time, because 'extra_bytes' can't be divided
	 * evenly in tg_within_bps_limit() and because of timer wakeup
	 * delay.  In that case, adjusting slice_start would discard the
	 * extra wait time, causing a lower rate than expected.  Therefore,
	 * beyond the rounddown above, one extra slice is preserved to
	 * absorb that deviation.
	 */
	time_elapsed -= tg->td->throtl_slice;
	bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
					     time_elapsed) +
		     tg->carryover_bytes[rw];
	io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
		  tg->carryover_ios[rw];
	if (bytes_trim <= 0 && io_trim <= 0)
		return;

	tg->carryover_bytes[rw] = 0;
	if ((long long)tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	tg->carryover_ios[rw] = 0;
	if ((int)tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += time_elapsed;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
		   bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
		   jiffies);
}
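
/*
 * Example for throtl_trim_slice(): if 5 full slices have elapsed since
 * slice_start, one slice is kept back for rounding/timer deviation and
 * 4 slices' worth of byte/io budget is subtracted from bytes_disp and
 * io_disp (floored at 0), with slice_start advanced by the same 4
 * slices (a sketch of the arithmetic above, assuming zero carryover).
 */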

static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
{
	unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
	u64 bps_limit = tg_bps_limit(tg, rw);
	u32 iops_limit = tg_iops_limit(tg, rw);

	/*
	 * If the config is updated while bios are still throttled,
	 * calculate and accumulate how many bytes/ios were waited for
	 * across the change.  carryover_bytes/ios are then used to
	 * calculate the new wait time under the new configuration.
	 */
	if (bps_limit != U64_MAX)
		tg->carryover_bytes[rw] +=
			calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
			tg->bytes_disp[rw];
	if (iops_limit != UINT_MAX)
		tg->carryover_ios[rw] +=
			calculate_io_allowed(iops_limit, jiffy_elapsed) -
			tg->io_disp[rw];
}

static void tg_update_carryover(struct throtl_grp *tg)
{
	if (tg->service_queue.nr_queued[READ])
		__tg_update_carryover(tg, READ);
	if (tg->service_queue.nr_queued[WRITE])
		__tg_update_carryover(tg, WRITE);

	/* see comments in struct throtl_grp for meaning of these fields. */
	throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
		   tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
		   tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
}

static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
				 u32 iops_limit)
{
	bool rw = bio_data_dir(bio);
	int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	if (iops_limit == UINT_MAX)
		return 0;

	jiffy_elapsed = jiffies - tg->slice_start[rw];

	/* Round up to the next throttle slice, wait time must be nonzero */
	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
	io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
		     tg->carryover_ios[rw];
	if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
		return 0;

	/* Calc approx time to dispatch */
	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;

	/* make sure at least one io can be dispatched after waiting */
	jiffy_wait = max(jiffy_wait, HZ / iops_limit + 1);
	return jiffy_wait;
}
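
/*
 * Example for tg_within_iops_limit(): with iops_limit = 2 the final
 * clamp guarantees a wait of at least HZ / 2 + 1 jiffies (~0.5s), so
 * that at least one io is allowed once the wait elapses.
 */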

static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
				u64 bps_limit)
{
	bool rw = bio_data_dir(bio);
	long long bytes_allowed;
	u64 extra_bytes;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* no need to throttle if this bio's bytes have been accounted */
	if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED))
		return 0;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
	bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
			tg->carryover_bytes[rw];
	if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
		return 0;

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	return jiffy_wait;
}
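
/*
 * Worked example for tg_within_bps_limit(): with bps_limit = 1048576
 * (1 MiB/s) and extra_bytes = 262144 (256 KiB over budget), the base
 * wait is 262144 * HZ / 1048576 = HZ / 4 jiffies (~250ms), plus the
 * round-up gap added at the end.
 */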

/*
 * Returns whether one can dispatch a bio or not.  Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
	u64 bps_limit = tg_bps_limit(tg, rw);
	u32 iops_limit = tg_iops_limit(tg, rw);

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list.  So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
	    tg->flags & THROTL_TG_CANCELING) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice long from now.  A new slice is started only for an
	 * empty throttle group.  If there is a queued bio, there should be
	 * an active slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw, true);
	else {
		if (time_before(tg->slice_end[rw],
		    jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
				jiffies + tg->td->throtl_slice);
	}

	bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
	iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
	if (bps_wait + iops_wait == 0) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* Charge the bio to the group */
	if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
		tg->bytes_disp[rw] += bio_size;
		tg->last_bytes_disp[rw] += bio_size;
	}

	tg->io_disp[rw]++;
	tg->last_io_disp[rw]++;
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->disptime = disptime;
	tg_service_queue_add(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		bio_set_flag(bio, BIO_BPS_THROTTLED);
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
	unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, READ);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, WRITE);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
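
/*
 * With THROTL_GRP_QUANTUM = 8, throtl_dispatch_tg() above caps a round
 * at max_nr_reads = 6 and max_nr_writes = 2, i.e. the 75%/25% split.
 */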

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg;
		struct throtl_service_queue *sq;

		if (!parent_sq->nr_pending)
			break;

		tg = throtl_rb_first(parent_sq);
		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
			tg_update_disptime(tg);
		else
			throtl_dequeue_tg(tg);

		if (nr_disp >= THROTL_QUANTUM)
			break;
	}

	return nr_disp;
}

/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @t: the pending_timer member of the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bios becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bios from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bios are issued.
 */
static void throtl_pending_timer_fn(struct timer_list *t)
{
	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct throtl_service_queue *parent_sq;
	struct request_queue *q;
	bool dispatched;
	int ret;

	/* throtl_data may be gone, so figure out request queue by blkg */
	if (tg)
		q = tg->pd.blkg->q;
	else
		q = td->queue;

	spin_lock_irq(&q->queue_lock);

	if (!q->root_blkg)
		goto out_unlock;

again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(&q->queue_lock);
		cpu_relax();
		spin_lock_irq(&q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(&q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bios reach the bio_lists[]
 * of throtl_data->service_queue.  Those bios are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(&q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(&q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			submit_bio_noacct_nocheck(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == U64_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == UINT_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

	rcu_read_lock();
	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css,
			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
		struct throtl_grp *this_tg = blkg_to_tg(blkg);

		tg_update_has_rules(this_tg);
		/* ignore root/second level */
		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
		    !blkg->parent->parent)
			continue;
	}
	rcu_read_unlock();

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITE.  It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO against the new low rate.
	 */
	throtl_start_new_slice(tg, READ, false);
	throtl_start_new_slice(tg, WRITE, false);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}
}

static int blk_throtl_init(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct throtl_data *td;
	unsigned int memflags;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	/*
	 * Freeze queue before activating policy, to synchronize with IO path,
	 * which is protected by 'q_usage_counter'.
	 */
	memflags = blk_mq_freeze_queue(disk->queue);
	blk_mq_quiesce_queue(disk->queue);

	q->td = td;
	td->queue = q;

	/* activate policy */
	ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
	if (ret) {
		q->td = NULL;
		kfree(td);
		goto out;
	}

	if (blk_queue_nonrot(q))
		td->throtl_slice = DFL_THROTL_SLICE_SSD;
	else
		td->throtl_slice = DFL_THROTL_SLICE_HD;
	td->track_bio_latency = !queue_is_mq(q);
	if (!td->track_bio_latency)
		blk_stat_enable_accounting(q);

out:
	blk_mq_unquiesce_queue(disk->queue);
	blk_mq_unfreeze_queue(disk->queue, memflags);

	return ret;
}

static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;
	u64 v;

	blkg_conf_init(&ctx, buf);

	ret = blkg_conf_open_bdev(&ctx);
	if (ret)
		goto out_finish;

	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
		ret = blk_throtl_init(ctx.bdev->bd_disk);
		if (ret)
			goto out_finish;
	}

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
	if (ret)
		goto out_finish;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;
	if (!v)
		v = U64_MAX;

	tg = blkg_to_tg(ctx.blkg);
	tg_update_carryover(tg);

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;

	tg_conf_updated(tg, false);
	ret = 0;
out_finish:
	blkg_conf_exit(&ctx);
	return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}

static int tg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat, &blkcg_policy_throtl,
			  seq_cft(sf)->private, true);
	return 0;
}

static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
				  &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
			  seq_cft(sf)->private, true);
	return 0;
}

static struct cftype throtl_legacy_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = offsetof(struct throtl_grp, stat_bytes),
		.seq_show = tg_print_rwstat,
	},
	{
		.name = "throttle.io_service_bytes_recursive",
		.private = offsetof(struct throtl_grp, stat_bytes),
		.seq_show = tg_print_rwstat_recursive,
	},
	{
		.name = "throttle.io_serviced",
		.private = offsetof(struct throtl_grp, stat_ios),
		.seq_show = tg_print_rwstat,
	},
	{
		.name = "throttle.io_serviced_recursive",
		.private = offsetof(struct throtl_grp, stat_ios),
		.seq_show = tg_print_rwstat_recursive,
	},
	{ }	/* terminate */
};
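
/*
 * Usage sketch for the legacy (cgroup v1) files above, exposed under
 * the blkio controller (the device number 8:16 is illustrative):
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *	echo "8:16 0" > blkio.throttle.read_bps_device
 *
 * A value of 0 is mapped to U64_MAX by tg_set_conf(), i.e. "no limit".
 */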

static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
			 int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	const char *dname = blkg_dev_name(pd->blkg);
	u64 bps_dft;
	unsigned int iops_dft;

	if (!dname)
		return 0;

	bps_dft = U64_MAX;
	iops_dft = UINT_MAX;

	if (tg->bps[READ] == bps_dft &&
	    tg->bps[WRITE] == bps_dft &&
	    tg->iops[READ] == iops_dft &&
	    tg->iops[WRITE] == iops_dft)
		return 0;

	seq_printf(sf, "%s", dname);
	if (tg->bps[READ] == U64_MAX)
		seq_printf(sf, " rbps=max");
	else
		seq_printf(sf, " rbps=%llu", tg->bps[READ]);

	if (tg->bps[WRITE] == U64_MAX)
		seq_printf(sf, " wbps=max");
	else
		seq_printf(sf, " wbps=%llu", tg->bps[WRITE]);

	if (tg->iops[READ] == UINT_MAX)
		seq_printf(sf, " riops=max");
	else
		seq_printf(sf, " riops=%u", tg->iops[READ]);

	if (tg->iops[WRITE] == UINT_MAX)
		seq_printf(sf, " wiops=max");
	else
		seq_printf(sf, " wiops=%u", tg->iops[WRITE]);

	seq_printf(sf, "\n");
	return 0;
}
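
/*
 * Example line produced by tg_prfill_limit() for a group with only a
 * 1 MiB/s read limit set (device numbers are illustrative):
 *
 *	8:16 rbps=1048576 wbps=max riops=max wiops=max
 */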

static int tg_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static ssize_t tg_set_limit(struct kernfs_open_file *of,
			  char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	int ret;

	blkg_conf_init(&ctx, buf);

	ret = blkg_conf_open_bdev(&ctx);
	if (ret)
		goto out_finish;

	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
		ret = blk_throtl_init(ctx.bdev->bd_disk);
		if (ret)
			goto out_finish;
	}

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
	if (ret)
		goto out_finish;

	tg = blkg_to_tg(ctx.blkg);
	tg_update_carryover(tg);

	v[0] = tg->bps[READ];
	v[1] = tg->bps[WRITE];
	v[2] = tg->iops[READ];
	v[3] = tg->iops[WRITE];

	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = U64_MAX;
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps"))
			v[0] = val;
		else if (!strcmp(tok, "wbps"))
			v[1] = val;
		else if (!strcmp(tok, "riops"))
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops"))
			v[3] = min_t(u64, val, UINT_MAX);
		else
			goto out_finish;
	}

	tg->bps[READ] = v[0];
	tg->bps[WRITE] = v[1];
	tg->iops[READ] = v[2];
	tg->iops[WRITE] = v[3];

	tg_conf_updated(tg, false);
	ret = 0;
out_finish:
	blkg_conf_exit(&ctx);
	return ret ?: nbytes;
}

static struct cftype throtl_files[] = {
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
	},
	{ }	/* terminate */
};
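
/*
 * On the default hierarchy the "max" file above is visible as io.max.
 * A usage sketch (device numbers illustrative):
 *
 *	echo "8:16 rbps=1048576 wiops=120" > io.max
 *	echo "8:16 rbps=max" > io.max
 *
 * Tokens omitted from a write keep their current values, as
 * tg_set_limit() seeds v[] from the existing configuration.
 */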

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static void tg_flush_bios(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;

	if (tg->flags & THROTL_TG_CANCELING)
		return;
	/*
	 * Set the flag to make sure throtl_pending_timer_fn() won't
	 * stop until all throttled bios are dispatched.
	 */
	tg->flags |= THROTL_TG_CANCELING;

	/*
	 * Do not dispatch a cgroup without THROTL_TG_PENDING, or the
	 * cgroup would be inserted into the service queue without
	 * THROTL_TG_PENDING set by tg_update_disptime() below.  IO
	 * dispatched from a child in tg_dispatch_one_bio() would then
	 * trigger a double insertion and corrupt the tree.
	 */
	if (!(tg->flags & THROTL_TG_PENDING))
		return;

	/*
	 * Update disptime after setting the above flag to make sure
	 * throtl_select_dispatch() won't exit without dispatching.
	 */
	tg_update_disptime(tg);

	throtl_schedule_pending_timer(sq, jiffies + 1);
}

static void throtl_pd_offline(struct blkg_policy_data *pd)
{
	tg_flush_bios(pd_to_tg(pd));
}

struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes		= throtl_files,
	.legacy_cftypes		= throtl_legacy_files,

	.pd_alloc_fn		= throtl_pd_alloc,
	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_offline_fn		= throtl_pd_offline,
	.pd_free_fn		= throtl_pd_free,
};

void blk_throtl_cancel_bios(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	if (!blk_throtl_activated(q))
		return;

	spin_lock_irq(&q->queue_lock);
	/*
	 * queue_lock is held, so the rcu lock is technically not needed
	 * here.  However, it is still taken to emphasize that the
	 * following path needs RCU protection and to keep lockdep quiet.
	 */
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
		/*
		 * disk_release will call pd_offline_fn to cancel bios.
		 * However, disk_release can't be called if someone gets
		 * a refcount of the device and issues bios that are
		 * in flight after del_gendisk.
		 * Cancel bios here to ensure none are in flight after
		 * del_gendisk.
		 */
		tg_flush_bios(blkg_to_tg(blkg));
	}
	rcu_read_unlock();
	spin_unlock_irq(&q->queue_lock);
}

static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
{
	/* throtl is FIFO - if bios are already queued, should queue */
	if (tg->service_queue.nr_queued[rw])
		return false;

	return tg_may_dispatch(tg, bio, NULL);
}

static void tg_dispatch_in_debt(struct throtl_grp *tg, struct bio *bio, bool rw)
{
	if (!bio_flagged(bio, BIO_BPS_THROTTLED))
		tg->carryover_bytes[rw] -= throtl_bio_data_size(bio);
	tg->carryover_ios[rw]--;
}

bool __blk_throtl_bio(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;
	struct throtl_data *td = tg->td;

	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);
	sq = &tg->service_queue;

	while (true) {
		if (tg_within_limit(tg, bio, rw)) {
			/* within limits, let's charge and dispatch directly */
			throtl_charge_bio(tg, bio);

			/*
			 * We need to trim the slice even when bios are not
			 * being queued, otherwise it might happen that a bio
			 * is not queued for a long time and the slice keeps
			 * on extending while trim is never called.  If the
			 * limits are then reduced suddenly, all the IO
			 * dispatched so far would be accounted against the
			 * new low rate and newly queued IO would get a
			 * really long dispatch time.
			 *
			 * So keep on trimming the slice even if no bio is
			 * queued.
			 */
			throtl_trim_slice(tg, rw);
		} else if (bio_issue_as_root_blkg(bio)) {
			/*
			 * IOs which may cause priority inversions are
			 * dispatched directly, even if they're over limit.
			 * Debts are handled by carryover_bytes/ios while
			 * calculating wait time.
			 */
			tg_dispatch_in_debt(tg, bio, rw);
		} else {
			/* if above limits, break to queue */
			break;
		}

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg) {
			bio_set_flag(bio, BIO_BPS_THROTTLED);
			goto out_unlock;
		}
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
		   tg_bps_limit(tg, rw),
		   tg->io_disp[rw], tg_iops_limit(tg, rw),
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(&q->queue_lock);

	rcu_read_unlock();
	return throttled;
}
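
/*
 * Decision flow of __blk_throtl_bio() in brief: a bio within limits is
 * charged and passed up the hierarchy; over-limit bios issued as the
 * root blkg are passed through in debt via the carryover counters;
 * anything else is queued on @tg and the dispatch machinery is armed.
 */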

void blk_throtl_exit(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (!blk_throtl_activated(q))
		return;

	del_timer_sync(&q->td->service_queue.pending_timer);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);