Lines matching full:se (uses of the struct sched_entity pointer se in kernel/sched/fair.c)
292 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) in calc_delta_fair() argument
294 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
295 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
309 #define for_each_sched_entity(se) \ argument
310 for (; se; se = se->parent)
412 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group() argument
414 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
415 return se->cfs_rq; in is_same_group()
420 static inline struct sched_entity *parent_entity(const struct sched_entity *se) in parent_entity() argument
422 return se->parent; in parent_entity()
426 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
438 se_depth = (*se)->depth; in find_matching_se()
443 *se = parent_entity(*se); in find_matching_se()
451 while (!is_same_group(*se, *pse)) { in find_matching_se()
452 *se = parent_entity(*se); in find_matching_se()
467 static int se_is_idle(struct sched_entity *se) in se_is_idle() argument
469 if (entity_is_task(se)) in se_is_idle()
470 return task_has_idle_policy(task_of(se)); in se_is_idle()
471 return cfs_rq_is_idle(group_cfs_rq(se)); in se_is_idle()
476 #define for_each_sched_entity(se) \ argument
477 for (; se; se = NULL)
495 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
501 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
515 static int se_is_idle(struct sched_entity *se) in se_is_idle() argument
517 return task_has_idle_policy(task_of(se)); in se_is_idle()
557 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) in entity_key() argument
559 return (s64)(se->vruntime - cfs_rq->min_vruntime); in entity_key()
584 * se->vruntime):
624 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se) in avg_vruntime_add() argument
626 unsigned long weight = scale_load_down(se->load.weight); in avg_vruntime_add()
627 s64 key = entity_key(cfs_rq, se); in avg_vruntime_add()
634 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se) in avg_vruntime_sub() argument
636 unsigned long weight = scale_load_down(se->load.weight); in avg_vruntime_sub()
637 s64 key = entity_key(cfs_rq, se); in avg_vruntime_sub()
695 static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_entity_lag() argument
699 SCHED_WARN_ON(!se->on_rq); in update_entity_lag()
701 vlag = avg_vruntime(cfs_rq) - se->vruntime; in update_entity_lag()
702 limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se); in update_entity_lag()
704 se->vlag = clamp(vlag, -limit, limit); in update_entity_lag()
721 * Note: using 'avg_vruntime() > se->vruntime' is inaccurate due
740 int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se) in entity_eligible() argument
742 return vruntime_eligible(cfs_rq, se->vruntime); in entity_eligible()
761 struct sched_entity *se = __pick_root_entity(cfs_rq); in update_min_vruntime() local
772 if (se) { in update_min_vruntime()
774 vruntime = se->min_vruntime; in update_min_vruntime()
776 vruntime = min_vruntime(vruntime, se->min_vruntime); in update_min_vruntime()
805 static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node *node) in __min_vruntime_update() argument
809 if (vruntime_gt(min_vruntime, se, rse)) in __min_vruntime_update()
810 se->min_vruntime = rse->min_vruntime; in __min_vruntime_update()
814 static inline void __min_slice_update(struct sched_entity *se, struct rb_node *node) in __min_slice_update() argument
818 if (rse->min_slice < se->min_slice) in __min_slice_update()
819 se->min_slice = rse->min_slice; in __min_slice_update()
824 * se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
826 static inline bool min_vruntime_update(struct sched_entity *se, bool exit) in min_vruntime_update() argument
828 u64 old_min_vruntime = se->min_vruntime; in min_vruntime_update()
829 u64 old_min_slice = se->min_slice; in min_vruntime_update()
830 struct rb_node *node = &se->run_node; in min_vruntime_update()
832 se->min_vruntime = se->vruntime; in min_vruntime_update()
833 __min_vruntime_update(se, node->rb_right); in min_vruntime_update()
834 __min_vruntime_update(se, node->rb_left); in min_vruntime_update()
836 se->min_slice = se->slice; in min_vruntime_update()
837 __min_slice_update(se, node->rb_right); in min_vruntime_update()
838 __min_slice_update(se, node->rb_left); in min_vruntime_update()
840 return se->min_vruntime == old_min_vruntime && in min_vruntime_update()
841 se->min_slice == old_min_slice; in min_vruntime_update()
850 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
852 avg_vruntime_add(cfs_rq, se); in __enqueue_entity()
853 se->min_vruntime = se->vruntime; in __enqueue_entity()
854 se->min_slice = se->slice; in __enqueue_entity()
855 rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, in __enqueue_entity()
859 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
861 rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, in __dequeue_entity()
863 avg_vruntime_sub(cfs_rq, se); in __dequeue_entity()
890 static inline void set_protect_slice(struct sched_entity *se) in set_protect_slice() argument
892 se->vlag = se->deadline; in set_protect_slice()
895 static inline bool protect_slice(struct sched_entity *se) in protect_slice() argument
897 return se->vlag == se->deadline; in protect_slice()
900 static inline void cancel_protect_slice(struct sched_entity *se) in cancel_protect_slice() argument
902 if (protect_slice(se)) in cancel_protect_slice()
903 se->vlag = se->deadline + 1; in cancel_protect_slice()
921 * se->min_vruntime = min(se->vruntime, se->{left,right}->min_vruntime)
928 struct sched_entity *se = __pick_first_entity(cfs_rq); in pick_eevdf() local
937 return curr && curr->on_rq ? curr : se; in pick_eevdf()
946 if (se && entity_eligible(cfs_rq, se)) { in pick_eevdf()
947 best = se; in pick_eevdf()
965 se = __node_2_se(node); in pick_eevdf()
972 if (entity_eligible(cfs_rq, se)) { in pick_eevdf()
973 best = se; in pick_eevdf()
1015 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
1021 static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_deadline() argument
1023 if ((s64)(se->vruntime - se->deadline) < 0) in update_deadline()
1031 if (!se->custom_slice) in update_deadline()
1032 se->slice = sysctl_sched_base_slice; in update_deadline()
1037 se->deadline = se->vruntime + calc_delta_fair(se->slice, se); in update_deadline()
1053 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
1055 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
1065 if (entity_is_task(se)) in init_entity_runnable_average()
1066 sa->load_avg = scale_load_down(se->load.weight); in init_entity_runnable_average()
1076 * * se_weight(se)
1100 struct sched_entity *se = &p->se; in post_init_entity_util_avg() local
1101 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg()
1102 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
1111 attach_entity_load_avg(cfs_rq, se); in post_init_entity_util_avg()
1117 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
1123 sa->util_avg = cfs_rq->avg.util_avg * se_weight(se); in post_init_entity_util_avg()
1137 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
1190 struct sched_entity *pse, struct sched_entity *se) in do_preempt_short() argument
1195 if (pse->slice >= se->slice) in do_preempt_short()
1201 if (entity_before(pse, se)) in do_preempt_short()
1204 if (!entity_eligible(cfs_rq, se)) in do_preempt_short()
1218 delta_exec = update_curr_se(rq, &donor->se); in update_curr_common()
1278 update_curr(cfs_rq_of(&rq->donor->se)); in update_curr_fair()
1282 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start_fair() argument
1290 stats = __schedstats_from_se(se); in update_stats_wait_start_fair()
1292 if (entity_is_task(se)) in update_stats_wait_start_fair()
1293 p = task_of(se); in update_stats_wait_start_fair()
1299 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end_fair() argument
1307 stats = __schedstats_from_se(se); in update_stats_wait_end_fair()
1310 * When the sched_schedstat changes from 0 to 1, some sched se in update_stats_wait_end_fair()
1311 * maybe already in the runqueue, the se->statistics.wait_start in update_stats_wait_end_fair()
1318 if (entity_is_task(se)) in update_stats_wait_end_fair()
1319 p = task_of(se); in update_stats_wait_end_fair()
1325 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper_fair() argument
1333 stats = __schedstats_from_se(se); in update_stats_enqueue_sleeper_fair()
1335 if (entity_is_task(se)) in update_stats_enqueue_sleeper_fair()
1336 tsk = task_of(se); in update_stats_enqueue_sleeper_fair()
1345 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue_fair() argument
1354 if (se != cfs_rq->curr) in update_stats_enqueue_fair()
1355 update_stats_wait_start_fair(cfs_rq, se); in update_stats_enqueue_fair()
1358 update_stats_enqueue_sleeper_fair(cfs_rq, se); in update_stats_enqueue_fair()
1362 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue_fair() argument
1372 if (se != cfs_rq->curr) in update_stats_dequeue_fair()
1373 update_stats_wait_end_fair(cfs_rq, se); in update_stats_dequeue_fair()
1375 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { in update_stats_dequeue_fair()
1376 struct task_struct *tsk = task_of(se); in update_stats_dequeue_fair()
1394 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1399 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
2787 now = p->se.exec_start; in numa_get_avg_runtime()
2788 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
2798 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
3311 u64 runtime = p->se.sum_exec_runtime; in task_numa_work()
3548 if (unlikely(p->se.sum_exec_runtime != runtime)) { in task_numa_work()
3549 u64 diff = p->se.sum_exec_runtime - runtime; in task_numa_work()
3621 now = curr->se.sum_exec_runtime; in task_tick_numa()
3688 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
3690 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
3692 if (entity_is_task(se)) { in account_entity_enqueue()
3695 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
3696 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
3703 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
3705 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
3707 if (entity_is_task(se)) { in account_entity_dequeue()
3708 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
3709 list_del_init(&se->group_node); in account_entity_dequeue()
3765 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
3767 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3768 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3772 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
3774 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3775 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3782 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
3784 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
3787 static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags);
3789 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
3792 bool curr = cfs_rq->curr == se; in reweight_entity()
3794 if (se->on_rq) { in reweight_entity()
3797 update_entity_lag(cfs_rq, se); in reweight_entity()
3798 se->deadline -= se->vruntime; in reweight_entity()
3799 se->rel_deadline = 1; in reweight_entity()
3801 __dequeue_entity(cfs_rq, se); in reweight_entity()
3802 update_load_sub(&cfs_rq->load, se->load.weight); in reweight_entity()
3804 dequeue_load_avg(cfs_rq, se); in reweight_entity()
3807 * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i), in reweight_entity()
3808 * we need to scale se->vlag when w_i changes. in reweight_entity()
3810 se->vlag = div_s64(se->vlag * se->load.weight, weight); in reweight_entity()
3811 if (se->rel_deadline) in reweight_entity()
3812 se->deadline = div_s64(se->deadline * se->load.weight, weight); in reweight_entity()
3814 update_load_set(&se->load, weight); in reweight_entity()
3818 u32 divider = get_pelt_divider(&se->avg); in reweight_entity()
3820 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
3824 enqueue_load_avg(cfs_rq, se); in reweight_entity()
3825 if (se->on_rq) { in reweight_entity()
3826 update_load_add(&cfs_rq->load, se->load.weight); in reweight_entity()
3827 place_entity(cfs_rq, se, 0); in reweight_entity()
3829 __enqueue_entity(cfs_rq, se); in reweight_entity()
3845 struct sched_entity *se = &p->se; in reweight_task_fair() local
3846 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task_fair()
3847 struct load_weight *load = &se->load; in reweight_task_fair()
3849 reweight_entity(cfs_rq, se, lw->weight); in reweight_task_fair()
3969 static void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3971 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3989 if (unlikely(se->load.weight != shares)) in update_cfs_group()
3990 reweight_entity(cfs_rq_of(se), se, shares); in update_cfs_group()
3994 static inline void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
4187 void set_task_rq_fair(struct sched_entity *se, in set_task_rq_fair() argument
4203 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
4209 __update_load_avg_blocked_se(p_last_update_time, se); in set_task_rq_fair()
4210 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
4281 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
4283 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
4291 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in update_tg_cfs_util()
4298 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
4299 new_sum = se->avg.util_avg * divider; in update_tg_cfs_util()
4300 delta_sum = (long)new_sum - (long)se->avg.util_sum; in update_tg_cfs_util()
4301 se->avg.util_sum = new_sum; in update_tg_cfs_util()
4313 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
4315 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; in update_tg_cfs_runnable()
4323 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in update_tg_cfs_runnable()
4329 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; in update_tg_cfs_runnable()
4330 new_sum = se->avg.runnable_avg * divider; in update_tg_cfs_runnable()
4331 delta_sum = (long)new_sum - (long)se->avg.runnable_sum; in update_tg_cfs_runnable()
4332 se->avg.runnable_sum = new_sum; in update_tg_cfs_runnable()
4343 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_load() argument
4357 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in update_tg_cfs_load()
4367 runnable_sum += se->avg.load_sum; in update_tg_cfs_load()
4379 /* But make sure to not inflate se's runnable */ in update_tg_cfs_load()
4380 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_load()
4389 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_load()
4392 load_sum = se_weight(se) * runnable_sum; in update_tg_cfs_load()
4395 delta_avg = load_avg - se->avg.load_avg; in update_tg_cfs_load()
4399 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; in update_tg_cfs_load()
4401 se->avg.load_sum = runnable_sum; in update_tg_cfs_load()
4402 se->avg.load_avg = load_avg; in update_tg_cfs_load()
4417 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
4421 if (entity_is_task(se)) in propagate_entity_load_avg()
4424 gcfs_rq = group_cfs_rq(se); in propagate_entity_load_avg()
4430 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
4434 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4435 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4436 update_tg_cfs_load(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4439 trace_pelt_se_tp(se); in propagate_entity_load_avg()
4448 static inline bool skip_blocked_update(struct sched_entity *se) in skip_blocked_update() argument
4450 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
4456 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
4480 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
4490 static inline void migrate_se_pelt_lag(struct sched_entity *se) in migrate_se_pelt_lag() argument
4497 if (load_avg_is_decayed(&se->avg)) in migrate_se_pelt_lag()
4500 cfs_rq = cfs_rq_of(se); in migrate_se_pelt_lag()
4566 __update_load_avg_blocked_se(now, se); in migrate_se_pelt_lag()
4569 static void migrate_se_pelt_lag(struct sched_entity *se) {} in migrate_se_pelt_lag() argument
4615 * Because of rounding, se->util_sum might ends up being +1 more than in update_cfs_rq_load_avg()
4654 * @se: sched_entity to attach
4659 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in attach_entity_load_avg() argument
4662 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in attach_entity_load_avg()
4668 * When we attach the @se to the @cfs_rq, we must align the decay in attach_entity_load_avg()
4674 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
4675 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
4683 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
4685 se->avg.runnable_sum = se->avg.runnable_avg * divider; in attach_entity_load_avg()
4687 se->avg.load_sum = se->avg.load_avg * divider; in attach_entity_load_avg()
4688 if (se_weight(se) < se->avg.load_sum) in attach_entity_load_avg()
4689 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
4691 se->avg.load_sum = 1; in attach_entity_load_avg()
4693 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
4694 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
4695 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
4696 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
4697 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
4699 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
4709 * @se: sched_entity to detach
4714 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
4716 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
4717 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
4718 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
4723 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
4724 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
4729 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
4745 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
4754 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
4755 __update_load_avg_se(now, cfs_rq, se); in update_load_avg()
4758 decayed |= propagate_entity_load_avg(se); in update_load_avg()
4760 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
4769 attach_entity_load_avg(cfs_rq, se); in update_load_avg()
4777 detach_entity_load_avg(cfs_rq, se); in update_load_avg()
4791 static void sync_entity_load_avg(struct sched_entity *se) in sync_entity_load_avg() argument
4793 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg()
4797 __update_load_avg_blocked_se(last_update_time, se); in sync_entity_load_avg()
4804 static void remove_entity_load_avg(struct sched_entity *se) in remove_entity_load_avg() argument
4806 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg()
4815 sync_entity_load_avg(se); in remove_entity_load_avg()
4819 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
4820 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
4821 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
4839 return READ_ONCE(p->se.avg.util_avg); in task_util()
4844 return READ_ONCE(p->se.avg.runnable_avg); in task_runnable()
4849 return READ_ONCE(p->se.avg.util_est) & ~UTIL_AVG_UNCHANGED; in _task_util_est()
4908 ewma = READ_ONCE(p->se.avg.util_est); in util_est_update()
4973 WRITE_ONCE(p->se.avg.util_est, ewma); in util_est_update()
4975 trace_sched_util_est_se_tp(&p->se); in util_est_update()
5156 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
5161 static inline void remove_entity_load_avg(struct sched_entity *se) {} in remove_entity_load_avg() argument
5164 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in attach_entity_load_avg() argument
5166 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
5188 struct sched_entity *se = &p->se; in __setparam_fair() local
5192 se->custom_slice = 1; in __setparam_fair()
5193 se->slice = clamp_t(u64, attr->sched_runtime, in __setparam_fair()
5197 se->custom_slice = 0; in __setparam_fair()
5198 se->slice = sysctl_sched_base_slice; in __setparam_fair()
5203 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in place_entity() argument
5208 if (!se->custom_slice) in place_entity()
5209 se->slice = sysctl_sched_base_slice; in place_entity()
5210 vslice = calc_delta_fair(se->slice, se); in place_entity()
5220 if (sched_feat(PLACE_LAG) && cfs_rq->nr_queued && se->vlag) { in place_entity()
5224 lag = se->vlag; in place_entity()
5282 lag *= load + scale_load_down(se->load.weight); in place_entity()
5288 se->vruntime = vruntime - lag; in place_entity()
5290 if (se->rel_deadline) { in place_entity()
5291 se->deadline += se->vruntime; in place_entity()
5292 se->rel_deadline = 0; in place_entity()
5307 se->deadline = se->vruntime + vslice; in place_entity()
5314 requeue_delayed_entity(struct sched_entity *se);
5317 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
5319 bool curr = cfs_rq->curr == se; in enqueue_entity()
5326 place_entity(cfs_rq, se, flags); in enqueue_entity()
5339 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
5340 se_update_runnable(se); in enqueue_entity()
5346 update_cfs_group(se); in enqueue_entity()
5353 place_entity(cfs_rq, se, flags); in enqueue_entity()
5355 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
5359 se->exec_start = 0; in enqueue_entity()
5362 update_stats_enqueue_fair(cfs_rq, se, flags); in enqueue_entity()
5364 __enqueue_entity(cfs_rq, se); in enqueue_entity()
5365 se->on_rq = 1; in enqueue_entity()
5384 static void __clear_buddies_next(struct sched_entity *se) in __clear_buddies_next() argument
5386 for_each_sched_entity(se) { in __clear_buddies_next()
5387 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next()
5388 if (cfs_rq->next != se) in __clear_buddies_next()
5395 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
5397 if (cfs_rq->next == se) in clear_buddies()
5398 __clear_buddies_next(se); in clear_buddies()
5403 static void set_delayed(struct sched_entity *se) in set_delayed() argument
5405 se->sched_delayed = 1; in set_delayed()
5408 * Delayed se of cfs_rq have no tasks queued on them. in set_delayed()
5412 if (!entity_is_task(se)) in set_delayed()
5415 for_each_sched_entity(se) { in set_delayed()
5416 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_delayed()
5424 static void clear_delayed(struct sched_entity *se) in clear_delayed() argument
5426 se->sched_delayed = 0; in clear_delayed()
5429 * Delayed se of cfs_rq have no tasks queued on them. in clear_delayed()
5434 if (!entity_is_task(se)) in clear_delayed()
5437 for_each_sched_entity(se) { in clear_delayed()
5438 struct cfs_rq *cfs_rq = cfs_rq_of(se); in clear_delayed()
5446 static inline void finish_delayed_dequeue_entity(struct sched_entity *se) in finish_delayed_dequeue_entity() argument
5448 clear_delayed(se); in finish_delayed_dequeue_entity()
5449 if (sched_feat(DELAY_ZERO) && se->vlag > 0) in finish_delayed_dequeue_entity()
5450 se->vlag = 0; in finish_delayed_dequeue_entity()
5454 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
5460 clear_buddies(cfs_rq, se); in dequeue_entity()
5463 SCHED_WARN_ON(!se->sched_delayed); in dequeue_entity()
5473 SCHED_WARN_ON(delay && se->sched_delayed); in dequeue_entity()
5476 !entity_eligible(cfs_rq, se)) { in dequeue_entity()
5477 update_load_avg(cfs_rq, se, 0); in dequeue_entity()
5478 set_delayed(se); in dequeue_entity()
5483 if (entity_is_task(se) && task_on_rq_migrating(task_of(se))) in dequeue_entity()
5495 update_load_avg(cfs_rq, se, action); in dequeue_entity()
5496 se_update_runnable(se); in dequeue_entity()
5498 update_stats_dequeue_fair(cfs_rq, se, flags); in dequeue_entity()
5500 update_entity_lag(cfs_rq, se); in dequeue_entity()
5502 se->deadline -= se->vruntime; in dequeue_entity()
5503 se->rel_deadline = 1; in dequeue_entity()
5506 if (se != cfs_rq->curr) in dequeue_entity()
5507 __dequeue_entity(cfs_rq, se); in dequeue_entity()
5508 se->on_rq = 0; in dequeue_entity()
5509 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
5514 update_cfs_group(se); in dequeue_entity()
5517 * Now advance min_vruntime if @se was the entity holding it back, in dequeue_entity()
5526 finish_delayed_dequeue_entity(se); in dequeue_entity()
5535 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
5537 clear_buddies(cfs_rq, se); in set_next_entity()
5540 if (se->on_rq) { in set_next_entity()
5546 update_stats_wait_end_fair(cfs_rq, se); in set_next_entity()
5547 __dequeue_entity(cfs_rq, se); in set_next_entity()
5548 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
5550 set_protect_slice(se); in set_next_entity()
5553 update_stats_curr_start(cfs_rq, se); in set_next_entity()
5555 cfs_rq->curr = se; in set_next_entity()
5563 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
5566 stats = __schedstats_from_se(se); in set_next_entity()
5569 se->sum_exec_runtime - se->prev_sum_exec_runtime)); in set_next_entity()
5572 se->prev_sum_exec_runtime = se->sum_exec_runtime; in set_next_entity()
5575 static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags);
5587 struct sched_entity *se; in pick_next_entity() local
5599 se = pick_eevdf(cfs_rq); in pick_next_entity()
5600 if (se->sched_delayed) { in pick_next_entity()
5601 dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in pick_next_entity()
5603 * Must not reference @se again, see __block_task(). in pick_next_entity()
5607 return se; in pick_next_entity()
5887 struct sched_entity *se; in throttle_cfs_rq() local
5912 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
5922 for_each_sched_entity(se) { in throttle_cfs_rq()
5923 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5927 if (!se->on_rq) in throttle_cfs_rq()
5936 if (se->sched_delayed) in throttle_cfs_rq()
5938 dequeue_entity(qcfs_rq, se, flags); in throttle_cfs_rq()
5940 if (cfs_rq_is_idle(group_cfs_rq(se))) in throttle_cfs_rq()
5949 se = parent_entity(se); in throttle_cfs_rq()
5954 for_each_sched_entity(se) { in throttle_cfs_rq()
5955 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5957 if (!se->on_rq) in throttle_cfs_rq()
5960 update_load_avg(qcfs_rq, se, 0); in throttle_cfs_rq()
5961 se_update_runnable(se); in throttle_cfs_rq()
5963 if (cfs_rq_is_idle(group_cfs_rq(se))) in throttle_cfs_rq()
5971 /* At this point se is NULL and we are at root level*/ in throttle_cfs_rq()
5993 struct sched_entity *se; in unthrottle_cfs_rq() local
5997 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
6021 for_each_sched_entity(se) { in unthrottle_cfs_rq()
6022 if (list_add_leaf_cfs_rq(cfs_rq_of(se))) in unthrottle_cfs_rq()
6031 for_each_sched_entity(se) { in unthrottle_cfs_rq()
6032 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
6035 if (se->sched_delayed) { in unthrottle_cfs_rq()
6038 dequeue_entity(qcfs_rq, se, flags); in unthrottle_cfs_rq()
6039 } else if (se->on_rq) in unthrottle_cfs_rq()
6041 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
6043 if (cfs_rq_is_idle(group_cfs_rq(se))) in unthrottle_cfs_rq()
6055 for_each_sched_entity(se) { in unthrottle_cfs_rq()
6056 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
6058 update_load_avg(qcfs_rq, se, UPDATE_TG); in unthrottle_cfs_rq()
6059 se_update_runnable(se); in unthrottle_cfs_rq()
6061 if (cfs_rq_is_idle(group_cfs_rq(se))) in unthrottle_cfs_rq()
6077 /* At this point se is NULL and we are at root level*/ in unthrottle_cfs_rq()
6790 struct sched_entity *se = &p->se; in hrtick_start_fair() local
6795 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; in hrtick_start_fair()
6796 u64 slice = se->slice; in hrtick_start_fair()
6894 requeue_delayed_entity(struct sched_entity *se) in requeue_delayed_entity() argument
6896 struct cfs_rq *cfs_rq = cfs_rq_of(se); in requeue_delayed_entity()
6899 * se->sched_delayed should imply: se->on_rq == 1. in requeue_delayed_entity()
6903 SCHED_WARN_ON(!se->sched_delayed); in requeue_delayed_entity()
6904 SCHED_WARN_ON(!se->on_rq); in requeue_delayed_entity()
6907 update_entity_lag(cfs_rq, se); in requeue_delayed_entity()
6908 if (se->vlag > 0) { in requeue_delayed_entity()
6910 if (se != cfs_rq->curr) in requeue_delayed_entity()
6911 __dequeue_entity(cfs_rq, se); in requeue_delayed_entity()
6912 se->vlag = 0; in requeue_delayed_entity()
6913 place_entity(cfs_rq, se, 0); in requeue_delayed_entity()
6914 if (se != cfs_rq->curr) in requeue_delayed_entity()
6915 __enqueue_entity(cfs_rq, se); in requeue_delayed_entity()
6920 update_load_avg(cfs_rq, se, 0); in requeue_delayed_entity()
6921 clear_delayed(se); in requeue_delayed_entity()
6933 struct sched_entity *se = &p->se; in enqueue_task_fair() local
6946 if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & ENQUEUE_RESTORE)))) in enqueue_task_fair()
6950 requeue_delayed_entity(se); in enqueue_task_fair()
6962 if (task_new && se->sched_delayed) in enqueue_task_fair()
6965 for_each_sched_entity(se) { in enqueue_task_fair()
6966 if (se->on_rq) { in enqueue_task_fair()
6967 if (se->sched_delayed) in enqueue_task_fair()
6968 requeue_delayed_entity(se); in enqueue_task_fair()
6971 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
6979 se->slice = slice; in enqueue_task_fair()
6980 se->custom_slice = 1; in enqueue_task_fair()
6982 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
6999 for_each_sched_entity(se) { in enqueue_task_fair()
7000 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
7002 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
7003 se_update_runnable(se); in enqueue_task_fair()
7004 update_cfs_group(se); in enqueue_task_fair()
7006 se->slice = slice; in enqueue_task_fair()
7007 if (se != cfs_rq->curr) in enqueue_task_fair()
7008 min_vruntime_cb_propagate(&se->run_node, NULL); in enqueue_task_fair()
7030 /* At this point se is NULL and we are at root level*/ in enqueue_task_fair()
7056 static void set_next_buddy(struct sched_entity *se);
7067 static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) in dequeue_entities() argument
7080 if (entity_is_task(se)) { in dequeue_entities()
7081 p = task_of(se); in dequeue_entities()
7084 if (task_sleep || task_delayed || !se->sched_delayed) in dequeue_entities()
7087 cfs_rq = group_cfs_rq(se); in dequeue_entities()
7091 for_each_sched_entity(se) { in dequeue_entities()
7092 cfs_rq = cfs_rq_of(se); in dequeue_entities()
7094 if (!dequeue_entity(cfs_rq, se, flags)) { in dequeue_entities()
7095 if (p && &p->se == se) in dequeue_entities()
7117 se = parent_entity(se); in dequeue_entities()
7122 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_entities()
7123 set_next_buddy(se); in dequeue_entities()
7130 for_each_sched_entity(se) { in dequeue_entities()
7131 cfs_rq = cfs_rq_of(se); in dequeue_entities()
7133 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_entities()
7134 se_update_runnable(se); in dequeue_entities()
7135 update_cfs_group(se); in dequeue_entities()
7137 se->slice = slice; in dequeue_entities()
7138 if (se != cfs_rq->curr) in dequeue_entities()
7139 min_vruntime_cb_propagate(&se->run_node, NULL); in dequeue_entities()
7188 if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & DEQUEUE_SAVE)))) in dequeue_task_fair()
7192 if (dequeue_entities(rq, &p->se, flags) < 0) in dequeue_task_fair()
7247 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
7270 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
7277 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
7512 sync_entity_load_avg(&p->se); in sched_balance_find_dst_cpu()
7845 sync_entity_load_avg(&p->se); in select_idle_sibling()
8092 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
8413 sync_entity_load_avg(&p->se); in find_energy_efficient_cpu()
8655 struct sched_entity *se = &p->se; in migrate_task_rq_fair() local
8658 remove_entity_load_avg(se); in migrate_task_rq_fair()
8670 migrate_se_pelt_lag(se); in migrate_task_rq_fair()
8674 se->avg.last_update_time = 0; in migrate_task_rq_fair()
8681 struct sched_entity *se = &p->se; in task_dead_fair() local
8683 if (se->sched_delayed) { in task_dead_fair()
8688 if (se->sched_delayed) { in task_dead_fair()
8690 dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in task_dead_fair()
8695 remove_entity_load_avg(se); in task_dead_fair()
8740 static void set_next_buddy(struct sched_entity *se) in set_next_buddy() argument
8742 for_each_sched_entity(se) { in set_next_buddy()
8743 if (SCHED_WARN_ON(!se->on_rq)) in set_next_buddy()
8745 if (se_is_idle(se)) in set_next_buddy()
8747 cfs_rq_of(se)->next = se; in set_next_buddy()
8757 struct sched_entity *se = &donor->se, *pse = &p->se; in check_preempt_wakeup_fair() local
8761 if (unlikely(se == pse)) in check_preempt_wakeup_fair()
8793 find_matching_se(&se, &pse); in check_preempt_wakeup_fair()
8796 cse_is_idle = se_is_idle(se); in check_preempt_wakeup_fair()
8808 cancel_protect_slice(se); in check_preempt_wakeup_fair()
8821 cfs_rq = cfs_rq_of(se); in check_preempt_wakeup_fair()
8830 if (do_preempt_short(cfs_rq, pse, se)) in check_preempt_wakeup_fair()
8831 cancel_protect_slice(se); in check_preempt_wakeup_fair()
8847 struct sched_entity *se; in pick_task_fair() local
8863 se = pick_next_entity(rq, cfs_rq); in pick_task_fair()
8864 if (!se) in pick_task_fair()
8866 cfs_rq = group_cfs_rq(se); in pick_task_fair()
8869 return task_of(se); in pick_task_fair()
8878 struct sched_entity *se; in pick_next_task_fair() local
8886 se = &p->se; in pick_next_task_fair()
8906 struct sched_entity *pse = &prev->se; in pick_next_task_fair()
8909 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
8910 int se_depth = se->depth; in pick_next_task_fair()
8918 set_next_entity(cfs_rq_of(se), se); in pick_next_task_fair()
8919 se = parent_entity(se); in pick_next_task_fair()
8924 set_next_entity(cfs_rq, se); in pick_next_task_fair()
8991 struct sched_entity *se = &prev->se; in put_prev_task_fair() local
8994 for_each_sched_entity(se) { in put_prev_task_fair()
8995 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
8996 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
9007 struct sched_entity *se = &curr->se; in yield_task_fair() local
9015 clear_buddies(cfs_rq, se); in yield_task_fair()
9029 se->deadline += calc_delta_fair(se->slice, se); in yield_task_fair()
9034 struct sched_entity *se = &p->se; in yield_to_task_fair() local
9037 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) in yield_to_task_fair()
9040 /* Tell the scheduler that we'd really like se to run next. */ in yield_to_task_fair()
9041 set_next_buddy(se); in yield_to_task_fair()
9277 (&p->se == cfs_rq_of(&p->se)->next)) in task_hot()
9293 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
9376 !entity_eligible(task_cfs_rq(p), &p->se)) in task_is_ineligible_on_dst_cpu()
9402 if ((p->se.sched_delayed) && (env->migration_type != migrate_load)) in can_migrate_task()
9519 &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
9584 p = list_last_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
9639 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
9665 list_move(&p->se.group_node, tasks); in detach_tasks()
9718 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
9719 list_del_init(&p->se.group_node); in attach_tasks()
9802 struct sched_entity *se; in __update_blocked_fair() local
9815 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
9816 if (se && !skip_blocked_update(se)) in __update_blocked_fair()
9817 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in __update_blocked_fair()
9842 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load() local
9850 for_each_sched_entity(se) { in update_cfs_rq_h_load()
9851 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
9852 WRITE_ONCE(cfs_rq->h_load_next, se); in update_cfs_rq_h_load()
9857 if (!se) { in update_cfs_rq_h_load()
9862 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { in update_cfs_rq_h_load()
9864 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
9866 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
9877 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
9895 return p->se.avg.load_avg; in task_h_load()
10631 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
12975 __entity_slice_used(struct sched_entity *se, int min_nr_tasks) in __entity_slice_used() argument
12977 u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime; in __entity_slice_used()
12978 u64 slice = se->slice; in __entity_slice_used()
13004 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE)) in task_tick_core()
13011 static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq, in se_fi_update() argument
13014 for_each_sched_entity(se) { in se_fi_update()
13015 struct cfs_rq *cfs_rq = cfs_rq_of(se); in se_fi_update()
13029 struct sched_entity *se = &p->se; in task_vruntime_update() local
13034 se_fi_update(se, rq->core->core_forceidle_seq, in_fi); in task_vruntime_update()
13041 const struct sched_entity *sea = &a->se; in cfs_prio_less()
13042 const struct sched_entity *seb = &b->se; in cfs_prio_less()
13051 * Find an se in the hierarchy for tasks a and b, such that the se's in cfs_prio_less()
13075 * Find delta after normalizing se's vruntime with its cfs_rq's in cfs_prio_less()
13111 struct sched_entity *se = &curr->se; in task_tick_fair() local
13113 for_each_sched_entity(se) { in task_tick_fair()
13114 cfs_rq = cfs_rq_of(se); in task_tick_fair()
13115 entity_tick(cfs_rq, se, queued); in task_tick_fair()
13167 static void propagate_entity_cfs_rq(struct sched_entity *se) in propagate_entity_cfs_rq() argument
13169 struct cfs_rq *cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
13178 se = se->parent; in propagate_entity_cfs_rq()
13180 for_each_sched_entity(se) { in propagate_entity_cfs_rq()
13181 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
13183 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
13193 static void propagate_entity_cfs_rq(struct sched_entity *se) { } in propagate_entity_cfs_rq() argument
13196 static void detach_entity_cfs_rq(struct sched_entity *se) in detach_entity_cfs_rq() argument
13198 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq()
13207 if (!se->avg.last_update_time) in detach_entity_cfs_rq()
13212 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
13213 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
13215 propagate_entity_cfs_rq(se); in detach_entity_cfs_rq()
13218 static void attach_entity_cfs_rq(struct sched_entity *se) in attach_entity_cfs_rq() argument
13220 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq()
13223 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
13224 attach_entity_load_avg(cfs_rq, se); in attach_entity_cfs_rq()
13226 propagate_entity_cfs_rq(se); in attach_entity_cfs_rq()
13231 struct sched_entity *se = &p->se; in detach_task_cfs_rq() local
13233 detach_entity_cfs_rq(se); in detach_task_cfs_rq()
13238 struct sched_entity *se = &p->se; in attach_task_cfs_rq() local
13240 attach_entity_cfs_rq(se); in attach_task_cfs_rq()
13250 SCHED_WARN_ON(p->se.sched_delayed); in switched_to_fair()
13271 struct sched_entity *se = &p->se; in __set_next_task_fair() local
13279 list_move(&se->group_node, &rq->cfs_tasks); in __set_next_task_fair()
13285 SCHED_WARN_ON(se->sched_delayed); in __set_next_task_fair()
13302 struct sched_entity *se = &p->se; in set_next_task_fair() local
13304 for_each_sched_entity(se) { in set_next_task_fair()
13305 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_next_task_fair()
13307 set_next_entity(cfs_rq, se); in set_next_task_fair()
13337 /* Tell se's cfs_rq has been changed -- migrated */ in task_change_group_fair()
13338 p->se.avg.last_update_time = 0; in task_change_group_fair()
13351 if (tg->se) in free_fair_sched_group()
13352 kfree(tg->se[i]); in free_fair_sched_group()
13356 kfree(tg->se); in free_fair_sched_group()
13361 struct sched_entity *se; in alloc_fair_sched_group() local
13368 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); in alloc_fair_sched_group()
13369 if (!tg->se) in alloc_fair_sched_group()
13382 se = kzalloc_node(sizeof(struct sched_entity_stats), in alloc_fair_sched_group()
13384 if (!se) in alloc_fair_sched_group()
13388 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
13389 init_entity_runnable_average(se); in alloc_fair_sched_group()
13402 struct sched_entity *se; in online_fair_sched_group() local
13409 se = tg->se[i]; in online_fair_sched_group()
13412 attach_entity_cfs_rq(se); in online_fair_sched_group()
13426 struct sched_entity *se = tg->se[cpu]; in unregister_fair_sched_group() local
13429 if (se) { in unregister_fair_sched_group()
13430 if (se->sched_delayed) { in unregister_fair_sched_group()
13432 if (se->sched_delayed) { in unregister_fair_sched_group()
13434 dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in unregister_fair_sched_group()
13438 remove_entity_load_avg(se); in unregister_fair_sched_group()
13453 struct sched_entity *se, int cpu, in init_tg_cfs_entry() argument
13463 tg->se[cpu] = se; in init_tg_cfs_entry()
13465 /* se could be NULL for root_task_group */ in init_tg_cfs_entry()
13466 if (!se) in init_tg_cfs_entry()
13470 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
13471 se->depth = 0; in init_tg_cfs_entry()
13473 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
13474 se->depth = parent->depth + 1; in init_tg_cfs_entry()
13477 se->my_q = cfs_rq; in init_tg_cfs_entry()
13479 update_load_set(&se->load, NICE_0_LOAD); in init_tg_cfs_entry()
13480 se->parent = parent; in init_tg_cfs_entry()
13494 if (!tg->se[0]) in __sched_group_set_shares()
13505 struct sched_entity *se = tg->se[i]; in __sched_group_set_shares() local
13511 for_each_sched_entity(se) { in __sched_group_set_shares()
13512 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in __sched_group_set_shares()
13513 update_cfs_group(se); in __sched_group_set_shares()
13556 struct sched_entity *se = tg->se[i]; in sched_group_set_idle() local
13573 for_each_sched_entity(se) { in sched_group_set_idle()
13574 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sched_group_set_idle()
13576 if (!se->on_rq) in sched_group_set_idle()
13605 struct sched_entity *se = &task->se; in get_rr_interval_fair() local
13613 rr_interval = NS_TO_JIFFIES(se->slice); in get_rr_interval_fair()
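The two for_each_sched_entity() definitions in the hits above correspond to builds with and without CONFIG_FAIR_GROUP_SCHED: with group scheduling the walk follows se->parent up through each task group's entity, while the non-group variant runs the loop body exactly once for the task's own entity. A minimal standalone sketch of that walk, using a simplified stand-in for sched_entity and a hypothetical enqueue_one() helper rather than the kernel's own definitions:

#include <stdio.h>

/* Simplified stand-in for the kernel's struct sched_entity (illustration only). */
struct sched_entity {
	const char *name;
	struct sched_entity *parent;	/* NULL at the top of the task-group hierarchy */
};

/* CONFIG_FAIR_GROUP_SCHED flavour: walk from the task's entity up to the root.
 * The non-group flavour in the listing degenerates to a single pass (se = NULL). */
#define for_each_sched_entity(se) \
	for (; se; se = se->parent)

/* Hypothetical per-level action standing in for the real per-cfs_rq work. */
static void enqueue_one(struct sched_entity *se)
{
	printf("enqueue %s\n", se->name);
}

int main(void)
{
	struct sched_entity root_group  = { "root group se",  NULL };
	struct sched_entity child_group = { "child group se", &root_group };
	struct sched_entity task        = { "task se",        &child_group };
	struct sched_entity *se = &task;

	/* Same shape as the enqueue_task_fair()/dequeue_entities() hits above:
	 * handle the task's own entity first, then each ancestor group entity. */
	for_each_sched_entity(se)
		enqueue_one(se);

	return 0;
}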