1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Scheduler topology setup/handling methods
4  */
5 
6 #include <linux/bsearch.h>
7 
8 DEFINE_MUTEX(sched_domains_mutex);
9 void sched_domains_mutex_lock(void)
10 {
11 	mutex_lock(&sched_domains_mutex);
12 }
13 void sched_domains_mutex_unlock(void)
14 {
15 	mutex_unlock(&sched_domains_mutex);
16 }
17 
18 /* Protected by sched_domains_mutex: */
19 static cpumask_var_t sched_domains_tmpmask;
20 static cpumask_var_t sched_domains_tmpmask2;
21 
22 #ifdef CONFIG_SCHED_DEBUG
23 
24 static int __init sched_debug_setup(char *str)
25 {
26 	sched_debug_verbose = true;
27 
28 	return 0;
29 }
30 early_param("sched_verbose", sched_debug_setup);
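/*
 * Usage note (annotation, not upstream text): "sched_verbose" is consumed
 * from the kernel command line, e.g.
 *
 *	... root=/dev/sda1 sched_verbose
 *
 * which sets sched_debug_verbose early during boot and enables the
 * sched_domain_debug() dump below whenever sched domains are (re)built.
 */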
31 
32 static inline bool sched_debug(void)
33 {
34 	return sched_debug_verbose;
35 }
36 
37 #define SD_FLAG(_name, mflags) [__##_name] = { .meta_flags = mflags, .name = #_name },
38 const struct sd_flag_debug sd_flag_debug[] = {
39 #include <linux/sched/sd_flags.h>
40 };
41 #undef SD_FLAG
42 
43 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
44 				  struct cpumask *groupmask)
45 {
46 	struct sched_group *group = sd->groups;
47 	unsigned long flags = sd->flags;
48 	unsigned int idx;
49 
50 	cpumask_clear(groupmask);
51 
52 	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
53 	printk(KERN_CONT "span=%*pbl level=%s\n",
54 	       cpumask_pr_args(sched_domain_span(sd)), sd->name);
55 
56 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
57 		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
58 	}
59 	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
60 		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
61 	}
62 
63 	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
64 		unsigned int flag = BIT(idx);
65 		unsigned int meta_flags = sd_flag_debug[idx].meta_flags;
66 
67 		if ((meta_flags & SDF_SHARED_CHILD) && sd->child &&
68 		    !(sd->child->flags & flag))
69 			printk(KERN_ERR "ERROR: flag %s set here but not in child\n",
70 			       sd_flag_debug[idx].name);
71 
72 		if ((meta_flags & SDF_SHARED_PARENT) && sd->parent &&
73 		    !(sd->parent->flags & flag))
74 			printk(KERN_ERR "ERROR: flag %s set here but not in parent\n",
75 			       sd_flag_debug[idx].name);
76 	}
77 
78 	printk(KERN_DEBUG "%*s groups:", level + 1, "");
79 	do {
80 		if (!group) {
81 			printk("\n");
82 			printk(KERN_ERR "ERROR: group is NULL\n");
83 			break;
84 		}
85 
86 		if (cpumask_empty(sched_group_span(group))) {
87 			printk(KERN_CONT "\n");
88 			printk(KERN_ERR "ERROR: empty group\n");
89 			break;
90 		}
91 
92 		if (!(sd->flags & SD_OVERLAP) &&
93 		    cpumask_intersects(groupmask, sched_group_span(group))) {
94 			printk(KERN_CONT "\n");
95 			printk(KERN_ERR "ERROR: repeated CPUs\n");
96 			break;
97 		}
98 
99 		cpumask_or(groupmask, groupmask, sched_group_span(group));
100 
101 		printk(KERN_CONT " %d:{ span=%*pbl",
102 				group->sgc->id,
103 				cpumask_pr_args(sched_group_span(group)));
104 
105 		if ((sd->flags & SD_OVERLAP) &&
106 		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
107 			printk(KERN_CONT " mask=%*pbl",
108 				cpumask_pr_args(group_balance_mask(group)));
109 		}
110 
111 		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
112 			printk(KERN_CONT " cap=%lu", group->sgc->capacity);
113 
114 		if (group == sd->groups && sd->child &&
115 		    !cpumask_equal(sched_domain_span(sd->child),
116 				   sched_group_span(group))) {
117 			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
118 		}
119 
120 		printk(KERN_CONT " }");
121 
122 		group = group->next;
123 
124 		if (group != sd->groups)
125 			printk(KERN_CONT ",");
126 
127 	} while (group != sd->groups);
128 	printk(KERN_CONT "\n");
129 
130 	if (!cpumask_equal(sched_domain_span(sd), groupmask))
131 		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
132 
133 	if (sd->parent &&
134 	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
135 		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
136 	return 0;
137 }
138 
139 static void sched_domain_debug(struct sched_domain *sd, int cpu)
140 {
141 	int level = 0;
142 
143 	if (!sched_debug_verbose)
144 		return;
145 
146 	if (!sd) {
147 		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
148 		return;
149 	}
150 
151 	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);
152 
153 	for (;;) {
154 		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
155 			break;
156 		level++;
157 		sd = sd->parent;
158 		if (!sd)
159 			break;
160 	}
161 }
162 #else /* !CONFIG_SCHED_DEBUG */
163 
164 # define sched_debug_verbose 0
165 # define sched_domain_debug(sd, cpu) do { } while (0)
166 static inline bool sched_debug(void)
167 {
168 	return false;
169 }
170 #endif /* CONFIG_SCHED_DEBUG */
171 
172 /* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */
173 #define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_NEEDS_GROUPS)) |
174 static const unsigned int SD_DEGENERATE_GROUPS_MASK =
175 #include <linux/sched/sd_flags.h>
176 0;
177 #undef SD_FLAG
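/*
 * Illustrative expansion (annotation, not upstream text): with the SD_FLAG()
 * definition above, every SD_FLAG(name, mflags) entry in sd_flags.h expands
 * to "(name * !!((mflags) & SDF_NEEDS_GROUPS)) |", so the initializer above
 * unrolls to something like:
 *
 *	static const unsigned int SD_DEGENERATE_GROUPS_MASK =
 *		(SD_BALANCE_NEWIDLE * 1) |	/* has SDF_NEEDS_GROUPS */
 *		...
 *		(SD_WAKE_AFFINE     * 0) |	/* no SDF_NEEDS_GROUPS  */
 *		...
 *		0;
 *
 * i.e. only the flags tagged SDF_NEEDS_GROUPS contribute their bit; all
 * others are multiplied by zero and drop out of the mask.
 */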
178 
179 static int sd_degenerate(struct sched_domain *sd)
180 {
181 	if (cpumask_weight(sched_domain_span(sd)) == 1)
182 		return 1;
183 
184 	/* Following flags need at least 2 groups */
185 	if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) &&
186 	    (sd->groups != sd->groups->next))
187 		return 0;
188 
189 	/* Following flags don't use groups */
190 	if (sd->flags & (SD_WAKE_AFFINE))
191 		return 0;
192 
193 	return 1;
194 }
195 
196 static int
197 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
198 {
199 	unsigned long cflags = sd->flags, pflags = parent->flags;
200 
201 	if (sd_degenerate(parent))
202 		return 1;
203 
204 	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
205 		return 0;
206 
207 	/* Flags needing groups don't count if only 1 group in parent */
208 	if (parent->groups == parent->groups->next)
209 		pflags &= ~SD_DEGENERATE_GROUPS_MASK;
210 
211 	if (~cflags & pflags)
212 		return 0;
213 
214 	return 1;
215 }
216 
217 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
218 DEFINE_STATIC_KEY_FALSE(sched_energy_present);
219 static unsigned int sysctl_sched_energy_aware = 1;
220 static DEFINE_MUTEX(sched_energy_mutex);
221 static bool sched_energy_update;
222 
223 static bool sched_is_eas_possible(const struct cpumask *cpu_mask)
224 {
225 	bool any_asym_capacity = false;
226 	struct cpufreq_policy *policy;
227 	struct cpufreq_governor *gov;
228 	int i;
229 
230 	/* EAS is enabled for asymmetric CPU capacity topologies. */
231 	for_each_cpu(i, cpu_mask) {
232 		if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, i))) {
233 			any_asym_capacity = true;
234 			break;
235 		}
236 	}
237 	if (!any_asym_capacity) {
238 		if (sched_debug()) {
239 			pr_info("rd %*pbl: Checking EAS, CPUs do not have asymmetric capacities\n",
240 				cpumask_pr_args(cpu_mask));
241 		}
242 		return false;
243 	}
244 
245 	/* EAS definitely does *not* handle SMT */
246 	if (sched_smt_active()) {
247 		if (sched_debug()) {
248 			pr_info("rd %*pbl: Checking EAS, SMT is not supported\n",
249 				cpumask_pr_args(cpu_mask));
250 		}
251 		return false;
252 	}
253 
254 	if (!arch_scale_freq_invariant()) {
255 		if (sched_debug()) {
256 			pr_info("rd %*pbl: Checking EAS: frequency-invariant load tracking not yet supported",
257 				cpumask_pr_args(cpu_mask));
258 		}
259 		return false;
260 	}
261 
262 	/* Do not attempt EAS if schedutil is not being used. */
263 	for_each_cpu(i, cpu_mask) {
264 		policy = cpufreq_cpu_get(i);
265 		if (!policy) {
266 			if (sched_debug()) {
267 				pr_info("rd %*pbl: Checking EAS, cpufreq policy not set for CPU: %d",
268 					cpumask_pr_args(cpu_mask), i);
269 			}
270 			return false;
271 		}
272 		gov = policy->governor;
273 		cpufreq_cpu_put(policy);
274 		if (gov != &schedutil_gov) {
275 			if (sched_debug()) {
276 				pr_info("rd %*pbl: Checking EAS, schedutil is mandatory\n",
277 					cpumask_pr_args(cpu_mask));
278 			}
279 			return false;
280 		}
281 	}
282 
283 	return true;
284 }
285 
286 void rebuild_sched_domains_energy(void)
287 {
288 	mutex_lock(&sched_energy_mutex);
289 	sched_energy_update = true;
290 	rebuild_sched_domains();
291 	sched_energy_update = false;
292 	mutex_unlock(&sched_energy_mutex);
293 }
294 
295 #ifdef CONFIG_PROC_SYSCTL
296 static int sched_energy_aware_handler(const struct ctl_table *table, int write,
297 		void *buffer, size_t *lenp, loff_t *ppos)
298 {
299 	int ret, state;
300 
301 	if (write && !capable(CAP_SYS_ADMIN))
302 		return -EPERM;
303 
304 	if (!sched_is_eas_possible(cpu_active_mask)) {
305 		if (write) {
306 			return -EOPNOTSUPP;
307 		} else {
308 			*lenp = 0;
309 			return 0;
310 		}
311 	}
312 
313 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
314 	if (!ret && write) {
315 		state = static_branch_unlikely(&sched_energy_present);
316 		if (state != sysctl_sched_energy_aware)
317 			rebuild_sched_domains_energy();
318 	}
319 
320 	return ret;
321 }
322 
323 static const struct ctl_table sched_energy_aware_sysctls[] = {
324 	{
325 		.procname       = "sched_energy_aware",
326 		.data           = &sysctl_sched_energy_aware,
327 		.maxlen         = sizeof(unsigned int),
328 		.mode           = 0644,
329 		.proc_handler   = sched_energy_aware_handler,
330 		.extra1         = SYSCTL_ZERO,
331 		.extra2         = SYSCTL_ONE,
332 	},
333 };
334 
335 static int __init sched_energy_aware_sysctl_init(void)
336 {
337 	register_sysctl_init("kernel", sched_energy_aware_sysctls);
338 	return 0;
339 }
340 
341 late_initcall(sched_energy_aware_sysctl_init);
342 #endif
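/*
 * Usage note (annotation, not upstream text): with the sysctl above, EAS can
 * be toggled from userspace at runtime, e.g.:
 *
 *	echo 0 > /proc/sys/kernel/sched_energy_aware
 *
 * A write that changes the effective state triggers
 * rebuild_sched_domains_energy(); if sched_is_eas_possible() fails for the
 * active CPUs, writes are rejected with -EOPNOTSUPP and reads return empty.
 */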
343 
344 static void free_pd(struct perf_domain *pd)
345 {
346 	struct perf_domain *tmp;
347 
348 	while (pd) {
349 		tmp = pd->next;
350 		kfree(pd);
351 		pd = tmp;
352 	}
353 }
354 
355 static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
356 {
357 	while (pd) {
358 		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
359 			return pd;
360 		pd = pd->next;
361 	}
362 
363 	return NULL;
364 }
365 
366 static struct perf_domain *pd_init(int cpu)
367 {
368 	struct em_perf_domain *obj = em_cpu_get(cpu);
369 	struct perf_domain *pd;
370 
371 	if (!obj) {
372 		if (sched_debug())
373 			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
374 		return NULL;
375 	}
376 
377 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
378 	if (!pd)
379 		return NULL;
380 	pd->em_pd = obj;
381 
382 	return pd;
383 }
384 
385 static void perf_domain_debug(const struct cpumask *cpu_map,
386 						struct perf_domain *pd)
387 {
388 	if (!sched_debug() || !pd)
389 		return;
390 
391 	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
392 
393 	while (pd) {
394 		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }",
395 				cpumask_first(perf_domain_span(pd)),
396 				cpumask_pr_args(perf_domain_span(pd)),
397 				em_pd_nr_perf_states(pd->em_pd));
398 		pd = pd->next;
399 	}
400 
401 	printk(KERN_CONT "\n");
402 }
403 
404 static void destroy_perf_domain_rcu(struct rcu_head *rp)
405 {
406 	struct perf_domain *pd;
407 
408 	pd = container_of(rp, struct perf_domain, rcu);
409 	free_pd(pd);
410 }
411 
412 static void sched_energy_set(bool has_eas)
413 {
414 	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
415 		if (sched_debug())
416 			pr_info("%s: stopping EAS\n", __func__);
417 		static_branch_disable_cpuslocked(&sched_energy_present);
418 	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
419 		if (sched_debug())
420 			pr_info("%s: starting EAS\n", __func__);
421 		static_branch_enable_cpuslocked(&sched_energy_present);
422 	}
423 }
424 
425 /*
426  * EAS can be used on a root domain if it meets all the following conditions:
427  *    1. an Energy Model (EM) is available;
428  *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
429  *    3. no SMT is detected;
430  *    4. schedutil is driving the frequency of all CPUs of the rd;
431  *    5. frequency invariance support is present.
432  */
433 static bool build_perf_domains(const struct cpumask *cpu_map)
434 {
435 	int i;
436 	struct perf_domain *pd = NULL, *tmp;
437 	int cpu = cpumask_first(cpu_map);
438 	struct root_domain *rd = cpu_rq(cpu)->rd;
439 
440 	if (!sysctl_sched_energy_aware)
441 		goto free;
442 
443 	if (!sched_is_eas_possible(cpu_map))
444 		goto free;
445 
446 	for_each_cpu(i, cpu_map) {
447 		/* Skip already covered CPUs. */
448 		if (find_pd(pd, i))
449 			continue;
450 
451 		/* Create the new pd and add it to the local list. */
452 		tmp = pd_init(i);
453 		if (!tmp)
454 			goto free;
455 		tmp->next = pd;
456 		pd = tmp;
457 	}
458 
459 	perf_domain_debug(cpu_map, pd);
460 
461 	/* Attach the new list of performance domains to the root domain. */
462 	tmp = rd->pd;
463 	rcu_assign_pointer(rd->pd, pd);
464 	if (tmp)
465 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
466 
467 	return !!pd;
468 
469 free:
470 	free_pd(pd);
471 	tmp = rd->pd;
472 	rcu_assign_pointer(rd->pd, NULL);
473 	if (tmp)
474 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
475 
476 	return false;
477 }
478 #else
479 static void free_pd(struct perf_domain *pd) { }
480 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL*/
481 
482 static void free_rootdomain(struct rcu_head *rcu)
483 {
484 	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
485 
486 	cpupri_cleanup(&rd->cpupri);
487 	cpudl_cleanup(&rd->cpudl);
488 	free_cpumask_var(rd->dlo_mask);
489 	free_cpumask_var(rd->rto_mask);
490 	free_cpumask_var(rd->online);
491 	free_cpumask_var(rd->span);
492 	free_pd(rd->pd);
493 	kfree(rd);
494 }
495 
496 void rq_attach_root(struct rq *rq, struct root_domain *rd)
497 {
498 	struct root_domain *old_rd = NULL;
499 	struct rq_flags rf;
500 
501 	rq_lock_irqsave(rq, &rf);
502 
503 	if (rq->rd) {
504 		old_rd = rq->rd;
505 
506 		if (cpumask_test_cpu(rq->cpu, old_rd->online))
507 			set_rq_offline(rq);
508 
509 		cpumask_clear_cpu(rq->cpu, old_rd->span);
510 
511 		/*
512 		 * If we don't want to free the old_rd yet then
513 		 * set old_rd to NULL to skip the freeing later
514 		 * in this function:
515 		 */
516 		if (!atomic_dec_and_test(&old_rd->refcount))
517 			old_rd = NULL;
518 	}
519 
520 	atomic_inc(&rd->refcount);
521 	rq->rd = rd;
522 
523 	cpumask_set_cpu(rq->cpu, rd->span);
524 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
525 		set_rq_online(rq);
526 
527 	/*
528 	 * Because the rq is not a task, dl_add_task_root_domain() did not
529 	 * move the fair server bw to the rd if it already started.
530 	 * Add it now.
531 	 */
532 	if (rq->fair_server.dl_server)
533 		__dl_server_attach_root(&rq->fair_server, rq);
534 
535 	rq_unlock_irqrestore(rq, &rf);
536 
537 	if (old_rd)
538 		call_rcu(&old_rd->rcu, free_rootdomain);
539 }
540 
541 void sched_get_rd(struct root_domain *rd)
542 {
543 	atomic_inc(&rd->refcount);
544 }
545 
546 void sched_put_rd(struct root_domain *rd)
547 {
548 	if (!atomic_dec_and_test(&rd->refcount))
549 		return;
550 
551 	call_rcu(&rd->rcu, free_rootdomain);
552 }
553 
554 static int init_rootdomain(struct root_domain *rd)
555 {
556 	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
557 		goto out;
558 	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
559 		goto free_span;
560 	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
561 		goto free_online;
562 	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
563 		goto free_dlo_mask;
564 
565 #ifdef HAVE_RT_PUSH_IPI
566 	rd->rto_cpu = -1;
567 	raw_spin_lock_init(&rd->rto_lock);
568 	rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func);
569 #endif
570 
571 	rd->visit_cookie = 0;
572 	init_dl_bw(&rd->dl_bw);
573 	if (cpudl_init(&rd->cpudl) != 0)
574 		goto free_rto_mask;
575 
576 	if (cpupri_init(&rd->cpupri) != 0)
577 		goto free_cpudl;
578 	return 0;
579 
580 free_cpudl:
581 	cpudl_cleanup(&rd->cpudl);
582 free_rto_mask:
583 	free_cpumask_var(rd->rto_mask);
584 free_dlo_mask:
585 	free_cpumask_var(rd->dlo_mask);
586 free_online:
587 	free_cpumask_var(rd->online);
588 free_span:
589 	free_cpumask_var(rd->span);
590 out:
591 	return -ENOMEM;
592 }
593 
594 /*
595  * By default the system creates a single root-domain with all CPUs as
596  * members (mimicking the global state we have today).
597  */
598 struct root_domain def_root_domain;
599 
600 void __init init_defrootdomain(void)
601 {
602 	init_rootdomain(&def_root_domain);
603 
604 	atomic_set(&def_root_domain.refcount, 1);
605 }
606 
607 static struct root_domain *alloc_rootdomain(void)
608 {
609 	struct root_domain *rd;
610 
611 	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
612 	if (!rd)
613 		return NULL;
614 
615 	if (init_rootdomain(rd) != 0) {
616 		kfree(rd);
617 		return NULL;
618 	}
619 
620 	return rd;
621 }
622 
623 static void free_sched_groups(struct sched_group *sg, int free_sgc)
624 {
625 	struct sched_group *tmp, *first;
626 
627 	if (!sg)
628 		return;
629 
630 	first = sg;
631 	do {
632 		tmp = sg->next;
633 
634 		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
635 			kfree(sg->sgc);
636 
637 		if (atomic_dec_and_test(&sg->ref))
638 			kfree(sg);
639 		sg = tmp;
640 	} while (sg != first);
641 }
642 
643 static void destroy_sched_domain(struct sched_domain *sd)
644 {
645 	/*
646 	 * A normal sched domain may have multiple group references; an
647 	 * overlapping domain, having private groups, has only one. Iterate,
648 	 * dropping group/capacity references and freeing where none remain.
649 	 */
650 	free_sched_groups(sd->groups, 1);
651 
652 	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
653 		kfree(sd->shared);
654 	kfree(sd);
655 }
656 
657 static void destroy_sched_domains_rcu(struct rcu_head *rcu)
658 {
659 	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
660 
661 	while (sd) {
662 		struct sched_domain *parent = sd->parent;
663 		destroy_sched_domain(sd);
664 		sd = parent;
665 	}
666 }
667 
668 static void destroy_sched_domains(struct sched_domain *sd)
669 {
670 	if (sd)
671 		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
672 }
673 
674 /*
675  * Keep a special pointer to the highest sched_domain that has SD_SHARE_LLC set
676  * (Last Level Cache Domain), as this allows us to avoid some pointer chasing
677  * in select_idle_sibling().
678  *
679  * Also keep a unique ID per domain (we use the first CPU number in the cpumask
680  * of the domain), this allows us to quickly tell if two CPUs are in the same
681  * cache domain, see cpus_share_cache().
682  */
683 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
684 DEFINE_PER_CPU(int, sd_llc_size);
685 DEFINE_PER_CPU(int, sd_llc_id);
686 DEFINE_PER_CPU(int, sd_share_id);
687 DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
688 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
689 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
690 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
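/*
 * Illustrative sketch (annotation, not upstream text): sd_llc_id above is
 * what makes cpus_share_cache() a cheap per-CPU comparison. The consumer in
 * kernel/sched/core.c looks roughly like:
 *
 *	bool cpus_share_cache(int this_cpu, int that_cpu)
 *	{
 *		if (this_cpu == that_cpu)
 *			return true;
 *
 *		return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 *	}
 *
 * i.e. two CPUs share a cache domain iff their highest SD_SHARE_LLC domains
 * start at the same first CPU.
 */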
691 
692 DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
693 DEFINE_STATIC_KEY_FALSE(sched_cluster_active);
694 
695 static void update_top_cache_domain(int cpu)
696 {
697 	struct sched_domain_shared *sds = NULL;
698 	struct sched_domain *sd;
699 	int id = cpu;
700 	int size = 1;
701 
702 	sd = highest_flag_domain(cpu, SD_SHARE_LLC);
703 	if (sd) {
704 		id = cpumask_first(sched_domain_span(sd));
705 		size = cpumask_weight(sched_domain_span(sd));
706 		sds = sd->shared;
707 	}
708 
709 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
710 	per_cpu(sd_llc_size, cpu) = size;
711 	per_cpu(sd_llc_id, cpu) = id;
712 	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
713 
714 	sd = lowest_flag_domain(cpu, SD_CLUSTER);
715 	if (sd)
716 		id = cpumask_first(sched_domain_span(sd));
717 
718 	/*
719 	 * This assignment should be placed after the sd_llc_id assignment, as
720 	 * we want this id to equal the cluster id on cluster machines
721 	 * but the LLC id on non-cluster machines.
722 	 */
723 	per_cpu(sd_share_id, cpu) = id;
724 
725 	sd = lowest_flag_domain(cpu, SD_NUMA);
726 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
727 
728 	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
729 	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
730 
731 	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL);
732 	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
733 }
734 
735 /*
736  * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
737  * hold the hotplug lock.
738  */
739 static void
740 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
741 {
742 	struct rq *rq = cpu_rq(cpu);
743 	struct sched_domain *tmp;
744 
745 	/* Remove the sched domains which do not contribute to scheduling. */
746 	for (tmp = sd; tmp; ) {
747 		struct sched_domain *parent = tmp->parent;
748 		if (!parent)
749 			break;
750 
751 		if (sd_parent_degenerate(tmp, parent)) {
752 			tmp->parent = parent->parent;
753 
754 			if (parent->parent) {
755 				parent->parent->child = tmp;
756 				parent->parent->groups->flags = tmp->flags;
757 			}
758 
759 			/*
760 			 * Transfer SD_PREFER_SIBLING down in case of a
761 			 * degenerate parent; the spans match for this
762 			 * so the property transfers.
763 			 */
764 			if (parent->flags & SD_PREFER_SIBLING)
765 				tmp->flags |= SD_PREFER_SIBLING;
766 			destroy_sched_domain(parent);
767 		} else
768 			tmp = tmp->parent;
769 	}
770 
771 	if (sd && sd_degenerate(sd)) {
772 		tmp = sd;
773 		sd = sd->parent;
774 		destroy_sched_domain(tmp);
775 		if (sd) {
776 			struct sched_group *sg = sd->groups;
777 
778 			/*
779 			 * sched groups hold the flags of the child sched
780 			 * domain for convenience. Clear such flags since
781 			 * the child is being destroyed.
782 			 */
783 			do {
784 				sg->flags = 0;
785 			} while (sg != sd->groups);
786 
787 			sd->child = NULL;
788 		}
789 	}
790 
791 	sched_domain_debug(sd, cpu);
792 
793 	rq_attach_root(rq, rd);
794 	tmp = rq->sd;
795 	rcu_assign_pointer(rq->sd, sd);
796 	dirty_sched_domain_sysctl(cpu);
797 	destroy_sched_domains(tmp);
798 
799 	update_top_cache_domain(cpu);
800 }
801 
802 struct s_data {
803 	struct sched_domain * __percpu *sd;
804 	struct root_domain	*rd;
805 };
806 
807 enum s_alloc {
808 	sa_rootdomain,
809 	sa_sd,
810 	sa_sd_storage,
811 	sa_none,
812 };
813 
814 /*
815  * Return the canonical balance CPU for this group, this is the first CPU
816  * of this group that's also in the balance mask.
817  *
818  * The balance mask are all those CPUs that could actually end up at this
819  * group. See build_balance_mask().
820  *
821  * Also see should_we_balance().
822  */
823 int group_balance_cpu(struct sched_group *sg)
824 {
825 	return cpumask_first(group_balance_mask(sg));
826 }
827 
828 
829 /*
830  * NUMA topology (first read the regular topology blurb below)
831  *
832  * Given a node-distance table, for example:
833  *
834  *   node   0   1   2   3
835  *     0:  10  20  30  20
836  *     1:  20  10  20  30
837  *     2:  30  20  10  20
838  *     3:  20  30  20  10
839  *
840  * which represents a 4 node ring topology like:
841  *
842  *   0 ----- 1
843  *   |       |
844  *   |       |
845  *   |       |
846  *   3 ----- 2
847  *
848  * We want to construct domains and groups to represent this. The way we go
849  * about doing this is to build the domains on 'hops'. For each NUMA level we
850  * construct the mask of all nodes reachable in @level hops.
851  *
852  * For the above NUMA topology that gives 3 levels:
853  *
854  * NUMA-2	0-3		0-3		0-3		0-3
855  *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
856  *
857  * NUMA-1	0-1,3		0-2		1-3		0,2-3
858  *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
859  *
860  * NUMA-0	0		1		2		3
861  *
862  *
863  * As can be seen; things don't nicely line up as with the regular topology.
864  * When we iterate a domain in child domain chunks some nodes can be
865  * represented multiple times -- hence the "overlap" naming for this part of
866  * the topology.
867  *
868  * In order to minimize this overlap, we only build enough groups to cover the
869  * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
870  *
871  * Because:
872  *
873  *  - the first group of each domain is its child domain; this
874  *    gets us the first 0-1,3
875  *  - the only uncovered node is 2, whose child domain is 1-3.
876  *
877  * However, because of the overlap, computing a unique CPU for each group is
878  * more complicated. Consider the groups of NODE-1 NUMA-2: both include the
879  * CPUs of Node-0, yet those CPUs never actually end up at those groups (they
880  * end up in group: 0-1,3); see the worked example after this comment block.
881  *
882  * To correct this we have to introduce the group balance mask. This mask
883  * will contain those CPUs in the group that can reach this group given the
884  * (child) domain tree.
885  *
886  * With this we can once again compute balance_cpu and sched_group_capacity
887  * relations.
888  *
889  * XXX include words on how balance_cpu is unique and therefore can be
890  * used for sched_group_capacity links.
891  *
892  *
893  * Another 'interesting' topology is:
894  *
895  *   node   0   1   2   3
896  *     0:  10  20  20  30
897  *     1:  20  10  20  20
898  *     2:  20  20  10  20
899  *     3:  30  20  20  10
900  *
901  * Which looks a little like:
902  *
903  *   0 ----- 1
904  *   |     / |
905  *   |   /   |
906  *   | /     |
907  *   2 ----- 3
908  *
909  * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
910  * are not.
911  *
912  * This leads to a few particularly weird cases where the sched_domain's are
913  * not of the same number for each CPU. Consider:
914  *
915  * NUMA-2	0-3						0-3
916  *  groups:	{0-2},{1-3}					{1-3},{0-2}
917  *
918  * NUMA-1	0-2		0-3		0-3		1-3
919  *
920  * NUMA-0	0		1		2		3
921  *
922  */
923 
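/*
 * Worked example (annotation, not upstream text) for the ring topology
 * above, assuming one CPU per node: node 1's NUMA-2 domain spans 0-3 with
 * groups {0-2} and {0,2-3}. For the group {0-2}, built from node 1's child
 * domain, build_balance_mask() keeps only those CPUs whose own child domain
 * span equals the group span {0-2}: node 0's NUMA-1 span is {0-1,3} and
 * node 2's is {1-3}, so both are excluded and the balance mask reduces to
 * node 1's CPU alone. group_balance_cpu() therefore returns node 1's CPU,
 * which is the only CPU that can actually arrive at this group.
 */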
924 
925 /*
926  * Build the balance mask; it contains only those CPUs that can arrive at this
927  * group and should be considered to continue balancing.
928  *
929  * We do this during the group creation pass, therefore the group information
930  * isn't complete yet, however since each group represents a (child) domain we
931  * can fully construct this using the sched_domain bits (which are already
932  * complete).
933  */
934 static void
935 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
936 {
937 	const struct cpumask *sg_span = sched_group_span(sg);
938 	struct sd_data *sdd = sd->private;
939 	struct sched_domain *sibling;
940 	int i;
941 
942 	cpumask_clear(mask);
943 
944 	for_each_cpu(i, sg_span) {
945 		sibling = *per_cpu_ptr(sdd->sd, i);
946 
947 		/*
948 		 * Can happen in the asymmetric case, where these siblings are
949 		 * unused. The mask will not be empty because those CPUs that
950 		 * do have the top domain _should_ span the domain.
951 		 */
952 		if (!sibling->child)
953 			continue;
954 
955 		/* If we would not end up here, we can't continue from here */
956 		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
957 			continue;
958 
959 		cpumask_set_cpu(i, mask);
960 	}
961 
962 	/* We must not have empty masks here */
963 	WARN_ON_ONCE(cpumask_empty(mask));
964 }
965 
966 /*
967  * XXX: This creates per-node group entries; since the load-balancer will
968  * immediately access remote memory to construct this group's load-balance
969  * statistics, having the groups node-local is of dubious benefit.
970  */
971 static struct sched_group *
972 build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
973 {
974 	struct sched_group *sg;
975 	struct cpumask *sg_span;
976 
977 	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
978 			GFP_KERNEL, cpu_to_node(cpu));
979 
980 	if (!sg)
981 		return NULL;
982 
983 	sg_span = sched_group_span(sg);
984 	if (sd->child) {
985 		cpumask_copy(sg_span, sched_domain_span(sd->child));
986 		sg->flags = sd->child->flags;
987 	} else {
988 		cpumask_copy(sg_span, sched_domain_span(sd));
989 	}
990 
991 	atomic_inc(&sg->ref);
992 	return sg;
993 }
994 
995 static void init_overlap_sched_group(struct sched_domain *sd,
996 				     struct sched_group *sg)
997 {
998 	struct cpumask *mask = sched_domains_tmpmask2;
999 	struct sd_data *sdd = sd->private;
1000 	struct cpumask *sg_span;
1001 	int cpu;
1002 
1003 	build_balance_mask(sd, sg, mask);
1004 	cpu = cpumask_first(mask);
1005 
1006 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
1007 	if (atomic_inc_return(&sg->sgc->ref) == 1)
1008 		cpumask_copy(group_balance_mask(sg), mask);
1009 	else
1010 		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));
1011 
1012 	/*
1013 	 * Initialize sgc->capacity such that even if we mess up the
1014 	 * domains and no possible iteration will get us here, we won't
1015 	 * die on a /0 trap.
1016 	 */
1017 	sg_span = sched_group_span(sg);
1018 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
1019 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
1020 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
1021 }
1022 
1023 static struct sched_domain *
1024 find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
1025 {
1026 	/*
1027 	 * The proper descendant would be the one whose child won't span out
1028 	 * of sd
1029 	 */
1030 	while (sibling->child &&
1031 	       !cpumask_subset(sched_domain_span(sibling->child),
1032 			       sched_domain_span(sd)))
1033 		sibling = sibling->child;
1034 
1035 	/*
1036 	 * As we are referencing sgc across different topology levels, we need
1037 	 * to go down to skip those sched_domains which don't contribute to
1038 	 * scheduling because they will be degenerated in cpu_attach_domain().
1039 	 */
1040 	while (sibling->child &&
1041 	       cpumask_equal(sched_domain_span(sibling->child),
1042 			     sched_domain_span(sibling)))
1043 		sibling = sibling->child;
1044 
1045 	return sibling;
1046 }
1047 
1048 static int
1049 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
1050 {
1051 	struct sched_group *first = NULL, *last = NULL, *sg;
1052 	const struct cpumask *span = sched_domain_span(sd);
1053 	struct cpumask *covered = sched_domains_tmpmask;
1054 	struct sd_data *sdd = sd->private;
1055 	struct sched_domain *sibling;
1056 	int i;
1057 
1058 	cpumask_clear(covered);
1059 
1060 	for_each_cpu_wrap(i, span, cpu) {
1061 		struct cpumask *sg_span;
1062 
1063 		if (cpumask_test_cpu(i, covered))
1064 			continue;
1065 
1066 		sibling = *per_cpu_ptr(sdd->sd, i);
1067 
1068 		/*
1069 		 * Asymmetric node setups can result in situations where the
1070 		 * domain tree is of unequal depth, make sure to skip domains
1071 		 * that already cover the entire range.
1072 		 *
1073 		 * In that case build_sched_domains() will have terminated the
1074 		 * iteration early and our sibling sd spans will be empty.
1075 		 * Domains should always include the CPU they're built on, so
1076 		 * check that.
1077 		 */
1078 		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
1079 			continue;
1080 
1081 		/*
1082 		 * Usually we build sched_group by sibling's child sched_domain
1083 		 * But for machines whose NUMA diameter is 3 or above, we move
1084 		 * to build sched_group by sibling's proper descendant's child
1085 		 * domain because sibling's child sched_domain will span out of
1086 		 * the sched_domain being built as below.
1087 		 *
1088 		 * Smallest diameter=3 topology is:
1089 		 *
1090 		 *   node   0   1   2   3
1091 		 *     0:  10  20  30  40
1092 		 *     1:  20  10  20  30
1093 		 *     2:  30  20  10  20
1094 		 *     3:  40  30  20  10
1095 		 *
1096 		 *   0 --- 1 --- 2 --- 3
1097 		 *
1098 		 * NUMA-3       0-3             N/A             N/A             0-3
1099 		 *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
1100 		 *
1101 		 * NUMA-2       0-2             0-3             0-3             1-3
1102 		 *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
1103 		 *
1104 		 * NUMA-1       0-1             0-2             1-3             2-3
1105 		 *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
1106 		 *
1107 		 * NUMA-0       0               1               2               3
1108 		 *
1109 		 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
1110 		 * group span isn't a subset of the domain span.
1111 		 */
1112 		if (sibling->child &&
1113 		    !cpumask_subset(sched_domain_span(sibling->child), span))
1114 			sibling = find_descended_sibling(sd, sibling);
1115 
1116 		sg = build_group_from_child_sched_domain(sibling, cpu);
1117 		if (!sg)
1118 			goto fail;
1119 
1120 		sg_span = sched_group_span(sg);
1121 		cpumask_or(covered, covered, sg_span);
1122 
1123 		init_overlap_sched_group(sibling, sg);
1124 
1125 		if (!first)
1126 			first = sg;
1127 		if (last)
1128 			last->next = sg;
1129 		last = sg;
1130 		last->next = first;
1131 	}
1132 	sd->groups = first;
1133 
1134 	return 0;
1135 
1136 fail:
1137 	free_sched_groups(first, 0);
1138 
1139 	return -ENOMEM;
1140 }
1141 
1142 
1143 /*
1144  * Package topology (also see the load-balance blurb in fair.c)
1145  *
1146  * The scheduler builds a tree structure to represent a number of important
1147  * topology features. By default (default_topology[]) these include:
1148  *
1149  *  - Simultaneous multithreading (SMT)
1150  *  - Multi-Core Cache (MC)
1151  *  - Package (PKG)
1152  *
1153  * Where the last one more or less denotes everything up to a NUMA node.
1154  *
1155  * The tree consists of 3 primary data structures:
1156  *
1157  *	sched_domain -> sched_group -> sched_group_capacity
1158  *	    ^ ^             ^ ^
1159  *          `-'             `-'
1160  *
1161  * The sched_domains are per-CPU and have a two way link (parent & child) and
1162  * denote the ever growing mask of CPUs belonging to that level of topology.
1163  *
1164  * Each sched_domain has a circular (double) linked list of sched_group's, each
1165  * denoting the domains of the level below (or individual CPUs in case of the
1166  * first domain level). The sched_group linked by a sched_domain includes the
1167  * CPU of that sched_domain [*].
1168  *
1169  * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
1170  *
1171  * CPU   0   1   2   3   4   5   6   7
1172  *
1173  * PKG  [                             ]
1174  * MC   [             ] [             ]
1175  * SMT  [     ] [     ] [     ] [     ]
1176  *
1177  *  - or -
1178  *
1179  * PKG  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
1180  * MC	0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
1181  * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
1182  *
1183  * CPU   0   1   2   3   4   5   6   7
1184  *
1185  * One way to think about it is: sched_domain moves you up and down among these
1186  * topology levels, while sched_group moves you sideways through it, at child
1187  * domain granularity.
1188  *
1189  * sched_group_capacity ensures each unique sched_group has shared storage.
1190  *
1191  * There are two related construction problems, both of which require a CPU
1192  * that uniquely identifies each group (for a given domain):
1193  *
1194  *  - The first is the balance_cpu (see should_we_balance() and the
1195  *    load-balance blurb in fair.c); for each group we only want 1 CPU to
1196  *    continue balancing at a higher domain.
1197  *
1198  *  - The second is the sched_group_capacity; we want all identical groups
1199  *    to share a single sched_group_capacity.
1200  *
1201  * These topologies are exclusive by construction; that is, it's
1202  * impossible for an SMT thread to belong to multiple cores, or for cores
1203  * to be part of multiple caches. There is a very clear and unique location
1204  * for each CPU in the hierarchy.
1205  *
1206  * Therefore computing a unique CPU for each group is trivial (the iteration
1207  * mask is redundant and set to all 1s; all CPUs in a group will end up at
1208  * _that_ group); we can simply pick the first CPU in each group.
1209  *
1210  *
1211  * [*] in other words, the first group of each domain is its child domain.
1212  */
1213 
1214 static struct sched_group *get_group(int cpu, struct sd_data *sdd)
1215 {
1216 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1217 	struct sched_domain *child = sd->child;
1218 	struct sched_group *sg;
1219 	bool already_visited;
1220 
1221 	if (child)
1222 		cpu = cpumask_first(sched_domain_span(child));
1223 
1224 	sg = *per_cpu_ptr(sdd->sg, cpu);
1225 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
1226 
1227 	/* Increase refcounts for claim_allocations: */
1228 	already_visited = atomic_inc_return(&sg->ref) > 1;
1229 	/* sgc visits should follow a similar trend as sg */
1230 	WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));
1231 
1232 	/* If we have already visited that group, it's already initialized. */
1233 	if (already_visited)
1234 		return sg;
1235 
1236 	if (child) {
1237 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
1238 		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
1239 		sg->flags = child->flags;
1240 	} else {
1241 		cpumask_set_cpu(cpu, sched_group_span(sg));
1242 		cpumask_set_cpu(cpu, group_balance_mask(sg));
1243 	}
1244 
1245 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
1246 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
1247 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
1248 
1249 	return sg;
1250 }
1251 
1252 /*
1253  * build_sched_groups will build a circular linked list of the groups
1254  * covered by the given span, will set each group's ->cpumask correctly,
1255  * and will initialize their ->sgc.
1256  *
1257  * Assumes the sched_domain tree is fully constructed
1258  */
1259 static int
1260 build_sched_groups(struct sched_domain *sd, int cpu)
1261 {
1262 	struct sched_group *first = NULL, *last = NULL;
1263 	struct sd_data *sdd = sd->private;
1264 	const struct cpumask *span = sched_domain_span(sd);
1265 	struct cpumask *covered;
1266 	int i;
1267 
1268 	lockdep_assert_held(&sched_domains_mutex);
1269 	covered = sched_domains_tmpmask;
1270 
1271 	cpumask_clear(covered);
1272 
1273 	for_each_cpu_wrap(i, span, cpu) {
1274 		struct sched_group *sg;
1275 
1276 		if (cpumask_test_cpu(i, covered))
1277 			continue;
1278 
1279 		sg = get_group(i, sdd);
1280 
1281 		cpumask_or(covered, covered, sched_group_span(sg));
1282 
1283 		if (!first)
1284 			first = sg;
1285 		if (last)
1286 			last->next = sg;
1287 		last = sg;
1288 	}
1289 	last->next = first;
1290 	sd->groups = first;
1291 
1292 	return 0;
1293 }
1294 
1295 /*
1296  * Initialize sched groups cpu_capacity.
1297  *
1298  * cpu_capacity indicates the capacity of a sched group, which is used while
1299  * distributing the load between different sched groups in a sched domain.
1300  * Typically cpu_capacity for all the groups in a sched domain will be the
1301  * same, unless there are asymmetries in the topology. If there are
1302  * asymmetries, the group having more cpu_capacity will pick up more load
1303  * compared to the group having less cpu_capacity.
1304  */
1305 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
1306 {
1307 	struct sched_group *sg = sd->groups;
1308 	struct cpumask *mask = sched_domains_tmpmask2;
1309 
1310 	WARN_ON(!sg);
1311 
1312 	do {
1313 		int cpu, cores = 0, max_cpu = -1;
1314 
1315 		sg->group_weight = cpumask_weight(sched_group_span(sg));
1316 
1317 		cpumask_copy(mask, sched_group_span(sg));
1318 		for_each_cpu(cpu, mask) {
1319 			cores++;
1320 #ifdef CONFIG_SCHED_SMT
1321 			cpumask_andnot(mask, mask, cpu_smt_mask(cpu));
1322 #endif
1323 		}
1324 		sg->cores = cores;
1325 
1326 		if (!(sd->flags & SD_ASYM_PACKING))
1327 			goto next;
1328 
1329 		for_each_cpu(cpu, sched_group_span(sg)) {
1330 			if (max_cpu < 0)
1331 				max_cpu = cpu;
1332 			else if (sched_asym_prefer(cpu, max_cpu))
1333 				max_cpu = cpu;
1334 		}
1335 		sg->asym_prefer_cpu = max_cpu;
1336 
1337 next:
1338 		sg = sg->next;
1339 	} while (sg != sd->groups);
1340 
1341 	if (cpu != group_balance_cpu(sg))
1342 		return;
1343 
1344 	update_group_capacity(sd, cpu);
1345 }
1346 
1347 /*
1348  * Set of available CPUs grouped by their corresponding capacities.
1349  * Each list entry contains a CPU mask reflecting CPUs that share the same
1350  * capacity.
1351  * The lifespan of data is unlimited.
1352  */
1353 LIST_HEAD(asym_cap_list);
1354 
1355 /*
1356  * Verify whether there is any CPU capacity asymmetry in a given sched domain.
1357  * Provides sd_flags reflecting the asymmetry scope.
1358  */
1359 static inline int
1360 asym_cpu_capacity_classify(const struct cpumask *sd_span,
1361 			   const struct cpumask *cpu_map)
1362 {
1363 	struct asym_cap_data *entry;
1364 	int count = 0, miss = 0;
1365 
1366 	/*
1367 	 * Count how many unique CPU capacities this domain spans across
1368 	 * (compare the sched_domain CPU mask with the masks representing the
1369 	 * available CPU capacities). Take into account CPUs that might be offline:
1370 	 * skip those.
1371 	 */
1372 	list_for_each_entry(entry, &asym_cap_list, link) {
1373 		if (cpumask_intersects(sd_span, cpu_capacity_span(entry)))
1374 			++count;
1375 		else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry)))
1376 			++miss;
1377 	}
1378 
1379 	WARN_ON_ONCE(!count && !list_empty(&asym_cap_list));
1380 
1381 	/* No asymmetry detected */
1382 	if (count < 2)
1383 		return 0;
1384 	/* Some of the available CPU capacity values have not been detected */
1385 	if (miss)
1386 		return SD_ASYM_CPUCAPACITY;
1387 
1388 	/* Full asymmetry */
1389 	return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;
1390 
1391 }
1392 
1393 static void free_asym_cap_entry(struct rcu_head *head)
1394 {
1395 	struct asym_cap_data *entry = container_of(head, struct asym_cap_data, rcu);
1396 	kfree(entry);
1397 }
1398 
1399 static inline void asym_cpu_capacity_update_data(int cpu)
1400 {
1401 	unsigned long capacity = arch_scale_cpu_capacity(cpu);
1402 	struct asym_cap_data *insert_entry = NULL;
1403 	struct asym_cap_data *entry;
1404 
1405 	/*
1406 	 * Search if the capacity already exists. If not, track the entry
1407 	 * after which we should insert to keep the list ordered descending.
1408 	 */
1409 	list_for_each_entry(entry, &asym_cap_list, link) {
1410 		if (capacity == entry->capacity)
1411 			goto done;
1412 		else if (!insert_entry && capacity > entry->capacity)
1413 			insert_entry = list_prev_entry(entry, link);
1414 	}
1415 
1416 	entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
1417 	if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n"))
1418 		return;
1419 	entry->capacity = capacity;
1420 
1421 	/* If NULL then the new capacity is the smallest, add last. */
1422 	if (!insert_entry)
1423 		list_add_tail_rcu(&entry->link, &asym_cap_list);
1424 	else
1425 		list_add_rcu(&entry->link, &insert_entry->link);
1426 done:
1427 	__cpumask_set_cpu(cpu, cpu_capacity_span(entry));
1428 }
1429 
1430 /*
1431  * Build up/update the list of CPUs grouped by their capacities.
1432  * An update requires an explicit request to rebuild sched domains,
1433  * with state indicating CPU topology changes.
1434  */
1435 static void asym_cpu_capacity_scan(void)
1436 {
1437 	struct asym_cap_data *entry, *next;
1438 	int cpu;
1439 
1440 	list_for_each_entry(entry, &asym_cap_list, link)
1441 		cpumask_clear(cpu_capacity_span(entry));
1442 
1443 	for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN))
1444 		asym_cpu_capacity_update_data(cpu);
1445 
1446 	list_for_each_entry_safe(entry, next, &asym_cap_list, link) {
1447 		if (cpumask_empty(cpu_capacity_span(entry))) {
1448 			list_del_rcu(&entry->link);
1449 			call_rcu(&entry->rcu, free_asym_cap_entry);
1450 		}
1451 	}
1452 
1453 	/*
1454 	 * Only one capacity value has been detected i.e. this system is symmetric.
1455 	 * No need to keep this data around.
1456 	 */
1457 	if (list_is_singular(&asym_cap_list)) {
1458 		entry = list_first_entry(&asym_cap_list, typeof(*entry), link);
1459 		list_del_rcu(&entry->link);
1460 		call_rcu(&entry->rcu, free_asym_cap_entry);
1461 	}
1462 }
1463 
1464 /*
1465  * Initializers for schedule domains
1466  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
1467  */
1468 
1469 static int default_relax_domain_level = -1;
1470 int sched_domain_level_max;
1471 
1472 static int __init setup_relax_domain_level(char *str)
1473 {
1474 	if (kstrtoint(str, 0, &default_relax_domain_level))
1475 		pr_warn("Unable to set relax_domain_level\n");
1476 
1477 	return 1;
1478 }
1479 __setup("relax_domain_level=", setup_relax_domain_level);
1480 
1481 static void set_domain_attribute(struct sched_domain *sd,
1482 				 struct sched_domain_attr *attr)
1483 {
1484 	int request;
1485 
1486 	if (!attr || attr->relax_domain_level < 0) {
1487 		if (default_relax_domain_level < 0)
1488 			return;
1489 		request = default_relax_domain_level;
1490 	} else
1491 		request = attr->relax_domain_level;
1492 
1493 	if (sd->level >= request) {
1494 		/* Turn off idle balance on this domain: */
1495 		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1496 	}
1497 }
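/*
 * Usage note (annotation, not upstream text): the "relax_domain_level=" boot
 * parameter above sets default_relax_domain_level, and set_domain_attribute()
 * then clears SD_BALANCE_WAKE and SD_BALANCE_NEWIDLE on every domain whose
 * level is >= the requested value. For example:
 *
 *	relax_domain_level=2
 *
 * keeps newidle/wake balancing only in the two lowest domain levels; cpusets
 * can request a different level per root domain through sched_domain_attr.
 */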
1498 
1499 static void __sdt_free(const struct cpumask *cpu_map);
1500 static int __sdt_alloc(const struct cpumask *cpu_map);
1501 
1502 static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
1503 				 const struct cpumask *cpu_map)
1504 {
1505 	switch (what) {
1506 	case sa_rootdomain:
1507 		if (!atomic_read(&d->rd->refcount))
1508 			free_rootdomain(&d->rd->rcu);
1509 		fallthrough;
1510 	case sa_sd:
1511 		free_percpu(d->sd);
1512 		fallthrough;
1513 	case sa_sd_storage:
1514 		__sdt_free(cpu_map);
1515 		fallthrough;
1516 	case sa_none:
1517 		break;
1518 	}
1519 }
1520 
1521 static enum s_alloc
1522 __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
1523 {
1524 	memset(d, 0, sizeof(*d));
1525 
1526 	if (__sdt_alloc(cpu_map))
1527 		return sa_sd_storage;
1528 	d->sd = alloc_percpu(struct sched_domain *);
1529 	if (!d->sd)
1530 		return sa_sd_storage;
1531 	d->rd = alloc_rootdomain();
1532 	if (!d->rd)
1533 		return sa_sd;
1534 
1535 	return sa_rootdomain;
1536 }
1537 
1538 /*
1539  * NULL the sd_data elements we've used to build the sched_domain and
1540  * sched_group structure so that the subsequent __free_domain_allocs()
1541  * will not free the data we're using.
1542  */
1543 static void claim_allocations(int cpu, struct sched_domain *sd)
1544 {
1545 	struct sd_data *sdd = sd->private;
1546 
1547 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
1548 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
1549 
1550 	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
1551 		*per_cpu_ptr(sdd->sds, cpu) = NULL;
1552 
1553 	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
1554 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
1555 
1556 	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
1557 		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
1558 }
1559 
1560 #ifdef CONFIG_NUMA
1561 enum numa_topology_type sched_numa_topology_type;
1562 
1563 static int			sched_domains_numa_levels;
1564 static int			sched_domains_curr_level;
1565 
1566 int				sched_max_numa_distance;
1567 static int			*sched_domains_numa_distance;
1568 static struct cpumask		***sched_domains_numa_masks;
1569 #endif
1570 
1571 /*
1572  * SD_flags allowed in topology descriptions.
1573  *
1574  * These flags are purely descriptive of the topology and do not prescribe
1575  * behaviour. Behaviour is artificial and mapped in the below sd_init()
1576  * function. For details, see include/linux/sched/sd_flags.h.
1577  *
1578  *   SD_SHARE_CPUCAPACITY
1579  *   SD_SHARE_LLC
1580  *   SD_CLUSTER
1581  *   SD_NUMA
1582  *
1583  * The odd one out, which besides describing the topology also
1584  * prescribes the desired behaviour that goes along with it:
1585  *
1586  *   SD_ASYM_PACKING        - describes SMT quirks
1587  */
1588 #define TOPOLOGY_SD_FLAGS		\
1589 	(SD_SHARE_CPUCAPACITY	|	\
1590 	 SD_CLUSTER		|	\
1591 	 SD_SHARE_LLC		|	\
1592 	 SD_NUMA		|	\
1593 	 SD_ASYM_PACKING)
1594 
1595 static struct sched_domain *
1596 sd_init(struct sched_domain_topology_level *tl,
1597 	const struct cpumask *cpu_map,
1598 	struct sched_domain *child, int cpu)
1599 {
1600 	struct sd_data *sdd = &tl->data;
1601 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1602 	int sd_id, sd_weight, sd_flags = 0;
1603 	struct cpumask *sd_span;
1604 
1605 #ifdef CONFIG_NUMA
1606 	/*
1607 	 * Ugly hack to pass state to sd_numa_mask()...
1608 	 */
1609 	sched_domains_curr_level = tl->numa_level;
1610 #endif
1611 
1612 	sd_weight = cpumask_weight(tl->mask(cpu));
1613 
1614 	if (tl->sd_flags)
1615 		sd_flags = (*tl->sd_flags)();
1616 	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
1617 			"wrong sd_flags in topology description\n"))
1618 		sd_flags &= TOPOLOGY_SD_FLAGS;
1619 
1620 	*sd = (struct sched_domain){
1621 		.min_interval		= sd_weight,
1622 		.max_interval		= 2*sd_weight,
1623 		.busy_factor		= 16,
1624 		.imbalance_pct		= 117,
1625 
1626 		.cache_nice_tries	= 0,
1627 
1628 		.flags			= 1*SD_BALANCE_NEWIDLE
1629 					| 1*SD_BALANCE_EXEC
1630 					| 1*SD_BALANCE_FORK
1631 					| 0*SD_BALANCE_WAKE
1632 					| 1*SD_WAKE_AFFINE
1633 					| 0*SD_SHARE_CPUCAPACITY
1634 					| 0*SD_SHARE_LLC
1635 					| 0*SD_SERIALIZE
1636 					| 1*SD_PREFER_SIBLING
1637 					| 0*SD_NUMA
1638 					| sd_flags
1639 					,
1640 
1641 		.last_balance		= jiffies,
1642 		.balance_interval	= sd_weight,
1643 		.max_newidle_lb_cost	= 0,
1644 		.last_decay_max_lb_cost	= jiffies,
1645 		.child			= child,
1646 		.name			= tl->name,
1647 	};
1648 
1649 	sd_span = sched_domain_span(sd);
1650 	cpumask_and(sd_span, cpu_map, tl->mask(cpu));
1651 	sd_id = cpumask_first(sd_span);
1652 
1653 	sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
1654 
1655 	WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) ==
1656 		  (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY),
1657 		  "CPU capacity asymmetry not supported on SMT\n");
1658 
1659 	/*
1660 	 * Convert topological properties into behaviour.
1661 	 */
1662 	/* Don't attempt to spread across CPUs of different capacities. */
1663 	if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child)
1664 		sd->child->flags &= ~SD_PREFER_SIBLING;
1665 
1666 	if (sd->flags & SD_SHARE_CPUCAPACITY) {
1667 		sd->imbalance_pct = 110;
1668 
1669 	} else if (sd->flags & SD_SHARE_LLC) {
1670 		sd->imbalance_pct = 117;
1671 		sd->cache_nice_tries = 1;
1672 
1673 #ifdef CONFIG_NUMA
1674 	} else if (sd->flags & SD_NUMA) {
1675 		sd->cache_nice_tries = 2;
1676 
1677 		sd->flags &= ~SD_PREFER_SIBLING;
1678 		sd->flags |= SD_SERIALIZE;
1679 		if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {
1680 			sd->flags &= ~(SD_BALANCE_EXEC |
1681 				       SD_BALANCE_FORK |
1682 				       SD_WAKE_AFFINE);
1683 		}
1684 
1685 #endif
1686 	} else {
1687 		sd->cache_nice_tries = 1;
1688 	}
1689 
1690 	/*
1691 	 * For all levels sharing cache; connect a sched_domain_shared
1692 	 * instance.
1693 	 */
1694 	if (sd->flags & SD_SHARE_LLC) {
1695 		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
1696 		atomic_inc(&sd->shared->ref);
1697 		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
1698 	}
1699 
1700 	sd->private = sdd;
1701 
1702 	return sd;
1703 }
1704 
1705 /*
1706  * Topology list, bottom-up.
1707  */
1708 static struct sched_domain_topology_level default_topology[] = {
1709 #ifdef CONFIG_SCHED_SMT
1710 	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
1711 #endif
1712 
1713 #ifdef CONFIG_SCHED_CLUSTER
1714 	{ cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) },
1715 #endif
1716 
1717 #ifdef CONFIG_SCHED_MC
1718 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
1719 #endif
1720 	{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
1721 	{ NULL, },
1722 };
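/*
 * Illustrative sketch (annotation, not upstream text): architectures that
 * need a different hierarchy build a table of the same shape and install it
 * with set_sched_topology() below, before SMP scheduling is initialized,
 * roughly:
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 *
 * "my_topology" is a made-up name; see the architecture code (e.g. arch/x86)
 * for real users of this hook.
 */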
1723 
1724 static struct sched_domain_topology_level *sched_domain_topology =
1725 	default_topology;
1726 static struct sched_domain_topology_level *sched_domain_topology_saved;
1727 
1728 #define for_each_sd_topology(tl)			\
1729 	for (tl = sched_domain_topology; tl->mask; tl++)
1730 
1731 void __init set_sched_topology(struct sched_domain_topology_level *tl)
1732 {
1733 	if (WARN_ON_ONCE(sched_smp_initialized))
1734 		return;
1735 
1736 	sched_domain_topology = tl;
1737 	sched_domain_topology_saved = NULL;
1738 }
1739 
1740 #ifdef CONFIG_NUMA
1741 
1742 static const struct cpumask *sd_numa_mask(int cpu)
1743 {
1744 	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
1745 }
1746 
1747 static void sched_numa_warn(const char *str)
1748 {
1749 	static int done = false;
1750 	int i,j;
1751 
1752 	if (done)
1753 		return;
1754 
1755 	done = true;
1756 
1757 	printk(KERN_WARNING "ERROR: %s\n\n", str);
1758 
1759 	for (i = 0; i < nr_node_ids; i++) {
1760 		printk(KERN_WARNING "  ");
1761 		for (j = 0; j < nr_node_ids; j++) {
1762 			if (!node_state(i, N_CPU) || !node_state(j, N_CPU))
1763 				printk(KERN_CONT "(%02d) ", node_distance(i,j));
1764 			else
1765 				printk(KERN_CONT " %02d  ", node_distance(i,j));
1766 		}
1767 		printk(KERN_CONT "\n");
1768 	}
1769 	printk(KERN_WARNING "\n");
1770 }
1771 
1772 bool find_numa_distance(int distance)
1773 {
1774 	bool found = false;
1775 	int i, *distances;
1776 
1777 	if (distance == node_distance(0, 0))
1778 		return true;
1779 
1780 	rcu_read_lock();
1781 	distances = rcu_dereference(sched_domains_numa_distance);
1782 	if (!distances)
1783 		goto unlock;
1784 	for (i = 0; i < sched_domains_numa_levels; i++) {
1785 		if (distances[i] == distance) {
1786 			found = true;
1787 			break;
1788 		}
1789 	}
1790 unlock:
1791 	rcu_read_unlock();
1792 
1793 	return found;
1794 }
1795 
1796 #define for_each_cpu_node_but(n, nbut)		\
1797 	for_each_node_state(n, N_CPU)		\
1798 		if (n == nbut)			\
1799 			continue;		\
1800 		else
1801 
1802 /*
1803  * A system can have three types of NUMA topology:
1804  * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
1805  * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
1806  * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
1807  *
1808  * The difference between a glueless mesh topology and a backplane
1809  * topology lies in whether communication between not directly
1810  * connected nodes goes through intermediary nodes (where programs
1811  * could run), or through backplane controllers. This affects
1812  * placement of programs.
1813  *
1814  * The type of topology can be discerned with the following tests:
1815  * - If the maximum distance between any nodes is 1 hop, the system
1816  *   is directly connected.
1817  * - If for two nodes A and B, located N > 1 hops away from each other,
1818  *   there is an intermediary node C, which is < N hops away from both
1819  *   nodes A and B, the system is a glueless mesh.
1820  */
1821 static void init_numa_topology_type(int offline_node)
1822 {
1823 	int a, b, c, n;
1824 
1825 	n = sched_max_numa_distance;
1826 
1827 	if (sched_domains_numa_levels <= 2) {
1828 		sched_numa_topology_type = NUMA_DIRECT;
1829 		return;
1830 	}
1831 
1832 	for_each_cpu_node_but(a, offline_node) {
1833 		for_each_cpu_node_but(b, offline_node) {
1834 			/* Find two nodes furthest removed from each other. */
1835 			if (node_distance(a, b) < n)
1836 				continue;
1837 
1838 			/* Is there an intermediary node between a and b? */
1839 			for_each_cpu_node_but(c, offline_node) {
1840 				if (node_distance(a, c) < n &&
1841 				    node_distance(b, c) < n) {
1842 					sched_numa_topology_type =
1843 							NUMA_GLUELESS_MESH;
1844 					return;
1845 				}
1846 			}
1847 
1848 			sched_numa_topology_type = NUMA_BACKPLANE;
1849 			return;
1850 		}
1851 	}
1852 
1853 	pr_err("Failed to find a NUMA topology type, defaulting to DIRECT\n");
1854 	sched_numa_topology_type = NUMA_DIRECT;
1855 }
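/*
 * Worked example (hypothetical distances): with four CPU nodes and a
 * node_distance() table of
 *
 *	10 20 30 20
 *	20 10 20 30
 *	30 20 10 20
 *	20 30 20 10
 *
 * the maximum distance (30) is more than one hop, but for any such pair
 * there is a node at distance 20 from both ends, so the classification
 * above picks NUMA_GLUELESS_MESH. If no such intermediary CPU node
 * existed, it would fall through to NUMA_BACKPLANE instead.
 */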
1856 
1857 
1858 #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
1859 
1860 void sched_init_numa(int offline_node)
1861 {
1862 	struct sched_domain_topology_level *tl;
1863 	unsigned long *distance_map;
1864 	int nr_levels = 0;
1865 	int i, j;
1866 	int *distances;
1867 	struct cpumask ***masks;
1868 
1869 	/*
1870 	 * O(nr_nodes^2) de-duplicating selection sort -- in order to find the
1871 	 * unique distances in the node_distance() table.
1872 	 */
1873 	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
1874 	if (!distance_map)
1875 		return;
1876 
1877 	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
1878 	for_each_cpu_node_but(i, offline_node) {
1879 		for_each_cpu_node_but(j, offline_node) {
1880 			int distance = node_distance(i, j);
1881 
1882 			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
1883 				sched_numa_warn("Invalid distance value range");
1884 				bitmap_free(distance_map);
1885 				return;
1886 			}
1887 
1888 			bitmap_set(distance_map, distance, 1);
1889 		}
1890 	}
1891 	/*
1892 	 * We can now figure out how many unique distance values there are and
1893 	 * allocate memory accordingly.
1894 	 */
1895 	nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
1896 
1897 	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
1898 	if (!distances) {
1899 		bitmap_free(distance_map);
1900 		return;
1901 	}
1902 
1903 	for (i = 0, j = 0; i < nr_levels; i++, j++) {
1904 		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
1905 		distances[i] = j;
1906 	}
1907 	rcu_assign_pointer(sched_domains_numa_distance, distances);
1908 
1909 	bitmap_free(distance_map);
1910 
1911 	/*
1912 	 * 'nr_levels' contains the number of unique distances
1913 	 *
1914 	 * The sched_domains_numa_distance[] array includes the actual distance
1915 	 * numbers.
1916 	 */
1917 
1918 	/*
1919 	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
1920 	 * If we fail to allocate memory for the sched_domains_numa_masks[][]
1921 	 * array, it will contain fewer than 'nr_levels' members. This could be
1922 	 * dangerous when we use it to iterate over sched_domains_numa_masks[][]
1923 	 * in other functions.
1924 	 *
1925 	 * We reset it to 'nr_levels' at the end of this function.
1926 	 */
1927 	sched_domains_numa_levels = 0;
1928 
1929 	masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
1930 	if (!masks)
1931 		return;
1932 
1933 	/*
1934 	 * Now for each level, construct a mask per node which contains all
1935 	 * CPUs of nodes that are that many hops away from us.
1936 	 */
1937 	for (i = 0; i < nr_levels; i++) {
1938 		masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
1939 		if (!masks[i])
1940 			return;
1941 
1942 		for_each_cpu_node_but(j, offline_node) {
1943 			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
1944 			int k;
1945 
1946 			if (!mask)
1947 				return;
1948 
1949 			masks[i][j] = mask;
1950 
1951 			for_each_cpu_node_but(k, offline_node) {
1952 				if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
1953 					sched_numa_warn("Node-distance not symmetric");
1954 
1955 				if (node_distance(j, k) > sched_domains_numa_distance[i])
1956 					continue;
1957 
1958 				cpumask_or(mask, mask, cpumask_of_node(k));
1959 			}
1960 		}
1961 	}
1962 	rcu_assign_pointer(sched_domains_numa_masks, masks);
1963 
1964 	/* Compute default topology size */
1965 	for (i = 0; sched_domain_topology[i].mask; i++);
1966 
1967 	tl = kzalloc((i + nr_levels + 1) *
1968 			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
1969 	if (!tl)
1970 		return;
1971 
1972 	/*
1973 	 * Copy the default topology bits..
1974 	 */
1975 	for (i = 0; sched_domain_topology[i].mask; i++)
1976 		tl[i] = sched_domain_topology[i];
1977 
1978 	/*
1979 	 * Add the NUMA identity distance, aka single NODE.
1980 	 */
1981 	tl[i++] = (struct sched_domain_topology_level){
1982 		.mask = sd_numa_mask,
1983 		.numa_level = 0,
1984 		SD_INIT_NAME(NODE)
1985 	};
1986 
1987 	/*
1988 	 * .. and append 'j' levels of NUMA goodness.
1989 	 */
1990 	for (j = 1; j < nr_levels; i++, j++) {
1991 		tl[i] = (struct sched_domain_topology_level){
1992 			.mask = sd_numa_mask,
1993 			.sd_flags = cpu_numa_flags,
1994 			.flags = SDTL_OVERLAP,
1995 			.numa_level = j,
1996 			SD_INIT_NAME(NUMA)
1997 		};
1998 	}
1999 
2000 	sched_domain_topology_saved = sched_domain_topology;
2001 	sched_domain_topology = tl;
2002 
2003 	sched_domains_numa_levels = nr_levels;
2004 	WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]);
2005 
2006 	init_numa_topology_type(offline_node);
2007 }
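/*
 * Illustrative result (hypothetical two-socket machine): with distances
 * { 10, 20 } the bitmap scan above yields nr_levels == 2, so the rebuilt
 * topology table becomes the default levels followed by
 *
 *	{ sd_numa_mask, .numa_level = 0, NODE }    - the local node only
 *	{ sd_numa_mask, cpu_numa_flags, .flags = SDTL_OVERLAP, .numa_level = 1, NUMA }
 *
 * and sched_max_numa_distance ends up as 20. The numbers are only meant to
 * show the shape of the result, not any particular system.
 */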
2008 
2009 
2010 static void sched_reset_numa(void)
2011 {
2012 	int nr_levels, *distances;
2013 	struct cpumask ***masks;
2014 
2015 	nr_levels = sched_domains_numa_levels;
2016 	sched_domains_numa_levels = 0;
2017 	sched_max_numa_distance = 0;
2018 	sched_numa_topology_type = NUMA_DIRECT;
2019 	distances = sched_domains_numa_distance;
2020 	rcu_assign_pointer(sched_domains_numa_distance, NULL);
2021 	masks = sched_domains_numa_masks;
2022 	rcu_assign_pointer(sched_domains_numa_masks, NULL);
2023 	if (distances || masks) {
2024 		int i, j;
2025 
2026 		synchronize_rcu();
2027 		kfree(distances);
2028 		for (i = 0; i < nr_levels && masks; i++) {
2029 			if (!masks[i])
2030 				continue;
2031 			for_each_node(j)
2032 				kfree(masks[i][j]);
2033 			kfree(masks[i]);
2034 		}
2035 		kfree(masks);
2036 	}
2037 	if (sched_domain_topology_saved) {
2038 		kfree(sched_domain_topology);
2039 		sched_domain_topology = sched_domain_topology_saved;
2040 		sched_domain_topology_saved = NULL;
2041 	}
2042 }
2043 
2044 /*
2045  * Call with hotplug lock held
2046  */
2047 void sched_update_numa(int cpu, bool online)
2048 {
2049 	int node;
2050 
2051 	node = cpu_to_node(cpu);
2052 	/*
2053 	 * Scheduler NUMA topology is updated when the first CPU of a
2054 	 * node is onlined or the last CPU of a node is offlined.
2055 	 */
2056 	if (cpumask_weight(cpumask_of_node(node)) != 1)
2057 		return;
2058 
2059 	sched_reset_numa();
2060 	sched_init_numa(online ? NUMA_NO_NODE : node);
2061 }
2062 
2063 void sched_domains_numa_masks_set(unsigned int cpu)
2064 {
2065 	int node = cpu_to_node(cpu);
2066 	int i, j;
2067 
2068 	for (i = 0; i < sched_domains_numa_levels; i++) {
2069 		for (j = 0; j < nr_node_ids; j++) {
2070 			if (!node_state(j, N_CPU))
2071 				continue;
2072 
2073 			/* Set ourselves in the remote node's masks */
2074 			if (node_distance(j, node) <= sched_domains_numa_distance[i])
2075 				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
2076 		}
2077 	}
2078 }
2079 
2080 void sched_domains_numa_masks_clear(unsigned int cpu)
2081 {
2082 	int i, j;
2083 
2084 	for (i = 0; i < sched_domains_numa_levels; i++) {
2085 		for (j = 0; j < nr_node_ids; j++) {
2086 			if (sched_domains_numa_masks[i][j])
2087 				cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
2088 		}
2089 	}
2090 }
2091 
2092 /*
2093  * sched_numa_find_closest() - given the NUMA topology, find the cpu
2094  *                             closest to @cpu from @cpus.
2095  * @cpus: cpumask to find a CPU from
2096  * @cpu: CPU to be close to
2097  *
2098  * Return: CPU, or nr_cpu_ids when nothing found.
2099  */
2100 int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
2101 {
2102 	int i, j = cpu_to_node(cpu), found = nr_cpu_ids;
2103 	struct cpumask ***masks;
2104 
2105 	rcu_read_lock();
2106 	masks = rcu_dereference(sched_domains_numa_masks);
2107 	if (!masks)
2108 		goto unlock;
2109 	for (i = 0; i < sched_domains_numa_levels; i++) {
2110 		if (!masks[i][j])
2111 			break;
2112 		cpu = cpumask_any_and(cpus, masks[i][j]);
2113 		if (cpu < nr_cpu_ids) {
2114 			found = cpu;
2115 			break;
2116 		}
2117 	}
2118 unlock:
2119 	rcu_read_unlock();
2120 
2121 	return found;
2122 }
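/*
 * Usage sketch (hypothetical caller): pick a CPU from a driver-supplied
 * mask that is as close as possible to the CPU an interrupt fired on:
 *
 *	cpu = sched_numa_find_closest(wq_cpus, raw_smp_processor_id());
 *	if (cpu >= nr_cpu_ids)
 *		cpu = cpumask_any(wq_cpus);
 *
 * 'wq_cpus' is a made-up mask; the fallback handles the "nothing found"
 * return value documented above.
 */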
2123 
2124 struct __cmp_key {
2125 	const struct cpumask *cpus;
2126 	struct cpumask ***masks;
2127 	int node;
2128 	int cpu;
2129 	int w;
2130 };
2131 
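/*
 * bsearch() comparator for sched_numa_find_nth_cpu(): @a is the search key,
 * @b points into the array of hop levels. It returns 1 if this hop's mask
 * still holds at most k->cpu CPUs from k->cpus (the Nth CPU lies further
 * out), -1 if the previous hop already held more than k->cpu of them (it
 * lies closer in), and 0 when this hop is the first to reach index k->cpu;
 * k->w records the previous hop's weight so the caller can offset into the
 * matching hop's mask.
 */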
2132 static int hop_cmp(const void *a, const void *b)
2133 {
2134 	struct cpumask **prev_hop, **cur_hop = *(struct cpumask ***)b;
2135 	struct __cmp_key *k = (struct __cmp_key *)a;
2136 
2137 	if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu)
2138 		return 1;
2139 
2140 	if (b == k->masks) {
2141 		k->w = 0;
2142 		return 0;
2143 	}
2144 
2145 	prev_hop = *((struct cpumask ***)b - 1);
2146 	k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]);
2147 	if (k->w <= k->cpu)
2148 		return 0;
2149 
2150 	return -1;
2151 }
2152 
2153 /**
2154  * sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth closest CPU
2155  *                             from @cpus to @cpu, taking into account distance
2156  *                             from a given @node.
2157  * @cpus: cpumask to find a cpu from
2158  * @cpu: CPU to start searching
2159  * @node: NUMA node to order CPUs by distance
2160  *
2161  * Return: cpu, or nr_cpu_ids when nothing found.
2162  */
2163 int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
2164 {
2165 	struct __cmp_key k = { .cpus = cpus, .cpu = cpu };
2166 	struct cpumask ***hop_masks;
2167 	int hop, ret = nr_cpu_ids;
2168 
2169 	if (node == NUMA_NO_NODE)
2170 		return cpumask_nth_and(cpu, cpus, cpu_online_mask);
2171 
2172 	rcu_read_lock();
2173 
2174 	/* CPU-less node entries are uninitialized in sched_domains_numa_masks */
2175 	node = numa_nearest_node(node, N_CPU);
2176 	k.node = node;
2177 
2178 	k.masks = rcu_dereference(sched_domains_numa_masks);
2179 	if (!k.masks)
2180 		goto unlock;
2181 
2182 	hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp);
2183 	hop = hop_masks - k.masks;
2184 
2185 	ret = hop ?
2186 		cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) :
2187 		cpumask_nth_and(cpu, cpus, k.masks[0][node]);
2188 unlock:
2189 	rcu_read_unlock();
2190 	return ret;
2191 }
2192 EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu);
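/*
 * Usage sketch (assumed caller, not defined here): spreading work items over
 * CPUs in NUMA-distance order from a device's home node might look like
 *
 *	for (i = 0; i < nr_items; i++) {
 *		cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, dev_node);
 *		if (cpu < nr_cpu_ids)
 *			queue_item_on(cpu);	(hypothetical helper)
 *	}
 *
 * Item 0 lands on (or nearest to) the node itself, later items on
 * increasingly distant CPUs.
 */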
2193 
2194 /**
2195  * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from
2196  *                         @node
2197  * @node: The node to count hops from.
2198  * @hops: Include CPUs up to that many hops away. 0 means local node.
2199  *
2200  * Return: On success, a pointer to a cpumask of CPUs at most @hops away from
2201  * @node, an error value otherwise.
2202  *
2203  * Requires rcu_read_lock() to be held. The returned cpumask is only valid
2204  * within that read-side section; copy it if required beyond that.
2205  *
2206  * Note that not all hops are equal in distance; see sched_init_numa() for how
2207  * distances and masks are handled.
2208  * Also note that this is a reflection of sched_domains_numa_masks, which may change
2209  * during the lifetime of the system (offline nodes are taken out of the masks).
2210  */
2211 const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)
2212 {
2213 	struct cpumask ***masks;
2214 
2215 	if (node >= nr_node_ids || hops >= sched_domains_numa_levels)
2216 		return ERR_PTR(-EINVAL);
2217 
2218 	masks = rcu_dereference(sched_domains_numa_masks);
2219 	if (!masks)
2220 		return ERR_PTR(-EBUSY);
2221 
2222 	return masks[hops][node];
2223 }
2224 EXPORT_SYMBOL_GPL(sched_numa_hop_mask);
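/*
 * Usage sketch: the returned mask is only valid under RCU, so a caller is
 * expected to look roughly like
 *
 *	rcu_read_lock();
 *	mask = sched_numa_hop_mask(node, hops);
 *	if (!IS_ERR(mask))
 *		cpu = cpumask_first_and(mask, cpu_online_mask);
 *	rcu_read_unlock();
 *
 * Remember that hop N includes all CPUs of hops 0..N-1 as well, per the
 * cpumask_or() accumulation in sched_init_numa().
 */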
2225 
2226 #endif /* CONFIG_NUMA */
2227 
2228 static int __sdt_alloc(const struct cpumask *cpu_map)
2229 {
2230 	struct sched_domain_topology_level *tl;
2231 	int j;
2232 
2233 	for_each_sd_topology(tl) {
2234 		struct sd_data *sdd = &tl->data;
2235 
2236 		sdd->sd = alloc_percpu(struct sched_domain *);
2237 		if (!sdd->sd)
2238 			return -ENOMEM;
2239 
2240 		sdd->sds = alloc_percpu(struct sched_domain_shared *);
2241 		if (!sdd->sds)
2242 			return -ENOMEM;
2243 
2244 		sdd->sg = alloc_percpu(struct sched_group *);
2245 		if (!sdd->sg)
2246 			return -ENOMEM;
2247 
2248 		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
2249 		if (!sdd->sgc)
2250 			return -ENOMEM;
2251 
2252 		for_each_cpu(j, cpu_map) {
2253 			struct sched_domain *sd;
2254 			struct sched_domain_shared *sds;
2255 			struct sched_group *sg;
2256 			struct sched_group_capacity *sgc;
2257 
2258 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
2259 					GFP_KERNEL, cpu_to_node(j));
2260 			if (!sd)
2261 				return -ENOMEM;
2262 
2263 			*per_cpu_ptr(sdd->sd, j) = sd;
2264 
2265 			sds = kzalloc_node(sizeof(struct sched_domain_shared),
2266 					GFP_KERNEL, cpu_to_node(j));
2267 			if (!sds)
2268 				return -ENOMEM;
2269 
2270 			*per_cpu_ptr(sdd->sds, j) = sds;
2271 
2272 			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
2273 					GFP_KERNEL, cpu_to_node(j));
2274 			if (!sg)
2275 				return -ENOMEM;
2276 
2277 			sg->next = sg;
2278 
2279 			*per_cpu_ptr(sdd->sg, j) = sg;
2280 
2281 			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
2282 					GFP_KERNEL, cpu_to_node(j));
2283 			if (!sgc)
2284 				return -ENOMEM;
2285 
2286 #ifdef CONFIG_SCHED_DEBUG
2287 			sgc->id = j;
2288 #endif
2289 
2290 			*per_cpu_ptr(sdd->sgc, j) = sgc;
2291 		}
2292 	}
2293 
2294 	return 0;
2295 }
2296 
2297 static void __sdt_free(const struct cpumask *cpu_map)
2298 {
2299 	struct sched_domain_topology_level *tl;
2300 	int j;
2301 
2302 	for_each_sd_topology(tl) {
2303 		struct sd_data *sdd = &tl->data;
2304 
2305 		for_each_cpu(j, cpu_map) {
2306 			struct sched_domain *sd;
2307 
2308 			if (sdd->sd) {
2309 				sd = *per_cpu_ptr(sdd->sd, j);
2310 				if (sd && (sd->flags & SD_OVERLAP))
2311 					free_sched_groups(sd->groups, 0);
2312 				kfree(*per_cpu_ptr(sdd->sd, j));
2313 			}
2314 
2315 			if (sdd->sds)
2316 				kfree(*per_cpu_ptr(sdd->sds, j));
2317 			if (sdd->sg)
2318 				kfree(*per_cpu_ptr(sdd->sg, j));
2319 			if (sdd->sgc)
2320 				kfree(*per_cpu_ptr(sdd->sgc, j));
2321 		}
2322 		free_percpu(sdd->sd);
2323 		sdd->sd = NULL;
2324 		free_percpu(sdd->sds);
2325 		sdd->sds = NULL;
2326 		free_percpu(sdd->sg);
2327 		sdd->sg = NULL;
2328 		free_percpu(sdd->sgc);
2329 		sdd->sgc = NULL;
2330 	}
2331 }
2332 
2333 static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
2334 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
2335 		struct sched_domain *child, int cpu)
2336 {
2337 	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
2338 
2339 	if (child) {
2340 		sd->level = child->level + 1;
2341 		sched_domain_level_max = max(sched_domain_level_max, sd->level);
2342 		child->parent = sd;
2343 
2344 		if (!cpumask_subset(sched_domain_span(child),
2345 				    sched_domain_span(sd))) {
2346 			pr_err("BUG: arch topology borken\n");
2347 			pr_err("     the %s domain is not a subset of the %s domain\n",
2348 					child->name, sd->name);
2349 			/* Fixup, ensure @sd has at least @child CPUs. */
2350 			cpumask_or(sched_domain_span(sd),
2351 				   sched_domain_span(sd),
2352 				   sched_domain_span(child));
2353 		}
2354 
2355 	}
2356 	set_domain_attribute(sd, attr);
2357 
2358 	return sd;
2359 }
2360 
2361 /*
2362  * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
2363  * any two given CPUs at this (non-NUMA) topology level.
2364  */
2365 static bool topology_span_sane(struct sched_domain_topology_level *tl,
2366 			      const struct cpumask *cpu_map, int cpu)
2367 {
2368 	int i = cpu + 1;
2369 
2370 	/* NUMA levels are allowed to overlap */
2371 	if (tl->flags & SDTL_OVERLAP)
2372 		return true;
2373 
2374 	/*
2375 	 * Non-NUMA levels cannot partially overlap - they must be either
2376 	 * completely equal or completely disjoint. Otherwise we can end up
2377 	 * breaking the sched_group lists - i.e. a later get_group() pass
2378 	 * breaks the linking done for an earlier span.
2379 	 */
2380 	for_each_cpu_from(i, cpu_map) {
2381 		/*
2382 		 * We should 'and' all those masks with 'cpu_map' to exactly
2383 		 * match the topology we're about to build, but that can only
2384 		 * remove CPUs, which only lessens our ability to detect
2385 		 * overlaps
2386 		 */
2387 		if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
2388 		    cpumask_intersects(tl->mask(cpu), tl->mask(i)))
2389 			return false;
2390 	}
2391 
2392 	return true;
2393 }
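/*
 * Example of the breakage this guards against (hypothetical masks): if a
 * broken topology reported, at the same non-NUMA level,
 *
 *	tl->mask(0) == { 0, 1, 2 }
 *	tl->mask(3) == { 2, 3 }
 *
 * the spans intersect without being equal, so a later get_group() pass for
 * CPU 3 would re-link sched_groups already linked for CPU 0's span; the
 * check above rejects such a table instead.
 */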
2394 
2395 /*
2396  * Build sched domains for a given set of CPUs and attach the sched domains
2397  * to the individual CPUs
2398  */
2399 static int
2400 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
2401 {
2402 	enum s_alloc alloc_state = sa_none;
2403 	struct sched_domain *sd;
2404 	struct s_data d;
2405 	struct rq *rq = NULL;
2406 	int i, ret = -ENOMEM;
2407 	bool has_asym = false;
2408 	bool has_cluster = false;
2409 
2410 	if (WARN_ON(cpumask_empty(cpu_map)))
2411 		goto error;
2412 
2413 	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
2414 	if (alloc_state != sa_rootdomain)
2415 		goto error;
2416 
2417 	/* Set up domains for CPUs specified by the cpu_map: */
2418 	for_each_cpu(i, cpu_map) {
2419 		struct sched_domain_topology_level *tl;
2420 
2421 		sd = NULL;
2422 		for_each_sd_topology(tl) {
2423 
2424 			if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
2425 				goto error;
2426 
2427 			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
2428 
2429 			has_asym |= sd->flags & SD_ASYM_CPUCAPACITY;
2430 
2431 			if (tl == sched_domain_topology)
2432 				*per_cpu_ptr(d.sd, i) = sd;
2433 			if (tl->flags & SDTL_OVERLAP)
2434 				sd->flags |= SD_OVERLAP;
2435 			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
2436 				break;
2437 		}
2438 	}
2439 
2440 	/* Build the groups for the domains */
2441 	for_each_cpu(i, cpu_map) {
2442 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2443 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
2444 			if (sd->flags & SD_OVERLAP) {
2445 				if (build_overlap_sched_groups(sd, i))
2446 					goto error;
2447 			} else {
2448 				if (build_sched_groups(sd, i))
2449 					goto error;
2450 			}
2451 		}
2452 	}
2453 
2454 	/*
2455 	 * Calculate an allowed NUMA imbalance such that LLCs do not get
2456 	 * imbalanced.
2457 	 */
2458 	for_each_cpu(i, cpu_map) {
2459 		unsigned int imb = 0;
2460 		unsigned int imb_span = 1;
2461 
2462 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2463 			struct sched_domain *child = sd->child;
2464 
2465 			if (!(sd->flags & SD_SHARE_LLC) && child &&
2466 			    (child->flags & SD_SHARE_LLC)) {
2467 				struct sched_domain __rcu *top_p;
2468 				unsigned int nr_llcs;
2469 
2470 				/*
2471 				 * For a single LLC per node, allow an
2472 				 * imbalance up to 12.5% of the node. This is an
2473 				 * arbitrary cutoff based on two factors -- SMT and
2474 				 * memory channels. For SMT-2, the intent is to
2475 				 * avoid premature sharing of HT resources but
2476 				 * SMT-4 or SMT-8 *may* benefit from a different
2477 				 * cutoff. For memory channels, this is a very
2478 				 * rough estimate of how many channels may be
2479 				 * active and is based on recent CPUs with
2480 				 * many cores.
2481 				 *
2482 				 * For multiple LLCs, allow an imbalance
2483 				 * until multiple tasks would share an LLC
2484 				 * on one node while LLCs on another node
2485 				 * remain idle. This assumes that there are
2486 				 * enough logical CPUs per LLC to avoid SMT
2487 				 * factors and that there is a correlation
2488 				 * between LLCs and memory channels.
2489 				 */
2490 				nr_llcs = sd->span_weight / child->span_weight;
2491 				if (nr_llcs == 1)
2492 					imb = sd->span_weight >> 3;
2493 				else
2494 					imb = nr_llcs;
2495 				imb = max(1U, imb);
2496 				sd->imb_numa_nr = imb;
2497 
2498 				/* Set span based on the first NUMA domain. */
2499 				top_p = sd->parent;
2500 				while (top_p && !(top_p->flags & SD_NUMA)) {
2501 					top_p = top_p->parent;
2502 				}
2503 				imb_span = top_p ? top_p->span_weight : sd->span_weight;
2504 			} else {
2505 				int factor = max(1U, (sd->span_weight / imb_span));
2506 
2507 				sd->imb_numa_nr = imb * factor;
2508 			}
2509 		}
2510 	}
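	/*
	 * Worked example (hypothetical sizes): a node with 16 CPUs in a
	 * single LLC gives nr_llcs == 1, so the first !SD_SHARE_LLC domain
	 * gets imb = 16 >> 3 = 2. With the first SD_NUMA parent spanning
	 * 64 CPUs (imb_span = 64), a wider NUMA level spanning 256 CPUs
	 * then gets imb_numa_nr = 2 * (256 / 64) = 8.
	 */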
2511 
2512 	/* Calculate CPU capacity for physical packages and nodes */
2513 	for (i = nr_cpumask_bits - 1; i >= 0; i--) {
2514 		if (!cpumask_test_cpu(i, cpu_map))
2515 			continue;
2516 
2517 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2518 			claim_allocations(i, sd);
2519 			init_sched_groups_capacity(i, sd);
2520 		}
2521 	}
2522 
2523 	/* Attach the domains */
2524 	rcu_read_lock();
2525 	for_each_cpu(i, cpu_map) {
2526 		rq = cpu_rq(i);
2527 		sd = *per_cpu_ptr(d.sd, i);
2528 
2529 		cpu_attach_domain(sd, d.rd, i);
2530 
2531 		if (lowest_flag_domain(i, SD_CLUSTER))
2532 			has_cluster = true;
2533 	}
2534 	rcu_read_unlock();
2535 
2536 	if (has_asym)
2537 		static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
2538 
2539 	if (has_cluster)
2540 		static_branch_inc_cpuslocked(&sched_cluster_active);
2541 
2542 	if (rq && sched_debug_verbose)
2543 		pr_info("root domain span: %*pbl\n", cpumask_pr_args(cpu_map));
2544 
2545 	ret = 0;
2546 error:
2547 	__free_domain_allocs(&d, alloc_state, cpu_map);
2548 
2549 	return ret;
2550 }
2551 
2552 /* Current sched domains: */
2553 static cpumask_var_t			*doms_cur;
2554 
2555 /* Number of sched domains in 'doms_cur': */
2556 static int				ndoms_cur;
2557 
2558 /* Attributes of custom domains in 'doms_cur' */
2559 static struct sched_domain_attr		*dattr_cur;
2560 
2561 /*
2562  * Special case: If a kmalloc() of a doms_cur partition (array of
2563  * cpumask) fails, then fall back to a single sched domain,
2564  * as determined by the single cpumask fallback_doms.
2565  */
2566 static cpumask_var_t			fallback_doms;
2567 
2568 /*
2569  * arch_update_cpu_topology lets virtualized architectures update the
2570  * CPU core maps. It is supposed to return 1 if the topology changed
2571  * or 0 if it stayed the same.
2572  */
2573 int __weak arch_update_cpu_topology(void)
2574 {
2575 	return 0;
2576 }
2577 
2578 cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
2579 {
2580 	int i;
2581 	cpumask_var_t *doms;
2582 
2583 	doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
2584 	if (!doms)
2585 		return NULL;
2586 	for (i = 0; i < ndoms; i++) {
2587 		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
2588 			free_sched_domains(doms, i);
2589 			return NULL;
2590 		}
2591 	}
2592 	return doms;
2593 }
2594 
2595 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
2596 {
2597 	unsigned int i;
2598 	for (i = 0; i < ndoms; i++)
2599 		free_cpumask_var(doms[i]);
2600 	kfree(doms);
2601 }
2602 
2603 /*
2604  * Set up scheduler domains and groups.  For now this just excludes isolated
2605  * CPUs, but could be used to exclude other special cases in the future.
2606  */
2607 int __init sched_init_domains(const struct cpumask *cpu_map)
2608 {
2609 	int err;
2610 
2611 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
2612 	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
2613 	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
2614 
2615 	arch_update_cpu_topology();
2616 	asym_cpu_capacity_scan();
2617 	ndoms_cur = 1;
2618 	doms_cur = alloc_sched_domains(ndoms_cur);
2619 	if (!doms_cur)
2620 		doms_cur = &fallback_doms;
2621 	cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN));
2622 	err = build_sched_domains(doms_cur[0], NULL);
2623 
2624 	return err;
2625 }
2626 
2627 /*
2628  * Detach sched domains from a group of CPUs specified in cpu_map
2629  * These CPUs will now be attached to the NULL domain
2630  */
2631 static void detach_destroy_domains(const struct cpumask *cpu_map)
2632 {
2633 	unsigned int cpu = cpumask_any(cpu_map);
2634 	int i;
2635 
2636 	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
2637 		static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
2638 
2639 	if (static_branch_unlikely(&sched_cluster_active))
2640 		static_branch_dec_cpuslocked(&sched_cluster_active);
2641 
2642 	rcu_read_lock();
2643 	for_each_cpu(i, cpu_map)
2644 		cpu_attach_domain(NULL, &def_root_domain, i);
2645 	rcu_read_unlock();
2646 }
2647 
2648 /* handle null as "default" */
2649 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
2650 			struct sched_domain_attr *new, int idx_new)
2651 {
2652 	struct sched_domain_attr tmp;
2653 
2654 	/* Fast path: */
2655 	if (!new && !cur)
2656 		return 1;
2657 
2658 	tmp = SD_ATTR_INIT;
2659 
2660 	return !memcmp(cur ? (cur + idx_cur) : &tmp,
2661 			new ? (new + idx_new) : &tmp,
2662 			sizeof(struct sched_domain_attr));
2663 }
2664 
2665 /*
2666  * Partition sched domains as specified by the 'ndoms_new'
2667  * cpumasks in the array doms_new[] of cpumasks. This compares
2668  * doms_new[] to the current sched domain partitioning, doms_cur[].
2669  * It destroys each deleted domain and builds each new domain.
2670  *
2671  * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
2672  * The masks don't intersect (don't overlap). We should set up one
2673  * sched domain for each mask. CPUs not in any of the cpumasks will
2674  * not be load balanced. If the same cpumask appears both in the
2675  * current 'doms_cur' domains and in the new 'doms_new', we can leave
2676  * it as it is.
2677  *
2678  * The passed in 'doms_new' should be allocated using
2679  * alloc_sched_domains.  This routine takes ownership of it and will
2680  * free_sched_domains it when done with it. If the caller failed the
2681  * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
2682  * and partition_sched_domains() will fall back to the single partition
2683  * 'fallback_doms'; this also forces the domains to be rebuilt.
2684  *
2685  * If doms_new == NULL it will be replaced with cpu_online_mask.
2686  * ndoms_new == 0 is a special case for destroying existing domains,
2687  * and it will not create the default domain.
2688  *
2689  * Call with hotplug lock and sched_domains_mutex held
2690  */
2691 void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
2692 				    struct sched_domain_attr *dattr_new)
2693 {
2694 	bool __maybe_unused has_eas = false;
2695 	int i, j, n;
2696 	int new_topology;
2697 
2698 	lockdep_assert_held(&sched_domains_mutex);
2699 
2700 	/* Let the architecture update CPU core mappings: */
2701 	new_topology = arch_update_cpu_topology();
2702 	/* Trigger rebuilding CPU capacity asymmetry data */
2703 	if (new_topology)
2704 		asym_cpu_capacity_scan();
2705 
2706 	if (!doms_new) {
2707 		WARN_ON_ONCE(dattr_new);
2708 		n = 0;
2709 		doms_new = alloc_sched_domains(1);
2710 		if (doms_new) {
2711 			n = 1;
2712 			cpumask_and(doms_new[0], cpu_active_mask,
2713 				    housekeeping_cpumask(HK_TYPE_DOMAIN));
2714 		}
2715 	} else {
2716 		n = ndoms_new;
2717 	}
2718 
2719 	/* Destroy deleted domains: */
2720 	for (i = 0; i < ndoms_cur; i++) {
2721 		for (j = 0; j < n && !new_topology; j++) {
2722 			if (cpumask_equal(doms_cur[i], doms_new[j]) &&
2723 			    dattrs_equal(dattr_cur, i, dattr_new, j)) {
2724 				struct root_domain *rd;
2725 
2726 				/*
2727 				 * This domain won't be destroyed and as such
2728 				 * its dl_bw->total_bw needs to be cleared.
2729 				 * Tasks contribution will be then recomputed
2730 				 * in function dl_update_tasks_root_domain(),
2731 				 * dl_servers contribution in function
2732 				 * dl_restore_server_root_domain().
2733 				 */
2734 				rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
2735 				dl_clear_root_domain(rd);
2736 				goto match1;
2737 			}
2738 		}
2739 		/* No match - a current sched domain not in new doms_new[] */
2740 		detach_destroy_domains(doms_cur[i]);
2741 match1:
2742 		;
2743 	}
2744 
2745 	n = ndoms_cur;
2746 	if (!doms_new) {
2747 		n = 0;
2748 		doms_new = &fallback_doms;
2749 		cpumask_and(doms_new[0], cpu_active_mask,
2750 			    housekeeping_cpumask(HK_TYPE_DOMAIN));
2751 	}
2752 
2753 	/* Build new domains: */
2754 	for (i = 0; i < ndoms_new; i++) {
2755 		for (j = 0; j < n && !new_topology; j++) {
2756 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
2757 			    dattrs_equal(dattr_new, i, dattr_cur, j))
2758 				goto match2;
2759 		}
2760 		/* No match - add a new doms_new */
2761 		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
2762 match2:
2763 		;
2764 	}
2765 
2766 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
2767 	/* Build perf domains: */
2768 	for (i = 0; i < ndoms_new; i++) {
2769 		for (j = 0; j < n && !sched_energy_update; j++) {
2770 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
2771 			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
2772 				has_eas = true;
2773 				goto match3;
2774 			}
2775 		}
2776 		/* No match - add perf domains for a new rd */
2777 		has_eas |= build_perf_domains(doms_new[i]);
2778 match3:
2779 		;
2780 	}
2781 	sched_energy_set(has_eas);
2782 #endif
2783 
2784 	/* Remember the new sched domains: */
2785 	if (doms_cur != &fallback_doms)
2786 		free_sched_domains(doms_cur, ndoms_cur);
2787 
2788 	kfree(dattr_cur);
2789 	doms_cur = doms_new;
2790 	dattr_cur = dattr_new;
2791 	ndoms_cur = ndoms_new;
2792 
2793 	update_sched_domain_debugfs();
2794 	dl_rebuild_rd_accounting();
2795 }
2796 
2797 /*
2798  * Call with hotplug lock held
2799  */
2800 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
2801 			     struct sched_domain_attr *dattr_new)
2802 {
2803 	sched_domains_mutex_lock();
2804 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
2805 	sched_domains_mutex_unlock();
2806 }
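/*
 * Usage sketch (hypothetical caller): a full rebuild against the current
 * active, non-isolated CPUs can simply be requested as
 *
 *	partition_sched_domains(1, NULL, NULL);
 *
 * which takes the doms_new == NULL / ndoms_new == 1 fallback path documented
 * above, while cpuset-style callers pass an alloc_sched_domains() array whose
 * ownership is transferred to this code.
 */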
2807