Lines matching "non", "-", "exclusive" (full-text search of kernel/cgroup/cpuset.c)

7  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
24 #include "cgroup-internal.h"
25 #include "cpuset-internal.h"
50 * node binding, add this key to provide a quick low-cost judgment
56 [PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus.exclusive",
59 [PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
62 [PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
68 * Exclusive CPUs distributed out to sub-partitions of top_cpuset
73 * Exclusive CPUs in isolated partitions
89 * - update_partition_sd_lb()
90 * - remote_partition_check()
91 * - update_cpumasks_hier()
92 * - cpuset_update_flag()
93 * - cpuset_hotplug_update_tasks()
94 * - cpuset_handle_hotplug()
98 * Note that update_relax_domain_level() in cpuset-v1.c can still call
106 * 0 - member (not a partition root)
107 * 1 - partition root
108 * 2 - partition root without load balancing (isolated)
109 * -1 - invalid partition root
110 * -2 - invalid isolated partition root
112 * There are 2 types of partitions - local or remote. Local partitions are
114 * cpuset.cpus.exclusive are optional in setting up local partitions.
116 * down exclusive CPUs by setting cpuset.cpus.exclusive along its ancestor
126 #define PRS_INVALID_ROOT -1
127 #define PRS_INVALID_ISOLATED -2
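The fragments above spell out the partition_root_state encoding; below is a minimal stand-alone sketch of the full set of states it implies (the file itself uses #defines; PRS_MEMBER and PRS_ROOT appear in fragments further down, and PRS_ISOLATED follows the same convention), plus the invalidation-by-negation that make_partition_invalid() performs:

enum prs_state {
	PRS_MEMBER		=  0,	/* not a partition root */
	PRS_ROOT		=  1,	/* partition root */
	PRS_ISOLATED		=  2,	/* partition root, no load balancing */
	PRS_INVALID_ROOT	= -1,	/* invalid partition root */
	PRS_INVALID_ISOLATED	= -2,	/* invalid isolated partition root */
};

/* Illustrative helper: a valid state is invalidated by negating it, which
 * preserves whether it was a plain or an isolated partition root. */
static inline int prs_invalidate(int prs)
{
	return prs > 0 ? -prs : prs;
}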
147 cs->nr_deadline_tasks++; in inc_dl_tasks_cs()
154 cs->nr_deadline_tasks--; in dec_dl_tasks_cs()
159 return cs->partition_root_state > 0; in is_partition_valid()
164 return cs->partition_root_state < 0; in is_partition_invalid()
172 if (cs->partition_root_state > 0) in make_partition_invalid()
173 cs->partition_root_state = -cs->partition_root_state; in make_partition_invalid()
181 if (old_prs == cs->partition_root_state) in notify_partition_change()
183 cgroup_file_notify(&cs->partition_file); in notify_partition_change()
187 WRITE_ONCE(cs->prs_err, PERR_NONE); in notify_partition_change()
194 .relax_domain_level = -1,
199 * There are two global locks guarding cpuset structures - cpuset_mutex and
203 * paths that rely on priority inheritance (e.g. scheduler - on RT) for
220 * If a task is only holding callback_lock, then it has read-only
228 * small pieces of code, such as when reading out possibly multi-word
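A hedged sketch of the read-side pattern these fragments describe: a reader takes only callback_lock (a spinlock, interrupts disabled) around a short, read-only look at a cpuset. This is modelled on cpuset_cpus_allowed(), whose kerneldoc appears near the end of this listing:

	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	guarantee_online_cpus(tsk, pmask);	/* read-only access to the cpuset */
	spin_unlock_irqrestore(&callback_lock, flags);

Writers, by contrast, hold cpuset_mutex and take callback_lock only around the actual mask updates.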
272 * decrease cs->attach_in_progress.
273 * wake_up cpuset_attach_wq if cs->attach_in_progress==0.
279 cs->attach_in_progress--; in dec_attach_in_progress_locked()
280 if (!cs->attach_in_progress) in dec_attach_in_progress_locked()
308 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE); in is_in_v2_mode()
312 * partition_is_populated - check if partition has tasks
318 * be non-NULL when this cpuset is going to become a partition itself.
326 if (cs->css.cgroup->nr_populated_csets) in partition_is_populated()
328 if (!excluded_child && !cs->nr_subparts) in partition_is_populated()
329 return cgroup_is_populated(cs->css.cgroup); in partition_is_populated()
337 if (cgroup_is_populated(child->css.cgroup)) { in partition_is_populated()
352 * One way or another, we guarantee to return some non-empty subset
369 while (!cpumask_intersects(cs->effective_cpus, pmask)) in guarantee_online_cpus()
372 cpumask_and(pmask, pmask, cs->effective_cpus); in guarantee_online_cpus()
382 * One way or another, we guarantee to return some non-empty subset
389 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
391 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
395 * alloc_cpumasks - allocate three cpumasks for cpuset
398 * Return: 0 if successful, -ENOMEM otherwise.
400 * Only one of the two input arguments should be non-NULL.
407 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
408 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
409 pmask3 = &cs->effective_xcpus; in alloc_cpumasks()
410 pmask4 = &cs->exclusive_cpus; in alloc_cpumasks()
412 pmask1 = &tmp->new_cpus; in alloc_cpumasks()
413 pmask2 = &tmp->addmask; in alloc_cpumasks()
414 pmask3 = &tmp->delmask; in alloc_cpumasks()
419 return -ENOMEM; in alloc_cpumasks()
439 return -ENOMEM; in alloc_cpumasks()
443 * free_cpumasks - free cpumasks in a tmpmasks structure
450 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
451 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
452 free_cpumask_var(cs->effective_xcpus); in free_cpumasks()
453 free_cpumask_var(cs->exclusive_cpus); in free_cpumasks()
456 free_cpumask_var(tmp->new_cpus); in free_cpumasks()
457 free_cpumask_var(tmp->addmask); in free_cpumasks()
458 free_cpumask_var(tmp->delmask); in free_cpumasks()
463 * alloc_trial_cpuset - allocate a trial cpuset
479 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
480 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
481 cpumask_copy(trial->effective_xcpus, cs->effective_xcpus); in alloc_trial_cpuset()
482 cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus); in alloc_trial_cpuset()
487 * free_cpuset - free the cpuset
496 /* Return user specified exclusive CPUs */
499 return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed in user_xcpus()
500 : cs->exclusive_cpus; in user_xcpus()
505 return cpumask_empty(cs->cpus_allowed) && in xcpus_empty()
506 cpumask_empty(cs->exclusive_cpus); in xcpus_empty()
510 * cpusets_are_exclusive() - check if two cpusets are exclusive
512 * Return true if exclusive, false if not
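The body of cpusets_are_exclusive() is not among the matched lines; a minimal sketch consistent with user_xcpus() above (two cpusets are exclusive when their user-specified exclusive CPU sets do not intersect):

static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
{
	return !cpumask_intersects(user_xcpus(cs1), user_xcpus(cs2));
}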
525 * validate_change() - Used to validate that any proposed cpuset change
530 * our various subset and exclusive rules still be valid? Presumes
533 * 'cur' is the address of an actual, in-use cpuset. Operations
541 * Return 0 if valid, -errno if not.
564 * Cpusets with tasks - existing or newly being attached - can't in validate_change()
567 ret = -ENOSPC; in validate_change()
568 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { in validate_change()
569 if (!cpumask_empty(cur->cpus_allowed) && in validate_change()
570 cpumask_empty(trial->cpus_allowed)) in validate_change()
572 if (!nodes_empty(cur->mems_allowed) && in validate_change()
573 nodes_empty(trial->mems_allowed)) in validate_change()
586 * for non-isolated partition root. At this point, the target in validate_change()
594 ret = -EBUSY; in validate_change()
596 !cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial))) in validate_change()
600 * If either I or some sibling (!= me) is exclusive, we can't in validate_change()
603 ret = -EINVAL; in validate_change()
610 txset = !cpumask_empty(trial->exclusive_cpus); in validate_change()
611 cxset = !cpumask_empty(c->exclusive_cpus); in validate_change()
623 * available if these exclusive CPUs are activated. in validate_change()
626 xcpus = trial->exclusive_cpus; in validate_change()
627 acpus = c->cpus_allowed; in validate_change()
629 xcpus = c->exclusive_cpus; in validate_change()
630 acpus = trial->cpus_allowed; in validate_change()
636 nodes_intersects(trial->mems_allowed, c->mems_allowed)) in validate_change()
653 return cpumask_intersects(a->effective_cpus, b->effective_cpus); in cpusets_overlap()
659 if (dattr->relax_domain_level < c->relax_domain_level) in update_domain_attr()
660 dattr->relax_domain_level = c->relax_domain_level; in update_domain_attr()
673 if (cpumask_empty(cp->cpus_allowed)) { in update_domain_attr_tree()
687 /* jump label reference count + the top-level cpuset */ in nr_cpusets()
695 * A 'partial partition' is a set of non-overlapping subsets whose
702 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
713 * cp - cpuset pointer, used (together with pos_css) to perform a
714 * top-down scan of all cpusets. For our purposes, rebuilding
717 * csa - (for CpuSet Array) Array of pointers to all the cpusets
724 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
734 * and merging them using a union-find algorithm.
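As a stand-alone illustration of that merge step (not kernel code: plain bitmasks stand in for cpumasks, and the toy find_root()/merge() below replace the kernel's union_find node API seen in the fragments), cpusets whose CPU masks overlap collapse into one scheduling domain:

#include <stdio.h>

static int parent[8];

static int find_root(int x)
{
	while (parent[x] != x)
		x = parent[x] = parent[parent[x]];	/* path halving */
	return x;
}

static void merge(int a, int b)
{
	parent[find_root(a)] = find_root(b);
}

int main(void)
{
	/* Three "cpusets": CPUs {0,1}, {1,2} and {6,7}; the first two overlap. */
	unsigned long cpus[3] = { 0x03, 0x06, 0xc0 };
	int i, j;

	for (i = 0; i < 3; i++)
		parent[i] = i;
	for (i = 0; i < 3; i++)
		for (j = i + 1; j < 3; j++)
			if (cpus[i] & cpus[j])		/* effective_cpus overlap */
				merge(i, j);
	for (i = 0; i < 3; i++)
		printf("cpuset %d -> sched domain %d\n", i, find_root(i));
	return 0;
}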
744 struct cpuset *cp; /* top-down scan of cpusets */ in generate_sched_domains()
804 if (!cpumask_empty(cp->cpus_allowed) && in generate_sched_domains()
806 cpumask_intersects(cp->cpus_allowed, in generate_sched_domains()
811 !cpumask_empty(cp->effective_cpus)) in generate_sched_domains()
821 * non-empty effective_cpus will be saved into csn[]. in generate_sched_domains()
823 if ((cp->partition_root_state == PRS_ROOT) && in generate_sched_domains()
824 !cpumask_empty(cp->effective_cpus)) in generate_sched_domains()
829 * exclusive CPUs to be granted to child cpusets. in generate_sched_domains()
831 if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus)) in generate_sched_domains()
844 uf_node_init(&csa[i]->node); in generate_sched_domains()
855 uf_union(&csa[i]->node, &csa[j]->node); in generate_sched_domains()
862 if (uf_find(&csa[i]->node) == &csa[i]->node) in generate_sched_domains()
883 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a in generate_sched_domains()
893 cpumask_and(doms[i], csa[i]->effective_cpus, in generate_sched_domains()
896 cpumask_copy(doms[i], csa[i]->effective_cpus); in generate_sched_domains()
906 if (uf_find(&csa[j]->node) == &csa[i]->node) { in generate_sched_domains()
915 cpumask_or(dp, dp, csa[j]->effective_cpus); in generate_sched_domains()
946 if (cs->nr_deadline_tasks == 0) in dl_update_tasks_root_domain()
949 css_task_iter_start(&cs->css, 0, &it); in dl_update_tasks_root_domain()
979 if (cpumask_empty(cs->effective_cpus)) { in dl_rebuild_rd_accounting()
984 css_get(&cs->css); in dl_rebuild_rd_accounting()
991 css_put(&cs->css); in dl_rebuild_rd_accounting()
1008 * If the flag 'sched_load_balance' of any cpuset with non-empty
1010 * which has that flag enabled, or if any cpuset with a non-empty
1053 if (!cpumask_subset(cs->effective_cpus, in rebuild_sched_domains_locked()
1096 * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1112 css_task_iter_start(&cs->css, 0, &it); in cpuset_update_tasks_cpumask()
1124 cpumask_and(new_cpus, possible_mask, cs->effective_cpus); in cpuset_update_tasks_cpumask()
1132 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1142 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); in compute_effective_cpumask()
1160 * Update partition exclusive flag
1166 bool exclusive = (new_prs > PRS_MEMBER); in update_partition_exclusive() local
1168 if (exclusive && !is_cpu_exclusive(cs)) { in update_partition_exclusive()
1171 } else if (!exclusive && is_cpu_exclusive(cs)) { in update_partition_exclusive()
1187 int new_prs = cs->partition_root_state; in update_partition_sd_lb()
1203 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1205 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1213 * tasks_nocpu_error - Return true if tasks will have no effective_cpus
1221 return (cpumask_subset(parent->effective_cpus, xcpus) && in tasks_nocpu_error()
1236 cs->nr_subparts = 0; in reset_partition_data()
1237 if (cpumask_empty(cs->exclusive_cpus)) { in reset_partition_data()
1238 cpumask_clear(cs->effective_xcpus); in reset_partition_data()
1240 clear_bit(CS_CPU_EXCLUSIVE, &cs->flags); in reset_partition_data()
1242 if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed)) in reset_partition_data()
1243 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in reset_partition_data()
1247 * partition_xcpus_newstate - Exclusive CPUs state change
1250 * @xcpus: exclusive CPUs with state change
1262 * partition_xcpus_add - Add new exclusive CPUs to partition
1265 * @xcpus: exclusive CPUs to be added
1284 isolcpus_updated = (new_prs != parent->partition_root_state); in partition_xcpus_add()
1286 partition_xcpus_newstate(parent->partition_root_state, new_prs, in partition_xcpus_add()
1289 cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus); in partition_xcpus_add()
1294 * partition_xcpus_del - Remove exclusive CPUs from partition
1297 * @xcpus: exclusive CPUs to be removed
1315 isolcpus_updated = (old_prs != parent->partition_root_state); in partition_xcpus_del()
1317 partition_xcpus_newstate(old_prs, parent->partition_root_state, in partition_xcpus_del()
1321 cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus); in partition_xcpus_del()
1339 * cpuset_cpu_is_isolated - Check if the given CPU is isolated
1350 * compute_effective_exclusive_cpumask - compute effective exclusive CPUs
1352 * @xcpus: effective exclusive CPUs value to be set
1364 xcpus = cs->effective_xcpus; in compute_effective_exclusive_cpumask()
1366 return cpumask_and(xcpus, user_xcpus(cs), parent->effective_xcpus); in compute_effective_exclusive_cpumask()
1371 return !list_empty(&cs->remote_sibling); in is_remote_partition()
1380 * remote_partition_enable - Enable current cpuset as a remote partition root
1408 compute_effective_exclusive_cpumask(cs, tmp->new_cpus); in remote_partition_enable()
1409 if (cpumask_empty(tmp->new_cpus) || in remote_partition_enable()
1410 cpumask_intersects(tmp->new_cpus, subpartitions_cpus) || in remote_partition_enable()
1411 cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus)) in remote_partition_enable()
1415 isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus); in remote_partition_enable()
1416 list_add(&cs->remote_sibling, &remote_children); in remote_partition_enable()
1419 cs->prs_err = 0; in remote_partition_enable()
1424 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus); in remote_partition_enable()
1430 * remote_partition_disable - Remove current cpuset from remote partition list
1442 compute_effective_exclusive_cpumask(cs, tmp->new_cpus); in remote_partition_disable()
1444 WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, subpartitions_cpus)); in remote_partition_disable()
1447 list_del_init(&cs->remote_sibling); in remote_partition_disable()
1448 isolcpus_updated = partition_xcpus_del(cs->partition_root_state, in remote_partition_disable()
1449 NULL, tmp->new_cpus); in remote_partition_disable()
1450 if (cs->prs_err) in remote_partition_disable()
1451 cs->partition_root_state = -cs->partition_root_state; in remote_partition_disable()
1453 cs->partition_root_state = PRS_MEMBER; in remote_partition_disable()
1462 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus); in remote_partition_disable()
1467 * remote_cpus_update - cpus_exclusive change of remote partition
1479 int prs = cs->partition_root_state; in remote_cpus_update()
1485 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus)); in remote_cpus_update()
1488 cs->prs_err = PERR_CPUSEMPTY; in remote_cpus_update()
1492 adding = cpumask_andnot(tmp->addmask, newmask, cs->effective_xcpus); in remote_cpus_update()
1493 deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, newmask); in remote_cpus_update()
1502 cs->prs_err = PERR_ACCESS; in remote_cpus_update()
1503 else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) || in remote_cpus_update()
1504 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)) in remote_cpus_update()
1505 cs->prs_err = PERR_NOCPUS; in remote_cpus_update()
1506 if (cs->prs_err) in remote_cpus_update()
1512 isolcpus_updated += partition_xcpus_add(prs, NULL, tmp->addmask); in remote_cpus_update()
1514 isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask); in remote_cpus_update()
1521 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus); in remote_cpus_update()
1530 * remote_partition_check - check if a child remote partition needs update
1546 * Compute the effective exclusive CPUs that will be deleted. in remote_partition_check()
1548 if (!cpumask_andnot(delmask, cs->effective_xcpus, newmask) || in remote_partition_check()
1550 return; /* No deletion of exclusive CPUs in partitions */ in remote_partition_check()
1554 * be impacted by the deletion of exclusive CPUs. in remote_partition_check()
1562 if (cpumask_intersects(child->effective_cpus, delmask)) { in remote_partition_check()
1571 * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
1591 * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
1598 * For partcmd_enable*, the cpuset is being transformed from a non-partition
1605 * root back to a non-partition root. Any CPUs in effective_xcpus will be
1648 old_prs = new_prs = cs->partition_root_state; in update_parent_effective_cpumask()
1659 adding = cpumask_and(tmp->addmask, in update_parent_effective_cpumask()
1660 xcpus, parent->effective_xcpus); in update_parent_effective_cpumask()
1662 new_prs = -old_prs; in update_parent_effective_cpumask()
1663 subparts_delta--; in update_parent_effective_cpumask()
1689 !cpumask_intersects(xcpus, parent->effective_xcpus)) in update_parent_effective_cpumask()
1702 deleting = cpumask_and(tmp->delmask, xcpus, parent->effective_xcpus); in update_parent_effective_cpumask()
1712 cpumask_and(tmp->addmask, xcpus, parent->effective_xcpus); in update_parent_effective_cpumask()
1714 subparts_delta--; in update_parent_effective_cpumask()
1734 * & parent->effective_xcpus in update_parent_effective_cpumask()
1736 * & parent->effective_xcpus in update_parent_effective_cpumask()
1739 * delmask = newmask & parent->effective_xcpus in update_parent_effective_cpumask()
1743 deleting = cpumask_and(tmp->delmask, in update_parent_effective_cpumask()
1744 newmask, parent->effective_xcpus); in update_parent_effective_cpumask()
1746 cpumask_andnot(tmp->addmask, xcpus, newmask); in update_parent_effective_cpumask()
1747 adding = cpumask_and(tmp->addmask, tmp->addmask, in update_parent_effective_cpumask()
1748 parent->effective_xcpus); in update_parent_effective_cpumask()
1750 cpumask_andnot(tmp->delmask, newmask, xcpus); in update_parent_effective_cpumask()
1751 deleting = cpumask_and(tmp->delmask, tmp->delmask, in update_parent_effective_cpumask()
1752 parent->effective_xcpus); in update_parent_effective_cpumask()
1759 !cpumask_intersects(tmp->addmask, cpu_active_mask))) { in update_parent_effective_cpumask()
1762 adding = cpumask_and(tmp->addmask, in update_parent_effective_cpumask()
1763 xcpus, parent->effective_xcpus); in update_parent_effective_cpumask()
1769 * delmask = effective_xcpus & parent->effective_cpus in update_parent_effective_cpumask()
1785 adding = cpumask_and(tmp->addmask, in update_parent_effective_cpumask()
1786 xcpus, parent->effective_xcpus); in update_parent_effective_cpumask()
1788 cpumask_subset(xcpus, parent->effective_xcpus)) { in update_parent_effective_cpumask()
1791 bool exclusive = true; in update_parent_effective_cpumask() local
1802 exclusive = false; in update_parent_effective_cpumask()
1807 if (exclusive) in update_parent_effective_cpumask()
1808 deleting = cpumask_and(tmp->delmask, in update_parent_effective_cpumask()
1809 xcpus, parent->effective_cpus); in update_parent_effective_cpumask()
1817 WRITE_ONCE(cs->prs_err, part_error); in update_parent_effective_cpumask()
1824 switch (cs->partition_root_state) { in update_parent_effective_cpumask()
1828 new_prs = -old_prs; in update_parent_effective_cpumask()
1829 subparts_delta--; in update_parent_effective_cpumask()
1835 new_prs = -old_prs; in update_parent_effective_cpumask()
1867 cs->partition_root_state = new_prs; in update_parent_effective_cpumask()
1869 cs->nr_subparts = 0; in update_parent_effective_cpumask()
1877 tmp->addmask); in update_parent_effective_cpumask()
1880 tmp->delmask); in update_parent_effective_cpumask()
1883 parent->nr_subparts += subparts_delta; in update_parent_effective_cpumask()
1884 WARN_ON_ONCE(parent->nr_subparts < 0); in update_parent_effective_cpumask()
1893 cpuset_update_tasks_cpumask(parent, tmp->addmask); in update_parent_effective_cpumask()
1910 * compute_partition_effective_cpumask - compute effective_cpus for partition
1948 child->prs_err = 0; in compute_partition_effective_cpumask()
1949 if (!cpumask_subset(child->effective_xcpus, in compute_partition_effective_cpumask()
1950 cs->effective_xcpus)) in compute_partition_effective_cpumask()
1951 child->prs_err = PERR_INVCPUS; in compute_partition_effective_cpumask()
1953 cpumask_subset(new_ecpus, child->effective_xcpus)) in compute_partition_effective_cpumask()
1954 child->prs_err = PERR_NOCPUS; in compute_partition_effective_cpumask()
1956 if (child->prs_err) { in compute_partition_effective_cpumask()
1957 int old_prs = child->partition_root_state; in compute_partition_effective_cpumask()
1964 cs->nr_subparts--; in compute_partition_effective_cpumask()
1965 child->nr_subparts = 0; in compute_partition_effective_cpumask()
1971 child->effective_xcpus); in compute_partition_effective_cpumask()
1977 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
2016 if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs)) { in update_cpumasks_hier()
2022 old_prs = new_prs = cp->partition_root_state; in update_cpumasks_hier()
2025 compute_partition_effective_cpumask(cp, tmp->new_cpus); in update_cpumasks_hier()
2027 compute_effective_cpumask(tmp->new_cpus, cp, parent); in update_cpumasks_hier()
2034 if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) { in update_cpumasks_hier()
2045 if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus)) in update_cpumasks_hier()
2046 cpumask_copy(tmp->new_cpus, parent->effective_cpus); in update_cpumasks_hier()
2058 if (!cp->partition_root_state && !force && in update_cpumasks_hier()
2059 cpumask_equal(tmp->new_cpus, cp->effective_cpus) && in update_cpumasks_hier()
2074 switch (parent->partition_root_state) { in update_cpumasks_hier()
2087 new_prs = -cp->partition_root_state; in update_cpumasks_hier()
2088 WRITE_ONCE(cp->prs_err, in update_cpumasks_hier()
2095 if (!css_tryget_online(&cp->css)) in update_cpumasks_hier()
2105 new_prs = cp->partition_root_state; in update_cpumasks_hier()
2109 cpumask_copy(cp->effective_cpus, tmp->new_cpus); in update_cpumasks_hier()
2110 cp->partition_root_state = new_prs; in update_cpumasks_hier()
2115 if ((new_prs > 0) && cpumask_empty(cp->exclusive_cpus)) in update_cpumasks_hier()
2116 cpumask_and(cp->effective_xcpus, in update_cpumasks_hier()
2117 cp->cpus_allowed, parent->effective_xcpus); in update_cpumasks_hier()
2125 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); in update_cpumasks_hier()
2127 cpuset_update_tasks_cpumask(cp, cp->effective_cpus); in update_cpumasks_hier()
2137 set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); in update_cpumasks_hier()
2139 clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); in update_cpumasks_hier()
2143 * On legacy hierarchy, if the effective cpumask of any non- in update_cpumasks_hier()
2148 if (!cpumask_empty(cp->cpus_allowed) && in update_cpumasks_hier()
2154 css_put(&cp->css); in update_cpumasks_hier()
2163 * update_sibling_cpumasks - Update siblings cpumasks
2193 compute_effective_cpumask(tmp->new_cpus, sibling, in update_sibling_cpumasks()
2195 if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus)) in update_sibling_cpumasks()
2198 if (!css_tryget_online(&sibling->css)) in update_sibling_cpumasks()
2204 css_put(&sibling->css); in update_sibling_cpumasks()
2210 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
2223 int old_prs = cs->partition_root_state; in update_cpumask()
2225 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ in update_cpumask()
2227 return -EACCES; in update_cpumask()
2236 cpumask_clear(trialcs->cpus_allowed); in update_cpumask()
2237 if (cpumask_empty(trialcs->exclusive_cpus)) in update_cpumask()
2238 cpumask_clear(trialcs->effective_xcpus); in update_cpumask()
2240 retval = cpulist_parse(buf, trialcs->cpus_allowed); in update_cpumask()
2244 if (!cpumask_subset(trialcs->cpus_allowed, in update_cpumask()
2246 return -EINVAL; in update_cpumask()
2251 * trialcs->effective_xcpus is used as a temporary cpumask in update_cpumask()
2254 if (!cpumask_empty(trialcs->exclusive_cpus) || is_partition_valid(cs)) in update_cpumask()
2259 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
2263 return -ENOMEM; in update_cpumask()
2267 cpumask_empty(trialcs->effective_xcpus)) { in update_cpumask()
2269 cs->prs_err = PERR_INVCPUS; in update_cpumask()
2270 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) { in update_cpumask()
2272 cs->prs_err = PERR_HKEEPING; in update_cpumask()
2273 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) { in update_cpumask()
2275 cs->prs_err = PERR_NOCPUS; in update_cpumask()
2283 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); in update_cpumask()
2287 if ((retval == -EINVAL) && cpuset_v2()) { in update_cpumask()
2292 * The -EINVAL error code indicates that partition sibling in update_cpumask()
2304 cpumask_intersects(xcpus, cp->effective_xcpus)) { in update_cpumask()
2319 struct cpumask *xcpus = trialcs->effective_xcpus; in update_cpumask()
2322 xcpus = trialcs->cpus_allowed; in update_cpumask()
2335 } else if (!cpumask_empty(cs->exclusive_cpus)) { in update_cpumask()
2337 * Use trialcs->effective_cpus as a temp cpumask in update_cpumask()
2339 remote_partition_check(cs, trialcs->effective_xcpus, in update_cpumask()
2340 trialcs->effective_cpus, &tmp); in update_cpumask()
2344 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
2345 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); in update_cpumask()
2354 if (cs->partition_root_state) in update_cpumask()
2362 * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
2377 int old_prs = cs->partition_root_state; in update_exclusive_cpumask()
2380 cpumask_clear(trialcs->exclusive_cpus); in update_exclusive_cpumask()
2381 cpumask_clear(trialcs->effective_xcpus); in update_exclusive_cpumask()
2383 retval = cpulist_parse(buf, trialcs->exclusive_cpus); in update_exclusive_cpumask()
2389 if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus)) in update_exclusive_cpumask()
2399 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); in update_exclusive_cpumask()
2406 return -ENOMEM; in update_exclusive_cpumask()
2409 if (cpumask_empty(trialcs->effective_xcpus)) { in update_exclusive_cpumask()
2411 cs->prs_err = PERR_INVCPUS; in update_exclusive_cpumask()
2412 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) { in update_exclusive_cpumask()
2414 cs->prs_err = PERR_HKEEPING; in update_exclusive_cpumask()
2415 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) { in update_exclusive_cpumask()
2417 cs->prs_err = PERR_NOCPUS; in update_exclusive_cpumask()
2424 remote_cpus_update(cs, trialcs->effective_xcpus, in update_exclusive_cpumask()
2431 trialcs->effective_xcpus, &tmp); in update_exclusive_cpumask()
2433 } else if (!cpumask_empty(trialcs->exclusive_cpus)) { in update_exclusive_cpumask()
2435 * Use trialcs->effective_cpus as a temp cpumask in update_exclusive_cpumask()
2437 remote_partition_check(cs, trialcs->effective_xcpus, in update_exclusive_cpumask()
2438 trialcs->effective_cpus, &tmp); in update_exclusive_cpumask()
2441 cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus); in update_exclusive_cpumask()
2442 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); in update_exclusive_cpumask()
2456 if (cs->partition_root_state) in update_exclusive_cpumask()
2484 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); in cpuset_migrate_mm_workfn()
2485 mmput(mwork->mm); in cpuset_migrate_mm_workfn()
2501 mwork->mm = mm; in cpuset_migrate_mm()
2502 mwork->from = *from; in cpuset_migrate_mm()
2503 mwork->to = *to; in cpuset_migrate_mm()
2504 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); in cpuset_migrate_mm()
2505 queue_work(cpuset_migrate_mm_wq, &mwork->work); in cpuset_migrate_mm()
2517 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2521 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2532 write_seqcount_begin(&tsk->mems_allowed_seq); in cpuset_change_task_nodemask()
2534 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); in cpuset_change_task_nodemask()
2536 tsk->mems_allowed = *newmems; in cpuset_change_task_nodemask()
2538 write_seqcount_end(&tsk->mems_allowed_seq); in cpuset_change_task_nodemask()
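For context, the matching read side of mems_allowed_seq (a sketch built on the read_mems_allowed_begin()/read_mems_allowed_retry() helpers from include/linux/cpuset.h; the surrounding code is illustrative only):

	unsigned int seq;
	nodemask_t mems;

	do {
		seq = read_mems_allowed_begin();
		mems = current->mems_allowed;	/* consistent snapshot */
	} while (read_mems_allowed_retry(seq));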
2547 * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2566 * take while holding tasklist_lock. Forks can happen - the in cpuset_update_tasks_nodemask()
2574 css_task_iter_start(&cs->css, 0, &it); in cpuset_update_tasks_nodemask()
2587 mpol_rebind_mm(mm, &cs->mems_allowed); in cpuset_update_tasks_nodemask()
2589 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in cpuset_update_tasks_nodemask()
2597 * cs->old_mems_allowed. in cpuset_update_tasks_nodemask()
2599 cs->old_mems_allowed = newmems; in cpuset_update_tasks_nodemask()
2606 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2626 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); in update_nodemasks_hier()
2633 *new_mems = parent->effective_mems; in update_nodemasks_hier()
2636 if (nodes_equal(*new_mems, cp->effective_mems)) { in update_nodemasks_hier()
2641 if (!css_tryget_online(&cp->css)) in update_nodemasks_hier()
2646 cp->effective_mems = *new_mems; in update_nodemasks_hier()
2650 !nodes_equal(cp->mems_allowed, cp->effective_mems)); in update_nodemasks_hier()
2655 css_put(&cp->css); in update_nodemasks_hier()
2670 * lock each such tasks mm->mmap_lock, scan its vma's and rebind
2680 * it's read-only in update_nodemask()
2683 retval = -EACCES; in update_nodemask()
2694 nodes_clear(trialcs->mems_allowed); in update_nodemask()
2696 retval = nodelist_parse(buf, trialcs->mems_allowed); in update_nodemask()
2700 if (!nodes_subset(trialcs->mems_allowed, in update_nodemask()
2702 retval = -EINVAL; in update_nodemask()
2707 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
2708 retval = 0; /* Too easy - nothing to do */ in update_nodemask()
2715 check_insane_mems_config(&trialcs->mems_allowed); in update_nodemask()
2718 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
2721 /* use trialcs->mems_allowed as a temp variable */ in update_nodemask()
2722 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
2739 * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
2757 return -ENOMEM; in cpuset_update_flag()
2760 set_bit(bit, &trialcs->flags); in cpuset_update_flag()
2762 clear_bit(bit, &trialcs->flags); in cpuset_update_flag()
2775 cs->flags = trialcs->flags; in cpuset_update_flag()
2778 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) { in cpuset_update_flag()
2793 * update_prstate - update partition_root_state
2802 int err = PERR_NONE, old_prs = cs->partition_root_state; in update_prstate()
2817 return -ENOMEM; in update_prstate()
2823 if ((new_prs > 0) && cpumask_empty(cs->exclusive_cpus)) { in update_prstate()
2825 cpumask_and(cs->effective_xcpus, in update_prstate()
2826 cs->cpus_allowed, parent->effective_xcpus); in update_prstate()
2882 new_prs = -new_prs; in update_prstate()
2887 cs->partition_root_state = new_prs; in update_prstate()
2888 WRITE_ONCE(cs->prs_err, err); in update_prstate()
2892 partition_xcpus_newstate(old_prs, new_prs, cs->effective_xcpus); in update_prstate()
2919 if (cpumask_empty(cs->effective_cpus) || in cpuset_can_attach_check()
2920 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed))) in cpuset_can_attach_check()
2921 return -ENOSPC; in cpuset_can_attach_check()
2927 cs->nr_migrate_dl_tasks = 0; in reset_migrate_dl_data()
2928 cs->sum_migrate_dl_bw = 0; in reset_migrate_dl_data()
2952 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus); in cpuset_can_attach()
2953 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_can_attach()
2972 cs->nr_migrate_dl_tasks++; in cpuset_can_attach()
2973 cs->sum_migrate_dl_bw += task->dl.dl_bw; in cpuset_can_attach()
2977 if (!cs->nr_migrate_dl_tasks) in cpuset_can_attach()
2980 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { in cpuset_can_attach()
2981 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); in cpuset_can_attach()
2985 ret = -EINVAL; in cpuset_can_attach()
2989 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); in cpuset_can_attach()
3001 cs->attach_in_progress++; in cpuset_can_attach()
3018 if (cs->nr_migrate_dl_tasks) { in cpuset_cancel_attach()
3019 int cpu = cpumask_any(cs->effective_cpus); in cpuset_cancel_attach()
3021 dl_bw_free(cpu, cs->sum_migrate_dl_bw); in cpuset_cancel_attach()
3069 cpus_updated = !cpumask_equal(cs->effective_cpus, in cpuset_attach()
3070 oldcs->effective_cpus); in cpuset_attach()
3071 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_attach()
3080 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
3095 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
3114 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, in cpuset_attach()
3122 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
3124 if (cs->nr_migrate_dl_tasks) { in cpuset_attach()
3125 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; in cpuset_attach()
3126 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; in cpuset_attach()
3143 int retval = -ENODEV; in cpuset_write_resmask()
3153 retval = -ENOMEM; in cpuset_write_resmask()
3157 switch (of_cft(of)->private) { in cpuset_write_resmask()
3168 retval = -EINVAL; in cpuset_write_resmask()
3193 cpuset_filetype_t type = seq_cft(sf)->private; in cpuset_common_seq_show()
3200 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
3203 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
3206 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
3209 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
3212 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus)); in cpuset_common_seq_show()
3215 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus)); in cpuset_common_seq_show()
3224 ret = -EINVAL; in cpuset_common_seq_show()
3236 switch (cs->partition_root_state) { in sched_partition_show()
3252 err = perr_strings[READ_ONCE(cs->prs_err)]; in sched_partition_show()
3267 int retval = -ENODEV; in sched_partition_write()
3278 return -EINVAL; in sched_partition_write()
3280 css_get(&cs->css); in sched_partition_write()
3290 css_put(&cs->css); in sched_partition_write()
3339 .name = "cpus.exclusive",
3348 .name = "cpus.exclusive.effective",
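These cftype entries surface to userspace as cpuset.cpus.exclusive and cpuset.cpus.exclusive.effective. A hypothetical userspace sketch (cgroup v2 mounted at /sys/fs/cgroup, run as root; the cgroup name "part1" and CPUs 2-3 are assumptions, not anything taken from this file) that carves out a local partition through these files:

#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(1);
	}
	fputs(val, f);
	if (fclose(f)) {
		perror(path);
		exit(1);
	}
}

int main(void)
{
	write_str("/sys/fs/cgroup/cgroup.subtree_control", "+cpuset");
	mkdir("/sys/fs/cgroup/part1", 0755);	/* ignore EEXIST for brevity */
	write_str("/sys/fs/cgroup/part1/cpuset.cpus", "2-3");
	write_str("/sys/fs/cgroup/part1/cpuset.cpus.exclusive", "2-3");
	write_str("/sys/fs/cgroup/part1/cpuset.cpus.partition", "root");
	return 0;
}

Writing "isolated" instead of "root" would additionally turn off load balancing for the partition, matching the PRS_ISOLATED state described earlier.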
3373 * cpuset_css_alloc - Allocate a cpuset css
3376 * Return: cpuset css on success, -ENOMEM on failure.
3378 * Allocate and initialize a new cpuset css, for non-NULL @parent_css, return
3391 return ERR_PTR(-ENOMEM); in cpuset_css_alloc()
3395 return ERR_PTR(-ENOMEM); in cpuset_css_alloc()
3398 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
3399 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
3400 cs->relax_domain_level = -1; in cpuset_css_alloc()
3401 INIT_LIST_HEAD(&cs->remote_sibling); in cpuset_css_alloc()
3405 __set_bit(CS_MEMORY_MIGRATE, &cs->flags); in cpuset_css_alloc()
3407 return &cs->css; in cpuset_css_alloc()
3423 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
3425 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
3427 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
3432 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_online()
3438 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
3439 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
3443 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) in cpuset_css_online()
3449 * historical reasons - the flag may be specified during mount. in cpuset_css_online()
3451 * Currently, if any sibling cpusets have exclusive cpus or mem, we in cpuset_css_online()
3452 * refuse to clone the configuration - thereby refusing the task to in cpuset_css_online()
3456 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive in cpuset_css_online()
3469 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
3470 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
3471 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
3472 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
3502 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
3556 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_can_fork()
3587 cs->attach_in_progress++; in cpuset_can_fork()
3595 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_cancel_fork()
3627 set_cpus_allowed_ptr(task, current->cpus_ptr); in cpuset_fork()
3628 task->mems_allowed = current->mems_allowed; in cpuset_fork()
3664 * cpuset_init - initialize cpusets at system boot
3707 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
3709 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
3712 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
3713 cs->effective_mems = *new_mems; in hotplug_update_tasks()
3728 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3743 int partcmd = -1; in cpuset_hotplug_update_tasks()
3746 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3754 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3761 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3763 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3776 cs->prs_err = PERR_HOTPLUG; in cpuset_hotplug_update_tasks()
3809 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3810 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3829 * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
3837 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3918 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_handle_hotplug()
3925 css_put(&cs->css); in cpuset_handle_hotplug()
3960 * cpuset_init_smp - initialize cpus_allowed
3983 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
3984 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3988 * attached to the specified @tsk. Guaranteed to return some non-empty
4026 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
4030 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
4031 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
4046 cs_mask = task_cs(tsk)->cpus_allowed; in cpuset_cpus_allowed_fallback()
4054 * We own tsk->cpus_allowed, nobody can change it under us. in cpuset_cpus_allowed_fallback()
4056 * But we used cs && cs->cpus_allowed lockless and thus can in cpuset_cpus_allowed_fallback()
4058 * the wrong tsk->cpus_allowed. However, both cases imply the in cpuset_cpus_allowed_fallback()
4059 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() in cpuset_cpus_allowed_fallback()
4063 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary in cpuset_cpus_allowed_fallback()
4075 nodes_setall(current->mems_allowed); in cpuset_init_current_mems_allowed()
4079 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
4080 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4083 * attached to the specified @tsk. Guaranteed to return some non-empty
4103 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4106 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4110 return nodes_intersects(*nodemask, current->mems_allowed); in cpuset_nodemask_valid_mems_allowed()
4114 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4127 * cpuset_node_allowed - Can we allocate on a memory node?
4160 * in_interrupt - any node ok (current task context irrelevant)
4161 * GFP_ATOMIC - any node ok
4162 * tsk_is_oom_victim - any node ok
4163 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
4164 * GFP_USER - only nodes in current tasks mems allowed ok.
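A hedged sketch of the decision flow behind that table, pieced together from the fragments below (node_isset(), PF_EXITING, nearest_hardwall_ancestor()); locking and the OOM-victim shortcut are left out:

	if (in_interrupt())
		return true;
	if (node_isset(node, current->mems_allowed))
		return true;
	if (gfp_mask & __GFP_HARDWALL)		/* e.g. GFP_USER */
		return false;
	if (current->flags & PF_EXITING)	/* let a dying task have memory */
		return true;
	/* GFP_KERNEL: allowed iff the node is in the nearest hardwalled ancestor */
	cs = nearest_hardwall_ancestor(task_cs(current));
	return node_isset(node, cs->mems_allowed);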
4174 if (node_isset(node, current->mems_allowed)) in cpuset_node_allowed()
4185 if (current->flags & PF_EXITING) /* Let dying task have memory */ in cpuset_node_allowed()
4193 allowed = node_isset(node, cs->mems_allowed); in cpuset_node_allowed()
4201 * cpuset_spread_node() - On which node to begin search for a page
4217 * only set nodes in task->mems_allowed that are online. So it
4228 return *rotor = next_node_in(*rotor, current->mems_allowed); in cpuset_spread_node()
4232 * cpuset_mem_spread_node() - On which node to begin search for a file page
4236 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) in cpuset_mem_spread_node()
4237 current->cpuset_mem_spread_rotor = in cpuset_mem_spread_node()
4238 node_random(&current->mems_allowed); in cpuset_mem_spread_node()
4240 return cpuset_spread_node(&current->cpuset_mem_spread_rotor); in cpuset_mem_spread_node()
4244 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4257 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); in cpuset_mems_allowed_intersects()
4261 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4272 cgrp = task_cs(current)->css.cgroup; in cpuset_print_current_mems_allowed()
4276 nodemask_pr_args(&current->mems_allowed)); in cpuset_print_current_mems_allowed()
4284 * - Print tasks cpuset path into seq_file.
4285 * - Used for /proc/<pid>/cpuset.
4286 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
4287 * doesn't really matter if tsk->cpuset changes after we read it,
4298 retval = -ENOMEM; in proc_cpuset_show()
4306 retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX, in proc_cpuset_show()
4307 current->nsproxy->cgroup_ns); in proc_cpuset_show()
4311 if (retval == -E2BIG) in proc_cpuset_show()
4312 retval = -ENAMETOOLONG; in proc_cpuset_show()
4329 nodemask_pr_args(&task->mems_allowed)); in cpuset_task_status_allowed()
4331 nodemask_pr_args(&task->mems_allowed)); in cpuset_task_status_allowed()