1 /* 2 * kernel/cpuset.c 3 * 4 * Processor and Memory placement constraints for sets of tasks. 5 * 6 * Copyright (C) 2003 BULL SA. 7 * Copyright (C) 2004-2007 Silicon Graphics, Inc. 8 * Copyright (C) 2006 Google, Inc 9 * 10 * Portions derived from Patrick Mochel's sysfs code. 11 * sysfs is Copyright (c) 2001-3 Patrick Mochel 12 * 13 * 2003-10-10 Written by Simon Derr. 14 * 2003-10-22 Updates by Stephen Hemminger. 15 * 2004 May-July Rework by Paul Jackson. 16 * 2006 Rework by Paul Menage to use generic cgroups 17 * 2008 Rework of the scheduler domains and CPU hotplug handling 18 * by Max Krasnyansky 19 * 20 * This file is subject to the terms and conditions of the GNU General Public 21 * License. See the file COPYING in the main directory of the Linux 22 * distribution for more details. 23 */ 24 #include "cgroup-internal.h" 25 26 #include <linux/cpu.h> 27 #include <linux/cpumask.h> 28 #include <linux/cpuset.h> 29 #include <linux/delay.h> 30 #include <linux/init.h> 31 #include <linux/interrupt.h> 32 #include <linux/kernel.h> 33 #include <linux/mempolicy.h> 34 #include <linux/mm.h> 35 #include <linux/memory.h> 36 #include <linux/export.h> 37 #include <linux/rcupdate.h> 38 #include <linux/sched.h> 39 #include <linux/sched/deadline.h> 40 #include <linux/sched/mm.h> 41 #include <linux/sched/task.h> 42 #include <linux/security.h> 43 #include <linux/spinlock.h> 44 #include <linux/oom.h> 45 #include <linux/sched/isolation.h> 46 #include <linux/cgroup.h> 47 #include <linux/wait.h> 48 #include <linux/workqueue.h> 49 50 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key); 51 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key); 52 53 /* 54 * There could be abnormal cpuset configurations for cpu or memory 55 * node binding, add this key to provide a quick low-cost judgment 56 * of the situation. 57 */ 58 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key); 59 60 /* See "Frequency meter" comments, below. */ 61 62 struct fmeter { 63 int cnt; /* unprocessed events count */ 64 int val; /* most recent output value */ 65 time64_t time; /* clock (secs) when val computed */ 66 spinlock_t lock; /* guards read or write of above */ 67 }; 68 69 /* 70 * Invalid partition error code 71 */ 72 enum prs_errcode { 73 PERR_NONE = 0, 74 PERR_INVCPUS, 75 PERR_INVPARENT, 76 PERR_NOTPART, 77 PERR_NOTEXCL, 78 PERR_NOCPUS, 79 PERR_HOTPLUG, 80 PERR_CPUSEMPTY, 81 PERR_HKEEPING, 82 }; 83 84 static const char * const perr_strings[] = { 85 [PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus.exclusive", 86 [PERR_INVPARENT] = "Parent is an invalid partition root", 87 [PERR_NOTPART] = "Parent is not a partition root", 88 [PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive", 89 [PERR_NOCPUS] = "Parent unable to distribute cpu downstream", 90 [PERR_HOTPLUG] = "No cpu available due to hotplug", 91 [PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty", 92 [PERR_HKEEPING] = "partition config conflicts with housekeeping setup", 93 }; 94 95 struct cpuset { 96 struct cgroup_subsys_state css; 97 98 unsigned long flags; /* "unsigned long" so bitops work */ 99 100 /* 101 * On default hierarchy: 102 * 103 * The user-configured masks can only be changed by writing to 104 * cpuset.cpus and cpuset.mems, and won't be limited by the 105 * parent masks. 106 * 107 * The effective masks is the real masks that apply to the tasks 108 * in the cpuset. They may be changed if the configured masks are 109 * changed or hotplug happens. 
110 * 111 * effective_mask == configured_mask & parent's effective_mask, 112 * and if it ends up empty, it will inherit the parent's mask. 113 * 114 * 115 * On legacy hierarchy: 116 * 117 * The user-configured masks are always the same with effective masks. 118 */ 119 120 /* user-configured CPUs and Memory Nodes allow to tasks */ 121 cpumask_var_t cpus_allowed; 122 nodemask_t mems_allowed; 123 124 /* effective CPUs and Memory Nodes allow to tasks */ 125 cpumask_var_t effective_cpus; 126 nodemask_t effective_mems; 127 128 /* 129 * Exclusive CPUs dedicated to current cgroup (default hierarchy only) 130 * 131 * The effective_cpus of a valid partition root comes solely from its 132 * effective_xcpus and some of the effective_xcpus may be distributed 133 * to sub-partitions below & hence excluded from its effective_cpus. 134 * For a valid partition root, its effective_cpus have no relationship 135 * with cpus_allowed unless its exclusive_cpus isn't set. 136 * 137 * This value will only be set if either exclusive_cpus is set or 138 * when this cpuset becomes a local partition root. 139 */ 140 cpumask_var_t effective_xcpus; 141 142 /* 143 * Exclusive CPUs as requested by the user (default hierarchy only) 144 * 145 * Its value is independent of cpus_allowed and designates the set of 146 * CPUs that can be granted to the current cpuset or its children when 147 * it becomes a valid partition root. The effective set of exclusive 148 * CPUs granted (effective_xcpus) depends on whether those exclusive 149 * CPUs are passed down by its ancestors and not yet taken up by 150 * another sibling partition root along the way. 151 * 152 * If its value isn't set, it defaults to cpus_allowed. 153 */ 154 cpumask_var_t exclusive_cpus; 155 156 /* 157 * This is old Memory Nodes tasks took on. 158 * 159 * - top_cpuset.old_mems_allowed is initialized to mems_allowed. 160 * - A new cpuset's old_mems_allowed is initialized when some 161 * task is moved into it. 162 * - old_mems_allowed is used in cpuset_migrate_mm() when we change 163 * cpuset.mems_allowed and have tasks' nodemask updated, and 164 * then old_mems_allowed is updated to mems_allowed. 165 */ 166 nodemask_t old_mems_allowed; 167 168 struct fmeter fmeter; /* memory_pressure filter */ 169 170 /* 171 * Tasks are being attached to this cpuset. Used to prevent 172 * zeroing cpus/mems_allowed between ->can_attach() and ->attach(). 173 */ 174 int attach_in_progress; 175 176 /* partition number for rebuild_sched_domains() */ 177 int pn; 178 179 /* for custom sched domain */ 180 int relax_domain_level; 181 182 /* number of valid local child partitions */ 183 int nr_subparts; 184 185 /* partition root state */ 186 int partition_root_state; 187 188 /* 189 * Default hierarchy only: 190 * use_parent_ecpus - set if using parent's effective_cpus 191 * child_ecpus_count - # of children with use_parent_ecpus set 192 */ 193 int use_parent_ecpus; 194 int child_ecpus_count; 195 196 /* 197 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we 198 * know when to rebuild associated root domain bandwidth information. 
199 */ 200 int nr_deadline_tasks; 201 int nr_migrate_dl_tasks; 202 u64 sum_migrate_dl_bw; 203 204 /* Invalid partition error code, not lock protected */ 205 enum prs_errcode prs_err; 206 207 /* Handle for cpuset.cpus.partition */ 208 struct cgroup_file partition_file; 209 210 /* Remote partition silbling list anchored at remote_children */ 211 struct list_head remote_sibling; 212 }; 213 214 /* 215 * Legacy hierarchy call to cgroup_transfer_tasks() is handled asynchrously 216 */ 217 struct cpuset_remove_tasks_struct { 218 struct work_struct work; 219 struct cpuset *cs; 220 }; 221 222 /* 223 * Exclusive CPUs distributed out to sub-partitions of top_cpuset 224 */ 225 static cpumask_var_t subpartitions_cpus; 226 227 /* 228 * Exclusive CPUs in isolated partitions 229 */ 230 static cpumask_var_t isolated_cpus; 231 232 /* List of remote partition root children */ 233 static struct list_head remote_children; 234 235 /* 236 * A flag to force sched domain rebuild at the end of an operation while 237 * inhibiting it in the intermediate stages when set. Currently it is only 238 * set in hotplug code. 239 */ 240 static bool force_sd_rebuild; 241 242 /* 243 * Partition root states: 244 * 245 * 0 - member (not a partition root) 246 * 1 - partition root 247 * 2 - partition root without load balancing (isolated) 248 * -1 - invalid partition root 249 * -2 - invalid isolated partition root 250 * 251 * There are 2 types of partitions - local or remote. Local partitions are 252 * those whose parents are partition root themselves. Setting of 253 * cpuset.cpus.exclusive are optional in setting up local partitions. 254 * Remote partitions are those whose parents are not partition roots. Passing 255 * down exclusive CPUs by setting cpuset.cpus.exclusive along its ancestor 256 * nodes are mandatory in creating a remote partition. 257 * 258 * For simplicity, a local partition can be created under a local or remote 259 * partition but a remote partition cannot have any partition root in its 260 * ancestor chain except the cgroup root. 261 */ 262 #define PRS_MEMBER 0 263 #define PRS_ROOT 1 264 #define PRS_ISOLATED 2 265 #define PRS_INVALID_ROOT -1 266 #define PRS_INVALID_ISOLATED -2 267 268 static inline bool is_prs_invalid(int prs_state) 269 { 270 return prs_state < 0; 271 } 272 273 /* 274 * Temporary cpumasks for working with partitions that are passed among 275 * functions to avoid memory allocation in inner functions. 276 */ 277 struct tmpmasks { 278 cpumask_var_t addmask, delmask; /* For partition root */ 279 cpumask_var_t new_cpus; /* For update_cpumasks_hier() */ 280 }; 281 282 static inline struct cpuset *css_cs(struct cgroup_subsys_state *css) 283 { 284 return css ? 
container_of(css, struct cpuset, css) : NULL; 285 } 286 287 /* Retrieve the cpuset for a task */ 288 static inline struct cpuset *task_cs(struct task_struct *task) 289 { 290 return css_cs(task_css(task, cpuset_cgrp_id)); 291 } 292 293 static inline struct cpuset *parent_cs(struct cpuset *cs) 294 { 295 return css_cs(cs->css.parent); 296 } 297 298 void inc_dl_tasks_cs(struct task_struct *p) 299 { 300 struct cpuset *cs = task_cs(p); 301 302 cs->nr_deadline_tasks++; 303 } 304 305 void dec_dl_tasks_cs(struct task_struct *p) 306 { 307 struct cpuset *cs = task_cs(p); 308 309 cs->nr_deadline_tasks--; 310 } 311 312 /* bits in struct cpuset flags field */ 313 typedef enum { 314 CS_ONLINE, 315 CS_CPU_EXCLUSIVE, 316 CS_MEM_EXCLUSIVE, 317 CS_MEM_HARDWALL, 318 CS_MEMORY_MIGRATE, 319 CS_SCHED_LOAD_BALANCE, 320 CS_SPREAD_PAGE, 321 CS_SPREAD_SLAB, 322 } cpuset_flagbits_t; 323 324 /* convenient tests for these bits */ 325 static inline bool is_cpuset_online(struct cpuset *cs) 326 { 327 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); 328 } 329 330 static inline int is_cpu_exclusive(const struct cpuset *cs) 331 { 332 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); 333 } 334 335 static inline int is_mem_exclusive(const struct cpuset *cs) 336 { 337 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); 338 } 339 340 static inline int is_mem_hardwall(const struct cpuset *cs) 341 { 342 return test_bit(CS_MEM_HARDWALL, &cs->flags); 343 } 344 345 static inline int is_sched_load_balance(const struct cpuset *cs) 346 { 347 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 348 } 349 350 static inline int is_memory_migrate(const struct cpuset *cs) 351 { 352 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); 353 } 354 355 static inline int is_spread_page(const struct cpuset *cs) 356 { 357 return test_bit(CS_SPREAD_PAGE, &cs->flags); 358 } 359 360 static inline int is_spread_slab(const struct cpuset *cs) 361 { 362 return test_bit(CS_SPREAD_SLAB, &cs->flags); 363 } 364 365 static inline int is_partition_valid(const struct cpuset *cs) 366 { 367 return cs->partition_root_state > 0; 368 } 369 370 static inline int is_partition_invalid(const struct cpuset *cs) 371 { 372 return cs->partition_root_state < 0; 373 } 374 375 /* 376 * Callers should hold callback_lock to modify partition_root_state. 377 */ 378 static inline void make_partition_invalid(struct cpuset *cs) 379 { 380 if (cs->partition_root_state > 0) 381 cs->partition_root_state = -cs->partition_root_state; 382 } 383 384 /* 385 * Send notification event of whenever partition_root_state changes. 386 */ 387 static inline void notify_partition_change(struct cpuset *cs, int old_prs) 388 { 389 if (old_prs == cs->partition_root_state) 390 return; 391 cgroup_file_notify(&cs->partition_file); 392 393 /* Reset prs_err if not invalid */ 394 if (is_partition_valid(cs)) 395 WRITE_ONCE(cs->prs_err, PERR_NONE); 396 } 397 398 static struct cpuset top_cpuset = { 399 .flags = BIT(CS_ONLINE) | BIT(CS_CPU_EXCLUSIVE) | 400 BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE), 401 .partition_root_state = PRS_ROOT, 402 .relax_domain_level = -1, 403 .remote_sibling = LIST_HEAD_INIT(top_cpuset.remote_sibling), 404 }; 405 406 /** 407 * cpuset_for_each_child - traverse online children of a cpuset 408 * @child_cs: loop cursor pointing to the current child 409 * @pos_css: used for iteration 410 * @parent_cs: target cpuset to walk children of 411 * 412 * Walk @child_cs through the online children of @parent_cs. Must be used 413 * with RCU read locked. 
414 */ 415 #define cpuset_for_each_child(child_cs, pos_css, parent_cs) \ 416 css_for_each_child((pos_css), &(parent_cs)->css) \ 417 if (is_cpuset_online(((child_cs) = css_cs((pos_css))))) 418 419 /** 420 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants 421 * @des_cs: loop cursor pointing to the current descendant 422 * @pos_css: used for iteration 423 * @root_cs: target cpuset to walk ancestor of 424 * 425 * Walk @des_cs through the online descendants of @root_cs. Must be used 426 * with RCU read locked. The caller may modify @pos_css by calling 427 * css_rightmost_descendant() to skip subtree. @root_cs is included in the 428 * iteration and the first node to be visited. 429 */ 430 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \ 431 css_for_each_descendant_pre((pos_css), &(root_cs)->css) \ 432 if (is_cpuset_online(((des_cs) = css_cs((pos_css))))) 433 434 /* 435 * There are two global locks guarding cpuset structures - cpuset_mutex and 436 * callback_lock. We also require taking task_lock() when dereferencing a 437 * task's cpuset pointer. See "The task_lock() exception", at the end of this 438 * comment. The cpuset code uses only cpuset_mutex. Other kernel subsystems 439 * can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset 440 * structures. Note that cpuset_mutex needs to be a mutex as it is used in 441 * paths that rely on priority inheritance (e.g. scheduler - on RT) for 442 * correctness. 443 * 444 * A task must hold both locks to modify cpusets. If a task holds 445 * cpuset_mutex, it blocks others, ensuring that it is the only task able to 446 * also acquire callback_lock and be able to modify cpusets. It can perform 447 * various checks on the cpuset structure first, knowing nothing will change. 448 * It can also allocate memory while just holding cpuset_mutex. While it is 449 * performing these checks, various callback routines can briefly acquire 450 * callback_lock to query cpusets. Once it is ready to make the changes, it 451 * takes callback_lock, blocking everyone else. 452 * 453 * Calls to the kernel memory allocator can not be made while holding 454 * callback_lock, as that would risk double tripping on callback_lock 455 * from one of the callbacks into the cpuset code from within 456 * __alloc_pages(). 457 * 458 * If a task is only holding callback_lock, then it has read-only 459 * access to cpusets. 460 * 461 * Now, the task_struct fields mems_allowed and mempolicy may be changed 462 * by other task, we use alloc_lock in the task_struct fields to protect 463 * them. 464 * 465 * The cpuset_common_seq_show() handlers only hold callback_lock across 466 * small pieces of code, such as when reading out possibly multi-word 467 * cpumasks and nodemasks. 
468 * 469 * Accessing a task's cpuset should be done in accordance with the 470 * guidelines for accessing subsystem state in kernel/cgroup.c 471 */ 472 473 static DEFINE_MUTEX(cpuset_mutex); 474 475 void cpuset_lock(void) 476 { 477 mutex_lock(&cpuset_mutex); 478 } 479 480 void cpuset_unlock(void) 481 { 482 mutex_unlock(&cpuset_mutex); 483 } 484 485 static DEFINE_SPINLOCK(callback_lock); 486 487 static struct workqueue_struct *cpuset_migrate_mm_wq; 488 489 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq); 490 491 static inline void check_insane_mems_config(nodemask_t *nodes) 492 { 493 if (!cpusets_insane_config() && 494 movable_only_nodes(nodes)) { 495 static_branch_enable(&cpusets_insane_config_key); 496 pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n" 497 "Cpuset allocations might fail even with a lot of memory available.\n", 498 nodemask_pr_args(nodes)); 499 } 500 } 501 502 /* 503 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when 504 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting 505 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option. 506 * With v2 behavior, "cpus" and "mems" are always what the users have 507 * requested and won't be changed by hotplug events. Only the effective 508 * cpus or mems will be affected. 509 */ 510 static inline bool is_in_v2_mode(void) 511 { 512 return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || 513 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE); 514 } 515 516 /** 517 * partition_is_populated - check if partition has tasks 518 * @cs: partition root to be checked 519 * @excluded_child: a child cpuset to be excluded in task checking 520 * Return: true if there are tasks, false otherwise 521 * 522 * It is assumed that @cs is a valid partition root. @excluded_child should 523 * be non-NULL when this cpuset is going to become a partition itself. 524 */ 525 static inline bool partition_is_populated(struct cpuset *cs, 526 struct cpuset *excluded_child) 527 { 528 struct cgroup_subsys_state *css; 529 struct cpuset *child; 530 531 if (cs->css.cgroup->nr_populated_csets) 532 return true; 533 if (!excluded_child && !cs->nr_subparts) 534 return cgroup_is_populated(cs->css.cgroup); 535 536 rcu_read_lock(); 537 cpuset_for_each_child(child, css, cs) { 538 if (child == excluded_child) 539 continue; 540 if (is_partition_valid(child)) 541 continue; 542 if (cgroup_is_populated(child->css.cgroup)) { 543 rcu_read_unlock(); 544 return true; 545 } 546 } 547 rcu_read_unlock(); 548 return false; 549 } 550 551 /* 552 * Return in pmask the portion of a task's cpusets's cpus_allowed that 553 * are online and are capable of running the task. If none are found, 554 * walk up the cpuset hierarchy until we find one that does have some 555 * appropriate cpus. 556 * 557 * One way or another, we guarantee to return some non-empty subset 558 * of cpu_online_mask. 559 * 560 * Call with callback_lock or cpuset_mutex held. 
561 */ 562 static void guarantee_online_cpus(struct task_struct *tsk, 563 struct cpumask *pmask) 564 { 565 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk); 566 struct cpuset *cs; 567 568 if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask))) 569 cpumask_copy(pmask, cpu_online_mask); 570 571 rcu_read_lock(); 572 cs = task_cs(tsk); 573 574 while (!cpumask_intersects(cs->effective_cpus, pmask)) 575 cs = parent_cs(cs); 576 577 cpumask_and(pmask, pmask, cs->effective_cpus); 578 rcu_read_unlock(); 579 } 580 581 /* 582 * Return in *pmask the portion of a cpusets's mems_allowed that 583 * are online, with memory. If none are online with memory, walk 584 * up the cpuset hierarchy until we find one that does have some 585 * online mems. The top cpuset always has some mems online. 586 * 587 * One way or another, we guarantee to return some non-empty subset 588 * of node_states[N_MEMORY]. 589 * 590 * Call with callback_lock or cpuset_mutex held. 591 */ 592 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) 593 { 594 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) 595 cs = parent_cs(cs); 596 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); 597 } 598 599 /* 600 * update task's spread flag if cpuset's page/slab spread flag is set 601 * 602 * Call with callback_lock or cpuset_mutex held. The check can be skipped 603 * if on default hierarchy. 604 */ 605 static void cpuset_update_task_spread_flags(struct cpuset *cs, 606 struct task_struct *tsk) 607 { 608 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) 609 return; 610 611 if (is_spread_page(cs)) 612 task_set_spread_page(tsk); 613 else 614 task_clear_spread_page(tsk); 615 616 if (is_spread_slab(cs)) 617 task_set_spread_slab(tsk); 618 else 619 task_clear_spread_slab(tsk); 620 } 621 622 /* 623 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q? 624 * 625 * One cpuset is a subset of another if all its allowed CPUs and 626 * Memory Nodes are a subset of the other, and its exclusive flags 627 * are only set if the other's are set. Call holding cpuset_mutex. 628 */ 629 630 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) 631 { 632 return cpumask_subset(p->cpus_allowed, q->cpus_allowed) && 633 nodes_subset(p->mems_allowed, q->mems_allowed) && 634 is_cpu_exclusive(p) <= is_cpu_exclusive(q) && 635 is_mem_exclusive(p) <= is_mem_exclusive(q); 636 } 637 638 /** 639 * alloc_cpumasks - allocate three cpumasks for cpuset 640 * @cs: the cpuset that have cpumasks to be allocated. 641 * @tmp: the tmpmasks structure pointer 642 * Return: 0 if successful, -ENOMEM otherwise. 643 * 644 * Only one of the two input arguments should be non-NULL. 
645 */ 646 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) 647 { 648 cpumask_var_t *pmask1, *pmask2, *pmask3, *pmask4; 649 650 if (cs) { 651 pmask1 = &cs->cpus_allowed; 652 pmask2 = &cs->effective_cpus; 653 pmask3 = &cs->effective_xcpus; 654 pmask4 = &cs->exclusive_cpus; 655 } else { 656 pmask1 = &tmp->new_cpus; 657 pmask2 = &tmp->addmask; 658 pmask3 = &tmp->delmask; 659 pmask4 = NULL; 660 } 661 662 if (!zalloc_cpumask_var(pmask1, GFP_KERNEL)) 663 return -ENOMEM; 664 665 if (!zalloc_cpumask_var(pmask2, GFP_KERNEL)) 666 goto free_one; 667 668 if (!zalloc_cpumask_var(pmask3, GFP_KERNEL)) 669 goto free_two; 670 671 if (pmask4 && !zalloc_cpumask_var(pmask4, GFP_KERNEL)) 672 goto free_three; 673 674 675 return 0; 676 677 free_three: 678 free_cpumask_var(*pmask3); 679 free_two: 680 free_cpumask_var(*pmask2); 681 free_one: 682 free_cpumask_var(*pmask1); 683 return -ENOMEM; 684 } 685 686 /** 687 * free_cpumasks - free cpumasks in a tmpmasks structure 688 * @cs: the cpuset that have cpumasks to be free. 689 * @tmp: the tmpmasks structure pointer 690 */ 691 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) 692 { 693 if (cs) { 694 free_cpumask_var(cs->cpus_allowed); 695 free_cpumask_var(cs->effective_cpus); 696 free_cpumask_var(cs->effective_xcpus); 697 free_cpumask_var(cs->exclusive_cpus); 698 } 699 if (tmp) { 700 free_cpumask_var(tmp->new_cpus); 701 free_cpumask_var(tmp->addmask); 702 free_cpumask_var(tmp->delmask); 703 } 704 } 705 706 /** 707 * alloc_trial_cpuset - allocate a trial cpuset 708 * @cs: the cpuset that the trial cpuset duplicates 709 */ 710 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) 711 { 712 struct cpuset *trial; 713 714 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); 715 if (!trial) 716 return NULL; 717 718 if (alloc_cpumasks(trial, NULL)) { 719 kfree(trial); 720 return NULL; 721 } 722 723 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); 724 cpumask_copy(trial->effective_cpus, cs->effective_cpus); 725 cpumask_copy(trial->effective_xcpus, cs->effective_xcpus); 726 cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus); 727 return trial; 728 } 729 730 /** 731 * free_cpuset - free the cpuset 732 * @cs: the cpuset to be freed 733 */ 734 static inline void free_cpuset(struct cpuset *cs) 735 { 736 free_cpumasks(cs, NULL); 737 kfree(cs); 738 } 739 740 /* Return user specified exclusive CPUs */ 741 static inline struct cpumask *user_xcpus(struct cpuset *cs) 742 { 743 return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed 744 : cs->exclusive_cpus; 745 } 746 747 static inline bool xcpus_empty(struct cpuset *cs) 748 { 749 return cpumask_empty(cs->cpus_allowed) && 750 cpumask_empty(cs->exclusive_cpus); 751 } 752 753 static inline struct cpumask *fetch_xcpus(struct cpuset *cs) 754 { 755 return !cpumask_empty(cs->exclusive_cpus) ? cs->exclusive_cpus : 756 cpumask_empty(cs->effective_xcpus) ? cs->cpus_allowed 757 : cs->effective_xcpus; 758 } 759 760 /* 761 * cpusets_are_exclusive() - check if two cpusets are exclusive 762 * 763 * Return true if exclusive, false if not 764 */ 765 static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2) 766 { 767 struct cpumask *xcpus1 = fetch_xcpus(cs1); 768 struct cpumask *xcpus2 = fetch_xcpus(cs2); 769 770 if (cpumask_intersects(xcpus1, xcpus2)) 771 return false; 772 return true; 773 } 774 775 /* 776 * validate_change_legacy() - Validate conditions specific to legacy (v1) 777 * behavior. 
778 */ 779 static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial) 780 { 781 struct cgroup_subsys_state *css; 782 struct cpuset *c, *par; 783 int ret; 784 785 WARN_ON_ONCE(!rcu_read_lock_held()); 786 787 /* Each of our child cpusets must be a subset of us */ 788 ret = -EBUSY; 789 cpuset_for_each_child(c, css, cur) 790 if (!is_cpuset_subset(c, trial)) 791 goto out; 792 793 /* On legacy hierarchy, we must be a subset of our parent cpuset. */ 794 ret = -EACCES; 795 par = parent_cs(cur); 796 if (par && !is_cpuset_subset(trial, par)) 797 goto out; 798 799 ret = 0; 800 out: 801 return ret; 802 } 803 804 /* 805 * validate_change() - Used to validate that any proposed cpuset change 806 * follows the structural rules for cpusets. 807 * 808 * If we replaced the flag and mask values of the current cpuset 809 * (cur) with those values in the trial cpuset (trial), would 810 * our various subset and exclusive rules still be valid? Presumes 811 * cpuset_mutex held. 812 * 813 * 'cur' is the address of an actual, in-use cpuset. Operations 814 * such as list traversal that depend on the actual address of the 815 * cpuset in the list must use cur below, not trial. 816 * 817 * 'trial' is the address of bulk structure copy of cur, with 818 * perhaps one or more of the fields cpus_allowed, mems_allowed, 819 * or flags changed to new, trial values. 820 * 821 * Return 0 if valid, -errno if not. 822 */ 823 824 static int validate_change(struct cpuset *cur, struct cpuset *trial) 825 { 826 struct cgroup_subsys_state *css; 827 struct cpuset *c, *par; 828 int ret = 0; 829 830 rcu_read_lock(); 831 832 if (!is_in_v2_mode()) 833 ret = validate_change_legacy(cur, trial); 834 if (ret) 835 goto out; 836 837 /* Remaining checks don't apply to root cpuset */ 838 if (cur == &top_cpuset) 839 goto out; 840 841 par = parent_cs(cur); 842 843 /* 844 * Cpusets with tasks - existing or newly being attached - can't 845 * be changed to have empty cpus_allowed or mems_allowed. 846 */ 847 ret = -ENOSPC; 848 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { 849 if (!cpumask_empty(cur->cpus_allowed) && 850 cpumask_empty(trial->cpus_allowed)) 851 goto out; 852 if (!nodes_empty(cur->mems_allowed) && 853 nodes_empty(trial->mems_allowed)) 854 goto out; 855 } 856 857 /* 858 * We can't shrink if we won't have enough room for SCHED_DEADLINE 859 * tasks. 860 */ 861 ret = -EBUSY; 862 if (is_cpu_exclusive(cur) && 863 !cpuset_cpumask_can_shrink(cur->cpus_allowed, 864 trial->cpus_allowed)) 865 goto out; 866 867 /* 868 * If either I or some sibling (!= me) is exclusive, we can't 869 * overlap. exclusive_cpus cannot overlap with each other if set. 870 */ 871 ret = -EINVAL; 872 cpuset_for_each_child(c, css, par) { 873 bool txset, cxset; /* Are exclusive_cpus set? */ 874 875 if (c == cur) 876 continue; 877 878 txset = !cpumask_empty(trial->exclusive_cpus); 879 cxset = !cpumask_empty(c->exclusive_cpus); 880 if (is_cpu_exclusive(trial) || is_cpu_exclusive(c) || 881 (txset && cxset)) { 882 if (!cpusets_are_exclusive(trial, c)) 883 goto out; 884 } else if (txset || cxset) { 885 struct cpumask *xcpus, *acpus; 886 887 /* 888 * When just one of the exclusive_cpus's is set, 889 * cpus_allowed of the other cpuset, if set, cannot be 890 * a subset of it or none of those CPUs will be 891 * available if these exclusive CPUs are activated. 
892 */ 893 if (txset) { 894 xcpus = trial->exclusive_cpus; 895 acpus = c->cpus_allowed; 896 } else { 897 xcpus = c->exclusive_cpus; 898 acpus = trial->cpus_allowed; 899 } 900 if (!cpumask_empty(acpus) && cpumask_subset(acpus, xcpus)) 901 goto out; 902 } 903 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && 904 nodes_intersects(trial->mems_allowed, c->mems_allowed)) 905 goto out; 906 } 907 908 ret = 0; 909 out: 910 rcu_read_unlock(); 911 return ret; 912 } 913 914 #ifdef CONFIG_SMP 915 /* 916 * Helper routine for generate_sched_domains(). 917 * Do cpusets a, b have overlapping effective cpus_allowed masks? 918 */ 919 static int cpusets_overlap(struct cpuset *a, struct cpuset *b) 920 { 921 return cpumask_intersects(a->effective_cpus, b->effective_cpus); 922 } 923 924 static void 925 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) 926 { 927 if (dattr->relax_domain_level < c->relax_domain_level) 928 dattr->relax_domain_level = c->relax_domain_level; 929 return; 930 } 931 932 static void update_domain_attr_tree(struct sched_domain_attr *dattr, 933 struct cpuset *root_cs) 934 { 935 struct cpuset *cp; 936 struct cgroup_subsys_state *pos_css; 937 938 rcu_read_lock(); 939 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { 940 /* skip the whole subtree if @cp doesn't have any CPU */ 941 if (cpumask_empty(cp->cpus_allowed)) { 942 pos_css = css_rightmost_descendant(pos_css); 943 continue; 944 } 945 946 if (is_sched_load_balance(cp)) 947 update_domain_attr(dattr, cp); 948 } 949 rcu_read_unlock(); 950 } 951 952 /* Must be called with cpuset_mutex held. */ 953 static inline int nr_cpusets(void) 954 { 955 /* jump label reference count + the top-level cpuset */ 956 return static_key_count(&cpusets_enabled_key.key) + 1; 957 } 958 959 /* 960 * generate_sched_domains() 961 * 962 * This function builds a partial partition of the systems CPUs 963 * A 'partial partition' is a set of non-overlapping subsets whose 964 * union is a subset of that set. 965 * The output of this function needs to be passed to kernel/sched/core.c 966 * partition_sched_domains() routine, which will rebuild the scheduler's 967 * load balancing domains (sched domains) as specified by that partial 968 * partition. 969 * 970 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst 971 * for a background explanation of this. 972 * 973 * Does not return errors, on the theory that the callers of this 974 * routine would rather not worry about failures to rebuild sched 975 * domains when operating in the severe memory shortage situations 976 * that could cause allocation failures below. 977 * 978 * Must be called with cpuset_mutex held. 979 * 980 * The three key local variables below are: 981 * cp - cpuset pointer, used (together with pos_css) to perform a 982 * top-down scan of all cpusets. For our purposes, rebuilding 983 * the schedulers sched domains, we can ignore !is_sched_load_ 984 * balance cpusets. 985 * csa - (for CpuSet Array) Array of pointers to all the cpusets 986 * that need to be load balanced, for convenient iterative 987 * access by the subsequent code that finds the best partition, 988 * i.e the set of domains (subsets) of CPUs such that the 989 * cpus_allowed of every cpuset marked is_sched_load_balance 990 * is a subset of one of these domains, while there are as 991 * many such domains as possible, each as small as possible. 
992 * doms - Conversion of 'csa' to an array of cpumasks, for passing to 993 * the kernel/sched/core.c routine partition_sched_domains() in a 994 * convenient format, that can be easily compared to the prior 995 * value to determine what partition elements (sched domains) 996 * were changed (added or removed.) 997 * 998 * Finding the best partition (set of domains): 999 * The triple nested loops below over i, j, k scan over the 1000 * load balanced cpusets (using the array of cpuset pointers in 1001 * csa[]) looking for pairs of cpusets that have overlapping 1002 * cpus_allowed, but which don't have the same 'pn' partition 1003 * number and gives them in the same partition number. It keeps 1004 * looping on the 'restart' label until it can no longer find 1005 * any such pairs. 1006 * 1007 * The union of the cpus_allowed masks from the set of 1008 * all cpusets having the same 'pn' value then form the one 1009 * element of the partition (one sched domain) to be passed to 1010 * partition_sched_domains(). 1011 */ 1012 static int generate_sched_domains(cpumask_var_t **domains, 1013 struct sched_domain_attr **attributes) 1014 { 1015 struct cpuset *cp; /* top-down scan of cpusets */ 1016 struct cpuset **csa; /* array of all cpuset ptrs */ 1017 int csn; /* how many cpuset ptrs in csa so far */ 1018 int i, j, k; /* indices for partition finding loops */ 1019 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ 1020 struct sched_domain_attr *dattr; /* attributes for custom domains */ 1021 int ndoms = 0; /* number of sched domains in result */ 1022 int nslot; /* next empty doms[] struct cpumask slot */ 1023 struct cgroup_subsys_state *pos_css; 1024 bool root_load_balance = is_sched_load_balance(&top_cpuset); 1025 bool cgrpv2 = cgroup_subsys_on_dfl(cpuset_cgrp_subsys); 1026 1027 doms = NULL; 1028 dattr = NULL; 1029 csa = NULL; 1030 1031 /* Special case for the 99% of systems with one, full, sched domain */ 1032 if (root_load_balance && cpumask_empty(subpartitions_cpus)) { 1033 single_root_domain: 1034 ndoms = 1; 1035 doms = alloc_sched_domains(ndoms); 1036 if (!doms) 1037 goto done; 1038 1039 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); 1040 if (dattr) { 1041 *dattr = SD_ATTR_INIT; 1042 update_domain_attr_tree(dattr, &top_cpuset); 1043 } 1044 cpumask_and(doms[0], top_cpuset.effective_cpus, 1045 housekeeping_cpumask(HK_TYPE_DOMAIN)); 1046 1047 goto done; 1048 } 1049 1050 csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL); 1051 if (!csa) 1052 goto done; 1053 csn = 0; 1054 1055 rcu_read_lock(); 1056 if (root_load_balance) 1057 csa[csn++] = &top_cpuset; 1058 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { 1059 if (cp == &top_cpuset) 1060 continue; 1061 1062 if (cgrpv2) 1063 goto v2; 1064 1065 /* 1066 * v1: 1067 * Continue traversing beyond @cp iff @cp has some CPUs and 1068 * isn't load balancing. The former is obvious. The 1069 * latter: All child cpusets contain a subset of the 1070 * parent's cpus, so just skip them, and then we call 1071 * update_domain_attr_tree() to calc relax_domain_level of 1072 * the corresponding sched domain. 
1073 */ 1074 if (!cpumask_empty(cp->cpus_allowed) && 1075 !(is_sched_load_balance(cp) && 1076 cpumask_intersects(cp->cpus_allowed, 1077 housekeeping_cpumask(HK_TYPE_DOMAIN)))) 1078 continue; 1079 1080 if (is_sched_load_balance(cp) && 1081 !cpumask_empty(cp->effective_cpus)) 1082 csa[csn++] = cp; 1083 1084 /* skip @cp's subtree */ 1085 pos_css = css_rightmost_descendant(pos_css); 1086 continue; 1087 1088 v2: 1089 /* 1090 * Only valid partition roots that are not isolated and with 1091 * non-empty effective_cpus will be saved into csn[]. 1092 */ 1093 if ((cp->partition_root_state == PRS_ROOT) && 1094 !cpumask_empty(cp->effective_cpus)) 1095 csa[csn++] = cp; 1096 1097 /* 1098 * Skip @cp's subtree if not a partition root and has no 1099 * exclusive CPUs to be granted to child cpusets. 1100 */ 1101 if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus)) 1102 pos_css = css_rightmost_descendant(pos_css); 1103 } 1104 rcu_read_unlock(); 1105 1106 /* 1107 * If there are only isolated partitions underneath the cgroup root, 1108 * we can optimize out unneeded sched domains scanning. 1109 */ 1110 if (root_load_balance && (csn == 1)) 1111 goto single_root_domain; 1112 1113 for (i = 0; i < csn; i++) 1114 csa[i]->pn = i; 1115 ndoms = csn; 1116 1117 restart: 1118 /* Find the best partition (set of sched domains) */ 1119 for (i = 0; i < csn; i++) { 1120 struct cpuset *a = csa[i]; 1121 int apn = a->pn; 1122 1123 for (j = 0; j < csn; j++) { 1124 struct cpuset *b = csa[j]; 1125 int bpn = b->pn; 1126 1127 if (apn != bpn && cpusets_overlap(a, b)) { 1128 for (k = 0; k < csn; k++) { 1129 struct cpuset *c = csa[k]; 1130 1131 if (c->pn == bpn) 1132 c->pn = apn; 1133 } 1134 ndoms--; /* one less element */ 1135 goto restart; 1136 } 1137 } 1138 } 1139 1140 /* 1141 * Now we know how many domains to create. 1142 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. 1143 */ 1144 doms = alloc_sched_domains(ndoms); 1145 if (!doms) 1146 goto done; 1147 1148 /* 1149 * The rest of the code, including the scheduler, can deal with 1150 * dattr==NULL case. No need to abort if alloc fails. 1151 */ 1152 dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr), 1153 GFP_KERNEL); 1154 1155 /* 1156 * Cgroup v2 doesn't support domain attributes, just set all of them 1157 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a 1158 * subset of HK_TYPE_DOMAIN housekeeping CPUs. 
1159 */ 1160 if (cgrpv2) { 1161 for (i = 0; i < ndoms; i++) { 1162 cpumask_copy(doms[i], csa[i]->effective_cpus); 1163 if (dattr) 1164 dattr[i] = SD_ATTR_INIT; 1165 } 1166 goto done; 1167 } 1168 1169 for (nslot = 0, i = 0; i < csn; i++) { 1170 struct cpuset *a = csa[i]; 1171 struct cpumask *dp; 1172 int apn = a->pn; 1173 1174 if (apn < 0) { 1175 /* Skip completed partitions */ 1176 continue; 1177 } 1178 1179 dp = doms[nslot]; 1180 1181 if (nslot == ndoms) { 1182 static int warnings = 10; 1183 if (warnings) { 1184 pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n", 1185 nslot, ndoms, csn, i, apn); 1186 warnings--; 1187 } 1188 continue; 1189 } 1190 1191 cpumask_clear(dp); 1192 if (dattr) 1193 *(dattr + nslot) = SD_ATTR_INIT; 1194 for (j = i; j < csn; j++) { 1195 struct cpuset *b = csa[j]; 1196 1197 if (apn == b->pn) { 1198 cpumask_or(dp, dp, b->effective_cpus); 1199 cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN)); 1200 if (dattr) 1201 update_domain_attr_tree(dattr + nslot, b); 1202 1203 /* Done with this partition */ 1204 b->pn = -1; 1205 } 1206 } 1207 nslot++; 1208 } 1209 BUG_ON(nslot != ndoms); 1210 1211 done: 1212 kfree(csa); 1213 1214 /* 1215 * Fallback to the default domain if kmalloc() failed. 1216 * See comments in partition_sched_domains(). 1217 */ 1218 if (doms == NULL) 1219 ndoms = 1; 1220 1221 *domains = doms; 1222 *attributes = dattr; 1223 return ndoms; 1224 } 1225 1226 static void dl_update_tasks_root_domain(struct cpuset *cs) 1227 { 1228 struct css_task_iter it; 1229 struct task_struct *task; 1230 1231 if (cs->nr_deadline_tasks == 0) 1232 return; 1233 1234 css_task_iter_start(&cs->css, 0, &it); 1235 1236 while ((task = css_task_iter_next(&it))) 1237 dl_add_task_root_domain(task); 1238 1239 css_task_iter_end(&it); 1240 } 1241 1242 static void dl_rebuild_rd_accounting(void) 1243 { 1244 struct cpuset *cs = NULL; 1245 struct cgroup_subsys_state *pos_css; 1246 1247 lockdep_assert_held(&cpuset_mutex); 1248 lockdep_assert_cpus_held(); 1249 lockdep_assert_held(&sched_domains_mutex); 1250 1251 rcu_read_lock(); 1252 1253 /* 1254 * Clear default root domain DL accounting, it will be computed again 1255 * if a task belongs to it. 1256 */ 1257 dl_clear_root_domain(&def_root_domain); 1258 1259 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { 1260 1261 if (cpumask_empty(cs->effective_cpus)) { 1262 pos_css = css_rightmost_descendant(pos_css); 1263 continue; 1264 } 1265 1266 css_get(&cs->css); 1267 1268 rcu_read_unlock(); 1269 1270 dl_update_tasks_root_domain(cs); 1271 1272 rcu_read_lock(); 1273 css_put(&cs->css); 1274 } 1275 rcu_read_unlock(); 1276 } 1277 1278 static void 1279 partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 1280 struct sched_domain_attr *dattr_new) 1281 { 1282 mutex_lock(&sched_domains_mutex); 1283 partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); 1284 dl_rebuild_rd_accounting(); 1285 mutex_unlock(&sched_domains_mutex); 1286 } 1287 1288 /* 1289 * Rebuild scheduler domains. 1290 * 1291 * If the flag 'sched_load_balance' of any cpuset with non-empty 1292 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset 1293 * which has that flag enabled, or if any cpuset with a non-empty 1294 * 'cpus' is removed, then call this routine to rebuild the 1295 * scheduler's dynamic sched domains. 1296 * 1297 * Call with cpuset_mutex held. Takes cpus_read_lock(). 
1298 */ 1299 static void rebuild_sched_domains_locked(void) 1300 { 1301 struct cgroup_subsys_state *pos_css; 1302 struct sched_domain_attr *attr; 1303 cpumask_var_t *doms; 1304 struct cpuset *cs; 1305 int ndoms; 1306 1307 lockdep_assert_cpus_held(); 1308 lockdep_assert_held(&cpuset_mutex); 1309 1310 /* 1311 * If we have raced with CPU hotplug, return early to avoid 1312 * passing doms with offlined cpu to partition_sched_domains(). 1313 * Anyways, cpuset_handle_hotplug() will rebuild sched domains. 1314 * 1315 * With no CPUs in any subpartitions, top_cpuset's effective CPUs 1316 * should be the same as the active CPUs, so checking only top_cpuset 1317 * is enough to detect racing CPU offlines. 1318 */ 1319 if (cpumask_empty(subpartitions_cpus) && 1320 !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) 1321 return; 1322 1323 /* 1324 * With subpartition CPUs, however, the effective CPUs of a partition 1325 * root should be only a subset of the active CPUs. Since a CPU in any 1326 * partition root could be offlined, all must be checked. 1327 */ 1328 if (!cpumask_empty(subpartitions_cpus)) { 1329 rcu_read_lock(); 1330 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { 1331 if (!is_partition_valid(cs)) { 1332 pos_css = css_rightmost_descendant(pos_css); 1333 continue; 1334 } 1335 if (!cpumask_subset(cs->effective_cpus, 1336 cpu_active_mask)) { 1337 rcu_read_unlock(); 1338 return; 1339 } 1340 } 1341 rcu_read_unlock(); 1342 } 1343 1344 /* Generate domain masks and attrs */ 1345 ndoms = generate_sched_domains(&doms, &attr); 1346 1347 /* Have scheduler rebuild the domains */ 1348 partition_and_rebuild_sched_domains(ndoms, doms, attr); 1349 } 1350 #else /* !CONFIG_SMP */ 1351 static void rebuild_sched_domains_locked(void) 1352 { 1353 } 1354 #endif /* CONFIG_SMP */ 1355 1356 static void rebuild_sched_domains_cpuslocked(void) 1357 { 1358 mutex_lock(&cpuset_mutex); 1359 rebuild_sched_domains_locked(); 1360 mutex_unlock(&cpuset_mutex); 1361 } 1362 1363 void rebuild_sched_domains(void) 1364 { 1365 cpus_read_lock(); 1366 rebuild_sched_domains_cpuslocked(); 1367 cpus_read_unlock(); 1368 } 1369 1370 /** 1371 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. 1372 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed 1373 * @new_cpus: the temp variable for the new effective_cpus mask 1374 * 1375 * Iterate through each task of @cs updating its cpus_allowed to the 1376 * effective cpuset's. As this function is called with cpuset_mutex held, 1377 * cpuset membership stays stable. For top_cpuset, task_cpu_possible_mask() 1378 * is used instead of effective_cpus to make sure all offline CPUs are also 1379 * included as hotplug code won't update cpumasks for tasks in top_cpuset. 
1380 */ 1381 static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus) 1382 { 1383 struct css_task_iter it; 1384 struct task_struct *task; 1385 bool top_cs = cs == &top_cpuset; 1386 1387 css_task_iter_start(&cs->css, 0, &it); 1388 while ((task = css_task_iter_next(&it))) { 1389 const struct cpumask *possible_mask = task_cpu_possible_mask(task); 1390 1391 if (top_cs) { 1392 /* 1393 * Percpu kthreads in top_cpuset are ignored 1394 */ 1395 if (kthread_is_per_cpu(task)) 1396 continue; 1397 cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus); 1398 } else { 1399 cpumask_and(new_cpus, possible_mask, cs->effective_cpus); 1400 } 1401 set_cpus_allowed_ptr(task, new_cpus); 1402 } 1403 css_task_iter_end(&it); 1404 } 1405 1406 /** 1407 * compute_effective_cpumask - Compute the effective cpumask of the cpuset 1408 * @new_cpus: the temp variable for the new effective_cpus mask 1409 * @cs: the cpuset the need to recompute the new effective_cpus mask 1410 * @parent: the parent cpuset 1411 * 1412 * The result is valid only if the given cpuset isn't a partition root. 1413 */ 1414 static void compute_effective_cpumask(struct cpumask *new_cpus, 1415 struct cpuset *cs, struct cpuset *parent) 1416 { 1417 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); 1418 } 1419 1420 /* 1421 * Commands for update_parent_effective_cpumask 1422 */ 1423 enum partition_cmd { 1424 partcmd_enable, /* Enable partition root */ 1425 partcmd_enablei, /* Enable isolated partition root */ 1426 partcmd_disable, /* Disable partition root */ 1427 partcmd_update, /* Update parent's effective_cpus */ 1428 partcmd_invalidate, /* Make partition invalid */ 1429 }; 1430 1431 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, 1432 int turning_on); 1433 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, 1434 struct tmpmasks *tmp); 1435 1436 /* 1437 * Update partition exclusive flag 1438 * 1439 * Return: 0 if successful, an error code otherwise 1440 */ 1441 static int update_partition_exclusive(struct cpuset *cs, int new_prs) 1442 { 1443 bool exclusive = (new_prs > PRS_MEMBER); 1444 1445 if (exclusive && !is_cpu_exclusive(cs)) { 1446 if (update_flag(CS_CPU_EXCLUSIVE, cs, 1)) 1447 return PERR_NOTEXCL; 1448 } else if (!exclusive && is_cpu_exclusive(cs)) { 1449 /* Turning off CS_CPU_EXCLUSIVE will not return error */ 1450 update_flag(CS_CPU_EXCLUSIVE, cs, 0); 1451 } 1452 return 0; 1453 } 1454 1455 /* 1456 * Update partition load balance flag and/or rebuild sched domain 1457 * 1458 * Changing load balance flag will automatically call 1459 * rebuild_sched_domains_locked(). 1460 * This function is for cgroup v2 only. 1461 */ 1462 static void update_partition_sd_lb(struct cpuset *cs, int old_prs) 1463 { 1464 int new_prs = cs->partition_root_state; 1465 bool rebuild_domains = (new_prs > 0) || (old_prs > 0); 1466 bool new_lb; 1467 1468 /* 1469 * If cs is not a valid partition root, the load balance state 1470 * will follow its parent. 
1471 */ 1472 if (new_prs > 0) { 1473 new_lb = (new_prs != PRS_ISOLATED); 1474 } else { 1475 new_lb = is_sched_load_balance(parent_cs(cs)); 1476 } 1477 if (new_lb != !!is_sched_load_balance(cs)) { 1478 rebuild_domains = true; 1479 if (new_lb) 1480 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 1481 else 1482 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 1483 } 1484 1485 if (rebuild_domains && !force_sd_rebuild) 1486 rebuild_sched_domains_locked(); 1487 } 1488 1489 /* 1490 * tasks_nocpu_error - Return true if tasks will have no effective_cpus 1491 */ 1492 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs, 1493 struct cpumask *xcpus) 1494 { 1495 /* 1496 * A populated partition (cs or parent) can't have empty effective_cpus 1497 */ 1498 return (cpumask_subset(parent->effective_cpus, xcpus) && 1499 partition_is_populated(parent, cs)) || 1500 (!cpumask_intersects(xcpus, cpu_active_mask) && 1501 partition_is_populated(cs, NULL)); 1502 } 1503 1504 static void reset_partition_data(struct cpuset *cs) 1505 { 1506 struct cpuset *parent = parent_cs(cs); 1507 1508 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) 1509 return; 1510 1511 lockdep_assert_held(&callback_lock); 1512 1513 cs->nr_subparts = 0; 1514 if (cpumask_empty(cs->exclusive_cpus)) { 1515 cpumask_clear(cs->effective_xcpus); 1516 if (is_cpu_exclusive(cs)) 1517 clear_bit(CS_CPU_EXCLUSIVE, &cs->flags); 1518 } 1519 if (!cpumask_and(cs->effective_cpus, 1520 parent->effective_cpus, cs->cpus_allowed)) { 1521 cs->use_parent_ecpus = true; 1522 parent->child_ecpus_count++; 1523 cpumask_copy(cs->effective_cpus, parent->effective_cpus); 1524 } 1525 } 1526 1527 /* 1528 * partition_xcpus_newstate - Exclusive CPUs state change 1529 * @old_prs: old partition_root_state 1530 * @new_prs: new partition_root_state 1531 * @xcpus: exclusive CPUs with state change 1532 */ 1533 static void partition_xcpus_newstate(int old_prs, int new_prs, struct cpumask *xcpus) 1534 { 1535 WARN_ON_ONCE(old_prs == new_prs); 1536 if (new_prs == PRS_ISOLATED) 1537 cpumask_or(isolated_cpus, isolated_cpus, xcpus); 1538 else 1539 cpumask_andnot(isolated_cpus, isolated_cpus, xcpus); 1540 } 1541 1542 /* 1543 * partition_xcpus_add - Add new exclusive CPUs to partition 1544 * @new_prs: new partition_root_state 1545 * @parent: parent cpuset 1546 * @xcpus: exclusive CPUs to be added 1547 * Return: true if isolated_cpus modified, false otherwise 1548 * 1549 * Remote partition if parent == NULL 1550 */ 1551 static bool partition_xcpus_add(int new_prs, struct cpuset *parent, 1552 struct cpumask *xcpus) 1553 { 1554 bool isolcpus_updated; 1555 1556 WARN_ON_ONCE(new_prs < 0); 1557 lockdep_assert_held(&callback_lock); 1558 if (!parent) 1559 parent = &top_cpuset; 1560 1561 1562 if (parent == &top_cpuset) 1563 cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus); 1564 1565 isolcpus_updated = (new_prs != parent->partition_root_state); 1566 if (isolcpus_updated) 1567 partition_xcpus_newstate(parent->partition_root_state, new_prs, 1568 xcpus); 1569 1570 cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus); 1571 return isolcpus_updated; 1572 } 1573 1574 /* 1575 * partition_xcpus_del - Remove exclusive CPUs from partition 1576 * @old_prs: old partition_root_state 1577 * @parent: parent cpuset 1578 * @xcpus: exclusive CPUs to be removed 1579 * Return: true if isolated_cpus modified, false otherwise 1580 * 1581 * Remote partition if parent == NULL 1582 */ 1583 static bool partition_xcpus_del(int old_prs, struct cpuset *parent, 1584 struct cpumask *xcpus) 1585 { 1586 
bool isolcpus_updated; 1587 1588 WARN_ON_ONCE(old_prs < 0); 1589 lockdep_assert_held(&callback_lock); 1590 if (!parent) 1591 parent = &top_cpuset; 1592 1593 if (parent == &top_cpuset) 1594 cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus); 1595 1596 isolcpus_updated = (old_prs != parent->partition_root_state); 1597 if (isolcpus_updated) 1598 partition_xcpus_newstate(old_prs, parent->partition_root_state, 1599 xcpus); 1600 1601 cpumask_and(xcpus, xcpus, cpu_active_mask); 1602 cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus); 1603 return isolcpus_updated; 1604 } 1605 1606 static void update_unbound_workqueue_cpumask(bool isolcpus_updated) 1607 { 1608 int ret; 1609 1610 lockdep_assert_cpus_held(); 1611 1612 if (!isolcpus_updated) 1613 return; 1614 1615 ret = workqueue_unbound_exclude_cpumask(isolated_cpus); 1616 WARN_ON_ONCE(ret < 0); 1617 } 1618 1619 /** 1620 * cpuset_cpu_is_isolated - Check if the given CPU is isolated 1621 * @cpu: the CPU number to be checked 1622 * Return: true if CPU is used in an isolated partition, false otherwise 1623 */ 1624 bool cpuset_cpu_is_isolated(int cpu) 1625 { 1626 return cpumask_test_cpu(cpu, isolated_cpus); 1627 } 1628 EXPORT_SYMBOL_GPL(cpuset_cpu_is_isolated); 1629 1630 /* 1631 * compute_effective_exclusive_cpumask - compute effective exclusive CPUs 1632 * @cs: cpuset 1633 * @xcpus: effective exclusive CPUs value to be set 1634 * Return: true if xcpus is not empty, false otherwise. 1635 * 1636 * Starting with exclusive_cpus (cpus_allowed if exclusive_cpus is not set), 1637 * it must be a subset of parent's effective_xcpus. 1638 */ 1639 static bool compute_effective_exclusive_cpumask(struct cpuset *cs, 1640 struct cpumask *xcpus) 1641 { 1642 struct cpuset *parent = parent_cs(cs); 1643 1644 if (!xcpus) 1645 xcpus = cs->effective_xcpus; 1646 1647 return cpumask_and(xcpus, user_xcpus(cs), parent->effective_xcpus); 1648 } 1649 1650 static inline bool is_remote_partition(struct cpuset *cs) 1651 { 1652 return !list_empty(&cs->remote_sibling); 1653 } 1654 1655 static inline bool is_local_partition(struct cpuset *cs) 1656 { 1657 return is_partition_valid(cs) && !is_remote_partition(cs); 1658 } 1659 1660 /* 1661 * remote_partition_enable - Enable current cpuset as a remote partition root 1662 * @cs: the cpuset to update 1663 * @new_prs: new partition_root_state 1664 * @tmp: temparary masks 1665 * Return: 1 if successful, 0 if error 1666 * 1667 * Enable the current cpuset to become a remote partition root taking CPUs 1668 * directly from the top cpuset. cpuset_mutex must be held by the caller. 1669 */ 1670 static int remote_partition_enable(struct cpuset *cs, int new_prs, 1671 struct tmpmasks *tmp) 1672 { 1673 bool isolcpus_updated; 1674 1675 /* 1676 * The user must have sysadmin privilege. 1677 */ 1678 if (!capable(CAP_SYS_ADMIN)) 1679 return 0; 1680 1681 /* 1682 * The requested exclusive_cpus must not be allocated to other 1683 * partitions and it can't use up all the root's effective_cpus. 1684 * 1685 * Note that if there is any local partition root above it or 1686 * remote partition root underneath it, its exclusive_cpus must 1687 * have overlapped with subpartitions_cpus. 
1688 */ 1689 compute_effective_exclusive_cpumask(cs, tmp->new_cpus); 1690 if (cpumask_empty(tmp->new_cpus) || 1691 cpumask_intersects(tmp->new_cpus, subpartitions_cpus) || 1692 cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus)) 1693 return 0; 1694 1695 spin_lock_irq(&callback_lock); 1696 isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus); 1697 list_add(&cs->remote_sibling, &remote_children); 1698 if (cs->use_parent_ecpus) { 1699 struct cpuset *parent = parent_cs(cs); 1700 1701 cs->use_parent_ecpus = false; 1702 parent->child_ecpus_count--; 1703 } 1704 spin_unlock_irq(&callback_lock); 1705 update_unbound_workqueue_cpumask(isolcpus_updated); 1706 1707 /* 1708 * Proprogate changes in top_cpuset's effective_cpus down the hierarchy. 1709 */ 1710 update_tasks_cpumask(&top_cpuset, tmp->new_cpus); 1711 update_sibling_cpumasks(&top_cpuset, NULL, tmp); 1712 return 1; 1713 } 1714 1715 /* 1716 * remote_partition_disable - Remove current cpuset from remote partition list 1717 * @cs: the cpuset to update 1718 * @tmp: temparary masks 1719 * 1720 * The effective_cpus is also updated. 1721 * 1722 * cpuset_mutex must be held by the caller. 1723 */ 1724 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp) 1725 { 1726 bool isolcpus_updated; 1727 1728 compute_effective_exclusive_cpumask(cs, tmp->new_cpus); 1729 WARN_ON_ONCE(!is_remote_partition(cs)); 1730 WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, subpartitions_cpus)); 1731 1732 spin_lock_irq(&callback_lock); 1733 list_del_init(&cs->remote_sibling); 1734 isolcpus_updated = partition_xcpus_del(cs->partition_root_state, 1735 NULL, tmp->new_cpus); 1736 cs->partition_root_state = -cs->partition_root_state; 1737 if (!cs->prs_err) 1738 cs->prs_err = PERR_INVCPUS; 1739 reset_partition_data(cs); 1740 spin_unlock_irq(&callback_lock); 1741 update_unbound_workqueue_cpumask(isolcpus_updated); 1742 1743 /* 1744 * Proprogate changes in top_cpuset's effective_cpus down the hierarchy. 1745 */ 1746 update_tasks_cpumask(&top_cpuset, tmp->new_cpus); 1747 update_sibling_cpumasks(&top_cpuset, NULL, tmp); 1748 } 1749 1750 /* 1751 * remote_cpus_update - cpus_exclusive change of remote partition 1752 * @cs: the cpuset to be updated 1753 * @newmask: the new effective_xcpus mask 1754 * @tmp: temparary masks 1755 * 1756 * top_cpuset and subpartitions_cpus will be updated or partition can be 1757 * invalidated. 1758 */ 1759 static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask, 1760 struct tmpmasks *tmp) 1761 { 1762 bool adding, deleting; 1763 int prs = cs->partition_root_state; 1764 int isolcpus_updated = 0; 1765 1766 if (WARN_ON_ONCE(!is_remote_partition(cs))) 1767 return; 1768 1769 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus)); 1770 1771 if (cpumask_empty(newmask)) 1772 goto invalidate; 1773 1774 adding = cpumask_andnot(tmp->addmask, newmask, cs->effective_xcpus); 1775 deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, newmask); 1776 1777 /* 1778 * Additions of remote CPUs is only allowed if those CPUs are 1779 * not allocated to other partitions and there are effective_cpus 1780 * left in the top cpuset. 
1781 */ 1782 if (adding && (!capable(CAP_SYS_ADMIN) || 1783 cpumask_intersects(tmp->addmask, subpartitions_cpus) || 1784 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))) 1785 goto invalidate; 1786 1787 spin_lock_irq(&callback_lock); 1788 if (adding) 1789 isolcpus_updated += partition_xcpus_add(prs, NULL, tmp->addmask); 1790 if (deleting) 1791 isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask); 1792 spin_unlock_irq(&callback_lock); 1793 update_unbound_workqueue_cpumask(isolcpus_updated); 1794 1795 /* 1796 * Proprogate changes in top_cpuset's effective_cpus down the hierarchy. 1797 */ 1798 update_tasks_cpumask(&top_cpuset, tmp->new_cpus); 1799 update_sibling_cpumasks(&top_cpuset, NULL, tmp); 1800 return; 1801 1802 invalidate: 1803 remote_partition_disable(cs, tmp); 1804 } 1805 1806 /* 1807 * remote_partition_check - check if a child remote partition needs update 1808 * @cs: the cpuset to be updated 1809 * @newmask: the new effective_xcpus mask 1810 * @delmask: temporary mask for deletion (not in tmp) 1811 * @tmp: temparary masks 1812 * 1813 * This should be called before the given cs has updated its cpus_allowed 1814 * and/or effective_xcpus. 1815 */ 1816 static void remote_partition_check(struct cpuset *cs, struct cpumask *newmask, 1817 struct cpumask *delmask, struct tmpmasks *tmp) 1818 { 1819 struct cpuset *child, *next; 1820 int disable_cnt = 0; 1821 1822 /* 1823 * Compute the effective exclusive CPUs that will be deleted. 1824 */ 1825 if (!cpumask_andnot(delmask, cs->effective_xcpus, newmask) || 1826 !cpumask_intersects(delmask, subpartitions_cpus)) 1827 return; /* No deletion of exclusive CPUs in partitions */ 1828 1829 /* 1830 * Searching the remote children list to look for those that will 1831 * be impacted by the deletion of exclusive CPUs. 1832 * 1833 * Since a cpuset must be removed from the remote children list 1834 * before it can go offline and holding cpuset_mutex will prevent 1835 * any change in cpuset status. RCU read lock isn't needed. 1836 */ 1837 lockdep_assert_held(&cpuset_mutex); 1838 list_for_each_entry_safe(child, next, &remote_children, remote_sibling) 1839 if (cpumask_intersects(child->effective_cpus, delmask)) { 1840 remote_partition_disable(child, tmp); 1841 disable_cnt++; 1842 } 1843 if (disable_cnt && !force_sd_rebuild) 1844 rebuild_sched_domains_locked(); 1845 } 1846 1847 /* 1848 * prstate_housekeeping_conflict - check for partition & housekeeping conflicts 1849 * @prstate: partition root state to be checked 1850 * @new_cpus: cpu mask 1851 * Return: true if there is conflict, false otherwise 1852 * 1853 * CPUs outside of housekeeping_cpumask(HK_TYPE_DOMAIN) can only be used in 1854 * an isolated partition. 1855 */ 1856 static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus) 1857 { 1858 const struct cpumask *hk_domain = housekeeping_cpumask(HK_TYPE_DOMAIN); 1859 bool all_in_hk = cpumask_subset(new_cpus, hk_domain); 1860 1861 if (!all_in_hk && (prstate != PRS_ISOLATED)) 1862 return true; 1863 1864 return false; 1865 } 1866 1867 /** 1868 * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset 1869 * @cs: The cpuset that requests change in partition root state 1870 * @cmd: Partition root state change command 1871 * @newmask: Optional new cpumask for partcmd_update 1872 * @tmp: Temporary addmask and delmask 1873 * Return: 0 or a partition root state error code 1874 * 1875 * For partcmd_enable*, the cpuset is being transformed from a non-partition 1876 * root to a partition root. 
The effective_xcpus (cpus_allowed if 1877 * effective_xcpus not set) mask of the given cpuset will be taken away from 1878 * parent's effective_cpus. The function will return 0 if all the CPUs listed 1879 * in effective_xcpus can be granted or an error code will be returned. 1880 * 1881 * For partcmd_disable, the cpuset is being transformed from a partition 1882 * root back to a non-partition root. Any CPUs in effective_xcpus will be 1883 * given back to parent's effective_cpus. 0 will always be returned. 1884 * 1885 * For partcmd_update, if the optional newmask is specified, the cpu list is 1886 * to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is 1887 * assumed to remain the same. The cpuset should either be a valid or invalid 1888 * partition root. The partition root state may change from valid to invalid 1889 * or vice versa. An error code will be returned if transitioning from 1890 * invalid to valid violates the exclusivity rule. 1891 * 1892 * For partcmd_invalidate, the current partition will be made invalid. 1893 * 1894 * The partcmd_enable* and partcmd_disable commands are used by 1895 * update_prstate(). An error code may be returned and the caller will check 1896 * for error. 1897 * 1898 * The partcmd_update command is used by update_cpumasks_hier() with newmask 1899 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used 1900 * by update_cpumask() with NULL newmask. In both cases, the callers won't 1901 * check for error and so partition_root_state and prs_error will be updated 1902 * directly. 1903 */ 1904 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd, 1905 struct cpumask *newmask, 1906 struct tmpmasks *tmp) 1907 { 1908 struct cpuset *parent = parent_cs(cs); 1909 int adding; /* Adding cpus to parent's effective_cpus */ 1910 int deleting; /* Deleting cpus from parent's effective_cpus */ 1911 int old_prs, new_prs; 1912 int part_error = PERR_NONE; /* Partition error? */ 1913 int subparts_delta = 0; 1914 struct cpumask *xcpus; /* cs effective_xcpus */ 1915 int isolcpus_updated = 0; 1916 bool nocpu; 1917 1918 lockdep_assert_held(&cpuset_mutex); 1919 1920 /* 1921 * new_prs will only be changed for the partcmd_update and 1922 * partcmd_invalidate commands. 1923 */ 1924 adding = deleting = false; 1925 old_prs = new_prs = cs->partition_root_state; 1926 xcpus = user_xcpus(cs); 1927 1928 if (cmd == partcmd_invalidate) { 1929 if (is_prs_invalid(old_prs)) 1930 return 0; 1931 1932 /* 1933 * Make the current partition invalid. 1934 */ 1935 if (is_partition_valid(parent)) 1936 adding = cpumask_and(tmp->addmask, 1937 xcpus, parent->effective_xcpus); 1938 if (old_prs > 0) { 1939 new_prs = -old_prs; 1940 subparts_delta--; 1941 } 1942 goto write_error; 1943 } 1944 1945 /* 1946 * The parent must be a partition root. 1947 * The new cpumask, if present, or the current cpus_allowed must 1948 * not be empty. 1949 */ 1950 if (!is_partition_valid(parent)) { 1951 return is_partition_invalid(parent) 1952 ? PERR_INVPARENT : PERR_NOTPART; 1953 } 1954 if (!newmask && xcpus_empty(cs)) 1955 return PERR_CPUSEMPTY; 1956 1957 nocpu = tasks_nocpu_error(parent, cs, xcpus); 1958 1959 if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) { 1960 /* 1961 * Enabling partition root is not allowed if its 1962 * effective_xcpus is empty or doesn't overlap with 1963 * parent's effective_xcpus. 
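 *
 * For example (hypothetical masks): requesting a partition with
 * effective_xcpus = 4-5 while the parent's effective_xcpus is 0-3 fails
 * with PERR_INVCPUS below.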
1964 */ 1965 if (cpumask_empty(xcpus) || 1966 !cpumask_intersects(xcpus, parent->effective_xcpus)) 1967 return PERR_INVCPUS; 1968 1969 if (prstate_housekeeping_conflict(new_prs, xcpus)) 1970 return PERR_HKEEPING; 1971 1972 /* 1973 * A parent can be left with no CPU as long as there is no 1974 * task directly associated with the parent partition. 1975 */ 1976 if (nocpu) 1977 return PERR_NOCPUS; 1978 1979 cpumask_copy(tmp->delmask, xcpus); 1980 deleting = true; 1981 subparts_delta++; 1982 new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED; 1983 } else if (cmd == partcmd_disable) { 1984 /* 1985 * May need to add cpus to parent's effective_cpus for 1986 * valid partition root. 1987 */ 1988 adding = !is_prs_invalid(old_prs) && 1989 cpumask_and(tmp->addmask, xcpus, parent->effective_xcpus); 1990 if (adding) 1991 subparts_delta--; 1992 new_prs = PRS_MEMBER; 1993 } else if (newmask) { 1994 /* 1995 * Empty cpumask is not allowed 1996 */ 1997 if (cpumask_empty(newmask)) { 1998 part_error = PERR_CPUSEMPTY; 1999 goto write_error; 2000 } 2001 /* Check newmask again, whether cpus are available for parent/cs */ 2002 nocpu |= tasks_nocpu_error(parent, cs, newmask); 2003 2004 /* 2005 * partcmd_update with newmask: 2006 * 2007 * Compute add/delete mask to/from effective_cpus 2008 * 2009 * For valid partition: 2010 * addmask = exclusive_cpus & ~newmask 2011 * & parent->effective_xcpus 2012 * delmask = newmask & ~exclusive_cpus 2013 * & parent->effective_xcpus 2014 * 2015 * For invalid partition: 2016 * delmask = newmask & parent->effective_xcpus 2017 */ 2018 if (is_prs_invalid(old_prs)) { 2019 adding = false; 2020 deleting = cpumask_and(tmp->delmask, 2021 newmask, parent->effective_xcpus); 2022 } else { 2023 cpumask_andnot(tmp->addmask, xcpus, newmask); 2024 adding = cpumask_and(tmp->addmask, tmp->addmask, 2025 parent->effective_xcpus); 2026 2027 cpumask_andnot(tmp->delmask, newmask, xcpus); 2028 deleting = cpumask_and(tmp->delmask, tmp->delmask, 2029 parent->effective_xcpus); 2030 } 2031 /* 2032 * Make partition invalid if parent's effective_cpus could 2033 * become empty and there are tasks in the parent. 2034 */ 2035 if (nocpu && (!adding || 2036 !cpumask_intersects(tmp->addmask, cpu_active_mask))) { 2037 part_error = PERR_NOCPUS; 2038 deleting = false; 2039 adding = cpumask_and(tmp->addmask, 2040 xcpus, parent->effective_xcpus); 2041 } 2042 } else { 2043 /* 2044 * partcmd_update w/o newmask 2045 * 2046 * delmask = effective_xcpus & parent->effective_cpus 2047 * 2048 * This can be called from: 2049 * 1) update_cpumasks_hier() 2050 * 2) cpuset_hotplug_update_tasks() 2051 * 2052 * Check to see if it can be transitioned from valid to 2053 * invalid partition or vice versa. 2054 * 2055 * A partition error happens when parent has tasks and all 2056 * its effective CPUs will have to be distributed out. 2057 */ 2058 WARN_ON_ONCE(!is_partition_valid(parent)); 2059 if (nocpu) { 2060 part_error = PERR_NOCPUS; 2061 if (is_partition_valid(cs)) 2062 adding = cpumask_and(tmp->addmask, 2063 xcpus, parent->effective_xcpus); 2064 } else if (is_partition_invalid(cs) && 2065 cpumask_subset(xcpus, parent->effective_xcpus)) { 2066 struct cgroup_subsys_state *css; 2067 struct cpuset *child; 2068 bool exclusive = true; 2069 2070 /* 2071 * Convert invalid partition to valid has to 2072 * pass the cpu exclusivity test. 
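 *
 * For example (hypothetical masks): an invalid partition asking for
 * CPUs 2-3 stays invalid with PERR_NOTEXCL if a sibling cpuset overlaps
 * CPU 3; otherwise those CPUs are carved out of the parent's
 * effective_cpus below.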
2073 */ 2074 rcu_read_lock(); 2075 cpuset_for_each_child(child, css, parent) { 2076 if (child == cs) 2077 continue; 2078 if (!cpusets_are_exclusive(cs, child)) { 2079 exclusive = false; 2080 break; 2081 } 2082 } 2083 rcu_read_unlock(); 2084 if (exclusive) 2085 deleting = cpumask_and(tmp->delmask, 2086 xcpus, parent->effective_cpus); 2087 else 2088 part_error = PERR_NOTEXCL; 2089 } 2090 } 2091 2092 write_error: 2093 if (part_error) 2094 WRITE_ONCE(cs->prs_err, part_error); 2095 2096 if (cmd == partcmd_update) { 2097 /* 2098 * Check for possible transition between valid and invalid 2099 * partition root. 2100 */ 2101 switch (cs->partition_root_state) { 2102 case PRS_ROOT: 2103 case PRS_ISOLATED: 2104 if (part_error) { 2105 new_prs = -old_prs; 2106 subparts_delta--; 2107 } 2108 break; 2109 case PRS_INVALID_ROOT: 2110 case PRS_INVALID_ISOLATED: 2111 if (!part_error) { 2112 new_prs = -old_prs; 2113 subparts_delta++; 2114 } 2115 break; 2116 } 2117 } 2118 2119 if (!adding && !deleting && (new_prs == old_prs)) 2120 return 0; 2121 2122 /* 2123 * Transitioning from invalid to valid or vice versa may require 2124 * changing CS_CPU_EXCLUSIVE. In the case of partcmd_update, 2125 * validate_change() has already been successfully called and 2126 * CPU lists in cs haven't been updated yet. So defer it to later. 2127 */ 2128 if ((old_prs != new_prs) && (cmd != partcmd_update)) { 2129 int err = update_partition_exclusive(cs, new_prs); 2130 2131 if (err) 2132 return err; 2133 } 2134 2135 /* 2136 * Change the parent's effective_cpus & effective_xcpus (top cpuset 2137 * only). 2138 * 2139 * Newly added CPUs will be removed from effective_cpus and 2140 * newly deleted ones will be added back to effective_cpus. 2141 */ 2142 spin_lock_irq(&callback_lock); 2143 if (old_prs != new_prs) { 2144 cs->partition_root_state = new_prs; 2145 if (new_prs <= 0) 2146 cs->nr_subparts = 0; 2147 } 2148 /* 2149 * Adding to parent's effective_cpus means deleting CPUs from cs 2150 * and vice versa. 2151 */ 2152 if (adding) 2153 isolcpus_updated += partition_xcpus_del(old_prs, parent, 2154 tmp->addmask); 2155 if (deleting) 2156 isolcpus_updated += partition_xcpus_add(new_prs, parent, 2157 tmp->delmask); 2158 2159 if (is_partition_valid(parent)) { 2160 parent->nr_subparts += subparts_delta; 2161 WARN_ON_ONCE(parent->nr_subparts < 0); 2162 } 2163 spin_unlock_irq(&callback_lock); 2164 update_unbound_workqueue_cpumask(isolcpus_updated); 2165 2166 if ((old_prs != new_prs) && (cmd == partcmd_update)) 2167 update_partition_exclusive(cs, new_prs); 2168 2169 if (adding || deleting) { 2170 update_tasks_cpumask(parent, tmp->addmask); 2171 update_sibling_cpumasks(parent, cs, tmp); 2172 } 2173 2174 /* 2175 * For partcmd_update without newmask, it is being called from 2176 * cpuset_handle_hotplug(). Update the load balance flag and 2177 * scheduling domain accordingly. 2178 */ 2179 if ((cmd == partcmd_update) && !newmask) 2180 update_partition_sd_lb(cs, old_prs); 2181 2182 notify_partition_change(cs, old_prs); 2183 return 0; 2184 } 2185 2186 /** 2187 * compute_partition_effective_cpumask - compute effective_cpus for partition 2188 * @cs: partition root cpuset 2189 * @new_ecpus: previously computed effective_cpus to be updated 2190 * 2191 * Compute the effective_cpus of a partition root by scanning effective_xcpus 2192 * of child partition roots and excluding their effective_xcpus. 2193 * 2194 * This has the side effect of invalidating valid child partition roots, 2195 * if necessary.
Since it is called from either cpuset_hotplug_update_tasks() 2196 * or update_cpumasks_hier() where parent and children are modified 2197 * successively, we don't need to call update_parent_effective_cpumask() 2198 * and the child's effective_cpus will be updated in later iterations. 2199 * 2200 * Note that rcu_read_lock() is assumed to be held. 2201 */ 2202 static void compute_partition_effective_cpumask(struct cpuset *cs, 2203 struct cpumask *new_ecpus) 2204 { 2205 struct cgroup_subsys_state *css; 2206 struct cpuset *child; 2207 bool populated = partition_is_populated(cs, NULL); 2208 2209 /* 2210 * Check child partition roots to see if they should be 2211 * invalidated when 2212 * 1) child effective_xcpus not a subset of new 2213 * exclusive_cpus 2214 * 2) All the effective_cpus will be used up and cs 2215 * has tasks 2216 */ 2217 compute_effective_exclusive_cpumask(cs, new_ecpus); 2218 cpumask_and(new_ecpus, new_ecpus, cpu_active_mask); 2219 2220 rcu_read_lock(); 2221 cpuset_for_each_child(child, css, cs) { 2222 if (!is_partition_valid(child)) 2223 continue; 2224 2225 child->prs_err = 0; 2226 if (!cpumask_subset(child->effective_xcpus, 2227 cs->effective_xcpus)) 2228 child->prs_err = PERR_INVCPUS; 2229 else if (populated && 2230 cpumask_subset(new_ecpus, child->effective_xcpus)) 2231 child->prs_err = PERR_NOCPUS; 2232 2233 if (child->prs_err) { 2234 int old_prs = child->partition_root_state; 2235 2236 /* 2237 * Invalidate child partition 2238 */ 2239 spin_lock_irq(&callback_lock); 2240 make_partition_invalid(child); 2241 cs->nr_subparts--; 2242 child->nr_subparts = 0; 2243 spin_unlock_irq(&callback_lock); 2244 notify_partition_change(child, old_prs); 2245 continue; 2246 } 2247 cpumask_andnot(new_ecpus, new_ecpus, 2248 child->effective_xcpus); 2249 } 2250 rcu_read_unlock(); 2251 } 2252 2253 /* 2254 * update_cpumasks_hier() flags 2255 */ 2256 #define HIER_CHECKALL 0x01 /* Check all cpusets with no skipping */ 2257 #define HIER_NO_SD_REBUILD 0x02 /* Don't rebuild sched domains */ 2258 2259 /* 2260 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree 2261 * @cs: the cpuset to consider 2262 * @tmp: temp variables for calculating effective_cpus & partition setup 2263 * @flags: HIER_CHECKALL to not skip any descendant cpuset, HIER_NO_SD_REBUILD to suppress the sched domain rebuild 2264 * 2265 * When the configured cpumask is changed, the effective cpumasks of this cpuset 2266 * and all its descendants need to be updated. 2267 * 2268 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed. 2269 * 2270 * Called with cpuset_mutex held 2271 */ 2272 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, 2273 int flags) 2274 { 2275 struct cpuset *cp; 2276 struct cgroup_subsys_state *pos_css; 2277 bool need_rebuild_sched_domains = false; 2278 int old_prs, new_prs; 2279 2280 rcu_read_lock(); 2281 cpuset_for_each_descendant_pre(cp, pos_css, cs) { 2282 struct cpuset *parent = parent_cs(cp); 2283 bool remote = is_remote_partition(cp); 2284 bool update_parent = false; 2285 2286 /* 2287 * Skip a descendant remote partition that acquires CPUs 2288 * directly from the top cpuset unless it is cs. 2289 */ 2290 if (remote && (cp != cs)) { 2291 pos_css = css_rightmost_descendant(pos_css); 2292 continue; 2293 } 2294 2295 /* 2296 * Update effective_xcpus if exclusive_cpus is set. 2297 * The case when exclusive_cpus isn't set is handled later.
2298 */ 2299 if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs)) { 2300 spin_lock_irq(&callback_lock); 2301 compute_effective_exclusive_cpumask(cp, NULL); 2302 spin_unlock_irq(&callback_lock); 2303 } 2304 2305 old_prs = new_prs = cp->partition_root_state; 2306 if (remote || (is_partition_valid(parent) && 2307 is_partition_valid(cp))) 2308 compute_partition_effective_cpumask(cp, tmp->new_cpus); 2309 else 2310 compute_effective_cpumask(tmp->new_cpus, cp, parent); 2311 2312 /* 2313 * A partition with no effective_cpus is allowed as long as 2314 * there is no task associated with it. Call 2315 * update_parent_effective_cpumask() to check it. 2316 */ 2317 if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) { 2318 update_parent = true; 2319 goto update_parent_effective; 2320 } 2321 2322 /* 2323 * If it becomes empty, inherit the effective mask of the 2324 * parent, which is guaranteed to have some CPUs unless 2325 * it is a partition root that has explicitly distributed 2326 * out all its CPUs. 2327 */ 2328 if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus)) { 2329 cpumask_copy(tmp->new_cpus, parent->effective_cpus); 2330 if (!cp->use_parent_ecpus) { 2331 cp->use_parent_ecpus = true; 2332 parent->child_ecpus_count++; 2333 } 2334 } else if (cp->use_parent_ecpus) { 2335 cp->use_parent_ecpus = false; 2336 WARN_ON_ONCE(!parent->child_ecpus_count); 2337 parent->child_ecpus_count--; 2338 } 2339 2340 if (remote) 2341 goto get_css; 2342 2343 /* 2344 * Skip the whole subtree if 2345 * 1) the cpumask remains the same, 2346 * 2) has no partition root state, 2347 * 3) HIER_CHECKALL flag not set, and 2348 * 4) for v2 load balance state same as its parent. 2349 */ 2350 if (!cp->partition_root_state && !(flags & HIER_CHECKALL) && 2351 cpumask_equal(tmp->new_cpus, cp->effective_cpus) && 2352 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || 2353 (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) { 2354 pos_css = css_rightmost_descendant(pos_css); 2355 continue; 2356 } 2357 2358 update_parent_effective: 2359 /* 2360 * update_parent_effective_cpumask() should have been called 2361 * for cs already in update_cpumask(). We should also call 2362 * update_tasks_cpumask() again for tasks in the parent 2363 * cpuset if the parent's effective_cpus changes. 2364 */ 2365 if ((cp != cs) && old_prs) { 2366 switch (parent->partition_root_state) { 2367 case PRS_ROOT: 2368 case PRS_ISOLATED: 2369 update_parent = true; 2370 break; 2371 2372 default: 2373 /* 2374 * When parent is not a partition root or is 2375 * invalid, child partition roots become 2376 * invalid too. 2377 */ 2378 if (is_partition_valid(cp)) 2379 new_prs = -cp->partition_root_state; 2380 WRITE_ONCE(cp->prs_err, 2381 is_partition_invalid(parent) 2382 ? PERR_INVPARENT : PERR_NOTPART); 2383 break; 2384 } 2385 } 2386 get_css: 2387 if (!css_tryget_online(&cp->css)) 2388 continue; 2389 rcu_read_unlock(); 2390 2391 if (update_parent) { 2392 update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp); 2393 /* 2394 * The cpuset partition_root_state may become 2395 * invalid. Capture it. 2396 */ 2397 new_prs = cp->partition_root_state; 2398 } 2399 2400 spin_lock_irq(&callback_lock); 2401 cpumask_copy(cp->effective_cpus, tmp->new_cpus); 2402 cp->partition_root_state = new_prs; 2403 /* 2404 * Make sure effective_xcpus is properly set for a valid 2405 * partition root. 
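 *
 * When exclusive_cpus isn't set, this amounts to effective_xcpus =
 * cpus_allowed & parent->effective_xcpus, e.g. (hypothetical masks)
 * cpus_allowed = 2-5 with a parent effective_xcpus of 0-3 yields
 * effective_xcpus = 2-3.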
2406 */ 2407 if ((new_prs > 0) && cpumask_empty(cp->exclusive_cpus)) 2408 cpumask_and(cp->effective_xcpus, 2409 cp->cpus_allowed, parent->effective_xcpus); 2410 else if (new_prs < 0) 2411 reset_partition_data(cp); 2412 spin_unlock_irq(&callback_lock); 2413 2414 notify_partition_change(cp, old_prs); 2415 2416 WARN_ON(!is_in_v2_mode() && 2417 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); 2418 2419 update_tasks_cpumask(cp, cp->effective_cpus); 2420 2421 /* 2422 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE flag 2423 * from the parent if the current cpuset isn't a valid partition root 2424 * and their load balance states differ. 2425 */ 2426 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && 2427 !is_partition_valid(cp) && 2428 (is_sched_load_balance(parent) != is_sched_load_balance(cp))) { 2429 if (is_sched_load_balance(parent)) 2430 set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); 2431 else 2432 clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); 2433 } 2434 2435 /* 2436 * On legacy hierarchy, if the effective cpumask of any non- 2437 * empty cpuset is changed, we need to rebuild sched domains. 2438 * On default hierarchy, the cpuset needs to be a partition 2439 * root as well. 2440 */ 2441 if (!cpumask_empty(cp->cpus_allowed) && 2442 is_sched_load_balance(cp) && 2443 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || 2444 is_partition_valid(cp))) 2445 need_rebuild_sched_domains = true; 2446 2447 rcu_read_lock(); 2448 css_put(&cp->css); 2449 } 2450 rcu_read_unlock(); 2451 2452 if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD) && 2453 !force_sd_rebuild) 2454 rebuild_sched_domains_locked(); 2455 } 2456 2457 /** 2458 * update_sibling_cpumasks - Update siblings' cpumasks 2459 * @parent: Parent cpuset 2460 * @cs: Current cpuset 2461 * @tmp: Temp variables 2462 */ 2463 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, 2464 struct tmpmasks *tmp) 2465 { 2466 struct cpuset *sibling; 2467 struct cgroup_subsys_state *pos_css; 2468 2469 lockdep_assert_held(&cpuset_mutex); 2470 2471 /* 2472 * Check all its siblings and call update_cpumasks_hier() 2473 * if their effective_cpus will need to be changed. 2474 * 2475 * With the addition of effective_xcpus, which is a subset of 2476 * cpus_allowed, it is possible that a change in the parent's effective_cpus 2477 * due to a change in a child partition's effective_xcpus will impact 2478 * its siblings even if they do not inherit the parent's effective_cpus 2479 * directly. 2480 * 2481 * The update_cpumasks_hier() function may sleep. So we have to 2482 * release the RCU read lock before calling it. The HIER_NO_SD_REBUILD 2483 * flag is used to suppress the rebuild of sched domains as the callers 2484 * will take care of that.
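 *
 * For example, shrinking a child partition's effective_xcpus returns CPUs
 * to the parent's effective_cpus, and those CPUs may then have to be
 * propagated to siblings that compute their effective_cpus from the
 * parent.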
2485 */ 2486 rcu_read_lock(); 2487 cpuset_for_each_child(sibling, pos_css, parent) { 2488 if (sibling == cs) 2489 continue; 2490 if (!sibling->use_parent_ecpus && 2491 !is_partition_valid(sibling)) { 2492 compute_effective_cpumask(tmp->new_cpus, sibling, 2493 parent); 2494 if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus)) 2495 continue; 2496 } 2497 if (!css_tryget_online(&sibling->css)) 2498 continue; 2499 2500 rcu_read_unlock(); 2501 update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD); 2502 rcu_read_lock(); 2503 css_put(&sibling->css); 2504 } 2505 rcu_read_unlock(); 2506 } 2507 2508 /** 2509 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it 2510 * @cs: the cpuset to consider 2511 * @trialcs: trial cpuset 2512 * @buf: buffer of cpu numbers written to this cpuset 2513 */ 2514 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, 2515 const char *buf) 2516 { 2517 int retval; 2518 struct tmpmasks tmp; 2519 struct cpuset *parent = parent_cs(cs); 2520 bool invalidate = false; 2521 int hier_flags = 0; 2522 int old_prs = cs->partition_root_state; 2523 2524 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ 2525 if (cs == &top_cpuset) 2526 return -EACCES; 2527 2528 /* 2529 * An empty cpus_allowed is ok only if the cpuset has no tasks. 2530 * Since cpulist_parse() fails on an empty mask, we special case 2531 * that parsing. The validate_change() call ensures that cpusets 2532 * with tasks have cpus. 2533 */ 2534 if (!*buf) { 2535 cpumask_clear(trialcs->cpus_allowed); 2536 if (cpumask_empty(trialcs->exclusive_cpus)) 2537 cpumask_clear(trialcs->effective_xcpus); 2538 } else { 2539 retval = cpulist_parse(buf, trialcs->cpus_allowed); 2540 if (retval < 0) 2541 return retval; 2542 2543 if (!cpumask_subset(trialcs->cpus_allowed, 2544 top_cpuset.cpus_allowed)) 2545 return -EINVAL; 2546 2547 /* 2548 * When exclusive_cpus isn't explicitly set, it is constrained 2549 * by cpus_allowed and the parent's effective_xcpus. Otherwise, 2550 * trialcs->effective_xcpus is used as a temporary cpumask 2551 * for checking validity of the partition root. 2552 */ 2553 if (!cpumask_empty(trialcs->exclusive_cpus) || is_partition_valid(cs)) 2554 compute_effective_exclusive_cpumask(trialcs, NULL); 2555 } 2556 2557 /* Nothing to do if the cpus didn't change */ 2558 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) 2559 return 0; 2560 2561 if (alloc_cpumasks(NULL, &tmp)) 2562 return -ENOMEM; 2563 2564 if (old_prs) { 2565 if (is_partition_valid(cs) && 2566 cpumask_empty(trialcs->effective_xcpus)) { 2567 invalidate = true; 2568 cs->prs_err = PERR_INVCPUS; 2569 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) { 2570 invalidate = true; 2571 cs->prs_err = PERR_HKEEPING; 2572 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) { 2573 invalidate = true; 2574 cs->prs_err = PERR_NOCPUS; 2575 } 2576 } 2577 2578 /* 2579 * Check all the descendants in update_cpumasks_hier() if 2580 * effective_xcpus is to be changed. 2581 */ 2582 if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus)) 2583 hier_flags = HIER_CHECKALL; 2584 2585 retval = validate_change(cs, trialcs); 2586 2587 if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { 2588 struct cgroup_subsys_state *css; 2589 struct cpuset *cp; 2590 2591 /* 2592 * The -EINVAL error code indicates that the partition sibling 2593 * CPU exclusivity rule has been violated.
We still allow 2594 * the cpumask change to proceed while invalidating the 2595 * partition. However, any conflicting sibling partitions 2596 * have to be marked as invalid too. 2597 */ 2598 invalidate = true; 2599 rcu_read_lock(); 2600 cpuset_for_each_child(cp, css, parent) { 2601 struct cpumask *xcpus = fetch_xcpus(trialcs); 2602 2603 if (is_partition_valid(cp) && 2604 cpumask_intersects(xcpus, cp->effective_xcpus)) { 2605 rcu_read_unlock(); 2606 update_parent_effective_cpumask(cp, partcmd_invalidate, NULL, &tmp); 2607 rcu_read_lock(); 2608 } 2609 } 2610 rcu_read_unlock(); 2611 retval = 0; 2612 } 2613 2614 if (retval < 0) 2615 goto out_free; 2616 2617 if (is_partition_valid(cs) || 2618 (is_partition_invalid(cs) && !invalidate)) { 2619 struct cpumask *xcpus = trialcs->effective_xcpus; 2620 2621 if (cpumask_empty(xcpus) && is_partition_invalid(cs)) 2622 xcpus = trialcs->cpus_allowed; 2623 2624 /* 2625 * Call remote_cpus_update() to handle valid remote partition 2626 */ 2627 if (is_remote_partition(cs)) 2628 remote_cpus_update(cs, xcpus, &tmp); 2629 else if (invalidate) 2630 update_parent_effective_cpumask(cs, partcmd_invalidate, 2631 NULL, &tmp); 2632 else 2633 update_parent_effective_cpumask(cs, partcmd_update, 2634 xcpus, &tmp); 2635 } else if (!cpumask_empty(cs->exclusive_cpus)) { 2636 /* 2637 * Use trialcs->effective_cpus as a temp cpumask 2638 */ 2639 remote_partition_check(cs, trialcs->effective_xcpus, 2640 trialcs->effective_cpus, &tmp); 2641 } 2642 2643 spin_lock_irq(&callback_lock); 2644 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); 2645 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); 2646 if ((old_prs > 0) && !is_partition_valid(cs)) 2647 reset_partition_data(cs); 2648 spin_unlock_irq(&callback_lock); 2649 2650 /* effective_cpus/effective_xcpus will be updated here */ 2651 update_cpumasks_hier(cs, &tmp, hier_flags); 2652 2653 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */ 2654 if (cs->partition_root_state) 2655 update_partition_sd_lb(cs, old_prs); 2656 out_free: 2657 free_cpumasks(NULL, &tmp); 2658 return retval; 2659 } 2660 2661 /** 2662 * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset 2663 * @cs: the cpuset to consider 2664 * @trialcs: trial cpuset 2665 * @buf: buffer of cpu numbers written to this cpuset 2666 * 2667 * The tasks' cpumask will be updated if cs is a valid partition root. 2668 */ 2669 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, 2670 const char *buf) 2671 { 2672 int retval; 2673 struct tmpmasks tmp; 2674 struct cpuset *parent = parent_cs(cs); 2675 bool invalidate = false; 2676 int hier_flags = 0; 2677 int old_prs = cs->partition_root_state; 2678 2679 if (!*buf) { 2680 cpumask_clear(trialcs->exclusive_cpus); 2681 cpumask_clear(trialcs->effective_xcpus); 2682 } else { 2683 retval = cpulist_parse(buf, trialcs->exclusive_cpus); 2684 if (retval < 0) 2685 return retval; 2686 } 2687 2688 /* Nothing to do if the CPUs didn't change */ 2689 if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus)) 2690 return 0; 2691 2692 if (*buf) 2693 compute_effective_exclusive_cpumask(trialcs, NULL); 2694 2695 /* 2696 * Check all the descendants in update_cpumasks_hier() if 2697 * effective_xcpus is to be changed. 
2698 */ 2699 if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus)) 2700 hier_flags = HIER_CHECKALL; 2701 2702 retval = validate_change(cs, trialcs); 2703 if (retval) 2704 return retval; 2705 2706 if (alloc_cpumasks(NULL, &tmp)) 2707 return -ENOMEM; 2708 2709 if (old_prs) { 2710 if (cpumask_empty(trialcs->effective_xcpus)) { 2711 invalidate = true; 2712 cs->prs_err = PERR_INVCPUS; 2713 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) { 2714 invalidate = true; 2715 cs->prs_err = PERR_HKEEPING; 2716 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) { 2717 invalidate = true; 2718 cs->prs_err = PERR_NOCPUS; 2719 } 2720 2721 if (is_remote_partition(cs)) { 2722 if (invalidate) 2723 remote_partition_disable(cs, &tmp); 2724 else 2725 remote_cpus_update(cs, trialcs->effective_xcpus, 2726 &tmp); 2727 } else if (invalidate) { 2728 update_parent_effective_cpumask(cs, partcmd_invalidate, 2729 NULL, &tmp); 2730 } else { 2731 update_parent_effective_cpumask(cs, partcmd_update, 2732 trialcs->effective_xcpus, &tmp); 2733 } 2734 } else if (!cpumask_empty(trialcs->exclusive_cpus)) { 2735 /* 2736 * Use trialcs->effective_cpus as a temp cpumask 2737 */ 2738 remote_partition_check(cs, trialcs->effective_xcpus, 2739 trialcs->effective_cpus, &tmp); 2740 } 2741 spin_lock_irq(&callback_lock); 2742 cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus); 2743 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); 2744 if ((old_prs > 0) && !is_partition_valid(cs)) 2745 reset_partition_data(cs); 2746 spin_unlock_irq(&callback_lock); 2747 2748 /* 2749 * Call update_cpumasks_hier() to update effective_cpus/effective_xcpus 2750 * of the subtree when it is a valid partition root or effective_xcpus 2751 * is updated. 2752 */ 2753 if (is_partition_valid(cs) || hier_flags) 2754 update_cpumasks_hier(cs, &tmp, hier_flags); 2755 2756 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */ 2757 if (cs->partition_root_state) 2758 update_partition_sd_lb(cs, old_prs); 2759 2760 free_cpumasks(NULL, &tmp); 2761 return 0; 2762 } 2763 2764 /* 2765 * Migrate memory region from one set of nodes to another. This is 2766 * performed asynchronously as it can be called from process migration path 2767 * holding locks involved in process management. All mm migrations are 2768 * performed in the queued order and can be waited for by flushing 2769 * cpuset_migrate_mm_wq. 
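 *
 * (For example, cpuset_post_attach() and cpuset_write_resmask() below
 * flush cpuset_migrate_mm_wq to wait for queued migrations.)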
2770 */ 2771 2772 struct cpuset_migrate_mm_work { 2773 struct work_struct work; 2774 struct mm_struct *mm; 2775 nodemask_t from; 2776 nodemask_t to; 2777 }; 2778 2779 static void cpuset_migrate_mm_workfn(struct work_struct *work) 2780 { 2781 struct cpuset_migrate_mm_work *mwork = 2782 container_of(work, struct cpuset_migrate_mm_work, work); 2783 2784 /* on a wq worker, no need to worry about %current's mems_allowed */ 2785 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); 2786 mmput(mwork->mm); 2787 kfree(mwork); 2788 } 2789 2790 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, 2791 const nodemask_t *to) 2792 { 2793 struct cpuset_migrate_mm_work *mwork; 2794 2795 if (nodes_equal(*from, *to)) { 2796 mmput(mm); 2797 return; 2798 } 2799 2800 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); 2801 if (mwork) { 2802 mwork->mm = mm; 2803 mwork->from = *from; 2804 mwork->to = *to; 2805 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); 2806 queue_work(cpuset_migrate_mm_wq, &mwork->work); 2807 } else { 2808 mmput(mm); 2809 } 2810 } 2811 2812 static void cpuset_post_attach(void) 2813 { 2814 flush_workqueue(cpuset_migrate_mm_wq); 2815 } 2816 2817 /* 2818 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy 2819 * @tsk: the task to change 2820 * @newmems: new nodes that the task will be set 2821 * 2822 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed 2823 * and rebind an eventual tasks' mempolicy. If the task is allocating in 2824 * parallel, it might temporarily see an empty intersection, which results in 2825 * a seqlock check and retry before OOM or allocation failure. 2826 */ 2827 static void cpuset_change_task_nodemask(struct task_struct *tsk, 2828 nodemask_t *newmems) 2829 { 2830 task_lock(tsk); 2831 2832 local_irq_disable(); 2833 write_seqcount_begin(&tsk->mems_allowed_seq); 2834 2835 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); 2836 mpol_rebind_task(tsk, newmems); 2837 tsk->mems_allowed = *newmems; 2838 2839 write_seqcount_end(&tsk->mems_allowed_seq); 2840 local_irq_enable(); 2841 2842 task_unlock(tsk); 2843 } 2844 2845 static void *cpuset_being_rebound; 2846 2847 /** 2848 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. 2849 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed 2850 * 2851 * Iterate through each task of @cs updating its mems_allowed to the 2852 * effective cpuset's. As this function is called with cpuset_mutex held, 2853 * cpuset membership stays stable. 2854 */ 2855 static void update_tasks_nodemask(struct cpuset *cs) 2856 { 2857 static nodemask_t newmems; /* protected by cpuset_mutex */ 2858 struct css_task_iter it; 2859 struct task_struct *task; 2860 2861 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ 2862 2863 guarantee_online_mems(cs, &newmems); 2864 2865 /* 2866 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't 2867 * take while holding tasklist_lock. Forks can happen - the 2868 * mpol_dup() cpuset_being_rebound check will catch such forks, 2869 * and rebind their vma mempolicies too. Because we still hold 2870 * the global cpuset_mutex, we know that no other rebind effort 2871 * will be contending for the global variable cpuset_being_rebound. 2872 * It's ok if we rebind the same mm twice; mpol_rebind_mm() 2873 * is idempotent. Also migrate pages in each mm to new nodes. 
2874 */ 2875 css_task_iter_start(&cs->css, 0, &it); 2876 while ((task = css_task_iter_next(&it))) { 2877 struct mm_struct *mm; 2878 bool migrate; 2879 2880 cpuset_change_task_nodemask(task, &newmems); 2881 2882 mm = get_task_mm(task); 2883 if (!mm) 2884 continue; 2885 2886 migrate = is_memory_migrate(cs); 2887 2888 mpol_rebind_mm(mm, &cs->mems_allowed); 2889 if (migrate) 2890 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); 2891 else 2892 mmput(mm); 2893 } 2894 css_task_iter_end(&it); 2895 2896 /* 2897 * All the tasks' nodemasks have been updated; update 2898 * cs->old_mems_allowed. 2899 */ 2900 cs->old_mems_allowed = newmems; 2901 2902 /* We're done rebinding vmas to this cpuset's new mems_allowed. */ 2903 cpuset_being_rebound = NULL; 2904 } 2905 2906 /* 2907 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree 2908 * @cs: the cpuset to consider 2909 * @new_mems: a temp variable for calculating new effective_mems 2910 * 2911 * When the configured nodemask is changed, the effective nodemasks of this cpuset 2912 * and all its descendants need to be updated. 2913 * 2914 * On legacy hierarchy, effective_mems will be the same as mems_allowed. 2915 * 2916 * Called with cpuset_mutex held 2917 */ 2918 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) 2919 { 2920 struct cpuset *cp; 2921 struct cgroup_subsys_state *pos_css; 2922 2923 rcu_read_lock(); 2924 cpuset_for_each_descendant_pre(cp, pos_css, cs) { 2925 struct cpuset *parent = parent_cs(cp); 2926 2927 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); 2928 2929 /* 2930 * If it becomes empty, inherit the effective mask of the 2931 * parent, which is guaranteed to have some MEMs. 2932 */ 2933 if (is_in_v2_mode() && nodes_empty(*new_mems)) 2934 *new_mems = parent->effective_mems; 2935 2936 /* Skip the whole subtree if the nodemask remains the same. */ 2937 if (nodes_equal(*new_mems, cp->effective_mems)) { 2938 pos_css = css_rightmost_descendant(pos_css); 2939 continue; 2940 } 2941 2942 if (!css_tryget_online(&cp->css)) 2943 continue; 2944 rcu_read_unlock(); 2945 2946 spin_lock_irq(&callback_lock); 2947 cp->effective_mems = *new_mems; 2948 spin_unlock_irq(&callback_lock); 2949 2950 WARN_ON(!is_in_v2_mode() && 2951 !nodes_equal(cp->mems_allowed, cp->effective_mems)); 2952 2953 update_tasks_nodemask(cp); 2954 2955 rcu_read_lock(); 2956 css_put(&cp->css); 2957 } 2958 rcu_read_unlock(); 2959 } 2960 2961 /* 2962 * Handle a user request to change the 'mems' memory placement 2963 * of a cpuset. Needs to validate the request, update the 2964 * cpuset's mems_allowed, and, for each task in the cpuset, 2965 * update its mems_allowed, rebind the task's mempolicy and any vma 2966 * mempolicies, and, if the cpuset is marked 'memory_migrate', 2967 * migrate the task's pages to the new memory. 2968 * 2969 * Call with cpuset_mutex held. May take callback_lock during call. 2970 * Will take tasklist_lock, scan the tasklist for tasks in cpuset cs, 2971 * lock each such task's mm->mmap_lock, scan its vmas and rebind 2972 * their mempolicies to the cpuset's new mems_allowed. 2973 */ 2974 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, 2975 const char *buf) 2976 { 2977 int retval; 2978 2979 /* 2980 * top_cpuset.mems_allowed tracks node_states[N_MEMORY]; 2981 * it's read-only 2982 */ 2983 if (cs == &top_cpuset) { 2984 retval = -EACCES; 2985 goto done; 2986 } 2987 2988 /* 2989 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2990 * Since nodelist_parse() fails on an empty mask, we special case 2991 * that parsing. The validate_change() call ensures that cpusets 2992 * with tasks have memory. 2993 */ 2994 if (!*buf) { 2995 nodes_clear(trialcs->mems_allowed); 2996 } else { 2997 retval = nodelist_parse(buf, trialcs->mems_allowed); 2998 if (retval < 0) 2999 goto done; 3000 3001 if (!nodes_subset(trialcs->mems_allowed, 3002 top_cpuset.mems_allowed)) { 3003 retval = -EINVAL; 3004 goto done; 3005 } 3006 } 3007 3008 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { 3009 retval = 0; /* Too easy - nothing to do */ 3010 goto done; 3011 } 3012 retval = validate_change(cs, trialcs); 3013 if (retval < 0) 3014 goto done; 3015 3016 check_insane_mems_config(&trialcs->mems_allowed); 3017 3018 spin_lock_irq(&callback_lock); 3019 cs->mems_allowed = trialcs->mems_allowed; 3020 spin_unlock_irq(&callback_lock); 3021 3022 /* use trialcs->mems_allowed as a temp variable */ 3023 update_nodemasks_hier(cs, &trialcs->mems_allowed); 3024 done: 3025 return retval; 3026 } 3027 3028 bool current_cpuset_is_being_rebound(void) 3029 { 3030 bool ret; 3031 3032 rcu_read_lock(); 3033 ret = task_cs(current) == cpuset_being_rebound; 3034 rcu_read_unlock(); 3035 3036 return ret; 3037 } 3038 3039 static int update_relax_domain_level(struct cpuset *cs, s64 val) 3040 { 3041 #ifdef CONFIG_SMP 3042 if (val < -1 || val > sched_domain_level_max + 1) 3043 return -EINVAL; 3044 #endif 3045 3046 if (val != cs->relax_domain_level) { 3047 cs->relax_domain_level = val; 3048 if (!cpumask_empty(cs->cpus_allowed) && 3049 is_sched_load_balance(cs)) 3050 rebuild_sched_domains_locked(); 3051 } 3052 3053 return 0; 3054 } 3055 3056 /** 3057 * update_tasks_flags - update the spread flags of tasks in the cpuset. 3058 * @cs: the cpuset in which each task's spread flags needs to be changed 3059 * 3060 * Iterate through each task of @cs updating its spread flags. As this 3061 * function is called with cpuset_mutex held, cpuset membership stays 3062 * stable. 3063 */ 3064 static void update_tasks_flags(struct cpuset *cs) 3065 { 3066 struct css_task_iter it; 3067 struct task_struct *task; 3068 3069 css_task_iter_start(&cs->css, 0, &it); 3070 while ((task = css_task_iter_next(&it))) 3071 cpuset_update_task_spread_flags(cs, task); 3072 css_task_iter_end(&it); 3073 } 3074 3075 /* 3076 * update_flag - read a 0 or a 1 in a file and update associated flag 3077 * bit: the bit to update (see cpuset_flagbits_t) 3078 * cs: the cpuset to update 3079 * turning_on: whether the flag is being set or cleared 3080 * 3081 * Call with cpuset_mutex held. 
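 *
 * For example, writing 1 to the legacy cpuset.sched_load_balance file ends
 * up here as update_flag(CS_SCHED_LOAD_BALANCE, cs, 1) via
 * cpuset_write_u64() below.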
3082 */ 3083 3084 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, 3085 int turning_on) 3086 { 3087 struct cpuset *trialcs; 3088 int balance_flag_changed; 3089 int spread_flag_changed; 3090 int err; 3091 3092 trialcs = alloc_trial_cpuset(cs); 3093 if (!trialcs) 3094 return -ENOMEM; 3095 3096 if (turning_on) 3097 set_bit(bit, &trialcs->flags); 3098 else 3099 clear_bit(bit, &trialcs->flags); 3100 3101 err = validate_change(cs, trialcs); 3102 if (err < 0) 3103 goto out; 3104 3105 balance_flag_changed = (is_sched_load_balance(cs) != 3106 is_sched_load_balance(trialcs)); 3107 3108 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) 3109 || (is_spread_page(cs) != is_spread_page(trialcs))); 3110 3111 spin_lock_irq(&callback_lock); 3112 cs->flags = trialcs->flags; 3113 spin_unlock_irq(&callback_lock); 3114 3115 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed && 3116 !force_sd_rebuild) 3117 rebuild_sched_domains_locked(); 3118 3119 if (spread_flag_changed) 3120 update_tasks_flags(cs); 3121 out: 3122 free_cpuset(trialcs); 3123 return err; 3124 } 3125 3126 /** 3127 * update_prstate - update partition_root_state 3128 * @cs: the cpuset to update 3129 * @new_prs: new partition root state 3130 * Return: 0 if successful, != 0 if error 3131 * 3132 * Call with cpuset_mutex held. 3133 */ 3134 static int update_prstate(struct cpuset *cs, int new_prs) 3135 { 3136 int err = PERR_NONE, old_prs = cs->partition_root_state; 3137 struct cpuset *parent = parent_cs(cs); 3138 struct tmpmasks tmpmask; 3139 bool new_xcpus_state = false; 3140 3141 if (old_prs == new_prs) 3142 return 0; 3143 3144 /* 3145 * Treat a previously invalid partition root as if it is a "member". 3146 */ 3147 if (new_prs && is_prs_invalid(old_prs)) 3148 old_prs = PRS_MEMBER; 3149 3150 if (alloc_cpumasks(NULL, &tmpmask)) 3151 return -ENOMEM; 3152 3153 /* 3154 * Setup effective_xcpus if not properly set yet, it will be cleared 3155 * later if partition becomes invalid. 3156 */ 3157 if ((new_prs > 0) && cpumask_empty(cs->exclusive_cpus)) { 3158 spin_lock_irq(&callback_lock); 3159 cpumask_and(cs->effective_xcpus, 3160 cs->cpus_allowed, parent->effective_xcpus); 3161 spin_unlock_irq(&callback_lock); 3162 } 3163 3164 err = update_partition_exclusive(cs, new_prs); 3165 if (err) 3166 goto out; 3167 3168 if (!old_prs) { 3169 enum partition_cmd cmd = (new_prs == PRS_ROOT) 3170 ? partcmd_enable : partcmd_enablei; 3171 3172 /* 3173 * cpus_allowed and exclusive_cpus cannot be both empty. 3174 */ 3175 if (xcpus_empty(cs)) { 3176 err = PERR_CPUSEMPTY; 3177 goto out; 3178 } 3179 3180 err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask); 3181 /* 3182 * If an attempt to become local partition root fails, 3183 * try to become a remote partition root instead. 3184 */ 3185 if (err && remote_partition_enable(cs, new_prs, &tmpmask)) 3186 err = 0; 3187 } else if (old_prs && new_prs) { 3188 /* 3189 * A change in load balance state only, no change in cpumasks. 3190 */ 3191 new_xcpus_state = true; 3192 } else { 3193 /* 3194 * Switching back to member is always allowed even if it 3195 * disables child partitions. 3196 */ 3197 if (is_remote_partition(cs)) 3198 remote_partition_disable(cs, &tmpmask); 3199 else 3200 update_parent_effective_cpumask(cs, partcmd_disable, 3201 NULL, &tmpmask); 3202 3203 /* 3204 * Invalidation of child partitions will be done in 3205 * update_cpumasks_hier(). 3206 */ 3207 } 3208 out: 3209 /* 3210 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error 3211 * happens. 
3212 */ 3213 if (err) { 3214 new_prs = -new_prs; 3215 update_partition_exclusive(cs, new_prs); 3216 } 3217 3218 spin_lock_irq(&callback_lock); 3219 cs->partition_root_state = new_prs; 3220 WRITE_ONCE(cs->prs_err, err); 3221 if (!is_partition_valid(cs)) 3222 reset_partition_data(cs); 3223 else if (new_xcpus_state) 3224 partition_xcpus_newstate(old_prs, new_prs, cs->effective_xcpus); 3225 spin_unlock_irq(&callback_lock); 3226 update_unbound_workqueue_cpumask(new_xcpus_state); 3227 3228 /* Force update if switching back to member */ 3229 update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0); 3230 3231 /* Update sched domains and load balance flag */ 3232 update_partition_sd_lb(cs, old_prs); 3233 3234 notify_partition_change(cs, old_prs); 3235 free_cpumasks(NULL, &tmpmask); 3236 return 0; 3237 } 3238 3239 /* 3240 * Frequency meter - How fast is some event occurring? 3241 * 3242 * These routines manage a digitally filtered, constant time based, 3243 * event frequency meter. There are four routines: 3244 * fmeter_init() - initialize a frequency meter. 3245 * fmeter_markevent() - called each time the event happens. 3246 * fmeter_getrate() - returns the recent rate of such events. 3247 * fmeter_update() - internal routine used to update fmeter. 3248 * 3249 * A common data structure is passed to each of these routines, 3250 * which is used to keep track of the state required to manage the 3251 * frequency meter and its digital filter. 3252 * 3253 * The filter works on the number of events marked per unit time. 3254 * The filter is single-pole low-pass recursive (IIR). The time unit 3255 * is 1 second. Arithmetic is done using 32-bit integers scaled to 3256 * simulate 3 decimal digits of precision (multiplied by 1000). 3257 * 3258 * With an FM_COEF of 933, and a time base of 1 second, the filter 3259 * has a half-life of 10 seconds, meaning that if the events quit 3260 * happening, then the rate returned from the fmeter_getrate() 3261 * will be cut in half each 10 seconds, until it converges to zero. 3262 * 3263 * It is not worth doing a real infinitely recursive filter. If more 3264 * than FM_MAXTICKS ticks have elapsed since the last filter event, 3265 * just compute FM_MAXTICKS ticks worth, by which point the level 3266 * will be stable. 3267 * 3268 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid 3269 * arithmetic overflow in the fmeter_update() routine. 3270 * 3271 * Given the simple 32 bit integer arithmetic used, this meter works 3272 * best for reporting rates between one per millisecond (msec) and 3273 * one per 32 (approx) seconds. At constant rates faster than one 3274 * per msec it maxes out at values just under 1,000,000. At constant 3275 * rates between one per msec, and one per second it will stabilize 3276 * to a value N*1000, where N is the rate of events per second. 3277 * At constant rates between one per second and one per 32 seconds, 3278 * it will be choppy, moving up on the seconds that have an event, 3279 * and then decaying until the next event. At rates slower than 3280 * about one in 32 seconds, it decays all the way back to zero between 3281 * each event. 
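 *
 * A worked example with the values below: each elapsed second scales val
 * by FM_COEF/FM_SCALE = 0.933, and 0.933^10 is about 0.5, giving the
 * 10 second half-life. Each fmeter_markevent() bumps cnt by FM_SCALE and
 * the next fmeter_update() folds it in as (FM_SCALE - FM_COEF) * cnt /
 * FM_SCALE, i.e. 67 per event, so a steady rate of one event per second
 * settles where val = 0.933 * val + 67, that is val = 1000, matching the
 * N*1000 behaviour described above.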
3282 */ 3283 3284 #define FM_COEF 933 /* coefficient for half-life of 10 secs */ 3285 #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */ 3286 #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ 3287 #define FM_SCALE 1000 /* faux fixed point scale */ 3288 3289 /* Initialize a frequency meter */ 3290 static void fmeter_init(struct fmeter *fmp) 3291 { 3292 fmp->cnt = 0; 3293 fmp->val = 0; 3294 fmp->time = 0; 3295 spin_lock_init(&fmp->lock); 3296 } 3297 3298 /* Internal meter update - process cnt events and update value */ 3299 static void fmeter_update(struct fmeter *fmp) 3300 { 3301 time64_t now; 3302 u32 ticks; 3303 3304 now = ktime_get_seconds(); 3305 ticks = now - fmp->time; 3306 3307 if (ticks == 0) 3308 return; 3309 3310 ticks = min(FM_MAXTICKS, ticks); 3311 while (ticks-- > 0) 3312 fmp->val = (FM_COEF * fmp->val) / FM_SCALE; 3313 fmp->time = now; 3314 3315 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; 3316 fmp->cnt = 0; 3317 } 3318 3319 /* Process any previous ticks, then bump cnt by one (times scale). */ 3320 static void fmeter_markevent(struct fmeter *fmp) 3321 { 3322 spin_lock(&fmp->lock); 3323 fmeter_update(fmp); 3324 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); 3325 spin_unlock(&fmp->lock); 3326 } 3327 3328 /* Process any previous ticks, then return current value. */ 3329 static int fmeter_getrate(struct fmeter *fmp) 3330 { 3331 int val; 3332 3333 spin_lock(&fmp->lock); 3334 fmeter_update(fmp); 3335 val = fmp->val; 3336 spin_unlock(&fmp->lock); 3337 return val; 3338 } 3339 3340 static struct cpuset *cpuset_attach_old_cs; 3341 3342 /* 3343 * Check to see if a cpuset can accept a new task 3344 * For v1, cpus_allowed and mems_allowed can't be empty. 3345 * For v2, effective_cpus can't be empty. 3346 * Note that in v1, effective_cpus = cpus_allowed. 3347 */ 3348 static int cpuset_can_attach_check(struct cpuset *cs) 3349 { 3350 if (cpumask_empty(cs->effective_cpus) || 3351 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed))) 3352 return -ENOSPC; 3353 return 0; 3354 } 3355 3356 static void reset_migrate_dl_data(struct cpuset *cs) 3357 { 3358 cs->nr_migrate_dl_tasks = 0; 3359 cs->sum_migrate_dl_bw = 0; 3360 } 3361 3362 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ 3363 static int cpuset_can_attach(struct cgroup_taskset *tset) 3364 { 3365 struct cgroup_subsys_state *css; 3366 struct cpuset *cs, *oldcs; 3367 struct task_struct *task; 3368 bool cpus_updated, mems_updated; 3369 int ret; 3370 3371 /* used later by cpuset_attach() */ 3372 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); 3373 oldcs = cpuset_attach_old_cs; 3374 cs = css_cs(css); 3375 3376 mutex_lock(&cpuset_mutex); 3377 3378 /* Check to see if task is allowed in the cpuset */ 3379 ret = cpuset_can_attach_check(cs); 3380 if (ret) 3381 goto out_unlock; 3382 3383 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus); 3384 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); 3385 3386 cgroup_taskset_for_each(task, css, tset) { 3387 ret = task_can_attach(task); 3388 if (ret) 3389 goto out_unlock; 3390 3391 /* 3392 * Skip rights over task check in v2 when nothing changes, 3393 * migration permission derives from hierarchy ownership in 3394 * cgroup_procs_write_permission()). 
3395 */ 3396 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || 3397 (cpus_updated || mems_updated)) { 3398 ret = security_task_setscheduler(task); 3399 if (ret) 3400 goto out_unlock; 3401 } 3402 3403 if (dl_task(task)) { 3404 cs->nr_migrate_dl_tasks++; 3405 cs->sum_migrate_dl_bw += task->dl.dl_bw; 3406 } 3407 } 3408 3409 if (!cs->nr_migrate_dl_tasks) 3410 goto out_success; 3411 3412 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { 3413 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); 3414 3415 if (unlikely(cpu >= nr_cpu_ids)) { 3416 reset_migrate_dl_data(cs); 3417 ret = -EINVAL; 3418 goto out_unlock; 3419 } 3420 3421 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); 3422 if (ret) { 3423 reset_migrate_dl_data(cs); 3424 goto out_unlock; 3425 } 3426 } 3427 3428 out_success: 3429 /* 3430 * Mark attach is in progress. This makes validate_change() fail 3431 * changes which zero cpus/mems_allowed. 3432 */ 3433 cs->attach_in_progress++; 3434 out_unlock: 3435 mutex_unlock(&cpuset_mutex); 3436 return ret; 3437 } 3438 3439 static void cpuset_cancel_attach(struct cgroup_taskset *tset) 3440 { 3441 struct cgroup_subsys_state *css; 3442 struct cpuset *cs; 3443 3444 cgroup_taskset_first(tset, &css); 3445 cs = css_cs(css); 3446 3447 mutex_lock(&cpuset_mutex); 3448 cs->attach_in_progress--; 3449 if (!cs->attach_in_progress) 3450 wake_up(&cpuset_attach_wq); 3451 3452 if (cs->nr_migrate_dl_tasks) { 3453 int cpu = cpumask_any(cs->effective_cpus); 3454 3455 dl_bw_free(cpu, cs->sum_migrate_dl_bw); 3456 reset_migrate_dl_data(cs); 3457 } 3458 3459 mutex_unlock(&cpuset_mutex); 3460 } 3461 3462 /* 3463 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task() 3464 * but we can't allocate it dynamically there. Define it global and 3465 * allocate from cpuset_init(). 3466 */ 3467 static cpumask_var_t cpus_attach; 3468 static nodemask_t cpuset_attach_nodemask_to; 3469 3470 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task) 3471 { 3472 lockdep_assert_held(&cpuset_mutex); 3473 3474 if (cs != &top_cpuset) 3475 guarantee_online_cpus(task, cpus_attach); 3476 else 3477 cpumask_andnot(cpus_attach, task_cpu_possible_mask(task), 3478 subpartitions_cpus); 3479 /* 3480 * can_attach beforehand should guarantee that this doesn't 3481 * fail. TODO: have a better way to handle failure here 3482 */ 3483 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); 3484 3485 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); 3486 cpuset_update_task_spread_flags(cs, task); 3487 } 3488 3489 static void cpuset_attach(struct cgroup_taskset *tset) 3490 { 3491 struct task_struct *task; 3492 struct task_struct *leader; 3493 struct cgroup_subsys_state *css; 3494 struct cpuset *cs; 3495 struct cpuset *oldcs = cpuset_attach_old_cs; 3496 bool cpus_updated, mems_updated; 3497 3498 cgroup_taskset_first(tset, &css); 3499 cs = css_cs(css); 3500 3501 lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */ 3502 mutex_lock(&cpuset_mutex); 3503 cpus_updated = !cpumask_equal(cs->effective_cpus, 3504 oldcs->effective_cpus); 3505 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); 3506 3507 /* 3508 * In the default hierarchy, enabling cpuset in the child cgroups 3509 * will trigger a number of cpuset_attach() calls with no change 3510 * in effective cpus and mems. In that case, we can optimize out 3511 * by skipping the task iteration and update. 
3512 */ 3513 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && 3514 !cpus_updated && !mems_updated) { 3515 cpuset_attach_nodemask_to = cs->effective_mems; 3516 goto out; 3517 } 3518 3519 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); 3520 3521 cgroup_taskset_for_each(task, css, tset) 3522 cpuset_attach_task(cs, task); 3523 3524 /* 3525 * Change mm for all threadgroup leaders. This is expensive and may 3526 * sleep and should be moved outside migration path proper. Skip it 3527 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is 3528 * not set. 3529 */ 3530 cpuset_attach_nodemask_to = cs->effective_mems; 3531 if (!is_memory_migrate(cs) && !mems_updated) 3532 goto out; 3533 3534 cgroup_taskset_for_each_leader(leader, css, tset) { 3535 struct mm_struct *mm = get_task_mm(leader); 3536 3537 if (mm) { 3538 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); 3539 3540 /* 3541 * old_mems_allowed is the same with mems_allowed 3542 * here, except if this task is being moved 3543 * automatically due to hotplug. In that case 3544 * @mems_allowed has been updated and is empty, so 3545 * @old_mems_allowed is the right nodesets that we 3546 * migrate mm from. 3547 */ 3548 if (is_memory_migrate(cs)) 3549 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, 3550 &cpuset_attach_nodemask_to); 3551 else 3552 mmput(mm); 3553 } 3554 } 3555 3556 out: 3557 cs->old_mems_allowed = cpuset_attach_nodemask_to; 3558 3559 if (cs->nr_migrate_dl_tasks) { 3560 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; 3561 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; 3562 reset_migrate_dl_data(cs); 3563 } 3564 3565 cs->attach_in_progress--; 3566 if (!cs->attach_in_progress) 3567 wake_up(&cpuset_attach_wq); 3568 3569 mutex_unlock(&cpuset_mutex); 3570 } 3571 3572 /* The various types of files and directories in a cpuset file system */ 3573 3574 typedef enum { 3575 FILE_MEMORY_MIGRATE, 3576 FILE_CPULIST, 3577 FILE_MEMLIST, 3578 FILE_EFFECTIVE_CPULIST, 3579 FILE_EFFECTIVE_MEMLIST, 3580 FILE_SUBPARTS_CPULIST, 3581 FILE_EXCLUSIVE_CPULIST, 3582 FILE_EFFECTIVE_XCPULIST, 3583 FILE_ISOLATED_CPULIST, 3584 FILE_CPU_EXCLUSIVE, 3585 FILE_MEM_EXCLUSIVE, 3586 FILE_MEM_HARDWALL, 3587 FILE_SCHED_LOAD_BALANCE, 3588 FILE_PARTITION_ROOT, 3589 FILE_SCHED_RELAX_DOMAIN_LEVEL, 3590 FILE_MEMORY_PRESSURE_ENABLED, 3591 FILE_MEMORY_PRESSURE, 3592 FILE_SPREAD_PAGE, 3593 FILE_SPREAD_SLAB, 3594 } cpuset_filetype_t; 3595 3596 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, 3597 u64 val) 3598 { 3599 struct cpuset *cs = css_cs(css); 3600 cpuset_filetype_t type = cft->private; 3601 int retval = 0; 3602 3603 cpus_read_lock(); 3604 mutex_lock(&cpuset_mutex); 3605 if (!is_cpuset_online(cs)) { 3606 retval = -ENODEV; 3607 goto out_unlock; 3608 } 3609 3610 switch (type) { 3611 case FILE_CPU_EXCLUSIVE: 3612 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); 3613 break; 3614 case FILE_MEM_EXCLUSIVE: 3615 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); 3616 break; 3617 case FILE_MEM_HARDWALL: 3618 retval = update_flag(CS_MEM_HARDWALL, cs, val); 3619 break; 3620 case FILE_SCHED_LOAD_BALANCE: 3621 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); 3622 break; 3623 case FILE_MEMORY_MIGRATE: 3624 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); 3625 break; 3626 case FILE_MEMORY_PRESSURE_ENABLED: 3627 cpuset_memory_pressure_enabled = !!val; 3628 break; 3629 case FILE_SPREAD_PAGE: 3630 retval = update_flag(CS_SPREAD_PAGE, cs, val); 3631 break; 3632 case FILE_SPREAD_SLAB: 3633 retval = update_flag(CS_SPREAD_SLAB, cs, val); 3634 
break; 3635 default: 3636 retval = -EINVAL; 3637 break; 3638 } 3639 out_unlock: 3640 mutex_unlock(&cpuset_mutex); 3641 cpus_read_unlock(); 3642 return retval; 3643 } 3644 3645 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, 3646 s64 val) 3647 { 3648 struct cpuset *cs = css_cs(css); 3649 cpuset_filetype_t type = cft->private; 3650 int retval = -ENODEV; 3651 3652 cpus_read_lock(); 3653 mutex_lock(&cpuset_mutex); 3654 if (!is_cpuset_online(cs)) 3655 goto out_unlock; 3656 3657 switch (type) { 3658 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 3659 retval = update_relax_domain_level(cs, val); 3660 break; 3661 default: 3662 retval = -EINVAL; 3663 break; 3664 } 3665 out_unlock: 3666 mutex_unlock(&cpuset_mutex); 3667 cpus_read_unlock(); 3668 return retval; 3669 } 3670 3671 /* 3672 * Common handling for a write to a "cpus" or "mems" file. 3673 */ 3674 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, 3675 char *buf, size_t nbytes, loff_t off) 3676 { 3677 struct cpuset *cs = css_cs(of_css(of)); 3678 struct cpuset *trialcs; 3679 int retval = -ENODEV; 3680 3681 buf = strstrip(buf); 3682 3683 /* 3684 * CPU or memory hotunplug may leave @cs w/o any execution 3685 * resources, in which case the hotplug code asynchronously updates 3686 * configuration and transfers all tasks to the nearest ancestor 3687 * which can execute. 3688 * 3689 * As writes to "cpus" or "mems" may restore @cs's execution 3690 * resources, wait for the previously scheduled operations before 3691 * proceeding, so that we don't end up keep removing tasks added 3692 * after execution capability is restored. 3693 * 3694 * cpuset_handle_hotplug may call back into cgroup core asynchronously 3695 * via cgroup_transfer_tasks() and waiting for it from a cgroupfs 3696 * operation like this one can lead to a deadlock through kernfs 3697 * active_ref protection. Let's break the protection. Losing the 3698 * protection is okay as we check whether @cs is online after 3699 * grabbing cpuset_mutex anyway. This only happens on the legacy 3700 * hierarchies. 3701 */ 3702 css_get(&cs->css); 3703 kernfs_break_active_protection(of->kn); 3704 3705 cpus_read_lock(); 3706 mutex_lock(&cpuset_mutex); 3707 if (!is_cpuset_online(cs)) 3708 goto out_unlock; 3709 3710 trialcs = alloc_trial_cpuset(cs); 3711 if (!trialcs) { 3712 retval = -ENOMEM; 3713 goto out_unlock; 3714 } 3715 3716 switch (of_cft(of)->private) { 3717 case FILE_CPULIST: 3718 retval = update_cpumask(cs, trialcs, buf); 3719 break; 3720 case FILE_EXCLUSIVE_CPULIST: 3721 retval = update_exclusive_cpumask(cs, trialcs, buf); 3722 break; 3723 case FILE_MEMLIST: 3724 retval = update_nodemask(cs, trialcs, buf); 3725 break; 3726 default: 3727 retval = -EINVAL; 3728 break; 3729 } 3730 3731 free_cpuset(trialcs); 3732 out_unlock: 3733 mutex_unlock(&cpuset_mutex); 3734 cpus_read_unlock(); 3735 kernfs_unbreak_active_protection(of->kn); 3736 css_put(&cs->css); 3737 flush_workqueue(cpuset_migrate_mm_wq); 3738 return retval ?: nbytes; 3739 } 3740 3741 /* 3742 * These ascii lists should be read in a single call, by using a user 3743 * buffer large enough to hold the entire map. If read in smaller 3744 * chunks, there is no guarantee of atomicity. Since the display format 3745 * used, list of ranges of sequential numbers, is variable length, 3746 * and since these maps can change value dynamically, one could read 3747 * gibberish by doing partial reads while a list was changing. 
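 *
 * A reader should therefore size its buffer for the whole list and fetch
 * it with a single read(2), e.g. when reading cpuset.cpus.effective,
 * rather than looping over small chunks.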
3748 */ 3749 static int cpuset_common_seq_show(struct seq_file *sf, void *v) 3750 { 3751 struct cpuset *cs = css_cs(seq_css(sf)); 3752 cpuset_filetype_t type = seq_cft(sf)->private; 3753 int ret = 0; 3754 3755 spin_lock_irq(&callback_lock); 3756 3757 switch (type) { 3758 case FILE_CPULIST: 3759 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); 3760 break; 3761 case FILE_MEMLIST: 3762 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); 3763 break; 3764 case FILE_EFFECTIVE_CPULIST: 3765 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); 3766 break; 3767 case FILE_EFFECTIVE_MEMLIST: 3768 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); 3769 break; 3770 case FILE_EXCLUSIVE_CPULIST: 3771 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus)); 3772 break; 3773 case FILE_EFFECTIVE_XCPULIST: 3774 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus)); 3775 break; 3776 case FILE_SUBPARTS_CPULIST: 3777 seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus)); 3778 break; 3779 case FILE_ISOLATED_CPULIST: 3780 seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus)); 3781 break; 3782 default: 3783 ret = -EINVAL; 3784 } 3785 3786 spin_unlock_irq(&callback_lock); 3787 return ret; 3788 } 3789 3790 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) 3791 { 3792 struct cpuset *cs = css_cs(css); 3793 cpuset_filetype_t type = cft->private; 3794 switch (type) { 3795 case FILE_CPU_EXCLUSIVE: 3796 return is_cpu_exclusive(cs); 3797 case FILE_MEM_EXCLUSIVE: 3798 return is_mem_exclusive(cs); 3799 case FILE_MEM_HARDWALL: 3800 return is_mem_hardwall(cs); 3801 case FILE_SCHED_LOAD_BALANCE: 3802 return is_sched_load_balance(cs); 3803 case FILE_MEMORY_MIGRATE: 3804 return is_memory_migrate(cs); 3805 case FILE_MEMORY_PRESSURE_ENABLED: 3806 return cpuset_memory_pressure_enabled; 3807 case FILE_MEMORY_PRESSURE: 3808 return fmeter_getrate(&cs->fmeter); 3809 case FILE_SPREAD_PAGE: 3810 return is_spread_page(cs); 3811 case FILE_SPREAD_SLAB: 3812 return is_spread_slab(cs); 3813 default: 3814 BUG(); 3815 } 3816 3817 /* Unreachable but makes gcc happy */ 3818 return 0; 3819 } 3820 3821 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) 3822 { 3823 struct cpuset *cs = css_cs(css); 3824 cpuset_filetype_t type = cft->private; 3825 switch (type) { 3826 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 3827 return cs->relax_domain_level; 3828 default: 3829 BUG(); 3830 } 3831 3832 /* Unreachable but makes gcc happy */ 3833 return 0; 3834 } 3835 3836 static int sched_partition_show(struct seq_file *seq, void *v) 3837 { 3838 struct cpuset *cs = css_cs(seq_css(seq)); 3839 const char *err, *type = NULL; 3840 3841 switch (cs->partition_root_state) { 3842 case PRS_ROOT: 3843 seq_puts(seq, "root\n"); 3844 break; 3845 case PRS_ISOLATED: 3846 seq_puts(seq, "isolated\n"); 3847 break; 3848 case PRS_MEMBER: 3849 seq_puts(seq, "member\n"); 3850 break; 3851 case PRS_INVALID_ROOT: 3852 type = "root"; 3853 fallthrough; 3854 case PRS_INVALID_ISOLATED: 3855 if (!type) 3856 type = "isolated"; 3857 err = perr_strings[READ_ONCE(cs->prs_err)]; 3858 if (err) 3859 seq_printf(seq, "%s invalid (%s)\n", type, err); 3860 else 3861 seq_printf(seq, "%s invalid\n", type); 3862 break; 3863 } 3864 return 0; 3865 } 3866 3867 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf, 3868 size_t nbytes, loff_t off) 3869 { 3870 struct cpuset *cs = css_cs(of_css(of)); 3871 int val; 3872 int retval = -ENODEV; 3873 3874 buf = 
strstrip(buf); 3875 3876 if (!strcmp(buf, "root")) 3877 val = PRS_ROOT; 3878 else if (!strcmp(buf, "member")) 3879 val = PRS_MEMBER; 3880 else if (!strcmp(buf, "isolated")) 3881 val = PRS_ISOLATED; 3882 else 3883 return -EINVAL; 3884 3885 css_get(&cs->css); 3886 cpus_read_lock(); 3887 mutex_lock(&cpuset_mutex); 3888 if (!is_cpuset_online(cs)) 3889 goto out_unlock; 3890 3891 retval = update_prstate(cs, val); 3892 out_unlock: 3893 mutex_unlock(&cpuset_mutex); 3894 cpus_read_unlock(); 3895 css_put(&cs->css); 3896 return retval ?: nbytes; 3897 } 3898 3899 /* 3900 * for the common functions, 'private' gives the type of file 3901 */ 3902 3903 static struct cftype legacy_files[] = { 3904 { 3905 .name = "cpus", 3906 .seq_show = cpuset_common_seq_show, 3907 .write = cpuset_write_resmask, 3908 .max_write_len = (100U + 6 * NR_CPUS), 3909 .private = FILE_CPULIST, 3910 }, 3911 3912 { 3913 .name = "mems", 3914 .seq_show = cpuset_common_seq_show, 3915 .write = cpuset_write_resmask, 3916 .max_write_len = (100U + 6 * MAX_NUMNODES), 3917 .private = FILE_MEMLIST, 3918 }, 3919 3920 { 3921 .name = "effective_cpus", 3922 .seq_show = cpuset_common_seq_show, 3923 .private = FILE_EFFECTIVE_CPULIST, 3924 }, 3925 3926 { 3927 .name = "effective_mems", 3928 .seq_show = cpuset_common_seq_show, 3929 .private = FILE_EFFECTIVE_MEMLIST, 3930 }, 3931 3932 { 3933 .name = "cpu_exclusive", 3934 .read_u64 = cpuset_read_u64, 3935 .write_u64 = cpuset_write_u64, 3936 .private = FILE_CPU_EXCLUSIVE, 3937 }, 3938 3939 { 3940 .name = "mem_exclusive", 3941 .read_u64 = cpuset_read_u64, 3942 .write_u64 = cpuset_write_u64, 3943 .private = FILE_MEM_EXCLUSIVE, 3944 }, 3945 3946 { 3947 .name = "mem_hardwall", 3948 .read_u64 = cpuset_read_u64, 3949 .write_u64 = cpuset_write_u64, 3950 .private = FILE_MEM_HARDWALL, 3951 }, 3952 3953 { 3954 .name = "sched_load_balance", 3955 .read_u64 = cpuset_read_u64, 3956 .write_u64 = cpuset_write_u64, 3957 .private = FILE_SCHED_LOAD_BALANCE, 3958 }, 3959 3960 { 3961 .name = "sched_relax_domain_level", 3962 .read_s64 = cpuset_read_s64, 3963 .write_s64 = cpuset_write_s64, 3964 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, 3965 }, 3966 3967 { 3968 .name = "memory_migrate", 3969 .read_u64 = cpuset_read_u64, 3970 .write_u64 = cpuset_write_u64, 3971 .private = FILE_MEMORY_MIGRATE, 3972 }, 3973 3974 { 3975 .name = "memory_pressure", 3976 .read_u64 = cpuset_read_u64, 3977 .private = FILE_MEMORY_PRESSURE, 3978 }, 3979 3980 { 3981 .name = "memory_spread_page", 3982 .read_u64 = cpuset_read_u64, 3983 .write_u64 = cpuset_write_u64, 3984 .private = FILE_SPREAD_PAGE, 3985 }, 3986 3987 { 3988 /* obsolete, may be removed in the future */ 3989 .name = "memory_spread_slab", 3990 .read_u64 = cpuset_read_u64, 3991 .write_u64 = cpuset_write_u64, 3992 .private = FILE_SPREAD_SLAB, 3993 }, 3994 3995 { 3996 .name = "memory_pressure_enabled", 3997 .flags = CFTYPE_ONLY_ON_ROOT, 3998 .read_u64 = cpuset_read_u64, 3999 .write_u64 = cpuset_write_u64, 4000 .private = FILE_MEMORY_PRESSURE_ENABLED, 4001 }, 4002 4003 { } /* terminate */ 4004 }; 4005 4006 /* 4007 * This is currently a minimal set for the default hierarchy. It can be 4008 * expanded later on by migrating more features and control files from v1. 
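 *
 * On the default hierarchy, cgroup core prefixes each entry below with
 * the controller name, so they appear as cpuset.cpus, cpuset.mems,
 * cpuset.cpus.partition and so on. A purely illustrative usage sketch
 * (paths and values are examples, not taken from this file):
 *
 *   echo "0-3"  > /sys/fs/cgroup/mygrp/cpuset.cpus
 *   echo "root" > /sys/fs/cgroup/mygrp/cpuset.cpus.partition
 *   cat /sys/fs/cgroup/mygrp/cpuset.cpus.effective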
4009 */ 4010 static struct cftype dfl_files[] = { 4011 { 4012 .name = "cpus", 4013 .seq_show = cpuset_common_seq_show, 4014 .write = cpuset_write_resmask, 4015 .max_write_len = (100U + 6 * NR_CPUS), 4016 .private = FILE_CPULIST, 4017 .flags = CFTYPE_NOT_ON_ROOT, 4018 }, 4019 4020 { 4021 .name = "mems", 4022 .seq_show = cpuset_common_seq_show, 4023 .write = cpuset_write_resmask, 4024 .max_write_len = (100U + 6 * MAX_NUMNODES), 4025 .private = FILE_MEMLIST, 4026 .flags = CFTYPE_NOT_ON_ROOT, 4027 }, 4028 4029 { 4030 .name = "cpus.effective", 4031 .seq_show = cpuset_common_seq_show, 4032 .private = FILE_EFFECTIVE_CPULIST, 4033 }, 4034 4035 { 4036 .name = "mems.effective", 4037 .seq_show = cpuset_common_seq_show, 4038 .private = FILE_EFFECTIVE_MEMLIST, 4039 }, 4040 4041 { 4042 .name = "cpus.partition", 4043 .seq_show = sched_partition_show, 4044 .write = sched_partition_write, 4045 .private = FILE_PARTITION_ROOT, 4046 .flags = CFTYPE_NOT_ON_ROOT, 4047 .file_offset = offsetof(struct cpuset, partition_file), 4048 }, 4049 4050 { 4051 .name = "cpus.exclusive", 4052 .seq_show = cpuset_common_seq_show, 4053 .write = cpuset_write_resmask, 4054 .max_write_len = (100U + 6 * NR_CPUS), 4055 .private = FILE_EXCLUSIVE_CPULIST, 4056 .flags = CFTYPE_NOT_ON_ROOT, 4057 }, 4058 4059 { 4060 .name = "cpus.exclusive.effective", 4061 .seq_show = cpuset_common_seq_show, 4062 .private = FILE_EFFECTIVE_XCPULIST, 4063 .flags = CFTYPE_NOT_ON_ROOT, 4064 }, 4065 4066 { 4067 .name = "cpus.subpartitions", 4068 .seq_show = cpuset_common_seq_show, 4069 .private = FILE_SUBPARTS_CPULIST, 4070 .flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG, 4071 }, 4072 4073 { 4074 .name = "cpus.isolated", 4075 .seq_show = cpuset_common_seq_show, 4076 .private = FILE_ISOLATED_CPULIST, 4077 .flags = CFTYPE_ONLY_ON_ROOT, 4078 }, 4079 4080 { } /* terminate */ 4081 }; 4082 4083 4084 /** 4085 * cpuset_css_alloc - Allocate a cpuset css 4086 * @parent_css: Parent css of the control group that the new cpuset will be 4087 * part of 4088 * Return: cpuset css on success, -ENOMEM on failure. 4089 * 4090 * Allocate and initialize a new cpuset css, for non-NULL @parent_css, return 4091 * top cpuset css otherwise. 
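 *
 * Note that, as in the body below, failures are reported as ERR_PTR()
 * values (e.g. ERR_PTR(-ENOMEM)) rather than as a bare error code.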
4092 */ 4093 static struct cgroup_subsys_state * 4094 cpuset_css_alloc(struct cgroup_subsys_state *parent_css) 4095 { 4096 struct cpuset *cs; 4097 4098 if (!parent_css) 4099 return &top_cpuset.css; 4100 4101 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 4102 if (!cs) 4103 return ERR_PTR(-ENOMEM); 4104 4105 if (alloc_cpumasks(cs, NULL)) { 4106 kfree(cs); 4107 return ERR_PTR(-ENOMEM); 4108 } 4109 4110 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 4111 fmeter_init(&cs->fmeter); 4112 cs->relax_domain_level = -1; 4113 INIT_LIST_HEAD(&cs->remote_sibling); 4114 4115 /* Set CS_MEMORY_MIGRATE for default hierarchy */ 4116 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) 4117 __set_bit(CS_MEMORY_MIGRATE, &cs->flags); 4118 4119 return &cs->css; 4120 } 4121 4122 static int cpuset_css_online(struct cgroup_subsys_state *css) 4123 { 4124 struct cpuset *cs = css_cs(css); 4125 struct cpuset *parent = parent_cs(cs); 4126 struct cpuset *tmp_cs; 4127 struct cgroup_subsys_state *pos_css; 4128 4129 if (!parent) 4130 return 0; 4131 4132 cpus_read_lock(); 4133 mutex_lock(&cpuset_mutex); 4134 4135 set_bit(CS_ONLINE, &cs->flags); 4136 if (is_spread_page(parent)) 4137 set_bit(CS_SPREAD_PAGE, &cs->flags); 4138 if (is_spread_slab(parent)) 4139 set_bit(CS_SPREAD_SLAB, &cs->flags); 4140 /* 4141 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated 4142 */ 4143 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && 4144 !is_sched_load_balance(parent)) 4145 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 4146 4147 cpuset_inc(); 4148 4149 spin_lock_irq(&callback_lock); 4150 if (is_in_v2_mode()) { 4151 cpumask_copy(cs->effective_cpus, parent->effective_cpus); 4152 cs->effective_mems = parent->effective_mems; 4153 cs->use_parent_ecpus = true; 4154 parent->child_ecpus_count++; 4155 } 4156 spin_unlock_irq(&callback_lock); 4157 4158 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) 4159 goto out_unlock; 4160 4161 /* 4162 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is 4163 * set. This flag handling is implemented in cgroup core for 4164 * historical reasons - the flag may be specified during mount. 4165 * 4166 * Currently, if any sibling cpusets have exclusive cpus or mem, we 4167 * refuse to clone the configuration - thereby refusing the task to 4168 * be entered, and as a result refusing the sys_unshare() or 4169 * clone() which initiated it. If this becomes a problem for some 4170 * users who wish to allow that scenario, then this could be 4171 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive 4172 * (and likewise for mems) to the new cgroup. 4173 */ 4174 rcu_read_lock(); 4175 cpuset_for_each_child(tmp_cs, pos_css, parent) { 4176 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { 4177 rcu_read_unlock(); 4178 goto out_unlock; 4179 } 4180 } 4181 rcu_read_unlock(); 4182 4183 spin_lock_irq(&callback_lock); 4184 cs->mems_allowed = parent->mems_allowed; 4185 cs->effective_mems = parent->mems_allowed; 4186 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); 4187 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); 4188 spin_unlock_irq(&callback_lock); 4189 out_unlock: 4190 mutex_unlock(&cpuset_mutex); 4191 cpus_read_unlock(); 4192 return 0; 4193 } 4194 4195 /* 4196 * If the cpuset being removed has its flag 'sched_load_balance' 4197 * enabled, then simulate turning sched_load_balance off, which 4198 * will call rebuild_sched_domains_locked(). That is not needed 4199 * in the default hierarchy where only changes in partition 4200 * will cause repartitioning. 
4201 * 4202 * If the cpuset has the 'sched.partition' flag enabled, simulate 4203 * turning 'sched.partition" off. 4204 */ 4205 4206 static void cpuset_css_offline(struct cgroup_subsys_state *css) 4207 { 4208 struct cpuset *cs = css_cs(css); 4209 4210 cpus_read_lock(); 4211 mutex_lock(&cpuset_mutex); 4212 4213 if (is_partition_valid(cs)) 4214 update_prstate(cs, 0); 4215 4216 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && 4217 is_sched_load_balance(cs)) 4218 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); 4219 4220 if (cs->use_parent_ecpus) { 4221 struct cpuset *parent = parent_cs(cs); 4222 4223 cs->use_parent_ecpus = false; 4224 parent->child_ecpus_count--; 4225 } 4226 4227 cpuset_dec(); 4228 clear_bit(CS_ONLINE, &cs->flags); 4229 4230 mutex_unlock(&cpuset_mutex); 4231 cpus_read_unlock(); 4232 } 4233 4234 static void cpuset_css_free(struct cgroup_subsys_state *css) 4235 { 4236 struct cpuset *cs = css_cs(css); 4237 4238 free_cpuset(cs); 4239 } 4240 4241 static void cpuset_bind(struct cgroup_subsys_state *root_css) 4242 { 4243 mutex_lock(&cpuset_mutex); 4244 spin_lock_irq(&callback_lock); 4245 4246 if (is_in_v2_mode()) { 4247 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); 4248 cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask); 4249 top_cpuset.mems_allowed = node_possible_map; 4250 } else { 4251 cpumask_copy(top_cpuset.cpus_allowed, 4252 top_cpuset.effective_cpus); 4253 top_cpuset.mems_allowed = top_cpuset.effective_mems; 4254 } 4255 4256 spin_unlock_irq(&callback_lock); 4257 mutex_unlock(&cpuset_mutex); 4258 } 4259 4260 /* 4261 * In case the child is cloned into a cpuset different from its parent, 4262 * additional checks are done to see if the move is allowed. 4263 */ 4264 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset) 4265 { 4266 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); 4267 bool same_cs; 4268 int ret; 4269 4270 rcu_read_lock(); 4271 same_cs = (cs == task_cs(current)); 4272 rcu_read_unlock(); 4273 4274 if (same_cs) 4275 return 0; 4276 4277 lockdep_assert_held(&cgroup_mutex); 4278 mutex_lock(&cpuset_mutex); 4279 4280 /* Check to see if task is allowed in the cpuset */ 4281 ret = cpuset_can_attach_check(cs); 4282 if (ret) 4283 goto out_unlock; 4284 4285 ret = task_can_attach(task); 4286 if (ret) 4287 goto out_unlock; 4288 4289 ret = security_task_setscheduler(task); 4290 if (ret) 4291 goto out_unlock; 4292 4293 /* 4294 * Mark attach is in progress. This makes validate_change() fail 4295 * changes which zero cpus/mems_allowed. 4296 */ 4297 cs->attach_in_progress++; 4298 out_unlock: 4299 mutex_unlock(&cpuset_mutex); 4300 return ret; 4301 } 4302 4303 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset) 4304 { 4305 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); 4306 bool same_cs; 4307 4308 rcu_read_lock(); 4309 same_cs = (cs == task_cs(current)); 4310 rcu_read_unlock(); 4311 4312 if (same_cs) 4313 return; 4314 4315 mutex_lock(&cpuset_mutex); 4316 cs->attach_in_progress--; 4317 if (!cs->attach_in_progress) 4318 wake_up(&cpuset_attach_wq); 4319 mutex_unlock(&cpuset_mutex); 4320 } 4321 4322 /* 4323 * Make sure the new task conform to the current state of its parent, 4324 * which could have been changed by cpuset just after it inherits the 4325 * state from the parent and before it sits on the cgroup's task list. 
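 *
 * Two cases are distinguished below: after a plain fork() the child is in
 * the same cpuset as current, so only its cpumask and mems_allowed need
 * refreshing; with clone3(CLONE_INTO_CGROUP) the child may start in a
 * different cpuset, which is completed here like a deferred attach that
 * cpuset_can_fork() prepared (attach_in_progress was bumped there).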
4326 */ 4327 static void cpuset_fork(struct task_struct *task) 4328 { 4329 struct cpuset *cs; 4330 bool same_cs; 4331 4332 rcu_read_lock(); 4333 cs = task_cs(task); 4334 same_cs = (cs == task_cs(current)); 4335 rcu_read_unlock(); 4336 4337 if (same_cs) { 4338 if (cs == &top_cpuset) 4339 return; 4340 4341 set_cpus_allowed_ptr(task, current->cpus_ptr); 4342 task->mems_allowed = current->mems_allowed; 4343 return; 4344 } 4345 4346 /* CLONE_INTO_CGROUP */ 4347 mutex_lock(&cpuset_mutex); 4348 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); 4349 cpuset_attach_task(cs, task); 4350 4351 cs->attach_in_progress--; 4352 if (!cs->attach_in_progress) 4353 wake_up(&cpuset_attach_wq); 4354 4355 mutex_unlock(&cpuset_mutex); 4356 } 4357 4358 struct cgroup_subsys cpuset_cgrp_subsys = { 4359 .css_alloc = cpuset_css_alloc, 4360 .css_online = cpuset_css_online, 4361 .css_offline = cpuset_css_offline, 4362 .css_free = cpuset_css_free, 4363 .can_attach = cpuset_can_attach, 4364 .cancel_attach = cpuset_cancel_attach, 4365 .attach = cpuset_attach, 4366 .post_attach = cpuset_post_attach, 4367 .bind = cpuset_bind, 4368 .can_fork = cpuset_can_fork, 4369 .cancel_fork = cpuset_cancel_fork, 4370 .fork = cpuset_fork, 4371 .legacy_cftypes = legacy_files, 4372 .dfl_cftypes = dfl_files, 4373 .early_init = true, 4374 .threaded = true, 4375 }; 4376 4377 /** 4378 * cpuset_init - initialize cpusets at system boot 4379 * 4380 * Description: Initialize top_cpuset 4381 **/ 4382 4383 int __init cpuset_init(void) 4384 { 4385 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)); 4386 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)); 4387 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL)); 4388 BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL)); 4389 BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL)); 4390 BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL)); 4391 4392 cpumask_setall(top_cpuset.cpus_allowed); 4393 nodes_setall(top_cpuset.mems_allowed); 4394 cpumask_setall(top_cpuset.effective_cpus); 4395 cpumask_setall(top_cpuset.effective_xcpus); 4396 cpumask_setall(top_cpuset.exclusive_cpus); 4397 nodes_setall(top_cpuset.effective_mems); 4398 4399 fmeter_init(&top_cpuset.fmeter); 4400 INIT_LIST_HEAD(&remote_children); 4401 4402 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)); 4403 4404 return 0; 4405 } 4406 4407 /* 4408 * If CPU and/or memory hotplug handlers, below, unplug any CPUs 4409 * or memory nodes, we need to walk over the cpuset hierarchy, 4410 * removing that CPU or node from all cpusets. If this removes the 4411 * last CPU or node from a cpuset, then move the tasks in the empty 4412 * cpuset to its next-highest non-empty parent. 4413 */ 4414 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) 4415 { 4416 struct cpuset *parent; 4417 4418 /* 4419 * Find its next-highest non-empty parent, (top cpuset 4420 * has online cpus, so can't be empty). 
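 * The loop below is therefore guaranteed to terminate: in the worst
 * case it walks all the way up to top_cpuset, which always has both
 * online CPUs and memory nodes.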
4421 */ 4422 parent = parent_cs(cs); 4423 while (cpumask_empty(parent->cpus_allowed) || 4424 nodes_empty(parent->mems_allowed)) 4425 parent = parent_cs(parent); 4426 4427 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { 4428 pr_err("cpuset: failed to transfer tasks out of empty cpuset "); 4429 pr_cont_cgroup_name(cs->css.cgroup); 4430 pr_cont("\n"); 4431 } 4432 } 4433 4434 static void cpuset_migrate_tasks_workfn(struct work_struct *work) 4435 { 4436 struct cpuset_remove_tasks_struct *s; 4437 4438 s = container_of(work, struct cpuset_remove_tasks_struct, work); 4439 remove_tasks_in_empty_cpuset(s->cs); 4440 css_put(&s->cs->css); 4441 kfree(s); 4442 } 4443 4444 static void 4445 hotplug_update_tasks_legacy(struct cpuset *cs, 4446 struct cpumask *new_cpus, nodemask_t *new_mems, 4447 bool cpus_updated, bool mems_updated) 4448 { 4449 bool is_empty; 4450 4451 spin_lock_irq(&callback_lock); 4452 cpumask_copy(cs->cpus_allowed, new_cpus); 4453 cpumask_copy(cs->effective_cpus, new_cpus); 4454 cs->mems_allowed = *new_mems; 4455 cs->effective_mems = *new_mems; 4456 spin_unlock_irq(&callback_lock); 4457 4458 /* 4459 * Don't call update_tasks_cpumask() if the cpuset becomes empty, 4460 * as the tasks will be migrated to an ancestor. 4461 */ 4462 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) 4463 update_tasks_cpumask(cs, new_cpus); 4464 if (mems_updated && !nodes_empty(cs->mems_allowed)) 4465 update_tasks_nodemask(cs); 4466 4467 is_empty = cpumask_empty(cs->cpus_allowed) || 4468 nodes_empty(cs->mems_allowed); 4469 4470 /* 4471 * Move tasks to the nearest ancestor with execution resources, 4472 * This is full cgroup operation which will also call back into 4473 * cpuset. Execute it asynchronously using workqueue. 4474 */ 4475 if (is_empty && cs->css.cgroup->nr_populated_csets && 4476 css_tryget_online(&cs->css)) { 4477 struct cpuset_remove_tasks_struct *s; 4478 4479 s = kzalloc(sizeof(*s), GFP_KERNEL); 4480 if (WARN_ON_ONCE(!s)) { 4481 css_put(&cs->css); 4482 return; 4483 } 4484 4485 s->cs = cs; 4486 INIT_WORK(&s->work, cpuset_migrate_tasks_workfn); 4487 schedule_work(&s->work); 4488 } 4489 } 4490 4491 static void 4492 hotplug_update_tasks(struct cpuset *cs, 4493 struct cpumask *new_cpus, nodemask_t *new_mems, 4494 bool cpus_updated, bool mems_updated) 4495 { 4496 /* A partition root is allowed to have empty effective cpus */ 4497 if (cpumask_empty(new_cpus) && !is_partition_valid(cs)) 4498 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); 4499 if (nodes_empty(*new_mems)) 4500 *new_mems = parent_cs(cs)->effective_mems; 4501 4502 spin_lock_irq(&callback_lock); 4503 cpumask_copy(cs->effective_cpus, new_cpus); 4504 cs->effective_mems = *new_mems; 4505 spin_unlock_irq(&callback_lock); 4506 4507 if (cpus_updated) 4508 update_tasks_cpumask(cs, new_cpus); 4509 if (mems_updated) 4510 update_tasks_nodemask(cs); 4511 } 4512 4513 void cpuset_force_rebuild(void) 4514 { 4515 force_sd_rebuild = true; 4516 } 4517 4518 /** 4519 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug 4520 * @cs: cpuset in interest 4521 * @tmp: the tmpmasks structure pointer 4522 * 4523 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone 4524 * offline, update @cs accordingly. If @cs ends up with no CPU or memory, 4525 * all its tasks are moved to the nearest ancestor with both resources. 
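 *
 * Illustration (example CPUs only): if a legacy-hierarchy cpuset was
 * limited to CPUs 2-3 and both go offline, its cpus_allowed becomes
 * empty and its tasks are handed to the nearest non-empty ancestor via
 * hotplug_update_tasks_legacy(); on the default hierarchy the cpuset
 * instead falls back to the parent's effective_cpus and keeps its
 * tasks, see hotplug_update_tasks().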
4526 */ 4527 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) 4528 { 4529 static cpumask_t new_cpus; 4530 static nodemask_t new_mems; 4531 bool cpus_updated; 4532 bool mems_updated; 4533 bool remote; 4534 int partcmd = -1; 4535 struct cpuset *parent; 4536 retry: 4537 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); 4538 4539 mutex_lock(&cpuset_mutex); 4540 4541 /* 4542 * We have raced with task attaching. We wait until attaching 4543 * is finished, so we won't attach a task to an empty cpuset. 4544 */ 4545 if (cs->attach_in_progress) { 4546 mutex_unlock(&cpuset_mutex); 4547 goto retry; 4548 } 4549 4550 parent = parent_cs(cs); 4551 compute_effective_cpumask(&new_cpus, cs, parent); 4552 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); 4553 4554 if (!tmp || !cs->partition_root_state) 4555 goto update_tasks; 4556 4557 /* 4558 * Compute effective_cpus for valid partition root, may invalidate 4559 * child partition roots if necessary. 4560 */ 4561 remote = is_remote_partition(cs); 4562 if (remote || (is_partition_valid(cs) && is_partition_valid(parent))) 4563 compute_partition_effective_cpumask(cs, &new_cpus); 4564 4565 if (remote && cpumask_empty(&new_cpus) && 4566 partition_is_populated(cs, NULL)) { 4567 remote_partition_disable(cs, tmp); 4568 compute_effective_cpumask(&new_cpus, cs, parent); 4569 remote = false; 4570 cpuset_force_rebuild(); 4571 } 4572 4573 /* 4574 * Force the partition to become invalid if either one of 4575 * the following conditions hold: 4576 * 1) empty effective cpus but not valid empty partition. 4577 * 2) parent is invalid or doesn't grant any cpus to child 4578 * partitions. 4579 */ 4580 if (is_local_partition(cs) && (!is_partition_valid(parent) || 4581 tasks_nocpu_error(parent, cs, &new_cpus))) 4582 partcmd = partcmd_invalidate; 4583 /* 4584 * On the other hand, an invalid partition root may be transitioned 4585 * back to a regular one. 4586 */ 4587 else if (is_partition_valid(parent) && is_partition_invalid(cs)) 4588 partcmd = partcmd_update; 4589 4590 if (partcmd >= 0) { 4591 update_parent_effective_cpumask(cs, partcmd, NULL, tmp); 4592 if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) { 4593 compute_partition_effective_cpumask(cs, &new_cpus); 4594 cpuset_force_rebuild(); 4595 } 4596 } 4597 4598 update_tasks: 4599 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); 4600 mems_updated = !nodes_equal(new_mems, cs->effective_mems); 4601 if (!cpus_updated && !mems_updated) 4602 goto unlock; /* Hotplug doesn't affect this cpuset */ 4603 4604 if (mems_updated) 4605 check_insane_mems_config(&new_mems); 4606 4607 if (is_in_v2_mode()) 4608 hotplug_update_tasks(cs, &new_cpus, &new_mems, 4609 cpus_updated, mems_updated); 4610 else 4611 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, 4612 cpus_updated, mems_updated); 4613 4614 unlock: 4615 mutex_unlock(&cpuset_mutex); 4616 } 4617 4618 /** 4619 * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset 4620 * 4621 * This function is called after either CPU or memory configuration has 4622 * changed and updates cpuset accordingly. The top_cpuset is always 4623 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in 4624 * order to make cpusets transparent (of no affect) on systems that are 4625 * actively using CPU hotplug but making no active use of cpusets. 4626 * 4627 * Non-root cpusets are only affected by offlining. 
If any CPUs or memory 4628 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on 4629 * all descendants. 4630 * 4631 * Note that CPU offlining during suspend is ignored. We don't modify 4632 * cpusets across suspend/resume cycles at all. 4633 * 4634 * CPU / memory hotplug is handled synchronously. 4635 */ 4636 static void cpuset_handle_hotplug(void) 4637 { 4638 static cpumask_t new_cpus; 4639 static nodemask_t new_mems; 4640 bool cpus_updated, mems_updated; 4641 bool on_dfl = is_in_v2_mode(); 4642 struct tmpmasks tmp, *ptmp = NULL; 4643 4644 if (on_dfl && !alloc_cpumasks(NULL, &tmp)) 4645 ptmp = &tmp; 4646 4647 lockdep_assert_cpus_held(); 4648 mutex_lock(&cpuset_mutex); 4649 4650 /* fetch the available cpus/mems and find out which changed how */ 4651 cpumask_copy(&new_cpus, cpu_active_mask); 4652 new_mems = node_states[N_MEMORY]; 4653 4654 /* 4655 * If subpartitions_cpus is populated, it is likely that the check 4656 * below will produce a false positive on cpus_updated when the cpu 4657 * list isn't changed. It is extra work, but it is better to be safe. 4658 */ 4659 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) || 4660 !cpumask_empty(subpartitions_cpus); 4661 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); 4662 4663 /* For v1, synchronize cpus_allowed to cpu_active_mask */ 4664 if (cpus_updated) { 4665 cpuset_force_rebuild(); 4666 spin_lock_irq(&callback_lock); 4667 if (!on_dfl) 4668 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); 4669 /* 4670 * Make sure that CPUs allocated to child partitions 4671 * do not show up in effective_cpus. If no CPU is left, 4672 * we clear the subpartitions_cpus & let the child partitions 4673 * fight for the CPUs again. 4674 */ 4675 if (!cpumask_empty(subpartitions_cpus)) { 4676 if (cpumask_subset(&new_cpus, subpartitions_cpus)) { 4677 top_cpuset.nr_subparts = 0; 4678 cpumask_clear(subpartitions_cpus); 4679 } else { 4680 cpumask_andnot(&new_cpus, &new_cpus, 4681 subpartitions_cpus); 4682 } 4683 } 4684 cpumask_copy(top_cpuset.effective_cpus, &new_cpus); 4685 spin_unlock_irq(&callback_lock); 4686 /* we don't mess with cpumasks of tasks in top_cpuset */ 4687 } 4688 4689 /* synchronize mems_allowed to N_MEMORY */ 4690 if (mems_updated) { 4691 spin_lock_irq(&callback_lock); 4692 if (!on_dfl) 4693 top_cpuset.mems_allowed = new_mems; 4694 top_cpuset.effective_mems = new_mems; 4695 spin_unlock_irq(&callback_lock); 4696 update_tasks_nodemask(&top_cpuset); 4697 } 4698 4699 mutex_unlock(&cpuset_mutex); 4700 4701 /* if cpus or mems changed, we need to propagate to descendants */ 4702 if (cpus_updated || mems_updated) { 4703 struct cpuset *cs; 4704 struct cgroup_subsys_state *pos_css; 4705 4706 rcu_read_lock(); 4707 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { 4708 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) 4709 continue; 4710 rcu_read_unlock(); 4711 4712 cpuset_hotplug_update_tasks(cs, ptmp); 4713 4714 rcu_read_lock(); 4715 css_put(&cs->css); 4716 } 4717 rcu_read_unlock(); 4718 } 4719 4720 /* rebuild sched domains if cpus_allowed has changed */ 4721 if (force_sd_rebuild) { 4722 force_sd_rebuild = false; 4723 rebuild_sched_domains_cpuslocked(); 4724 } 4725 4726 free_cpumasks(NULL, ptmp); 4727 } 4728 4729 void cpuset_update_active_cpus(void) 4730 { 4731 /* 4732 * We're inside cpu hotplug critical region which usually nests 4733 * inside cgroup synchronization. Bounce actual hotplug processing 4734 * to a work item to avoid reverse locking order. 
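 * With cpuset_handle_hotplug() this processing is now performed
 * synchronously while the CPU hotplug lock is held (note the
 * lockdep_assert_cpus_held() check there); only the task-migration
 * step for emptied legacy cpusets is still deferred to a workqueue.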
4735 */ 4736 cpuset_handle_hotplug(); 4737 } 4738 4739 /* 4740 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. 4741 * Call this routine anytime after node_states[N_MEMORY] changes. 4742 * See cpuset_update_active_cpus() for CPU hotplug handling. 4743 */ 4744 static int cpuset_track_online_nodes(struct notifier_block *self, 4745 unsigned long action, void *arg) 4746 { 4747 cpuset_handle_hotplug(); 4748 return NOTIFY_OK; 4749 } 4750 4751 /** 4752 * cpuset_init_smp - initialize cpus_allowed 4753 * 4754 * Description: Finish top cpuset after cpu, node maps are initialized 4755 */ 4756 void __init cpuset_init_smp(void) 4757 { 4758 /* 4759 * cpus_allowd/mems_allowed set to v2 values in the initial 4760 * cpuset_bind() call will be reset to v1 values in another 4761 * cpuset_bind() call when v1 cpuset is mounted. 4762 */ 4763 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; 4764 4765 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); 4766 top_cpuset.effective_mems = node_states[N_MEMORY]; 4767 4768 hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI); 4769 4770 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); 4771 BUG_ON(!cpuset_migrate_mm_wq); 4772 } 4773 4774 /** 4775 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. 4776 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. 4777 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. 4778 * 4779 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset 4780 * attached to the specified @tsk. Guaranteed to return some non-empty 4781 * subset of cpu_online_mask, even if this means going outside the 4782 * tasks cpuset, except when the task is in the top cpuset. 4783 **/ 4784 4785 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) 4786 { 4787 unsigned long flags; 4788 struct cpuset *cs; 4789 4790 spin_lock_irqsave(&callback_lock, flags); 4791 rcu_read_lock(); 4792 4793 cs = task_cs(tsk); 4794 if (cs != &top_cpuset) 4795 guarantee_online_cpus(tsk, pmask); 4796 /* 4797 * Tasks in the top cpuset won't get update to their cpumasks 4798 * when a hotplug online/offline event happens. So we include all 4799 * offline cpus in the allowed cpu list. 4800 */ 4801 if ((cs == &top_cpuset) || cpumask_empty(pmask)) { 4802 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk); 4803 4804 /* 4805 * We first exclude cpus allocated to partitions. If there is no 4806 * allowable online cpu left, we fall back to all possible cpus. 4807 */ 4808 cpumask_andnot(pmask, possible_mask, subpartitions_cpus); 4809 if (!cpumask_intersects(pmask, cpu_online_mask)) 4810 cpumask_copy(pmask, possible_mask); 4811 } 4812 4813 rcu_read_unlock(); 4814 spin_unlock_irqrestore(&callback_lock, flags); 4815 } 4816 4817 /** 4818 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe. 4819 * @tsk: pointer to task_struct with which the scheduler is struggling 4820 * 4821 * Description: In the case that the scheduler cannot find an allowed cpu in 4822 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy 4823 * mode however, this value is the same as task_cs(tsk)->effective_cpus, 4824 * which will not contain a sane cpumask during cases such as cpu hotplugging. 4825 * This is the absolute last resort for the scheduler and it is only used if 4826 * _every_ other avenue has been traveled. 4827 * 4828 * Returns true if the affinity of @tsk was changed, false otherwise. 
4829 **/ 4830 4831 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk) 4832 { 4833 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk); 4834 const struct cpumask *cs_mask; 4835 bool changed = false; 4836 4837 rcu_read_lock(); 4838 cs_mask = task_cs(tsk)->cpus_allowed; 4839 if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) { 4840 do_set_cpus_allowed(tsk, cs_mask); 4841 changed = true; 4842 } 4843 rcu_read_unlock(); 4844 4845 /* 4846 * We own tsk->cpus_allowed, nobody can change it under us. 4847 * 4848 * But we used cs && cs->cpus_allowed lockless and thus can 4849 * race with cgroup_attach_task() or update_cpumask() and get 4850 * the wrong tsk->cpus_allowed. However, both cases imply the 4851 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() 4852 * which takes task_rq_lock(). 4853 * 4854 * If we are called after it dropped the lock we must see all 4855 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary 4856 * set any mask even if it is not right from task_cs() pov, 4857 * the pending set_cpus_allowed_ptr() will fix things. 4858 * 4859 * select_fallback_rq() will fix things ups and set cpu_possible_mask 4860 * if required. 4861 */ 4862 return changed; 4863 } 4864 4865 void __init cpuset_init_current_mems_allowed(void) 4866 { 4867 nodes_setall(current->mems_allowed); 4868 } 4869 4870 /** 4871 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset. 4872 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. 4873 * 4874 * Description: Returns the nodemask_t mems_allowed of the cpuset 4875 * attached to the specified @tsk. Guaranteed to return some non-empty 4876 * subset of node_states[N_MEMORY], even if this means going outside the 4877 * tasks cpuset. 4878 **/ 4879 4880 nodemask_t cpuset_mems_allowed(struct task_struct *tsk) 4881 { 4882 nodemask_t mask; 4883 unsigned long flags; 4884 4885 spin_lock_irqsave(&callback_lock, flags); 4886 rcu_read_lock(); 4887 guarantee_online_mems(task_cs(tsk), &mask); 4888 rcu_read_unlock(); 4889 spin_unlock_irqrestore(&callback_lock, flags); 4890 4891 return mask; 4892 } 4893 4894 /** 4895 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed 4896 * @nodemask: the nodemask to be checked 4897 * 4898 * Are any of the nodes in the nodemask allowed in current->mems_allowed? 4899 */ 4900 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) 4901 { 4902 return nodes_intersects(*nodemask, current->mems_allowed); 4903 } 4904 4905 /* 4906 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or 4907 * mem_hardwall ancestor to the specified cpuset. Call holding 4908 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall 4909 * (an unusual configuration), then returns the root cpuset. 4910 */ 4911 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) 4912 { 4913 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) 4914 cs = parent_cs(cs); 4915 return cs; 4916 } 4917 4918 /* 4919 * cpuset_node_allowed - Can we allocate on a memory node? 4920 * @node: is this an allowed node? 4921 * @gfp_mask: memory allocation flags 4922 * 4923 * If we're in interrupt, yes, we can always allocate. If @node is set in 4924 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this 4925 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset, 4926 * yes. If current has access to memory reserves as an oom victim, yes. 4927 * Otherwise, no. 
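 *
 * Worked example (node numbers are illustrative): a task confined to
 * node 1 whose nearest mem_hardwall ancestor allows nodes 0-1 may have
 * a GFP_KERNEL allocation fall back to node 0 when node 1 runs short,
 * while a GFP_USER (__GFP_HARDWALL) allocation must stay on node 1.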
4928 * 4929 * GFP_USER allocations are marked with the __GFP_HARDWALL bit, 4930 * and do not allow allocations outside the current tasks cpuset 4931 * unless the task has been OOM killed. 4932 * GFP_KERNEL allocations are not so marked, so can escape to the 4933 * nearest enclosing hardwalled ancestor cpuset. 4934 * 4935 * Scanning up parent cpusets requires callback_lock. The 4936 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit 4937 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the 4938 * current tasks mems_allowed came up empty on the first pass over 4939 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the 4940 * cpuset are short of memory, might require taking the callback_lock. 4941 * 4942 * The first call here from mm/page_alloc:get_page_from_freelist() 4943 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, 4944 * so no allocation on a node outside the cpuset is allowed (unless 4945 * in interrupt, of course). 4946 * 4947 * The second pass through get_page_from_freelist() doesn't even call 4948 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() 4949 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set 4950 * in alloc_flags. That logic and the checks below have the combined 4951 * affect that: 4952 * in_interrupt - any node ok (current task context irrelevant) 4953 * GFP_ATOMIC - any node ok 4954 * tsk_is_oom_victim - any node ok 4955 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok 4956 * GFP_USER - only nodes in current tasks mems allowed ok. 4957 */ 4958 bool cpuset_node_allowed(int node, gfp_t gfp_mask) 4959 { 4960 struct cpuset *cs; /* current cpuset ancestors */ 4961 bool allowed; /* is allocation in zone z allowed? */ 4962 unsigned long flags; 4963 4964 if (in_interrupt()) 4965 return true; 4966 if (node_isset(node, current->mems_allowed)) 4967 return true; 4968 /* 4969 * Allow tasks that have access to memory reserves because they have 4970 * been OOM killed to get memory anywhere. 4971 */ 4972 if (unlikely(tsk_is_oom_victim(current))) 4973 return true; 4974 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ 4975 return false; 4976 4977 if (current->flags & PF_EXITING) /* Let dying task have memory */ 4978 return true; 4979 4980 /* Not hardwall and node outside mems_allowed: scan up cpusets */ 4981 spin_lock_irqsave(&callback_lock, flags); 4982 4983 rcu_read_lock(); 4984 cs = nearest_hardwall_ancestor(task_cs(current)); 4985 allowed = node_isset(node, cs->mems_allowed); 4986 rcu_read_unlock(); 4987 4988 spin_unlock_irqrestore(&callback_lock, flags); 4989 return allowed; 4990 } 4991 4992 /** 4993 * cpuset_spread_node() - On which node to begin search for a page 4994 * @rotor: round robin rotor 4995 * 4996 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for 4997 * tasks in a cpuset with is_spread_page or is_spread_slab set), 4998 * and if the memory allocation used cpuset_mem_spread_node() 4999 * to determine on which node to start looking, as it will for 5000 * certain page cache or slab cache pages such as used for file 5001 * system buffers and inode caches, then instead of starting on the 5002 * local node to look for a free page, rather spread the starting 5003 * node around the tasks mems_allowed nodes. 5004 * 5005 * We don't have to worry about the returned node being offline 5006 * because "it can't happen", and even if it did, it would be ok. 
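 *
 * Example (node values are illustrative): with mems_allowed containing
 * nodes 0 and 2, successive calls alternate between 0 and 2, since
 * next_node_in() wraps around the allowed set.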
5007 * 5008 * The routines calling guarantee_online_mems() are careful to 5009 * only set nodes in task->mems_allowed that are online. So it 5010 * should not be possible for the following code to return an 5011 * offline node. But if it did, that would be ok, as this routine 5012 * is not returning the node where the allocation must be, only 5013 * the node where the search should start. The zonelist passed to 5014 * __alloc_pages() will include all nodes. If the slab allocator 5015 * is passed an offline node, it will fall back to the local node. 5016 * See kmem_cache_alloc_node(). 5017 */ 5018 static int cpuset_spread_node(int *rotor) 5019 { 5020 return *rotor = next_node_in(*rotor, current->mems_allowed); 5021 } 5022 5023 /** 5024 * cpuset_mem_spread_node() - On which node to begin search for a file page 5025 */ 5026 int cpuset_mem_spread_node(void) 5027 { 5028 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) 5029 current->cpuset_mem_spread_rotor = 5030 node_random(¤t->mems_allowed); 5031 5032 return cpuset_spread_node(¤t->cpuset_mem_spread_rotor); 5033 } 5034 5035 /** 5036 * cpuset_slab_spread_node() - On which node to begin search for a slab page 5037 */ 5038 int cpuset_slab_spread_node(void) 5039 { 5040 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) 5041 current->cpuset_slab_spread_rotor = 5042 node_random(¤t->mems_allowed); 5043 5044 return cpuset_spread_node(¤t->cpuset_slab_spread_rotor); 5045 } 5046 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); 5047 5048 /** 5049 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? 5050 * @tsk1: pointer to task_struct of some task. 5051 * @tsk2: pointer to task_struct of some other task. 5052 * 5053 * Description: Return true if @tsk1's mems_allowed intersects the 5054 * mems_allowed of @tsk2. Used by the OOM killer to determine if 5055 * one of the task's memory usage might impact the memory available 5056 * to the other. 5057 **/ 5058 5059 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, 5060 const struct task_struct *tsk2) 5061 { 5062 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); 5063 } 5064 5065 /** 5066 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed 5067 * 5068 * Description: Prints current's name, cpuset name, and cached copy of its 5069 * mems_allowed to the kernel log. 5070 */ 5071 void cpuset_print_current_mems_allowed(void) 5072 { 5073 struct cgroup *cgrp; 5074 5075 rcu_read_lock(); 5076 5077 cgrp = task_cs(current)->css.cgroup; 5078 pr_cont(",cpuset="); 5079 pr_cont_cgroup_name(cgrp); 5080 pr_cont(",mems_allowed=%*pbl", 5081 nodemask_pr_args(¤t->mems_allowed)); 5082 5083 rcu_read_unlock(); 5084 } 5085 5086 /* 5087 * Collection of memory_pressure is suppressed unless 5088 * this flag is enabled by writing "1" to the special 5089 * cpuset file 'memory_pressure_enabled' in the root cpuset. 5090 */ 5091 5092 int cpuset_memory_pressure_enabled __read_mostly; 5093 5094 /* 5095 * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. 5096 * 5097 * Keep a running average of the rate of synchronous (direct) 5098 * page reclaim efforts initiated by tasks in each cpuset. 5099 * 5100 * This represents the rate at which some task in the cpuset 5101 * ran low on memory on all nodes it was allowed to use, and 5102 * had to enter the kernels page reclaim code in an effort to 5103 * create more free memory by tossing clean pages or swapping 5104 * or writing dirty pages. 
5105 * 5106 * Display to user space in the per-cpuset read-only file 5107 * "memory_pressure". Value displayed is an integer 5108 * representing the recent rate of entry into the synchronous 5109 * (direct) page reclaim by any task attached to the cpuset. 5110 */ 5111 5112 void __cpuset_memory_pressure_bump(void) 5113 { 5114 rcu_read_lock(); 5115 fmeter_markevent(&task_cs(current)->fmeter); 5116 rcu_read_unlock(); 5117 } 5118 5119 #ifdef CONFIG_PROC_PID_CPUSET 5120 /* 5121 * proc_cpuset_show() 5122 * - Print tasks cpuset path into seq_file. 5123 * - Used for /proc/<pid>/cpuset. 5124 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it 5125 * doesn't really matter if tsk->cpuset changes after we read it, 5126 * and we take cpuset_mutex, keeping cpuset_attach() from changing it 5127 * anyway. 5128 */ 5129 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, 5130 struct pid *pid, struct task_struct *tsk) 5131 { 5132 char *buf; 5133 struct cgroup_subsys_state *css; 5134 int retval; 5135 5136 retval = -ENOMEM; 5137 buf = kmalloc(PATH_MAX, GFP_KERNEL); 5138 if (!buf) 5139 goto out; 5140 5141 rcu_read_lock(); 5142 spin_lock_irq(&css_set_lock); 5143 css = task_css(tsk, cpuset_cgrp_id); 5144 retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX, 5145 current->nsproxy->cgroup_ns); 5146 spin_unlock_irq(&css_set_lock); 5147 rcu_read_unlock(); 5148 5149 if (retval == -E2BIG) 5150 retval = -ENAMETOOLONG; 5151 if (retval < 0) 5152 goto out_free; 5153 seq_puts(m, buf); 5154 seq_putc(m, '\n'); 5155 retval = 0; 5156 out_free: 5157 kfree(buf); 5158 out: 5159 return retval; 5160 } 5161 #endif /* CONFIG_PROC_PID_CPUSET */ 5162 5163 /* Display task mems_allowed in /proc/<pid>/status file. */ 5164 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) 5165 { 5166 seq_printf(m, "Mems_allowed:\t%*pb\n", 5167 nodemask_pr_args(&task->mems_allowed)); 5168 seq_printf(m, "Mems_allowed_list:\t%*pbl\n", 5169 nodemask_pr_args(&task->mems_allowed)); 5170 } 5171
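/*
 * Purely illustrative example of the /proc/<pid>/status lines emitted by
 * cpuset_task_status_allowed() above (mask width and node values depend
 * on the configuration and are made up here):
 *
 *   Mems_allowed:	00000000,0000000f
 *   Mems_allowed_list:	0-3
 */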