1 /* 2 * kernel/cpuset.c 3 * 4 * Processor and Memory placement constraints for sets of tasks. 5 * 6 * Copyright (C) 2003 BULL SA. 7 * Copyright (C) 2004-2007 Silicon Graphics, Inc. 8 * Copyright (C) 2006 Google, Inc 9 * 10 * Portions derived from Patrick Mochel's sysfs code. 11 * sysfs is Copyright (c) 2001-3 Patrick Mochel 12 * 13 * 2003-10-10 Written by Simon Derr. 14 * 2003-10-22 Updates by Stephen Hemminger. 15 * 2004 May-July Rework by Paul Jackson. 16 * 2006 Rework by Paul Menage to use generic cgroups 17 * 2008 Rework of the scheduler domains and CPU hotplug handling 18 * by Max Krasnyansky 19 * 20 * This file is subject to the terms and conditions of the GNU General Public 21 * License. See the file COPYING in the main directory of the Linux 22 * distribution for more details. 23 */ 24 25 #include <linux/cpu.h> 26 #include <linux/cpumask.h> 27 #include <linux/cpuset.h> 28 #include <linux/init.h> 29 #include <linux/interrupt.h> 30 #include <linux/kernel.h> 31 #include <linux/mempolicy.h> 32 #include <linux/mm.h> 33 #include <linux/memory.h> 34 #include <linux/export.h> 35 #include <linux/rcupdate.h> 36 #include <linux/sched.h> 37 #include <linux/sched/deadline.h> 38 #include <linux/sched/mm.h> 39 #include <linux/sched/task.h> 40 #include <linux/security.h> 41 #include <linux/spinlock.h> 42 #include <linux/oom.h> 43 #include <linux/sched/isolation.h> 44 #include <linux/cgroup.h> 45 #include <linux/wait.h> 46 47 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key); 48 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key); 49 50 /* 51 * There could be abnormal cpuset configurations for cpu or memory 52 * node binding, add this key to provide a quick low-cost judgment 53 * of the situation. 54 */ 55 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key); 56 57 /* See "Frequency meter" comments, below. */ 58 59 struct fmeter { 60 int cnt; /* unprocessed events count */ 61 int val; /* most recent output value */ 62 time64_t time; /* clock (secs) when val computed */ 63 spinlock_t lock; /* guards read or write of above */ 64 }; 65 66 /* 67 * Invalid partition error code 68 */ 69 enum prs_errcode { 70 PERR_NONE = 0, 71 PERR_INVCPUS, 72 PERR_INVPARENT, 73 PERR_NOTPART, 74 PERR_NOTEXCL, 75 PERR_NOCPUS, 76 PERR_HOTPLUG, 77 PERR_CPUSEMPTY, 78 PERR_HKEEPING, 79 }; 80 81 static const char * const perr_strings[] = { 82 [PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus.exclusive", 83 [PERR_INVPARENT] = "Parent is an invalid partition root", 84 [PERR_NOTPART] = "Parent is not a partition root", 85 [PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive", 86 [PERR_NOCPUS] = "Parent unable to distribute cpu downstream", 87 [PERR_HOTPLUG] = "No cpu available due to hotplug", 88 [PERR_CPUSEMPTY] = "cpuset.cpus is empty", 89 [PERR_HKEEPING] = "partition config conflicts with housekeeping setup", 90 }; 91 92 struct cpuset { 93 struct cgroup_subsys_state css; 94 95 unsigned long flags; /* "unsigned long" so bitops work */ 96 97 /* 98 * On default hierarchy: 99 * 100 * The user-configured masks can only be changed by writing to 101 * cpuset.cpus and cpuset.mems, and won't be limited by the 102 * parent masks. 103 * 104 * The effective masks is the real masks that apply to the tasks 105 * in the cpuset. They may be changed if the configured masks are 106 * changed or hotplug happens. 107 * 108 * effective_mask == configured_mask & parent's effective_mask, 109 * and if it ends up empty, it will inherit the parent's mask. 
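	 *
	 * For example (illustrative values only): a cpuset that configures
	 * cpus "0-3" under a parent whose effective_cpus is "2-5" ends up
	 * with an effective_cpus of "2-3"; if the intersection were empty,
	 * the parent's effective mask would be inherited instead.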
	 *
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective masks.
	 */

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * Exclusive CPUs dedicated to current cgroup (default hierarchy only)
	 *
	 * These exclusive CPUs must be a subset of cpus_allowed. A parent
	 * cgroup can only grant exclusive CPUs to one of its children.
	 *
	 * When the cgroup becomes a valid partition root, effective_xcpus
	 * defaults to cpus_allowed if not set. The effective_cpus of a valid
	 * partition root comes solely from its effective_xcpus, and some of
	 * the effective_xcpus may be distributed to sub-partitions below &
	 * hence excluded from its effective_cpus.
	 */
	cpumask_var_t effective_xcpus;

	/*
	 * Exclusive CPUs as requested by the user (default hierarchy only)
	 */
	cpumask_var_t exclusive_cpus;

	/*
	 * The old Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset. Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;

	/* number of valid sub-partitions */
	int nr_subparts;

	/* partition root state */
	int partition_root_state;

	/*
	 * Default hierarchy only:
	 * use_parent_ecpus - set if using parent's effective_cpus
	 * child_ecpus_count - # of children with use_parent_ecpus set
	 */
	int use_parent_ecpus;
	int child_ecpus_count;

	/*
	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
	 * know when to rebuild associated root domain bandwidth information.
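	 * (See dl_update_tasks_root_domain() below, which returns early when
	 * this count is zero.)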
187 */ 188 int nr_deadline_tasks; 189 int nr_migrate_dl_tasks; 190 u64 sum_migrate_dl_bw; 191 192 /* Invalid partition error code, not lock protected */ 193 enum prs_errcode prs_err; 194 195 /* Handle for cpuset.cpus.partition */ 196 struct cgroup_file partition_file; 197 198 /* Remote partition silbling list anchored at remote_children */ 199 struct list_head remote_sibling; 200 }; 201 202 /* 203 * Exclusive CPUs distributed out to sub-partitions of top_cpuset 204 */ 205 static cpumask_var_t subpartitions_cpus; 206 207 /* List of remote partition root children */ 208 static struct list_head remote_children; 209 210 /* 211 * Partition root states: 212 * 213 * 0 - member (not a partition root) 214 * 1 - partition root 215 * 2 - partition root without load balancing (isolated) 216 * -1 - invalid partition root 217 * -2 - invalid isolated partition root 218 */ 219 #define PRS_MEMBER 0 220 #define PRS_ROOT 1 221 #define PRS_ISOLATED 2 222 #define PRS_INVALID_ROOT -1 223 #define PRS_INVALID_ISOLATED -2 224 225 static inline bool is_prs_invalid(int prs_state) 226 { 227 return prs_state < 0; 228 } 229 230 /* 231 * Temporary cpumasks for working with partitions that are passed among 232 * functions to avoid memory allocation in inner functions. 233 */ 234 struct tmpmasks { 235 cpumask_var_t addmask, delmask; /* For partition root */ 236 cpumask_var_t new_cpus; /* For update_cpumasks_hier() */ 237 }; 238 239 static inline struct cpuset *css_cs(struct cgroup_subsys_state *css) 240 { 241 return css ? container_of(css, struct cpuset, css) : NULL; 242 } 243 244 /* Retrieve the cpuset for a task */ 245 static inline struct cpuset *task_cs(struct task_struct *task) 246 { 247 return css_cs(task_css(task, cpuset_cgrp_id)); 248 } 249 250 static inline struct cpuset *parent_cs(struct cpuset *cs) 251 { 252 return css_cs(cs->css.parent); 253 } 254 255 void inc_dl_tasks_cs(struct task_struct *p) 256 { 257 struct cpuset *cs = task_cs(p); 258 259 cs->nr_deadline_tasks++; 260 } 261 262 void dec_dl_tasks_cs(struct task_struct *p) 263 { 264 struct cpuset *cs = task_cs(p); 265 266 cs->nr_deadline_tasks--; 267 } 268 269 /* bits in struct cpuset flags field */ 270 typedef enum { 271 CS_ONLINE, 272 CS_CPU_EXCLUSIVE, 273 CS_MEM_EXCLUSIVE, 274 CS_MEM_HARDWALL, 275 CS_MEMORY_MIGRATE, 276 CS_SCHED_LOAD_BALANCE, 277 CS_SPREAD_PAGE, 278 CS_SPREAD_SLAB, 279 } cpuset_flagbits_t; 280 281 /* convenient tests for these bits */ 282 static inline bool is_cpuset_online(struct cpuset *cs) 283 { 284 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); 285 } 286 287 static inline int is_cpu_exclusive(const struct cpuset *cs) 288 { 289 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); 290 } 291 292 static inline int is_mem_exclusive(const struct cpuset *cs) 293 { 294 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); 295 } 296 297 static inline int is_mem_hardwall(const struct cpuset *cs) 298 { 299 return test_bit(CS_MEM_HARDWALL, &cs->flags); 300 } 301 302 static inline int is_sched_load_balance(const struct cpuset *cs) 303 { 304 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 305 } 306 307 static inline int is_memory_migrate(const struct cpuset *cs) 308 { 309 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); 310 } 311 312 static inline int is_spread_page(const struct cpuset *cs) 313 { 314 return test_bit(CS_SPREAD_PAGE, &cs->flags); 315 } 316 317 static inline int is_spread_slab(const struct cpuset *cs) 318 { 319 return test_bit(CS_SPREAD_SLAB, &cs->flags); 320 } 321 322 static inline int is_partition_valid(const struct cpuset 
*cs)
{
	return cs->partition_root_state > 0;
}

static inline int is_partition_invalid(const struct cpuset *cs)
{
	return cs->partition_root_state < 0;
}

/*
 * Callers should hold callback_lock to modify partition_root_state.
 */
static inline void make_partition_invalid(struct cpuset *cs)
{
	if (cs->partition_root_state > 0)
		cs->partition_root_state = -cs->partition_root_state;
}

/*
 * Send a notification event whenever partition_root_state changes.
 */
static inline void notify_partition_change(struct cpuset *cs, int old_prs)
{
	if (old_prs == cs->partition_root_state)
		return;
	cgroup_file_notify(&cs->partition_file);

	/* Reset prs_err if no longer invalid */
	if (is_partition_valid(cs))
		WRITE_ONCE(cs->prs_err, PERR_NONE);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
	.partition_root_state = PRS_ROOT,
	.remote_sibling = LIST_HEAD_INIT(top_cpuset.remote_sibling),
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset whose descendants are walked
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip a subtree.  @root_cs is included in the
 * iteration and is the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)	\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))

/*
 * There are two global locks guarding cpuset structures - cpuset_mutex and
 * callback_lock. We also require taking task_lock() when dereferencing a
 * task's cpuset pointer. See "The task_lock() exception", at the end of this
 * comment.  The cpuset code uses only cpuset_mutex. Other kernel subsystems
 * can use cpuset_lock()/cpuset_unlock() to prevent changes to cpuset
 * structures. Note that cpuset_mutex needs to be a mutex as it is used in
 * paths that rely on priority inheritance (e.g. scheduler - on RT) for
 * correctness.
 *
 * A task must hold both locks to modify cpusets.  If a task holds
 * cpuset_mutex, it blocks others, ensuring that it is the only task able to
 * also acquire callback_lock and be able to modify cpusets.  It can perform
 * various checks on the cpuset structure first, knowing nothing will change.
 * It can also allocate memory while just holding cpuset_mutex.  While it is
 * performing these checks, various callback routines can briefly acquire
 * callback_lock to query cpusets.  Once it is ready to make the changes, it
 * takes callback_lock, blocking everyone else.
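 *
 * A typical modifier therefore follows this pattern (a sketch only, not a
 * real function in this file):
 *
 *	mutex_lock(&cpuset_mutex);
 *	... validate the request, allocate any memory needed ...
 *	spin_lock_irq(&callback_lock);
 *	... update the cpumasks/nodemasks ...
 *	spin_unlock_irq(&callback_lock);
 *	mutex_unlock(&cpuset_mutex);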
408 * 409 * Calls to the kernel memory allocator can not be made while holding 410 * callback_lock, as that would risk double tripping on callback_lock 411 * from one of the callbacks into the cpuset code from within 412 * __alloc_pages(). 413 * 414 * If a task is only holding callback_lock, then it has read-only 415 * access to cpusets. 416 * 417 * Now, the task_struct fields mems_allowed and mempolicy may be changed 418 * by other task, we use alloc_lock in the task_struct fields to protect 419 * them. 420 * 421 * The cpuset_common_file_read() handlers only hold callback_lock across 422 * small pieces of code, such as when reading out possibly multi-word 423 * cpumasks and nodemasks. 424 * 425 * Accessing a task's cpuset should be done in accordance with the 426 * guidelines for accessing subsystem state in kernel/cgroup.c 427 */ 428 429 static DEFINE_MUTEX(cpuset_mutex); 430 431 void cpuset_lock(void) 432 { 433 mutex_lock(&cpuset_mutex); 434 } 435 436 void cpuset_unlock(void) 437 { 438 mutex_unlock(&cpuset_mutex); 439 } 440 441 static DEFINE_SPINLOCK(callback_lock); 442 443 static struct workqueue_struct *cpuset_migrate_mm_wq; 444 445 /* 446 * CPU / memory hotplug is handled asynchronously. 447 */ 448 static void cpuset_hotplug_workfn(struct work_struct *work); 449 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn); 450 451 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq); 452 453 static inline void check_insane_mems_config(nodemask_t *nodes) 454 { 455 if (!cpusets_insane_config() && 456 movable_only_nodes(nodes)) { 457 static_branch_enable(&cpusets_insane_config_key); 458 pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n" 459 "Cpuset allocations might fail even with a lot of memory available.\n", 460 nodemask_pr_args(nodes)); 461 } 462 } 463 464 /* 465 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when 466 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting 467 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option. 468 * With v2 behavior, "cpus" and "mems" are always what the users have 469 * requested and won't be changed by hotplug events. Only the effective 470 * cpus or mems will be affected. 471 */ 472 static inline bool is_in_v2_mode(void) 473 { 474 return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || 475 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE); 476 } 477 478 /** 479 * partition_is_populated - check if partition has tasks 480 * @cs: partition root to be checked 481 * @excluded_child: a child cpuset to be excluded in task checking 482 * Return: true if there are tasks, false otherwise 483 * 484 * It is assumed that @cs is a valid partition root. @excluded_child should 485 * be non-NULL when this cpuset is going to become a partition itself. 
486 */ 487 static inline bool partition_is_populated(struct cpuset *cs, 488 struct cpuset *excluded_child) 489 { 490 struct cgroup_subsys_state *css; 491 struct cpuset *child; 492 493 if (cs->css.cgroup->nr_populated_csets) 494 return true; 495 if (!excluded_child && !cs->nr_subparts) 496 return cgroup_is_populated(cs->css.cgroup); 497 498 rcu_read_lock(); 499 cpuset_for_each_child(child, css, cs) { 500 if (child == excluded_child) 501 continue; 502 if (is_partition_valid(child)) 503 continue; 504 if (cgroup_is_populated(child->css.cgroup)) { 505 rcu_read_unlock(); 506 return true; 507 } 508 } 509 rcu_read_unlock(); 510 return false; 511 } 512 513 /* 514 * Return in pmask the portion of a task's cpusets's cpus_allowed that 515 * are online and are capable of running the task. If none are found, 516 * walk up the cpuset hierarchy until we find one that does have some 517 * appropriate cpus. 518 * 519 * One way or another, we guarantee to return some non-empty subset 520 * of cpu_online_mask. 521 * 522 * Call with callback_lock or cpuset_mutex held. 523 */ 524 static void guarantee_online_cpus(struct task_struct *tsk, 525 struct cpumask *pmask) 526 { 527 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk); 528 struct cpuset *cs; 529 530 if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask))) 531 cpumask_copy(pmask, cpu_online_mask); 532 533 rcu_read_lock(); 534 cs = task_cs(tsk); 535 536 while (!cpumask_intersects(cs->effective_cpus, pmask)) { 537 cs = parent_cs(cs); 538 if (unlikely(!cs)) { 539 /* 540 * The top cpuset doesn't have any online cpu as a 541 * consequence of a race between cpuset_hotplug_work 542 * and cpu hotplug notifier. But we know the top 543 * cpuset's effective_cpus is on its way to be 544 * identical to cpu_online_mask. 545 */ 546 goto out_unlock; 547 } 548 } 549 cpumask_and(pmask, pmask, cs->effective_cpus); 550 551 out_unlock: 552 rcu_read_unlock(); 553 } 554 555 /* 556 * Return in *pmask the portion of a cpusets's mems_allowed that 557 * are online, with memory. If none are online with memory, walk 558 * up the cpuset hierarchy until we find one that does have some 559 * online mems. The top cpuset always has some mems online. 560 * 561 * One way or another, we guarantee to return some non-empty subset 562 * of node_states[N_MEMORY]. 563 * 564 * Call with callback_lock or cpuset_mutex held. 565 */ 566 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) 567 { 568 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) 569 cs = parent_cs(cs); 570 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); 571 } 572 573 /* 574 * update task's spread flag if cpuset's page/slab spread flag is set 575 * 576 * Call with callback_lock or cpuset_mutex held. The check can be skipped 577 * if on default hierarchy. 578 */ 579 static void cpuset_update_task_spread_flags(struct cpuset *cs, 580 struct task_struct *tsk) 581 { 582 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) 583 return; 584 585 if (is_spread_page(cs)) 586 task_set_spread_page(tsk); 587 else 588 task_clear_spread_page(tsk); 589 590 if (is_spread_slab(cs)) 591 task_set_spread_slab(tsk); 592 else 593 task_clear_spread_slab(tsk); 594 } 595 596 /* 597 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q? 598 * 599 * One cpuset is a subset of another if all its allowed CPUs and 600 * Memory Nodes are a subset of the other, and its exclusive flags 601 * are only set if the other's are set. Call holding cpuset_mutex. 
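 *
 * For example, a cpuset allowed cpus "1-2" and mems "0" is a subset of one
 * allowed cpus "0-3" and mems "0-1", provided the exclusive-flag condition
 * above also holds.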
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_cpumasks - allocate the cpumasks of a cpuset or a tmpmasks structure
 * @cs: the cpuset that has cpumasks to be allocated.
 * @tmp: the tmpmasks structure pointer
 * Return: 0 if successful, -ENOMEM otherwise.
 *
 * Only one of the two input arguments should be non-NULL.
 */
static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	cpumask_var_t *pmask1, *pmask2, *pmask3, *pmask4;

	if (cs) {
		pmask1 = &cs->cpus_allowed;
		pmask2 = &cs->effective_cpus;
		pmask3 = &cs->effective_xcpus;
		pmask4 = &cs->exclusive_cpus;
	} else {
		pmask1 = &tmp->new_cpus;
		pmask2 = &tmp->addmask;
		pmask3 = &tmp->delmask;
		pmask4 = NULL;
	}

	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
		goto free_one;

	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
		goto free_two;

	if (pmask4 && !zalloc_cpumask_var(pmask4, GFP_KERNEL))
		goto free_three;

	return 0;

free_three:
	free_cpumask_var(*pmask3);
free_two:
	free_cpumask_var(*pmask2);
free_one:
	free_cpumask_var(*pmask1);
	return -ENOMEM;
}

/**
 * free_cpumasks - free the cpumasks of a cpuset or a tmpmasks structure
 * @cs: the cpuset that has cpumasks to be freed.
 * @tmp: the tmpmasks structure pointer
 */
static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	if (cs) {
		free_cpumask_var(cs->cpus_allowed);
		free_cpumask_var(cs->effective_cpus);
		free_cpumask_var(cs->effective_xcpus);
		free_cpumask_var(cs->exclusive_cpus);
	}
	if (tmp) {
		free_cpumask_var(tmp->new_cpus);
		free_cpumask_var(tmp->addmask);
		free_cpumask_var(tmp->delmask);
	}
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (alloc_cpumasks(trial, NULL)) {
		kfree(trial);
		return NULL;
	}

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
	cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
	cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
	return trial;
}

/**
 * free_cpuset - free the cpuset
 * @cs: the cpuset to be freed
 */
static inline void free_cpuset(struct cpuset *cs)
{
	free_cpumasks(cs, NULL);
	kfree(cs);
}

static inline struct cpumask *fetch_xcpus(struct cpuset *cs)
{
	return !cpumask_empty(cs->exclusive_cpus) ? cs->exclusive_cpus :
	       cpumask_empty(cs->effective_xcpus) ?
cs->cpus_allowed 718 : cs->effective_xcpus; 719 } 720 721 /* 722 * cpusets_are_exclusive() - check if two cpusets are exclusive 723 * 724 * Return true if exclusive, false if not 725 */ 726 static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2) 727 { 728 struct cpumask *xcpus1 = fetch_xcpus(cs1); 729 struct cpumask *xcpus2 = fetch_xcpus(cs2); 730 731 if (cpumask_intersects(xcpus1, xcpus2)) 732 return false; 733 return true; 734 } 735 736 /* 737 * validate_change_legacy() - Validate conditions specific to legacy (v1) 738 * behavior. 739 */ 740 static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial) 741 { 742 struct cgroup_subsys_state *css; 743 struct cpuset *c, *par; 744 int ret; 745 746 WARN_ON_ONCE(!rcu_read_lock_held()); 747 748 /* Each of our child cpusets must be a subset of us */ 749 ret = -EBUSY; 750 cpuset_for_each_child(c, css, cur) 751 if (!is_cpuset_subset(c, trial)) 752 goto out; 753 754 /* On legacy hierarchy, we must be a subset of our parent cpuset. */ 755 ret = -EACCES; 756 par = parent_cs(cur); 757 if (par && !is_cpuset_subset(trial, par)) 758 goto out; 759 760 ret = 0; 761 out: 762 return ret; 763 } 764 765 /* 766 * validate_change() - Used to validate that any proposed cpuset change 767 * follows the structural rules for cpusets. 768 * 769 * If we replaced the flag and mask values of the current cpuset 770 * (cur) with those values in the trial cpuset (trial), would 771 * our various subset and exclusive rules still be valid? Presumes 772 * cpuset_mutex held. 773 * 774 * 'cur' is the address of an actual, in-use cpuset. Operations 775 * such as list traversal that depend on the actual address of the 776 * cpuset in the list must use cur below, not trial. 777 * 778 * 'trial' is the address of bulk structure copy of cur, with 779 * perhaps one or more of the fields cpus_allowed, mems_allowed, 780 * or flags changed to new, trial values. 781 * 782 * Return 0 if valid, -errno if not. 783 */ 784 785 static int validate_change(struct cpuset *cur, struct cpuset *trial) 786 { 787 struct cgroup_subsys_state *css; 788 struct cpuset *c, *par; 789 int ret = 0; 790 791 rcu_read_lock(); 792 793 if (!is_in_v2_mode()) 794 ret = validate_change_legacy(cur, trial); 795 if (ret) 796 goto out; 797 798 /* Remaining checks don't apply to root cpuset */ 799 if (cur == &top_cpuset) 800 goto out; 801 802 par = parent_cs(cur); 803 804 /* 805 * Cpusets with tasks - existing or newly being attached - can't 806 * be changed to have empty cpus_allowed or mems_allowed. 807 */ 808 ret = -ENOSPC; 809 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { 810 if (!cpumask_empty(cur->cpus_allowed) && 811 cpumask_empty(trial->cpus_allowed)) 812 goto out; 813 if (!nodes_empty(cur->mems_allowed) && 814 nodes_empty(trial->mems_allowed)) 815 goto out; 816 } 817 818 /* 819 * We can't shrink if we won't have enough room for SCHED_DEADLINE 820 * tasks. 
821 */ 822 ret = -EBUSY; 823 if (is_cpu_exclusive(cur) && 824 !cpuset_cpumask_can_shrink(cur->cpus_allowed, 825 trial->cpus_allowed)) 826 goto out; 827 828 /* 829 * If either I or some sibling (!= me) is exclusive, we can't 830 * overlap 831 */ 832 ret = -EINVAL; 833 cpuset_for_each_child(c, css, par) { 834 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && 835 c != cur) { 836 if (!cpusets_are_exclusive(trial, c)) 837 goto out; 838 } 839 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && 840 c != cur && 841 nodes_intersects(trial->mems_allowed, c->mems_allowed)) 842 goto out; 843 } 844 845 ret = 0; 846 out: 847 rcu_read_unlock(); 848 return ret; 849 } 850 851 #ifdef CONFIG_SMP 852 /* 853 * Helper routine for generate_sched_domains(). 854 * Do cpusets a, b have overlapping effective cpus_allowed masks? 855 */ 856 static int cpusets_overlap(struct cpuset *a, struct cpuset *b) 857 { 858 return cpumask_intersects(a->effective_cpus, b->effective_cpus); 859 } 860 861 static void 862 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) 863 { 864 if (dattr->relax_domain_level < c->relax_domain_level) 865 dattr->relax_domain_level = c->relax_domain_level; 866 return; 867 } 868 869 static void update_domain_attr_tree(struct sched_domain_attr *dattr, 870 struct cpuset *root_cs) 871 { 872 struct cpuset *cp; 873 struct cgroup_subsys_state *pos_css; 874 875 rcu_read_lock(); 876 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { 877 /* skip the whole subtree if @cp doesn't have any CPU */ 878 if (cpumask_empty(cp->cpus_allowed)) { 879 pos_css = css_rightmost_descendant(pos_css); 880 continue; 881 } 882 883 if (is_sched_load_balance(cp)) 884 update_domain_attr(dattr, cp); 885 } 886 rcu_read_unlock(); 887 } 888 889 /* Must be called with cpuset_mutex held. */ 890 static inline int nr_cpusets(void) 891 { 892 /* jump label reference count + the top-level cpuset */ 893 return static_key_count(&cpusets_enabled_key.key) + 1; 894 } 895 896 /* 897 * generate_sched_domains() 898 * 899 * This function builds a partial partition of the systems CPUs 900 * A 'partial partition' is a set of non-overlapping subsets whose 901 * union is a subset of that set. 902 * The output of this function needs to be passed to kernel/sched/core.c 903 * partition_sched_domains() routine, which will rebuild the scheduler's 904 * load balancing domains (sched domains) as specified by that partial 905 * partition. 906 * 907 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst 908 * for a background explanation of this. 909 * 910 * Does not return errors, on the theory that the callers of this 911 * routine would rather not worry about failures to rebuild sched 912 * domains when operating in the severe memory shortage situations 913 * that could cause allocation failures below. 914 * 915 * Must be called with cpuset_mutex held. 916 * 917 * The three key local variables below are: 918 * cp - cpuset pointer, used (together with pos_css) to perform a 919 * top-down scan of all cpusets. For our purposes, rebuilding 920 * the schedulers sched domains, we can ignore !is_sched_load_ 921 * balance cpusets. 
922 * csa - (for CpuSet Array) Array of pointers to all the cpusets 923 * that need to be load balanced, for convenient iterative 924 * access by the subsequent code that finds the best partition, 925 * i.e the set of domains (subsets) of CPUs such that the 926 * cpus_allowed of every cpuset marked is_sched_load_balance 927 * is a subset of one of these domains, while there are as 928 * many such domains as possible, each as small as possible. 929 * doms - Conversion of 'csa' to an array of cpumasks, for passing to 930 * the kernel/sched/core.c routine partition_sched_domains() in a 931 * convenient format, that can be easily compared to the prior 932 * value to determine what partition elements (sched domains) 933 * were changed (added or removed.) 934 * 935 * Finding the best partition (set of domains): 936 * The triple nested loops below over i, j, k scan over the 937 * load balanced cpusets (using the array of cpuset pointers in 938 * csa[]) looking for pairs of cpusets that have overlapping 939 * cpus_allowed, but which don't have the same 'pn' partition 940 * number and gives them in the same partition number. It keeps 941 * looping on the 'restart' label until it can no longer find 942 * any such pairs. 943 * 944 * The union of the cpus_allowed masks from the set of 945 * all cpusets having the same 'pn' value then form the one 946 * element of the partition (one sched domain) to be passed to 947 * partition_sched_domains(). 948 */ 949 static int generate_sched_domains(cpumask_var_t **domains, 950 struct sched_domain_attr **attributes) 951 { 952 struct cpuset *cp; /* top-down scan of cpusets */ 953 struct cpuset **csa; /* array of all cpuset ptrs */ 954 int csn; /* how many cpuset ptrs in csa so far */ 955 int i, j, k; /* indices for partition finding loops */ 956 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ 957 struct sched_domain_attr *dattr; /* attributes for custom domains */ 958 int ndoms = 0; /* number of sched domains in result */ 959 int nslot; /* next empty doms[] struct cpumask slot */ 960 struct cgroup_subsys_state *pos_css; 961 bool root_load_balance = is_sched_load_balance(&top_cpuset); 962 963 doms = NULL; 964 dattr = NULL; 965 csa = NULL; 966 967 /* Special case for the 99% of systems with one, full, sched domain */ 968 if (root_load_balance && !top_cpuset.nr_subparts) { 969 ndoms = 1; 970 doms = alloc_sched_domains(ndoms); 971 if (!doms) 972 goto done; 973 974 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); 975 if (dattr) { 976 *dattr = SD_ATTR_INIT; 977 update_domain_attr_tree(dattr, &top_cpuset); 978 } 979 cpumask_and(doms[0], top_cpuset.effective_cpus, 980 housekeeping_cpumask(HK_TYPE_DOMAIN)); 981 982 goto done; 983 } 984 985 csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL); 986 if (!csa) 987 goto done; 988 csn = 0; 989 990 rcu_read_lock(); 991 if (root_load_balance) 992 csa[csn++] = &top_cpuset; 993 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { 994 if (cp == &top_cpuset) 995 continue; 996 /* 997 * Continue traversing beyond @cp iff @cp has some CPUs and 998 * isn't load balancing. The former is obvious. The 999 * latter: All child cpusets contain a subset of the 1000 * parent's cpus, so just skip them, and then we call 1001 * update_domain_attr_tree() to calc relax_domain_level of 1002 * the corresponding sched domain. 1003 * 1004 * If root is load-balancing, we can skip @cp if it 1005 * is a subset of the root's effective_cpus. 
1006 */ 1007 if (!cpumask_empty(cp->cpus_allowed) && 1008 !(is_sched_load_balance(cp) && 1009 cpumask_intersects(cp->cpus_allowed, 1010 housekeeping_cpumask(HK_TYPE_DOMAIN)))) 1011 continue; 1012 1013 if (root_load_balance && 1014 cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus)) 1015 continue; 1016 1017 if (is_sched_load_balance(cp) && 1018 !cpumask_empty(cp->effective_cpus)) 1019 csa[csn++] = cp; 1020 1021 /* skip @cp's subtree if not a partition root */ 1022 if (!is_partition_valid(cp)) 1023 pos_css = css_rightmost_descendant(pos_css); 1024 } 1025 rcu_read_unlock(); 1026 1027 for (i = 0; i < csn; i++) 1028 csa[i]->pn = i; 1029 ndoms = csn; 1030 1031 restart: 1032 /* Find the best partition (set of sched domains) */ 1033 for (i = 0; i < csn; i++) { 1034 struct cpuset *a = csa[i]; 1035 int apn = a->pn; 1036 1037 for (j = 0; j < csn; j++) { 1038 struct cpuset *b = csa[j]; 1039 int bpn = b->pn; 1040 1041 if (apn != bpn && cpusets_overlap(a, b)) { 1042 for (k = 0; k < csn; k++) { 1043 struct cpuset *c = csa[k]; 1044 1045 if (c->pn == bpn) 1046 c->pn = apn; 1047 } 1048 ndoms--; /* one less element */ 1049 goto restart; 1050 } 1051 } 1052 } 1053 1054 /* 1055 * Now we know how many domains to create. 1056 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. 1057 */ 1058 doms = alloc_sched_domains(ndoms); 1059 if (!doms) 1060 goto done; 1061 1062 /* 1063 * The rest of the code, including the scheduler, can deal with 1064 * dattr==NULL case. No need to abort if alloc fails. 1065 */ 1066 dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr), 1067 GFP_KERNEL); 1068 1069 for (nslot = 0, i = 0; i < csn; i++) { 1070 struct cpuset *a = csa[i]; 1071 struct cpumask *dp; 1072 int apn = a->pn; 1073 1074 if (apn < 0) { 1075 /* Skip completed partitions */ 1076 continue; 1077 } 1078 1079 dp = doms[nslot]; 1080 1081 if (nslot == ndoms) { 1082 static int warnings = 10; 1083 if (warnings) { 1084 pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n", 1085 nslot, ndoms, csn, i, apn); 1086 warnings--; 1087 } 1088 continue; 1089 } 1090 1091 cpumask_clear(dp); 1092 if (dattr) 1093 *(dattr + nslot) = SD_ATTR_INIT; 1094 for (j = i; j < csn; j++) { 1095 struct cpuset *b = csa[j]; 1096 1097 if (apn == b->pn) { 1098 cpumask_or(dp, dp, b->effective_cpus); 1099 cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN)); 1100 if (dattr) 1101 update_domain_attr_tree(dattr + nslot, b); 1102 1103 /* Done with this partition */ 1104 b->pn = -1; 1105 } 1106 } 1107 nslot++; 1108 } 1109 BUG_ON(nslot != ndoms); 1110 1111 done: 1112 kfree(csa); 1113 1114 /* 1115 * Fallback to the default domain if kmalloc() failed. 1116 * See comments in partition_sched_domains(). 
1117 */ 1118 if (doms == NULL) 1119 ndoms = 1; 1120 1121 *domains = doms; 1122 *attributes = dattr; 1123 return ndoms; 1124 } 1125 1126 static void dl_update_tasks_root_domain(struct cpuset *cs) 1127 { 1128 struct css_task_iter it; 1129 struct task_struct *task; 1130 1131 if (cs->nr_deadline_tasks == 0) 1132 return; 1133 1134 css_task_iter_start(&cs->css, 0, &it); 1135 1136 while ((task = css_task_iter_next(&it))) 1137 dl_add_task_root_domain(task); 1138 1139 css_task_iter_end(&it); 1140 } 1141 1142 static void dl_rebuild_rd_accounting(void) 1143 { 1144 struct cpuset *cs = NULL; 1145 struct cgroup_subsys_state *pos_css; 1146 1147 lockdep_assert_held(&cpuset_mutex); 1148 lockdep_assert_cpus_held(); 1149 lockdep_assert_held(&sched_domains_mutex); 1150 1151 rcu_read_lock(); 1152 1153 /* 1154 * Clear default root domain DL accounting, it will be computed again 1155 * if a task belongs to it. 1156 */ 1157 dl_clear_root_domain(&def_root_domain); 1158 1159 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { 1160 1161 if (cpumask_empty(cs->effective_cpus)) { 1162 pos_css = css_rightmost_descendant(pos_css); 1163 continue; 1164 } 1165 1166 css_get(&cs->css); 1167 1168 rcu_read_unlock(); 1169 1170 dl_update_tasks_root_domain(cs); 1171 1172 rcu_read_lock(); 1173 css_put(&cs->css); 1174 } 1175 rcu_read_unlock(); 1176 } 1177 1178 static void 1179 partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 1180 struct sched_domain_attr *dattr_new) 1181 { 1182 mutex_lock(&sched_domains_mutex); 1183 partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); 1184 dl_rebuild_rd_accounting(); 1185 mutex_unlock(&sched_domains_mutex); 1186 } 1187 1188 /* 1189 * Rebuild scheduler domains. 1190 * 1191 * If the flag 'sched_load_balance' of any cpuset with non-empty 1192 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset 1193 * which has that flag enabled, or if any cpuset with a non-empty 1194 * 'cpus' is removed, then call this routine to rebuild the 1195 * scheduler's dynamic sched domains. 1196 * 1197 * Call with cpuset_mutex held. Takes cpus_read_lock(). 1198 */ 1199 static void rebuild_sched_domains_locked(void) 1200 { 1201 struct cgroup_subsys_state *pos_css; 1202 struct sched_domain_attr *attr; 1203 cpumask_var_t *doms; 1204 struct cpuset *cs; 1205 int ndoms; 1206 1207 lockdep_assert_cpus_held(); 1208 lockdep_assert_held(&cpuset_mutex); 1209 1210 /* 1211 * If we have raced with CPU hotplug, return early to avoid 1212 * passing doms with offlined cpu to partition_sched_domains(). 1213 * Anyways, cpuset_hotplug_workfn() will rebuild sched domains. 1214 * 1215 * With no CPUs in any subpartitions, top_cpuset's effective CPUs 1216 * should be the same as the active CPUs, so checking only top_cpuset 1217 * is enough to detect racing CPU offlines. 1218 */ 1219 if (cpumask_empty(subpartitions_cpus) && 1220 !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) 1221 return; 1222 1223 /* 1224 * With subpartition CPUs, however, the effective CPUs of a partition 1225 * root should be only a subset of the active CPUs. Since a CPU in any 1226 * partition root could be offlined, all must be checked. 
1227 */ 1228 if (top_cpuset.nr_subparts) { 1229 rcu_read_lock(); 1230 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { 1231 if (!is_partition_valid(cs)) { 1232 pos_css = css_rightmost_descendant(pos_css); 1233 continue; 1234 } 1235 if (!cpumask_subset(cs->effective_cpus, 1236 cpu_active_mask)) { 1237 rcu_read_unlock(); 1238 return; 1239 } 1240 } 1241 rcu_read_unlock(); 1242 } 1243 1244 /* Generate domain masks and attrs */ 1245 ndoms = generate_sched_domains(&doms, &attr); 1246 1247 /* Have scheduler rebuild the domains */ 1248 partition_and_rebuild_sched_domains(ndoms, doms, attr); 1249 } 1250 #else /* !CONFIG_SMP */ 1251 static void rebuild_sched_domains_locked(void) 1252 { 1253 } 1254 #endif /* CONFIG_SMP */ 1255 1256 void rebuild_sched_domains(void) 1257 { 1258 cpus_read_lock(); 1259 mutex_lock(&cpuset_mutex); 1260 rebuild_sched_domains_locked(); 1261 mutex_unlock(&cpuset_mutex); 1262 cpus_read_unlock(); 1263 } 1264 1265 /** 1266 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. 1267 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed 1268 * @new_cpus: the temp variable for the new effective_cpus mask 1269 * 1270 * Iterate through each task of @cs updating its cpus_allowed to the 1271 * effective cpuset's. As this function is called with cpuset_mutex held, 1272 * cpuset membership stays stable. For top_cpuset, task_cpu_possible_mask() 1273 * is used instead of effective_cpus to make sure all offline CPUs are also 1274 * included as hotplug code won't update cpumasks for tasks in top_cpuset. 1275 */ 1276 static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus) 1277 { 1278 struct css_task_iter it; 1279 struct task_struct *task; 1280 bool top_cs = cs == &top_cpuset; 1281 1282 css_task_iter_start(&cs->css, 0, &it); 1283 while ((task = css_task_iter_next(&it))) { 1284 const struct cpumask *possible_mask = task_cpu_possible_mask(task); 1285 1286 if (top_cs) { 1287 /* 1288 * Percpu kthreads in top_cpuset are ignored 1289 */ 1290 if (kthread_is_per_cpu(task)) 1291 continue; 1292 cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus); 1293 } else { 1294 cpumask_and(new_cpus, possible_mask, cs->effective_cpus); 1295 } 1296 set_cpus_allowed_ptr(task, new_cpus); 1297 } 1298 css_task_iter_end(&it); 1299 } 1300 1301 /** 1302 * compute_effective_cpumask - Compute the effective cpumask of the cpuset 1303 * @new_cpus: the temp variable for the new effective_cpus mask 1304 * @cs: the cpuset the need to recompute the new effective_cpus mask 1305 * @parent: the parent cpuset 1306 * 1307 * The result is valid only if the given cpuset isn't a partition root. 
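 *
 * For example, cpus_allowed "1-4" combined with a parent effective_cpus of
 * "2-7" yields a new effective_cpus of "2-4".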
1308 */ 1309 static void compute_effective_cpumask(struct cpumask *new_cpus, 1310 struct cpuset *cs, struct cpuset *parent) 1311 { 1312 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); 1313 } 1314 1315 /* 1316 * Commands for update_parent_effective_cpumask 1317 */ 1318 enum partition_cmd { 1319 partcmd_enable, /* Enable partition root */ 1320 partcmd_disable, /* Disable partition root */ 1321 partcmd_update, /* Update parent's effective_cpus */ 1322 partcmd_invalidate, /* Make partition invalid */ 1323 }; 1324 1325 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, 1326 int turning_on); 1327 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, 1328 struct tmpmasks *tmp); 1329 1330 /* 1331 * Update partition exclusive flag 1332 * 1333 * Return: 0 if successful, an error code otherwise 1334 */ 1335 static int update_partition_exclusive(struct cpuset *cs, int new_prs) 1336 { 1337 bool exclusive = (new_prs > 0); 1338 1339 if (exclusive && !is_cpu_exclusive(cs)) { 1340 if (update_flag(CS_CPU_EXCLUSIVE, cs, 1)) 1341 return PERR_NOTEXCL; 1342 } else if (!exclusive && is_cpu_exclusive(cs)) { 1343 /* Turning off CS_CPU_EXCLUSIVE will not return error */ 1344 update_flag(CS_CPU_EXCLUSIVE, cs, 0); 1345 } 1346 return 0; 1347 } 1348 1349 /* 1350 * Update partition load balance flag and/or rebuild sched domain 1351 * 1352 * Changing load balance flag will automatically call 1353 * rebuild_sched_domains_locked(). 1354 * This function is for cgroup v2 only. 1355 */ 1356 static void update_partition_sd_lb(struct cpuset *cs, int old_prs) 1357 { 1358 int new_prs = cs->partition_root_state; 1359 bool rebuild_domains = (new_prs > 0) || (old_prs > 0); 1360 bool new_lb; 1361 1362 /* 1363 * If cs is not a valid partition root, the load balance state 1364 * will follow its parent. 
1365 */ 1366 if (new_prs > 0) { 1367 new_lb = (new_prs != PRS_ISOLATED); 1368 } else { 1369 new_lb = is_sched_load_balance(parent_cs(cs)); 1370 } 1371 if (new_lb != !!is_sched_load_balance(cs)) { 1372 rebuild_domains = true; 1373 if (new_lb) 1374 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 1375 else 1376 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 1377 } 1378 1379 if (rebuild_domains) 1380 rebuild_sched_domains_locked(); 1381 } 1382 1383 /* 1384 * tasks_nocpu_error - Return true if tasks will have no effective_cpus 1385 */ 1386 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs, 1387 struct cpumask *xcpus) 1388 { 1389 /* 1390 * A populated partition (cs or parent) can't have empty effective_cpus 1391 */ 1392 return (cpumask_subset(parent->effective_cpus, xcpus) && 1393 partition_is_populated(parent, cs)) || 1394 (!cpumask_intersects(xcpus, cpu_active_mask) && 1395 partition_is_populated(cs, NULL)); 1396 } 1397 1398 static void reset_partition_data(struct cpuset *cs) 1399 { 1400 struct cpuset *parent = parent_cs(cs); 1401 1402 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) 1403 return; 1404 1405 lockdep_assert_held(&callback_lock); 1406 1407 cs->nr_subparts = 0; 1408 if (cpumask_empty(cs->exclusive_cpus)) { 1409 cpumask_clear(cs->effective_xcpus); 1410 if (is_cpu_exclusive(cs)) 1411 clear_bit(CS_CPU_EXCLUSIVE, &cs->flags); 1412 } 1413 if (!cpumask_and(cs->effective_cpus, 1414 parent->effective_cpus, cs->cpus_allowed)) { 1415 cs->use_parent_ecpus = true; 1416 parent->child_ecpus_count++; 1417 cpumask_copy(cs->effective_cpus, parent->effective_cpus); 1418 } 1419 } 1420 1421 /* 1422 * compute_effective_exclusive_cpumask - compute effective exclusive CPUs 1423 * @cs: cpuset 1424 * @xcpus: effective exclusive CPUs value to be set 1425 * Return: true if xcpus is not empty, false otherwise. 1426 * 1427 * Starting with exclusive_cpus (cpus_allowed if exclusive_cpus is not set), 1428 * it must be a subset of cpus_allowed and parent's effective_xcpus. 1429 */ 1430 static bool compute_effective_exclusive_cpumask(struct cpuset *cs, 1431 struct cpumask *xcpus) 1432 { 1433 struct cpuset *parent = parent_cs(cs); 1434 1435 if (!xcpus) 1436 xcpus = cs->effective_xcpus; 1437 1438 if (!cpumask_empty(cs->exclusive_cpus)) 1439 cpumask_and(xcpus, cs->exclusive_cpus, cs->cpus_allowed); 1440 else 1441 cpumask_copy(xcpus, cs->cpus_allowed); 1442 1443 return cpumask_and(xcpus, xcpus, parent->effective_xcpus); 1444 } 1445 1446 static inline bool is_remote_partition(struct cpuset *cs) 1447 { 1448 return !list_empty(&cs->remote_sibling); 1449 } 1450 1451 static inline bool is_local_partition(struct cpuset *cs) 1452 { 1453 return is_partition_valid(cs) && !is_remote_partition(cs); 1454 } 1455 1456 /* 1457 * remote_partition_enable - Enable current cpuset as a remote partition root 1458 * @cs: the cpuset to update 1459 * @tmp: temparary masks 1460 * Return: 1 if successful, 0 if error 1461 * 1462 * Enable the current cpuset to become a remote partition root taking CPUs 1463 * directly from the top cpuset. cpuset_mutex must be held by the caller. 1464 */ 1465 static int remote_partition_enable(struct cpuset *cs, struct tmpmasks *tmp) 1466 { 1467 /* 1468 * The user must have sysadmin privilege. 1469 */ 1470 if (!capable(CAP_SYS_ADMIN)) 1471 return 0; 1472 1473 /* 1474 * The requested exclusive_cpus must not be allocated to other 1475 * partitions and it can't use up all the root's effective_cpus. 
 *
 * Note that if there is any local partition root above it or a
 * remote partition root underneath it, its exclusive_cpus must
 * have overlapped with subpartitions_cpus.
 */
	compute_effective_exclusive_cpumask(cs, tmp->new_cpus);
	if (cpumask_empty(tmp->new_cpus) ||
	    cpumask_intersects(tmp->new_cpus, subpartitions_cpus) ||
	    cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
		return 0;

	spin_lock_irq(&callback_lock);
	cpumask_andnot(top_cpuset.effective_cpus,
		       top_cpuset.effective_cpus, tmp->new_cpus);
	cpumask_or(subpartitions_cpus,
		   subpartitions_cpus, tmp->new_cpus);

	if (cs->use_parent_ecpus) {
		struct cpuset *parent = parent_cs(cs);

		cs->use_parent_ecpus = false;
		parent->child_ecpus_count--;
	}
	list_add(&cs->remote_sibling, &remote_children);
	spin_unlock_irq(&callback_lock);

	/*
	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
	 */
	update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
	update_sibling_cpumasks(&top_cpuset, NULL, tmp);

	return 1;
}

/*
 * remote_partition_disable - Remove current cpuset from remote partition list
 * @cs: the cpuset to update
 * @tmp: temporary masks
 *
 * The effective_cpus mask is also updated.
 *
 * cpuset_mutex must be held by the caller.
 */
static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
{
	compute_effective_exclusive_cpumask(cs, tmp->new_cpus);
	WARN_ON_ONCE(!is_remote_partition(cs));
	WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, subpartitions_cpus));

	spin_lock_irq(&callback_lock);
	cpumask_andnot(subpartitions_cpus,
		       subpartitions_cpus, tmp->new_cpus);
	cpumask_and(tmp->new_cpus,
		    tmp->new_cpus, cpu_active_mask);
	cpumask_or(top_cpuset.effective_cpus,
		   top_cpuset.effective_cpus, tmp->new_cpus);
	list_del_init(&cs->remote_sibling);
	cs->partition_root_state = -cs->partition_root_state;
	if (!cs->prs_err)
		cs->prs_err = PERR_INVCPUS;
	reset_partition_data(cs);
	spin_unlock_irq(&callback_lock);

	/*
	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
	 */
	update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
}

/*
 * remote_cpus_update - cpus_exclusive change of a remote partition
 * @cs: the cpuset to be updated
 * @newmask: the new effective_xcpus mask
 * @tmp: temporary masks
 *
 * top_cpuset and subpartitions_cpus will be updated or the partition can be
 * invalidated.
 */
static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
			       struct tmpmasks *tmp)
{
	bool adding, deleting;

	if (WARN_ON_ONCE(!is_remote_partition(cs)))
		return;

	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));

	if (cpumask_empty(newmask))
		goto invalidate;

	adding	 = cpumask_andnot(tmp->addmask, newmask, cs->effective_xcpus);
	deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, newmask);

	/*
	 * Adding remote CPUs is only allowed if those CPUs are
	 * not allocated to other partitions and there are effective_cpus
	 * left in the top cpuset.
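	 *
	 * E.g. (illustrative values): moving effective_xcpus from "2-4" to a
	 * newmask of "3-6" yields addmask = "5-6" and delmask = "2".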
	 */
	if (adding && (!capable(CAP_SYS_ADMIN) ||
		       cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
		       cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)))
		goto invalidate;

	spin_lock_irq(&callback_lock);
	if (adding) {
		cpumask_or(subpartitions_cpus,
			   subpartitions_cpus, tmp->addmask);
		cpumask_andnot(top_cpuset.effective_cpus,
			       top_cpuset.effective_cpus, tmp->addmask);
	}
	if (deleting) {
		cpumask_andnot(subpartitions_cpus,
			       subpartitions_cpus, tmp->delmask);
		cpumask_and(tmp->delmask,
			    tmp->delmask, cpu_active_mask);
		cpumask_or(top_cpuset.effective_cpus,
			   top_cpuset.effective_cpus, tmp->delmask);
	}
	spin_unlock_irq(&callback_lock);

	/*
	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
	 */
	update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
	return;

invalidate:
	remote_partition_disable(cs, tmp);
}

/*
 * remote_partition_check - check if a child remote partition needs update
 * @cs: the cpuset to be updated
 * @newmask: the new effective_xcpus mask
 * @delmask: temporary mask for deletion (not in tmp)
 * @tmp: temporary masks
 *
 * This should be called before the given cs has updated its cpus_allowed
 * and/or effective_xcpus.
 */
static void remote_partition_check(struct cpuset *cs, struct cpumask *newmask,
				   struct cpumask *delmask, struct tmpmasks *tmp)
{
	struct cpuset *child, *next;
	int disable_cnt = 0;

	/*
	 * Compute the effective exclusive CPUs that will be deleted.
	 */
	if (!cpumask_andnot(delmask, cs->effective_xcpus, newmask) ||
	    !cpumask_intersects(delmask, subpartitions_cpus))
		return;	/* No deletion of exclusive CPUs in partitions */

	/*
	 * Search the remote children list for those that will be impacted
	 * by the deletion of exclusive CPUs.
	 *
	 * Since a cpuset must be removed from the remote children list
	 * before it can go offline, and holding cpuset_mutex prevents any
	 * change in cpuset status, an RCU read lock isn't needed.
	 */
	lockdep_assert_held(&cpuset_mutex);
	list_for_each_entry_safe(child, next, &remote_children, remote_sibling)
		if (cpumask_intersects(child->effective_cpus, delmask)) {
			remote_partition_disable(child, tmp);
			disable_cnt++;
		}
	if (disable_cnt)
		rebuild_sched_domains_locked();
}

/*
 * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
 * @prstate: partition root state to be checked
 * @new_cpus: cpu mask
 * Return: true if there is a conflict, false otherwise
 *
 * CPUs outside of housekeeping_cpumask(HK_TYPE_DOMAIN) can only be used in
 * an isolated partition.
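 *
 * For example (illustrative): with "isolcpus=domain,2-3" on the kernel
 * command line, CPUs 2-3 are excluded from HK_TYPE_DOMAIN housekeeping, so
 * placing them in a "root" partition conflicts, while an "isolated"
 * partition may still use them.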
1659 */ 1660 static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus) 1661 { 1662 const struct cpumask *hk_domain = housekeeping_cpumask(HK_TYPE_DOMAIN); 1663 bool all_in_hk = cpumask_subset(new_cpus, hk_domain); 1664 1665 if (!all_in_hk && (prstate != PRS_ISOLATED)) 1666 return true; 1667 1668 return false; 1669 } 1670 1671 /** 1672 * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset 1673 * @cs: The cpuset that requests change in partition root state 1674 * @cmd: Partition root state change command 1675 * @newmask: Optional new cpumask for partcmd_update 1676 * @tmp: Temporary addmask and delmask 1677 * Return: 0 or a partition root state error code 1678 * 1679 * For partcmd_enable, the cpuset is being transformed from a non-partition 1680 * root to a partition root. The effective_xcpus (cpus_allowed if effective_xcpus 1681 * not set) mask of the given cpuset will be taken away from parent's 1682 * effective_cpus. The function will return 0 if all the CPUs listed in 1683 * effective_xcpus can be granted or an error code will be returned. 1684 * 1685 * For partcmd_disable, the cpuset is being transformed from a partition 1686 * root back to a non-partition root. Any CPUs in effective_xcpus will be 1687 * given back to parent's effective_cpus. 0 will always be returned. 1688 * 1689 * For partcmd_update, if the optional newmask is specified, the cpu list is 1690 * to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is 1691 * assumed to remain the same. The cpuset should either be a valid or invalid 1692 * partition root. The partition root state may change from valid to invalid 1693 * or vice versa. An error code will be returned if transitioning from 1694 * invalid to valid violates the exclusivity rule. 1695 * 1696 * For partcmd_invalidate, the current partition will be made invalid. 1697 * 1698 * The partcmd_enable and partcmd_disable commands are used by 1699 * update_prstate(). An error code may be returned and the caller will check 1700 * for error. 1701 * 1702 * The partcmd_update command is used by update_cpumasks_hier() with newmask 1703 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used 1704 * by update_cpumask() with NULL newmask. In both cases, the callers won't 1705 * check for error and so partition_root_state and prs_error will be updated 1706 * directly. 1707 */ 1708 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd, 1709 struct cpumask *newmask, 1710 struct tmpmasks *tmp) 1711 { 1712 struct cpuset *parent = parent_cs(cs); 1713 int adding; /* Adding cpus to parent's effective_cpus */ 1714 int deleting; /* Deleting cpus from parent's effective_cpus */ 1715 int old_prs, new_prs; 1716 int part_error = PERR_NONE; /* Partition error? */ 1717 int subparts_delta = 0; 1718 struct cpumask *xcpus; /* cs effective_xcpus */ 1719 bool nocpu; 1720 1721 lockdep_assert_held(&cpuset_mutex); 1722 1723 /* 1724 * new_prs will only be changed for the partcmd_update and 1725 * partcmd_invalidate commands. 1726 */ 1727 adding = deleting = false; 1728 old_prs = new_prs = cs->partition_root_state; 1729 xcpus = !cpumask_empty(cs->exclusive_cpus) 1730 ? cs->effective_xcpus : cs->cpus_allowed; 1731 1732 if (cmd == partcmd_invalidate) { 1733 if (is_prs_invalid(old_prs)) 1734 return 0; 1735 1736 /* 1737 * Make the current partition invalid. 
1738 */ 1739 if (is_partition_valid(parent)) 1740 adding = cpumask_and(tmp->addmask, 1741 xcpus, parent->effective_xcpus); 1742 if (old_prs > 0) { 1743 new_prs = -old_prs; 1744 subparts_delta--; 1745 } 1746 goto write_error; 1747 } 1748 1749 /* 1750 * The parent must be a partition root. 1751 * The new cpumask, if present, or the current cpus_allowed must 1752 * not be empty. 1753 */ 1754 if (!is_partition_valid(parent)) { 1755 return is_partition_invalid(parent) 1756 ? PERR_INVPARENT : PERR_NOTPART; 1757 } 1758 if (!newmask && cpumask_empty(cs->cpus_allowed)) 1759 return PERR_CPUSEMPTY; 1760 1761 nocpu = tasks_nocpu_error(parent, cs, xcpus); 1762 1763 if (cmd == partcmd_enable) { 1764 /* 1765 * Enabling partition root is not allowed if its 1766 * effective_xcpus is empty or doesn't overlap with 1767 * parent's effective_xcpus. 1768 */ 1769 if (cpumask_empty(xcpus) || 1770 !cpumask_intersects(xcpus, parent->effective_xcpus)) 1771 return PERR_INVCPUS; 1772 1773 if (prstate_housekeeping_conflict(new_prs, xcpus)) 1774 return PERR_HKEEPING; 1775 1776 /* 1777 * A parent can be left with no CPU as long as there is no 1778 * task directly associated with the parent partition. 1779 */ 1780 if (nocpu) 1781 return PERR_NOCPUS; 1782 1783 cpumask_copy(tmp->delmask, xcpus); 1784 deleting = true; 1785 subparts_delta++; 1786 } else if (cmd == partcmd_disable) { 1787 /* 1788 * May need to add cpus to parent's effective_cpus for 1789 * valid partition root. 1790 */ 1791 adding = !is_prs_invalid(old_prs) && 1792 cpumask_and(tmp->addmask, xcpus, parent->effective_xcpus); 1793 if (adding) 1794 subparts_delta--; 1795 } else if (newmask) { 1796 /* 1797 * Empty cpumask is not allowed 1798 */ 1799 if (cpumask_empty(newmask)) { 1800 part_error = PERR_CPUSEMPTY; 1801 goto write_error; 1802 } 1803 1804 /* 1805 * partcmd_update with newmask: 1806 * 1807 * Compute add/delete mask to/from effective_cpus 1808 * 1809 * For valid partition: 1810 * addmask = exclusive_cpus & ~newmask 1811 * & parent->effective_xcpus 1812 * delmask = newmask & ~exclusive_cpus 1813 * & parent->effective_xcpus 1814 * 1815 * For invalid partition: 1816 * delmask = newmask & parent->effective_xcpus 1817 */ 1818 if (is_prs_invalid(old_prs)) { 1819 adding = false; 1820 deleting = cpumask_and(tmp->delmask, 1821 newmask, parent->effective_xcpus); 1822 } else { 1823 cpumask_andnot(tmp->addmask, xcpus, newmask); 1824 adding = cpumask_and(tmp->addmask, tmp->addmask, 1825 parent->effective_xcpus); 1826 1827 cpumask_andnot(tmp->delmask, newmask, xcpus); 1828 deleting = cpumask_and(tmp->delmask, tmp->delmask, 1829 parent->effective_xcpus); 1830 } 1831 /* 1832 * Make partition invalid if parent's effective_cpus could 1833 * become empty and there are tasks in the parent. 1834 */ 1835 if (nocpu && (!adding || 1836 !cpumask_intersects(tmp->addmask, cpu_active_mask))) { 1837 part_error = PERR_NOCPUS; 1838 deleting = false; 1839 adding = cpumask_and(tmp->addmask, 1840 xcpus, parent->effective_xcpus); 1841 } 1842 } else { 1843 /* 1844 * partcmd_update w/o newmask 1845 * 1846 * delmask = effective_xcpus & parent->effective_cpus 1847 * 1848 * This can be called from: 1849 * 1) update_cpumasks_hier() 1850 * 2) cpuset_hotplug_update_tasks() 1851 * 1852 * Check to see if it can be transitioned from valid to 1853 * invalid partition or vice versa. 1854 * 1855 * A partition error happens when parent has tasks and all 1856 * its effective CPUs will have to be distributed out. 
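		 * For example, a populated parent partition whose effective_cpus
		 * is "0-1" cannot hand both CPUs down to a child partition; the
		 * child is then invalidated with PERR_NOCPUS instead.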
1857 */ 1858 WARN_ON_ONCE(!is_partition_valid(parent)); 1859 if (nocpu) { 1860 part_error = PERR_NOCPUS; 1861 if (is_partition_valid(cs)) 1862 adding = cpumask_and(tmp->addmask, 1863 xcpus, parent->effective_xcpus); 1864 } else if (is_partition_invalid(cs) && 1865 cpumask_subset(xcpus, parent->effective_xcpus)) { 1866 struct cgroup_subsys_state *css; 1867 struct cpuset *child; 1868 bool exclusive = true; 1869 1870 /* 1871 * Convert invalid partition to valid has to 1872 * pass the cpu exclusivity test. 1873 */ 1874 rcu_read_lock(); 1875 cpuset_for_each_child(child, css, parent) { 1876 if (child == cs) 1877 continue; 1878 if (!cpusets_are_exclusive(cs, child)) { 1879 exclusive = false; 1880 break; 1881 } 1882 } 1883 rcu_read_unlock(); 1884 if (exclusive) 1885 deleting = cpumask_and(tmp->delmask, 1886 xcpus, parent->effective_cpus); 1887 else 1888 part_error = PERR_NOTEXCL; 1889 } 1890 } 1891 1892 write_error: 1893 if (part_error) 1894 WRITE_ONCE(cs->prs_err, part_error); 1895 1896 if (cmd == partcmd_update) { 1897 /* 1898 * Check for possible transition between valid and invalid 1899 * partition root. 1900 */ 1901 switch (cs->partition_root_state) { 1902 case PRS_ROOT: 1903 case PRS_ISOLATED: 1904 if (part_error) { 1905 new_prs = -old_prs; 1906 subparts_delta--; 1907 } 1908 break; 1909 case PRS_INVALID_ROOT: 1910 case PRS_INVALID_ISOLATED: 1911 if (!part_error) { 1912 new_prs = -old_prs; 1913 subparts_delta++; 1914 } 1915 break; 1916 } 1917 } 1918 1919 if (!adding && !deleting && (new_prs == old_prs)) 1920 return 0; 1921 1922 /* 1923 * Transitioning between invalid to valid or vice versa may require 1924 * changing CS_CPU_EXCLUSIVE. In the case of partcmd_update, 1925 * validate_change() has already been successfully called and 1926 * CPU lists in cs haven't been updated yet. So defer it to later. 1927 */ 1928 if ((old_prs != new_prs) && (cmd != partcmd_update)) { 1929 int err = update_partition_exclusive(cs, new_prs); 1930 1931 if (err) 1932 return err; 1933 } 1934 1935 /* 1936 * Change the parent's effective_cpus & effective_xcpus (top cpuset 1937 * only). 1938 * 1939 * Newly added CPUs will be removed from effective_cpus and 1940 * newly deleted ones will be added back to effective_cpus. 1941 */ 1942 spin_lock_irq(&callback_lock); 1943 if (adding) { 1944 if (parent == &top_cpuset) 1945 cpumask_andnot(subpartitions_cpus, 1946 subpartitions_cpus, tmp->addmask); 1947 /* 1948 * Some of the CPUs in effective_xcpus might have been offlined. 
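 *
 * For example, if tmp->addmask still names a CPU that was hot-removed
 * after the partition took it, the cpumask_and() with cpu_active_mask
 * below filters it out so it never reappears in the parent's
 * effective_cpus.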
1949 */ 1950 cpumask_or(parent->effective_cpus, 1951 parent->effective_cpus, tmp->addmask); 1952 cpumask_and(parent->effective_cpus, 1953 parent->effective_cpus, cpu_active_mask); 1954 } 1955 if (deleting) { 1956 if (parent == &top_cpuset) 1957 cpumask_or(subpartitions_cpus, 1958 subpartitions_cpus, tmp->delmask); 1959 cpumask_andnot(parent->effective_cpus, 1960 parent->effective_cpus, tmp->delmask); 1961 } 1962 1963 if (is_partition_valid(parent)) { 1964 parent->nr_subparts += subparts_delta; 1965 WARN_ON_ONCE(parent->nr_subparts < 0); 1966 } 1967 1968 if (old_prs != new_prs) { 1969 cs->partition_root_state = new_prs; 1970 if (new_prs <= 0) 1971 cs->nr_subparts = 0; 1972 } 1973 1974 spin_unlock_irq(&callback_lock); 1975 1976 if ((old_prs != new_prs) && (cmd == partcmd_update)) 1977 update_partition_exclusive(cs, new_prs); 1978 1979 if (adding || deleting) { 1980 update_tasks_cpumask(parent, tmp->addmask); 1981 update_sibling_cpumasks(parent, cs, tmp); 1982 } 1983 1984 /* 1985 * For partcmd_update without newmask, it is being called from 1986 * cpuset_hotplug_workfn() where cpus_read_lock() wasn't taken. 1987 * Update the load balance flag and scheduling domain if 1988 * cpus_read_trylock() is successful. 1989 */ 1990 if ((cmd == partcmd_update) && !newmask && cpus_read_trylock()) { 1991 update_partition_sd_lb(cs, old_prs); 1992 cpus_read_unlock(); 1993 } 1994 1995 notify_partition_change(cs, old_prs); 1996 return 0; 1997 } 1998 1999 /** 2000 * compute_partition_effective_cpumask - compute effective_cpus for partition 2001 * @cs: partition root cpuset 2002 * @new_ecpus: previously computed effective_cpus to be updated 2003 * 2004 * Compute the effective_cpus of a partition root by scanning effective_xcpus 2005 * of child partition roots and excluding their effective_xcpus. 2006 * 2007 * This has the side effect of invalidating valid child partition roots, 2008 * if necessary. Since it is called from either cpuset_hotplug_update_tasks() 2009 * or update_cpumasks_hier() where parent and children are modified 2010 * successively, we don't need to call update_parent_effective_cpumask() 2011 * and the child's effective_cpus will be updated in later iterations. 2012 * 2013 * Note that rcu_read_lock() is assumed to be held. 
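 *
 * A hypothetical example: if @cs has effective_xcpus = 0-7 (all
 * online) and two valid child partitions own effective_xcpus 0-1 and
 * 2-3 respectively, @new_ecpus ends up as 4-7. A child whose
 * effective_xcpus reaches outside 0-7 would instead fail the
 * cpumask_subset() check in the body below and be invalidated with
 * PERR_INVCPUS rather than being subtracted.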
2014 */ 2015 static void compute_partition_effective_cpumask(struct cpuset *cs, 2016 struct cpumask *new_ecpus) 2017 { 2018 struct cgroup_subsys_state *css; 2019 struct cpuset *child; 2020 bool populated = partition_is_populated(cs, NULL); 2021 2022 /* 2023 * Check child partition roots to see if they should be 2024 * invalidated when 2025 * 1) child effective_xcpus not a subset of new 2026 * exclusive_cpus 2027 * 2) All the effective_cpus will be used up and cp 2028 * has tasks 2029 */ 2030 compute_effective_exclusive_cpumask(cs, new_ecpus); 2031 cpumask_and(new_ecpus, new_ecpus, cpu_active_mask); 2032 2033 rcu_read_lock(); 2034 cpuset_for_each_child(child, css, cs) { 2035 if (!is_partition_valid(child)) 2036 continue; 2037 2038 child->prs_err = 0; 2039 if (!cpumask_subset(child->effective_xcpus, 2040 cs->effective_xcpus)) 2041 child->prs_err = PERR_INVCPUS; 2042 else if (populated && 2043 cpumask_subset(new_ecpus, child->effective_xcpus)) 2044 child->prs_err = PERR_NOCPUS; 2045 2046 if (child->prs_err) { 2047 int old_prs = child->partition_root_state; 2048 2049 /* 2050 * Invalidate child partition 2051 */ 2052 spin_lock_irq(&callback_lock); 2053 make_partition_invalid(child); 2054 cs->nr_subparts--; 2055 child->nr_subparts = 0; 2056 spin_unlock_irq(&callback_lock); 2057 notify_partition_change(child, old_prs); 2058 continue; 2059 } 2060 cpumask_andnot(new_ecpus, new_ecpus, 2061 child->effective_xcpus); 2062 } 2063 rcu_read_unlock(); 2064 } 2065 2066 /* 2067 * update_cpumasks_hier() flags 2068 */ 2069 #define HIER_CHECKALL 0x01 /* Check all cpusets with no skipping */ 2070 #define HIER_NO_SD_REBUILD 0x02 /* Don't rebuild sched domains */ 2071 2072 /* 2073 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree 2074 * @cs: the cpuset to consider 2075 * @tmp: temp variables for calculating effective_cpus & partition setup 2076 * @flags: HIER_CHECKALL (don't skip any descendant cpusets) and/or HIER_NO_SD_REBUILD (don't rebuild sched domains) 2077 * 2078 * When configured cpumask is changed, the effective cpumasks of this cpuset 2079 * and all its descendants need to be updated. 2080 * 2081 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed. 2082 * 2083 * Called with cpuset_mutex held 2084 */ 2085 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, 2086 int flags) 2087 { 2088 struct cpuset *cp; 2089 struct cgroup_subsys_state *pos_css; 2090 bool need_rebuild_sched_domains = false; 2091 int old_prs, new_prs; 2092 2093 rcu_read_lock(); 2094 cpuset_for_each_descendant_pre(cp, pos_css, cs) { 2095 struct cpuset *parent = parent_cs(cp); 2096 bool remote = is_remote_partition(cp); 2097 bool update_parent = false; 2098 2099 /* 2100 * Skip descendant remote partition that acquires CPUs 2101 * directly from top cpuset unless it is cs. 2102 */ 2103 if (remote && (cp != cs)) { 2104 pos_css = css_rightmost_descendant(pos_css); 2105 continue; 2106 } 2107 2108 /* 2109 * Update effective_xcpus if exclusive_cpus set. 2110 * The case when exclusive_cpus isn't set is handled later.
2111 */ 2112 if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs)) { 2113 spin_lock_irq(&callback_lock); 2114 compute_effective_exclusive_cpumask(cp, NULL); 2115 spin_unlock_irq(&callback_lock); 2116 } 2117 2118 old_prs = new_prs = cp->partition_root_state; 2119 if (remote || (is_partition_valid(parent) && 2120 is_partition_valid(cp))) 2121 compute_partition_effective_cpumask(cp, tmp->new_cpus); 2122 else 2123 compute_effective_cpumask(tmp->new_cpus, cp, parent); 2124 2125 /* 2126 * A partition with no effective_cpus is allowed as long as 2127 * there is no task associated with it. Call 2128 * update_parent_effective_cpumask() to check it. 2129 */ 2130 if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) { 2131 update_parent = true; 2132 goto update_parent_effective; 2133 } 2134 2135 /* 2136 * If it becomes empty, inherit the effective mask of the 2137 * parent, which is guaranteed to have some CPUs unless 2138 * it is a partition root that has explicitly distributed 2139 * out all its CPUs. 2140 */ 2141 if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus)) { 2142 cpumask_copy(tmp->new_cpus, parent->effective_cpus); 2143 if (!cp->use_parent_ecpus) { 2144 cp->use_parent_ecpus = true; 2145 parent->child_ecpus_count++; 2146 } 2147 } else if (cp->use_parent_ecpus) { 2148 cp->use_parent_ecpus = false; 2149 WARN_ON_ONCE(!parent->child_ecpus_count); 2150 parent->child_ecpus_count--; 2151 } 2152 2153 if (remote) 2154 goto get_css; 2155 2156 /* 2157 * Skip the whole subtree if 2158 * 1) the cpumask remains the same, 2159 * 2) has no partition root state, 2160 * 3) HIER_CHECKALL flag not set, and 2161 * 4) for v2 load balance state same as its parent. 2162 */ 2163 if (!cp->partition_root_state && !(flags & HIER_CHECKALL) && 2164 cpumask_equal(tmp->new_cpus, cp->effective_cpus) && 2165 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || 2166 (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) { 2167 pos_css = css_rightmost_descendant(pos_css); 2168 continue; 2169 } 2170 2171 update_parent_effective: 2172 /* 2173 * update_parent_effective_cpumask() should have been called 2174 * for cs already in update_cpumask(). We should also call 2175 * update_tasks_cpumask() again for tasks in the parent 2176 * cpuset if the parent's effective_cpus changes. 2177 */ 2178 if ((cp != cs) && old_prs) { 2179 switch (parent->partition_root_state) { 2180 case PRS_ROOT: 2181 case PRS_ISOLATED: 2182 update_parent = true; 2183 break; 2184 2185 default: 2186 /* 2187 * When parent is not a partition root or is 2188 * invalid, child partition roots become 2189 * invalid too. 2190 */ 2191 if (is_partition_valid(cp)) 2192 new_prs = -cp->partition_root_state; 2193 WRITE_ONCE(cp->prs_err, 2194 is_partition_invalid(parent) 2195 ? PERR_INVPARENT : PERR_NOTPART); 2196 break; 2197 } 2198 } 2199 get_css: 2200 if (!css_tryget_online(&cp->css)) 2201 continue; 2202 rcu_read_unlock(); 2203 2204 if (update_parent) { 2205 update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp); 2206 /* 2207 * The cpuset partition_root_state may become 2208 * invalid. Capture it. 2209 */ 2210 new_prs = cp->partition_root_state; 2211 } 2212 2213 spin_lock_irq(&callback_lock); 2214 cpumask_copy(cp->effective_cpus, tmp->new_cpus); 2215 cp->partition_root_state = new_prs; 2216 /* 2217 * Make sure effective_xcpus is properly set for a valid 2218 * partition root. 
2219 */ 2220 if ((new_prs > 0) && cpumask_empty(cp->exclusive_cpus)) 2221 cpumask_and(cp->effective_xcpus, 2222 cp->cpus_allowed, parent->effective_xcpus); 2223 else if (new_prs < 0) 2224 reset_partition_data(cp); 2225 spin_unlock_irq(&callback_lock); 2226 2227 notify_partition_change(cp, old_prs); 2228 2229 WARN_ON(!is_in_v2_mode() && 2230 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); 2231 2232 update_tasks_cpumask(cp, cp->effective_cpus); 2233 2234 /* 2235 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE 2236 * from parent if current cpuset isn't a valid partition root 2237 * and their load balance states differ. 2238 */ 2239 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && 2240 !is_partition_valid(cp) && 2241 (is_sched_load_balance(parent) != is_sched_load_balance(cp))) { 2242 if (is_sched_load_balance(parent)) 2243 set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); 2244 else 2245 clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); 2246 } 2247 2248 /* 2249 * On legacy hierarchy, if the effective cpumask of any non- 2250 * empty cpuset is changed, we need to rebuild sched domains. 2251 * On default hierarchy, the cpuset needs to be a partition 2252 * root as well. 2253 */ 2254 if (!cpumask_empty(cp->cpus_allowed) && 2255 is_sched_load_balance(cp) && 2256 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || 2257 is_partition_valid(cp))) 2258 need_rebuild_sched_domains = true; 2259 2260 rcu_read_lock(); 2261 css_put(&cp->css); 2262 } 2263 rcu_read_unlock(); 2264 2265 if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD)) 2266 rebuild_sched_domains_locked(); 2267 } 2268 2269 /** 2270 * update_sibling_cpumasks - Update siblings cpumasks 2271 * @parent: Parent cpuset 2272 * @cs: Current cpuset 2273 * @tmp: Temp variables 2274 */ 2275 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, 2276 struct tmpmasks *tmp) 2277 { 2278 struct cpuset *sibling; 2279 struct cgroup_subsys_state *pos_css; 2280 2281 lockdep_assert_held(&cpuset_mutex); 2282 2283 /* 2284 * Check all its siblings and call update_cpumasks_hier() 2285 * if their effective_cpus will need to be changed. 2286 * 2287 * With the addition of effective_xcpus which is a subset of 2288 * cpus_allowed. It is possible a change in parent's effective_cpus 2289 * due to a change in a child partition's effective_xcpus will impact 2290 * its siblings even if they do not inherit parent's effective_cpus 2291 * directly. 2292 * 2293 * The update_cpumasks_hier() function may sleep. So we have to 2294 * release the RCU read lock before calling it. HIER_NO_SD_REBUILD 2295 * flag is used to suppress rebuild of sched domains as the callers 2296 * will take care of that. 
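 *
 * A concrete (hypothetical) scenario: parent P has effective_cpus
 * 0-7 and child A becomes a partition root taking CPUs 0-3, so P's
 * effective_cpus shrinks to 4-7. A sibling B with an empty
 * cpuset.cpus, which simply tracks P's effective_cpus, must then be
 * walked by update_cpumasks_hier() here so that its effective_cpus
 * and its tasks' affinities become 4-7 as well.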
2297 */ 2298 rcu_read_lock(); 2299 cpuset_for_each_child(sibling, pos_css, parent) { 2300 if (sibling == cs) 2301 continue; 2302 if (!sibling->use_parent_ecpus && 2303 !is_partition_valid(sibling)) { 2304 compute_effective_cpumask(tmp->new_cpus, sibling, 2305 parent); 2306 if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus)) 2307 continue; 2308 } 2309 if (!css_tryget_online(&sibling->css)) 2310 continue; 2311 2312 rcu_read_unlock(); 2313 update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD); 2314 rcu_read_lock(); 2315 css_put(&sibling->css); 2316 } 2317 rcu_read_unlock(); 2318 } 2319 2320 /** 2321 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it 2322 * @cs: the cpuset to consider 2323 * @trialcs: trial cpuset 2324 * @buf: buffer of cpu numbers written to this cpuset 2325 */ 2326 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, 2327 const char *buf) 2328 { 2329 int retval; 2330 struct tmpmasks tmp; 2331 struct cpuset *parent = parent_cs(cs); 2332 bool invalidate = false; 2333 int hier_flags = 0; 2334 int old_prs = cs->partition_root_state; 2335 2336 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ 2337 if (cs == &top_cpuset) 2338 return -EACCES; 2339 2340 /* 2341 * An empty cpus_allowed is ok only if the cpuset has no tasks. 2342 * Since cpulist_parse() fails on an empty mask, we special case 2343 * that parsing. The validate_change() call ensures that cpusets 2344 * with tasks have cpus. 2345 */ 2346 if (!*buf) { 2347 cpumask_clear(trialcs->cpus_allowed); 2348 cpumask_clear(trialcs->effective_xcpus); 2349 } else { 2350 retval = cpulist_parse(buf, trialcs->cpus_allowed); 2351 if (retval < 0) 2352 return retval; 2353 2354 if (!cpumask_subset(trialcs->cpus_allowed, 2355 top_cpuset.cpus_allowed)) 2356 return -EINVAL; 2357 2358 /* 2359 * When exclusive_cpus isn't explicitly set, it is constrainted 2360 * by cpus_allowed and parent's effective_xcpus. Otherwise, 2361 * trialcs->effective_xcpus is used as a temporary cpumask 2362 * for checking validity of the partition root. 2363 */ 2364 if (!cpumask_empty(trialcs->exclusive_cpus) || is_partition_valid(cs)) 2365 compute_effective_exclusive_cpumask(trialcs, NULL); 2366 } 2367 2368 /* Nothing to do if the cpus didn't change */ 2369 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) 2370 return 0; 2371 2372 if (alloc_cpumasks(NULL, &tmp)) 2373 return -ENOMEM; 2374 2375 if (old_prs) { 2376 if (is_partition_valid(cs) && 2377 cpumask_empty(trialcs->effective_xcpus)) { 2378 invalidate = true; 2379 cs->prs_err = PERR_INVCPUS; 2380 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) { 2381 invalidate = true; 2382 cs->prs_err = PERR_HKEEPING; 2383 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) { 2384 invalidate = true; 2385 cs->prs_err = PERR_NOCPUS; 2386 } 2387 } 2388 2389 /* 2390 * Check all the descendants in update_cpumasks_hier() if 2391 * effective_xcpus is to be changed. 2392 */ 2393 if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus)) 2394 hier_flags = HIER_CHECKALL; 2395 2396 retval = validate_change(cs, trialcs); 2397 2398 if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { 2399 struct cgroup_subsys_state *css; 2400 struct cpuset *cp; 2401 2402 /* 2403 * The -EINVAL error code indicates that partition sibling 2404 * CPU exclusivity rule has been violated. We still allow 2405 * the cpumask change to proceed while invalidating the 2406 * partition. 
However, any conflicting sibling partitions 2407 * have to be marked as invalid too. 2408 */ 2409 invalidate = true; 2410 rcu_read_lock(); 2411 cpuset_for_each_child(cp, css, parent) { 2412 struct cpumask *xcpus = fetch_xcpus(trialcs); 2413 2414 if (is_partition_valid(cp) && 2415 cpumask_intersects(xcpus, cp->effective_xcpus)) { 2416 rcu_read_unlock(); 2417 update_parent_effective_cpumask(cp, partcmd_invalidate, NULL, &tmp); 2418 rcu_read_lock(); 2419 } 2420 } 2421 rcu_read_unlock(); 2422 retval = 0; 2423 } 2424 2425 if (retval < 0) 2426 goto out_free; 2427 2428 if (is_partition_valid(cs) || 2429 (is_partition_invalid(cs) && !invalidate)) { 2430 struct cpumask *xcpus = trialcs->effective_xcpus; 2431 2432 if (cpumask_empty(xcpus) && is_partition_invalid(cs)) 2433 xcpus = trialcs->cpus_allowed; 2434 2435 /* 2436 * Call remote_cpus_update() to handle valid remote partition 2437 */ 2438 if (is_remote_partition(cs)) 2439 remote_cpus_update(cs, xcpus, &tmp); 2440 else if (invalidate) 2441 update_parent_effective_cpumask(cs, partcmd_invalidate, 2442 NULL, &tmp); 2443 else 2444 update_parent_effective_cpumask(cs, partcmd_update, 2445 xcpus, &tmp); 2446 } else if (!cpumask_empty(cs->exclusive_cpus)) { 2447 /* 2448 * Use trialcs->effective_cpus as a temp cpumask 2449 */ 2450 remote_partition_check(cs, trialcs->effective_xcpus, 2451 trialcs->effective_cpus, &tmp); 2452 } 2453 2454 spin_lock_irq(&callback_lock); 2455 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); 2456 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); 2457 if ((old_prs > 0) && !is_partition_valid(cs)) 2458 reset_partition_data(cs); 2459 spin_unlock_irq(&callback_lock); 2460 2461 /* effective_cpus/effective_xcpus will be updated here */ 2462 update_cpumasks_hier(cs, &tmp, hier_flags); 2463 2464 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */ 2465 if (cs->partition_root_state) 2466 update_partition_sd_lb(cs, old_prs); 2467 out_free: 2468 free_cpumasks(NULL, &tmp); 2469 return 0; 2470 } 2471 2472 /** 2473 * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset 2474 * @cs: the cpuset to consider 2475 * @trialcs: trial cpuset 2476 * @buf: buffer of cpu numbers written to this cpuset 2477 * 2478 * The tasks' cpumask will be updated if cs is a valid partition root. 2479 */ 2480 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, 2481 const char *buf) 2482 { 2483 int retval; 2484 struct tmpmasks tmp; 2485 struct cpuset *parent = parent_cs(cs); 2486 bool invalidate = false; 2487 int hier_flags = 0; 2488 int old_prs = cs->partition_root_state; 2489 2490 if (!*buf) { 2491 cpumask_clear(trialcs->exclusive_cpus); 2492 cpumask_clear(trialcs->effective_xcpus); 2493 } else { 2494 retval = cpulist_parse(buf, trialcs->exclusive_cpus); 2495 if (retval < 0) 2496 return retval; 2497 if (!is_cpu_exclusive(cs)) 2498 set_bit(CS_CPU_EXCLUSIVE, &trialcs->flags); 2499 } 2500 2501 /* Nothing to do if the CPUs didn't change */ 2502 if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus)) 2503 return 0; 2504 2505 if (alloc_cpumasks(NULL, &tmp)) 2506 return -ENOMEM; 2507 2508 if (*buf) 2509 compute_effective_exclusive_cpumask(trialcs, NULL); 2510 2511 /* 2512 * Check all the descendants in update_cpumasks_hier() if 2513 * effective_xcpus is to be changed. 
2514 */ 2515 if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus)) 2516 hier_flags = HIER_CHECKALL; 2517 2518 retval = validate_change(cs, trialcs); 2519 if (retval) 2520 return retval; 2521 2522 if (old_prs) { 2523 if (cpumask_empty(trialcs->effective_xcpus)) { 2524 invalidate = true; 2525 cs->prs_err = PERR_INVCPUS; 2526 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) { 2527 invalidate = true; 2528 cs->prs_err = PERR_HKEEPING; 2529 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) { 2530 invalidate = true; 2531 cs->prs_err = PERR_NOCPUS; 2532 } 2533 2534 if (is_remote_partition(cs)) { 2535 if (invalidate) 2536 remote_partition_disable(cs, &tmp); 2537 else 2538 remote_cpus_update(cs, trialcs->effective_xcpus, 2539 &tmp); 2540 } else if (invalidate) { 2541 update_parent_effective_cpumask(cs, partcmd_invalidate, 2542 NULL, &tmp); 2543 } else { 2544 update_parent_effective_cpumask(cs, partcmd_update, 2545 trialcs->effective_xcpus, &tmp); 2546 } 2547 } else if (!cpumask_empty(trialcs->exclusive_cpus)) { 2548 /* 2549 * Use trialcs->effective_cpus as a temp cpumask 2550 */ 2551 remote_partition_check(cs, trialcs->effective_xcpus, 2552 trialcs->effective_cpus, &tmp); 2553 } 2554 spin_lock_irq(&callback_lock); 2555 cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus); 2556 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); 2557 if ((old_prs > 0) && !is_partition_valid(cs)) 2558 reset_partition_data(cs); 2559 spin_unlock_irq(&callback_lock); 2560 2561 /* 2562 * Call update_cpumasks_hier() to update effective_cpus/effective_xcpus 2563 * of the subtree when it is a valid partition root or effective_xcpus 2564 * is updated. 2565 */ 2566 if (is_partition_valid(cs) || hier_flags) 2567 update_cpumasks_hier(cs, &tmp, hier_flags); 2568 2569 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */ 2570 if (cs->partition_root_state) 2571 update_partition_sd_lb(cs, old_prs); 2572 2573 free_cpumasks(NULL, &tmp); 2574 return 0; 2575 } 2576 2577 /* 2578 * Migrate memory region from one set of nodes to another. This is 2579 * performed asynchronously as it can be called from process migration path 2580 * holding locks involved in process management. All mm migrations are 2581 * performed in the queued order and can be waited for by flushing 2582 * cpuset_migrate_mm_wq. 
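 *
 * cpuset_migrate_mm() consumes the mm reference passed in (either the
 * queued work item or the error paths drop it), so the usual caller
 * pattern, mirroring update_tasks_nodemask() below, is roughly:
 *
 *	mm = get_task_mm(task);
 *	if (mm) {
 *		mpol_rebind_mm(mm, &cs->mems_allowed);
 *		if (is_memory_migrate(cs))
 *			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
 *		else
 *			mmput(mm);
 *	}
 *
 * Paths that must observe completed migrations, such as
 * cpuset_post_attach() and the "cpus"/"mems" write handler, flush
 * cpuset_migrate_mm_wq afterwards.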
2583 */ 2584 2585 struct cpuset_migrate_mm_work { 2586 struct work_struct work; 2587 struct mm_struct *mm; 2588 nodemask_t from; 2589 nodemask_t to; 2590 }; 2591 2592 static void cpuset_migrate_mm_workfn(struct work_struct *work) 2593 { 2594 struct cpuset_migrate_mm_work *mwork = 2595 container_of(work, struct cpuset_migrate_mm_work, work); 2596 2597 /* on a wq worker, no need to worry about %current's mems_allowed */ 2598 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); 2599 mmput(mwork->mm); 2600 kfree(mwork); 2601 } 2602 2603 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, 2604 const nodemask_t *to) 2605 { 2606 struct cpuset_migrate_mm_work *mwork; 2607 2608 if (nodes_equal(*from, *to)) { 2609 mmput(mm); 2610 return; 2611 } 2612 2613 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); 2614 if (mwork) { 2615 mwork->mm = mm; 2616 mwork->from = *from; 2617 mwork->to = *to; 2618 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); 2619 queue_work(cpuset_migrate_mm_wq, &mwork->work); 2620 } else { 2621 mmput(mm); 2622 } 2623 } 2624 2625 static void cpuset_post_attach(void) 2626 { 2627 flush_workqueue(cpuset_migrate_mm_wq); 2628 } 2629 2630 /* 2631 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy 2632 * @tsk: the task to change 2633 * @newmems: new nodes that the task will be set 2634 * 2635 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed 2636 * and rebind an eventual tasks' mempolicy. If the task is allocating in 2637 * parallel, it might temporarily see an empty intersection, which results in 2638 * a seqlock check and retry before OOM or allocation failure. 2639 */ 2640 static void cpuset_change_task_nodemask(struct task_struct *tsk, 2641 nodemask_t *newmems) 2642 { 2643 task_lock(tsk); 2644 2645 local_irq_disable(); 2646 write_seqcount_begin(&tsk->mems_allowed_seq); 2647 2648 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); 2649 mpol_rebind_task(tsk, newmems); 2650 tsk->mems_allowed = *newmems; 2651 2652 write_seqcount_end(&tsk->mems_allowed_seq); 2653 local_irq_enable(); 2654 2655 task_unlock(tsk); 2656 } 2657 2658 static void *cpuset_being_rebound; 2659 2660 /** 2661 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. 2662 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed 2663 * 2664 * Iterate through each task of @cs updating its mems_allowed to the 2665 * effective cpuset's. As this function is called with cpuset_mutex held, 2666 * cpuset membership stays stable. 2667 */ 2668 static void update_tasks_nodemask(struct cpuset *cs) 2669 { 2670 static nodemask_t newmems; /* protected by cpuset_mutex */ 2671 struct css_task_iter it; 2672 struct task_struct *task; 2673 2674 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ 2675 2676 guarantee_online_mems(cs, &newmems); 2677 2678 /* 2679 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't 2680 * take while holding tasklist_lock. Forks can happen - the 2681 * mpol_dup() cpuset_being_rebound check will catch such forks, 2682 * and rebind their vma mempolicies too. Because we still hold 2683 * the global cpuset_mutex, we know that no other rebind effort 2684 * will be contending for the global variable cpuset_being_rebound. 2685 * It's ok if we rebind the same mm twice; mpol_rebind_mm() 2686 * is idempotent. Also migrate pages in each mm to new nodes. 
2687 */ 2688 css_task_iter_start(&cs->css, 0, &it); 2689 while ((task = css_task_iter_next(&it))) { 2690 struct mm_struct *mm; 2691 bool migrate; 2692 2693 cpuset_change_task_nodemask(task, &newmems); 2694 2695 mm = get_task_mm(task); 2696 if (!mm) 2697 continue; 2698 2699 migrate = is_memory_migrate(cs); 2700 2701 mpol_rebind_mm(mm, &cs->mems_allowed); 2702 if (migrate) 2703 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); 2704 else 2705 mmput(mm); 2706 } 2707 css_task_iter_end(&it); 2708 2709 /* 2710 * All the tasks' nodemasks have been updated, update 2711 * cs->old_mems_allowed. 2712 */ 2713 cs->old_mems_allowed = newmems; 2714 2715 /* We're done rebinding vmas to this cpuset's new mems_allowed. */ 2716 cpuset_being_rebound = NULL; 2717 } 2718 2719 /* 2720 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree 2721 * @cs: the cpuset to consider 2722 * @new_mems: a temp variable for calculating new effective_mems 2723 * 2724 * When configured nodemask is changed, the effective nodemasks of this cpuset 2725 * and all its descendants need to be updated. 2726 * 2727 * On legacy hierarchy, effective_mems will be the same with mems_allowed. 2728 * 2729 * Called with cpuset_mutex held 2730 */ 2731 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) 2732 { 2733 struct cpuset *cp; 2734 struct cgroup_subsys_state *pos_css; 2735 2736 rcu_read_lock(); 2737 cpuset_for_each_descendant_pre(cp, pos_css, cs) { 2738 struct cpuset *parent = parent_cs(cp); 2739 2740 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); 2741 2742 /* 2743 * If it becomes empty, inherit the effective mask of the 2744 * parent, which is guaranteed to have some MEMs. 2745 */ 2746 if (is_in_v2_mode() && nodes_empty(*new_mems)) 2747 *new_mems = parent->effective_mems; 2748 2749 /* Skip the whole subtree if the nodemask remains the same. */ 2750 if (nodes_equal(*new_mems, cp->effective_mems)) { 2751 pos_css = css_rightmost_descendant(pos_css); 2752 continue; 2753 } 2754 2755 if (!css_tryget_online(&cp->css)) 2756 continue; 2757 rcu_read_unlock(); 2758 2759 spin_lock_irq(&callback_lock); 2760 cp->effective_mems = *new_mems; 2761 spin_unlock_irq(&callback_lock); 2762 2763 WARN_ON(!is_in_v2_mode() && 2764 !nodes_equal(cp->mems_allowed, cp->effective_mems)); 2765 2766 update_tasks_nodemask(cp); 2767 2768 rcu_read_lock(); 2769 css_put(&cp->css); 2770 } 2771 rcu_read_unlock(); 2772 } 2773 2774 /* 2775 * Handle user request to change the 'mems' memory placement 2776 * of a cpuset. Needs to validate the request, update the 2777 * cpusets mems_allowed, and for each task in the cpuset, 2778 * update mems_allowed and rebind task's mempolicy and any vma 2779 * mempolicies and if the cpuset is marked 'memory_migrate', 2780 * migrate the tasks pages to the new memory. 2781 * 2782 * Call with cpuset_mutex held. May take callback_lock during call. 2783 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, 2784 * lock each such tasks mm->mmap_lock, scan its vma's and rebind 2785 * their mempolicies to the cpusets new mems_allowed. 2786 */ 2787 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, 2788 const char *buf) 2789 { 2790 int retval; 2791 2792 /* 2793 * top_cpuset.mems_allowed tracks node_stats[N_MEMORY]; 2794 * it's read-only 2795 */ 2796 if (cs == &top_cpuset) { 2797 retval = -EACCES; 2798 goto done; 2799 } 2800 2801 /* 2802 * An empty mems_allowed is ok iff there are no tasks in the cpuset. 
2803 * Since nodelist_parse() fails on an empty mask, we special case 2804 * that parsing. The validate_change() call ensures that cpusets 2805 * with tasks have memory. 2806 */ 2807 if (!*buf) { 2808 nodes_clear(trialcs->mems_allowed); 2809 } else { 2810 retval = nodelist_parse(buf, trialcs->mems_allowed); 2811 if (retval < 0) 2812 goto done; 2813 2814 if (!nodes_subset(trialcs->mems_allowed, 2815 top_cpuset.mems_allowed)) { 2816 retval = -EINVAL; 2817 goto done; 2818 } 2819 } 2820 2821 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { 2822 retval = 0; /* Too easy - nothing to do */ 2823 goto done; 2824 } 2825 retval = validate_change(cs, trialcs); 2826 if (retval < 0) 2827 goto done; 2828 2829 check_insane_mems_config(&trialcs->mems_allowed); 2830 2831 spin_lock_irq(&callback_lock); 2832 cs->mems_allowed = trialcs->mems_allowed; 2833 spin_unlock_irq(&callback_lock); 2834 2835 /* use trialcs->mems_allowed as a temp variable */ 2836 update_nodemasks_hier(cs, &trialcs->mems_allowed); 2837 done: 2838 return retval; 2839 } 2840 2841 bool current_cpuset_is_being_rebound(void) 2842 { 2843 bool ret; 2844 2845 rcu_read_lock(); 2846 ret = task_cs(current) == cpuset_being_rebound; 2847 rcu_read_unlock(); 2848 2849 return ret; 2850 } 2851 2852 static int update_relax_domain_level(struct cpuset *cs, s64 val) 2853 { 2854 #ifdef CONFIG_SMP 2855 if (val < -1 || val >= sched_domain_level_max) 2856 return -EINVAL; 2857 #endif 2858 2859 if (val != cs->relax_domain_level) { 2860 cs->relax_domain_level = val; 2861 if (!cpumask_empty(cs->cpus_allowed) && 2862 is_sched_load_balance(cs)) 2863 rebuild_sched_domains_locked(); 2864 } 2865 2866 return 0; 2867 } 2868 2869 /** 2870 * update_tasks_flags - update the spread flags of tasks in the cpuset. 2871 * @cs: the cpuset in which each task's spread flags needs to be changed 2872 * 2873 * Iterate through each task of @cs updating its spread flags. As this 2874 * function is called with cpuset_mutex held, cpuset membership stays 2875 * stable. 2876 */ 2877 static void update_tasks_flags(struct cpuset *cs) 2878 { 2879 struct css_task_iter it; 2880 struct task_struct *task; 2881 2882 css_task_iter_start(&cs->css, 0, &it); 2883 while ((task = css_task_iter_next(&it))) 2884 cpuset_update_task_spread_flags(cs, task); 2885 css_task_iter_end(&it); 2886 } 2887 2888 /* 2889 * update_flag - read a 0 or a 1 in a file and update associated flag 2890 * bit: the bit to update (see cpuset_flagbits_t) 2891 * cs: the cpuset to update 2892 * turning_on: whether the flag is being set or cleared 2893 * 2894 * Call with cpuset_mutex held. 
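 *
 * This is reached from cpuset_write_u64() further down; e.g. writing
 * "1" to the v1 "sched_load_balance" file becomes
 * update_flag(CS_SCHED_LOAD_BALANCE, cs, 1), and the memory_migrate,
 * spread and hardwall files map onto their CS_* flags the same way.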
2895 */ 2896 2897 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, 2898 int turning_on) 2899 { 2900 struct cpuset *trialcs; 2901 int balance_flag_changed; 2902 int spread_flag_changed; 2903 int err; 2904 2905 trialcs = alloc_trial_cpuset(cs); 2906 if (!trialcs) 2907 return -ENOMEM; 2908 2909 if (turning_on) 2910 set_bit(bit, &trialcs->flags); 2911 else 2912 clear_bit(bit, &trialcs->flags); 2913 2914 err = validate_change(cs, trialcs); 2915 if (err < 0) 2916 goto out; 2917 2918 balance_flag_changed = (is_sched_load_balance(cs) != 2919 is_sched_load_balance(trialcs)); 2920 2921 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) 2922 || (is_spread_page(cs) != is_spread_page(trialcs))); 2923 2924 spin_lock_irq(&callback_lock); 2925 cs->flags = trialcs->flags; 2926 spin_unlock_irq(&callback_lock); 2927 2928 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) 2929 rebuild_sched_domains_locked(); 2930 2931 if (spread_flag_changed) 2932 update_tasks_flags(cs); 2933 out: 2934 free_cpuset(trialcs); 2935 return err; 2936 } 2937 2938 /** 2939 * update_prstate - update partition_root_state 2940 * @cs: the cpuset to update 2941 * @new_prs: new partition root state 2942 * Return: 0 if successful, != 0 if error 2943 * 2944 * Call with cpuset_mutex held. 2945 */ 2946 static int update_prstate(struct cpuset *cs, int new_prs) 2947 { 2948 int err = PERR_NONE, old_prs = cs->partition_root_state; 2949 struct cpuset *parent = parent_cs(cs); 2950 struct tmpmasks tmpmask; 2951 2952 if (old_prs == new_prs) 2953 return 0; 2954 2955 /* 2956 * Treat a previously invalid partition root as if it is a "member". 2957 */ 2958 if (new_prs && is_prs_invalid(old_prs)) 2959 old_prs = PRS_MEMBER; 2960 2961 if (alloc_cpumasks(NULL, &tmpmask)) 2962 return -ENOMEM; 2963 2964 /* 2965 * Setup effective_xcpus if not properly set yet, it will be cleared 2966 * later if partition becomes invalid. 2967 */ 2968 if ((new_prs > 0) && cpumask_empty(cs->exclusive_cpus)) { 2969 spin_lock_irq(&callback_lock); 2970 cpumask_and(cs->effective_xcpus, 2971 cs->cpus_allowed, parent->effective_xcpus); 2972 spin_unlock_irq(&callback_lock); 2973 } 2974 2975 err = update_partition_exclusive(cs, new_prs); 2976 if (err) 2977 goto out; 2978 2979 if (!old_prs) { 2980 /* 2981 * cpus_allowed cannot be empty. 2982 */ 2983 if (cpumask_empty(cs->cpus_allowed)) { 2984 err = PERR_CPUSEMPTY; 2985 goto out; 2986 } 2987 2988 err = update_parent_effective_cpumask(cs, partcmd_enable, 2989 NULL, &tmpmask); 2990 /* 2991 * If an attempt to become local partition root fails, 2992 * try to become a remote partition root instead. 2993 */ 2994 if (err && remote_partition_enable(cs, &tmpmask)) 2995 err = 0; 2996 } else if (old_prs && new_prs) { 2997 /* 2998 * A change in load balance state only, no change in cpumasks. 2999 */ 3000 ; 3001 } else { 3002 /* 3003 * Switching back to member is always allowed even if it 3004 * disables child partitions. 3005 */ 3006 if (is_remote_partition(cs)) 3007 remote_partition_disable(cs, &tmpmask); 3008 else 3009 update_parent_effective_cpumask(cs, partcmd_disable, 3010 NULL, &tmpmask); 3011 3012 /* 3013 * Invalidation of child partitions will be done in 3014 * update_cpumasks_hier(). 3015 */ 3016 } 3017 out: 3018 /* 3019 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error 3020 * happens. 
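 *
 * The PERR_* value written to cs->prs_err below is what a later read
 * of "cpuset.cpus.partition" reports as "root invalid (<reason>)" or
 * "isolated invalid (<reason>)"; see sched_partition_show() further
 * down.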
3021 */ 3022 if (err) { 3023 new_prs = -new_prs; 3024 update_partition_exclusive(cs, new_prs); 3025 } 3026 3027 spin_lock_irq(&callback_lock); 3028 cs->partition_root_state = new_prs; 3029 WRITE_ONCE(cs->prs_err, err); 3030 if (!is_partition_valid(cs)) 3031 reset_partition_data(cs); 3032 spin_unlock_irq(&callback_lock); 3033 3034 /* Force update if switching back to member */ 3035 update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0); 3036 3037 /* Update sched domains and load balance flag */ 3038 update_partition_sd_lb(cs, old_prs); 3039 3040 notify_partition_change(cs, old_prs); 3041 free_cpumasks(NULL, &tmpmask); 3042 return 0; 3043 } 3044 3045 /* 3046 * Frequency meter - How fast is some event occurring? 3047 * 3048 * These routines manage a digitally filtered, constant time based, 3049 * event frequency meter. There are four routines: 3050 * fmeter_init() - initialize a frequency meter. 3051 * fmeter_markevent() - called each time the event happens. 3052 * fmeter_getrate() - returns the recent rate of such events. 3053 * fmeter_update() - internal routine used to update fmeter. 3054 * 3055 * A common data structure is passed to each of these routines, 3056 * which is used to keep track of the state required to manage the 3057 * frequency meter and its digital filter. 3058 * 3059 * The filter works on the number of events marked per unit time. 3060 * The filter is single-pole low-pass recursive (IIR). The time unit 3061 * is 1 second. Arithmetic is done using 32-bit integers scaled to 3062 * simulate 3 decimal digits of precision (multiplied by 1000). 3063 * 3064 * With an FM_COEF of 933, and a time base of 1 second, the filter 3065 * has a half-life of 10 seconds, meaning that if the events quit 3066 * happening, then the rate returned from the fmeter_getrate() 3067 * will be cut in half each 10 seconds, until it converges to zero. 3068 * 3069 * It is not worth doing a real infinitely recursive filter. If more 3070 * than FM_MAXTICKS ticks have elapsed since the last filter event, 3071 * just compute FM_MAXTICKS ticks worth, by which point the level 3072 * will be stable. 3073 * 3074 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid 3075 * arithmetic overflow in the fmeter_update() routine. 3076 * 3077 * Given the simple 32 bit integer arithmetic used, this meter works 3078 * best for reporting rates between one per millisecond (msec) and 3079 * one per 32 (approx) seconds. At constant rates faster than one 3080 * per msec it maxes out at values just under 1,000,000. At constant 3081 * rates between one per msec, and one per second it will stabilize 3082 * to a value N*1000, where N is the rate of events per second. 3083 * At constant rates between one per second and one per 32 seconds, 3084 * it will be choppy, moving up on the seconds that have an event, 3085 * and then decaying until the next event. At rates slower than 3086 * about one in 32 seconds, it decays all the way back to zero between 3087 * each event. 
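 *
 * A worked example with the FM_* constants defined below: once
 * processed, each marked event adds (FM_SCALE - FM_COEF) = 67 to val,
 * and every idle second multiplies val by FM_COEF/FM_SCALE = 0.933.
 * At a steady 5 events/sec the value settles where 0.067 * val =
 * 5 * 67, i.e. val ~= 5000 (the N*1000 behaviour described above),
 * and once events stop it halves roughly every 10 seconds since
 * 0.933^10 ~= 0.50.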
3088 */ 3089 3090 #define FM_COEF 933 /* coefficient for half-life of 10 secs */ 3091 #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */ 3092 #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ 3093 #define FM_SCALE 1000 /* faux fixed point scale */ 3094 3095 /* Initialize a frequency meter */ 3096 static void fmeter_init(struct fmeter *fmp) 3097 { 3098 fmp->cnt = 0; 3099 fmp->val = 0; 3100 fmp->time = 0; 3101 spin_lock_init(&fmp->lock); 3102 } 3103 3104 /* Internal meter update - process cnt events and update value */ 3105 static void fmeter_update(struct fmeter *fmp) 3106 { 3107 time64_t now; 3108 u32 ticks; 3109 3110 now = ktime_get_seconds(); 3111 ticks = now - fmp->time; 3112 3113 if (ticks == 0) 3114 return; 3115 3116 ticks = min(FM_MAXTICKS, ticks); 3117 while (ticks-- > 0) 3118 fmp->val = (FM_COEF * fmp->val) / FM_SCALE; 3119 fmp->time = now; 3120 3121 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; 3122 fmp->cnt = 0; 3123 } 3124 3125 /* Process any previous ticks, then bump cnt by one (times scale). */ 3126 static void fmeter_markevent(struct fmeter *fmp) 3127 { 3128 spin_lock(&fmp->lock); 3129 fmeter_update(fmp); 3130 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); 3131 spin_unlock(&fmp->lock); 3132 } 3133 3134 /* Process any previous ticks, then return current value. */ 3135 static int fmeter_getrate(struct fmeter *fmp) 3136 { 3137 int val; 3138 3139 spin_lock(&fmp->lock); 3140 fmeter_update(fmp); 3141 val = fmp->val; 3142 spin_unlock(&fmp->lock); 3143 return val; 3144 } 3145 3146 static struct cpuset *cpuset_attach_old_cs; 3147 3148 /* 3149 * Check to see if a cpuset can accept a new task 3150 * For v1, cpus_allowed and mems_allowed can't be empty. 3151 * For v2, effective_cpus can't be empty. 3152 * Note that in v1, effective_cpus = cpus_allowed. 3153 */ 3154 static int cpuset_can_attach_check(struct cpuset *cs) 3155 { 3156 if (cpumask_empty(cs->effective_cpus) || 3157 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed))) 3158 return -ENOSPC; 3159 return 0; 3160 } 3161 3162 static void reset_migrate_dl_data(struct cpuset *cs) 3163 { 3164 cs->nr_migrate_dl_tasks = 0; 3165 cs->sum_migrate_dl_bw = 0; 3166 } 3167 3168 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ 3169 static int cpuset_can_attach(struct cgroup_taskset *tset) 3170 { 3171 struct cgroup_subsys_state *css; 3172 struct cpuset *cs, *oldcs; 3173 struct task_struct *task; 3174 bool cpus_updated, mems_updated; 3175 int ret; 3176 3177 /* used later by cpuset_attach() */ 3178 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); 3179 oldcs = cpuset_attach_old_cs; 3180 cs = css_cs(css); 3181 3182 mutex_lock(&cpuset_mutex); 3183 3184 /* Check to see if task is allowed in the cpuset */ 3185 ret = cpuset_can_attach_check(cs); 3186 if (ret) 3187 goto out_unlock; 3188 3189 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus); 3190 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); 3191 3192 cgroup_taskset_for_each(task, css, tset) { 3193 ret = task_can_attach(task); 3194 if (ret) 3195 goto out_unlock; 3196 3197 /* 3198 * Skip rights over task check in v2 when nothing changes, 3199 * migration permission derives from hierarchy ownership in 3200 * cgroup_procs_write_permission()). 
3201 */ 3202 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || 3203 (cpus_updated || mems_updated)) { 3204 ret = security_task_setscheduler(task); 3205 if (ret) 3206 goto out_unlock; 3207 } 3208 3209 if (dl_task(task)) { 3210 cs->nr_migrate_dl_tasks++; 3211 cs->sum_migrate_dl_bw += task->dl.dl_bw; 3212 } 3213 } 3214 3215 if (!cs->nr_migrate_dl_tasks) 3216 goto out_success; 3217 3218 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { 3219 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); 3220 3221 if (unlikely(cpu >= nr_cpu_ids)) { 3222 reset_migrate_dl_data(cs); 3223 ret = -EINVAL; 3224 goto out_unlock; 3225 } 3226 3227 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); 3228 if (ret) { 3229 reset_migrate_dl_data(cs); 3230 goto out_unlock; 3231 } 3232 } 3233 3234 out_success: 3235 /* 3236 * Mark attach is in progress. This makes validate_change() fail 3237 * changes which zero cpus/mems_allowed. 3238 */ 3239 cs->attach_in_progress++; 3240 out_unlock: 3241 mutex_unlock(&cpuset_mutex); 3242 return ret; 3243 } 3244 3245 static void cpuset_cancel_attach(struct cgroup_taskset *tset) 3246 { 3247 struct cgroup_subsys_state *css; 3248 struct cpuset *cs; 3249 3250 cgroup_taskset_first(tset, &css); 3251 cs = css_cs(css); 3252 3253 mutex_lock(&cpuset_mutex); 3254 cs->attach_in_progress--; 3255 if (!cs->attach_in_progress) 3256 wake_up(&cpuset_attach_wq); 3257 3258 if (cs->nr_migrate_dl_tasks) { 3259 int cpu = cpumask_any(cs->effective_cpus); 3260 3261 dl_bw_free(cpu, cs->sum_migrate_dl_bw); 3262 reset_migrate_dl_data(cs); 3263 } 3264 3265 mutex_unlock(&cpuset_mutex); 3266 } 3267 3268 /* 3269 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task() 3270 * but we can't allocate it dynamically there. Define it global and 3271 * allocate from cpuset_init(). 3272 */ 3273 static cpumask_var_t cpus_attach; 3274 static nodemask_t cpuset_attach_nodemask_to; 3275 3276 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task) 3277 { 3278 lockdep_assert_held(&cpuset_mutex); 3279 3280 if (cs != &top_cpuset) 3281 guarantee_online_cpus(task, cpus_attach); 3282 else 3283 cpumask_andnot(cpus_attach, task_cpu_possible_mask(task), 3284 subpartitions_cpus); 3285 /* 3286 * can_attach beforehand should guarantee that this doesn't 3287 * fail. TODO: have a better way to handle failure here 3288 */ 3289 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); 3290 3291 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); 3292 cpuset_update_task_spread_flags(cs, task); 3293 } 3294 3295 static void cpuset_attach(struct cgroup_taskset *tset) 3296 { 3297 struct task_struct *task; 3298 struct task_struct *leader; 3299 struct cgroup_subsys_state *css; 3300 struct cpuset *cs; 3301 struct cpuset *oldcs = cpuset_attach_old_cs; 3302 bool cpus_updated, mems_updated; 3303 3304 cgroup_taskset_first(tset, &css); 3305 cs = css_cs(css); 3306 3307 lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */ 3308 mutex_lock(&cpuset_mutex); 3309 cpus_updated = !cpumask_equal(cs->effective_cpus, 3310 oldcs->effective_cpus); 3311 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); 3312 3313 /* 3314 * In the default hierarchy, enabling cpuset in the child cgroups 3315 * will trigger a number of cpuset_attach() calls with no change 3316 * in effective cpus and mems. In that case, we can optimize out 3317 * by skipping the task iteration and update. 
3318 */ 3319 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && 3320 !cpus_updated && !mems_updated) { 3321 cpuset_attach_nodemask_to = cs->effective_mems; 3322 goto out; 3323 } 3324 3325 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); 3326 3327 cgroup_taskset_for_each(task, css, tset) 3328 cpuset_attach_task(cs, task); 3329 3330 /* 3331 * Change mm for all threadgroup leaders. This is expensive and may 3332 * sleep and should be moved outside migration path proper. Skip it 3333 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is 3334 * not set. 3335 */ 3336 cpuset_attach_nodemask_to = cs->effective_mems; 3337 if (!is_memory_migrate(cs) && !mems_updated) 3338 goto out; 3339 3340 cgroup_taskset_for_each_leader(leader, css, tset) { 3341 struct mm_struct *mm = get_task_mm(leader); 3342 3343 if (mm) { 3344 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); 3345 3346 /* 3347 * old_mems_allowed is the same with mems_allowed 3348 * here, except if this task is being moved 3349 * automatically due to hotplug. In that case 3350 * @mems_allowed has been updated and is empty, so 3351 * @old_mems_allowed is the right nodesets that we 3352 * migrate mm from. 3353 */ 3354 if (is_memory_migrate(cs)) 3355 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, 3356 &cpuset_attach_nodemask_to); 3357 else 3358 mmput(mm); 3359 } 3360 } 3361 3362 out: 3363 cs->old_mems_allowed = cpuset_attach_nodemask_to; 3364 3365 if (cs->nr_migrate_dl_tasks) { 3366 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; 3367 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; 3368 reset_migrate_dl_data(cs); 3369 } 3370 3371 cs->attach_in_progress--; 3372 if (!cs->attach_in_progress) 3373 wake_up(&cpuset_attach_wq); 3374 3375 mutex_unlock(&cpuset_mutex); 3376 } 3377 3378 /* The various types of files and directories in a cpuset file system */ 3379 3380 typedef enum { 3381 FILE_MEMORY_MIGRATE, 3382 FILE_CPULIST, 3383 FILE_MEMLIST, 3384 FILE_EFFECTIVE_CPULIST, 3385 FILE_EFFECTIVE_MEMLIST, 3386 FILE_SUBPARTS_CPULIST, 3387 FILE_EXCLUSIVE_CPULIST, 3388 FILE_EFFECTIVE_XCPULIST, 3389 FILE_CPU_EXCLUSIVE, 3390 FILE_MEM_EXCLUSIVE, 3391 FILE_MEM_HARDWALL, 3392 FILE_SCHED_LOAD_BALANCE, 3393 FILE_PARTITION_ROOT, 3394 FILE_SCHED_RELAX_DOMAIN_LEVEL, 3395 FILE_MEMORY_PRESSURE_ENABLED, 3396 FILE_MEMORY_PRESSURE, 3397 FILE_SPREAD_PAGE, 3398 FILE_SPREAD_SLAB, 3399 } cpuset_filetype_t; 3400 3401 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, 3402 u64 val) 3403 { 3404 struct cpuset *cs = css_cs(css); 3405 cpuset_filetype_t type = cft->private; 3406 int retval = 0; 3407 3408 cpus_read_lock(); 3409 mutex_lock(&cpuset_mutex); 3410 if (!is_cpuset_online(cs)) { 3411 retval = -ENODEV; 3412 goto out_unlock; 3413 } 3414 3415 switch (type) { 3416 case FILE_CPU_EXCLUSIVE: 3417 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); 3418 break; 3419 case FILE_MEM_EXCLUSIVE: 3420 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); 3421 break; 3422 case FILE_MEM_HARDWALL: 3423 retval = update_flag(CS_MEM_HARDWALL, cs, val); 3424 break; 3425 case FILE_SCHED_LOAD_BALANCE: 3426 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); 3427 break; 3428 case FILE_MEMORY_MIGRATE: 3429 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); 3430 break; 3431 case FILE_MEMORY_PRESSURE_ENABLED: 3432 cpuset_memory_pressure_enabled = !!val; 3433 break; 3434 case FILE_SPREAD_PAGE: 3435 retval = update_flag(CS_SPREAD_PAGE, cs, val); 3436 break; 3437 case FILE_SPREAD_SLAB: 3438 retval = update_flag(CS_SPREAD_SLAB, cs, val); 3439 break; 3440 default: 3441 
retval = -EINVAL; 3442 break; 3443 } 3444 out_unlock: 3445 mutex_unlock(&cpuset_mutex); 3446 cpus_read_unlock(); 3447 return retval; 3448 } 3449 3450 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, 3451 s64 val) 3452 { 3453 struct cpuset *cs = css_cs(css); 3454 cpuset_filetype_t type = cft->private; 3455 int retval = -ENODEV; 3456 3457 cpus_read_lock(); 3458 mutex_lock(&cpuset_mutex); 3459 if (!is_cpuset_online(cs)) 3460 goto out_unlock; 3461 3462 switch (type) { 3463 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 3464 retval = update_relax_domain_level(cs, val); 3465 break; 3466 default: 3467 retval = -EINVAL; 3468 break; 3469 } 3470 out_unlock: 3471 mutex_unlock(&cpuset_mutex); 3472 cpus_read_unlock(); 3473 return retval; 3474 } 3475 3476 /* 3477 * Common handling for a write to a "cpus" or "mems" file. 3478 */ 3479 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, 3480 char *buf, size_t nbytes, loff_t off) 3481 { 3482 struct cpuset *cs = css_cs(of_css(of)); 3483 struct cpuset *trialcs; 3484 int retval = -ENODEV; 3485 3486 buf = strstrip(buf); 3487 3488 /* 3489 * CPU or memory hotunplug may leave @cs w/o any execution 3490 * resources, in which case the hotplug code asynchronously updates 3491 * configuration and transfers all tasks to the nearest ancestor 3492 * which can execute. 3493 * 3494 * As writes to "cpus" or "mems" may restore @cs's execution 3495 * resources, wait for the previously scheduled operations before 3496 * proceeding, so that we don't end up keep removing tasks added 3497 * after execution capability is restored. 3498 * 3499 * cpuset_hotplug_work calls back into cgroup core via 3500 * cgroup_transfer_tasks() and waiting for it from a cgroupfs 3501 * operation like this one can lead to a deadlock through kernfs 3502 * active_ref protection. Let's break the protection. Losing the 3503 * protection is okay as we check whether @cs is online after 3504 * grabbing cpuset_mutex anyway. This only happens on the legacy 3505 * hierarchies. 3506 */ 3507 css_get(&cs->css); 3508 kernfs_break_active_protection(of->kn); 3509 flush_work(&cpuset_hotplug_work); 3510 3511 cpus_read_lock(); 3512 mutex_lock(&cpuset_mutex); 3513 if (!is_cpuset_online(cs)) 3514 goto out_unlock; 3515 3516 trialcs = alloc_trial_cpuset(cs); 3517 if (!trialcs) { 3518 retval = -ENOMEM; 3519 goto out_unlock; 3520 } 3521 3522 switch (of_cft(of)->private) { 3523 case FILE_CPULIST: 3524 retval = update_cpumask(cs, trialcs, buf); 3525 break; 3526 case FILE_EXCLUSIVE_CPULIST: 3527 retval = update_exclusive_cpumask(cs, trialcs, buf); 3528 break; 3529 case FILE_MEMLIST: 3530 retval = update_nodemask(cs, trialcs, buf); 3531 break; 3532 default: 3533 retval = -EINVAL; 3534 break; 3535 } 3536 3537 free_cpuset(trialcs); 3538 out_unlock: 3539 mutex_unlock(&cpuset_mutex); 3540 cpus_read_unlock(); 3541 kernfs_unbreak_active_protection(of->kn); 3542 css_put(&cs->css); 3543 flush_workqueue(cpuset_migrate_mm_wq); 3544 return retval ?: nbytes; 3545 } 3546 3547 /* 3548 * These ascii lists should be read in a single call, by using a user 3549 * buffer large enough to hold the entire map. If read in smaller 3550 * chunks, there is no guarantee of atomicity. Since the display format 3551 * used, list of ranges of sequential numbers, is variable length, 3552 * and since these maps can change value dynamically, one could read 3553 * gibberish by doing partial reads while a list was changing. 
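 *
 * For illustration only (userspace sketch, cgroup path made up, needs
 * <fcntl.h> and <unistd.h>): consume the file with one sufficiently
 * large read() instead of a loop of short reads, e.g.
 *
 *	char buf[8192];
 *	ssize_t n;
 *	int fd = open("/sys/fs/cgroup/grp/cpuset.cpus.effective", O_RDONLY);
 *
 *	if (fd >= 0) {
 *		n = read(fd, buf, sizeof(buf) - 1);
 *		if (n >= 0)
 *			buf[n] = '\0';
 *		close(fd);
 *	}
 *
 * A single read() of the whole map avoids the torn output that a
 * sequence of smaller reads could observe while the mask changes.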
3554 */ 3555 static int cpuset_common_seq_show(struct seq_file *sf, void *v) 3556 { 3557 struct cpuset *cs = css_cs(seq_css(sf)); 3558 cpuset_filetype_t type = seq_cft(sf)->private; 3559 int ret = 0; 3560 3561 spin_lock_irq(&callback_lock); 3562 3563 switch (type) { 3564 case FILE_CPULIST: 3565 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); 3566 break; 3567 case FILE_MEMLIST: 3568 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); 3569 break; 3570 case FILE_EFFECTIVE_CPULIST: 3571 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); 3572 break; 3573 case FILE_EFFECTIVE_MEMLIST: 3574 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); 3575 break; 3576 case FILE_EXCLUSIVE_CPULIST: 3577 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus)); 3578 break; 3579 case FILE_EFFECTIVE_XCPULIST: 3580 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus)); 3581 break; 3582 case FILE_SUBPARTS_CPULIST: 3583 seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus)); 3584 break; 3585 default: 3586 ret = -EINVAL; 3587 } 3588 3589 spin_unlock_irq(&callback_lock); 3590 return ret; 3591 } 3592 3593 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) 3594 { 3595 struct cpuset *cs = css_cs(css); 3596 cpuset_filetype_t type = cft->private; 3597 switch (type) { 3598 case FILE_CPU_EXCLUSIVE: 3599 return is_cpu_exclusive(cs); 3600 case FILE_MEM_EXCLUSIVE: 3601 return is_mem_exclusive(cs); 3602 case FILE_MEM_HARDWALL: 3603 return is_mem_hardwall(cs); 3604 case FILE_SCHED_LOAD_BALANCE: 3605 return is_sched_load_balance(cs); 3606 case FILE_MEMORY_MIGRATE: 3607 return is_memory_migrate(cs); 3608 case FILE_MEMORY_PRESSURE_ENABLED: 3609 return cpuset_memory_pressure_enabled; 3610 case FILE_MEMORY_PRESSURE: 3611 return fmeter_getrate(&cs->fmeter); 3612 case FILE_SPREAD_PAGE: 3613 return is_spread_page(cs); 3614 case FILE_SPREAD_SLAB: 3615 return is_spread_slab(cs); 3616 default: 3617 BUG(); 3618 } 3619 3620 /* Unreachable but makes gcc happy */ 3621 return 0; 3622 } 3623 3624 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) 3625 { 3626 struct cpuset *cs = css_cs(css); 3627 cpuset_filetype_t type = cft->private; 3628 switch (type) { 3629 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 3630 return cs->relax_domain_level; 3631 default: 3632 BUG(); 3633 } 3634 3635 /* Unreachable but makes gcc happy */ 3636 return 0; 3637 } 3638 3639 static int sched_partition_show(struct seq_file *seq, void *v) 3640 { 3641 struct cpuset *cs = css_cs(seq_css(seq)); 3642 const char *err, *type = NULL; 3643 3644 switch (cs->partition_root_state) { 3645 case PRS_ROOT: 3646 seq_puts(seq, "root\n"); 3647 break; 3648 case PRS_ISOLATED: 3649 seq_puts(seq, "isolated\n"); 3650 break; 3651 case PRS_MEMBER: 3652 seq_puts(seq, "member\n"); 3653 break; 3654 case PRS_INVALID_ROOT: 3655 type = "root"; 3656 fallthrough; 3657 case PRS_INVALID_ISOLATED: 3658 if (!type) 3659 type = "isolated"; 3660 err = perr_strings[READ_ONCE(cs->prs_err)]; 3661 if (err) 3662 seq_printf(seq, "%s invalid (%s)\n", type, err); 3663 else 3664 seq_printf(seq, "%s invalid\n", type); 3665 break; 3666 } 3667 return 0; 3668 } 3669 3670 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf, 3671 size_t nbytes, loff_t off) 3672 { 3673 struct cpuset *cs = css_cs(of_css(of)); 3674 int val; 3675 int retval = -ENODEV; 3676 3677 buf = strstrip(buf); 3678 3679 /* 3680 * Convert "root" to ENABLED, and convert "member" to DISABLED. 
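 *
 * i.e. the accepted strings are "root" (PRS_ROOT), "member"
 * (PRS_MEMBER) and "isolated" (PRS_ISOLATED); anything else is
 * rejected with -EINVAL, and the actual state change is done by
 * update_prstate() under cpuset_mutex.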
3681 */ 3682 if (!strcmp(buf, "root")) 3683 val = PRS_ROOT; 3684 else if (!strcmp(buf, "member")) 3685 val = PRS_MEMBER; 3686 else if (!strcmp(buf, "isolated")) 3687 val = PRS_ISOLATED; 3688 else 3689 return -EINVAL; 3690 3691 css_get(&cs->css); 3692 cpus_read_lock(); 3693 mutex_lock(&cpuset_mutex); 3694 if (!is_cpuset_online(cs)) 3695 goto out_unlock; 3696 3697 retval = update_prstate(cs, val); 3698 out_unlock: 3699 mutex_unlock(&cpuset_mutex); 3700 cpus_read_unlock(); 3701 css_put(&cs->css); 3702 return retval ?: nbytes; 3703 } 3704 3705 /* 3706 * for the common functions, 'private' gives the type of file 3707 */ 3708 3709 static struct cftype legacy_files[] = { 3710 { 3711 .name = "cpus", 3712 .seq_show = cpuset_common_seq_show, 3713 .write = cpuset_write_resmask, 3714 .max_write_len = (100U + 6 * NR_CPUS), 3715 .private = FILE_CPULIST, 3716 }, 3717 3718 { 3719 .name = "mems", 3720 .seq_show = cpuset_common_seq_show, 3721 .write = cpuset_write_resmask, 3722 .max_write_len = (100U + 6 * MAX_NUMNODES), 3723 .private = FILE_MEMLIST, 3724 }, 3725 3726 { 3727 .name = "effective_cpus", 3728 .seq_show = cpuset_common_seq_show, 3729 .private = FILE_EFFECTIVE_CPULIST, 3730 }, 3731 3732 { 3733 .name = "effective_mems", 3734 .seq_show = cpuset_common_seq_show, 3735 .private = FILE_EFFECTIVE_MEMLIST, 3736 }, 3737 3738 { 3739 .name = "cpu_exclusive", 3740 .read_u64 = cpuset_read_u64, 3741 .write_u64 = cpuset_write_u64, 3742 .private = FILE_CPU_EXCLUSIVE, 3743 }, 3744 3745 { 3746 .name = "mem_exclusive", 3747 .read_u64 = cpuset_read_u64, 3748 .write_u64 = cpuset_write_u64, 3749 .private = FILE_MEM_EXCLUSIVE, 3750 }, 3751 3752 { 3753 .name = "mem_hardwall", 3754 .read_u64 = cpuset_read_u64, 3755 .write_u64 = cpuset_write_u64, 3756 .private = FILE_MEM_HARDWALL, 3757 }, 3758 3759 { 3760 .name = "sched_load_balance", 3761 .read_u64 = cpuset_read_u64, 3762 .write_u64 = cpuset_write_u64, 3763 .private = FILE_SCHED_LOAD_BALANCE, 3764 }, 3765 3766 { 3767 .name = "sched_relax_domain_level", 3768 .read_s64 = cpuset_read_s64, 3769 .write_s64 = cpuset_write_s64, 3770 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, 3771 }, 3772 3773 { 3774 .name = "memory_migrate", 3775 .read_u64 = cpuset_read_u64, 3776 .write_u64 = cpuset_write_u64, 3777 .private = FILE_MEMORY_MIGRATE, 3778 }, 3779 3780 { 3781 .name = "memory_pressure", 3782 .read_u64 = cpuset_read_u64, 3783 .private = FILE_MEMORY_PRESSURE, 3784 }, 3785 3786 { 3787 .name = "memory_spread_page", 3788 .read_u64 = cpuset_read_u64, 3789 .write_u64 = cpuset_write_u64, 3790 .private = FILE_SPREAD_PAGE, 3791 }, 3792 3793 { 3794 .name = "memory_spread_slab", 3795 .read_u64 = cpuset_read_u64, 3796 .write_u64 = cpuset_write_u64, 3797 .private = FILE_SPREAD_SLAB, 3798 }, 3799 3800 { 3801 .name = "memory_pressure_enabled", 3802 .flags = CFTYPE_ONLY_ON_ROOT, 3803 .read_u64 = cpuset_read_u64, 3804 .write_u64 = cpuset_write_u64, 3805 .private = FILE_MEMORY_PRESSURE_ENABLED, 3806 }, 3807 3808 { } /* terminate */ 3809 }; 3810 3811 /* 3812 * This is currently a minimal set for the default hierarchy. It can be 3813 * expanded later on by migrating more features and control files from v1. 
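 *
 * Concretely, the table below exposes (with the "cpuset." prefix that
 * cgroup core adds on the default hierarchy): cpus, mems,
 * cpus.effective, mems.effective, cpus.partition, cpus.exclusive,
 * cpus.exclusive.effective and the root-only debug file
 * cpus.subpartitions.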
3814 */ 3815 static struct cftype dfl_files[] = { 3816 { 3817 .name = "cpus", 3818 .seq_show = cpuset_common_seq_show, 3819 .write = cpuset_write_resmask, 3820 .max_write_len = (100U + 6 * NR_CPUS), 3821 .private = FILE_CPULIST, 3822 .flags = CFTYPE_NOT_ON_ROOT, 3823 }, 3824 3825 { 3826 .name = "mems", 3827 .seq_show = cpuset_common_seq_show, 3828 .write = cpuset_write_resmask, 3829 .max_write_len = (100U + 6 * MAX_NUMNODES), 3830 .private = FILE_MEMLIST, 3831 .flags = CFTYPE_NOT_ON_ROOT, 3832 }, 3833 3834 { 3835 .name = "cpus.effective", 3836 .seq_show = cpuset_common_seq_show, 3837 .private = FILE_EFFECTIVE_CPULIST, 3838 }, 3839 3840 { 3841 .name = "mems.effective", 3842 .seq_show = cpuset_common_seq_show, 3843 .private = FILE_EFFECTIVE_MEMLIST, 3844 }, 3845 3846 { 3847 .name = "cpus.partition", 3848 .seq_show = sched_partition_show, 3849 .write = sched_partition_write, 3850 .private = FILE_PARTITION_ROOT, 3851 .flags = CFTYPE_NOT_ON_ROOT, 3852 .file_offset = offsetof(struct cpuset, partition_file), 3853 }, 3854 3855 { 3856 .name = "cpus.exclusive", 3857 .seq_show = cpuset_common_seq_show, 3858 .write = cpuset_write_resmask, 3859 .max_write_len = (100U + 6 * NR_CPUS), 3860 .private = FILE_EXCLUSIVE_CPULIST, 3861 .flags = CFTYPE_NOT_ON_ROOT, 3862 }, 3863 3864 { 3865 .name = "cpus.exclusive.effective", 3866 .seq_show = cpuset_common_seq_show, 3867 .private = FILE_EFFECTIVE_XCPULIST, 3868 .flags = CFTYPE_NOT_ON_ROOT, 3869 }, 3870 3871 { 3872 .name = "cpus.subpartitions", 3873 .seq_show = cpuset_common_seq_show, 3874 .private = FILE_SUBPARTS_CPULIST, 3875 .flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG, 3876 }, 3877 3878 { } /* terminate */ 3879 }; 3880 3881 3882 /** 3883 * cpuset_css_alloc - Allocate a cpuset css 3884 * @parent_css: Parent css of the control group that the new cpuset will be 3885 * part of 3886 * Return: cpuset css on success, -ENOMEM on failure. 3887 * 3888 * Allocate and initialize a new cpuset css, for non-NULL @parent_css, return 3889 * top cpuset css otherwise. 
3890 */ 3891 static struct cgroup_subsys_state * 3892 cpuset_css_alloc(struct cgroup_subsys_state *parent_css) 3893 { 3894 struct cpuset *cs; 3895 3896 if (!parent_css) 3897 return &top_cpuset.css; 3898 3899 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 3900 if (!cs) 3901 return ERR_PTR(-ENOMEM); 3902 3903 if (alloc_cpumasks(cs, NULL)) { 3904 kfree(cs); 3905 return ERR_PTR(-ENOMEM); 3906 } 3907 3908 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 3909 nodes_clear(cs->mems_allowed); 3910 nodes_clear(cs->effective_mems); 3911 fmeter_init(&cs->fmeter); 3912 cs->relax_domain_level = -1; 3913 INIT_LIST_HEAD(&cs->remote_sibling); 3914 3915 /* Set CS_MEMORY_MIGRATE for default hierarchy */ 3916 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) 3917 __set_bit(CS_MEMORY_MIGRATE, &cs->flags); 3918 3919 return &cs->css; 3920 } 3921 3922 static int cpuset_css_online(struct cgroup_subsys_state *css) 3923 { 3924 struct cpuset *cs = css_cs(css); 3925 struct cpuset *parent = parent_cs(cs); 3926 struct cpuset *tmp_cs; 3927 struct cgroup_subsys_state *pos_css; 3928 3929 if (!parent) 3930 return 0; 3931 3932 cpus_read_lock(); 3933 mutex_lock(&cpuset_mutex); 3934 3935 set_bit(CS_ONLINE, &cs->flags); 3936 if (is_spread_page(parent)) 3937 set_bit(CS_SPREAD_PAGE, &cs->flags); 3938 if (is_spread_slab(parent)) 3939 set_bit(CS_SPREAD_SLAB, &cs->flags); 3940 3941 cpuset_inc(); 3942 3943 spin_lock_irq(&callback_lock); 3944 if (is_in_v2_mode()) { 3945 cpumask_copy(cs->effective_cpus, parent->effective_cpus); 3946 cs->effective_mems = parent->effective_mems; 3947 cs->use_parent_ecpus = true; 3948 parent->child_ecpus_count++; 3949 /* 3950 * Clear CS_SCHED_LOAD_BALANCE if parent is isolated 3951 */ 3952 if (!is_sched_load_balance(parent)) 3953 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 3954 } 3955 3956 /* 3957 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated 3958 */ 3959 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && 3960 !is_sched_load_balance(parent)) 3961 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 3962 3963 spin_unlock_irq(&callback_lock); 3964 3965 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) 3966 goto out_unlock; 3967 3968 /* 3969 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is 3970 * set. This flag handling is implemented in cgroup core for 3971 * historical reasons - the flag may be specified during mount. 3972 * 3973 * Currently, if any sibling cpusets have exclusive cpus or mem, we 3974 * refuse to clone the configuration - thereby refusing the task to 3975 * be entered, and as a result refusing the sys_unshare() or 3976 * clone() which initiated it. If this becomes a problem for some 3977 * users who wish to allow that scenario, then this could be 3978 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive 3979 * (and likewise for mems) to the new cgroup. 
3980 */ 3981 rcu_read_lock(); 3982 cpuset_for_each_child(tmp_cs, pos_css, parent) { 3983 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { 3984 rcu_read_unlock(); 3985 goto out_unlock; 3986 } 3987 } 3988 rcu_read_unlock(); 3989 3990 spin_lock_irq(&callback_lock); 3991 cs->mems_allowed = parent->mems_allowed; 3992 cs->effective_mems = parent->mems_allowed; 3993 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); 3994 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); 3995 spin_unlock_irq(&callback_lock); 3996 out_unlock: 3997 mutex_unlock(&cpuset_mutex); 3998 cpus_read_unlock(); 3999 return 0; 4000 } 4001 4002 /* 4003 * If the cpuset being removed has its flag 'sched_load_balance' 4004 * enabled, then simulate turning sched_load_balance off, which 4005 * will call rebuild_sched_domains_locked(). That is not needed 4006 * in the default hierarchy where only changes in partition 4007 * will cause repartitioning. 4008 * 4009 * If the cpuset has the 'sched.partition' flag enabled, simulate 4010 * turning 'sched.partition" off. 4011 */ 4012 4013 static void cpuset_css_offline(struct cgroup_subsys_state *css) 4014 { 4015 struct cpuset *cs = css_cs(css); 4016 4017 cpus_read_lock(); 4018 mutex_lock(&cpuset_mutex); 4019 4020 if (is_partition_valid(cs)) 4021 update_prstate(cs, 0); 4022 4023 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && 4024 is_sched_load_balance(cs)) 4025 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); 4026 4027 if (cs->use_parent_ecpus) { 4028 struct cpuset *parent = parent_cs(cs); 4029 4030 cs->use_parent_ecpus = false; 4031 parent->child_ecpus_count--; 4032 } 4033 4034 cpuset_dec(); 4035 clear_bit(CS_ONLINE, &cs->flags); 4036 4037 mutex_unlock(&cpuset_mutex); 4038 cpus_read_unlock(); 4039 } 4040 4041 static void cpuset_css_free(struct cgroup_subsys_state *css) 4042 { 4043 struct cpuset *cs = css_cs(css); 4044 4045 free_cpuset(cs); 4046 } 4047 4048 static void cpuset_bind(struct cgroup_subsys_state *root_css) 4049 { 4050 mutex_lock(&cpuset_mutex); 4051 spin_lock_irq(&callback_lock); 4052 4053 if (is_in_v2_mode()) { 4054 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); 4055 cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask); 4056 top_cpuset.mems_allowed = node_possible_map; 4057 } else { 4058 cpumask_copy(top_cpuset.cpus_allowed, 4059 top_cpuset.effective_cpus); 4060 top_cpuset.mems_allowed = top_cpuset.effective_mems; 4061 } 4062 4063 spin_unlock_irq(&callback_lock); 4064 mutex_unlock(&cpuset_mutex); 4065 } 4066 4067 /* 4068 * In case the child is cloned into a cpuset different from its parent, 4069 * additional checks are done to see if the move is allowed. 4070 */ 4071 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset) 4072 { 4073 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); 4074 bool same_cs; 4075 int ret; 4076 4077 rcu_read_lock(); 4078 same_cs = (cs == task_cs(current)); 4079 rcu_read_unlock(); 4080 4081 if (same_cs) 4082 return 0; 4083 4084 lockdep_assert_held(&cgroup_mutex); 4085 mutex_lock(&cpuset_mutex); 4086 4087 /* Check to see if task is allowed in the cpuset */ 4088 ret = cpuset_can_attach_check(cs); 4089 if (ret) 4090 goto out_unlock; 4091 4092 ret = task_can_attach(task); 4093 if (ret) 4094 goto out_unlock; 4095 4096 ret = security_task_setscheduler(task); 4097 if (ret) 4098 goto out_unlock; 4099 4100 /* 4101 * Mark attach is in progress. This makes validate_change() fail 4102 * changes which zero cpus/mems_allowed. 
4103 */ 4104 cs->attach_in_progress++; 4105 out_unlock: 4106 mutex_unlock(&cpuset_mutex); 4107 return ret; 4108 } 4109 4110 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset) 4111 { 4112 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); 4113 bool same_cs; 4114 4115 rcu_read_lock(); 4116 same_cs = (cs == task_cs(current)); 4117 rcu_read_unlock(); 4118 4119 if (same_cs) 4120 return; 4121 4122 mutex_lock(&cpuset_mutex); 4123 cs->attach_in_progress--; 4124 if (!cs->attach_in_progress) 4125 wake_up(&cpuset_attach_wq); 4126 mutex_unlock(&cpuset_mutex); 4127 } 4128 4129 /* 4130 * Make sure the new task conform to the current state of its parent, 4131 * which could have been changed by cpuset just after it inherits the 4132 * state from the parent and before it sits on the cgroup's task list. 4133 */ 4134 static void cpuset_fork(struct task_struct *task) 4135 { 4136 struct cpuset *cs; 4137 bool same_cs; 4138 4139 rcu_read_lock(); 4140 cs = task_cs(task); 4141 same_cs = (cs == task_cs(current)); 4142 rcu_read_unlock(); 4143 4144 if (same_cs) { 4145 if (cs == &top_cpuset) 4146 return; 4147 4148 set_cpus_allowed_ptr(task, current->cpus_ptr); 4149 task->mems_allowed = current->mems_allowed; 4150 return; 4151 } 4152 4153 /* CLONE_INTO_CGROUP */ 4154 mutex_lock(&cpuset_mutex); 4155 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); 4156 cpuset_attach_task(cs, task); 4157 4158 cs->attach_in_progress--; 4159 if (!cs->attach_in_progress) 4160 wake_up(&cpuset_attach_wq); 4161 4162 mutex_unlock(&cpuset_mutex); 4163 } 4164 4165 struct cgroup_subsys cpuset_cgrp_subsys = { 4166 .css_alloc = cpuset_css_alloc, 4167 .css_online = cpuset_css_online, 4168 .css_offline = cpuset_css_offline, 4169 .css_free = cpuset_css_free, 4170 .can_attach = cpuset_can_attach, 4171 .cancel_attach = cpuset_cancel_attach, 4172 .attach = cpuset_attach, 4173 .post_attach = cpuset_post_attach, 4174 .bind = cpuset_bind, 4175 .can_fork = cpuset_can_fork, 4176 .cancel_fork = cpuset_cancel_fork, 4177 .fork = cpuset_fork, 4178 .legacy_cftypes = legacy_files, 4179 .dfl_cftypes = dfl_files, 4180 .early_init = true, 4181 .threaded = true, 4182 }; 4183 4184 /** 4185 * cpuset_init - initialize cpusets at system boot 4186 * 4187 * Description: Initialize top_cpuset 4188 **/ 4189 4190 int __init cpuset_init(void) 4191 { 4192 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)); 4193 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)); 4194 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL)); 4195 BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL)); 4196 BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL)); 4197 4198 cpumask_setall(top_cpuset.cpus_allowed); 4199 nodes_setall(top_cpuset.mems_allowed); 4200 cpumask_setall(top_cpuset.effective_cpus); 4201 cpumask_setall(top_cpuset.effective_xcpus); 4202 cpumask_setall(top_cpuset.exclusive_cpus); 4203 nodes_setall(top_cpuset.effective_mems); 4204 4205 fmeter_init(&top_cpuset.fmeter); 4206 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); 4207 top_cpuset.relax_domain_level = -1; 4208 INIT_LIST_HEAD(&remote_children); 4209 4210 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)); 4211 4212 return 0; 4213 } 4214 4215 /* 4216 * If CPU and/or memory hotplug handlers, below, unplug any CPUs 4217 * or memory nodes, we need to walk over the cpuset hierarchy, 4218 * removing that CPU or node from all cpusets. 
If this removes the 4219 * last CPU or node from a cpuset, then move the tasks in the empty 4220 * cpuset to its next-highest non-empty parent. 4221 */ 4222 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) 4223 { 4224 struct cpuset *parent; 4225 4226 /* 4227 * Find its next-highest non-empty parent, (top cpuset 4228 * has online cpus, so can't be empty). 4229 */ 4230 parent = parent_cs(cs); 4231 while (cpumask_empty(parent->cpus_allowed) || 4232 nodes_empty(parent->mems_allowed)) 4233 parent = parent_cs(parent); 4234 4235 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { 4236 pr_err("cpuset: failed to transfer tasks out of empty cpuset "); 4237 pr_cont_cgroup_name(cs->css.cgroup); 4238 pr_cont("\n"); 4239 } 4240 } 4241 4242 static void 4243 hotplug_update_tasks_legacy(struct cpuset *cs, 4244 struct cpumask *new_cpus, nodemask_t *new_mems, 4245 bool cpus_updated, bool mems_updated) 4246 { 4247 bool is_empty; 4248 4249 spin_lock_irq(&callback_lock); 4250 cpumask_copy(cs->cpus_allowed, new_cpus); 4251 cpumask_copy(cs->effective_cpus, new_cpus); 4252 cs->mems_allowed = *new_mems; 4253 cs->effective_mems = *new_mems; 4254 spin_unlock_irq(&callback_lock); 4255 4256 /* 4257 * Don't call update_tasks_cpumask() if the cpuset becomes empty, 4258 * as the tasks will be migrated to an ancestor. 4259 */ 4260 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) 4261 update_tasks_cpumask(cs, new_cpus); 4262 if (mems_updated && !nodes_empty(cs->mems_allowed)) 4263 update_tasks_nodemask(cs); 4264 4265 is_empty = cpumask_empty(cs->cpus_allowed) || 4266 nodes_empty(cs->mems_allowed); 4267 4268 /* 4269 * Move tasks to the nearest ancestor with execution resources, 4270 * This is full cgroup operation which will also call back into 4271 * cpuset. Should be done outside any lock. 4272 */ 4273 if (is_empty) { 4274 mutex_unlock(&cpuset_mutex); 4275 remove_tasks_in_empty_cpuset(cs); 4276 mutex_lock(&cpuset_mutex); 4277 } 4278 } 4279 4280 static void 4281 hotplug_update_tasks(struct cpuset *cs, 4282 struct cpumask *new_cpus, nodemask_t *new_mems, 4283 bool cpus_updated, bool mems_updated) 4284 { 4285 /* A partition root is allowed to have empty effective cpus */ 4286 if (cpumask_empty(new_cpus) && !is_partition_valid(cs)) 4287 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); 4288 if (nodes_empty(*new_mems)) 4289 *new_mems = parent_cs(cs)->effective_mems; 4290 4291 spin_lock_irq(&callback_lock); 4292 cpumask_copy(cs->effective_cpus, new_cpus); 4293 cs->effective_mems = *new_mems; 4294 spin_unlock_irq(&callback_lock); 4295 4296 if (cpus_updated) 4297 update_tasks_cpumask(cs, new_cpus); 4298 if (mems_updated) 4299 update_tasks_nodemask(cs); 4300 } 4301 4302 static bool force_rebuild; 4303 4304 void cpuset_force_rebuild(void) 4305 { 4306 force_rebuild = true; 4307 } 4308 4309 /** 4310 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug 4311 * @cs: cpuset in interest 4312 * @tmp: the tmpmasks structure pointer 4313 * 4314 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone 4315 * offline, update @cs accordingly. If @cs ends up with no CPU or memory, 4316 * all its tasks are moved to the nearest ancestor with both resources. 
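 *
 * Note that the task migration only happens on the legacy hierarchy, via
 * hotplug_update_tasks_legacy() above; in the default (v2) mode,
 * hotplug_update_tasks() instead lets an emptied cpuset fall back to its
 * parent's effective masks.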
4317 */ 4318 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) 4319 { 4320 static cpumask_t new_cpus; 4321 static nodemask_t new_mems; 4322 bool cpus_updated; 4323 bool mems_updated; 4324 bool remote; 4325 struct cpuset *parent; 4326 retry: 4327 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); 4328 4329 mutex_lock(&cpuset_mutex); 4330 4331 /* 4332 * We have raced with task attaching. We wait until attaching 4333 * is finished, so we won't attach a task to an empty cpuset. 4334 */ 4335 if (cs->attach_in_progress) { 4336 mutex_unlock(&cpuset_mutex); 4337 goto retry; 4338 } 4339 4340 parent = parent_cs(cs); 4341 compute_effective_cpumask(&new_cpus, cs, parent); 4342 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); 4343 4344 if (!tmp || !cs->partition_root_state) 4345 goto update_tasks; 4346 4347 /* 4348 * Compute effective_cpus for a valid partition root; this may invalidate 4349 * child partition roots if necessary. 4350 */ 4351 remote = is_remote_partition(cs); 4352 if (remote || (is_partition_valid(cs) && is_partition_valid(parent))) 4353 compute_partition_effective_cpumask(cs, &new_cpus); 4354 4355 if (remote && cpumask_empty(&new_cpus) && 4356 partition_is_populated(cs, NULL)) { 4357 remote_partition_disable(cs, tmp); 4358 compute_effective_cpumask(&new_cpus, cs, parent); 4359 remote = false; 4360 cpuset_force_rebuild(); 4361 } 4362 4363 /* 4364 * Force the partition to become invalid if either one of 4365 * the following conditions holds: 4366 * 1) empty effective cpus but not valid empty partition. 4367 * 2) parent is invalid or doesn't grant any cpus to child 4368 * partitions. 4369 */ 4370 if (is_local_partition(cs) && (!is_partition_valid(parent) || 4371 tasks_nocpu_error(parent, cs, &new_cpus))) { 4372 update_parent_effective_cpumask(cs, partcmd_invalidate, NULL, tmp); 4373 compute_effective_cpumask(&new_cpus, cs, parent); 4374 cpuset_force_rebuild(); 4375 } 4376 /* 4377 * On the other hand, an invalid partition root may be transitioned 4378 * back to a regular one. 4379 */ 4380 else if (is_partition_valid(parent) && is_partition_invalid(cs)) { 4381 update_parent_effective_cpumask(cs, partcmd_update, NULL, tmp); 4382 if (is_partition_valid(cs)) { 4383 compute_partition_effective_cpumask(cs, &new_cpus); 4384 cpuset_force_rebuild(); 4385 } 4386 } 4387 4388 update_tasks: 4389 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); 4390 mems_updated = !nodes_equal(new_mems, cs->effective_mems); 4391 if (!cpus_updated && !mems_updated) 4392 goto unlock; /* Hotplug doesn't affect this cpuset */ 4393 4394 if (mems_updated) 4395 check_insane_mems_config(&new_mems); 4396 4397 if (is_in_v2_mode()) 4398 hotplug_update_tasks(cs, &new_cpus, &new_mems, 4399 cpus_updated, mems_updated); 4400 else 4401 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, 4402 cpus_updated, mems_updated); 4403 4404 unlock: 4405 mutex_unlock(&cpuset_mutex); 4406 } 4407 4408 /** 4409 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset 4410 * @work: unused 4411 * 4412 * This function is called after either CPU or memory configuration has 4413 * changed and updates cpusets accordingly. The top_cpuset is always 4414 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in 4415 * order to make cpusets transparent (of no effect) on systems that are 4416 * actively using CPU hotplug but making no active use of cpusets. 4417 * 4418 * Non-root cpusets are only affected by offlining.
If any CPUs or memory 4419 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on 4420 * all descendants. 4421 * 4422 * Note that CPU offlining during suspend is ignored. We don't modify 4423 * cpusets across suspend/resume cycles at all. 4424 */ 4425 static void cpuset_hotplug_workfn(struct work_struct *work) 4426 { 4427 static cpumask_t new_cpus; 4428 static nodemask_t new_mems; 4429 bool cpus_updated, mems_updated; 4430 bool on_dfl = is_in_v2_mode(); 4431 struct tmpmasks tmp, *ptmp = NULL; 4432 4433 if (on_dfl && !alloc_cpumasks(NULL, &tmp)) 4434 ptmp = &tmp; 4435 4436 mutex_lock(&cpuset_mutex); 4437 4438 /* fetch the available cpus/mems and find out which changed how */ 4439 cpumask_copy(&new_cpus, cpu_active_mask); 4440 new_mems = node_states[N_MEMORY]; 4441 4442 /* 4443 * If subpartitions_cpus is populated, it is likely that the check 4444 * below will produce a false positive on cpus_updated when the cpu 4445 * list isn't changed. It is extra work, but it is better to be safe. 4446 */ 4447 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) || 4448 !cpumask_empty(subpartitions_cpus); 4449 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); 4450 4451 /* 4452 * In the rare case that hotplug removes all the cpus in 4453 * subpartitions_cpus, we assumed that cpus are updated. 4454 */ 4455 if (!cpus_updated && top_cpuset.nr_subparts) 4456 cpus_updated = true; 4457 4458 /* For v1, synchronize cpus_allowed to cpu_active_mask */ 4459 if (cpus_updated) { 4460 spin_lock_irq(&callback_lock); 4461 if (!on_dfl) 4462 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); 4463 /* 4464 * Make sure that CPUs allocated to child partitions 4465 * do not show up in effective_cpus. If no CPU is left, 4466 * we clear the subpartitions_cpus & let the child partitions 4467 * fight for the CPUs again. 4468 */ 4469 if (!cpumask_empty(subpartitions_cpus)) { 4470 if (cpumask_subset(&new_cpus, subpartitions_cpus)) { 4471 top_cpuset.nr_subparts = 0; 4472 cpumask_clear(subpartitions_cpus); 4473 } else { 4474 cpumask_andnot(&new_cpus, &new_cpus, 4475 subpartitions_cpus); 4476 } 4477 } 4478 cpumask_copy(top_cpuset.effective_cpus, &new_cpus); 4479 spin_unlock_irq(&callback_lock); 4480 /* we don't mess with cpumasks of tasks in top_cpuset */ 4481 } 4482 4483 /* synchronize mems_allowed to N_MEMORY */ 4484 if (mems_updated) { 4485 spin_lock_irq(&callback_lock); 4486 if (!on_dfl) 4487 top_cpuset.mems_allowed = new_mems; 4488 top_cpuset.effective_mems = new_mems; 4489 spin_unlock_irq(&callback_lock); 4490 update_tasks_nodemask(&top_cpuset); 4491 } 4492 4493 mutex_unlock(&cpuset_mutex); 4494 4495 /* if cpus or mems changed, we need to propagate to descendants */ 4496 if (cpus_updated || mems_updated) { 4497 struct cpuset *cs; 4498 struct cgroup_subsys_state *pos_css; 4499 4500 rcu_read_lock(); 4501 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { 4502 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) 4503 continue; 4504 rcu_read_unlock(); 4505 4506 cpuset_hotplug_update_tasks(cs, ptmp); 4507 4508 rcu_read_lock(); 4509 css_put(&cs->css); 4510 } 4511 rcu_read_unlock(); 4512 } 4513 4514 /* rebuild sched domains if cpus_allowed has changed */ 4515 if (cpus_updated || force_rebuild) { 4516 force_rebuild = false; 4517 rebuild_sched_domains(); 4518 } 4519 4520 free_cpumasks(NULL, ptmp); 4521 } 4522 4523 void cpuset_update_active_cpus(void) 4524 { 4525 /* 4526 * We're inside cpu hotplug critical region which usually nests 4527 * inside cgroup synchronization. 
Bounce actual hotplug processing 4528 * to a work item to avoid reverse locking order. 4529 */ 4530 schedule_work(&cpuset_hotplug_work); 4531 } 4532 4533 void cpuset_wait_for_hotplug(void) 4534 { 4535 flush_work(&cpuset_hotplug_work); 4536 } 4537 4538 /* 4539 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. 4540 * Call this routine anytime after node_states[N_MEMORY] changes. 4541 * See cpuset_update_active_cpus() for CPU hotplug handling. 4542 */ 4543 static int cpuset_track_online_nodes(struct notifier_block *self, 4544 unsigned long action, void *arg) 4545 { 4546 schedule_work(&cpuset_hotplug_work); 4547 return NOTIFY_OK; 4548 } 4549 4550 /** 4551 * cpuset_init_smp - initialize cpus_allowed 4552 * 4553 * Description: Finish top cpuset after the cpu and node maps are initialized 4554 */ 4555 void __init cpuset_init_smp(void) 4556 { 4557 /* 4558 * cpus_allowed/mems_allowed set to v2 values in the initial 4559 * cpuset_bind() call will be reset to v1 values in another 4560 * cpuset_bind() call when v1 cpuset is mounted. 4561 */ 4562 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; 4563 4564 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); 4565 top_cpuset.effective_mems = node_states[N_MEMORY]; 4566 4567 hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI); 4568 4569 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); 4570 BUG_ON(!cpuset_migrate_mm_wq); 4571 } 4572 4573 /** 4574 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset. 4575 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. 4576 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. 4577 * 4578 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset 4579 * attached to the specified @tsk. Guaranteed to return some non-empty 4580 * subset of cpu_online_mask, even if this means going outside the 4581 * task's cpuset, except when the task is in the top cpuset. 4582 **/ 4583 4584 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) 4585 { 4586 unsigned long flags; 4587 struct cpuset *cs; 4588 4589 spin_lock_irqsave(&callback_lock, flags); 4590 rcu_read_lock(); 4591 4592 cs = task_cs(tsk); 4593 if (cs != &top_cpuset) 4594 guarantee_online_cpus(tsk, pmask); 4595 /* 4596 * Tasks in the top cpuset won't have their cpumasks updated 4597 * when a hotplug online/offline event happens. So we include all 4598 * offline cpus in the allowed cpu list. 4599 */ 4600 if ((cs == &top_cpuset) || cpumask_empty(pmask)) { 4601 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk); 4602 4603 /* 4604 * We first exclude cpus allocated to partitions. If there is no 4605 * allowable online cpu left, we fall back to all possible cpus. 4606 */ 4607 cpumask_andnot(pmask, possible_mask, subpartitions_cpus); 4608 if (!cpumask_intersects(pmask, cpu_online_mask)) 4609 cpumask_copy(pmask, possible_mask); 4610 } 4611 4612 rcu_read_unlock(); 4613 spin_unlock_irqrestore(&callback_lock, flags); 4614 } 4615 4616 /** 4617 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe. 4618 * @tsk: pointer to task_struct with which the scheduler is struggling 4619 * 4620 * Description: In the case that the scheduler cannot find an allowed cpu in 4621 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy 4622 * mode, however, this value is the same as task_cs(tsk)->effective_cpus, 4623 * which will not contain a sane cpumask during cases such as cpu hotplugging.
4624 * This is the absolute last resort for the scheduler and it is only used if 4625 * _every_ other avenue has been traveled. 4626 * 4627 * Returns true if the affinity of @tsk was changed, false otherwise. 4628 **/ 4629 4630 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk) 4631 { 4632 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk); 4633 const struct cpumask *cs_mask; 4634 bool changed = false; 4635 4636 rcu_read_lock(); 4637 cs_mask = task_cs(tsk)->cpus_allowed; 4638 if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) { 4639 do_set_cpus_allowed(tsk, cs_mask); 4640 changed = true; 4641 } 4642 rcu_read_unlock(); 4643 4644 /* 4645 * We own tsk->cpus_allowed, nobody can change it under us. 4646 * 4647 * But we used cs && cs->cpus_allowed lockless and thus can 4648 * race with cgroup_attach_task() or update_cpumask() and get 4649 * the wrong tsk->cpus_allowed. However, both cases imply the 4650 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() 4651 * which takes task_rq_lock(). 4652 * 4653 * If we are called after it dropped the lock we must see all 4654 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily 4655 * set any mask even if it is not right from task_cs() pov; 4656 * the pending set_cpus_allowed_ptr() will fix things. 4657 * 4658 * select_fallback_rq() will fix things up and set cpu_possible_mask 4659 * if required. 4660 */ 4661 return changed; 4662 } 4663 4664 void __init cpuset_init_current_mems_allowed(void) 4665 { 4666 nodes_setall(current->mems_allowed); 4667 } 4668 4669 /** 4670 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset. 4671 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. 4672 * 4673 * Description: Returns the nodemask_t mems_allowed of the cpuset 4674 * attached to the specified @tsk. Guaranteed to return some non-empty 4675 * subset of node_states[N_MEMORY], even if this means going outside the 4676 * task's cpuset. 4677 **/ 4678 4679 nodemask_t cpuset_mems_allowed(struct task_struct *tsk) 4680 { 4681 nodemask_t mask; 4682 unsigned long flags; 4683 4684 spin_lock_irqsave(&callback_lock, flags); 4685 rcu_read_lock(); 4686 guarantee_online_mems(task_cs(tsk), &mask); 4687 rcu_read_unlock(); 4688 spin_unlock_irqrestore(&callback_lock, flags); 4689 4690 return mask; 4691 } 4692 4693 /** 4694 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed 4695 * @nodemask: the nodemask to be checked 4696 * 4697 * Are any of the nodes in the nodemask allowed in current->mems_allowed? 4698 */ 4699 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) 4700 { 4701 return nodes_intersects(*nodemask, current->mems_allowed); 4702 } 4703 4704 /* 4705 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or 4706 * mem_hardwall ancestor to the specified cpuset. Call holding 4707 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall 4708 * (an unusual configuration), then returns the root cpuset. 4709 */ 4710 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) 4711 { 4712 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) 4713 cs = parent_cs(cs); 4714 return cs; 4715 } 4716 4717 /* 4718 * cpuset_node_allowed - Can we allocate on a memory node? 4719 * @node: is this an allowed node? 4720 * @gfp_mask: memory allocation flags 4721 * 4722 * If we're in interrupt, yes, we can always allocate. If @node is set in 4723 * current's mems_allowed, yes.
If it's not a __GFP_HARDWALL request and this 4724 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset, 4725 * yes. If current has access to memory reserves as an oom victim, yes. 4726 * Otherwise, no. 4727 * 4728 * GFP_USER allocations are marked with the __GFP_HARDWALL bit, 4729 * and do not allow allocations outside the current task's cpuset 4730 * unless the task has been OOM killed. 4731 * GFP_KERNEL allocations are not so marked, so can escape to the 4732 * nearest enclosing hardwalled ancestor cpuset. 4733 * 4734 * Scanning up parent cpusets requires callback_lock. The 4735 * __alloc_pages() routine only calls here with the __GFP_HARDWALL bit 4736 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the 4737 * current task's mems_allowed came up empty on the first pass over 4738 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the 4739 * cpuset are short of memory, might require taking the callback_lock. 4740 * 4741 * The first call here from mm/page_alloc:get_page_from_freelist() 4742 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, 4743 * so no allocation on a node outside the cpuset is allowed (unless 4744 * in interrupt, of course). 4745 * 4746 * The second pass through get_page_from_freelist() doesn't even call 4747 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() 4748 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set 4749 * in alloc_flags. That logic and the checks below have the combined 4750 * effect that: 4751 * in_interrupt - any node ok (current task context irrelevant) 4752 * GFP_ATOMIC - any node ok 4753 * tsk_is_oom_victim - any node ok 4754 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok 4755 * GFP_USER - only nodes in the current task's mems_allowed ok. 4756 */ 4757 bool cpuset_node_allowed(int node, gfp_t gfp_mask) 4758 { 4759 struct cpuset *cs; /* current cpuset ancestors */ 4760 bool allowed; /* is allocation in zone z allowed? */ 4761 unsigned long flags; 4762 4763 if (in_interrupt()) 4764 return true; 4765 if (node_isset(node, current->mems_allowed)) 4766 return true; 4767 /* 4768 * Allow tasks that have access to memory reserves because they have 4769 * been OOM killed to get memory anywhere. 4770 */ 4771 if (unlikely(tsk_is_oom_victim(current))) 4772 return true; 4773 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ 4774 return false; 4775 4776 if (current->flags & PF_EXITING) /* Let dying task have memory */ 4777 return true; 4778 4779 /* Not hardwall and node outside mems_allowed: scan up cpusets */ 4780 spin_lock_irqsave(&callback_lock, flags); 4781 4782 rcu_read_lock(); 4783 cs = nearest_hardwall_ancestor(task_cs(current)); 4784 allowed = node_isset(node, cs->mems_allowed); 4785 rcu_read_unlock(); 4786 4787 spin_unlock_irqrestore(&callback_lock, flags); 4788 return allowed; 4789 } 4790 4791 /** 4792 * cpuset_spread_node() - On which node to begin search for a page 4793 * @rotor: round robin rotor 4794 * 4795 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for 4796 * tasks in a cpuset with is_spread_page or is_spread_slab set), 4797 * and if the memory allocation used cpuset_mem_spread_node() 4798 * to determine on which node to start looking, as it will for 4799 * certain page cache or slab cache pages such as those used for file 4800 * system buffers and inode caches, then instead of starting on the 4801 * local node to look for a free page, spread the starting 4802 * node around the task's mems_allowed nodes.
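 *
 * As a small, illustrative example (values assumed): with mems_allowed
 * spanning nodes 0-3 and the rotor currently at node 1, successive calls
 * return 2, 3, 0, 1, 2, ... since next_node_in() wraps around within the
 * task's nodemask.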
4803 * 4804 * We don't have to worry about the returned node being offline 4805 * because "it can't happen", and even if it did, it would be ok. 4806 * 4807 * The routines calling guarantee_online_mems() are careful to 4808 * only set nodes in task->mems_allowed that are online. So it 4809 * should not be possible for the following code to return an 4810 * offline node. But if it did, that would be ok, as this routine 4811 * is not returning the node where the allocation must be, only 4812 * the node where the search should start. The zonelist passed to 4813 * __alloc_pages() will include all nodes. If the slab allocator 4814 * is passed an offline node, it will fall back to the local node. 4815 * See kmem_cache_alloc_node(). 4816 */ 4817 static int cpuset_spread_node(int *rotor) 4818 { 4819 return *rotor = next_node_in(*rotor, current->mems_allowed); 4820 } 4821 4822 /** 4823 * cpuset_mem_spread_node() - On which node to begin search for a file page 4824 */ 4825 int cpuset_mem_spread_node(void) 4826 { 4827 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) 4828 current->cpuset_mem_spread_rotor = 4829 node_random(¤t->mems_allowed); 4830 4831 return cpuset_spread_node(¤t->cpuset_mem_spread_rotor); 4832 } 4833 4834 /** 4835 * cpuset_slab_spread_node() - On which node to begin search for a slab page 4836 */ 4837 int cpuset_slab_spread_node(void) 4838 { 4839 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) 4840 current->cpuset_slab_spread_rotor = 4841 node_random(¤t->mems_allowed); 4842 4843 return cpuset_spread_node(¤t->cpuset_slab_spread_rotor); 4844 } 4845 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); 4846 4847 /** 4848 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? 4849 * @tsk1: pointer to task_struct of some task. 4850 * @tsk2: pointer to task_struct of some other task. 4851 * 4852 * Description: Return true if @tsk1's mems_allowed intersects the 4853 * mems_allowed of @tsk2. Used by the OOM killer to determine if 4854 * one of the task's memory usage might impact the memory available 4855 * to the other. 4856 **/ 4857 4858 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, 4859 const struct task_struct *tsk2) 4860 { 4861 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); 4862 } 4863 4864 /** 4865 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed 4866 * 4867 * Description: Prints current's name, cpuset name, and cached copy of its 4868 * mems_allowed to the kernel log. 4869 */ 4870 void cpuset_print_current_mems_allowed(void) 4871 { 4872 struct cgroup *cgrp; 4873 4874 rcu_read_lock(); 4875 4876 cgrp = task_cs(current)->css.cgroup; 4877 pr_cont(",cpuset="); 4878 pr_cont_cgroup_name(cgrp); 4879 pr_cont(",mems_allowed=%*pbl", 4880 nodemask_pr_args(¤t->mems_allowed)); 4881 4882 rcu_read_unlock(); 4883 } 4884 4885 /* 4886 * Collection of memory_pressure is suppressed unless 4887 * this flag is enabled by writing "1" to the special 4888 * cpuset file 'memory_pressure_enabled' in the root cpuset. 4889 */ 4890 4891 int cpuset_memory_pressure_enabled __read_mostly; 4892 4893 /* 4894 * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. 4895 * 4896 * Keep a running average of the rate of synchronous (direct) 4897 * page reclaim efforts initiated by tasks in each cpuset. 
4898 * 4899 * This represents the rate at which some task in the cpuset 4900 * ran low on memory on all nodes it was allowed to use, and 4901 * had to enter the kernel's page reclaim code in an effort to 4902 * create more free memory by tossing clean pages or swapping 4903 * or writing dirty pages. 4904 * 4905 * Displayed to user space in the per-cpuset read-only file 4906 * "memory_pressure". Value displayed is an integer 4907 * representing the recent rate of entry into the synchronous 4908 * (direct) page reclaim by any task attached to the cpuset. 4909 */ 4910 4911 void __cpuset_memory_pressure_bump(void) 4912 { 4913 rcu_read_lock(); 4914 fmeter_markevent(&task_cs(current)->fmeter); 4915 rcu_read_unlock(); 4916 } 4917 4918 #ifdef CONFIG_PROC_PID_CPUSET 4919 /* 4920 * proc_cpuset_show() 4921 * - Print the task's cpuset path into the seq_file. 4922 * - Used for /proc/<pid>/cpuset. 4923 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it 4924 * doesn't really matter if tsk->cpuset changes after we read it, 4925 * and we take cpuset_mutex, keeping cpuset_attach() from changing it 4926 * anyway. 4927 */ 4928 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, 4929 struct pid *pid, struct task_struct *tsk) 4930 { 4931 char *buf; 4932 struct cgroup_subsys_state *css; 4933 int retval; 4934 4935 retval = -ENOMEM; 4936 buf = kmalloc(PATH_MAX, GFP_KERNEL); 4937 if (!buf) 4938 goto out; 4939 4940 css = task_get_css(tsk, cpuset_cgrp_id); 4941 retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, 4942 current->nsproxy->cgroup_ns); 4943 css_put(css); 4944 if (retval >= PATH_MAX) 4945 retval = -ENAMETOOLONG; 4946 if (retval < 0) 4947 goto out_free; 4948 seq_puts(m, buf); 4949 seq_putc(m, '\n'); 4950 retval = 0; 4951 out_free: 4952 kfree(buf); 4953 out: 4954 return retval; 4955 } 4956 #endif /* CONFIG_PROC_PID_CPUSET */ 4957 4958 /* Display task mems_allowed in /proc/<pid>/status file. */ 4959 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) 4960 { 4961 seq_printf(m, "Mems_allowed:\t%*pb\n", 4962 nodemask_pr_args(&task->mems_allowed)); 4963 seq_printf(m, "Mems_allowed_list:\t%*pbl\n", 4964 nodemask_pr_args(&task->mems_allowed)); 4965 } 4966