/*
 * kernel/cpuset.c
 *
 * Processor and Memory placement constraints for sets of tasks.
 *
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
 * Copyright (C) 2006 Google, Inc
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * 2006 Rework by Paul Menage to use generic cgroups
 * 2008 Rework of the scheduler domains and CPU hotplug handling
 *      by Max Krasnyansky
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/deadline.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/time64.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>
#include <linux/oom.h>
#include <linux/sched/isolation.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset. They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective masks.
	 */

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * CPUs allocated to child sub-partitions (default hierarchy only)
	 * - CPUs granted by the parent = effective_cpus U subparts_cpus
	 * - effective_cpus and subparts_cpus are mutually exclusive.
	 *
	 * effective_cpus contains only onlined CPUs, but subparts_cpus
	 * may have offlined ones.
	 */
	cpumask_var_t subparts_cpus;

	/*
	 * This is the old Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemasks updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;

	/* number of CPUs in subparts_cpus */
	int nr_subparts_cpus;

	/* partition root state */
	int partition_root_state;

	/*
	 * Default hierarchy only:
	 * use_parent_ecpus - set if using parent's effective_cpus
	 * child_ecpus_count - # of children with use_parent_ecpus set
	 */
	int use_parent_ecpus;
	int child_ecpus_count;
};

/*
 * Partition root states:
 *
 *   0 - not a partition root
 *
 *   1 - partition root
 *
 *  -1 - invalid partition root
 *       None of the cpus in cpus_allowed can be put into the parent's
 *       subparts_cpus. In this case, the cpuset is not a real partition
 *       root anymore.  However, the CPU_EXCLUSIVE bit will still be set
 *       and the cpuset can be restored back to a partition root if the
 *       parent cpuset can give more CPUs back to this child cpuset.
 */
#define PRS_DISABLED		0
#define PRS_ENABLED		1
#define PRS_ERROR		-1
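
/*
 * For illustration only (the cgroup paths below are examples, not part of
 * this file): on the default hierarchy the partition root state is driven
 * from userspace through the "cpuset.cpus.partition" control file:
 *
 *	# echo "2-3"  > /sys/fs/cgroup/p1/cpuset.cpus
 *	# echo root   > /sys/fs/cgroup/p1/cpuset.cpus.partition   (PRS_ENABLED)
 *	# echo member > /sys/fs/cgroup/p1/cpuset.cpus.partition   (PRS_DISABLED)
 *
 * A partition root whose CPUs can no longer be granted by its parent reads
 * back as "root invalid" (PRS_ERROR).
 */
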
/*
 * Temporary cpumasks for working with partitions that are passed among
 * functions to avoid memory allocation in inner functions.
 */
struct tmpmasks {
	cpumask_var_t addmask, delmask;	/* For partition root */
	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
};

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}

/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static inline int is_partition_root(const struct cpuset *cs)
{
	return cs->partition_root_state > 0;
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
	.partition_root_state = PRS_ENABLED,
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip a subtree.  @root_cs is included in the
 * iteration and the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)	\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))

/*
 * There are two global locks guarding cpuset structures - cpuset_mutex and
 * callback_lock. We also require taking task_lock() when dereferencing a
 * task's cpuset pointer. See "The task_lock() exception", at the end of this
 * comment.
 *
 * A task must hold both locks to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_lock and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_lock to query cpusets.
 * Once it is ready to make the changes, it takes callback_lock, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_lock, as that would risk double tripping on callback_lock
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_lock, then it has read-only
 * access to cpusets.
 *
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by another task, so we use alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_lock across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);

void cpuset_read_lock(void)
{
	percpu_down_read(&cpuset_rwsem);
}

void cpuset_read_unlock(void)
{
	percpu_up_read(&cpuset_rwsem);
}

static DEFINE_SPINLOCK(callback_lock);

static struct workqueue_struct *cpuset_migrate_mm_wq;

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
 * With v2 behavior, "cpus" and "mems" are always what the users have
 * requested and won't be changed by hotplug events. Only the effective
 * cpus or mems will be affected.
 */
static inline bool is_in_v2_mode(void)
{
	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
}
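
/*
 * As an illustrative example (the mount point shown is just one choice),
 * v2 behavior can be requested on a v1 hierarchy with the mount option
 * checked above:
 *
 *	# mount -t cgroup -o cpuset,cpuset_v2_mode none /sys/fs/cgroup/cpuset
 */
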
/*
 * Return in pmask the portion of a task's cpuset's cpus_allowed that
 * are online and are capable of running the task.  If none are found,
 * walk up the cpuset hierarchy until we find one that does have some
 * appropriate cpus.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_cpus(struct task_struct *tsk,
				  struct cpumask *pmask)
{
	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
	struct cpuset *cs;

	if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
		cpumask_copy(pmask, cpu_online_mask);

	rcu_read_lock();
	cs = task_cs(tsk);

	while (!cpumask_intersects(cs->effective_cpus, pmask)) {
		cs = parent_cs(cs);
		if (unlikely(!cs)) {
			/*
			 * The top cpuset doesn't have any online cpu as a
			 * consequence of a race between cpuset_hotplug_work
			 * and cpu hotplug notifier.  But we know the top
			 * cpuset's effective_cpus is on its way to be
			 * identical to cpu_online_mask.
			 */
			goto out_unlock;
		}
	}
	cpumask_and(pmask, pmask, cs->effective_cpus);

out_unlock:
	rcu_read_unlock();
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					   struct task_struct *tsk)
{
	if (is_spread_page(cs))
		task_set_spread_page(tsk);
	else
		task_clear_spread_page(tsk);

	if (is_spread_slab(cs))
		task_set_spread_slab(tsk);
	else
		task_clear_spread_slab(tsk);
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}
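
/*
 * For example, a cpuset with cpus "0-1", mems "0" and no exclusive flags is
 * a subset of a cpuset with cpus "0-3", mems "0-1" and cpu_exclusive set,
 * but not the other way around.
 */
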
/**
 * alloc_cpumasks - allocate three cpumasks for cpuset
 * @cs:  the cpuset that has cpumasks to be allocated.
 * @tmp: the tmpmasks structure pointer
 * Return: 0 if successful, -ENOMEM otherwise.
 *
 * Only one of the two input arguments should be non-NULL.
 */
static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	cpumask_var_t *pmask1, *pmask2, *pmask3;

	if (cs) {
		pmask1 = &cs->cpus_allowed;
		pmask2 = &cs->effective_cpus;
		pmask3 = &cs->subparts_cpus;
	} else {
		pmask1 = &tmp->new_cpus;
		pmask2 = &tmp->addmask;
		pmask3 = &tmp->delmask;
	}

	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
		goto free_one;

	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
		goto free_two;

	return 0;

free_two:
	free_cpumask_var(*pmask2);
free_one:
	free_cpumask_var(*pmask1);
	return -ENOMEM;
}

/**
 * free_cpumasks - free cpumasks in a tmpmasks structure
 * @cs:  the cpuset that has cpumasks to be freed.
 * @tmp: the tmpmasks structure pointer
 */
static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	if (cs) {
		free_cpumask_var(cs->cpus_allowed);
		free_cpumask_var(cs->effective_cpus);
		free_cpumask_var(cs->subparts_cpus);
	}
	if (tmp) {
		free_cpumask_var(tmp->new_cpus);
		free_cpumask_var(tmp->addmask);
		free_cpumask_var(tmp->delmask);
	}
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (alloc_cpumasks(trial, NULL)) {
		kfree(trial);
		return NULL;
	}

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
	return trial;
}

/**
 * free_cpuset - free the cpuset
 * @cs: the cpuset to be freed
 */
static inline void free_cpuset(struct cpuset *cs)
{
	free_cpumasks(cs, NULL);
	kfree(cs);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup_subsys_state *css;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, css, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
	ret = -EACCES;
	if (!is_in_v2_mode() && !is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, css, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * be changed to have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
		if (!cpumask_empty(cur->cpus_allowed) &&
		    cpumask_empty(trial->cpus_allowed))
			goto out;
		if (!nodes_empty(cur->mems_allowed) &&
		    nodes_empty(trial->mems_allowed))
			goto out;
	}

	/*
	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
	 * tasks.
	 */
	ret = -EBUSY;
	if (is_cpu_exclusive(cur) &&
	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
				       trial->cpus_allowed))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/* Must be called with cpuset_mutex held. */
static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to the kernel/sched/core.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *    cp - cpuset pointer, used (together with pos_css) to perform a
 *	   top-down scan of all cpusets.  For our purposes, rebuilding
 *	   the scheduler's sched domains, we can ignore !is_sched_load_
 *	   balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e. the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed but don't have the same 'pn' partition number,
 *	and places them in the same partition number.  It keeps
 *	looping on the 'restart' label until it can no longer find
 *	any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
static int generate_sched_domains(cpumask_var_t **domains,
				  struct sched_domain_attr **attributes)
{
	struct cpuset *cp;	/* top-down scan of cpusets */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */
	struct cgroup_subsys_state *pos_css;
	bool root_load_balance = is_sched_load_balance(&top_cpuset);

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_and(doms[0], top_cpuset.effective_cpus,
			    housekeeping_cpumask(HK_FLAG_DOMAIN));

		goto done;
	}

	csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	rcu_read_lock();
	if (root_load_balance)
		csa[csn++] = &top_cpuset;
	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
		if (cp == &top_cpuset)
			continue;
		/*
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
		 *
		 * If root is load-balancing, we can skip @cp if it
		 * is a subset of the root's effective_cpus.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    !(is_sched_load_balance(cp) &&
		      cpumask_intersects(cp->cpus_allowed,
					 housekeeping_cpumask(HK_FLAG_DOMAIN))))
			continue;

		if (root_load_balance &&
		    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
			continue;

		if (is_sched_load_balance(cp) &&
		    !cpumask_empty(cp->effective_cpus))
			csa[csn++] = cp;

		/* skip @cp's subtree if not a partition root */
		if (!is_partition_root(cp))
			pos_css = css_rightmost_descendant(pos_css);
	}
	rcu_read_unlock();

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case. No need to abort if alloc fails.
	 */
	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
			      GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
					nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->effective_cpus);
				cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN));
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}
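
/*
 * A worked example of the partition finding above: with load balancing
 * disabled at the root and two sibling cpusets whose effective CPUs are
 * "0-1" and "2-3", both with sched_load_balance set, the two cpusets never
 * overlap, keep distinct 'pn' values and generate_sched_domains() returns
 * ndoms == 2 with doms[] = { "0-1", "2-3" }.  If a third load balanced
 * cpuset "1-2" overlapped both, all three would be merged into a single
 * sched domain covering "0-3".
 */
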
static void update_tasks_root_domain(struct cpuset *cs)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, 0, &it);

	while ((task = css_task_iter_next(&it)))
		dl_add_task_root_domain(task);

	css_task_iter_end(&it);
}

static void rebuild_root_domains(void)
{
	struct cpuset *cs = NULL;
	struct cgroup_subsys_state *pos_css;

	percpu_rwsem_assert_held(&cpuset_rwsem);
	lockdep_assert_cpus_held();
	lockdep_assert_held(&sched_domains_mutex);

	rcu_read_lock();

	/*
	 * Clear default root domain DL accounting, it will be computed again
	 * if a task belongs to it.
	 */
	dl_clear_root_domain(&def_root_domain);

	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {

		if (cpumask_empty(cs->effective_cpus)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		css_get(&cs->css);

		rcu_read_unlock();

		update_tasks_root_domain(cs);

		rcu_read_lock();
		css_put(&cs->css);
	}
	rcu_read_unlock();
}

static void
partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new)
{
	mutex_lock(&sched_domains_mutex);
	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
	rebuild_root_domains();
	mutex_unlock(&sched_domains_mutex);
}

/*
 * Rebuild scheduler domains.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * Call with cpuset_mutex held.  Takes get_online_cpus().
 */
static void rebuild_sched_domains_locked(void)
{
	struct cgroup_subsys_state *pos_css;
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	struct cpuset *cs;
	int ndoms;

	lockdep_assert_cpus_held();
	percpu_rwsem_assert_held(&cpuset_rwsem);

	/*
	 * If we have raced with CPU hotplug, return early to avoid
	 * passing doms with offlined cpu to partition_sched_domains().
	 * Anyways, cpuset_hotplug_workfn() will rebuild sched domains.
	 *
	 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
	 * should be the same as the active CPUs, so checking only top_cpuset
	 * is enough to detect racing CPU offlines.
	 */
	if (!top_cpuset.nr_subparts_cpus &&
	    !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
		return;

	/*
	 * With subpartition CPUs, however, the effective CPUs of a partition
	 * root should be only a subset of the active CPUs.  Since a CPU in any
	 * partition root could be offlined, all must be checked.
	 */
	if (top_cpuset.nr_subparts_cpus) {
		rcu_read_lock();
		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
			if (!is_partition_root(cs)) {
				pos_css = css_rightmost_descendant(pos_css);
				continue;
			}
			if (!cpumask_subset(cs->effective_cpus,
					    cpu_active_mask)) {
				rcu_read_unlock();
				return;
			}
		}
		rcu_read_unlock();
	}

	/* Generate domain masks and attrs */
	ndoms = generate_sched_domains(&doms, &attr);

	/* Have scheduler rebuild the domains */
	partition_and_rebuild_sched_domains(ndoms, doms, attr);
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */

void rebuild_sched_domains(void)
{
	get_online_cpus();
	percpu_down_write(&cpuset_rwsem);
	rebuild_sched_domains_locked();
	percpu_up_write(&cpuset_rwsem);
	put_online_cpus();
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 *
 * Iterate through each task of @cs updating its cpus_allowed to the
 * effective cpuset's.  As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.
 */
static void update_tasks_cpumask(struct cpuset *cs)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		set_cpus_allowed_ptr(task, cs->effective_cpus);
	css_task_iter_end(&it);
}

/**
 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
 * @new_cpus: the temp variable for the new effective_cpus mask
 * @cs: the cpuset that needs to recompute the new effective_cpus mask
 * @parent: the parent cpuset
 *
 * If the parent has subpartition CPUs, include them in the list of
 * allowable CPUs in computing the new effective_cpus mask.  Since offlined
 * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
 * to mask those out.
 */
static void compute_effective_cpumask(struct cpumask *new_cpus,
				      struct cpuset *cs, struct cpuset *parent)
{
	if (parent->nr_subparts_cpus) {
		cpumask_or(new_cpus, parent->effective_cpus,
			   parent->subparts_cpus);
		cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
	} else {
		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
	}
}
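
/*
 * A small worked example of the computation above: if the parent has
 * effective_cpus "0-1" and subparts_cpus "2-3", a child with cpus_allowed
 * "1-2" gets new_cpus = ("0-3") & ("1-2") & cpu_active_mask, i.e. "1-2"
 * when both CPUs are online and just "1" if CPU 2 has been offlined.
 */
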
/*
 * Commands for update_parent_subparts_cpumask
 */
enum subparts_cmd {
	partcmd_enable,		/* Enable partition root */
	partcmd_disable,	/* Disable partition root */
	partcmd_update,		/* Update parent's subparts_cpus */
};

/**
 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
 * @cpuset:  The cpuset that requests change in partition root state
 * @cmd:     Partition root state change command
 * @newmask: Optional new cpumask for partcmd_update
 * @tmp:     Temporary addmask and delmask
 * Return:   0, 1 or an error code
 *
 * For partcmd_enable, the cpuset is being transformed from a non-partition
 * root to a partition root.  The cpus_allowed mask of the given cpuset will
 * be put into parent's subparts_cpus and taken away from parent's
 * effective_cpus.  The function will return 0 if all the CPUs listed in
 * cpus_allowed can be granted or an error code will be returned.
 *
 * For partcmd_disable, the cpuset is being transformed from a partition
 * root back to a non-partition root.  Any CPUs in cpus_allowed that are in
 * parent's subparts_cpus will be taken away from that cpumask and put back
 * into parent's effective_cpus.  0 should always be returned.
 *
 * For partcmd_update, if the optional newmask is specified, the cpu
 * list is to be changed from cpus_allowed to newmask.  Otherwise,
 * cpus_allowed is assumed to remain the same.  The cpuset should either
 * be a partition root or an invalid partition root.  The partition root
 * state may change if newmask is NULL and none of the requested CPUs can
 * be granted by the parent.  The function will return 1 if changes to
 * parent's subparts_cpus and effective_cpus happen or 0 otherwise.
 * Error code should only be returned when newmask is non-NULL.
 *
 * The partcmd_enable and partcmd_disable commands are used by
 * update_prstate().  The partcmd_update command is used by
 * update_cpumasks_hier() with newmask NULL and update_cpumask() with
 * newmask set.
 *
 * The checking is more strict when enabling partition root than the
 * other two commands.
 *
 * Because of the implicit cpu exclusive nature of a partition root,
 * cpumask changes that violate the cpu exclusivity rule will not be
 * permitted when checked by validate_change().  The validate_change()
 * function will also prevent any changes to the cpu list if it is not
 * a superset of children's cpu lists.
 */
static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
					  struct cpumask *newmask,
					  struct tmpmasks *tmp)
{
	struct cpuset *parent = parent_cs(cpuset);
	int adding;	/* Moving cpus from effective_cpus to subparts_cpus */
	int deleting;	/* Moving cpus from subparts_cpus to effective_cpus */
	bool part_error = false;	/* Partition error? */

	percpu_rwsem_assert_held(&cpuset_rwsem);

	/*
	 * The parent must be a partition root.
	 * The new cpumask, if present, or the current cpus_allowed must
	 * not be empty.
	 */
	if (!is_partition_root(parent) ||
	   (newmask && cpumask_empty(newmask)) ||
	   (!newmask && cpumask_empty(cpuset->cpus_allowed)))
		return -EINVAL;

	/*
	 * Enabling/disabling partition root is not allowed if there are
	 * online children.
	 */
	if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css))
		return -EBUSY;

	/*
	 * Enabling partition root is not allowed if not all the CPUs
	 * can be granted from parent's effective_cpus or at least one
	 * CPU will be left after that.
	 */
	if ((cmd == partcmd_enable) &&
	   (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) ||
	     cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus)))
		return -EINVAL;

	/*
	 * A cpumask update cannot make parent's effective_cpus become empty.
	 */
	adding = deleting = false;
	if (cmd == partcmd_enable) {
		cpumask_copy(tmp->addmask, cpuset->cpus_allowed);
		adding = true;
	} else if (cmd == partcmd_disable) {
		deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed,
				       parent->subparts_cpus);
	} else if (newmask) {
		/*
		 * partcmd_update with newmask:
		 *
		 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
		 * addmask = newmask & parent->effective_cpus
		 *		     & ~parent->subparts_cpus
		 */
		cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask);
		deleting = cpumask_and(tmp->delmask, tmp->delmask,
				       parent->subparts_cpus);

		cpumask_and(tmp->addmask, newmask, parent->effective_cpus);
		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
					parent->subparts_cpus);
		/*
		 * Return error if the new effective_cpus could become empty.
		 */
		if (adding &&
		    cpumask_equal(parent->effective_cpus, tmp->addmask)) {
			if (!deleting)
				return -EINVAL;
			/*
			 * As some of the CPUs in subparts_cpus might have
			 * been offlined, we need to compute the real delmask
			 * to confirm that.
			 */
			if (!cpumask_and(tmp->addmask, tmp->delmask,
					 cpu_active_mask))
				return -EINVAL;
			cpumask_copy(tmp->addmask, parent->effective_cpus);
		}
	} else {
		/*
		 * partcmd_update w/o newmask:
		 *
		 * addmask = cpus_allowed & parent->effective_cpus
		 *
		 * Note that parent's subparts_cpus may have been
		 * pre-shrunk in case there is a change in the cpu list.
		 * So no deletion is needed.
		 */
		adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed,
				     parent->effective_cpus);
		part_error = cpumask_equal(tmp->addmask,
					   parent->effective_cpus);
	}

	if (cmd == partcmd_update) {
		int prev_prs = cpuset->partition_root_state;

		/*
		 * Check for possible transition between PRS_ENABLED
		 * and PRS_ERROR.
		 */
		switch (cpuset->partition_root_state) {
		case PRS_ENABLED:
			if (part_error)
				cpuset->partition_root_state = PRS_ERROR;
			break;
		case PRS_ERROR:
			if (!part_error)
				cpuset->partition_root_state = PRS_ENABLED;
			break;
		}
		/*
		 * Set part_error if previously in invalid state.
		 */
		part_error = (prev_prs == PRS_ERROR);
	}

	if (!part_error && (cpuset->partition_root_state == PRS_ERROR))
		return 0;	/* Nothing needs to be done */

	if (cpuset->partition_root_state == PRS_ERROR) {
		/*
		 * Remove all its cpus from parent's subparts_cpus.
		 */
		adding = false;
		deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed,
				       parent->subparts_cpus);
	}

	if (!adding && !deleting)
		return 0;

	/*
	 * Change the parent's subparts_cpus.
	 * Newly added CPUs will be removed from effective_cpus and
	 * newly deleted ones will be added back to effective_cpus.
	 */
	spin_lock_irq(&callback_lock);
	if (adding) {
		cpumask_or(parent->subparts_cpus,
			   parent->subparts_cpus, tmp->addmask);
		cpumask_andnot(parent->effective_cpus,
			       parent->effective_cpus, tmp->addmask);
	}
	if (deleting) {
		cpumask_andnot(parent->subparts_cpus,
			       parent->subparts_cpus, tmp->delmask);
		/*
		 * Some of the CPUs in subparts_cpus might have been offlined.
		 */
		cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
		cpumask_or(parent->effective_cpus,
			   parent->effective_cpus, tmp->delmask);
	}

	parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
	spin_unlock_irq(&callback_lock);

	return cmd == partcmd_update;
}

/*
 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
 * @cs:  the cpuset to consider
 * @tmp: temp variables for calculating effective_cpus & partition setup
 *
 * When configured cpumask is changed, the effective cpumasks of this cpuset
 * and all its descendants need to be updated.
 *
 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
 *
 * Called with cpuset_mutex held
 */
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;
	bool need_rebuild_sched_domains = false;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
		struct cpuset *parent = parent_cs(cp);

		compute_effective_cpumask(tmp->new_cpus, cp, parent);

		/*
		 * If it becomes empty, inherit the effective mask of the
		 * parent, which is guaranteed to have some CPUs.
		 */
		if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
			if (!cp->use_parent_ecpus) {
				cp->use_parent_ecpus = true;
				parent->child_ecpus_count++;
			}
		} else if (cp->use_parent_ecpus) {
			cp->use_parent_ecpus = false;
			WARN_ON_ONCE(!parent->child_ecpus_count);
			parent->child_ecpus_count--;
		}

		/*
		 * Skip the whole subtree if the cpumask remains the same
		 * and has no partition root state.
		 */
		if (!cp->partition_root_state &&
		    cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		/*
		 * update_parent_subparts_cpumask() should have been called
		 * for cs already in update_cpumask().  We should also call
		 * update_tasks_cpumask() again for tasks in the parent
		 * cpuset if the parent's subparts_cpus changes.
		 */
		if ((cp != cs) && cp->partition_root_state) {
			switch (parent->partition_root_state) {
			case PRS_DISABLED:
				/*
				 * If parent is not a partition root or an
				 * invalid partition root, clear its state
				 * and the CS_CPU_EXCLUSIVE flag.
				 */
				WARN_ON_ONCE(cp->partition_root_state
					     != PRS_ERROR);
				cp->partition_root_state = 0;

				/*
				 * clear_bit() is an atomic operation and
				 * readers aren't interested in the state
				 * of CS_CPU_EXCLUSIVE anyway. So we can
				 * just update the flag without holding
				 * the callback_lock.
				 */
				clear_bit(CS_CPU_EXCLUSIVE, &cp->flags);
				break;

			case PRS_ENABLED:
				if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp))
					update_tasks_cpumask(parent);
				break;

			case PRS_ERROR:
				/*
				 * When parent is invalid, child cpusets
				 * have to be too.
				 */
				cp->partition_root_state = PRS_ERROR;
				if (cp->nr_subparts_cpus) {
					cp->nr_subparts_cpus = 0;
					cpumask_clear(cp->subparts_cpus);
				}
				break;
			}
		}

		if (!css_tryget_online(&cp->css))
			continue;
		rcu_read_unlock();

		spin_lock_irq(&callback_lock);

		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
		if (cp->nr_subparts_cpus &&
		   (cp->partition_root_state != PRS_ENABLED)) {
			cp->nr_subparts_cpus = 0;
			cpumask_clear(cp->subparts_cpus);
		} else if (cp->nr_subparts_cpus) {
			/*
			 * Make sure that effective_cpus & subparts_cpus
			 * are mutually exclusive.
			 *
			 * In the unlikely event that effective_cpus
			 * becomes empty, we clear cp->nr_subparts_cpus and
			 * let its child partition roots compete for
			 * CPUs again.
			 */
			cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
				       cp->subparts_cpus);
			if (cpumask_empty(cp->effective_cpus)) {
				cpumask_copy(cp->effective_cpus, tmp->new_cpus);
				cpumask_clear(cp->subparts_cpus);
				cp->nr_subparts_cpus = 0;
			} else if (!cpumask_subset(cp->subparts_cpus,
						   tmp->new_cpus)) {
				cpumask_andnot(cp->subparts_cpus,
					       cp->subparts_cpus, tmp->new_cpus);
				cp->nr_subparts_cpus
					= cpumask_weight(cp->subparts_cpus);
			}
		}
		spin_unlock_irq(&callback_lock);

		WARN_ON(!is_in_v2_mode() &&
			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));

		update_tasks_cpumask(cp);

		/*
		 * On legacy hierarchy, if the effective cpumask of any non-
		 * empty cpuset is changed, we need to rebuild sched domains.
		 * On default hierarchy, the cpuset needs to be a partition
		 * root as well.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    is_sched_load_balance(cp) &&
		   (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
		    is_partition_root(cp)))
			need_rebuild_sched_domains = true;

		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();

	if (need_rebuild_sched_domains)
		rebuild_sched_domains_locked();
}

/**
 * update_sibling_cpumasks - Update siblings cpumasks
 * @parent:  Parent cpuset
 * @cs:      Current cpuset
 * @tmp:     Temp variables
 */
static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
				    struct tmpmasks *tmp)
{
	struct cpuset *sibling;
	struct cgroup_subsys_state *pos_css;

	/*
	 * Check all its siblings and call update_cpumasks_hier()
	 * if their use_parent_ecpus flag is set in order for them
	 * to use the right effective_cpus value.
	 */
	rcu_read_lock();
	cpuset_for_each_child(sibling, pos_css, parent) {
		if (sibling == cs)
			continue;
		if (!sibling->use_parent_ecpus)
			continue;

		update_cpumasks_hier(sibling, tmp);
	}
	rcu_read_unlock();
}

/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
 * @trialcs: trial cpuset
 * @buf: buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
{
	int retval;
	struct tmpmasks tmp;

	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	/*
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
	 */
	if (!*buf) {
		cpumask_clear(trialcs->cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
		if (retval < 0)
			return retval;

		if (!cpumask_subset(trialcs->cpus_allowed,
				    top_cpuset.cpus_allowed))
			return -EINVAL;
	}

	/* Nothing to do if the cpus didn't change */
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
		return 0;

	retval = validate_change(cs, trialcs);
	if (retval < 0)
		return retval;

#ifdef CONFIG_CPUMASK_OFFSTACK
	/*
	 * Use the cpumasks in trialcs for tmpmasks when they are pointers
	 * to allocated cpumasks.
	 */
	tmp.addmask  = trialcs->subparts_cpus;
	tmp.delmask  = trialcs->effective_cpus;
	tmp.new_cpus = trialcs->cpus_allowed;
#endif

	if (cs->partition_root_state) {
		/* Cpumask of a partition root cannot be empty */
		if (cpumask_empty(trialcs->cpus_allowed))
			return -EINVAL;
		if (update_parent_subparts_cpumask(cs, partcmd_update,
					trialcs->cpus_allowed, &tmp) < 0)
			return -EINVAL;
	}

	spin_lock_irq(&callback_lock);
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);

	/*
	 * Make sure that subparts_cpus is a subset of cpus_allowed.
	 */
	if (cs->nr_subparts_cpus) {
		cpumask_and(cs->subparts_cpus, cs->subparts_cpus,
			    cs->cpus_allowed);
		cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
	}
	spin_unlock_irq(&callback_lock);

	update_cpumasks_hier(cs, &tmp);

	if (cs->partition_root_state) {
		struct cpuset *parent = parent_cs(cs);

		/*
		 * For partition root, update the cpumasks of sibling
		 * cpusets if they use parent's effective_cpus.
		 */
		if (parent->child_ecpus_count)
			update_sibling_cpumasks(parent, cs, &tmp);
	}
	return 0;
}
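
/*
 * For illustration only (mount points and cpuset names are examples), the
 * above runs when userspace rewrites a cpuset's cpu list:
 *
 *	# echo "0-3,6" > /sys/fs/cgroup/cpuset/cs1/cpuset.cpus
 *
 * Writing an empty string clears cpus_allowed, which validate_change()
 * only allows while the cpuset has no tasks attached.
 */
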
/*
 * Migrate memory region from one set of nodes to another.  This is
 * performed asynchronously as it can be called from process migration path
 * holding locks involved in process management.  All mm migrations are
 * performed in the queued order and can be waited for by flushing
 * cpuset_migrate_mm_wq.
 */

struct cpuset_migrate_mm_work {
	struct work_struct	work;
	struct mm_struct	*mm;
	nodemask_t		from;
	nodemask_t		to;
};

static void cpuset_migrate_mm_workfn(struct work_struct *work)
{
	struct cpuset_migrate_mm_work *mwork =
		container_of(work, struct cpuset_migrate_mm_work, work);

	/* on a wq worker, no need to worry about %current's mems_allowed */
	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
	mmput(mwork->mm);
	kfree(mwork);
}

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
			      const nodemask_t *to)
{
	struct cpuset_migrate_mm_work *mwork;

	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
	if (mwork) {
		mwork->mm = mm;
		mwork->from = *from;
		mwork->to = *to;
		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
		queue_work(cpuset_migrate_mm_wq, &mwork->work);
	} else {
		mmput(mm);
	}
}

static void cpuset_post_attach(void)
{
	flush_workqueue(cpuset_migrate_mm_wq);
}

/*
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: new nodes that the task will be set
 *
 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
 * and rebind the task's mempolicy, if any.  If the task is allocating in
 * parallel, it might temporarily see an empty intersection, which results in
 * a seqlock check and retry before OOM or allocation failure.
 */
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
{
	task_lock(tsk);

	local_irq_disable();
	write_seqcount_begin(&tsk->mems_allowed_seq);

	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems);
	tsk->mems_allowed = *newmems;

	write_seqcount_end(&tsk->mems_allowed_seq);
	local_irq_enable();

	task_unlock(tsk);
}
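
/*
 * Sketch of how the read side pairs with the seqcount update above (see
 * read_mems_allowed_begin()/read_mems_allowed_retry() in cpuset.h); this is
 * only an outline of the allocator-side pattern, not code from this file:
 *
 *	do {
 *		seq = read_mems_allowed_begin();
 *		... allocate using current->mems_allowed ...
 *	} while (read_mems_allowed_retry(seq));
 */
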
static void *cpuset_being_rebound;

/**
 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 *
 * Iterate through each task of @cs updating its mems_allowed to the
 * effective cpuset's.  As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.
 */
static void update_tasks_nodemask(struct cpuset *cs)
{
	static nodemask_t newmems;	/* protected by cpuset_mutex */
	struct css_task_iter it;
	struct task_struct *task;

	cpuset_being_rebound = cs;	/* causes mpol_dup() rebind */

	guarantee_online_mems(cs, &newmems);

	/*
	 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
	 * take while holding tasklist_lock.  Forks can happen - the
	 * mpol_dup() cpuset_being_rebound check will catch such forks,
	 * and rebind their vma mempolicies too.  Because we still hold
	 * the global cpuset_mutex, we know that no other rebind effort
	 * will be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it))) {
		struct mm_struct *mm;
		bool migrate;

		cpuset_change_task_nodemask(task, &newmems);

		mm = get_task_mm(task);
		if (!mm)
			continue;

		migrate = is_memory_migrate(cs);

		mpol_rebind_mm(mm, &cs->mems_allowed);
		if (migrate)
			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
		else
			mmput(mm);
	}
	css_task_iter_end(&it);

	/*
	 * All the tasks' nodemasks have been updated, update
	 * cs->old_mems_allowed.
	 */
	cs->old_mems_allowed = newmems;

	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
	cpuset_being_rebound = NULL;
}

/*
 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
 * @cs: the cpuset to consider
 * @new_mems: a temp variable for calculating new effective_mems
 *
 * When configured nodemask is changed, the effective nodemasks of this cpuset
 * and all its descendants need to be updated.
 *
 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
 *
 * Called with cpuset_mutex held
 */
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
		struct cpuset *parent = parent_cs(cp);

		nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);

		/*
		 * If it becomes empty, inherit the effective mask of the
		 * parent, which is guaranteed to have some MEMs.
		 */
		if (is_in_v2_mode() && nodes_empty(*new_mems))
			*new_mems = parent->effective_mems;

		/* Skip the whole subtree if the nodemask remains the same. */
		if (nodes_equal(*new_mems, cp->effective_mems)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (!css_tryget_online(&cp->css))
			continue;
		rcu_read_unlock();

		spin_lock_irq(&callback_lock);
		cp->effective_mems = *new_mems;
		spin_unlock_irq(&callback_lock);

		WARN_ON(!is_in_v2_mode() &&
			!nodes_equal(cp->mems_allowed, cp->effective_mems));

		update_tasks_nodemask(cp);

		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();
}

/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
 * cpusets mems_allowed, and for each task in the cpuset,
 * update mems_allowed and rebind task's mempolicy and any vma
 * mempolicies and if the cpuset is marked 'memory_migrate',
 * migrate the tasks pages to the new memory.
 *
 * Call with cpuset_mutex held.  May take callback_lock during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such tasks mm->mmap_lock, scan its vma's and rebind
 * their mempolicies to the cpusets new mems_allowed.
 */
static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
			   const char *buf)
{
	int retval;

	/*
	 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
	 * it's read-only
	 */
	if (cs == &top_cpuset) {
		retval = -EACCES;
		goto done;
	}

	/*
	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1818 * Since nodelist_parse() fails on an empty mask, we special case 1819 * that parsing. The validate_change() call ensures that cpusets 1820 * with tasks have memory. 1821 */ 1822 if (!*buf) { 1823 nodes_clear(trialcs->mems_allowed); 1824 } else { 1825 retval = nodelist_parse(buf, trialcs->mems_allowed); 1826 if (retval < 0) 1827 goto done; 1828 1829 if (!nodes_subset(trialcs->mems_allowed, 1830 top_cpuset.mems_allowed)) { 1831 retval = -EINVAL; 1832 goto done; 1833 } 1834 } 1835 1836 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { 1837 retval = 0; /* Too easy - nothing to do */ 1838 goto done; 1839 } 1840 retval = validate_change(cs, trialcs); 1841 if (retval < 0) 1842 goto done; 1843 1844 spin_lock_irq(&callback_lock); 1845 cs->mems_allowed = trialcs->mems_allowed; 1846 spin_unlock_irq(&callback_lock); 1847 1848 /* use trialcs->mems_allowed as a temp variable */ 1849 update_nodemasks_hier(cs, &trialcs->mems_allowed); 1850 done: 1851 return retval; 1852 } 1853 1854 bool current_cpuset_is_being_rebound(void) 1855 { 1856 bool ret; 1857 1858 rcu_read_lock(); 1859 ret = task_cs(current) == cpuset_being_rebound; 1860 rcu_read_unlock(); 1861 1862 return ret; 1863 } 1864 1865 static int update_relax_domain_level(struct cpuset *cs, s64 val) 1866 { 1867 #ifdef CONFIG_SMP 1868 if (val < -1 || val >= sched_domain_level_max) 1869 return -EINVAL; 1870 #endif 1871 1872 if (val != cs->relax_domain_level) { 1873 cs->relax_domain_level = val; 1874 if (!cpumask_empty(cs->cpus_allowed) && 1875 is_sched_load_balance(cs)) 1876 rebuild_sched_domains_locked(); 1877 } 1878 1879 return 0; 1880 } 1881 1882 /** 1883 * update_tasks_flags - update the spread flags of tasks in the cpuset. 1884 * @cs: the cpuset in which each task's spread flags needs to be changed 1885 * 1886 * Iterate through each task of @cs updating its spread flags. As this 1887 * function is called with cpuset_mutex held, cpuset membership stays 1888 * stable. 1889 */ 1890 static void update_tasks_flags(struct cpuset *cs) 1891 { 1892 struct css_task_iter it; 1893 struct task_struct *task; 1894 1895 css_task_iter_start(&cs->css, 0, &it); 1896 while ((task = css_task_iter_next(&it))) 1897 cpuset_update_task_spread_flag(cs, task); 1898 css_task_iter_end(&it); 1899 } 1900 1901 /* 1902 * update_flag - read a 0 or a 1 in a file and update associated flag 1903 * bit: the bit to update (see cpuset_flagbits_t) 1904 * cs: the cpuset to update 1905 * turning_on: whether the flag is being set or cleared 1906 * 1907 * Call with cpuset_mutex held. 
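 *
 * Illustrative use, mirroring cpuset_write_u64() below: turning on
 * memory_spread_page for a cpuset amounts to
 *
 *	err = update_flag(CS_SPREAD_PAGE, cs, 1);
 *
 * with cpuset_mutex already held by the caller.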
1908 */ 1909 1910 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, 1911 int turning_on) 1912 { 1913 struct cpuset *trialcs; 1914 int balance_flag_changed; 1915 int spread_flag_changed; 1916 int err; 1917
1918 trialcs = alloc_trial_cpuset(cs); 1919 if (!trialcs) 1920 return -ENOMEM; 1921
1922 if (turning_on) 1923 set_bit(bit, &trialcs->flags); 1924 else 1925 clear_bit(bit, &trialcs->flags); 1926
1927 err = validate_change(cs, trialcs); 1928 if (err < 0) 1929 goto out; 1930
1931 balance_flag_changed = (is_sched_load_balance(cs) != 1932 is_sched_load_balance(trialcs)); 1933
1934 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) 1935 || (is_spread_page(cs) != is_spread_page(trialcs))); 1936
1937 spin_lock_irq(&callback_lock); 1938 cs->flags = trialcs->flags; 1939 spin_unlock_irq(&callback_lock); 1940
1941 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) 1942 rebuild_sched_domains_locked(); 1943
1944 if (spread_flag_changed) 1945 update_tasks_flags(cs); 1946 out: 1947 free_cpuset(trialcs); 1948 return err; 1949 } 1950
1951 /* 1952 * update_prstate - update partition_root_state 1953 * cs: the cpuset to update 1954 * val: 0 - disabled, 1 - enabled 1955 * 1956 * Call with cpuset_mutex held. 1957 */ 1958 static int update_prstate(struct cpuset *cs, int val) 1959 { 1960 int err; 1961 struct cpuset *parent = parent_cs(cs); 1962 struct tmpmasks tmp; 1963
1964 if ((val != 0) && (val != 1)) 1965 return -EINVAL; 1966 if (val == cs->partition_root_state) 1967 return 0; 1968
1969 /* 1970 * Cannot force a partial or invalid partition root to a full 1971 * partition root. 1972 */ 1973 if (val && cs->partition_root_state) 1974 return -EINVAL; 1975
1976 if (alloc_cpumasks(NULL, &tmp)) 1977 return -ENOMEM; 1978
1979 err = -EINVAL; 1980 if (!cs->partition_root_state) { 1981 /* 1982 * Turning on partition root requires setting the 1983 * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed 1984 * cannot be empty. 1985 */ 1986 if (cpumask_empty(cs->cpus_allowed)) 1987 goto out; 1988
1989 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); 1990 if (err) 1991 goto out; 1992
1993 err = update_parent_subparts_cpumask(cs, partcmd_enable, 1994 NULL, &tmp); 1995 if (err) { 1996 update_flag(CS_CPU_EXCLUSIVE, cs, 0); 1997 goto out; 1998 } 1999 cs->partition_root_state = PRS_ENABLED; 2000 } else { 2001 /* 2002 * Turning off partition root will clear the 2003 * CS_CPU_EXCLUSIVE bit. 2004 */ 2005 if (cs->partition_root_state == PRS_ERROR) { 2006 cs->partition_root_state = 0; 2007 update_flag(CS_CPU_EXCLUSIVE, cs, 0); 2008 err = 0; 2009 goto out; 2010 } 2011
2012 err = update_parent_subparts_cpumask(cs, partcmd_disable, 2013 NULL, &tmp); 2014 if (err) 2015 goto out; 2016
2017 cs->partition_root_state = 0; 2018
2019 /* Turning off CS_CPU_EXCLUSIVE will not return error */ 2020 update_flag(CS_CPU_EXCLUSIVE, cs, 0); 2021 } 2022
2023 /* 2024 * Update cpumask of parent's tasks except when it is the top 2025 * cpuset as some system daemons cannot be mapped to other CPUs. 2026 */ 2027 if (parent != &top_cpuset) 2028 update_tasks_cpumask(parent); 2029
2030 if (parent->child_ecpus_count) 2031 update_sibling_cpumasks(parent, cs, &tmp); 2032
2033 rebuild_sched_domains_locked(); 2034 out: 2035 free_cpumasks(NULL, &tmp); 2036 return err; 2037 } 2038
2039 /* 2040 * Frequency meter - How fast is some event occurring? 2041 * 2042 * These routines manage a digitally filtered, constant time based, 2043 * event frequency meter.
There are four routines: 2044 * fmeter_init() - initialize a frequency meter. 2045 * fmeter_markevent() - called each time the event happens. 2046 * fmeter_getrate() - returns the recent rate of such events. 2047 * fmeter_update() - internal routine used to update fmeter. 2048 * 2049 * A common data structure is passed to each of these routines, 2050 * which is used to keep track of the state required to manage the 2051 * frequency meter and its digital filter. 2052 * 2053 * The filter works on the number of events marked per unit time. 2054 * The filter is single-pole low-pass recursive (IIR). The time unit 2055 * is 1 second. Arithmetic is done using 32-bit integers scaled to 2056 * simulate 3 decimal digits of precision (multiplied by 1000). 2057 * 2058 * With an FM_COEF of 933, and a time base of 1 second, the filter 2059 * has a half-life of 10 seconds, meaning that if the events quit 2060 * happening, then the rate returned from the fmeter_getrate() 2061 * will be cut in half each 10 seconds, until it converges to zero. 2062 * 2063 * It is not worth doing a real infinitely recursive filter. If more 2064 * than FM_MAXTICKS ticks have elapsed since the last filter event, 2065 * just compute FM_MAXTICKS ticks worth, by which point the level 2066 * will be stable. 2067 * 2068 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid 2069 * arithmetic overflow in the fmeter_update() routine. 2070 * 2071 * Given the simple 32 bit integer arithmetic used, this meter works 2072 * best for reporting rates between one per millisecond (msec) and 2073 * one per 32 (approx) seconds. At constant rates faster than one 2074 * per msec it maxes out at values just under 1,000,000. At constant 2075 * rates between one per msec, and one per second it will stabilize 2076 * to a value N*1000, where N is the rate of events per second. 2077 * At constant rates between one per second and one per 32 seconds, 2078 * it will be choppy, moving up on the seconds that have an event, 2079 * and then decaying until the next event. At rates slower than 2080 * about one in 32 seconds, it decays all the way back to zero between 2081 * each event. 2082 */ 2083 2084 #define FM_COEF 933 /* coefficient for half-life of 10 secs */ 2085 #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */ 2086 #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ 2087 #define FM_SCALE 1000 /* faux fixed point scale */ 2088 2089 /* Initialize a frequency meter */ 2090 static void fmeter_init(struct fmeter *fmp) 2091 { 2092 fmp->cnt = 0; 2093 fmp->val = 0; 2094 fmp->time = 0; 2095 spin_lock_init(&fmp->lock); 2096 } 2097 2098 /* Internal meter update - process cnt events and update value */ 2099 static void fmeter_update(struct fmeter *fmp) 2100 { 2101 time64_t now; 2102 u32 ticks; 2103 2104 now = ktime_get_seconds(); 2105 ticks = now - fmp->time; 2106 2107 if (ticks == 0) 2108 return; 2109 2110 ticks = min(FM_MAXTICKS, ticks); 2111 while (ticks-- > 0) 2112 fmp->val = (FM_COEF * fmp->val) / FM_SCALE; 2113 fmp->time = now; 2114 2115 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; 2116 fmp->cnt = 0; 2117 } 2118 2119 /* Process any previous ticks, then bump cnt by one (times scale). */ 2120 static void fmeter_markevent(struct fmeter *fmp) 2121 { 2122 spin_lock(&fmp->lock); 2123 fmeter_update(fmp); 2124 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); 2125 spin_unlock(&fmp->lock); 2126 } 2127 2128 /* Process any previous ticks, then return current value. 
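 *
 * A worked example of the decay, using only the constants above: each
 * elapsed second scales the stored value by FM_COEF/FM_SCALE = 0.933,
 * so after ten idle seconds
 *
 *	val' = val * (933/1000)^10 ~= val * 0.5
 *
 * which is the 10 second half-life described in the big comment above.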
*/ 2129 static int fmeter_getrate(struct fmeter *fmp) 2130 { 2131 int val; 2132 2133 spin_lock(&fmp->lock); 2134 fmeter_update(fmp); 2135 val = fmp->val; 2136 spin_unlock(&fmp->lock); 2137 return val; 2138 } 2139 2140 static struct cpuset *cpuset_attach_old_cs; 2141 2142 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ 2143 static int cpuset_can_attach(struct cgroup_taskset *tset) 2144 { 2145 struct cgroup_subsys_state *css; 2146 struct cpuset *cs; 2147 struct task_struct *task; 2148 int ret; 2149 2150 /* used later by cpuset_attach() */ 2151 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); 2152 cs = css_cs(css); 2153 2154 percpu_down_write(&cpuset_rwsem); 2155 2156 /* allow moving tasks into an empty cpuset if on default hierarchy */ 2157 ret = -ENOSPC; 2158 if (!is_in_v2_mode() && 2159 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) 2160 goto out_unlock; 2161 2162 cgroup_taskset_for_each(task, css, tset) { 2163 ret = task_can_attach(task, cs->cpus_allowed); 2164 if (ret) 2165 goto out_unlock; 2166 ret = security_task_setscheduler(task); 2167 if (ret) 2168 goto out_unlock; 2169 } 2170 2171 /* 2172 * Mark attach is in progress. This makes validate_change() fail 2173 * changes which zero cpus/mems_allowed. 2174 */ 2175 cs->attach_in_progress++; 2176 ret = 0; 2177 out_unlock: 2178 percpu_up_write(&cpuset_rwsem); 2179 return ret; 2180 } 2181 2182 static void cpuset_cancel_attach(struct cgroup_taskset *tset) 2183 { 2184 struct cgroup_subsys_state *css; 2185 2186 cgroup_taskset_first(tset, &css); 2187 2188 percpu_down_write(&cpuset_rwsem); 2189 css_cs(css)->attach_in_progress--; 2190 percpu_up_write(&cpuset_rwsem); 2191 } 2192 2193 /* 2194 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach() 2195 * but we can't allocate it dynamically there. Define it global and 2196 * allocate from cpuset_init(). 2197 */ 2198 static cpumask_var_t cpus_attach; 2199 2200 static void cpuset_attach(struct cgroup_taskset *tset) 2201 { 2202 /* static buf protected by cpuset_mutex */ 2203 static nodemask_t cpuset_attach_nodemask_to; 2204 struct task_struct *task; 2205 struct task_struct *leader; 2206 struct cgroup_subsys_state *css; 2207 struct cpuset *cs; 2208 struct cpuset *oldcs = cpuset_attach_old_cs; 2209 2210 cgroup_taskset_first(tset, &css); 2211 cs = css_cs(css); 2212 2213 percpu_down_write(&cpuset_rwsem); 2214 2215 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); 2216 2217 cgroup_taskset_for_each(task, css, tset) { 2218 if (cs != &top_cpuset) 2219 guarantee_online_cpus(task, cpus_attach); 2220 else 2221 cpumask_copy(cpus_attach, task_cpu_possible_mask(task)); 2222 /* 2223 * can_attach beforehand should guarantee that this doesn't 2224 * fail. TODO: have a better way to handle failure here 2225 */ 2226 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); 2227 2228 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); 2229 cpuset_update_task_spread_flag(cs, task); 2230 } 2231 2232 /* 2233 * Change mm for all threadgroup leaders. This is expensive and may 2234 * sleep and should be moved outside migration path proper. 2235 */ 2236 cpuset_attach_nodemask_to = cs->effective_mems; 2237 cgroup_taskset_for_each_leader(leader, css, tset) { 2238 struct mm_struct *mm = get_task_mm(leader); 2239 2240 if (mm) { 2241 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); 2242 2243 /* 2244 * old_mems_allowed is the same with mems_allowed 2245 * here, except if this task is being moved 2246 * automatically due to hotplug. 
In that case 2247 * @mems_allowed has been updated and is empty, so 2248 * @old_mems_allowed is the right nodesets that we 2249 * migrate mm from. 2250 */ 2251 if (is_memory_migrate(cs)) 2252 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, 2253 &cpuset_attach_nodemask_to); 2254 else 2255 mmput(mm); 2256 } 2257 } 2258 2259 cs->old_mems_allowed = cpuset_attach_nodemask_to; 2260 2261 cs->attach_in_progress--; 2262 if (!cs->attach_in_progress) 2263 wake_up(&cpuset_attach_wq); 2264 2265 percpu_up_write(&cpuset_rwsem); 2266 } 2267 2268 /* The various types of files and directories in a cpuset file system */ 2269 2270 typedef enum { 2271 FILE_MEMORY_MIGRATE, 2272 FILE_CPULIST, 2273 FILE_MEMLIST, 2274 FILE_EFFECTIVE_CPULIST, 2275 FILE_EFFECTIVE_MEMLIST, 2276 FILE_SUBPARTS_CPULIST, 2277 FILE_CPU_EXCLUSIVE, 2278 FILE_MEM_EXCLUSIVE, 2279 FILE_MEM_HARDWALL, 2280 FILE_SCHED_LOAD_BALANCE, 2281 FILE_PARTITION_ROOT, 2282 FILE_SCHED_RELAX_DOMAIN_LEVEL, 2283 FILE_MEMORY_PRESSURE_ENABLED, 2284 FILE_MEMORY_PRESSURE, 2285 FILE_SPREAD_PAGE, 2286 FILE_SPREAD_SLAB, 2287 } cpuset_filetype_t; 2288 2289 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, 2290 u64 val) 2291 { 2292 struct cpuset *cs = css_cs(css); 2293 cpuset_filetype_t type = cft->private; 2294 int retval = 0; 2295 2296 get_online_cpus(); 2297 percpu_down_write(&cpuset_rwsem); 2298 if (!is_cpuset_online(cs)) { 2299 retval = -ENODEV; 2300 goto out_unlock; 2301 } 2302 2303 switch (type) { 2304 case FILE_CPU_EXCLUSIVE: 2305 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); 2306 break; 2307 case FILE_MEM_EXCLUSIVE: 2308 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); 2309 break; 2310 case FILE_MEM_HARDWALL: 2311 retval = update_flag(CS_MEM_HARDWALL, cs, val); 2312 break; 2313 case FILE_SCHED_LOAD_BALANCE: 2314 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); 2315 break; 2316 case FILE_MEMORY_MIGRATE: 2317 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); 2318 break; 2319 case FILE_MEMORY_PRESSURE_ENABLED: 2320 cpuset_memory_pressure_enabled = !!val; 2321 break; 2322 case FILE_SPREAD_PAGE: 2323 retval = update_flag(CS_SPREAD_PAGE, cs, val); 2324 break; 2325 case FILE_SPREAD_SLAB: 2326 retval = update_flag(CS_SPREAD_SLAB, cs, val); 2327 break; 2328 default: 2329 retval = -EINVAL; 2330 break; 2331 } 2332 out_unlock: 2333 percpu_up_write(&cpuset_rwsem); 2334 put_online_cpus(); 2335 return retval; 2336 } 2337 2338 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, 2339 s64 val) 2340 { 2341 struct cpuset *cs = css_cs(css); 2342 cpuset_filetype_t type = cft->private; 2343 int retval = -ENODEV; 2344 2345 get_online_cpus(); 2346 percpu_down_write(&cpuset_rwsem); 2347 if (!is_cpuset_online(cs)) 2348 goto out_unlock; 2349 2350 switch (type) { 2351 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 2352 retval = update_relax_domain_level(cs, val); 2353 break; 2354 default: 2355 retval = -EINVAL; 2356 break; 2357 } 2358 out_unlock: 2359 percpu_up_write(&cpuset_rwsem); 2360 put_online_cpus(); 2361 return retval; 2362 } 2363 2364 /* 2365 * Common handling for a write to a "cpus" or "mems" file. 
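 *
 * Illustratively (the exact file name and mount point depend on how the
 * hierarchy is mounted), a user space write such as
 *
 *	echo 0-3 > cpuset.cpus
 *	echo 0 > cpuset.mems
 *
 * lands here with of_cft(of)->private set to FILE_CPULIST or
 * FILE_MEMLIST respectively.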
2366 */ 2367 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, 2368 char *buf, size_t nbytes, loff_t off) 2369 { 2370 struct cpuset *cs = css_cs(of_css(of)); 2371 struct cpuset *trialcs; 2372 int retval = -ENODEV; 2373 2374 buf = strstrip(buf); 2375 2376 /* 2377 * CPU or memory hotunplug may leave @cs w/o any execution 2378 * resources, in which case the hotplug code asynchronously updates 2379 * configuration and transfers all tasks to the nearest ancestor 2380 * which can execute. 2381 * 2382 * As writes to "cpus" or "mems" may restore @cs's execution 2383 * resources, wait for the previously scheduled operations before 2384 * proceeding, so that we don't end up keep removing tasks added 2385 * after execution capability is restored. 2386 * 2387 * cpuset_hotplug_work calls back into cgroup core via 2388 * cgroup_transfer_tasks() and waiting for it from a cgroupfs 2389 * operation like this one can lead to a deadlock through kernfs 2390 * active_ref protection. Let's break the protection. Losing the 2391 * protection is okay as we check whether @cs is online after 2392 * grabbing cpuset_mutex anyway. This only happens on the legacy 2393 * hierarchies. 2394 */ 2395 css_get(&cs->css); 2396 kernfs_break_active_protection(of->kn); 2397 flush_work(&cpuset_hotplug_work); 2398 2399 get_online_cpus(); 2400 percpu_down_write(&cpuset_rwsem); 2401 if (!is_cpuset_online(cs)) 2402 goto out_unlock; 2403 2404 trialcs = alloc_trial_cpuset(cs); 2405 if (!trialcs) { 2406 retval = -ENOMEM; 2407 goto out_unlock; 2408 } 2409 2410 switch (of_cft(of)->private) { 2411 case FILE_CPULIST: 2412 retval = update_cpumask(cs, trialcs, buf); 2413 break; 2414 case FILE_MEMLIST: 2415 retval = update_nodemask(cs, trialcs, buf); 2416 break; 2417 default: 2418 retval = -EINVAL; 2419 break; 2420 } 2421 2422 free_cpuset(trialcs); 2423 out_unlock: 2424 percpu_up_write(&cpuset_rwsem); 2425 put_online_cpus(); 2426 kernfs_unbreak_active_protection(of->kn); 2427 css_put(&cs->css); 2428 flush_workqueue(cpuset_migrate_mm_wq); 2429 return retval ?: nbytes; 2430 } 2431 2432 /* 2433 * These ascii lists should be read in a single call, by using a user 2434 * buffer large enough to hold the entire map. If read in smaller 2435 * chunks, there is no guarantee of atomicity. Since the display format 2436 * used, list of ranges of sequential numbers, is variable length, 2437 * and since these maps can change value dynamically, one could read 2438 * gibberish by doing partial reads while a list was changing. 
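 *
 * The output is the usual comma-separated list of ranges produced by
 * the "%*pbl" format, e.g. a read of "cpus" might return
 * "0-3,8-11" (illustrative values).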
2439 */ 2440 static int cpuset_common_seq_show(struct seq_file *sf, void *v) 2441 { 2442 struct cpuset *cs = css_cs(seq_css(sf)); 2443 cpuset_filetype_t type = seq_cft(sf)->private; 2444 int ret = 0; 2445 2446 spin_lock_irq(&callback_lock); 2447 2448 switch (type) { 2449 case FILE_CPULIST: 2450 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); 2451 break; 2452 case FILE_MEMLIST: 2453 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); 2454 break; 2455 case FILE_EFFECTIVE_CPULIST: 2456 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); 2457 break; 2458 case FILE_EFFECTIVE_MEMLIST: 2459 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); 2460 break; 2461 case FILE_SUBPARTS_CPULIST: 2462 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); 2463 break; 2464 default: 2465 ret = -EINVAL; 2466 } 2467 2468 spin_unlock_irq(&callback_lock); 2469 return ret; 2470 } 2471 2472 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) 2473 { 2474 struct cpuset *cs = css_cs(css); 2475 cpuset_filetype_t type = cft->private; 2476 switch (type) { 2477 case FILE_CPU_EXCLUSIVE: 2478 return is_cpu_exclusive(cs); 2479 case FILE_MEM_EXCLUSIVE: 2480 return is_mem_exclusive(cs); 2481 case FILE_MEM_HARDWALL: 2482 return is_mem_hardwall(cs); 2483 case FILE_SCHED_LOAD_BALANCE: 2484 return is_sched_load_balance(cs); 2485 case FILE_MEMORY_MIGRATE: 2486 return is_memory_migrate(cs); 2487 case FILE_MEMORY_PRESSURE_ENABLED: 2488 return cpuset_memory_pressure_enabled; 2489 case FILE_MEMORY_PRESSURE: 2490 return fmeter_getrate(&cs->fmeter); 2491 case FILE_SPREAD_PAGE: 2492 return is_spread_page(cs); 2493 case FILE_SPREAD_SLAB: 2494 return is_spread_slab(cs); 2495 default: 2496 BUG(); 2497 } 2498 2499 /* Unreachable but makes gcc happy */ 2500 return 0; 2501 } 2502 2503 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) 2504 { 2505 struct cpuset *cs = css_cs(css); 2506 cpuset_filetype_t type = cft->private; 2507 switch (type) { 2508 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 2509 return cs->relax_domain_level; 2510 default: 2511 BUG(); 2512 } 2513 2514 /* Unreachable but makes gcc happy */ 2515 return 0; 2516 } 2517 2518 static int sched_partition_show(struct seq_file *seq, void *v) 2519 { 2520 struct cpuset *cs = css_cs(seq_css(seq)); 2521 2522 switch (cs->partition_root_state) { 2523 case PRS_ENABLED: 2524 seq_puts(seq, "root\n"); 2525 break; 2526 case PRS_DISABLED: 2527 seq_puts(seq, "member\n"); 2528 break; 2529 case PRS_ERROR: 2530 seq_puts(seq, "root invalid\n"); 2531 break; 2532 } 2533 return 0; 2534 } 2535 2536 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf, 2537 size_t nbytes, loff_t off) 2538 { 2539 struct cpuset *cs = css_cs(of_css(of)); 2540 int val; 2541 int retval = -ENODEV; 2542 2543 buf = strstrip(buf); 2544 2545 /* 2546 * Convert "root" to ENABLED, and convert "member" to DISABLED. 
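 *
 * Illustratively, on the default hierarchy this corresponds to
 *
 *	echo root > cpuset.cpus.partition	(request PRS_ENABLED)
 *	echo member > cpuset.cpus.partition	(request PRS_DISABLED)
 *
 * with any other string rejected with -EINVAL below.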
2547 */ 2548 if (!strcmp(buf, "root")) 2549 val = PRS_ENABLED; 2550 else if (!strcmp(buf, "member")) 2551 val = PRS_DISABLED; 2552 else 2553 return -EINVAL; 2554 2555 css_get(&cs->css); 2556 get_online_cpus(); 2557 percpu_down_write(&cpuset_rwsem); 2558 if (!is_cpuset_online(cs)) 2559 goto out_unlock; 2560 2561 retval = update_prstate(cs, val); 2562 out_unlock: 2563 percpu_up_write(&cpuset_rwsem); 2564 put_online_cpus(); 2565 css_put(&cs->css); 2566 return retval ?: nbytes; 2567 } 2568 2569 /* 2570 * for the common functions, 'private' gives the type of file 2571 */ 2572 2573 static struct cftype legacy_files[] = { 2574 { 2575 .name = "cpus", 2576 .seq_show = cpuset_common_seq_show, 2577 .write = cpuset_write_resmask, 2578 .max_write_len = (100U + 6 * NR_CPUS), 2579 .private = FILE_CPULIST, 2580 }, 2581 2582 { 2583 .name = "mems", 2584 .seq_show = cpuset_common_seq_show, 2585 .write = cpuset_write_resmask, 2586 .max_write_len = (100U + 6 * MAX_NUMNODES), 2587 .private = FILE_MEMLIST, 2588 }, 2589 2590 { 2591 .name = "effective_cpus", 2592 .seq_show = cpuset_common_seq_show, 2593 .private = FILE_EFFECTIVE_CPULIST, 2594 }, 2595 2596 { 2597 .name = "effective_mems", 2598 .seq_show = cpuset_common_seq_show, 2599 .private = FILE_EFFECTIVE_MEMLIST, 2600 }, 2601 2602 { 2603 .name = "cpu_exclusive", 2604 .read_u64 = cpuset_read_u64, 2605 .write_u64 = cpuset_write_u64, 2606 .private = FILE_CPU_EXCLUSIVE, 2607 }, 2608 2609 { 2610 .name = "mem_exclusive", 2611 .read_u64 = cpuset_read_u64, 2612 .write_u64 = cpuset_write_u64, 2613 .private = FILE_MEM_EXCLUSIVE, 2614 }, 2615 2616 { 2617 .name = "mem_hardwall", 2618 .read_u64 = cpuset_read_u64, 2619 .write_u64 = cpuset_write_u64, 2620 .private = FILE_MEM_HARDWALL, 2621 }, 2622 2623 { 2624 .name = "sched_load_balance", 2625 .read_u64 = cpuset_read_u64, 2626 .write_u64 = cpuset_write_u64, 2627 .private = FILE_SCHED_LOAD_BALANCE, 2628 }, 2629 2630 { 2631 .name = "sched_relax_domain_level", 2632 .read_s64 = cpuset_read_s64, 2633 .write_s64 = cpuset_write_s64, 2634 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, 2635 }, 2636 2637 { 2638 .name = "memory_migrate", 2639 .read_u64 = cpuset_read_u64, 2640 .write_u64 = cpuset_write_u64, 2641 .private = FILE_MEMORY_MIGRATE, 2642 }, 2643 2644 { 2645 .name = "memory_pressure", 2646 .read_u64 = cpuset_read_u64, 2647 .private = FILE_MEMORY_PRESSURE, 2648 }, 2649 2650 { 2651 .name = "memory_spread_page", 2652 .read_u64 = cpuset_read_u64, 2653 .write_u64 = cpuset_write_u64, 2654 .private = FILE_SPREAD_PAGE, 2655 }, 2656 2657 { 2658 .name = "memory_spread_slab", 2659 .read_u64 = cpuset_read_u64, 2660 .write_u64 = cpuset_write_u64, 2661 .private = FILE_SPREAD_SLAB, 2662 }, 2663 2664 { 2665 .name = "memory_pressure_enabled", 2666 .flags = CFTYPE_ONLY_ON_ROOT, 2667 .read_u64 = cpuset_read_u64, 2668 .write_u64 = cpuset_write_u64, 2669 .private = FILE_MEMORY_PRESSURE_ENABLED, 2670 }, 2671 2672 { } /* terminate */ 2673 }; 2674 2675 /* 2676 * This is currently a minimal set for the default hierarchy. It can be 2677 * expanded later on by migrating more features and control files from v1. 
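 *
 * Note that cgroup core prefixes these names with the controller name,
 * so e.g. "cpus.effective" below is visible as "cpuset.cpus.effective"
 * in a mounted cgroup2 hierarchy (a naming note only; the set of files
 * is defined by this table).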
2678 */ 2679 static struct cftype dfl_files[] = { 2680 { 2681 .name = "cpus", 2682 .seq_show = cpuset_common_seq_show, 2683 .write = cpuset_write_resmask, 2684 .max_write_len = (100U + 6 * NR_CPUS), 2685 .private = FILE_CPULIST, 2686 .flags = CFTYPE_NOT_ON_ROOT, 2687 }, 2688 2689 { 2690 .name = "mems", 2691 .seq_show = cpuset_common_seq_show, 2692 .write = cpuset_write_resmask, 2693 .max_write_len = (100U + 6 * MAX_NUMNODES), 2694 .private = FILE_MEMLIST, 2695 .flags = CFTYPE_NOT_ON_ROOT, 2696 }, 2697 2698 { 2699 .name = "cpus.effective", 2700 .seq_show = cpuset_common_seq_show, 2701 .private = FILE_EFFECTIVE_CPULIST, 2702 }, 2703 2704 { 2705 .name = "mems.effective", 2706 .seq_show = cpuset_common_seq_show, 2707 .private = FILE_EFFECTIVE_MEMLIST, 2708 }, 2709 2710 { 2711 .name = "cpus.partition", 2712 .seq_show = sched_partition_show, 2713 .write = sched_partition_write, 2714 .private = FILE_PARTITION_ROOT, 2715 .flags = CFTYPE_NOT_ON_ROOT, 2716 }, 2717 2718 { 2719 .name = "cpus.subpartitions", 2720 .seq_show = cpuset_common_seq_show, 2721 .private = FILE_SUBPARTS_CPULIST, 2722 .flags = CFTYPE_DEBUG, 2723 }, 2724 2725 { } /* terminate */ 2726 }; 2727 2728 2729 /* 2730 * cpuset_css_alloc - allocate a cpuset css 2731 * cgrp: control group that the new cpuset will be part of 2732 */ 2733 2734 static struct cgroup_subsys_state * 2735 cpuset_css_alloc(struct cgroup_subsys_state *parent_css) 2736 { 2737 struct cpuset *cs; 2738 2739 if (!parent_css) 2740 return &top_cpuset.css; 2741 2742 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 2743 if (!cs) 2744 return ERR_PTR(-ENOMEM); 2745 2746 if (alloc_cpumasks(cs, NULL)) { 2747 kfree(cs); 2748 return ERR_PTR(-ENOMEM); 2749 } 2750 2751 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 2752 nodes_clear(cs->mems_allowed); 2753 nodes_clear(cs->effective_mems); 2754 fmeter_init(&cs->fmeter); 2755 cs->relax_domain_level = -1; 2756 2757 return &cs->css; 2758 } 2759 2760 static int cpuset_css_online(struct cgroup_subsys_state *css) 2761 { 2762 struct cpuset *cs = css_cs(css); 2763 struct cpuset *parent = parent_cs(cs); 2764 struct cpuset *tmp_cs; 2765 struct cgroup_subsys_state *pos_css; 2766 2767 if (!parent) 2768 return 0; 2769 2770 get_online_cpus(); 2771 percpu_down_write(&cpuset_rwsem); 2772 2773 set_bit(CS_ONLINE, &cs->flags); 2774 if (is_spread_page(parent)) 2775 set_bit(CS_SPREAD_PAGE, &cs->flags); 2776 if (is_spread_slab(parent)) 2777 set_bit(CS_SPREAD_SLAB, &cs->flags); 2778 2779 cpuset_inc(); 2780 2781 spin_lock_irq(&callback_lock); 2782 if (is_in_v2_mode()) { 2783 cpumask_copy(cs->effective_cpus, parent->effective_cpus); 2784 cs->effective_mems = parent->effective_mems; 2785 cs->use_parent_ecpus = true; 2786 parent->child_ecpus_count++; 2787 } 2788 spin_unlock_irq(&callback_lock); 2789 2790 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) 2791 goto out_unlock; 2792 2793 /* 2794 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is 2795 * set. This flag handling is implemented in cgroup core for 2796 * histrical reasons - the flag may be specified during mount. 2797 * 2798 * Currently, if any sibling cpusets have exclusive cpus or mem, we 2799 * refuse to clone the configuration - thereby refusing the task to 2800 * be entered, and as a result refusing the sys_unshare() or 2801 * clone() which initiated it. If this becomes a problem for some 2802 * users who wish to allow that scenario, then this could be 2803 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive 2804 * (and likewise for mems) to the new cgroup. 
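 *
 * (This flag is the knob exposed by cgroup core as the
 * "cgroup.clone_children" file on the legacy hierarchy.)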
2805 */ 2806 rcu_read_lock(); 2807 cpuset_for_each_child(tmp_cs, pos_css, parent) { 2808 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { 2809 rcu_read_unlock(); 2810 goto out_unlock; 2811 } 2812 } 2813 rcu_read_unlock(); 2814 2815 spin_lock_irq(&callback_lock); 2816 cs->mems_allowed = parent->mems_allowed; 2817 cs->effective_mems = parent->mems_allowed; 2818 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); 2819 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); 2820 spin_unlock_irq(&callback_lock); 2821 out_unlock: 2822 percpu_up_write(&cpuset_rwsem); 2823 put_online_cpus(); 2824 return 0; 2825 } 2826 2827 /* 2828 * If the cpuset being removed has its flag 'sched_load_balance' 2829 * enabled, then simulate turning sched_load_balance off, which 2830 * will call rebuild_sched_domains_locked(). That is not needed 2831 * in the default hierarchy where only changes in partition 2832 * will cause repartitioning. 2833 * 2834 * If the cpuset has the 'sched.partition' flag enabled, simulate 2835 * turning 'sched.partition" off. 2836 */ 2837 2838 static void cpuset_css_offline(struct cgroup_subsys_state *css) 2839 { 2840 struct cpuset *cs = css_cs(css); 2841 2842 get_online_cpus(); 2843 percpu_down_write(&cpuset_rwsem); 2844 2845 if (is_partition_root(cs)) 2846 update_prstate(cs, 0); 2847 2848 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && 2849 is_sched_load_balance(cs)) 2850 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); 2851 2852 if (cs->use_parent_ecpus) { 2853 struct cpuset *parent = parent_cs(cs); 2854 2855 cs->use_parent_ecpus = false; 2856 parent->child_ecpus_count--; 2857 } 2858 2859 cpuset_dec(); 2860 clear_bit(CS_ONLINE, &cs->flags); 2861 2862 percpu_up_write(&cpuset_rwsem); 2863 put_online_cpus(); 2864 } 2865 2866 static void cpuset_css_free(struct cgroup_subsys_state *css) 2867 { 2868 struct cpuset *cs = css_cs(css); 2869 2870 free_cpuset(cs); 2871 } 2872 2873 static void cpuset_bind(struct cgroup_subsys_state *root_css) 2874 { 2875 percpu_down_write(&cpuset_rwsem); 2876 spin_lock_irq(&callback_lock); 2877 2878 if (is_in_v2_mode()) { 2879 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); 2880 top_cpuset.mems_allowed = node_possible_map; 2881 } else { 2882 cpumask_copy(top_cpuset.cpus_allowed, 2883 top_cpuset.effective_cpus); 2884 top_cpuset.mems_allowed = top_cpuset.effective_mems; 2885 } 2886 2887 spin_unlock_irq(&callback_lock); 2888 percpu_up_write(&cpuset_rwsem); 2889 } 2890 2891 /* 2892 * Make sure the new task conform to the current state of its parent, 2893 * which could have been changed by cpuset just after it inherits the 2894 * state from the parent and before it sits on the cgroup's task list. 
2895 */ 2896 static void cpuset_fork(struct task_struct *task) 2897 { 2898 if (task_css_is_root(task, cpuset_cgrp_id)) 2899 return; 2900 2901 set_cpus_allowed_ptr(task, current->cpus_ptr); 2902 task->mems_allowed = current->mems_allowed; 2903 } 2904 2905 struct cgroup_subsys cpuset_cgrp_subsys = { 2906 .css_alloc = cpuset_css_alloc, 2907 .css_online = cpuset_css_online, 2908 .css_offline = cpuset_css_offline, 2909 .css_free = cpuset_css_free, 2910 .can_attach = cpuset_can_attach, 2911 .cancel_attach = cpuset_cancel_attach, 2912 .attach = cpuset_attach, 2913 .post_attach = cpuset_post_attach, 2914 .bind = cpuset_bind, 2915 .fork = cpuset_fork, 2916 .legacy_cftypes = legacy_files, 2917 .dfl_cftypes = dfl_files, 2918 .early_init = true, 2919 .threaded = true, 2920 }; 2921 2922 /** 2923 * cpuset_init - initialize cpusets at system boot 2924 * 2925 * Description: Initialize top_cpuset 2926 **/ 2927 2928 int __init cpuset_init(void) 2929 { 2930 BUG_ON(percpu_init_rwsem(&cpuset_rwsem)); 2931 2932 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)); 2933 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)); 2934 BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL)); 2935 2936 cpumask_setall(top_cpuset.cpus_allowed); 2937 nodes_setall(top_cpuset.mems_allowed); 2938 cpumask_setall(top_cpuset.effective_cpus); 2939 nodes_setall(top_cpuset.effective_mems); 2940 2941 fmeter_init(&top_cpuset.fmeter); 2942 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); 2943 top_cpuset.relax_domain_level = -1; 2944 2945 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)); 2946 2947 return 0; 2948 } 2949 2950 /* 2951 * If CPU and/or memory hotplug handlers, below, unplug any CPUs 2952 * or memory nodes, we need to walk over the cpuset hierarchy, 2953 * removing that CPU or node from all cpusets. If this removes the 2954 * last CPU or node from a cpuset, then move the tasks in the empty 2955 * cpuset to its next-highest non-empty parent. 2956 */ 2957 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) 2958 { 2959 struct cpuset *parent; 2960 2961 /* 2962 * Find its next-highest non-empty parent, (top cpuset 2963 * has online cpus, so can't be empty). 2964 */ 2965 parent = parent_cs(cs); 2966 while (cpumask_empty(parent->cpus_allowed) || 2967 nodes_empty(parent->mems_allowed)) 2968 parent = parent_cs(parent); 2969 2970 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { 2971 pr_err("cpuset: failed to transfer tasks out of empty cpuset "); 2972 pr_cont_cgroup_name(cs->css.cgroup); 2973 pr_cont("\n"); 2974 } 2975 } 2976 2977 static void 2978 hotplug_update_tasks_legacy(struct cpuset *cs, 2979 struct cpumask *new_cpus, nodemask_t *new_mems, 2980 bool cpus_updated, bool mems_updated) 2981 { 2982 bool is_empty; 2983 2984 spin_lock_irq(&callback_lock); 2985 cpumask_copy(cs->cpus_allowed, new_cpus); 2986 cpumask_copy(cs->effective_cpus, new_cpus); 2987 cs->mems_allowed = *new_mems; 2988 cs->effective_mems = *new_mems; 2989 spin_unlock_irq(&callback_lock); 2990 2991 /* 2992 * Don't call update_tasks_cpumask() if the cpuset becomes empty, 2993 * as the tasks will be migratecd to an ancestor. 
2994 */ 2995 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) 2996 update_tasks_cpumask(cs); 2997 if (mems_updated && !nodes_empty(cs->mems_allowed)) 2998 update_tasks_nodemask(cs); 2999 3000 is_empty = cpumask_empty(cs->cpus_allowed) || 3001 nodes_empty(cs->mems_allowed); 3002 3003 percpu_up_write(&cpuset_rwsem); 3004 3005 /* 3006 * Move tasks to the nearest ancestor with execution resources, 3007 * This is full cgroup operation which will also call back into 3008 * cpuset. Should be done outside any lock. 3009 */ 3010 if (is_empty) 3011 remove_tasks_in_empty_cpuset(cs); 3012 3013 percpu_down_write(&cpuset_rwsem); 3014 } 3015 3016 static void 3017 hotplug_update_tasks(struct cpuset *cs, 3018 struct cpumask *new_cpus, nodemask_t *new_mems, 3019 bool cpus_updated, bool mems_updated) 3020 { 3021 if (cpumask_empty(new_cpus)) 3022 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); 3023 if (nodes_empty(*new_mems)) 3024 *new_mems = parent_cs(cs)->effective_mems; 3025 3026 spin_lock_irq(&callback_lock); 3027 cpumask_copy(cs->effective_cpus, new_cpus); 3028 cs->effective_mems = *new_mems; 3029 spin_unlock_irq(&callback_lock); 3030 3031 if (cpus_updated) 3032 update_tasks_cpumask(cs); 3033 if (mems_updated) 3034 update_tasks_nodemask(cs); 3035 } 3036 3037 static bool force_rebuild; 3038 3039 void cpuset_force_rebuild(void) 3040 { 3041 force_rebuild = true; 3042 } 3043 3044 /** 3045 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug 3046 * @cs: cpuset in interest 3047 * @tmp: the tmpmasks structure pointer 3048 * 3049 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone 3050 * offline, update @cs accordingly. If @cs ends up with no CPU or memory, 3051 * all its tasks are moved to the nearest ancestor with both resources. 3052 */ 3053 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) 3054 { 3055 static cpumask_t new_cpus; 3056 static nodemask_t new_mems; 3057 bool cpus_updated; 3058 bool mems_updated; 3059 struct cpuset *parent; 3060 retry: 3061 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); 3062 3063 percpu_down_write(&cpuset_rwsem); 3064 3065 /* 3066 * We have raced with task attaching. We wait until attaching 3067 * is finished, so we won't attach a task to an empty cpuset. 3068 */ 3069 if (cs->attach_in_progress) { 3070 percpu_up_write(&cpuset_rwsem); 3071 goto retry; 3072 } 3073 3074 parent = parent_cs(cs); 3075 compute_effective_cpumask(&new_cpus, cs, parent); 3076 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); 3077 3078 if (cs->nr_subparts_cpus) 3079 /* 3080 * Make sure that CPUs allocated to child partitions 3081 * do not show up in effective_cpus. 3082 */ 3083 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); 3084 3085 if (!tmp || !cs->partition_root_state) 3086 goto update_tasks; 3087 3088 /* 3089 * In the unlikely event that a partition root has empty 3090 * effective_cpus or its parent becomes erroneous, we have to 3091 * transition it to the erroneous state. 3092 */ 3093 if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || 3094 (parent->partition_root_state == PRS_ERROR))) { 3095 if (cs->nr_subparts_cpus) { 3096 cs->nr_subparts_cpus = 0; 3097 cpumask_clear(cs->subparts_cpus); 3098 compute_effective_cpumask(&new_cpus, cs, parent); 3099 } 3100 3101 /* 3102 * If the effective_cpus is empty because the child 3103 * partitions take away all the CPUs, we can keep 3104 * the current partition and let the child partitions 3105 * fight for available CPUs. 
3106 */ 3107 if ((parent->partition_root_state == PRS_ERROR) || 3108 cpumask_empty(&new_cpus)) { 3109 update_parent_subparts_cpumask(cs, partcmd_disable, 3110 NULL, tmp); 3111 cs->partition_root_state = PRS_ERROR; 3112 } 3113 cpuset_force_rebuild(); 3114 } 3115 3116 /* 3117 * On the other hand, an erroneous partition root may be transitioned 3118 * back to a regular one or a partition root with no CPU allocated 3119 * from the parent may change to erroneous. 3120 */ 3121 if (is_partition_root(parent) && 3122 ((cs->partition_root_state == PRS_ERROR) || 3123 !cpumask_intersects(&new_cpus, parent->subparts_cpus)) && 3124 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) 3125 cpuset_force_rebuild(); 3126 3127 update_tasks: 3128 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); 3129 mems_updated = !nodes_equal(new_mems, cs->effective_mems); 3130 3131 if (is_in_v2_mode()) 3132 hotplug_update_tasks(cs, &new_cpus, &new_mems, 3133 cpus_updated, mems_updated); 3134 else 3135 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, 3136 cpus_updated, mems_updated); 3137 3138 percpu_up_write(&cpuset_rwsem); 3139 } 3140 3141 /** 3142 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset 3143 * 3144 * This function is called after either CPU or memory configuration has 3145 * changed and updates cpuset accordingly. The top_cpuset is always 3146 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in 3147 * order to make cpusets transparent (of no affect) on systems that are 3148 * actively using CPU hotplug but making no active use of cpusets. 3149 * 3150 * Non-root cpusets are only affected by offlining. If any CPUs or memory 3151 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on 3152 * all descendants. 3153 * 3154 * Note that CPU offlining during suspend is ignored. We don't modify 3155 * cpusets across suspend/resume cycles at all. 3156 */ 3157 static void cpuset_hotplug_workfn(struct work_struct *work) 3158 { 3159 static cpumask_t new_cpus; 3160 static nodemask_t new_mems; 3161 bool cpus_updated, mems_updated; 3162 bool on_dfl = is_in_v2_mode(); 3163 struct tmpmasks tmp, *ptmp = NULL; 3164 3165 if (on_dfl && !alloc_cpumasks(NULL, &tmp)) 3166 ptmp = &tmp; 3167 3168 percpu_down_write(&cpuset_rwsem); 3169 3170 /* fetch the available cpus/mems and find out which changed how */ 3171 cpumask_copy(&new_cpus, cpu_active_mask); 3172 new_mems = node_states[N_MEMORY]; 3173 3174 /* 3175 * If subparts_cpus is populated, it is likely that the check below 3176 * will produce a false positive on cpus_updated when the cpu list 3177 * isn't changed. It is extra work, but it is better to be safe. 3178 */ 3179 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus); 3180 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); 3181 3182 /* synchronize cpus_allowed to cpu_active_mask */ 3183 if (cpus_updated) { 3184 spin_lock_irq(&callback_lock); 3185 if (!on_dfl) 3186 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); 3187 /* 3188 * Make sure that CPUs allocated to child partitions 3189 * do not show up in effective_cpus. If no CPU is left, 3190 * we clear the subparts_cpus & let the child partitions 3191 * fight for the CPUs again. 
3192 */ 3193 if (top_cpuset.nr_subparts_cpus) { 3194 if (cpumask_subset(&new_cpus, 3195 top_cpuset.subparts_cpus)) { 3196 top_cpuset.nr_subparts_cpus = 0; 3197 cpumask_clear(top_cpuset.subparts_cpus); 3198 } else { 3199 cpumask_andnot(&new_cpus, &new_cpus, 3200 top_cpuset.subparts_cpus); 3201 } 3202 } 3203 cpumask_copy(top_cpuset.effective_cpus, &new_cpus); 3204 spin_unlock_irq(&callback_lock); 3205 /* we don't mess with cpumasks of tasks in top_cpuset */ 3206 } 3207 3208 /* synchronize mems_allowed to N_MEMORY */ 3209 if (mems_updated) { 3210 spin_lock_irq(&callback_lock); 3211 if (!on_dfl) 3212 top_cpuset.mems_allowed = new_mems; 3213 top_cpuset.effective_mems = new_mems; 3214 spin_unlock_irq(&callback_lock); 3215 update_tasks_nodemask(&top_cpuset); 3216 } 3217 3218 percpu_up_write(&cpuset_rwsem); 3219 3220 /* if cpus or mems changed, we need to propagate to descendants */ 3221 if (cpus_updated || mems_updated) { 3222 struct cpuset *cs; 3223 struct cgroup_subsys_state *pos_css; 3224 3225 rcu_read_lock(); 3226 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { 3227 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) 3228 continue; 3229 rcu_read_unlock(); 3230 3231 cpuset_hotplug_update_tasks(cs, ptmp); 3232 3233 rcu_read_lock(); 3234 css_put(&cs->css); 3235 } 3236 rcu_read_unlock(); 3237 } 3238 3239 /* rebuild sched domains if cpus_allowed has changed */ 3240 if (cpus_updated || force_rebuild) { 3241 force_rebuild = false; 3242 rebuild_sched_domains(); 3243 } 3244 3245 free_cpumasks(NULL, ptmp); 3246 } 3247 3248 void cpuset_update_active_cpus(void) 3249 { 3250 /* 3251 * We're inside cpu hotplug critical region which usually nests 3252 * inside cgroup synchronization. Bounce actual hotplug processing 3253 * to a work item to avoid reverse locking order. 3254 */ 3255 schedule_work(&cpuset_hotplug_work); 3256 } 3257 3258 void cpuset_wait_for_hotplug(void) 3259 { 3260 flush_work(&cpuset_hotplug_work); 3261 } 3262 3263 /* 3264 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. 3265 * Call this routine anytime after node_states[N_MEMORY] changes. 3266 * See cpuset_update_active_cpus() for CPU hotplug handling. 3267 */ 3268 static int cpuset_track_online_nodes(struct notifier_block *self, 3269 unsigned long action, void *arg) 3270 { 3271 schedule_work(&cpuset_hotplug_work); 3272 return NOTIFY_OK; 3273 } 3274 3275 static struct notifier_block cpuset_track_online_nodes_nb = { 3276 .notifier_call = cpuset_track_online_nodes, 3277 .priority = 10, /* ??! */ 3278 }; 3279 3280 /** 3281 * cpuset_init_smp - initialize cpus_allowed 3282 * 3283 * Description: Finish top cpuset after cpu, node maps are initialized 3284 */ 3285 void __init cpuset_init_smp(void) 3286 { 3287 cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); 3288 top_cpuset.mems_allowed = node_states[N_MEMORY]; 3289 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; 3290 3291 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); 3292 top_cpuset.effective_mems = node_states[N_MEMORY]; 3293 3294 register_hotmemory_notifier(&cpuset_track_online_nodes_nb); 3295 3296 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); 3297 BUG_ON(!cpuset_migrate_mm_wq); 3298 } 3299 3300 /** 3301 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. 3302 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. 3303 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. 
3304 * 3305 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset 3306 * attached to the specified @tsk. Guaranteed to return some non-empty 3307 * subset of cpu_online_mask, even if this means going outside the 3308 * tasks cpuset. 3309 **/ 3310 3311 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) 3312 { 3313 unsigned long flags; 3314 3315 spin_lock_irqsave(&callback_lock, flags); 3316 guarantee_online_cpus(tsk, pmask); 3317 spin_unlock_irqrestore(&callback_lock, flags); 3318 } 3319 3320 /** 3321 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe. 3322 * @tsk: pointer to task_struct with which the scheduler is struggling 3323 * 3324 * Description: In the case that the scheduler cannot find an allowed cpu in 3325 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy 3326 * mode however, this value is the same as task_cs(tsk)->effective_cpus, 3327 * which will not contain a sane cpumask during cases such as cpu hotplugging. 3328 * This is the absolute last resort for the scheduler and it is only used if 3329 * _every_ other avenue has been traveled. 3330 * 3331 * Returns true if the affinity of @tsk was changed, false otherwise. 3332 **/ 3333 3334 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk) 3335 { 3336 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk); 3337 const struct cpumask *cs_mask; 3338 bool changed = false; 3339 3340 rcu_read_lock(); 3341 cs_mask = task_cs(tsk)->cpus_allowed; 3342 if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) { 3343 do_set_cpus_allowed(tsk, cs_mask); 3344 changed = true; 3345 } 3346 rcu_read_unlock(); 3347 3348 /* 3349 * We own tsk->cpus_allowed, nobody can change it under us. 3350 * 3351 * But we used cs && cs->cpus_allowed lockless and thus can 3352 * race with cgroup_attach_task() or update_cpumask() and get 3353 * the wrong tsk->cpus_allowed. However, both cases imply the 3354 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() 3355 * which takes task_rq_lock(). 3356 * 3357 * If we are called after it dropped the lock we must see all 3358 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary 3359 * set any mask even if it is not right from task_cs() pov, 3360 * the pending set_cpus_allowed_ptr() will fix things. 3361 * 3362 * select_fallback_rq() will fix things ups and set cpu_possible_mask 3363 * if required. 3364 */ 3365 return changed; 3366 } 3367 3368 void __init cpuset_init_current_mems_allowed(void) 3369 { 3370 nodes_setall(current->mems_allowed); 3371 } 3372 3373 /** 3374 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset. 3375 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. 3376 * 3377 * Description: Returns the nodemask_t mems_allowed of the cpuset 3378 * attached to the specified @tsk. Guaranteed to return some non-empty 3379 * subset of node_states[N_MEMORY], even if this means going outside the 3380 * tasks cpuset. 3381 **/ 3382 3383 nodemask_t cpuset_mems_allowed(struct task_struct *tsk) 3384 { 3385 nodemask_t mask; 3386 unsigned long flags; 3387 3388 spin_lock_irqsave(&callback_lock, flags); 3389 rcu_read_lock(); 3390 guarantee_online_mems(task_cs(tsk), &mask); 3391 rcu_read_unlock(); 3392 spin_unlock_irqrestore(&callback_lock, flags); 3393 3394 return mask; 3395 } 3396 3397 /** 3398 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. 
current mems_allowed 3399 * @nodemask: the nodemask to be checked 3400 * 3401 * Are any of the nodes in the nodemask allowed in current->mems_allowed? 3402 */ 3403 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) 3404 { 3405 return nodes_intersects(*nodemask, current->mems_allowed); 3406 } 3407
3408 /* 3409 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or 3410 * mem_hardwall ancestor to the specified cpuset. Call holding 3411 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall 3412 * (an unusual configuration), then returns the root cpuset. 3413 */ 3414 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) 3415 { 3416 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) 3417 cs = parent_cs(cs); 3418 return cs; 3419 } 3420
3421 /** 3422 * cpuset_node_allowed - Can we allocate on a memory node? 3423 * @node: is this an allowed node? 3424 * @gfp_mask: memory allocation flags 3425 * 3426 * If we're in interrupt, yes, we can always allocate. If @node is set in 3427 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this 3428 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset, 3429 * yes. If current has access to memory reserves as an oom victim, yes. 3430 * Otherwise, no. 3431 * 3432 * GFP_USER allocations are marked with the __GFP_HARDWALL bit, 3433 * and do not allow allocations outside the current task's cpuset 3434 * unless the task has been OOM killed. 3435 * GFP_KERNEL allocations are not so marked, so can escape to the 3436 * nearest enclosing hardwalled ancestor cpuset. 3437 * 3438 * Scanning up parent cpusets requires callback_lock. The 3439 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit 3440 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the 3441 * current task's mems_allowed came up empty on the first pass over 3442 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the 3443 * cpuset are short of memory, might require taking the callback_lock. 3444 * 3445 * The first call here from mm/page_alloc:get_page_from_freelist() 3446 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, 3447 * so no allocation on a node outside the cpuset is allowed (unless 3448 * in interrupt, of course). 3449 * 3450 * The second pass through get_page_from_freelist() doesn't even call 3451 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() 3452 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set 3453 * in alloc_flags. That logic and the checks below have the combined 3454 * effect that: 3455 * in_interrupt - any node ok (current task context irrelevant) 3456 * GFP_ATOMIC - any node ok 3457 * tsk_is_oom_victim - any node ok 3458 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok 3459 * GFP_USER - only nodes in current task's mems allowed ok. 3460 */ 3461 bool __cpuset_node_allowed(int node, gfp_t gfp_mask) 3462 { 3463 struct cpuset *cs; /* current cpuset ancestors */ 3464 int allowed; /* is allocation on @node allowed? */ 3465 unsigned long flags; 3466
3467 if (in_interrupt()) 3468 return true; 3469 if (node_isset(node, current->mems_allowed)) 3470 return true; 3471 /* 3472 * Allow tasks that have access to memory reserves because they have 3473 * been OOM killed to get memory anywhere.
3474 */ 3475 if (unlikely(tsk_is_oom_victim(current))) 3476 return true; 3477 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ 3478 return false; 3479
3480 if (current->flags & PF_EXITING) /* Let dying task have memory */ 3481 return true; 3482
3483 /* Not hardwall and node outside mems_allowed: scan up cpusets */ 3484 spin_lock_irqsave(&callback_lock, flags); 3485
3486 rcu_read_lock(); 3487 cs = nearest_hardwall_ancestor(task_cs(current)); 3488 allowed = node_isset(node, cs->mems_allowed); 3489 rcu_read_unlock(); 3490
3491 spin_unlock_irqrestore(&callback_lock, flags); 3492 return allowed; 3493 } 3494
3495 /** 3496 * cpuset_mem_spread_node() - On which node to begin search for a file page 3497 * cpuset_slab_spread_node() - On which node to begin search for a slab page 3498 * 3499 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for 3500 * tasks in a cpuset with is_spread_page or is_spread_slab set), 3501 * and if the memory allocation used cpuset_mem_spread_node() 3502 * to determine on which node to start looking, as it will for 3503 * certain page cache or slab cache pages such as used for file 3504 * system buffers and inode caches, then instead of starting on the 3505 * local node to look for a free page, rather spread the starting 3506 * node around the tasks mems_allowed nodes. 3507 * 3508 * We don't have to worry about the returned node being offline 3509 * because "it can't happen", and even if it did, it would be ok. 3510 * 3511 * The routines calling guarantee_online_mems() are careful to 3512 * only set nodes in task->mems_allowed that are online. So it 3513 * should not be possible for the following code to return an 3514 * offline node. But if it did, that would be ok, as this routine 3515 * is not returning the node where the allocation must be, only 3516 * the node where the search should start. The zonelist passed to 3517 * __alloc_pages() will include all nodes. If the slab allocator 3518 * is passed an offline node, it will fall back to the local node. 3519 * See kmem_cache_alloc_node(). 3520 */ 3521
3522 static int cpuset_spread_node(int *rotor) 3523 { 3524 return *rotor = next_node_in(*rotor, current->mems_allowed); 3525 } 3526
3527 int cpuset_mem_spread_node(void) 3528 { 3529 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) 3530 current->cpuset_mem_spread_rotor = 3531 node_random(&current->mems_allowed); 3532
3533 return cpuset_spread_node(&current->cpuset_mem_spread_rotor); 3534 } 3535
3536 int cpuset_slab_spread_node(void) 3537 { 3538 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) 3539 current->cpuset_slab_spread_rotor = 3540 node_random(&current->mems_allowed); 3541
3542 return cpuset_spread_node(&current->cpuset_slab_spread_rotor); 3543 } 3544
3545 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); 3546
3547 /** 3548 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? 3549 * @tsk1: pointer to task_struct of some task. 3550 * @tsk2: pointer to task_struct of some other task. 3551 * 3552 * Description: Return true if @tsk1's mems_allowed intersects the 3553 * mems_allowed of @tsk2. Used by the OOM killer to determine if 3554 * one of the task's memory usage might impact the memory available 3555 * to the other.
3556 **/ 3557
3558 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, 3559 const struct task_struct *tsk2) 3560 { 3561 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); 3562 } 3563
3564 /** 3565 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed 3566 * 3567 * Description: Prints current's name, cpuset name, and cached copy of its 3568 * mems_allowed to the kernel log. 3569 */ 3570 void cpuset_print_current_mems_allowed(void) 3571 { 3572 struct cgroup *cgrp; 3573
3574 rcu_read_lock(); 3575
3576 cgrp = task_cs(current)->css.cgroup; 3577 pr_cont(",cpuset="); 3578 pr_cont_cgroup_name(cgrp); 3579 pr_cont(",mems_allowed=%*pbl", 3580 nodemask_pr_args(&current->mems_allowed)); 3581
3582 rcu_read_unlock(); 3583 } 3584
3585 /* 3586 * Collection of memory_pressure is suppressed unless 3587 * this flag is enabled by writing "1" to the special 3588 * cpuset file 'memory_pressure_enabled' in the root cpuset. 3589 */ 3590
3591 int cpuset_memory_pressure_enabled __read_mostly; 3592
3593 /** 3594 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. 3595 * 3596 * Keep a running average of the rate of synchronous (direct) 3597 * page reclaim efforts initiated by tasks in each cpuset. 3598 * 3599 * This represents the rate at which some task in the cpuset 3600 * ran low on memory on all nodes it was allowed to use, and 3601 * had to enter the kernel's page reclaim code in an effort to 3602 * create more free memory by tossing clean pages or swapping 3603 * or writing dirty pages. 3604 * 3605 * Display to user space in the per-cpuset read-only file 3606 * "memory_pressure". Value displayed is an integer 3607 * representing the recent rate of entry into the synchronous 3608 * (direct) page reclaim by any task attached to the cpuset. 3609 **/ 3610
3611 void __cpuset_memory_pressure_bump(void) 3612 { 3613 rcu_read_lock(); 3614 fmeter_markevent(&task_cs(current)->fmeter); 3615 rcu_read_unlock(); 3616 } 3617
3618 #ifdef CONFIG_PROC_PID_CPUSET 3619 /* 3620 * proc_cpuset_show() 3621 * - Print task's cpuset path into seq_file. 3622 * - Used for /proc/<pid>/cpuset. 3623 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it 3624 * doesn't really matter if tsk->cpuset changes after we read it, 3625 * and we take cpuset_mutex, keeping cpuset_attach() from changing it 3626 * anyway. 3627 */ 3628 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, 3629 struct pid *pid, struct task_struct *tsk) 3630 { 3631 char *buf; 3632 struct cgroup_subsys_state *css; 3633 int retval; 3634
3635 retval = -ENOMEM; 3636 buf = kmalloc(PATH_MAX, GFP_KERNEL); 3637 if (!buf) 3638 goto out; 3639
3640 css = task_get_css(tsk, cpuset_cgrp_id); 3641 retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, 3642 current->nsproxy->cgroup_ns); 3643 css_put(css); 3644 if (retval >= PATH_MAX) 3645 retval = -ENAMETOOLONG; 3646 if (retval < 0) 3647 goto out_free; 3648 seq_puts(m, buf); 3649 seq_putc(m, '\n'); 3650 retval = 0; 3651 out_free: 3652 kfree(buf); 3653 out: 3654 return retval; 3655 } 3656 #endif /* CONFIG_PROC_PID_CPUSET */ 3657
3658 /* Display task mems_allowed in /proc/<pid>/status file. */ 3659 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) 3660 { 3661 seq_printf(m, "Mems_allowed:\t%*pb\n", 3662 nodemask_pr_args(&task->mems_allowed)); 3663 seq_printf(m, "Mems_allowed_list:\t%*pbl\n", 3664 nodemask_pr_args(&task->mems_allowed)); 3665 } 3666