// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints about which node(s) memory
 * should be allocated from.
 *
 * Support multiple policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave       Allocate memory interleaved over a set of nodes,
 *                  with normal fallback if it fails.
 *                  For VMA based allocations this interleaves based on the
 *                  offset into the backing object or offset into the mapping
 *                  for anonymous memory. For process policy a per-process
 *                  counter is used.
 *
 * weighted interleave
 *                  Allocate memory interleaved over a set of nodes based on
 *                  a set of weights (per-node), with normal fallback if it
 *                  fails. Otherwise operates the same as interleave.
 *                  Example: nodeset(0,1) & weights (2,1) - 2 pages allocated
 *                  on node 0 for every 1 page allocated on node 1.
 *
 * bind             Only allocate memory on a specific set of nodes,
 *                  no fallback.
 *                  FIXME: memory is allocated starting with the first node
 *                  to the last. It would be better if bind would truly
 *                  restrict the allocation to those nodes instead.
 *
 * preferred        Try a specific node first before normal fallback.
 *                  As a special case NUMA_NO_NODE here means do the
 *                  allocation on the local CPU. This is normally identical
 *                  to default, but useful to set in a VMA when you have a
 *                  non-default process policy.
 *
 * preferred many   Try a set of nodes first before normal fallback. This is
 *                  similar to preferred without the special case.
 *
 * default          Allocate on the local node first, or when on a VMA
 *                  use the process policy. This is what Linux always did
 *                  in a NUMA-aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for
 * memory allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When the process
 * policy is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use the default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * The same goes for GFP_DMA allocations.
 *
 * For shmem/tmpfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always graceful about that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT       (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */
#define MPOL_MF_WRLOCK       (MPOL_MF_INTERNAL << 2)	/* Write-lock walked vmas */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
        .refcnt = ATOMIC_INIT(1), /* never free it */
        .mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/*
 * iw_table is the sysfs-set interleave weight table; a value of 0 denotes
 * that the system-default value should be used. A NULL iw_table also denotes
 * that system-default values should be used. Until the system-default table
 * is implemented, the system-default is always 1.
 *
 * iw_table is RCU protected.
 */
static u8 __rcu *iw_table;
static DEFINE_MUTEX(iw_table_lock);

static u8 get_il_weight(int node)
{
        u8 *table;
        u8 weight;

        rcu_read_lock();
        table = rcu_dereference(iw_table);
        /* if no iw_table, use system default */
        weight = table ? table[node] : 1;
        /* if value in iw_table is 0, use system default */
        weight = weight ? weight : 1;
        rcu_read_unlock();
        return weight;
}

/**
 * numa_nearest_node - Find nearest node by state
 * @node: Node id to start the search
 * @state: State to filter the search
 *
 * Lookup the closest node by distance if @node is not in state.
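 *
 * A minimal usage sketch (illustrative only, not taken from a real caller;
 * "nid" might come from a device or CPU that sits on a memoryless node):
 *
 *	nid = numa_nearest_node(nid, N_MEMORY);
 *	page = __alloc_pages_node(nid, GFP_KERNEL, 0);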
173 * 174 * Return: this @node if it is in state, otherwise the closest node by distance 175 */ 176 int numa_nearest_node(int node, unsigned int state) 177 { 178 int min_dist = INT_MAX, dist, n, min_node; 179 180 if (state >= NR_NODE_STATES) 181 return -EINVAL; 182 183 if (node == NUMA_NO_NODE || node_state(node, state)) 184 return node; 185 186 min_node = node; 187 for_each_node_state(n, state) { 188 dist = node_distance(node, n); 189 if (dist < min_dist) { 190 min_dist = dist; 191 min_node = n; 192 } 193 } 194 195 return min_node; 196 } 197 EXPORT_SYMBOL_GPL(numa_nearest_node); 198 199 struct mempolicy *get_task_policy(struct task_struct *p) 200 { 201 struct mempolicy *pol = p->mempolicy; 202 int node; 203 204 if (pol) 205 return pol; 206 207 node = numa_node_id(); 208 if (node != NUMA_NO_NODE) { 209 pol = &preferred_node_policy[node]; 210 /* preferred_node_policy is not initialised early in boot */ 211 if (pol->mode) 212 return pol; 213 } 214 215 return &default_policy; 216 } 217 218 static const struct mempolicy_operations { 219 int (*create)(struct mempolicy *pol, const nodemask_t *nodes); 220 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes); 221 } mpol_ops[MPOL_MAX]; 222 223 static inline int mpol_store_user_nodemask(const struct mempolicy *pol) 224 { 225 return pol->flags & MPOL_MODE_FLAGS; 226 } 227 228 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig, 229 const nodemask_t *rel) 230 { 231 nodemask_t tmp; 232 nodes_fold(tmp, *orig, nodes_weight(*rel)); 233 nodes_onto(*ret, tmp, *rel); 234 } 235 236 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 237 { 238 if (nodes_empty(*nodes)) 239 return -EINVAL; 240 pol->nodes = *nodes; 241 return 0; 242 } 243 244 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) 245 { 246 if (nodes_empty(*nodes)) 247 return -EINVAL; 248 249 nodes_clear(pol->nodes); 250 node_set(first_node(*nodes), pol->nodes); 251 return 0; 252 } 253 254 /* 255 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if 256 * any, for the new policy. mpol_new() has already validated the nodes 257 * parameter with respect to the policy mode and flags. 258 * 259 * Must be called holding task's alloc_lock to protect task's mems_allowed 260 * and mempolicy. May also be called holding the mmap_lock for write. 261 */ 262 static int mpol_set_nodemask(struct mempolicy *pol, 263 const nodemask_t *nodes, struct nodemask_scratch *nsc) 264 { 265 int ret; 266 267 /* 268 * Default (pol==NULL) resp. local memory policies are not a 269 * subject of any remapping. They also do not need any special 270 * constructor. 271 */ 272 if (!pol || pol->mode == MPOL_LOCAL) 273 return 0; 274 275 /* Check N_MEMORY */ 276 nodes_and(nsc->mask1, 277 cpuset_current_mems_allowed, node_states[N_MEMORY]); 278 279 VM_BUG_ON(!nodes); 280 281 if (pol->flags & MPOL_F_RELATIVE_NODES) 282 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); 283 else 284 nodes_and(nsc->mask2, *nodes, nsc->mask1); 285 286 if (mpol_store_user_nodemask(pol)) 287 pol->w.user_nodemask = *nodes; 288 else 289 pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed; 290 291 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); 292 return ret; 293 } 294 295 /* 296 * This function just creates a new policy, does some check and simple 297 * initialization. You must invoke mpol_set_nodemask() to set nodes. 
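 *
 * The usual calling pattern, simplified from do_set_mempolicy() below:
 *
 *	new = mpol_new(mode, flags, nodes);
 *	task_lock(current);
 *	ret = mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);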
298 */ 299 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, 300 nodemask_t *nodes) 301 { 302 struct mempolicy *policy; 303 304 if (mode == MPOL_DEFAULT) { 305 if (nodes && !nodes_empty(*nodes)) 306 return ERR_PTR(-EINVAL); 307 return NULL; 308 } 309 VM_BUG_ON(!nodes); 310 311 /* 312 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or 313 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). 314 * All other modes require a valid pointer to a non-empty nodemask. 315 */ 316 if (mode == MPOL_PREFERRED) { 317 if (nodes_empty(*nodes)) { 318 if (((flags & MPOL_F_STATIC_NODES) || 319 (flags & MPOL_F_RELATIVE_NODES))) 320 return ERR_PTR(-EINVAL); 321 322 mode = MPOL_LOCAL; 323 } 324 } else if (mode == MPOL_LOCAL) { 325 if (!nodes_empty(*nodes) || 326 (flags & MPOL_F_STATIC_NODES) || 327 (flags & MPOL_F_RELATIVE_NODES)) 328 return ERR_PTR(-EINVAL); 329 } else if (nodes_empty(*nodes)) 330 return ERR_PTR(-EINVAL); 331 332 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); 333 if (!policy) 334 return ERR_PTR(-ENOMEM); 335 atomic_set(&policy->refcnt, 1); 336 policy->mode = mode; 337 policy->flags = flags; 338 policy->home_node = NUMA_NO_NODE; 339 340 return policy; 341 } 342 343 /* Slow path of a mpol destructor. */ 344 void __mpol_put(struct mempolicy *pol) 345 { 346 if (!atomic_dec_and_test(&pol->refcnt)) 347 return; 348 kmem_cache_free(policy_cache, pol); 349 } 350 351 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) 352 { 353 } 354 355 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 356 { 357 nodemask_t tmp; 358 359 if (pol->flags & MPOL_F_STATIC_NODES) 360 nodes_and(tmp, pol->w.user_nodemask, *nodes); 361 else if (pol->flags & MPOL_F_RELATIVE_NODES) 362 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 363 else { 364 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed, 365 *nodes); 366 pol->w.cpuset_mems_allowed = *nodes; 367 } 368 369 if (nodes_empty(tmp)) 370 tmp = *nodes; 371 372 pol->nodes = tmp; 373 } 374 375 static void mpol_rebind_preferred(struct mempolicy *pol, 376 const nodemask_t *nodes) 377 { 378 pol->w.cpuset_mems_allowed = *nodes; 379 } 380 381 /* 382 * mpol_rebind_policy - Migrate a policy to a different set of nodes 383 * 384 * Per-vma policies are protected by mmap_lock. Allocations using per-task 385 * policies are protected by task->mems_allowed_seq to prevent a premature 386 * OOM/allocation failure due to parallel nodemask modification. 387 */ 388 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) 389 { 390 if (!pol || pol->mode == MPOL_LOCAL) 391 return; 392 if (!mpol_store_user_nodemask(pol) && 393 nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) 394 return; 395 396 mpol_ops[pol->mode].rebind(pol, newmask); 397 } 398 399 /* 400 * Wrapper for mpol_rebind_policy() that just requires task 401 * pointer, and updates task mempolicy. 402 * 403 * Called with task's alloc_lock held. 404 */ 405 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) 406 { 407 mpol_rebind_policy(tsk->mempolicy, new); 408 } 409 410 /* 411 * Rebind each vma in mm to new nodemask. 412 * 413 * Call holding a reference to mm. Takes mm->mmap_lock during call. 
414 */ 415 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) 416 { 417 struct vm_area_struct *vma; 418 VMA_ITERATOR(vmi, mm, 0); 419 420 mmap_write_lock(mm); 421 for_each_vma(vmi, vma) { 422 vma_start_write(vma); 423 mpol_rebind_policy(vma->vm_policy, new); 424 } 425 mmap_write_unlock(mm); 426 } 427 428 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { 429 [MPOL_DEFAULT] = { 430 .rebind = mpol_rebind_default, 431 }, 432 [MPOL_INTERLEAVE] = { 433 .create = mpol_new_nodemask, 434 .rebind = mpol_rebind_nodemask, 435 }, 436 [MPOL_PREFERRED] = { 437 .create = mpol_new_preferred, 438 .rebind = mpol_rebind_preferred, 439 }, 440 [MPOL_BIND] = { 441 .create = mpol_new_nodemask, 442 .rebind = mpol_rebind_nodemask, 443 }, 444 [MPOL_LOCAL] = { 445 .rebind = mpol_rebind_default, 446 }, 447 [MPOL_PREFERRED_MANY] = { 448 .create = mpol_new_nodemask, 449 .rebind = mpol_rebind_preferred, 450 }, 451 [MPOL_WEIGHTED_INTERLEAVE] = { 452 .create = mpol_new_nodemask, 453 .rebind = mpol_rebind_nodemask, 454 }, 455 }; 456 457 static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist, 458 unsigned long flags); 459 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol, 460 pgoff_t ilx, int *nid); 461 462 static bool strictly_unmovable(unsigned long flags) 463 { 464 /* 465 * STRICT without MOVE flags lets do_mbind() fail immediately with -EIO 466 * if any misplaced page is found. 467 */ 468 return (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) == 469 MPOL_MF_STRICT; 470 } 471 472 struct migration_mpol { /* for alloc_migration_target_by_mpol() */ 473 struct mempolicy *pol; 474 pgoff_t ilx; 475 }; 476 477 struct queue_pages { 478 struct list_head *pagelist; 479 unsigned long flags; 480 nodemask_t *nmask; 481 unsigned long start; 482 unsigned long end; 483 struct vm_area_struct *first; 484 struct folio *large; /* note last large folio encountered */ 485 long nr_failed; /* could not be isolated at this time */ 486 }; 487 488 /* 489 * Check if the folio's nid is in qp->nmask. 490 * 491 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is 492 * in the invert of qp->nmask. 493 */ 494 static inline bool queue_folio_required(struct folio *folio, 495 struct queue_pages *qp) 496 { 497 int nid = folio_nid(folio); 498 unsigned long flags = qp->flags; 499 500 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); 501 } 502 503 static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk) 504 { 505 struct folio *folio; 506 struct queue_pages *qp = walk->private; 507 508 if (unlikely(is_pmd_migration_entry(*pmd))) { 509 qp->nr_failed++; 510 return; 511 } 512 folio = pmd_folio(*pmd); 513 if (is_huge_zero_folio(folio)) { 514 walk->action = ACTION_CONTINUE; 515 return; 516 } 517 if (!queue_folio_required(folio, qp)) 518 return; 519 if (!(qp->flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) || 520 !vma_migratable(walk->vma) || 521 !migrate_folio_add(folio, qp->pagelist, qp->flags)) 522 qp->nr_failed++; 523 } 524 525 /* 526 * Scan through folios, checking if they satisfy the required conditions, 527 * moving them from LRU to local pagelist for migration if they do (or not). 528 * 529 * queue_folios_pte_range() has two possible return values: 530 * 0 - continue walking to scan for more, even if an existing folio on the 531 * wrong node could not be isolated and queued for migration. 532 * -EIO - only MPOL_MF_STRICT was specified, without MPOL_MF_MOVE or ..._ALL, 533 * and an existing folio was on a node that does not follow the policy. 
534 */ 535 static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr, 536 unsigned long end, struct mm_walk *walk) 537 { 538 struct vm_area_struct *vma = walk->vma; 539 struct folio *folio; 540 struct queue_pages *qp = walk->private; 541 unsigned long flags = qp->flags; 542 pte_t *pte, *mapped_pte; 543 pte_t ptent; 544 spinlock_t *ptl; 545 546 ptl = pmd_trans_huge_lock(pmd, vma); 547 if (ptl) { 548 queue_folios_pmd(pmd, walk); 549 spin_unlock(ptl); 550 goto out; 551 } 552 553 mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 554 if (!pte) { 555 walk->action = ACTION_AGAIN; 556 return 0; 557 } 558 for (; addr != end; pte++, addr += PAGE_SIZE) { 559 ptent = ptep_get(pte); 560 if (pte_none(ptent)) 561 continue; 562 if (!pte_present(ptent)) { 563 if (is_migration_entry(pte_to_swp_entry(ptent))) 564 qp->nr_failed++; 565 continue; 566 } 567 folio = vm_normal_folio(vma, addr, ptent); 568 if (!folio || folio_is_zone_device(folio)) 569 continue; 570 /* 571 * vm_normal_folio() filters out zero pages, but there might 572 * still be reserved folios to skip, perhaps in a VDSO. 573 */ 574 if (folio_test_reserved(folio)) 575 continue; 576 if (!queue_folio_required(folio, qp)) 577 continue; 578 if (folio_test_large(folio)) { 579 /* 580 * A large folio can only be isolated from LRU once, 581 * but may be mapped by many PTEs (and Copy-On-Write may 582 * intersperse PTEs of other, order 0, folios). This is 583 * a common case, so don't mistake it for failure (but 584 * there can be other cases of multi-mapped pages which 585 * this quick check does not help to filter out - and a 586 * search of the pagelist might grow to be prohibitive). 587 * 588 * migrate_pages(&pagelist) returns nr_failed folios, so 589 * check "large" now so that queue_pages_range() returns 590 * a comparable nr_failed folios. This does imply that 591 * if folio could not be isolated for some racy reason 592 * at its first PTE, later PTEs will not give it another 593 * chance of isolation; but keeps the accounting simple. 594 */ 595 if (folio == qp->large) 596 continue; 597 qp->large = folio; 598 } 599 if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) || 600 !vma_migratable(vma) || 601 !migrate_folio_add(folio, qp->pagelist, flags)) { 602 qp->nr_failed++; 603 if (strictly_unmovable(flags)) 604 break; 605 } 606 } 607 pte_unmap_unlock(mapped_pte, ptl); 608 cond_resched(); 609 out: 610 if (qp->nr_failed && strictly_unmovable(flags)) 611 return -EIO; 612 return 0; 613 } 614 615 static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask, 616 unsigned long addr, unsigned long end, 617 struct mm_walk *walk) 618 { 619 #ifdef CONFIG_HUGETLB_PAGE 620 struct queue_pages *qp = walk->private; 621 unsigned long flags = qp->flags; 622 struct folio *folio; 623 spinlock_t *ptl; 624 pte_t entry; 625 626 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); 627 entry = huge_ptep_get(pte); 628 if (!pte_present(entry)) { 629 if (unlikely(is_hugetlb_entry_migration(entry))) 630 qp->nr_failed++; 631 goto unlock; 632 } 633 folio = pfn_folio(pte_pfn(entry)); 634 if (!queue_folio_required(folio, qp)) 635 goto unlock; 636 if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) || 637 !vma_migratable(walk->vma)) { 638 qp->nr_failed++; 639 goto unlock; 640 } 641 /* 642 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio. 643 * Choosing not to migrate a shared folio is not counted as a failure. 644 * 645 * See folio_likely_mapped_shared() on possible imprecision when we 646 * cannot easily detect if a folio is shared. 
647 */ 648 if ((flags & MPOL_MF_MOVE_ALL) || 649 (!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte))) 650 if (!isolate_hugetlb(folio, qp->pagelist)) 651 qp->nr_failed++; 652 unlock: 653 spin_unlock(ptl); 654 if (qp->nr_failed && strictly_unmovable(flags)) 655 return -EIO; 656 #endif 657 return 0; 658 } 659 660 #ifdef CONFIG_NUMA_BALANCING 661 /* 662 * This is used to mark a range of virtual addresses to be inaccessible. 663 * These are later cleared by a NUMA hinting fault. Depending on these 664 * faults, pages may be migrated for better NUMA placement. 665 * 666 * This is assuming that NUMA faults are handled using PROT_NONE. If 667 * an architecture makes a different choice, it will need further 668 * changes to the core. 669 */ 670 unsigned long change_prot_numa(struct vm_area_struct *vma, 671 unsigned long addr, unsigned long end) 672 { 673 struct mmu_gather tlb; 674 long nr_updated; 675 676 tlb_gather_mmu(&tlb, vma->vm_mm); 677 678 nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA); 679 if (nr_updated > 0) 680 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); 681 682 tlb_finish_mmu(&tlb); 683 684 return nr_updated; 685 } 686 #endif /* CONFIG_NUMA_BALANCING */ 687 688 static int queue_pages_test_walk(unsigned long start, unsigned long end, 689 struct mm_walk *walk) 690 { 691 struct vm_area_struct *next, *vma = walk->vma; 692 struct queue_pages *qp = walk->private; 693 unsigned long flags = qp->flags; 694 695 /* range check first */ 696 VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma); 697 698 if (!qp->first) { 699 qp->first = vma; 700 if (!(flags & MPOL_MF_DISCONTIG_OK) && 701 (qp->start < vma->vm_start)) 702 /* hole at head side of range */ 703 return -EFAULT; 704 } 705 next = find_vma(vma->vm_mm, vma->vm_end); 706 if (!(flags & MPOL_MF_DISCONTIG_OK) && 707 ((vma->vm_end < qp->end) && 708 (!next || vma->vm_end < next->vm_start))) 709 /* hole at middle or tail of range */ 710 return -EFAULT; 711 712 /* 713 * Need check MPOL_MF_STRICT to return -EIO if possible 714 * regardless of vma_migratable 715 */ 716 if (!vma_migratable(vma) && 717 !(flags & MPOL_MF_STRICT)) 718 return 1; 719 720 /* 721 * Check page nodes, and queue pages to move, in the current vma. 722 * But if no moving, and no strict checking, the scan can be skipped. 723 */ 724 if (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 725 return 0; 726 return 1; 727 } 728 729 static const struct mm_walk_ops queue_pages_walk_ops = { 730 .hugetlb_entry = queue_folios_hugetlb, 731 .pmd_entry = queue_folios_pte_range, 732 .test_walk = queue_pages_test_walk, 733 .walk_lock = PGWALK_RDLOCK, 734 }; 735 736 static const struct mm_walk_ops queue_pages_lock_vma_walk_ops = { 737 .hugetlb_entry = queue_folios_hugetlb, 738 .pmd_entry = queue_folios_pte_range, 739 .test_walk = queue_pages_test_walk, 740 .walk_lock = PGWALK_WRLOCK, 741 }; 742 743 /* 744 * Walk through page tables and collect pages to be migrated. 745 * 746 * If pages found in a given range are not on the required set of @nodes, 747 * and migration is allowed, they are isolated and queued to @pagelist. 748 * 749 * queue_pages_range() may return: 750 * 0 - all pages already on the right node, or successfully queued for moving 751 * (or neither strict checking nor moving requested: only range checking). 752 * >0 - this number of misplaced folios could not be queued for moving 753 * (a hugetlbfs page or a transparent huge page being counted as 1). 754 * -EIO - a misplaced page found, when MPOL_MF_STRICT specified without MOVEs. 
755 * -EFAULT - a hole in the memory range, when MPOL_MF_DISCONTIG_OK unspecified. 756 */ 757 static long 758 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 759 nodemask_t *nodes, unsigned long flags, 760 struct list_head *pagelist) 761 { 762 int err; 763 struct queue_pages qp = { 764 .pagelist = pagelist, 765 .flags = flags, 766 .nmask = nodes, 767 .start = start, 768 .end = end, 769 .first = NULL, 770 }; 771 const struct mm_walk_ops *ops = (flags & MPOL_MF_WRLOCK) ? 772 &queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops; 773 774 err = walk_page_range(mm, start, end, ops, &qp); 775 776 if (!qp.first) 777 /* whole range in hole */ 778 err = -EFAULT; 779 780 return err ? : qp.nr_failed; 781 } 782 783 /* 784 * Apply policy to a single VMA 785 * This must be called with the mmap_lock held for writing. 786 */ 787 static int vma_replace_policy(struct vm_area_struct *vma, 788 struct mempolicy *pol) 789 { 790 int err; 791 struct mempolicy *old; 792 struct mempolicy *new; 793 794 vma_assert_write_locked(vma); 795 796 new = mpol_dup(pol); 797 if (IS_ERR(new)) 798 return PTR_ERR(new); 799 800 if (vma->vm_ops && vma->vm_ops->set_policy) { 801 err = vma->vm_ops->set_policy(vma, new); 802 if (err) 803 goto err_out; 804 } 805 806 old = vma->vm_policy; 807 vma->vm_policy = new; /* protected by mmap_lock */ 808 mpol_put(old); 809 810 return 0; 811 err_out: 812 mpol_put(new); 813 return err; 814 } 815 816 /* Split or merge the VMA (if required) and apply the new policy */ 817 static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma, 818 struct vm_area_struct **prev, unsigned long start, 819 unsigned long end, struct mempolicy *new_pol) 820 { 821 unsigned long vmstart, vmend; 822 823 vmend = min(end, vma->vm_end); 824 if (start > vma->vm_start) { 825 *prev = vma; 826 vmstart = start; 827 } else { 828 vmstart = vma->vm_start; 829 } 830 831 if (mpol_equal(vma->vm_policy, new_pol)) { 832 *prev = vma; 833 return 0; 834 } 835 836 vma = vma_modify_policy(vmi, *prev, vma, vmstart, vmend, new_pol); 837 if (IS_ERR(vma)) 838 return PTR_ERR(vma); 839 840 *prev = vma; 841 return vma_replace_policy(vma, new_pol); 842 } 843 844 /* Set the process memory policy */ 845 static long do_set_mempolicy(unsigned short mode, unsigned short flags, 846 nodemask_t *nodes) 847 { 848 struct mempolicy *new, *old; 849 NODEMASK_SCRATCH(scratch); 850 int ret; 851 852 if (!scratch) 853 return -ENOMEM; 854 855 new = mpol_new(mode, flags, nodes); 856 if (IS_ERR(new)) { 857 ret = PTR_ERR(new); 858 goto out; 859 } 860 861 task_lock(current); 862 ret = mpol_set_nodemask(new, nodes, scratch); 863 if (ret) { 864 task_unlock(current); 865 mpol_put(new); 866 goto out; 867 } 868 869 old = current->mempolicy; 870 current->mempolicy = new; 871 if (new && (new->mode == MPOL_INTERLEAVE || 872 new->mode == MPOL_WEIGHTED_INTERLEAVE)) { 873 current->il_prev = MAX_NUMNODES-1; 874 current->il_weight = 0; 875 } 876 task_unlock(current); 877 mpol_put(old); 878 ret = 0; 879 out: 880 NODEMASK_SCRATCH_FREE(scratch); 881 return ret; 882 } 883 884 /* 885 * Return nodemask for policy for get_mempolicy() query 886 * 887 * Called with task's alloc_lock held 888 */ 889 static void get_policy_nodemask(struct mempolicy *pol, nodemask_t *nodes) 890 { 891 nodes_clear(*nodes); 892 if (pol == &default_policy) 893 return; 894 895 switch (pol->mode) { 896 case MPOL_BIND: 897 case MPOL_INTERLEAVE: 898 case MPOL_PREFERRED: 899 case MPOL_PREFERRED_MANY: 900 case MPOL_WEIGHTED_INTERLEAVE: 901 *nodes = pol->nodes; 902 break; 
903 case MPOL_LOCAL: 904 /* return empty node mask for local allocation */ 905 break; 906 default: 907 BUG(); 908 } 909 } 910 911 static int lookup_node(struct mm_struct *mm, unsigned long addr) 912 { 913 struct page *p = NULL; 914 int ret; 915 916 ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p); 917 if (ret > 0) { 918 ret = page_to_nid(p); 919 put_page(p); 920 } 921 return ret; 922 } 923 924 /* Retrieve NUMA policy */ 925 static long do_get_mempolicy(int *policy, nodemask_t *nmask, 926 unsigned long addr, unsigned long flags) 927 { 928 int err; 929 struct mm_struct *mm = current->mm; 930 struct vm_area_struct *vma = NULL; 931 struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; 932 933 if (flags & 934 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) 935 return -EINVAL; 936 937 if (flags & MPOL_F_MEMS_ALLOWED) { 938 if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) 939 return -EINVAL; 940 *policy = 0; /* just so it's initialized */ 941 task_lock(current); 942 *nmask = cpuset_current_mems_allowed; 943 task_unlock(current); 944 return 0; 945 } 946 947 if (flags & MPOL_F_ADDR) { 948 pgoff_t ilx; /* ignored here */ 949 /* 950 * Do NOT fall back to task policy if the 951 * vma/shared policy at addr is NULL. We 952 * want to return MPOL_DEFAULT in this case. 953 */ 954 mmap_read_lock(mm); 955 vma = vma_lookup(mm, addr); 956 if (!vma) { 957 mmap_read_unlock(mm); 958 return -EFAULT; 959 } 960 pol = __get_vma_policy(vma, addr, &ilx); 961 } else if (addr) 962 return -EINVAL; 963 964 if (!pol) 965 pol = &default_policy; /* indicates default behavior */ 966 967 if (flags & MPOL_F_NODE) { 968 if (flags & MPOL_F_ADDR) { 969 /* 970 * Take a refcount on the mpol, because we are about to 971 * drop the mmap_lock, after which only "pol" remains 972 * valid, "vma" is stale. 973 */ 974 pol_refcount = pol; 975 vma = NULL; 976 mpol_get(pol); 977 mmap_read_unlock(mm); 978 err = lookup_node(mm, addr); 979 if (err < 0) 980 goto out; 981 *policy = err; 982 } else if (pol == current->mempolicy && 983 pol->mode == MPOL_INTERLEAVE) { 984 *policy = next_node_in(current->il_prev, pol->nodes); 985 } else if (pol == current->mempolicy && 986 pol->mode == MPOL_WEIGHTED_INTERLEAVE) { 987 if (current->il_weight) 988 *policy = current->il_prev; 989 else 990 *policy = next_node_in(current->il_prev, 991 pol->nodes); 992 } else { 993 err = -EINVAL; 994 goto out; 995 } 996 } else { 997 *policy = pol == &default_policy ? MPOL_DEFAULT : 998 pol->mode; 999 /* 1000 * Internal mempolicy flags must be masked off before exposing 1001 * the policy to userspace. 1002 */ 1003 *policy |= (pol->flags & MPOL_MODE_FLAGS); 1004 } 1005 1006 err = 0; 1007 if (nmask) { 1008 if (mpol_store_user_nodemask(pol)) { 1009 *nmask = pol->w.user_nodemask; 1010 } else { 1011 task_lock(current); 1012 get_policy_nodemask(pol, nmask); 1013 task_unlock(current); 1014 } 1015 } 1016 1017 out: 1018 mpol_cond_put(pol); 1019 if (vma) 1020 mmap_read_unlock(mm); 1021 if (pol_refcount) 1022 mpol_put(pol_refcount); 1023 return err; 1024 } 1025 1026 #ifdef CONFIG_MIGRATION 1027 static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist, 1028 unsigned long flags) 1029 { 1030 /* 1031 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio. 1032 * Choosing not to migrate a shared folio is not counted as a failure. 1033 * 1034 * See folio_likely_mapped_shared() on possible imprecision when we 1035 * cannot easily detect if a folio is shared. 
         */
        if ((flags & MPOL_MF_MOVE_ALL) || !folio_likely_mapped_shared(folio)) {
                if (folio_isolate_lru(folio)) {
                        list_add_tail(&folio->lru, foliolist);
                        node_stat_mod_folio(folio,
                                NR_ISOLATED_ANON + folio_is_file_lru(folio),
                                folio_nr_pages(folio));
                } else {
                        /*
                         * A non-movable folio may reach here. Also, there may
                         * be folios temporarily off the LRU or non-LRU movable
                         * folios. Treat them as unmovable folios since they
                         * can't be isolated, so they can't be moved at the
                         * moment.
                         */
                        return false;
                }
        }
        return true;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static long migrate_to_node(struct mm_struct *mm, int source, int dest,
                            int flags)
{
        nodemask_t nmask;
        struct vm_area_struct *vma;
        LIST_HEAD(pagelist);
        long nr_failed;
        long err = 0;
        struct migration_target_control mtc = {
                .nid = dest,
                .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
                .reason = MR_SYSCALL,
        };

        nodes_clear(nmask);
        node_set(source, nmask);

        VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));

        mmap_read_lock(mm);
        vma = find_vma(mm, 0);

        /*
         * This does not migrate the range, but isolates all pages that
         * need migration. Between passing in the full user address
         * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail,
         * but passes back the count of pages which could not be isolated.
         */
        nr_failed = queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
                                      flags | MPOL_MF_DISCONTIG_OK, &pagelist);
        mmap_read_unlock(mm);

        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, alloc_migration_target, NULL,
                        (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
                if (err)
                        putback_movable_pages(&pagelist);
        }

        if (err >= 0)
                err += nr_failed;
        return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags)
{
        long nr_failed = 0;
        long err = 0;
        nodemask_t tmp;

        lru_cache_disable();

        /*
         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
         * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
         * bit in 'tmp', and return that <source, dest> pair for migration.
         * The pair of nodemasks 'to' and 'from' define the map.
         *
         * If no pair of bits is found that way, fall back to picking some
         * pair of 'source' and 'dest' bits that are not the same. If the
         * 'source' and 'dest' bits are the same, this represents a node
         * that will be migrating to itself, so no pages need move.
         *
         * If no bits are left in 'tmp', or if all remaining bits left
         * in 'tmp' correspond to the same bit in 'to', return false
         * (nothing left to migrate).
         *
         * This lets us pick a pair of nodes to migrate between, such that
         * if possible the dest node is not already occupied by some other
         * source node, minimizing the risk of overloading the memory on a
         * node that would happen if we migrated incoming memory to a node
         * before migrating the outgoing memory from that same node.
         *
         * A single scan of tmp is sufficient. As we go, we remember the
         * most recent <s, d> pair that moved (s != d). If we find a pair
         * that not only moved, but what's better, moved to an empty slot
         * (d is not set in tmp), then we break out then, with that pair.
         * Otherwise, when we finish scanning tmp, we at least have the
         * most recent <s, d> pair that moved. If we get all the way through
         * the scan of tmp without finding any node that moved, much less
         * moved to an empty node, then there is nothing left worth migrating.
         */

        tmp = *from;
        while (!nodes_empty(tmp)) {
                int s, d;
                int source = NUMA_NO_NODE;
                int dest = 0;

                for_each_node_mask(s, tmp) {

                        /*
                         * do_migrate_pages() tries to maintain the relative
                         * node relationship of the pages established between
                         * threads and memory areas.
                         *
                         * However if the number of source nodes is not equal
                         * to the number of destination nodes we cannot
                         * preserve this node-relative relationship. In that
                         * case, skip copying memory from a node that is in
                         * the destination mask.
                         *
                         * Example: [2,3,4] -> [3,4,5] moves everything.
                         *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
                         */

                        if ((nodes_weight(*from) != nodes_weight(*to)) &&
                                                (node_isset(s, *to)))
                                continue;

                        d = node_remap(s, *from, *to);
                        if (s == d)
                                continue;

                        source = s;     /* Node moved. Memorize */
                        dest = d;

                        /* dest not in remaining from nodes? */
                        if (!node_isset(dest, tmp))
                                break;
                }
                if (source == NUMA_NO_NODE)
                        break;

                node_clear(source, tmp);
                err = migrate_to_node(mm, source, dest, flags);
                if (err > 0)
                        nr_failed += err;
                if (err < 0)
                        break;
        }

        lru_cache_enable();
        if (err < 0)
                return err;
        return (nr_failed < INT_MAX) ? nr_failed : INT_MAX;
}

/*
 * Allocate a new folio for page migration, according to NUMA mempolicy.
1207 */ 1208 static struct folio *alloc_migration_target_by_mpol(struct folio *src, 1209 unsigned long private) 1210 { 1211 struct migration_mpol *mmpol = (struct migration_mpol *)private; 1212 struct mempolicy *pol = mmpol->pol; 1213 pgoff_t ilx = mmpol->ilx; 1214 unsigned int order; 1215 int nid = numa_node_id(); 1216 gfp_t gfp; 1217 1218 order = folio_order(src); 1219 ilx += src->index >> order; 1220 1221 if (folio_test_hugetlb(src)) { 1222 nodemask_t *nodemask; 1223 struct hstate *h; 1224 1225 h = folio_hstate(src); 1226 gfp = htlb_alloc_mask(h); 1227 nodemask = policy_nodemask(gfp, pol, ilx, &nid); 1228 return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp, 1229 htlb_allow_alloc_fallback(MR_MEMPOLICY_MBIND)); 1230 } 1231 1232 if (folio_test_large(src)) 1233 gfp = GFP_TRANSHUGE; 1234 else 1235 gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL | __GFP_COMP; 1236 1237 return folio_alloc_mpol(gfp, order, pol, ilx, nid); 1238 } 1239 #else 1240 1241 static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist, 1242 unsigned long flags) 1243 { 1244 return false; 1245 } 1246 1247 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 1248 const nodemask_t *to, int flags) 1249 { 1250 return -ENOSYS; 1251 } 1252 1253 static struct folio *alloc_migration_target_by_mpol(struct folio *src, 1254 unsigned long private) 1255 { 1256 return NULL; 1257 } 1258 #endif 1259 1260 static long do_mbind(unsigned long start, unsigned long len, 1261 unsigned short mode, unsigned short mode_flags, 1262 nodemask_t *nmask, unsigned long flags) 1263 { 1264 struct mm_struct *mm = current->mm; 1265 struct vm_area_struct *vma, *prev; 1266 struct vma_iterator vmi; 1267 struct migration_mpol mmpol; 1268 struct mempolicy *new; 1269 unsigned long end; 1270 long err; 1271 long nr_failed; 1272 LIST_HEAD(pagelist); 1273 1274 if (flags & ~(unsigned long)MPOL_MF_VALID) 1275 return -EINVAL; 1276 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 1277 return -EPERM; 1278 1279 if (start & ~PAGE_MASK) 1280 return -EINVAL; 1281 1282 if (mode == MPOL_DEFAULT) 1283 flags &= ~MPOL_MF_STRICT; 1284 1285 len = PAGE_ALIGN(len); 1286 end = start + len; 1287 1288 if (end < start) 1289 return -EINVAL; 1290 if (end == start) 1291 return 0; 1292 1293 new = mpol_new(mode, mode_flags, nmask); 1294 if (IS_ERR(new)) 1295 return PTR_ERR(new); 1296 1297 /* 1298 * If we are using the default policy then operation 1299 * on discontinuous address spaces is okay after all 1300 */ 1301 if (!new) 1302 flags |= MPOL_MF_DISCONTIG_OK; 1303 1304 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 1305 lru_cache_disable(); 1306 { 1307 NODEMASK_SCRATCH(scratch); 1308 if (scratch) { 1309 mmap_write_lock(mm); 1310 err = mpol_set_nodemask(new, nmask, scratch); 1311 if (err) 1312 mmap_write_unlock(mm); 1313 } else 1314 err = -ENOMEM; 1315 NODEMASK_SCRATCH_FREE(scratch); 1316 } 1317 if (err) 1318 goto mpol_out; 1319 1320 /* 1321 * Lock the VMAs before scanning for pages to migrate, 1322 * to ensure we don't miss a concurrently inserted page. 
1323 */ 1324 nr_failed = queue_pages_range(mm, start, end, nmask, 1325 flags | MPOL_MF_INVERT | MPOL_MF_WRLOCK, &pagelist); 1326 1327 if (nr_failed < 0) { 1328 err = nr_failed; 1329 nr_failed = 0; 1330 } else { 1331 vma_iter_init(&vmi, mm, start); 1332 prev = vma_prev(&vmi); 1333 for_each_vma_range(vmi, vma, end) { 1334 err = mbind_range(&vmi, vma, &prev, start, end, new); 1335 if (err) 1336 break; 1337 } 1338 } 1339 1340 if (!err && !list_empty(&pagelist)) { 1341 /* Convert MPOL_DEFAULT's NULL to task or default policy */ 1342 if (!new) { 1343 new = get_task_policy(current); 1344 mpol_get(new); 1345 } 1346 mmpol.pol = new; 1347 mmpol.ilx = 0; 1348 1349 /* 1350 * In the interleaved case, attempt to allocate on exactly the 1351 * targeted nodes, for the first VMA to be migrated; for later 1352 * VMAs, the nodes will still be interleaved from the targeted 1353 * nodemask, but one by one may be selected differently. 1354 */ 1355 if (new->mode == MPOL_INTERLEAVE || 1356 new->mode == MPOL_WEIGHTED_INTERLEAVE) { 1357 struct folio *folio; 1358 unsigned int order; 1359 unsigned long addr = -EFAULT; 1360 1361 list_for_each_entry(folio, &pagelist, lru) { 1362 if (!folio_test_ksm(folio)) 1363 break; 1364 } 1365 if (!list_entry_is_head(folio, &pagelist, lru)) { 1366 vma_iter_init(&vmi, mm, start); 1367 for_each_vma_range(vmi, vma, end) { 1368 addr = page_address_in_vma( 1369 folio_page(folio, 0), vma); 1370 if (addr != -EFAULT) 1371 break; 1372 } 1373 } 1374 if (addr != -EFAULT) { 1375 order = folio_order(folio); 1376 /* We already know the pol, but not the ilx */ 1377 mpol_cond_put(get_vma_policy(vma, addr, order, 1378 &mmpol.ilx)); 1379 /* Set base from which to increment by index */ 1380 mmpol.ilx -= folio->index >> order; 1381 } 1382 } 1383 } 1384 1385 mmap_write_unlock(mm); 1386 1387 if (!err && !list_empty(&pagelist)) { 1388 nr_failed |= migrate_pages(&pagelist, 1389 alloc_migration_target_by_mpol, NULL, 1390 (unsigned long)&mmpol, MIGRATE_SYNC, 1391 MR_MEMPOLICY_MBIND, NULL); 1392 } 1393 1394 if (nr_failed && (flags & MPOL_MF_STRICT)) 1395 err = -EIO; 1396 if (!list_empty(&pagelist)) 1397 putback_movable_pages(&pagelist); 1398 mpol_out: 1399 mpol_put(new); 1400 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 1401 lru_cache_enable(); 1402 return err; 1403 } 1404 1405 /* 1406 * User space interface with variable sized bitmaps for nodelists. 1407 */ 1408 static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask, 1409 unsigned long maxnode) 1410 { 1411 unsigned long nlongs = BITS_TO_LONGS(maxnode); 1412 int ret; 1413 1414 if (in_compat_syscall()) 1415 ret = compat_get_bitmap(mask, 1416 (const compat_ulong_t __user *)nmask, 1417 maxnode); 1418 else 1419 ret = copy_from_user(mask, nmask, 1420 nlongs * sizeof(unsigned long)); 1421 1422 if (ret) 1423 return -EFAULT; 1424 1425 if (maxnode % BITS_PER_LONG) 1426 mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1; 1427 1428 return 0; 1429 } 1430 1431 /* Copy a node mask from user space. */ 1432 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 1433 unsigned long maxnode) 1434 { 1435 --maxnode; 1436 nodes_clear(*nodes); 1437 if (maxnode == 0 || !nmask) 1438 return 0; 1439 if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1440 return -EINVAL; 1441 1442 /* 1443 * When the user specified more nodes than supported just check 1444 * if the non supported part is all zero, one word at a time, 1445 * starting at the end. 
1446 */ 1447 while (maxnode > MAX_NUMNODES) { 1448 unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG); 1449 unsigned long t; 1450 1451 if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits)) 1452 return -EFAULT; 1453 1454 if (maxnode - bits >= MAX_NUMNODES) { 1455 maxnode -= bits; 1456 } else { 1457 maxnode = MAX_NUMNODES; 1458 t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 1459 } 1460 if (t) 1461 return -EINVAL; 1462 } 1463 1464 return get_bitmap(nodes_addr(*nodes), nmask, maxnode); 1465 } 1466 1467 /* Copy a kernel node mask to user space */ 1468 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 1469 nodemask_t *nodes) 1470 { 1471 unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1472 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 1473 bool compat = in_compat_syscall(); 1474 1475 if (compat) 1476 nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t); 1477 1478 if (copy > nbytes) { 1479 if (copy > PAGE_SIZE) 1480 return -EINVAL; 1481 if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 1482 return -EFAULT; 1483 copy = nbytes; 1484 maxnode = nr_node_ids; 1485 } 1486 1487 if (compat) 1488 return compat_put_bitmap((compat_ulong_t __user *)mask, 1489 nodes_addr(*nodes), maxnode); 1490 1491 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0; 1492 } 1493 1494 /* Basic parameter sanity check used by both mbind() and set_mempolicy() */ 1495 static inline int sanitize_mpol_flags(int *mode, unsigned short *flags) 1496 { 1497 *flags = *mode & MPOL_MODE_FLAGS; 1498 *mode &= ~MPOL_MODE_FLAGS; 1499 1500 if ((unsigned int)(*mode) >= MPOL_MAX) 1501 return -EINVAL; 1502 if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES)) 1503 return -EINVAL; 1504 if (*flags & MPOL_F_NUMA_BALANCING) { 1505 if (*mode == MPOL_BIND || *mode == MPOL_PREFERRED_MANY) 1506 *flags |= (MPOL_F_MOF | MPOL_F_MORON); 1507 else 1508 return -EINVAL; 1509 } 1510 return 0; 1511 } 1512 1513 static long kernel_mbind(unsigned long start, unsigned long len, 1514 unsigned long mode, const unsigned long __user *nmask, 1515 unsigned long maxnode, unsigned int flags) 1516 { 1517 unsigned short mode_flags; 1518 nodemask_t nodes; 1519 int lmode = mode; 1520 int err; 1521 1522 start = untagged_addr(start); 1523 err = sanitize_mpol_flags(&lmode, &mode_flags); 1524 if (err) 1525 return err; 1526 1527 err = get_nodes(&nodes, nmask, maxnode); 1528 if (err) 1529 return err; 1530 1531 return do_mbind(start, len, lmode, mode_flags, &nodes, flags); 1532 } 1533 1534 SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len, 1535 unsigned long, home_node, unsigned long, flags) 1536 { 1537 struct mm_struct *mm = current->mm; 1538 struct vm_area_struct *vma, *prev; 1539 struct mempolicy *new, *old; 1540 unsigned long end; 1541 int err = -ENOENT; 1542 VMA_ITERATOR(vmi, mm, start); 1543 1544 start = untagged_addr(start); 1545 if (start & ~PAGE_MASK) 1546 return -EINVAL; 1547 /* 1548 * flags is used for future extension if any. 1549 */ 1550 if (flags != 0) 1551 return -EINVAL; 1552 1553 /* 1554 * Check home_node is online to avoid accessing uninitialized 1555 * NODE_DATA. 
1556 */ 1557 if (home_node >= MAX_NUMNODES || !node_online(home_node)) 1558 return -EINVAL; 1559 1560 len = PAGE_ALIGN(len); 1561 end = start + len; 1562 1563 if (end < start) 1564 return -EINVAL; 1565 if (end == start) 1566 return 0; 1567 mmap_write_lock(mm); 1568 prev = vma_prev(&vmi); 1569 for_each_vma_range(vmi, vma, end) { 1570 /* 1571 * If any vma in the range got policy other than MPOL_BIND 1572 * or MPOL_PREFERRED_MANY we return error. We don't reset 1573 * the home node for vmas we already updated before. 1574 */ 1575 old = vma_policy(vma); 1576 if (!old) { 1577 prev = vma; 1578 continue; 1579 } 1580 if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) { 1581 err = -EOPNOTSUPP; 1582 break; 1583 } 1584 new = mpol_dup(old); 1585 if (IS_ERR(new)) { 1586 err = PTR_ERR(new); 1587 break; 1588 } 1589 1590 vma_start_write(vma); 1591 new->home_node = home_node; 1592 err = mbind_range(&vmi, vma, &prev, start, end, new); 1593 mpol_put(new); 1594 if (err) 1595 break; 1596 } 1597 mmap_write_unlock(mm); 1598 return err; 1599 } 1600 1601 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1602 unsigned long, mode, const unsigned long __user *, nmask, 1603 unsigned long, maxnode, unsigned int, flags) 1604 { 1605 return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1606 } 1607 1608 /* Set the process memory policy */ 1609 static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1610 unsigned long maxnode) 1611 { 1612 unsigned short mode_flags; 1613 nodemask_t nodes; 1614 int lmode = mode; 1615 int err; 1616 1617 err = sanitize_mpol_flags(&lmode, &mode_flags); 1618 if (err) 1619 return err; 1620 1621 err = get_nodes(&nodes, nmask, maxnode); 1622 if (err) 1623 return err; 1624 1625 return do_set_mempolicy(lmode, mode_flags, &nodes); 1626 } 1627 1628 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1629 unsigned long, maxnode) 1630 { 1631 return kernel_set_mempolicy(mode, nmask, maxnode); 1632 } 1633 1634 static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1635 const unsigned long __user *old_nodes, 1636 const unsigned long __user *new_nodes) 1637 { 1638 struct mm_struct *mm = NULL; 1639 struct task_struct *task; 1640 nodemask_t task_nodes; 1641 int err; 1642 nodemask_t *old; 1643 nodemask_t *new; 1644 NODEMASK_SCRATCH(scratch); 1645 1646 if (!scratch) 1647 return -ENOMEM; 1648 1649 old = &scratch->mask1; 1650 new = &scratch->mask2; 1651 1652 err = get_nodes(old, old_nodes, maxnode); 1653 if (err) 1654 goto out; 1655 1656 err = get_nodes(new, new_nodes, maxnode); 1657 if (err) 1658 goto out; 1659 1660 /* Find the mm_struct */ 1661 rcu_read_lock(); 1662 task = pid ? find_task_by_vpid(pid) : current; 1663 if (!task) { 1664 rcu_read_unlock(); 1665 err = -ESRCH; 1666 goto out; 1667 } 1668 get_task_struct(task); 1669 1670 err = -EINVAL; 1671 1672 /* 1673 * Check if this process has the right to modify the specified process. 1674 * Use the regular "ptrace_may_access()" checks. 1675 */ 1676 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1677 rcu_read_unlock(); 1678 err = -EPERM; 1679 goto out_put; 1680 } 1681 rcu_read_unlock(); 1682 1683 task_nodes = cpuset_mems_allowed(task); 1684 /* Is the user allowed to access the target nodes? 
*/ 1685 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 1686 err = -EPERM; 1687 goto out_put; 1688 } 1689 1690 task_nodes = cpuset_mems_allowed(current); 1691 nodes_and(*new, *new, task_nodes); 1692 if (nodes_empty(*new)) 1693 goto out_put; 1694 1695 err = security_task_movememory(task); 1696 if (err) 1697 goto out_put; 1698 1699 mm = get_task_mm(task); 1700 put_task_struct(task); 1701 1702 if (!mm) { 1703 err = -EINVAL; 1704 goto out; 1705 } 1706 1707 err = do_migrate_pages(mm, old, new, 1708 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 1709 1710 mmput(mm); 1711 out: 1712 NODEMASK_SCRATCH_FREE(scratch); 1713 1714 return err; 1715 1716 out_put: 1717 put_task_struct(task); 1718 goto out; 1719 } 1720 1721 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1722 const unsigned long __user *, old_nodes, 1723 const unsigned long __user *, new_nodes) 1724 { 1725 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1726 } 1727 1728 /* Retrieve NUMA policy */ 1729 static int kernel_get_mempolicy(int __user *policy, 1730 unsigned long __user *nmask, 1731 unsigned long maxnode, 1732 unsigned long addr, 1733 unsigned long flags) 1734 { 1735 int err; 1736 int pval; 1737 nodemask_t nodes; 1738 1739 if (nmask != NULL && maxnode < nr_node_ids) 1740 return -EINVAL; 1741 1742 addr = untagged_addr(addr); 1743 1744 err = do_get_mempolicy(&pval, &nodes, addr, flags); 1745 1746 if (err) 1747 return err; 1748 1749 if (policy && put_user(pval, policy)) 1750 return -EFAULT; 1751 1752 if (nmask) 1753 err = copy_nodes_to_user(nmask, maxnode, &nodes); 1754 1755 return err; 1756 } 1757 1758 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1759 unsigned long __user *, nmask, unsigned long, maxnode, 1760 unsigned long, addr, unsigned long, flags) 1761 { 1762 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1763 } 1764 1765 bool vma_migratable(struct vm_area_struct *vma) 1766 { 1767 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 1768 return false; 1769 1770 /* 1771 * DAX device mappings require predictable access latency, so avoid 1772 * incurring periodic faults. 1773 */ 1774 if (vma_is_dax(vma)) 1775 return false; 1776 1777 if (is_vm_hugetlb_page(vma) && 1778 !hugepage_migration_supported(hstate_vma(vma))) 1779 return false; 1780 1781 /* 1782 * Migration allocates pages in the highest zone. If we cannot 1783 * do so then migration (at least from node to node) is not 1784 * possible. 1785 */ 1786 if (vma->vm_file && 1787 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) 1788 < policy_zone) 1789 return false; 1790 return true; 1791 } 1792 1793 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 1794 unsigned long addr, pgoff_t *ilx) 1795 { 1796 *ilx = 0; 1797 return (vma->vm_ops && vma->vm_ops->get_policy) ? 1798 vma->vm_ops->get_policy(vma, addr, ilx) : vma->vm_policy; 1799 } 1800 1801 /* 1802 * get_vma_policy(@vma, @addr, @order, @ilx) 1803 * @vma: virtual memory area whose policy is sought 1804 * @addr: address in @vma for shared policy lookup 1805 * @order: 0, or appropriate huge_page_order for interleaving 1806 * @ilx: interleave index (output), for use only when MPOL_INTERLEAVE or 1807 * MPOL_WEIGHTED_INTERLEAVE 1808 * 1809 * Returns effective policy for a VMA at specified address. 1810 * Falls back to current->mempolicy or system default policy, as necessary. 
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task. It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
                                 unsigned long addr, int order, pgoff_t *ilx)
{
        struct mempolicy *pol;

        pol = __get_vma_policy(vma, addr, ilx);
        if (!pol)
                pol = get_task_policy(current);
        if (pol->mode == MPOL_INTERLEAVE ||
            pol->mode == MPOL_WEIGHTED_INTERLEAVE) {
                *ilx += vma->vm_pgoff >> order;
                *ilx += (addr - vma->vm_start) >> (PAGE_SHIFT + order);
        }
        return pol;
}

bool vma_policy_mof(struct vm_area_struct *vma)
{
        struct mempolicy *pol;

        if (vma->vm_ops && vma->vm_ops->get_policy) {
                bool ret = false;
                pgoff_t ilx;            /* ignored here */

                pol = vma->vm_ops->get_policy(vma, vma->vm_start, &ilx);
                if (pol && (pol->flags & MPOL_F_MOF))
                        ret = true;
                mpol_cond_put(pol);

                return ret;
        }

        pol = vma->vm_policy;
        if (!pol)
                pol = get_task_policy(current);

        return pol->flags & MPOL_F_MOF;
}

bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
        enum zone_type dynamic_policy_zone = policy_zone;

        BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);

        /*
         * If policy->nodes contains only movable memory, we apply the policy
         * only when gfp_zone(gfp) == ZONE_MOVABLE.
         *
         * policy->nodes is intersected with node_states[N_MEMORY], so if
         * the following test fails, it implies that policy->nodes contains
         * movable memory only.
         */
        if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
                dynamic_policy_zone = ZONE_MOVABLE;

        return zone >= dynamic_policy_zone;
}

static unsigned int weighted_interleave_nodes(struct mempolicy *policy)
{
        unsigned int node;
        unsigned int cpuset_mems_cookie;

retry:
        /* to prevent miscount, use tsk->mems_allowed_seq to detect rebind */
        cpuset_mems_cookie = read_mems_allowed_begin();
        node = current->il_prev;
        if (!current->il_weight || !node_isset(node, policy->nodes)) {
                node = next_node_in(node, policy->nodes);
                if (read_mems_allowed_retry(cpuset_mems_cookie))
                        goto retry;
                if (node == MAX_NUMNODES)
                        return node;
                current->il_prev = node;
                current->il_weight = get_il_weight(node);
        }
        current->il_weight--;
        return node;
}

/* Do dynamic interleaving for a process */
static unsigned int interleave_nodes(struct mempolicy *policy)
{
        unsigned int nid;
        unsigned int cpuset_mems_cookie;

        /* to prevent miscount, use tsk->mems_allowed_seq to detect rebind */
        do {
                cpuset_mems_cookie = read_mems_allowed_begin();
                nid = next_node_in(current->il_prev, policy->nodes);
        } while (read_mems_allowed_retry(cpuset_mems_cookie));

        if (nid < MAX_NUMNODES)
                current->il_prev = nid;
        return nid;
}

/*
 * Depending on the memory policy, provide a node from which to allocate the
 * next slab entry.
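 *
 * Illustrative sketch only (the slab allocator consumes this hint through
 * its own fallback logic rather than a literal call like this):
 *
 *	void *obj = kmalloc_node(size, GFP_KERNEL, mempolicy_slab_node());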
1917 */ 1918 unsigned int mempolicy_slab_node(void) 1919 { 1920 struct mempolicy *policy; 1921 int node = numa_mem_id(); 1922 1923 if (!in_task()) 1924 return node; 1925 1926 policy = current->mempolicy; 1927 if (!policy) 1928 return node; 1929 1930 switch (policy->mode) { 1931 case MPOL_PREFERRED: 1932 return first_node(policy->nodes); 1933 1934 case MPOL_INTERLEAVE: 1935 return interleave_nodes(policy); 1936 1937 case MPOL_WEIGHTED_INTERLEAVE: 1938 return weighted_interleave_nodes(policy); 1939 1940 case MPOL_BIND: 1941 case MPOL_PREFERRED_MANY: 1942 { 1943 struct zoneref *z; 1944 1945 /* 1946 * Follow bind policy behavior and start allocation at the 1947 * first node. 1948 */ 1949 struct zonelist *zonelist; 1950 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1951 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1952 z = first_zones_zonelist(zonelist, highest_zoneidx, 1953 &policy->nodes); 1954 return z->zone ? zone_to_nid(z->zone) : node; 1955 } 1956 case MPOL_LOCAL: 1957 return node; 1958 1959 default: 1960 BUG(); 1961 } 1962 } 1963 1964 static unsigned int read_once_policy_nodemask(struct mempolicy *pol, 1965 nodemask_t *mask) 1966 { 1967 /* 1968 * barrier stabilizes the nodemask locally so that it can be iterated 1969 * over safely without concern for changes. Allocators validate node 1970 * selection does not violate mems_allowed, so this is safe. 1971 */ 1972 barrier(); 1973 memcpy(mask, &pol->nodes, sizeof(nodemask_t)); 1974 barrier(); 1975 return nodes_weight(*mask); 1976 } 1977 1978 static unsigned int weighted_interleave_nid(struct mempolicy *pol, pgoff_t ilx) 1979 { 1980 nodemask_t nodemask; 1981 unsigned int target, nr_nodes; 1982 u8 *table; 1983 unsigned int weight_total = 0; 1984 u8 weight; 1985 int nid; 1986 1987 nr_nodes = read_once_policy_nodemask(pol, &nodemask); 1988 if (!nr_nodes) 1989 return numa_node_id(); 1990 1991 rcu_read_lock(); 1992 table = rcu_dereference(iw_table); 1993 /* calculate the total weight */ 1994 for_each_node_mask(nid, nodemask) { 1995 /* detect system default usage */ 1996 weight = table ? table[nid] : 1; 1997 weight = weight ? weight : 1; 1998 weight_total += weight; 1999 } 2000 2001 /* Calculate the node offset based on totals */ 2002 target = ilx % weight_total; 2003 nid = first_node(nodemask); 2004 while (target) { 2005 /* detect system default usage */ 2006 weight = table ? table[nid] : 1; 2007 weight = weight ? weight : 1; 2008 if (target < weight) 2009 break; 2010 target -= weight; 2011 nid = next_node_in(nid, nodemask); 2012 } 2013 rcu_read_unlock(); 2014 return nid; 2015 } 2016 2017 /* 2018 * Do static interleaving for interleave index @ilx. Returns the ilx'th 2019 * node in pol->nodes (starting from ilx=0), wrapping around if ilx 2020 * exceeds the number of present nodes. 2021 */ 2022 static unsigned int interleave_nid(struct mempolicy *pol, pgoff_t ilx) 2023 { 2024 nodemask_t nodemask; 2025 unsigned int target, nnodes; 2026 int i; 2027 int nid; 2028 2029 nnodes = read_once_policy_nodemask(pol, &nodemask); 2030 if (!nnodes) 2031 return numa_node_id(); 2032 target = ilx % nnodes; 2033 nid = first_node(nodemask); 2034 for (i = 0; i < target; i++) 2035 nid = next_node(nid, nodemask); 2036 return nid; 2037 } 2038 2039 /* 2040 * Return a nodemask representing a mempolicy for filtering nodes for 2041 * page allocation, together with preferred node id (or the input node id). 
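 *
 * Typical calling pattern, simplified from alloc_pages_mpol_noprof() below:
 *
 *	int nid = numa_node_id();
 *	nodemask_t *nodemask = policy_nodemask(gfp, pol, ilx, &nid);
 *	page = __alloc_pages_noprof(gfp, order, nid, nodemask);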
2042 */ 2043 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol, 2044 pgoff_t ilx, int *nid) 2045 { 2046 nodemask_t *nodemask = NULL; 2047 2048 switch (pol->mode) { 2049 case MPOL_PREFERRED: 2050 /* Override input node id */ 2051 *nid = first_node(pol->nodes); 2052 break; 2053 case MPOL_PREFERRED_MANY: 2054 nodemask = &pol->nodes; 2055 if (pol->home_node != NUMA_NO_NODE) 2056 *nid = pol->home_node; 2057 break; 2058 case MPOL_BIND: 2059 /* Restrict to nodemask (but not on lower zones) */ 2060 if (apply_policy_zone(pol, gfp_zone(gfp)) && 2061 cpuset_nodemask_valid_mems_allowed(&pol->nodes)) 2062 nodemask = &pol->nodes; 2063 if (pol->home_node != NUMA_NO_NODE) 2064 *nid = pol->home_node; 2065 /* 2066 * __GFP_THISNODE shouldn't even be used with the bind policy 2067 * because we might easily break the expectation to stay on the 2068 * requested node and not break the policy. 2069 */ 2070 WARN_ON_ONCE(gfp & __GFP_THISNODE); 2071 break; 2072 case MPOL_INTERLEAVE: 2073 /* Override input node id */ 2074 *nid = (ilx == NO_INTERLEAVE_INDEX) ? 2075 interleave_nodes(pol) : interleave_nid(pol, ilx); 2076 break; 2077 case MPOL_WEIGHTED_INTERLEAVE: 2078 *nid = (ilx == NO_INTERLEAVE_INDEX) ? 2079 weighted_interleave_nodes(pol) : 2080 weighted_interleave_nid(pol, ilx); 2081 break; 2082 } 2083 2084 return nodemask; 2085 } 2086 2087 #ifdef CONFIG_HUGETLBFS 2088 /* 2089 * huge_node(@vma, @addr, @gfp_flags, @mpol) 2090 * @vma: virtual memory area whose policy is sought 2091 * @addr: address in @vma for shared policy lookup and interleave policy 2092 * @gfp_flags: for requested zone 2093 * @mpol: pointer to mempolicy pointer for reference counted mempolicy 2094 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy 2095 * 2096 * Returns a nid suitable for a huge page allocation and a pointer 2097 * to the struct mempolicy for conditional unref after allocation. 2098 * If the effective policy is 'bind' or 'prefer-many', returns a pointer 2099 * to the mempolicy's @nodemask for filtering the zonelist. 2100 */ 2101 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, 2102 struct mempolicy **mpol, nodemask_t **nodemask) 2103 { 2104 pgoff_t ilx; 2105 int nid; 2106 2107 nid = numa_node_id(); 2108 *mpol = get_vma_policy(vma, addr, hstate_vma(vma)->order, &ilx); 2109 *nodemask = policy_nodemask(gfp_flags, *mpol, ilx, &nid); 2110 return nid; 2111 } 2112 2113 /* 2114 * init_nodemask_of_mempolicy 2115 * 2116 * If the current task's mempolicy is "default" [NULL], return 'false' 2117 * to indicate default policy. Otherwise, extract the policy nodemask 2118 * for 'bind' or 'interleave' policy into the argument nodemask, or 2119 * initialize the argument nodemask to contain the single node for 2120 * 'preferred' or 'local' policy and return 'true' to indicate presence 2121 * of non-default mempolicy. 2122 * 2123 * We don't bother with reference counting the mempolicy [mpol_get/put] 2124 * because the current task is examining it's own mempolicy and a task's 2125 * mempolicy is only ever changed by the task itself. 2126 * 2127 * N.B., it is the caller's responsibility to free a returned nodemask. 
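 *
 * Example (illustrative): a task running under MPOL_INTERLEAVE over
 * nodes 0-3 gets *mask = {0-3} and a 'true' return; a task with no
 * mempolicy gets 'false' and *mask is left untouched.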
2128 */ 2129 bool init_nodemask_of_mempolicy(nodemask_t *mask) 2130 { 2131 struct mempolicy *mempolicy; 2132 2133 if (!(mask && current->mempolicy)) 2134 return false; 2135 2136 task_lock(current); 2137 mempolicy = current->mempolicy; 2138 switch (mempolicy->mode) { 2139 case MPOL_PREFERRED: 2140 case MPOL_PREFERRED_MANY: 2141 case MPOL_BIND: 2142 case MPOL_INTERLEAVE: 2143 case MPOL_WEIGHTED_INTERLEAVE: 2144 *mask = mempolicy->nodes; 2145 break; 2146 2147 case MPOL_LOCAL: 2148 init_nodemask_of_node(mask, numa_node_id()); 2149 break; 2150 2151 default: 2152 BUG(); 2153 } 2154 task_unlock(current); 2155 2156 return true; 2157 } 2158 #endif 2159 2160 /* 2161 * mempolicy_in_oom_domain 2162 * 2163 * If tsk's mempolicy is "bind", check for intersection between mask and 2164 * the policy nodemask. Otherwise, return true for all other policies 2165 * including "interleave", as a tsk with "interleave" policy may have 2166 * memory allocated from all nodes in system. 2167 * 2168 * Takes task_lock(tsk) to prevent freeing of its mempolicy. 2169 */ 2170 bool mempolicy_in_oom_domain(struct task_struct *tsk, 2171 const nodemask_t *mask) 2172 { 2173 struct mempolicy *mempolicy; 2174 bool ret = true; 2175 2176 if (!mask) 2177 return ret; 2178 2179 task_lock(tsk); 2180 mempolicy = tsk->mempolicy; 2181 if (mempolicy && mempolicy->mode == MPOL_BIND) 2182 ret = nodes_intersects(mempolicy->nodes, *mask); 2183 task_unlock(tsk); 2184 2185 return ret; 2186 } 2187 2188 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, 2189 int nid, nodemask_t *nodemask) 2190 { 2191 struct page *page; 2192 gfp_t preferred_gfp; 2193 2194 /* 2195 * This is a two pass approach. The first pass will only try the 2196 * preferred nodes but skip the direct reclaim and allow the 2197 * allocation to fail, while the second pass will try all the 2198 * nodes in system. 2199 */ 2200 preferred_gfp = gfp | __GFP_NOWARN; 2201 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2202 page = __alloc_pages_noprof(preferred_gfp, order, nid, nodemask); 2203 if (!page) 2204 page = __alloc_pages_noprof(gfp, order, nid, NULL); 2205 2206 return page; 2207 } 2208 2209 /** 2210 * alloc_pages_mpol - Allocate pages according to NUMA mempolicy. 2211 * @gfp: GFP flags. 2212 * @order: Order of the page allocation. 2213 * @pol: Pointer to the NUMA mempolicy. 2214 * @ilx: Index for interleave mempolicy (also distinguishes alloc_pages()). 2215 * @nid: Preferred node (usually numa_node_id() but @mpol may override it). 2216 * 2217 * Return: The page on success or NULL if allocation fails. 2218 */ 2219 struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, 2220 struct mempolicy *pol, pgoff_t ilx, int nid) 2221 { 2222 nodemask_t *nodemask; 2223 struct page *page; 2224 2225 nodemask = policy_nodemask(gfp, pol, ilx, &nid); 2226 2227 if (pol->mode == MPOL_PREFERRED_MANY) 2228 return alloc_pages_preferred_many(gfp, order, nid, nodemask); 2229 2230 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 2231 /* filter "hugepage" allocation, unless from alloc_pages() */ 2232 order == HPAGE_PMD_ORDER && ilx != NO_INTERLEAVE_INDEX) { 2233 /* 2234 * For hugepage allocation and non-interleave policy which 2235 * allows the current node (or other explicitly preferred 2236 * node) we only try to allocate from the current/preferred 2237 * node and don't fall back to other nodes, as the cost of 2238 * remote accesses would likely offset THP benefits. 
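 *
 * (Illustrative case: for a PMD-sized THP fault under MPOL_PREFERRED on
 * the local node, the first attempt below adds __GFP_THISNODE and
 * __GFP_NORETRY so only that node is tried; the normal path further down
 * is only reached when the allocation may direct-reclaim.)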
2239 * 2240 * If the policy is interleave or does not allow the current 2241 * node in its nodemask, we allocate the standard way. 2242 */ 2243 if (pol->mode != MPOL_INTERLEAVE && 2244 pol->mode != MPOL_WEIGHTED_INTERLEAVE && 2245 (!nodemask || node_isset(nid, *nodemask))) { 2246 /* 2247 * First, try to allocate THP only on local node, but 2248 * don't reclaim unnecessarily, just compact. 2249 */ 2250 page = __alloc_pages_node_noprof(nid, 2251 gfp | __GFP_THISNODE | __GFP_NORETRY, order); 2252 if (page || !(gfp & __GFP_DIRECT_RECLAIM)) 2253 return page; 2254 /* 2255 * If hugepage allocations are configured to always 2256 * synchronous compact or the vma has been madvised 2257 * to prefer hugepage backing, retry allowing remote 2258 * memory with both reclaim and compact as well. 2259 */ 2260 } 2261 } 2262 2263 page = __alloc_pages_noprof(gfp, order, nid, nodemask); 2264 2265 if (unlikely(pol->mode == MPOL_INTERLEAVE) && page) { 2266 /* skip NUMA_INTERLEAVE_HIT update if numa stats is disabled */ 2267 if (static_branch_likely(&vm_numa_stat_key) && 2268 page_to_nid(page) == nid) { 2269 preempt_disable(); 2270 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); 2271 preempt_enable(); 2272 } 2273 } 2274 2275 return page; 2276 } 2277 2278 struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, 2279 struct mempolicy *pol, pgoff_t ilx, int nid) 2280 { 2281 return page_rmappable_folio(alloc_pages_mpol_noprof(gfp | __GFP_COMP, 2282 order, pol, ilx, nid)); 2283 } 2284 2285 /** 2286 * vma_alloc_folio - Allocate a folio for a VMA. 2287 * @gfp: GFP flags. 2288 * @order: Order of the folio. 2289 * @vma: Pointer to VMA. 2290 * @addr: Virtual address of the allocation. Must be inside @vma. 2291 * @hugepage: Unused (was: For hugepages try only preferred node if possible). 2292 * 2293 * Allocate a folio for a specific address in @vma, using the appropriate 2294 * NUMA policy. The caller must hold the mmap_lock of the mm_struct of the 2295 * VMA to prevent it from going away. Should be used for all allocations 2296 * for folios that will be mapped into user space, excepting hugetlbfs, and 2297 * excepting where direct use of alloc_pages_mpol() is more appropriate. 2298 * 2299 * Return: The folio on success or NULL if allocation fails. 2300 */ 2301 struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma, 2302 unsigned long addr, bool hugepage) 2303 { 2304 struct mempolicy *pol; 2305 pgoff_t ilx; 2306 struct folio *folio; 2307 2308 pol = get_vma_policy(vma, addr, order, &ilx); 2309 folio = folio_alloc_mpol_noprof(gfp, order, pol, ilx, numa_node_id()); 2310 mpol_cond_put(pol); 2311 return folio; 2312 } 2313 EXPORT_SYMBOL(vma_alloc_folio_noprof); 2314 2315 /** 2316 * alloc_pages - Allocate pages. 2317 * @gfp: GFP flags. 2318 * @order: Power of two of number of pages to allocate. 2319 * 2320 * Allocate 1 << @order contiguous pages. The physical address of the 2321 * first page is naturally aligned (eg an order-3 allocation will be aligned 2322 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current 2323 * process is honoured when in process context. 2324 * 2325 * Context: Can be called from any context, providing the appropriate GFP 2326 * flags are used. 2327 * Return: The page on success or NULL if allocation fails. 
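 *
 * Minimal usage sketch (illustrative): an order-2 request returns four
 * contiguous pages, freed with the matching order:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page)
 *		__free_pages(page, 2);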
2328 */ 2329 struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order) 2330 { 2331 struct mempolicy *pol = &default_policy; 2332 2333 /* 2334 * No reference counting needed for current->mempolicy 2335 * nor system default_policy 2336 */ 2337 if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 2338 pol = get_task_policy(current); 2339 2340 return alloc_pages_mpol_noprof(gfp, order, pol, NO_INTERLEAVE_INDEX, 2341 numa_node_id()); 2342 } 2343 EXPORT_SYMBOL(alloc_pages_noprof); 2344 2345 struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order) 2346 { 2347 return page_rmappable_folio(alloc_pages_noprof(gfp | __GFP_COMP, order)); 2348 } 2349 EXPORT_SYMBOL(folio_alloc_noprof); 2350 2351 static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp, 2352 struct mempolicy *pol, unsigned long nr_pages, 2353 struct page **page_array) 2354 { 2355 int nodes; 2356 unsigned long nr_pages_per_node; 2357 int delta; 2358 int i; 2359 unsigned long nr_allocated; 2360 unsigned long total_allocated = 0; 2361 2362 nodes = nodes_weight(pol->nodes); 2363 nr_pages_per_node = nr_pages / nodes; 2364 delta = nr_pages - nodes * nr_pages_per_node; 2365 2366 for (i = 0; i < nodes; i++) { 2367 if (delta) { 2368 nr_allocated = alloc_pages_bulk_noprof(gfp, 2369 interleave_nodes(pol), NULL, 2370 nr_pages_per_node + 1, NULL, 2371 page_array); 2372 delta--; 2373 } else { 2374 nr_allocated = alloc_pages_bulk_noprof(gfp, 2375 interleave_nodes(pol), NULL, 2376 nr_pages_per_node, NULL, page_array); 2377 } 2378 2379 page_array += nr_allocated; 2380 total_allocated += nr_allocated; 2381 } 2382 2383 return total_allocated; 2384 } 2385 2386 static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp, 2387 struct mempolicy *pol, unsigned long nr_pages, 2388 struct page **page_array) 2389 { 2390 struct task_struct *me = current; 2391 unsigned int cpuset_mems_cookie; 2392 unsigned long total_allocated = 0; 2393 unsigned long nr_allocated = 0; 2394 unsigned long rounds; 2395 unsigned long node_pages, delta; 2396 u8 *table, *weights, weight; 2397 unsigned int weight_total = 0; 2398 unsigned long rem_pages = nr_pages; 2399 nodemask_t nodes; 2400 int nnodes, node; 2401 int resume_node = MAX_NUMNODES - 1; 2402 u8 resume_weight = 0; 2403 int prev_node; 2404 int i; 2405 2406 if (!nr_pages) 2407 return 0; 2408 2409 /* read the nodes onto the stack, retry if done during rebind */ 2410 do { 2411 cpuset_mems_cookie = read_mems_allowed_begin(); 2412 nnodes = read_once_policy_nodemask(pol, &nodes); 2413 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2414 2415 /* if the nodemask has become invalid, we cannot do anything */ 2416 if (!nnodes) 2417 return 0; 2418 2419 /* Continue allocating from most recent node and adjust the nr_pages */ 2420 node = me->il_prev; 2421 weight = me->il_weight; 2422 if (weight && node_isset(node, nodes)) { 2423 node_pages = min(rem_pages, weight); 2424 nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages, 2425 NULL, page_array); 2426 page_array += nr_allocated; 2427 total_allocated += nr_allocated; 2428 /* if that's all the pages, no need to interleave */ 2429 if (rem_pages <= weight) { 2430 me->il_weight -= rem_pages; 2431 return total_allocated; 2432 } 2433 /* Otherwise we adjust remaining pages, continue from there */ 2434 rem_pages -= weight; 2435 } 2436 /* clear active weight in case of an allocation failure */ 2437 me->il_weight = 0; 2438 prev_node = node; 2439 2440 /* create a local copy of node weights to operate on outside rcu */ 2441 weights = kzalloc(nr_node_ids, 
GFP_KERNEL); 2442 if (!weights) 2443 return total_allocated; 2444 2445 rcu_read_lock(); 2446 table = rcu_dereference(iw_table); 2447 if (table) 2448 memcpy(weights, table, nr_node_ids); 2449 rcu_read_unlock(); 2450 2451 /* calculate total, detect system default usage */ 2452 for_each_node_mask(node, nodes) { 2453 if (!weights[node]) 2454 weights[node] = 1; 2455 weight_total += weights[node]; 2456 } 2457 2458 /* 2459 * Calculate rounds/partial rounds to minimize __alloc_pages_bulk calls. 2460 * Track which node weighted interleave should resume from. 2461 * 2462 * if (rounds > 0) and (delta == 0), resume_node will always be 2463 * the node following prev_node and its weight. 2464 */ 2465 rounds = rem_pages / weight_total; 2466 delta = rem_pages % weight_total; 2467 resume_node = next_node_in(prev_node, nodes); 2468 resume_weight = weights[resume_node]; 2469 for (i = 0; i < nnodes; i++) { 2470 node = next_node_in(prev_node, nodes); 2471 weight = weights[node]; 2472 node_pages = weight * rounds; 2473 /* If a delta exists, add this node's portion of the delta */ 2474 if (delta > weight) { 2475 node_pages += weight; 2476 delta -= weight; 2477 } else if (delta) { 2478 /* when delta is depleted, resume from that node */ 2479 node_pages += delta; 2480 resume_node = node; 2481 resume_weight = weight - delta; 2482 delta = 0; 2483 } 2484 /* node_pages can be 0 if an allocation fails and rounds == 0 */ 2485 if (!node_pages) 2486 break; 2487 nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages, 2488 NULL, page_array); 2489 page_array += nr_allocated; 2490 total_allocated += nr_allocated; 2491 if (total_allocated == nr_pages) 2492 break; 2493 prev_node = node; 2494 } 2495 me->il_prev = resume_node; 2496 me->il_weight = resume_weight; 2497 kfree(weights); 2498 return total_allocated; 2499 } 2500 2501 static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid, 2502 struct mempolicy *pol, unsigned long nr_pages, 2503 struct page **page_array) 2504 { 2505 gfp_t preferred_gfp; 2506 unsigned long nr_allocated = 0; 2507 2508 preferred_gfp = gfp | __GFP_NOWARN; 2509 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2510 2511 nr_allocated = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes, 2512 nr_pages, NULL, page_array); 2513 2514 if (nr_allocated < nr_pages) 2515 nr_allocated += alloc_pages_bulk_noprof(gfp, numa_node_id(), NULL, 2516 nr_pages - nr_allocated, NULL, 2517 page_array + nr_allocated); 2518 return nr_allocated; 2519 } 2520 2521 /* alloc pages bulk and mempolicy should be considered at the 2522 * same time in some situation such as vmalloc. 2523 * 2524 * It can accelerate memory allocation especially interleaving 2525 * allocate memory. 
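 *
 * Usage sketch (illustrative; assumes the alloc_pages_bulk_array_mempolicy()
 * wrapper and a NULL-filled array):
 *
 *	struct page *pages[16] = { NULL };
 *	unsigned long got;
 *
 *	got = alloc_pages_bulk_array_mempolicy(GFP_KERNEL, 16, pages);
 *
 * 'got' may be smaller than requested; pages[0..got-1] are filled and the
 * calling task's interleave/bind/preferred policy is honoured.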
2526 */ 2527 unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp, 2528 unsigned long nr_pages, struct page **page_array) 2529 { 2530 struct mempolicy *pol = &default_policy; 2531 nodemask_t *nodemask; 2532 int nid; 2533 2534 if (!in_interrupt() && !(gfp & __GFP_THISNODE)) 2535 pol = get_task_policy(current); 2536 2537 if (pol->mode == MPOL_INTERLEAVE) 2538 return alloc_pages_bulk_array_interleave(gfp, pol, 2539 nr_pages, page_array); 2540 2541 if (pol->mode == MPOL_WEIGHTED_INTERLEAVE) 2542 return alloc_pages_bulk_array_weighted_interleave( 2543 gfp, pol, nr_pages, page_array); 2544 2545 if (pol->mode == MPOL_PREFERRED_MANY) 2546 return alloc_pages_bulk_array_preferred_many(gfp, 2547 numa_node_id(), pol, nr_pages, page_array); 2548 2549 nid = numa_node_id(); 2550 nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid); 2551 return alloc_pages_bulk_noprof(gfp, nid, nodemask, 2552 nr_pages, NULL, page_array); 2553 } 2554 2555 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) 2556 { 2557 struct mempolicy *pol = mpol_dup(src->vm_policy); 2558 2559 if (IS_ERR(pol)) 2560 return PTR_ERR(pol); 2561 dst->vm_policy = pol; 2562 return 0; 2563 } 2564 2565 /* 2566 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it 2567 * rebinds the mempolicy its copying by calling mpol_rebind_policy() 2568 * with the mems_allowed returned by cpuset_mems_allowed(). This 2569 * keeps mempolicies cpuset relative after its cpuset moves. See 2570 * further kernel/cpuset.c update_nodemask(). 2571 * 2572 * current's mempolicy may be rebinded by the other task(the task that changes 2573 * cpuset's mems), so we needn't do rebind work for current task. 2574 */ 2575 2576 /* Slow path of a mempolicy duplicate */ 2577 struct mempolicy *__mpol_dup(struct mempolicy *old) 2578 { 2579 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 2580 2581 if (!new) 2582 return ERR_PTR(-ENOMEM); 2583 2584 /* task's mempolicy is protected by alloc_lock */ 2585 if (old == current->mempolicy) { 2586 task_lock(current); 2587 *new = *old; 2588 task_unlock(current); 2589 } else 2590 *new = *old; 2591 2592 if (current_cpuset_is_being_rebound()) { 2593 nodemask_t mems = cpuset_mems_allowed(current); 2594 mpol_rebind_policy(new, &mems); 2595 } 2596 atomic_set(&new->refcnt, 1); 2597 return new; 2598 } 2599 2600 /* Slow path of a mempolicy comparison */ 2601 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 2602 { 2603 if (!a || !b) 2604 return false; 2605 if (a->mode != b->mode) 2606 return false; 2607 if (a->flags != b->flags) 2608 return false; 2609 if (a->home_node != b->home_node) 2610 return false; 2611 if (mpol_store_user_nodemask(a)) 2612 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2613 return false; 2614 2615 switch (a->mode) { 2616 case MPOL_BIND: 2617 case MPOL_INTERLEAVE: 2618 case MPOL_PREFERRED: 2619 case MPOL_PREFERRED_MANY: 2620 case MPOL_WEIGHTED_INTERLEAVE: 2621 return !!nodes_equal(a->nodes, b->nodes); 2622 case MPOL_LOCAL: 2623 return true; 2624 default: 2625 BUG(); 2626 return false; 2627 } 2628 } 2629 2630 /* 2631 * Shared memory backing store policy support. 2632 * 2633 * Remember policies even when nobody has shared memory mapped. 2634 * The policies are kept in Red-Black tree linked from the inode. 2635 * They are protected by the sp->lock rwlock, which should be held 2636 * for any accesses to the tree. 2637 */ 2638 2639 /* 2640 * lookup first element intersecting start-end. 
Caller holds sp->lock for 2641 * reading or for writing 2642 */ 2643 static struct sp_node *sp_lookup(struct shared_policy *sp, 2644 pgoff_t start, pgoff_t end) 2645 { 2646 struct rb_node *n = sp->root.rb_node; 2647 2648 while (n) { 2649 struct sp_node *p = rb_entry(n, struct sp_node, nd); 2650 2651 if (start >= p->end) 2652 n = n->rb_right; 2653 else if (end <= p->start) 2654 n = n->rb_left; 2655 else 2656 break; 2657 } 2658 if (!n) 2659 return NULL; 2660 for (;;) { 2661 struct sp_node *w = NULL; 2662 struct rb_node *prev = rb_prev(n); 2663 if (!prev) 2664 break; 2665 w = rb_entry(prev, struct sp_node, nd); 2666 if (w->end <= start) 2667 break; 2668 n = prev; 2669 } 2670 return rb_entry(n, struct sp_node, nd); 2671 } 2672 2673 /* 2674 * Insert a new shared policy into the list. Caller holds sp->lock for 2675 * writing. 2676 */ 2677 static void sp_insert(struct shared_policy *sp, struct sp_node *new) 2678 { 2679 struct rb_node **p = &sp->root.rb_node; 2680 struct rb_node *parent = NULL; 2681 struct sp_node *nd; 2682 2683 while (*p) { 2684 parent = *p; 2685 nd = rb_entry(parent, struct sp_node, nd); 2686 if (new->start < nd->start) 2687 p = &(*p)->rb_left; 2688 else if (new->end > nd->end) 2689 p = &(*p)->rb_right; 2690 else 2691 BUG(); 2692 } 2693 rb_link_node(&new->nd, parent, p); 2694 rb_insert_color(&new->nd, &sp->root); 2695 } 2696 2697 /* Find shared policy intersecting idx */ 2698 struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, 2699 pgoff_t idx) 2700 { 2701 struct mempolicy *pol = NULL; 2702 struct sp_node *sn; 2703 2704 if (!sp->root.rb_node) 2705 return NULL; 2706 read_lock(&sp->lock); 2707 sn = sp_lookup(sp, idx, idx+1); 2708 if (sn) { 2709 mpol_get(sn->policy); 2710 pol = sn->policy; 2711 } 2712 read_unlock(&sp->lock); 2713 return pol; 2714 } 2715 2716 static void sp_free(struct sp_node *n) 2717 { 2718 mpol_put(n->policy); 2719 kmem_cache_free(sn_cache, n); 2720 } 2721 2722 /** 2723 * mpol_misplaced - check whether current folio node is valid in policy 2724 * 2725 * @folio: folio to be checked 2726 * @vmf: structure describing the fault 2727 * @addr: virtual address in @vma for shared policy lookup and interleave policy 2728 * 2729 * Lookup current policy node id for vma,addr and "compare to" folio's 2730 * node id. Policy determination "mimics" alloc_page_vma(). 2731 * Called from fault path where we know the vma and faulting address. 2732 * 2733 * Return: NUMA_NO_NODE if the page is in a node that is valid for this 2734 * policy, or a suitable node ID to allocate a replacement folio from. 
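 *
 * Example (illustrative): under an MPOL_BIND policy over nodes 0-1 with
 * MPOL_F_MOF set, a folio sitting on node 2 yields the closest allowed
 * node to the faulting CPU as the migration target, while a folio
 * already on node 0 or 1 yields NUMA_NO_NODE (not misplaced).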
2735 */ 2736 int mpol_misplaced(struct folio *folio, struct vm_fault *vmf, 2737 unsigned long addr) 2738 { 2739 struct mempolicy *pol; 2740 pgoff_t ilx; 2741 struct zoneref *z; 2742 int curnid = folio_nid(folio); 2743 struct vm_area_struct *vma = vmf->vma; 2744 int thiscpu = raw_smp_processor_id(); 2745 int thisnid = numa_node_id(); 2746 int polnid = NUMA_NO_NODE; 2747 int ret = NUMA_NO_NODE; 2748 2749 /* 2750 * Make sure ptl is held so that we don't preempt and we 2751 * have a stable smp processor id 2752 */ 2753 lockdep_assert_held(vmf->ptl); 2754 pol = get_vma_policy(vma, addr, folio_order(folio), &ilx); 2755 if (!(pol->flags & MPOL_F_MOF)) 2756 goto out; 2757 2758 switch (pol->mode) { 2759 case MPOL_INTERLEAVE: 2760 polnid = interleave_nid(pol, ilx); 2761 break; 2762 2763 case MPOL_WEIGHTED_INTERLEAVE: 2764 polnid = weighted_interleave_nid(pol, ilx); 2765 break; 2766 2767 case MPOL_PREFERRED: 2768 if (node_isset(curnid, pol->nodes)) 2769 goto out; 2770 polnid = first_node(pol->nodes); 2771 break; 2772 2773 case MPOL_LOCAL: 2774 polnid = numa_node_id(); 2775 break; 2776 2777 case MPOL_BIND: 2778 case MPOL_PREFERRED_MANY: 2779 /* 2780 * Even though MPOL_PREFERRED_MANY can allocate pages outside 2781 * policy nodemask we don't allow numa migration to nodes 2782 * outside policy nodemask for now. This is done so that if we 2783 * want demotion to slow memory to happen, before allocating 2784 * from some DRAM node say 'x', we will end up using a 2785 * MPOL_PREFERRED_MANY mask excluding node 'x'. In such scenario 2786 * we should not promote to node 'x' from slow memory node. 2787 */ 2788 if (pol->flags & MPOL_F_MORON) { 2789 /* 2790 * Optimize placement among multiple nodes 2791 * via NUMA balancing 2792 */ 2793 if (node_isset(thisnid, pol->nodes)) 2794 break; 2795 goto out; 2796 } 2797 2798 /* 2799 * use current page if in policy nodemask, 2800 * else select nearest allowed node, if any. 2801 * If no allowed nodes, use current [!misplaced]. 2802 */ 2803 if (node_isset(curnid, pol->nodes)) 2804 goto out; 2805 z = first_zones_zonelist( 2806 node_zonelist(thisnid, GFP_HIGHUSER), 2807 gfp_zone(GFP_HIGHUSER), 2808 &pol->nodes); 2809 polnid = zone_to_nid(z->zone); 2810 break; 2811 2812 default: 2813 BUG(); 2814 } 2815 2816 /* Migrate the folio towards the node whose CPU is referencing it */ 2817 if (pol->flags & MPOL_F_MORON) { 2818 polnid = thisnid; 2819 2820 if (!should_numa_migrate_memory(current, folio, curnid, 2821 thiscpu)) 2822 goto out; 2823 } 2824 2825 if (curnid != polnid) 2826 ret = polnid; 2827 out: 2828 mpol_cond_put(pol); 2829 2830 return ret; 2831 } 2832 2833 /* 2834 * Drop the (possibly final) reference to task->mempolicy. It needs to be 2835 * dropped after task->mempolicy is set to NULL so that any allocation done as 2836 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2837 * policy. 
2838 */ 2839 void mpol_put_task_policy(struct task_struct *task) 2840 { 2841 struct mempolicy *pol; 2842 2843 task_lock(task); 2844 pol = task->mempolicy; 2845 task->mempolicy = NULL; 2846 task_unlock(task); 2847 mpol_put(pol); 2848 } 2849 2850 static void sp_delete(struct shared_policy *sp, struct sp_node *n) 2851 { 2852 rb_erase(&n->nd, &sp->root); 2853 sp_free(n); 2854 } 2855 2856 static void sp_node_init(struct sp_node *node, unsigned long start, 2857 unsigned long end, struct mempolicy *pol) 2858 { 2859 node->start = start; 2860 node->end = end; 2861 node->policy = pol; 2862 } 2863 2864 static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2865 struct mempolicy *pol) 2866 { 2867 struct sp_node *n; 2868 struct mempolicy *newpol; 2869 2870 n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 2871 if (!n) 2872 return NULL; 2873 2874 newpol = mpol_dup(pol); 2875 if (IS_ERR(newpol)) { 2876 kmem_cache_free(sn_cache, n); 2877 return NULL; 2878 } 2879 newpol->flags |= MPOL_F_SHARED; 2880 sp_node_init(n, start, end, newpol); 2881 2882 return n; 2883 } 2884 2885 /* Replace a policy range. */ 2886 static int shared_policy_replace(struct shared_policy *sp, pgoff_t start, 2887 pgoff_t end, struct sp_node *new) 2888 { 2889 struct sp_node *n; 2890 struct sp_node *n_new = NULL; 2891 struct mempolicy *mpol_new = NULL; 2892 int ret = 0; 2893 2894 restart: 2895 write_lock(&sp->lock); 2896 n = sp_lookup(sp, start, end); 2897 /* Take care of old policies in the same range. */ 2898 while (n && n->start < end) { 2899 struct rb_node *next = rb_next(&n->nd); 2900 if (n->start >= start) { 2901 if (n->end <= end) 2902 sp_delete(sp, n); 2903 else 2904 n->start = end; 2905 } else { 2906 /* Old policy spanning whole new range. */ 2907 if (n->end > end) { 2908 if (!n_new) 2909 goto alloc_new; 2910 2911 *mpol_new = *n->policy; 2912 atomic_set(&mpol_new->refcnt, 1); 2913 sp_node_init(n_new, end, n->end, mpol_new); 2914 n->end = start; 2915 sp_insert(sp, n_new); 2916 n_new = NULL; 2917 mpol_new = NULL; 2918 break; 2919 } else 2920 n->end = start; 2921 } 2922 if (!next) 2923 break; 2924 n = rb_entry(next, struct sp_node, nd); 2925 } 2926 if (new) 2927 sp_insert(sp, new); 2928 write_unlock(&sp->lock); 2929 ret = 0; 2930 2931 err_out: 2932 if (mpol_new) 2933 mpol_put(mpol_new); 2934 if (n_new) 2935 kmem_cache_free(sn_cache, n_new); 2936 2937 return ret; 2938 2939 alloc_new: 2940 write_unlock(&sp->lock); 2941 ret = -ENOMEM; 2942 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 2943 if (!n_new) 2944 goto err_out; 2945 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 2946 if (!mpol_new) 2947 goto err_out; 2948 atomic_set(&mpol_new->refcnt, 1); 2949 goto restart; 2950 } 2951 2952 /** 2953 * mpol_shared_policy_init - initialize shared policy for inode 2954 * @sp: pointer to inode shared policy 2955 * @mpol: struct mempolicy to install 2956 * 2957 * Install non-NULL @mpol in inode's shared policy rb-tree. 2958 * On entry, the current task has a reference on a non-NULL @mpol. 2959 * This must be released on exit. 2960 * This is called at get_inode() calls and we can use GFP_KERNEL. 
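 *
 * For example (illustrative), a tmpfs mount with "mpol=interleave:0-3"
 * passes that superblock policy in as @mpol; each new inode on the mount
 * then gets a single sp_node covering the whole file range installed
 * here, so its page cache allocations interleave over nodes 0-3.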
2961 */ 2962 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 2963 { 2964 int ret; 2965 2966 sp->root = RB_ROOT; /* empty tree == default mempolicy */ 2967 rwlock_init(&sp->lock); 2968 2969 if (mpol) { 2970 struct sp_node *sn; 2971 struct mempolicy *npol; 2972 NODEMASK_SCRATCH(scratch); 2973 2974 if (!scratch) 2975 goto put_mpol; 2976 2977 /* contextualize the tmpfs mount point mempolicy to this file */ 2978 npol = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 2979 if (IS_ERR(npol)) 2980 goto free_scratch; /* no valid nodemask intersection */ 2981 2982 task_lock(current); 2983 ret = mpol_set_nodemask(npol, &mpol->w.user_nodemask, scratch); 2984 task_unlock(current); 2985 if (ret) 2986 goto put_npol; 2987 2988 /* alloc node covering entire file; adds ref to file's npol */ 2989 sn = sp_alloc(0, MAX_LFS_FILESIZE >> PAGE_SHIFT, npol); 2990 if (sn) 2991 sp_insert(sp, sn); 2992 put_npol: 2993 mpol_put(npol); /* drop initial ref on file's npol */ 2994 free_scratch: 2995 NODEMASK_SCRATCH_FREE(scratch); 2996 put_mpol: 2997 mpol_put(mpol); /* drop our incoming ref on sb mpol */ 2998 } 2999 } 3000 3001 int mpol_set_shared_policy(struct shared_policy *sp, 3002 struct vm_area_struct *vma, struct mempolicy *pol) 3003 { 3004 int err; 3005 struct sp_node *new = NULL; 3006 unsigned long sz = vma_pages(vma); 3007 3008 if (pol) { 3009 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, pol); 3010 if (!new) 3011 return -ENOMEM; 3012 } 3013 err = shared_policy_replace(sp, vma->vm_pgoff, vma->vm_pgoff + sz, new); 3014 if (err && new) 3015 sp_free(new); 3016 return err; 3017 } 3018 3019 /* Free a backing policy store on inode delete. */ 3020 void mpol_free_shared_policy(struct shared_policy *sp) 3021 { 3022 struct sp_node *n; 3023 struct rb_node *next; 3024 3025 if (!sp->root.rb_node) 3026 return; 3027 write_lock(&sp->lock); 3028 next = rb_first(&sp->root); 3029 while (next) { 3030 n = rb_entry(next, struct sp_node, nd); 3031 next = rb_next(&n->nd); 3032 sp_delete(sp, n); 3033 } 3034 write_unlock(&sp->lock); 3035 } 3036 3037 #ifdef CONFIG_NUMA_BALANCING 3038 static int __initdata numabalancing_override; 3039 3040 static void __init check_numabalancing_enable(void) 3041 { 3042 bool numabalancing_default = false; 3043 3044 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 3045 numabalancing_default = true; 3046 3047 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 3048 if (numabalancing_override) 3049 set_numabalancing_state(numabalancing_override == 1); 3050 3051 if (num_online_nodes() > 1 && !numabalancing_override) { 3052 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 3053 numabalancing_default ? 
"Enabling" : "Disabling"); 3054 set_numabalancing_state(numabalancing_default); 3055 } 3056 } 3057 3058 static int __init setup_numabalancing(char *str) 3059 { 3060 int ret = 0; 3061 if (!str) 3062 goto out; 3063 3064 if (!strcmp(str, "enable")) { 3065 numabalancing_override = 1; 3066 ret = 1; 3067 } else if (!strcmp(str, "disable")) { 3068 numabalancing_override = -1; 3069 ret = 1; 3070 } 3071 out: 3072 if (!ret) 3073 pr_warn("Unable to parse numa_balancing=\n"); 3074 3075 return ret; 3076 } 3077 __setup("numa_balancing=", setup_numabalancing); 3078 #else 3079 static inline void __init check_numabalancing_enable(void) 3080 { 3081 } 3082 #endif /* CONFIG_NUMA_BALANCING */ 3083 3084 void __init numa_policy_init(void) 3085 { 3086 nodemask_t interleave_nodes; 3087 unsigned long largest = 0; 3088 int nid, prefer = 0; 3089 3090 policy_cache = kmem_cache_create("numa_policy", 3091 sizeof(struct mempolicy), 3092 0, SLAB_PANIC, NULL); 3093 3094 sn_cache = kmem_cache_create("shared_policy_node", 3095 sizeof(struct sp_node), 3096 0, SLAB_PANIC, NULL); 3097 3098 for_each_node(nid) { 3099 preferred_node_policy[nid] = (struct mempolicy) { 3100 .refcnt = ATOMIC_INIT(1), 3101 .mode = MPOL_PREFERRED, 3102 .flags = MPOL_F_MOF | MPOL_F_MORON, 3103 .nodes = nodemask_of_node(nid), 3104 }; 3105 } 3106 3107 /* 3108 * Set interleaving policy for system init. Interleaving is only 3109 * enabled across suitably sized nodes (default is >= 16MB), or 3110 * fall back to the largest node if they're all smaller. 3111 */ 3112 nodes_clear(interleave_nodes); 3113 for_each_node_state(nid, N_MEMORY) { 3114 unsigned long total_pages = node_present_pages(nid); 3115 3116 /* Preserve the largest node */ 3117 if (largest < total_pages) { 3118 largest = total_pages; 3119 prefer = nid; 3120 } 3121 3122 /* Interleave this node? */ 3123 if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 3124 node_set(nid, interleave_nodes); 3125 } 3126 3127 /* All too small, use the largest */ 3128 if (unlikely(nodes_empty(interleave_nodes))) 3129 node_set(prefer, interleave_nodes); 3130 3131 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 3132 pr_err("%s: interleaving failed\n", __func__); 3133 3134 check_numabalancing_enable(); 3135 } 3136 3137 /* Reset policy of current process to default */ 3138 void numa_default_policy(void) 3139 { 3140 do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 3141 } 3142 3143 /* 3144 * Parse and format mempolicy from/to strings 3145 */ 3146 static const char * const policy_modes[] = 3147 { 3148 [MPOL_DEFAULT] = "default", 3149 [MPOL_PREFERRED] = "prefer", 3150 [MPOL_BIND] = "bind", 3151 [MPOL_INTERLEAVE] = "interleave", 3152 [MPOL_WEIGHTED_INTERLEAVE] = "weighted interleave", 3153 [MPOL_LOCAL] = "local", 3154 [MPOL_PREFERRED_MANY] = "prefer (many)", 3155 }; 3156 3157 #ifdef CONFIG_TMPFS 3158 /** 3159 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 3160 * @str: string containing mempolicy to parse 3161 * @mpol: pointer to struct mempolicy pointer, returned on success. 
3162 * 3163 * Format of input: 3164 * <mode>[=<flags>][:<nodelist>] 3165 * 3166 * Return: %0 on success, else %1 3167 */ 3168 int mpol_parse_str(char *str, struct mempolicy **mpol) 3169 { 3170 struct mempolicy *new = NULL; 3171 unsigned short mode_flags; 3172 nodemask_t nodes; 3173 char *nodelist = strchr(str, ':'); 3174 char *flags = strchr(str, '='); 3175 int err = 1, mode; 3176 3177 if (flags) 3178 *flags++ = '\0'; /* terminate mode string */ 3179 3180 if (nodelist) { 3181 /* NUL-terminate mode or flags string */ 3182 *nodelist++ = '\0'; 3183 if (nodelist_parse(nodelist, nodes)) 3184 goto out; 3185 if (!nodes_subset(nodes, node_states[N_MEMORY])) 3186 goto out; 3187 } else 3188 nodes_clear(nodes); 3189 3190 mode = match_string(policy_modes, MPOL_MAX, str); 3191 if (mode < 0) 3192 goto out; 3193 3194 switch (mode) { 3195 case MPOL_PREFERRED: 3196 /* 3197 * Insist on a nodelist of one node only, although later 3198 * we use first_node(nodes) to grab a single node, so here 3199 * nodelist (or nodes) cannot be empty. 3200 */ 3201 if (nodelist) { 3202 char *rest = nodelist; 3203 while (isdigit(*rest)) 3204 rest++; 3205 if (*rest) 3206 goto out; 3207 if (nodes_empty(nodes)) 3208 goto out; 3209 } 3210 break; 3211 case MPOL_INTERLEAVE: 3212 case MPOL_WEIGHTED_INTERLEAVE: 3213 /* 3214 * Default to online nodes with memory if no nodelist 3215 */ 3216 if (!nodelist) 3217 nodes = node_states[N_MEMORY]; 3218 break; 3219 case MPOL_LOCAL: 3220 /* 3221 * Don't allow a nodelist; mpol_new() checks flags 3222 */ 3223 if (nodelist) 3224 goto out; 3225 break; 3226 case MPOL_DEFAULT: 3227 /* 3228 * Insist on a empty nodelist 3229 */ 3230 if (!nodelist) 3231 err = 0; 3232 goto out; 3233 case MPOL_PREFERRED_MANY: 3234 case MPOL_BIND: 3235 /* 3236 * Insist on a nodelist 3237 */ 3238 if (!nodelist) 3239 goto out; 3240 } 3241 3242 mode_flags = 0; 3243 if (flags) { 3244 /* 3245 * Currently, we only support two mutually exclusive 3246 * mode flags. 3247 */ 3248 if (!strcmp(flags, "static")) 3249 mode_flags |= MPOL_F_STATIC_NODES; 3250 else if (!strcmp(flags, "relative")) 3251 mode_flags |= MPOL_F_RELATIVE_NODES; 3252 else 3253 goto out; 3254 } 3255 3256 new = mpol_new(mode, mode_flags, &nodes); 3257 if (IS_ERR(new)) 3258 goto out; 3259 3260 /* 3261 * Save nodes for mpol_to_str() to show the tmpfs mount options 3262 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 3263 */ 3264 if (mode != MPOL_PREFERRED) { 3265 new->nodes = nodes; 3266 } else if (nodelist) { 3267 nodes_clear(new->nodes); 3268 node_set(first_node(nodes), new->nodes); 3269 } else { 3270 new->mode = MPOL_LOCAL; 3271 } 3272 3273 /* 3274 * Save nodes for contextualization: this will be used to "clone" 3275 * the mempolicy in a specific context [cpuset] at a later time. 3276 */ 3277 new->w.user_nodemask = nodes; 3278 3279 err = 0; 3280 3281 out: 3282 /* Restore string for error message */ 3283 if (nodelist) 3284 *--nodelist = ':'; 3285 if (flags) 3286 *--flags = '='; 3287 if (!err) 3288 *mpol = new; 3289 return err; 3290 } 3291 #endif /* CONFIG_TMPFS */ 3292 3293 /** 3294 * mpol_to_str - format a mempolicy structure for printing 3295 * @buffer: to contain formatted mempolicy string 3296 * @maxlen: length of @buffer 3297 * @pol: pointer to mempolicy to be formatted 3298 * 3299 * Convert @pol into a string. If @buffer is too short, truncate the string. 3300 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 3301 * longest flag, "relative", and to display at least a few node ids. 
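 *
 * Example outputs (illustrative): "local", "prefer:2", "bind=static:0-3"
 * and "interleave:0-7", i.e. the same <mode>[=<flags>][:<nodelist>] form
 * accepted by mpol_parse_str() above.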
3302 */ 3303 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 3304 { 3305 char *p = buffer; 3306 nodemask_t nodes = NODE_MASK_NONE; 3307 unsigned short mode = MPOL_DEFAULT; 3308 unsigned short flags = 0; 3309 3310 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 3311 mode = pol->mode; 3312 flags = pol->flags; 3313 } 3314 3315 switch (mode) { 3316 case MPOL_DEFAULT: 3317 case MPOL_LOCAL: 3318 break; 3319 case MPOL_PREFERRED: 3320 case MPOL_PREFERRED_MANY: 3321 case MPOL_BIND: 3322 case MPOL_INTERLEAVE: 3323 case MPOL_WEIGHTED_INTERLEAVE: 3324 nodes = pol->nodes; 3325 break; 3326 default: 3327 WARN_ON_ONCE(1); 3328 snprintf(p, maxlen, "unknown"); 3329 return; 3330 } 3331 3332 p += snprintf(p, maxlen, "%s", policy_modes[mode]); 3333 3334 if (flags & MPOL_MODE_FLAGS) { 3335 p += snprintf(p, buffer + maxlen - p, "="); 3336 3337 /* 3338 * Currently, the only defined flags are mutually exclusive 3339 */ 3340 if (flags & MPOL_F_STATIC_NODES) 3341 p += snprintf(p, buffer + maxlen - p, "static"); 3342 else if (flags & MPOL_F_RELATIVE_NODES) 3343 p += snprintf(p, buffer + maxlen - p, "relative"); 3344 } 3345 3346 if (!nodes_empty(nodes)) 3347 p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 3348 nodemask_pr_args(&nodes)); 3349 } 3350 3351 #ifdef CONFIG_SYSFS 3352 struct iw_node_attr { 3353 struct kobj_attribute kobj_attr; 3354 int nid; 3355 }; 3356 3357 static ssize_t node_show(struct kobject *kobj, struct kobj_attribute *attr, 3358 char *buf) 3359 { 3360 struct iw_node_attr *node_attr; 3361 u8 weight; 3362 3363 node_attr = container_of(attr, struct iw_node_attr, kobj_attr); 3364 weight = get_il_weight(node_attr->nid); 3365 return sysfs_emit(buf, "%d\n", weight); 3366 } 3367 3368 static ssize_t node_store(struct kobject *kobj, struct kobj_attribute *attr, 3369 const char *buf, size_t count) 3370 { 3371 struct iw_node_attr *node_attr; 3372 u8 *new; 3373 u8 *old; 3374 u8 weight = 0; 3375 3376 node_attr = container_of(attr, struct iw_node_attr, kobj_attr); 3377 if (count == 0 || sysfs_streq(buf, "")) 3378 weight = 0; 3379 else if (kstrtou8(buf, 0, &weight)) 3380 return -EINVAL; 3381 3382 new = kzalloc(nr_node_ids, GFP_KERNEL); 3383 if (!new) 3384 return -ENOMEM; 3385 3386 mutex_lock(&iw_table_lock); 3387 old = rcu_dereference_protected(iw_table, 3388 lockdep_is_held(&iw_table_lock)); 3389 if (old) 3390 memcpy(new, old, nr_node_ids); 3391 new[node_attr->nid] = weight; 3392 rcu_assign_pointer(iw_table, new); 3393 mutex_unlock(&iw_table_lock); 3394 synchronize_rcu(); 3395 kfree(old); 3396 return count; 3397 } 3398 3399 static struct iw_node_attr **node_attrs; 3400 3401 static void sysfs_wi_node_release(struct iw_node_attr *node_attr, 3402 struct kobject *parent) 3403 { 3404 if (!node_attr) 3405 return; 3406 sysfs_remove_file(parent, &node_attr->kobj_attr.attr); 3407 kfree(node_attr->kobj_attr.attr.name); 3408 kfree(node_attr); 3409 } 3410 3411 static void sysfs_wi_release(struct kobject *wi_kobj) 3412 { 3413 int i; 3414 3415 for (i = 0; i < nr_node_ids; i++) 3416 sysfs_wi_node_release(node_attrs[i], wi_kobj); 3417 kobject_put(wi_kobj); 3418 } 3419 3420 static const struct kobj_type wi_ktype = { 3421 .sysfs_ops = &kobj_sysfs_ops, 3422 .release = sysfs_wi_release, 3423 }; 3424 3425 static int add_weight_node(int nid, struct kobject *wi_kobj) 3426 { 3427 struct iw_node_attr *node_attr; 3428 char *name; 3429 3430 node_attr = kzalloc(sizeof(*node_attr), GFP_KERNEL); 3431 if (!node_attr) 3432 return -ENOMEM; 3433 3434 name = kasprintf(GFP_KERNEL, "node%d", nid); 3435 if 
(!name) { 3436 kfree(node_attr); 3437 return -ENOMEM; 3438 } 3439 3440 sysfs_attr_init(&node_attr->kobj_attr.attr); 3441 node_attr->kobj_attr.attr.name = name; 3442 node_attr->kobj_attr.attr.mode = 0644; 3443 node_attr->kobj_attr.show = node_show; 3444 node_attr->kobj_attr.store = node_store; 3445 node_attr->nid = nid; 3446 3447 if (sysfs_create_file(wi_kobj, &node_attr->kobj_attr.attr)) { 3448 kfree(node_attr->kobj_attr.attr.name); 3449 kfree(node_attr); 3450 pr_err("failed to add attribute to weighted_interleave\n"); 3451 return -ENOMEM; 3452 } 3453 3454 node_attrs[nid] = node_attr; 3455 return 0; 3456 } 3457 3458 static int add_weighted_interleave_group(struct kobject *root_kobj) 3459 { 3460 struct kobject *wi_kobj; 3461 int nid, err; 3462 3463 wi_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); 3464 if (!wi_kobj) 3465 return -ENOMEM; 3466 3467 err = kobject_init_and_add(wi_kobj, &wi_ktype, root_kobj, 3468 "weighted_interleave"); 3469 if (err) { 3470 kfree(wi_kobj); 3471 return err; 3472 } 3473 3474 for_each_node_state(nid, N_POSSIBLE) { 3475 err = add_weight_node(nid, wi_kobj); 3476 if (err) { 3477 pr_err("failed to add sysfs [node%d]\n", nid); 3478 break; 3479 } 3480 } 3481 if (err) 3482 kobject_put(wi_kobj); 3483 return 0; 3484 } 3485 3486 static void mempolicy_kobj_release(struct kobject *kobj) 3487 { 3488 u8 *old; 3489 3490 mutex_lock(&iw_table_lock); 3491 old = rcu_dereference_protected(iw_table, 3492 lockdep_is_held(&iw_table_lock)); 3493 rcu_assign_pointer(iw_table, NULL); 3494 mutex_unlock(&iw_table_lock); 3495 synchronize_rcu(); 3496 kfree(old); 3497 kfree(node_attrs); 3498 kfree(kobj); 3499 } 3500 3501 static const struct kobj_type mempolicy_ktype = { 3502 .release = mempolicy_kobj_release 3503 }; 3504 3505 static int __init mempolicy_sysfs_init(void) 3506 { 3507 int err; 3508 static struct kobject *mempolicy_kobj; 3509 3510 mempolicy_kobj = kzalloc(sizeof(*mempolicy_kobj), GFP_KERNEL); 3511 if (!mempolicy_kobj) { 3512 err = -ENOMEM; 3513 goto err_out; 3514 } 3515 3516 node_attrs = kcalloc(nr_node_ids, sizeof(struct iw_node_attr *), 3517 GFP_KERNEL); 3518 if (!node_attrs) { 3519 err = -ENOMEM; 3520 goto mempol_out; 3521 } 3522 3523 err = kobject_init_and_add(mempolicy_kobj, &mempolicy_ktype, mm_kobj, 3524 "mempolicy"); 3525 if (err) 3526 goto node_out; 3527 3528 err = add_weighted_interleave_group(mempolicy_kobj); 3529 if (err) { 3530 pr_err("mempolicy sysfs structure failed to initialize\n"); 3531 kobject_put(mempolicy_kobj); 3532 return err; 3533 } 3534 3535 return err; 3536 node_out: 3537 kfree(node_attrs); 3538 mempol_out: 3539 kfree(mempolicy_kobj); 3540 err_out: 3541 pr_err("failed to add mempolicy kobject to the system\n"); 3542 return err; 3543 } 3544 3545 late_initcall(mempolicy_sysfs_init); 3546 #endif /* CONFIG_SYSFS */ 3547
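/*
 * Usage note (illustrative, not kernel code): with CONFIG_SYSFS enabled,
 * the group created above exposes one file per possible node, e.g.
 *
 *	/sys/kernel/mm/mempolicy/weighted_interleave/node0
 *
 * Reading it returns the node's current interleave weight; writing a
 * value between 1 and 255 updates iw_table, and writing an empty string
 * resets the node to the system-default weight of 1.
 */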