// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints about the node(s) from which
 * memory should be allocated.
 *
 * Support five policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * preferred many Try a set of nodes first before normal fallback. This is
 *		similar to preferred without the special case.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel does not always handle that gracefully.
*/
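
/*
 * Illustrative userspace usage (not part of this file): the policies above
 * are exercised through set_mempolicy(2) and mbind(2). A minimal sketch,
 * assuming <numaif.h> from libnuma and a system with nodes 0 and 1; error
 * handling elided:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// interleave all future allocations of this process over nodes 0-1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask) + 1);
 *
 *	// bind one anonymous mapping to node 0 only
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long node0 = 1UL << 0;
 *	mbind(p, 1 << 20, MPOL_BIND, &node0, 8 * sizeof(node0) + 1,
 *	      MPOL_MF_STRICT);
 */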

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 *
 * Return: this @node if it is online, otherwise the closest node by distance
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
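
/*
 * Worked example for mpol_relative_nodemask() (illustrative): a user's
 * relative mask {0,2} applied against an allowed set {4,5,6} (weight 3)
 * is first folded modulo 3 (still {0,2}) and then mapped onto the allowed
 * set, yielding {4,6}:
 *
 *	nodemask_t rel, allowed, out;
 *
 *	nodes_clear(rel);
 *	node_set(0, rel);
 *	node_set(2, rel);
 *	nodes_clear(allowed);
 *	node_set(4, allowed);
 *	node_set(5, allowed);
 *	node_set(6, allowed);
 *	mpol_relative_nodemask(&out, &rel, &allowed);	// out = {4,6}
 */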

static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;

	nodes_clear(pol->nodes);
	node_set(first_node(*nodes), pol->nodes);
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/*
	 * Default (pol==NULL) and local memory policies are not subject
	 * to any remapping. They also do not need any special constructor.
	 */
	if (!pol || pol->mode == MPOL_LOCAL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);

	if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
	else
		nodes_and(nsc->mask2, *nodes, nsc->mask1);

	if (mpol_store_user_nodemask(pol))
		pol->w.user_nodemask = *nodes;
	else
		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;

	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	return ret;
}
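
/*
 * Creating a policy is therefore a two-step sequence; a condensed sketch
 * of what do_set_mempolicy() later in this file does (locking and scratch
 * allocation elided):
 *
 *	struct mempolicy *new = mpol_new(mode, flags, &nodes);
 *
 *	if (!IS_ERR(new) && mpol_set_nodemask(new, &nodes, scratch)) {
 *		mpol_put(new);		// nodemask was unusable
 *		new = NULL;
 *	}
 */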

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);

			mode = MPOL_LOCAL;
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;
	policy->home_node = NUMA_NO_NODE;

	return policy;
}

/* Slow path of an mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	pol->w.cpuset_mems_allowed = *nodes;
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}
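
/*
 * Worked rebind example (illustrative): consider a policy created over
 * nodes {0,1} while the cpuset allowed {0,1}, after the cpuset changes
 * to {2,3}:
 *
 *   - no mode flag: nodes_remap() slides the mask, pol->nodes becomes
 *     {2,3} and w.cpuset_mems_allowed is updated to {2,3}.
 *   - MPOL_F_STATIC_NODES: the saved user mask {0,1} is intersected with
 *     {2,3}, which is empty, so pol->nodes falls back to the whole new
 *     set {2,3}.
 *   - MPOL_F_RELATIVE_NODES: the user mask is re-folded onto {2,3}, so
 *     relative nodes {0,1} become physical nodes {2,3}.
 */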

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_lock during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	mmap_write_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_LOCAL] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_PREFERRED_MANY] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_preferred,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the inverse of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}
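
/*
 * Example (illustrative): with qp->nmask = {0,1} and MPOL_MF_INVERT set,
 * a page on node 3 "matches" (it violates the mask) while a page on node 0
 * does not. do_mbind() relies on this by passing MPOL_MF_INVERT together
 * with the new policy's nodemask, so that exactly the misplaced pages are
 * queued for migration.
 */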

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully, or
 *     special page is met, i.e. huge zero page.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - the entry is a migration entry, or only MPOL_MF_STRICT was
 *        specified and an existing page was already on a node that does
 *        not follow the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		walk->action = ACTION_CONTINUE;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through the page range, checking whether pages satisfy the required
 * conditions, and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully, or
 *     special page is met, i.e. zero page.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off-LRU pages in the range. We still
			 * need to migrate the other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced pages and no
		 * need to further check other vmas.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detect misplaced pages but allow migrating pages which
		 * have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
		if (!isolate_huge_page(page, qp->pagelist) &&
		    (flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate the page but allow migrating
			 * pages which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses as inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         the memory range specified by nodemask and maxnode points
 *         outside your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}
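
/*
 * Usage sketch (condensed from migrate_to_node() below): isolate every
 * page of the whole address space that sits on node @source onto a
 * private list, tolerating holes in the range:
 *
 *	nodemask_t nmask;
 *	LIST_HEAD(pagelist);
 *
 *	nodes_clear(nmask);
 *	node_set(source, nmask);
 *	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 *			  MPOL_MF_MOVE | MPOL_MF_DISCONTIG_OK, &pagelist);
 */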

/*
 * Apply policy to a single VMA.
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
				struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = vma->vm_next) {
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx,
				 anon_vma_name(vma));
		if (prev) {
			vma = prev;
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		mpol_put(new);
		goto out;
	}
	task_lock(current);
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}
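
/*
 * Example (illustrative): callers inside the kernel use this helper
 * directly; interleaving the current task over all memory nodes would be:
 *
 *	nodemask_t mask = node_states[N_MEMORY];
 *
 *	do_set_mempolicy(MPOL_INTERLEAVE, 0, &mask);
 *
 * and numa_default_policy() resets to the default policy with
 * do_set_mempolicy(MPOL_DEFAULT, 0, NULL).
 */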

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
		*nodes = p->nodes;
		break;
	case MPOL_LOCAL:
		/* return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int ret;

	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
	if (ret > 0) {
		ret = page_to_nid(p);
		put_page(p);
	}
	return ret;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL. We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = vma_lookup(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, because we are about to
			 * drop the mmap_lock, after which only "pol" remains
			 * valid, "vma" is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			mmap_read_unlock(mm);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}
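
/*
 * Illustrative userspace counterpart: MPOL_F_NODE | MPOL_F_ADDR asks for
 * the node backing a specific address (faulting the page in through
 * lookup_node() above if needed):
 *
 *	int node;
 *
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 */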

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				thp_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable pages may reach here. And, there may be
			 * temporary off-LRU pages or non-LRU movable pages.
			 * Treat them as unmovable pages since they can't be
			 * isolated, so they can't be moved at the moment. It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;
	struct migration_target_control mtc = {
		.nid = dest,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration. Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	lru_cache_disable();

	mmap_read_lock(mm);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same. If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory from that same node.
	 *
	 * A single scan of tmp is sufficient. As we go, we remember the
	 * most recent <s, d> pair that moved (s != d). If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved. If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship. In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *	    [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	mmap_read_unlock(mm);

	lru_cache_enable();
	if (err < 0)
		return err;
	return busy;
}
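
/*
 * Worked example (illustrative): from = {0,1}, to = {1,2}. The first scan
 * remembers <0,1> but keeps looking because node 1 is still a pending
 * source, then finds <1,2> whose destination lies outside the remaining
 * sources and migrates 1 -> 2 first. The next pass moves 0 -> 1, now that
 * node 1 has been drained, so no node ever holds two nodes' worth of
 * incoming pages.
 */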

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not. N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start)
{
	struct folio *dst, *src = page_folio(page);
	struct vm_area_struct *vma;
	unsigned long address;
	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (folio_test_hugetlb(src))
		return alloc_huge_page_vma(page_hstate(&src->page),
				vma, address);

	if (folio_test_large(src))
		gfp = GFP_TRANSHUGE;

	/*
	 * if !vma, vma_alloc_folio() will use task or system default policy
	 */
	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
			folio_test_large(src));
	return &dst->page;
}
#else

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	return -EIO;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	int ret;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		lru_cache_disable();
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			mmap_write_lock(mm);
			err = mpol_set_nodemask(new, nmask, scratch);
			if (err)
				mmap_write_unlock(mm);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	ret = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	if (ret < 0) {
		err = ret;
		goto up_out;
	}

	err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_page, NULL,
				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
			err = -EIO;
	} else {
up_out:
		if (!list_empty(&pagelist))
			putback_movable_pages(&pagelist);
	}

	mmap_write_unlock(mm);
mpol_out:
	mpol_put(new);
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		lru_cache_enable();
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */
static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
		      unsigned long maxnode)
{
	unsigned long nlongs = BITS_TO_LONGS(maxnode);
	int ret;

	if (in_compat_syscall())
		ret = compat_get_bitmap(mask,
					(const compat_ulong_t __user *)nmask,
					maxnode);
	else
		ret = copy_from_user(mask, nmask,
				     nlongs * sizeof(unsigned long));

	if (ret)
		return -EFAULT;

	if (maxnode % BITS_PER_LONG)
		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;

	return 0;
}

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	/*
	 * When the user specified more nodes than supported just check
	 * if the non supported part is all zero, one word at a time,
	 * starting at the end.
	 */
	while (maxnode > MAX_NUMNODES) {
		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
		unsigned long t;

		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
			return -EFAULT;

		if (maxnode - bits >= MAX_NUMNODES) {
			maxnode -= bits;
		} else {
			maxnode = MAX_NUMNODES;
			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
		}
		if (t)
			return -EINVAL;
	}

	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
}
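
/*
 * Note on maxnode (illustrative): it counts bits and get_nodes() starts
 * with --maxnode, so a caller that wants nodes {0,1} honoured must pass
 * maxnode >= 3. Userspace typically passes 8 * sizeof(unsigned long) + 1
 * to cover one full word of the node bitmap.
 */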
1385 */ 1386 while (maxnode > MAX_NUMNODES) { 1387 unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG); 1388 unsigned long t; 1389 1390 if (get_bitmap(&t, &nmask[maxnode / BITS_PER_LONG], bits)) 1391 return -EFAULT; 1392 1393 if (maxnode - bits >= MAX_NUMNODES) { 1394 maxnode -= bits; 1395 } else { 1396 maxnode = MAX_NUMNODES; 1397 t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 1398 } 1399 if (t) 1400 return -EINVAL; 1401 } 1402 1403 return get_bitmap(nodes_addr(*nodes), nmask, maxnode); 1404 } 1405 1406 /* Copy a kernel node mask to user space */ 1407 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 1408 nodemask_t *nodes) 1409 { 1410 unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1411 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 1412 bool compat = in_compat_syscall(); 1413 1414 if (compat) 1415 nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t); 1416 1417 if (copy > nbytes) { 1418 if (copy > PAGE_SIZE) 1419 return -EINVAL; 1420 if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 1421 return -EFAULT; 1422 copy = nbytes; 1423 maxnode = nr_node_ids; 1424 } 1425 1426 if (compat) 1427 return compat_put_bitmap((compat_ulong_t __user *)mask, 1428 nodes_addr(*nodes), maxnode); 1429 1430 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0; 1431 } 1432 1433 /* Basic parameter sanity check used by both mbind() and set_mempolicy() */ 1434 static inline int sanitize_mpol_flags(int *mode, unsigned short *flags) 1435 { 1436 *flags = *mode & MPOL_MODE_FLAGS; 1437 *mode &= ~MPOL_MODE_FLAGS; 1438 1439 if ((unsigned int)(*mode) >= MPOL_MAX) 1440 return -EINVAL; 1441 if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES)) 1442 return -EINVAL; 1443 if (*flags & MPOL_F_NUMA_BALANCING) { 1444 if (*mode != MPOL_BIND) 1445 return -EINVAL; 1446 *flags |= (MPOL_F_MOF | MPOL_F_MORON); 1447 } 1448 return 0; 1449 } 1450 1451 static long kernel_mbind(unsigned long start, unsigned long len, 1452 unsigned long mode, const unsigned long __user *nmask, 1453 unsigned long maxnode, unsigned int flags) 1454 { 1455 unsigned short mode_flags; 1456 nodemask_t nodes; 1457 int lmode = mode; 1458 int err; 1459 1460 start = untagged_addr(start); 1461 err = sanitize_mpol_flags(&lmode, &mode_flags); 1462 if (err) 1463 return err; 1464 1465 err = get_nodes(&nodes, nmask, maxnode); 1466 if (err) 1467 return err; 1468 1469 return do_mbind(start, len, lmode, mode_flags, &nodes, flags); 1470 } 1471 1472 SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len, 1473 unsigned long, home_node, unsigned long, flags) 1474 { 1475 struct mm_struct *mm = current->mm; 1476 struct vm_area_struct *vma; 1477 struct mempolicy *new; 1478 unsigned long vmstart; 1479 unsigned long vmend; 1480 unsigned long end; 1481 int err = -ENOENT; 1482 1483 start = untagged_addr(start); 1484 if (start & ~PAGE_MASK) 1485 return -EINVAL; 1486 /* 1487 * flags is used for future extension if any. 1488 */ 1489 if (flags != 0) 1490 return -EINVAL; 1491 1492 /* 1493 * Check home_node is online to avoid accessing uninitialized 1494 * NODE_DATA. 
1495 */ 1496 if (home_node >= MAX_NUMNODES || !node_online(home_node)) 1497 return -EINVAL; 1498 1499 len = (len + PAGE_SIZE - 1) & PAGE_MASK; 1500 end = start + len; 1501 1502 if (end < start) 1503 return -EINVAL; 1504 if (end == start) 1505 return 0; 1506 mmap_write_lock(mm); 1507 vma = find_vma(mm, start); 1508 for (; vma && vma->vm_start < end; vma = vma->vm_next) { 1509 1510 vmstart = max(start, vma->vm_start); 1511 vmend = min(end, vma->vm_end); 1512 new = mpol_dup(vma_policy(vma)); 1513 if (IS_ERR(new)) { 1514 err = PTR_ERR(new); 1515 break; 1516 } 1517 /* 1518 * Only update home node if there is an existing vma policy 1519 */ 1520 if (!new) 1521 continue; 1522 1523 /* 1524 * If any vma in the range got policy other than MPOL_BIND 1525 * or MPOL_PREFERRED_MANY we return error. We don't reset 1526 * the home node for vmas we already updated before. 1527 */ 1528 if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) { 1529 err = -EOPNOTSUPP; 1530 break; 1531 } 1532 1533 new->home_node = home_node; 1534 err = mbind_range(mm, vmstart, vmend, new); 1535 mpol_put(new); 1536 if (err) 1537 break; 1538 } 1539 mmap_write_unlock(mm); 1540 return err; 1541 } 1542 1543 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1544 unsigned long, mode, const unsigned long __user *, nmask, 1545 unsigned long, maxnode, unsigned int, flags) 1546 { 1547 return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1548 } 1549 1550 /* Set the process memory policy */ 1551 static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1552 unsigned long maxnode) 1553 { 1554 unsigned short mode_flags; 1555 nodemask_t nodes; 1556 int lmode = mode; 1557 int err; 1558 1559 err = sanitize_mpol_flags(&lmode, &mode_flags); 1560 if (err) 1561 return err; 1562 1563 err = get_nodes(&nodes, nmask, maxnode); 1564 if (err) 1565 return err; 1566 1567 return do_set_mempolicy(lmode, mode_flags, &nodes); 1568 } 1569 1570 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1571 unsigned long, maxnode) 1572 { 1573 return kernel_set_mempolicy(mode, nmask, maxnode); 1574 } 1575 1576 static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1577 const unsigned long __user *old_nodes, 1578 const unsigned long __user *new_nodes) 1579 { 1580 struct mm_struct *mm = NULL; 1581 struct task_struct *task; 1582 nodemask_t task_nodes; 1583 int err; 1584 nodemask_t *old; 1585 nodemask_t *new; 1586 NODEMASK_SCRATCH(scratch); 1587 1588 if (!scratch) 1589 return -ENOMEM; 1590 1591 old = &scratch->mask1; 1592 new = &scratch->mask2; 1593 1594 err = get_nodes(old, old_nodes, maxnode); 1595 if (err) 1596 goto out; 1597 1598 err = get_nodes(new, new_nodes, maxnode); 1599 if (err) 1600 goto out; 1601 1602 /* Find the mm_struct */ 1603 rcu_read_lock(); 1604 task = pid ? find_task_by_vpid(pid) : current; 1605 if (!task) { 1606 rcu_read_unlock(); 1607 err = -ESRCH; 1608 goto out; 1609 } 1610 get_task_struct(task); 1611 1612 err = -EINVAL; 1613 1614 /* 1615 * Check if this process has the right to modify the specified process. 1616 * Use the regular "ptrace_may_access()" checks. 1617 */ 1618 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1619 rcu_read_unlock(); 1620 err = -EPERM; 1621 goto out_put; 1622 } 1623 rcu_read_unlock(); 1624 1625 task_nodes = cpuset_mems_allowed(task); 1626 /* Is the user allowed to access the target nodes? 

/* Set the process memory policy */
static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
				 unsigned long maxnode)
{
	unsigned short mode_flags;
	nodemask_t nodes;
	int lmode = mode;
	int err;

	err = sanitize_mpol_flags(&lmode, &mode_flags);
	if (err)
		return err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;

	return do_set_mempolicy(lmode, mode_flags, &nodes);
}

SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	return kernel_set_mempolicy(mode, nmask, maxnode);
}

static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
				const unsigned long __user *old_nodes,
				const unsigned long __user *new_nodes)
{
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	nodemask_t task_nodes;
	int err;
	nodemask_t *old;
	nodemask_t *new;
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;

	old = &scratch->mask1;
	new = &scratch->mask2;

	err = get_nodes(old, old_nodes, maxnode);
	if (err)
		goto out;

	err = get_nodes(new, new_nodes, maxnode);
	if (err)
		goto out;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		err = -ESRCH;
		goto out;
	}
	get_task_struct(task);

	err = -EINVAL;

	/*
	 * Check if this process has the right to modify the specified process.
	 * Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out_put;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out_put;
	}

	task_nodes = cpuset_mems_allowed(current);
	nodes_and(*new, *new, task_nodes);
	if (nodes_empty(*new))
		goto out_put;

	err = security_task_movememory(task);
	if (err)
		goto out_put;

	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		err = -EINVAL;
		goto out;
	}

	err = do_migrate_pages(mm, old, new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

	mmput(mm);
out:
	NODEMASK_SCRATCH_FREE(scratch);

	return err;

out_put:
	put_task_struct(task);
	goto out;
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
}

/* Retrieve NUMA policy */
static int kernel_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr,
				unsigned long flags)
{
	int err;
	int pval;
	nodemask_t nodes;

	if (nmask != NULL && maxnode < nr_node_ids)
		return -EINVAL;

	addr = untagged_addr(addr);

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
{
	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
}

bool vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return false;

	/*
	 * DAX device mappings require predictable access latency, so avoid
	 * incurring periodic faults.
	 */
	if (vma_is_dax(vma))
		return false;

	if (is_vm_hugetlb_page(vma) &&
		!hugepage_migration_supported(hstate_vma(vma)))
		return false;

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
			< policy_zone)
		return false;
	return true;
}

struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
				   unsigned long addr)
{
	struct mempolicy *pol = NULL;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			pol = vma->vm_ops->get_policy(vma, addr);
		} else if (vma->vm_policy) {
			pol = vma->vm_policy;

			/*
			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
			 * count on these policies which will be dropped by
			 * mpol_cond_put() later
			 */
			if (mpol_needs_cond_ref(pol))
				mpol_get(pol);
		}
	}

	return pol;
}

/*
 * get_vma_policy(@vma, @addr)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to current->mempolicy or system default policy, as necessary.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task. It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct mempolicy *pol = __get_vma_policy(vma, addr);

	if (!pol)
		pol = get_task_policy(current);

	return pol;
}

bool vma_policy_mof(struct vm_area_struct *vma)
{
	struct mempolicy *pol;

	if (vma->vm_ops && vma->vm_ops->get_policy) {
		bool ret = false;

		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
		if (pol && (pol->flags & MPOL_F_MOF))
			ret = true;
		mpol_cond_put(pol);

		return ret;
	}

	pol = vma->vm_policy;
	if (!pol)
		pol = get_task_policy(current);

	return pol->flags & MPOL_F_MOF;
}

static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
	enum zone_type dynamic_policy_zone = policy_zone;

	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);

	/*
	 * If policy->nodes has movable memory only,
	 * we apply the policy only when gfp_zone(gfp) is ZONE_MOVABLE.
	 *
	 * policy->nodes is intersected with node_states[N_MEMORY],
	 * so if the following test fails, it implies
	 * policy->nodes has movable memory only.
	 */
	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
		dynamic_policy_zone = ZONE_MOVABLE;

	return zone >= dynamic_policy_zone;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	int mode = policy->mode;

	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(mode == MPOL_BIND) &&
		apply_policy_zone(policy, gfp_zone(gfp)) &&
		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
		return &policy->nodes;

	if (mode == MPOL_PREFERRED_MANY)
		return &policy->nodes;

	return NULL;
}

/*
 * Return the preferred node id for 'prefer' mempolicy, and return
 * the given id for all other policies.
 *
 * policy_node() is always coupled with policy_nodemask(), which
 * secures the nodemask limit for 'bind' and 'prefer-many' policy.
 */
static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
{
	if (policy->mode == MPOL_PREFERRED) {
		nd = first_node(policy->nodes);
	} else {
		/*
		 * __GFP_THISNODE shouldn't even be used with the bind policy
		 * because we might easily break the expectation to stay on the
		 * requested node and not break the policy.
		 */
		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
	}

	if ((policy->mode == MPOL_BIND ||
	     policy->mode == MPOL_PREFERRED_MANY) &&
	    policy->home_node != NUMA_NO_NODE)
		return policy->home_node;

	return nd;
}
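
/*
 * The two helpers above are meant to be used together; a condensed sketch
 * of the allocation pattern used by alloc_pages_vma() later in this file:
 *
 *	nodemask_t *nmask = policy_nodemask(gfp, pol);
 *	int nid = policy_node(gfp, pol, numa_node_id());
 *	struct page *page = __alloc_pages(gfp, order, nid, nmask);
 */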

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned next;
	struct task_struct *me = current;

	next = next_node_in(me->il_prev, policy->nodes);
	if (next < MAX_NUMNODES)
		me->il_prev = next;
	return next;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned int mempolicy_slab_node(void)
{
	struct mempolicy *policy;
	int node = numa_mem_id();

	if (!in_task())
		return node;

	policy = current->mempolicy;
	if (!policy)
		return node;

	switch (policy->mode) {
	case MPOL_PREFERRED:
		return first_node(policy->nodes);

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND:
	case MPOL_PREFERRED_MANY:
	{
		struct zoneref *z;

		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
		z = first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->nodes);
		return z->zone ? zone_to_nid(z->zone) : node;
	}
	case MPOL_LOCAL:
		return node;

	default:
		BUG();
	}
}

/*
 * Do static interleaving for a VMA with known offset @n.  Returns the n'th
 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
 * number of present nodes.
 */
static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
{
	nodemask_t nodemask = pol->nodes;
	unsigned int target, nnodes;
	int i;
	int nid;
	/*
	 * The barrier will stabilize the nodemask in a register or on
	 * the stack so that it will stop changing under the code.
	 *
	 * Between first_node() and next_node(), pol->nodes could be changed
	 * by other threads. So we put pol->nodes in a local stack.
	 */
	barrier();

	nnodes = nodes_weight(nodemask);
	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)n % nnodes;
	nid = first_node(nodemask);
	for (i = 0; i < target; i++)
		nid = next_node(nid, nodemask);
	return nid;
}

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, off);
	} else
		return interleave_nodes(pol);
}
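
/*
 * Worked example (illustrative): pol->nodes = {0,2,5}, so nnodes = 3, for
 * a small-page file mapping with vm_pgoff = 0. offset_il_node() places
 * page offset n on the (n % 3)'th node of the set: offset 0 -> node 0,
 * offset 1 -> node 2, offset 2 -> node 5, offset 3 -> node 0 again,
 * independent of where the mapping sits in the address space.
 */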

#ifdef CONFIG_HUGETLBFS
/*
 * huge_node(@vma, @addr, @gfp_flags, @mpol)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup and interleave policy
 * @gfp_flags: for requested zone
 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
 *
 * Returns a nid suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
 * to the mempolicy's @nodemask for filtering the zonelist.
 *
 * Must be protected by read_mems_allowed_begin()
 */
int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	int nid;
	int mode;

	*mpol = get_vma_policy(vma, addr);
	*nodemask = NULL;
	mode = (*mpol)->mode;

	if (unlikely(mode == MPOL_INTERLEAVE)) {
		nid = interleave_nid(*mpol, vma, addr,
					huge_page_shift(hstate_vma(vma)));
	} else {
		nid = policy_node(gfp_flags, *mpol, numa_node_id());
		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
			*nodemask = &(*mpol)->nodes;
	}
	return nid;
}

/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy. Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;

	if (!(mask && current->mempolicy))
		return false;

	task_lock(current);
	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		*mask = mempolicy->nodes;
		break;

	case MPOL_LOCAL:
		init_nodemask_of_node(mask, numa_node_id());
		break;

	default:
		BUG();
	}
	task_unlock(current);

	return true;
}
#endif
 */
bool mempolicy_in_oom_domain(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	bool ret = true;

	if (!mask)
		return ret;

	task_lock(tsk);
	mempolicy = tsk->mempolicy;
	if (mempolicy && mempolicy->mode == MPOL_BIND)
		ret = nodes_intersects(mempolicy->nodes, *mask);
	task_unlock(tsk);

	return ret;
}

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct page *page;

	page = __alloc_pages(gfp, order, nid, NULL);
	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
	if (!static_branch_likely(&vm_numa_stat_key))
		return page;
	if (page && page_to_nid(page) == nid) {
		preempt_disable();
		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
		preempt_enable();
	}
	return page;
}

static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
						int nid, struct mempolicy *pol)
{
	struct page *page;
	gfp_t preferred_gfp;

	/*
	 * This is a two pass approach. The first pass will only try the
	 * preferred nodes but skip the direct reclaim and allow the
	 * allocation to fail, while the second pass will try all the
	 * nodes in system.
	 */
	preferred_gfp = gfp | __GFP_NOWARN;
	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
	if (!page)
		page = __alloc_pages(gfp, order, nid, NULL);

	return page;
}

/**
 * alloc_pages_vma - Allocate a page for a VMA.
 * @gfp: GFP flags.
 * @order: Order of the GFP allocation.
 * @vma: Pointer to VMA or NULL if not available.
 * @addr: Virtual address of the allocation.  Must be inside @vma.
 * @hugepage: For hugepages try only the preferred node if possible.
 *
 * Allocate a page for a specific address in @vma, using the appropriate
 * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
 * of the mm_struct of the VMA to prevent it from going away.  Should be
 * used for all allocations for pages that will be mapped into user space.
 *
 * Return: The page on success or NULL if allocation fails.
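 *
 * For example (an illustrative sketch, not a call site in this file): an
 * anonymous fault handler holding mmap_lock might request
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 *
 * to get a single movable page placed according to the VMA's policy.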
 */
struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr, bool hugepage)
{
	struct mempolicy *pol;
	int node = numa_node_id();
	struct page *page;
	int preferred_nid;
	nodemask_t *nmask;

	pol = get_vma_policy(vma, addr);

	if (pol->mode == MPOL_INTERLEAVE) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
		mpol_cond_put(pol);
		page = alloc_page_interleave(gfp, order, nid);
		goto out;
	}

	if (pol->mode == MPOL_PREFERRED_MANY) {
		node = policy_node(gfp, pol, node);
		page = alloc_pages_preferred_many(gfp, order, node, pol);
		mpol_cond_put(pol);
		goto out;
	}

	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
		int hpage_node = node;

		/*
		 * For hugepage allocation and non-interleave policy which
		 * allows the current node (or other explicitly preferred
		 * node) we only try to allocate from the current/preferred
		 * node and don't fall back to other nodes, as the cost of
		 * remote accesses would likely offset THP benefits.
		 *
		 * If the policy is interleave or does not allow the current
		 * node in its nodemask, we allocate the standard way.
		 */
		if (pol->mode == MPOL_PREFERRED)
			hpage_node = first_node(pol->nodes);

		nmask = policy_nodemask(gfp, pol);
		if (!nmask || node_isset(hpage_node, *nmask)) {
			mpol_cond_put(pol);
			/*
			 * First, try to allocate THP only on local node, but
			 * don't reclaim unnecessarily, just compact.
			 */
			page = __alloc_pages_node(hpage_node,
				gfp | __GFP_THISNODE | __GFP_NORETRY, order);

			/*
			 * If hugepage allocations are configured to always
			 * use synchronous compaction or the vma has been
			 * madvised to prefer hugepage backing, retry allowing
			 * remote memory with both reclaim and compact as well.
			 */
			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
				page = __alloc_pages(gfp, order, hpage_node, nmask);

			goto out;
		}
	}

	nmask = policy_nodemask(gfp, pol);
	preferred_nid = policy_node(gfp, pol, node);
	page = __alloc_pages(gfp, order, preferred_nid, nmask);
	mpol_cond_put(pol);
out:
	return page;
}
EXPORT_SYMBOL(alloc_pages_vma);

struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr, bool hugepage)
{
	struct folio *folio;

	folio = (struct folio *)alloc_pages_vma(gfp, order, vma, addr,
			hugepage);
	if (folio && order > 1)
		prep_transhuge_page(&folio->page);

	return folio;
}

/**
 * alloc_pages - Allocate pages.
 * @gfp: GFP flags.
 * @order: Power of two of number of pages to allocate.
 *
 * Allocate 1 << @order contiguous pages.  The physical address of the
 * first page is naturally aligned (e.g. an order-3 allocation will be
 * aligned to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the
 * current process is honoured when in process context.
 *
 * Context: Can be called from any context, providing the appropriate GFP
 * flags are used.
 * Return: The page on success or NULL if allocation fails.
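 *
 * For example (illustrative): alloc_pages(GFP_KERNEL, 2) requests
 * 1 << 2 = 4 contiguous pages whose first page is aligned to
 * 4 * PAGE_SIZE, placed according to the current task's policy.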
 */
struct page *alloc_pages(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = &default_policy;
	struct page *page;

	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
		pol = get_task_policy(current);

	/*
	 * No reference counting needed for current->mempolicy
	 * nor system default_policy
	 */
	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
	else if (pol->mode == MPOL_PREFERRED_MANY)
		page = alloc_pages_preferred_many(gfp, order,
				policy_node(gfp, pol, numa_node_id()), pol);
	else
		page = __alloc_pages(gfp, order,
				policy_node(gfp, pol, numa_node_id()),
				policy_nodemask(gfp, pol));

	return page;
}
EXPORT_SYMBOL(alloc_pages);

struct folio *folio_alloc(gfp_t gfp, unsigned order)
{
	struct page *page = alloc_pages(gfp | __GFP_COMP, order);

	if (page && order > 1)
		prep_transhuge_page(page);
	return (struct folio *)page;
}
EXPORT_SYMBOL(folio_alloc);

static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
		struct mempolicy *pol, unsigned long nr_pages,
		struct page **page_array)
{
	int nodes;
	unsigned long nr_pages_per_node;
	int delta;
	int i;
	unsigned long nr_allocated;
	unsigned long total_allocated = 0;

	nodes = nodes_weight(pol->nodes);
	nr_pages_per_node = nr_pages / nodes;
	delta = nr_pages - nodes * nr_pages_per_node;

	for (i = 0; i < nodes; i++) {
		if (delta) {
			nr_allocated = __alloc_pages_bulk(gfp,
					interleave_nodes(pol), NULL,
					nr_pages_per_node + 1, NULL,
					page_array);
			delta--;
		} else {
			nr_allocated = __alloc_pages_bulk(gfp,
					interleave_nodes(pol), NULL,
					nr_pages_per_node, NULL, page_array);
		}

		page_array += nr_allocated;
		total_allocated += nr_allocated;
	}

	return total_allocated;
}

static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
		struct mempolicy *pol, unsigned long nr_pages,
		struct page **page_array)
{
	gfp_t preferred_gfp;
	unsigned long nr_allocated = 0;

	preferred_gfp = gfp | __GFP_NOWARN;
	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);

	nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
					  nr_pages, NULL, page_array);

	if (nr_allocated < nr_pages)
		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
				nr_pages - nr_allocated, NULL,
				page_array + nr_allocated);
	return nr_allocated;
}

/*
 * Bulk page allocation and the mempolicy need to be considered at the
 * same time in some situations, such as vmalloc.  Doing both together
 * can accelerate memory allocation, especially for interleaved
 * allocations.
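 *
 * For example (illustrative numbers): a 10-page bulk request under an
 * interleave policy over 3 nodes is split 4/3/3, since
 * nr_pages_per_node = 10 / 3 = 3 and the delta of one extra page goes
 * to the first node visited.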
 */
unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
		unsigned long nr_pages, struct page **page_array)
{
	struct mempolicy *pol = &default_policy;

	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
		pol = get_task_policy(current);

	if (pol->mode == MPOL_INTERLEAVE)
		return alloc_pages_bulk_array_interleave(gfp, pol,
							 nr_pages, page_array);

	if (pol->mode == MPOL_PREFERRED_MANY)
		return alloc_pages_bulk_array_preferred_many(gfp,
				numa_node_id(), pol, nr_pages, page_array);

	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
				  policy_nodemask(gfp, pol), nr_pages, NULL,
				  page_array);
}

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	struct mempolicy *pol = mpol_dup(vma_policy(src));

	if (IS_ERR(pol))
		return PTR_ERR(pol);
	dst->vm_policy = pol;
	return 0;
}

/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by another task (the task that
 * changes the cpuset's mems), so we needn't do rebind work for the
 * current task.
 */

/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);

	/* task's mempolicy is protected by alloc_lock */
	if (old == current->mempolicy) {
		task_lock(current);
		*new = *old;
		task_unlock(current);
	} else
		*new = *old;

	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(new, &mems);
	}
	atomic_set(&new->refcnt, 1);
	return new;
}

/* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return false;
	if (a->mode != b->mode)
		return false;
	if (a->flags != b->flags)
		return false;
	if (a->home_node != b->home_node)
		return false;
	if (mpol_store_user_nodemask(a))
		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
			return false;

	switch (a->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
		return !!nodes_equal(a->nodes, b->nodes);
	case MPOL_LOCAL:
		return true;
	default:
		BUG();
		return false;
	}
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in a red-black tree linked from the inode.
 * They are protected by the sp->lock rwlock, which should be held
 * for any accesses to the tree.
 */

/*
 * lookup first element intersecting start-end.
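 * Ranges are [start, end) in units of page offsets.  For example
 * (illustrative offsets): with stored ranges [0,4) and [6,8), a lookup
 * of [3,7) descends to an intersecting node and then walks back to
 * return the first one, [0,4).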
 * Caller holds sp->lock for reading or for writing.
 */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}

/*
 * Insert a new shared policy into the list.  Caller holds sp->lock for
 * writing.
 */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->mode : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	read_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	read_unlock(&sp->lock);
	return pol;
}

static void sp_free(struct sp_node *n)
{
	mpol_put(n->policy);
	kmem_cache_free(sn_cache, n);
}

/**
 * mpol_misplaced - check whether current page node is valid in policy
 *
 * @page: page to be checked
 * @vma: vm area where page mapped
 * @addr: virtual address where page mapped
 *
 * Lookup current policy node id for vma,addr and "compare to" page's
 * node id.  Policy determination "mimics" alloc_page_vma().
 * Called from fault path where we know the vma and faulting address.
 *
 * Return: NUMA_NO_NODE if the page is in a node that is valid for this
 * policy, or a suitable node ID to allocate a replacement page from.
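 *
 * For example (illustrative): with MPOL_F_MOF set, a page resident on
 * node 1 under a bind policy over {0} yields polnid 0, so 0 is returned
 * and the caller may migrate the page there.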
 */
int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol;
	struct zoneref *z;
	int curnid = page_to_nid(page);
	unsigned long pgoff;
	int thiscpu = raw_smp_processor_id();
	int thisnid = cpu_to_node(thiscpu);
	int polnid = NUMA_NO_NODE;
	int ret = NUMA_NO_NODE;

	pol = get_vma_policy(vma, addr);
	if (!(pol->flags & MPOL_F_MOF))
		goto out;

	switch (pol->mode) {
	case MPOL_INTERLEAVE:
		pgoff = vma->vm_pgoff;
		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
		polnid = offset_il_node(pol, pgoff);
		break;

	case MPOL_PREFERRED:
		if (node_isset(curnid, pol->nodes))
			goto out;
		polnid = first_node(pol->nodes);
		break;

	case MPOL_LOCAL:
		polnid = numa_node_id();
		break;

	case MPOL_BIND:
		/* Optimize placement among multiple nodes via NUMA balancing */
		if (pol->flags & MPOL_F_MORON) {
			if (node_isset(thisnid, pol->nodes))
				break;
			goto out;
		}
		fallthrough;

	case MPOL_PREFERRED_MANY:
		/*
		 * use current page if in policy nodemask,
		 * else select nearest allowed node, if any.
		 * If no allowed nodes, use current [!misplaced].
		 */
		if (node_isset(curnid, pol->nodes))
			goto out;
		z = first_zones_zonelist(
				node_zonelist(numa_node_id(), GFP_HIGHUSER),
				gfp_zone(GFP_HIGHUSER),
				&pol->nodes);
		polnid = zone_to_nid(z->zone);
		break;

	default:
		BUG();
	}

	/* Migrate the page towards the node whose CPU is referencing it */
	if (pol->flags & MPOL_F_MORON) {
		polnid = thisnid;

		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
			goto out;
	}

	if (curnid != polnid)
		ret = polnid;
out:
	mpol_cond_put(pol);

	return ret;
}

/*
 * Drop the (possibly final) reference to task->mempolicy.  It needs to be
 * dropped after task->mempolicy is set to NULL so that any allocation done as
 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
 * policy.
 */
void mpol_put_task_policy(struct task_struct *task)
{
	struct mempolicy *pol;

	task_lock(task);
	pol = task->mempolicy;
	task->mempolicy = NULL;
	task_unlock(task);
	mpol_put(pol);
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	sp_free(n);
}

static void sp_node_init(struct sp_node *node, unsigned long start,
			unsigned long end, struct mempolicy *pol)
{
	node->start = start;
	node->end = end;
	node->policy = pol;
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n;
	struct mempolicy *newpol;

	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n)
		return NULL;

	newpol = mpol_dup(pol);
	if (IS_ERR(newpol)) {
		kmem_cache_free(sn_cache, n);
		return NULL;
	}
	newpol->flags |= MPOL_F_SHARED;
	sp_node_init(n, start, end, newpol);

	return n;
}

/*
 * Replace a policy range.
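 * Existing entries overlapping [start, end) are deleted, trimmed or
 * split.  For example (illustrative offsets): installing [3, 5) over an
 * existing [0, 10) leaves [0, 3), the new [3, 5), and a duplicated
 * policy node covering [5, 10).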
 */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n;
	struct sp_node *n_new = NULL;
	struct mempolicy *mpol_new = NULL;
	int ret = 0;

restart:
	write_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!n_new)
					goto alloc_new;

				*mpol_new = *n->policy;
				atomic_set(&mpol_new->refcnt, 1);
				sp_node_init(n_new, end, n->end, mpol_new);
				n->end = start;
				sp_insert(sp, n_new);
				n_new = NULL;
				mpol_new = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	write_unlock(&sp->lock);
	ret = 0;

err_out:
	if (mpol_new)
		mpol_put(mpol_new);
	if (n_new)
		kmem_cache_free(sn_cache, n_new);

	return ret;

alloc_new:
	write_unlock(&sp->lock);
	ret = -ENOMEM;
	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n_new)
		goto err_out;
	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!mpol_new)
		goto err_out;
	atomic_set(&mpol_new->refcnt, 1);
	goto restart;
}

/**
 * mpol_shared_policy_init - initialize shared policy for inode
 * @sp: pointer to inode shared policy
 * @mpol:  struct mempolicy to install
 *
 * Install non-NULL @mpol in inode's shared policy rb-tree.
 * On entry, the current task has a reference on a non-NULL @mpol.
 * This must be released on exit.
 * This is called during get_inode(), so we can use GFP_KERNEL.
 */
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
	int ret;

	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
	rwlock_init(&sp->lock);

	if (mpol) {
		struct vm_area_struct pvma;
		struct mempolicy *new;
		NODEMASK_SCRATCH(scratch);

		if (!scratch)
			goto put_mpol;
		/* contextualize the tmpfs mount point mempolicy */
		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
		if (IS_ERR(new))
			goto free_scratch; /* no valid nodemask intersection */

		task_lock(current);
		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
		task_unlock(current);
		if (ret)
			goto put_new;

		/* Create pseudo-vma that contains just the policy */
		vma_init(&pvma, NULL);
		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */

put_new:
		mpol_put(new);			/* drop initial ref */
free_scratch:
		NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
	}
}

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->mode : -1,
		 npol ? npol->flags : -1,
		 npol ?
		 nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		sp_free(new);
	return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	write_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		sp_delete(p, n);
	}
	write_unlock(&p->lock);
}

#ifdef CONFIG_NUMA_BALANCING
static int __initdata numabalancing_override;

static void __init check_numabalancing_enable(void)
{
	bool numabalancing_default = false;

	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
		numabalancing_default = true;

	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
	if (numabalancing_override)
		set_numabalancing_state(numabalancing_override == 1);

	if (num_online_nodes() > 1 && !numabalancing_override) {
		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
			numabalancing_default ? "Enabling" : "Disabling");
		set_numabalancing_state(numabalancing_default);
	}
}

static int __init setup_numabalancing(char *str)
{
	int ret = 0;
	if (!str)
		goto out;

	if (!strcmp(str, "enable")) {
		numabalancing_override = 1;
		ret = 1;
	} else if (!strcmp(str, "disable")) {
		numabalancing_override = -1;
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("Unable to parse numa_balancing=\n");

	return ret;
}
__setup("numa_balancing=", setup_numabalancing);
#else
static inline void __init check_numabalancing_enable(void)
{
}
#endif /* CONFIG_NUMA_BALANCING */

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	for_each_node(nid) {
		preferred_node_policy[nid] = (struct mempolicy) {
			.refcnt = ATOMIC_INIT(1),
			.mode = MPOL_PREFERRED,
			.flags = MPOL_F_MOF | MPOL_F_MORON,
			.nodes = nodemask_of_node(nid),
		};
	}

	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/*
		 * Interleave this node?
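		 * (total_pages << PAGE_SHIFT) is the node's present
		 * memory in bytes and (16 << 20) is 16MB, so e.g. a node
		 * with 4096 present 4KB pages sits exactly at the
		 * threshold and qualifies.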
		 */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		pr_err("%s: interleaving failed\n", __func__);

	check_numabalancing_enable();
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

/*
 * Parse and format mempolicy from/to strings
 */

static const char * const policy_modes[] =
{
	[MPOL_DEFAULT]    = "default",
	[MPOL_PREFERRED]  = "prefer",
	[MPOL_BIND]       = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL]      = "local",
	[MPOL_PREFERRED_MANY]  = "prefer (many)",
};

#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
 * @str:  string containing mempolicy to parse
 * @mpol:  pointer to struct mempolicy pointer, returned on success.
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * Return: %0 on success, else %1
 */
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	struct mempolicy *new = NULL;
	unsigned short mode_flags;
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int err = 1, mode;

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	mode = match_string(policy_modes, MPOL_MAX, str);
	if (mode < 0)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only, although later
		 * we use first_node(nodes) to grab a single node, so here
		 * nodelist (or nodes) cannot be empty.
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
			if (nodes_empty(nodes))
				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_MEMORY];
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist;  mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		break;
	case MPOL_DEFAULT:
		/*
		 * Insist on an empty nodelist
		 */
		if (!nodelist)
			err = 0;
		goto out;
	case MPOL_PREFERRED_MANY:
	case MPOL_BIND:
		/*
		 * Insist on a nodelist
		 */
		if (!nodelist)
			goto out;
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
		 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		goto out;

	/*
	 * Save nodes for mpol_to_str() to show the tmpfs mount options
	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
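	 *
	 * For example (illustrative option): mounting tmpfs with
	 * mpol=interleave:0-3 parses to MPOL_INTERLEAVE over nodes 0-3
	 * and is reported back in the same form.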
	 */
	if (mode != MPOL_PREFERRED) {
		new->nodes = nodes;
	} else if (nodelist) {
		nodes_clear(new->nodes);
		node_set(first_node(nodes), new->nodes);
	} else {
		new->mode = MPOL_LOCAL;
	}

	/*
	 * Save nodes for contextualization: this will be used to "clone"
	 * the mempolicy in a specific context [cpuset] at a later time.
	 */
	new->w.user_nodemask = nodes;

	err = 0;

out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
#endif /* CONFIG_TMPFS */

/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer:  to contain formatted mempolicy string
 * @maxlen:  length of @buffer
 * @pol:  pointer to mempolicy to be formatted
 *
 * Convert @pol into a string.  If @buffer is too short, truncate the string.
 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
 * longest flag, "relative", and to display at least a few node ids.
 */
void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	nodemask_t nodes = NODE_MASK_NONE;
	unsigned short mode = MPOL_DEFAULT;
	unsigned short flags = 0;

	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
		mode = pol->mode;
		flags = pol->flags;
	}

	switch (mode) {
	case MPOL_DEFAULT:
	case MPOL_LOCAL:
		break;
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		nodes = pol->nodes;
		break;
	default:
		WARN_ON_ONCE(1);
		snprintf(p, maxlen, "unknown");
		return;
	}

	p += snprintf(p, maxlen, "%s", policy_modes[mode]);

	if (flags & MPOL_MODE_FLAGS) {
		p += snprintf(p, buffer + maxlen - p, "=");

		/*
		 * Currently, the only defined flags are mutually exclusive
		 */
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes))
		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
			       nodemask_pr_args(&nodes));
}
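/*
 * A worked example of the string format (illustrative): an interleave
 * policy with MPOL_F_STATIC_NODES over nodes 0-3 formats via
 * mpol_to_str() as
 *
 *	interleave=static:0-3
 *
 * which is exactly the <mode>[=<flags>][:<nodelist>] form that
 * mpol_parse_str() accepts for the tmpfs mpol= mount option.
 */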