/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Supports four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a per-process
 *		counter is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to those memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non-default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use the default policy. This implies that
 * on systems with highmem the kernel's lowmem allocations don't get
 * policied. Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger OOM much faster and the
   kernel is not always grateful with that.
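
   For orientation, a minimal userspace sketch of the interface implemented
   here (illustrative only, not part of the kernel build; assumes libnuma's
   <numaif.h> declarations of the set_mempolicy()/mbind() syscalls):

	#include <numaif.h>		// link with -lnuma
	#include <sys/mman.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long nodes = (1UL << 0) | (1UL << 1);	// nodes 0-1
		size_t len = 1 << 20;
		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		// process policy: interleave future allocations over nodes 0-1
		if (set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes)))
			perror("set_mempolicy");

		// VMA policy: bind this mapping to node 0; flags == 0 means
		// the policy applies to pages faulted in from now on
		nodes = 1UL << 0;
		if (buf != MAP_FAILED &&
		    mbind(buf, len, MPOL_BIND, &nodes, 8 * sizeof(nodes), 0))
			perror("mbind");
		return 0;
	}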
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy, the
	 * write-side task will rebind task->mempolicy in two steps.  The
	 * first step sets all the newly allowed nodes, and the second step
	 * clears all the disallowed nodes.  This way we avoid being left
	 * with no node to allocate a page from.
	 * If we have a lock to protect task->mempolicy on the read side, we
	 * rebind directly.
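	 *
	 * For illustration (hypothetical numbers, not taken from the code):
	 * if a task's allowed nodes change from {0,1} to {2,3}, step 1 grows
	 * the policy's nodemask to {0,1,2,3} and step 2 shrinks it to {2,3},
	 * so a lockless reader always observes at least one usable node.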
155 * 156 * step: 157 * MPOL_REBIND_ONCE - do rebind work at once 158 * MPOL_REBIND_STEP1 - set all the newly nodes 159 * MPOL_REBIND_STEP2 - clean all the disallowed nodes 160 */ 161 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes, 162 enum mpol_rebind_step step); 163 } mpol_ops[MPOL_MAX]; 164 165 static inline int mpol_store_user_nodemask(const struct mempolicy *pol) 166 { 167 return pol->flags & MPOL_MODE_FLAGS; 168 } 169 170 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig, 171 const nodemask_t *rel) 172 { 173 nodemask_t tmp; 174 nodes_fold(tmp, *orig, nodes_weight(*rel)); 175 nodes_onto(*ret, tmp, *rel); 176 } 177 178 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes) 179 { 180 if (nodes_empty(*nodes)) 181 return -EINVAL; 182 pol->v.nodes = *nodes; 183 return 0; 184 } 185 186 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) 187 { 188 if (!nodes) 189 pol->flags |= MPOL_F_LOCAL; /* local allocation */ 190 else if (nodes_empty(*nodes)) 191 return -EINVAL; /* no allowed nodes */ 192 else 193 pol->v.preferred_node = first_node(*nodes); 194 return 0; 195 } 196 197 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes) 198 { 199 if (nodes_empty(*nodes)) 200 return -EINVAL; 201 pol->v.nodes = *nodes; 202 return 0; 203 } 204 205 /* 206 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if 207 * any, for the new policy. mpol_new() has already validated the nodes 208 * parameter with respect to the policy mode and flags. But, we need to 209 * handle an empty nodemask with MPOL_PREFERRED here. 210 * 211 * Must be called holding task's alloc_lock to protect task's mems_allowed 212 * and mempolicy. May also be called holding the mmap_semaphore for write. 213 */ 214 static int mpol_set_nodemask(struct mempolicy *pol, 215 const nodemask_t *nodes, struct nodemask_scratch *nsc) 216 { 217 int ret; 218 219 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */ 220 if (pol == NULL) 221 return 0; 222 /* Check N_MEMORY */ 223 nodes_and(nsc->mask1, 224 cpuset_current_mems_allowed, node_states[N_MEMORY]); 225 226 VM_BUG_ON(!nodes); 227 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) 228 nodes = NULL; /* explicit local allocation */ 229 else { 230 if (pol->flags & MPOL_F_RELATIVE_NODES) 231 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); 232 else 233 nodes_and(nsc->mask2, *nodes, nsc->mask1); 234 235 if (mpol_store_user_nodemask(pol)) 236 pol->w.user_nodemask = *nodes; 237 else 238 pol->w.cpuset_mems_allowed = 239 cpuset_current_mems_allowed; 240 } 241 242 if (nodes) 243 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); 244 else 245 ret = mpol_ops[pol->mode].create(pol, NULL); 246 return ret; 247 } 248 249 /* 250 * This function just creates a new policy, does some check and simple 251 * initialization. You must invoke mpol_set_nodemask() to set nodes. 252 */ 253 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, 254 nodemask_t *nodes) 255 { 256 struct mempolicy *policy; 257 258 pr_debug("setting mode %d flags %d nodes[0] %lx\n", 259 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); 260 261 if (mode == MPOL_DEFAULT) { 262 if (nodes && !nodes_empty(*nodes)) 263 return ERR_PTR(-EINVAL); 264 return NULL; 265 } 266 VM_BUG_ON(!nodes); 267 268 /* 269 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or 270 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). 
271 * All other modes require a valid pointer to a non-empty nodemask. 272 */ 273 if (mode == MPOL_PREFERRED) { 274 if (nodes_empty(*nodes)) { 275 if (((flags & MPOL_F_STATIC_NODES) || 276 (flags & MPOL_F_RELATIVE_NODES))) 277 return ERR_PTR(-EINVAL); 278 } 279 } else if (mode == MPOL_LOCAL) { 280 if (!nodes_empty(*nodes) || 281 (flags & MPOL_F_STATIC_NODES) || 282 (flags & MPOL_F_RELATIVE_NODES)) 283 return ERR_PTR(-EINVAL); 284 mode = MPOL_PREFERRED; 285 } else if (nodes_empty(*nodes)) 286 return ERR_PTR(-EINVAL); 287 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); 288 if (!policy) 289 return ERR_PTR(-ENOMEM); 290 atomic_set(&policy->refcnt, 1); 291 policy->mode = mode; 292 policy->flags = flags; 293 294 return policy; 295 } 296 297 /* Slow path of a mpol destructor. */ 298 void __mpol_put(struct mempolicy *p) 299 { 300 if (!atomic_dec_and_test(&p->refcnt)) 301 return; 302 kmem_cache_free(policy_cache, p); 303 } 304 305 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes, 306 enum mpol_rebind_step step) 307 { 308 } 309 310 /* 311 * step: 312 * MPOL_REBIND_ONCE - do rebind work at once 313 * MPOL_REBIND_STEP1 - set all the newly nodes 314 * MPOL_REBIND_STEP2 - clean all the disallowed nodes 315 */ 316 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes, 317 enum mpol_rebind_step step) 318 { 319 nodemask_t tmp; 320 321 if (pol->flags & MPOL_F_STATIC_NODES) 322 nodes_and(tmp, pol->w.user_nodemask, *nodes); 323 else if (pol->flags & MPOL_F_RELATIVE_NODES) 324 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 325 else { 326 /* 327 * if step == 1, we use ->w.cpuset_mems_allowed to cache the 328 * result 329 */ 330 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) { 331 nodes_remap(tmp, pol->v.nodes, 332 pol->w.cpuset_mems_allowed, *nodes); 333 pol->w.cpuset_mems_allowed = step ? tmp : *nodes; 334 } else if (step == MPOL_REBIND_STEP2) { 335 tmp = pol->w.cpuset_mems_allowed; 336 pol->w.cpuset_mems_allowed = *nodes; 337 } else 338 BUG(); 339 } 340 341 if (nodes_empty(tmp)) 342 tmp = *nodes; 343 344 if (step == MPOL_REBIND_STEP1) 345 nodes_or(pol->v.nodes, pol->v.nodes, tmp); 346 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2) 347 pol->v.nodes = tmp; 348 else 349 BUG(); 350 351 if (!node_isset(current->il_next, tmp)) { 352 current->il_next = next_node_in(current->il_next, tmp); 353 if (current->il_next >= MAX_NUMNODES) 354 current->il_next = numa_node_id(); 355 } 356 } 357 358 static void mpol_rebind_preferred(struct mempolicy *pol, 359 const nodemask_t *nodes, 360 enum mpol_rebind_step step) 361 { 362 nodemask_t tmp; 363 364 if (pol->flags & MPOL_F_STATIC_NODES) { 365 int node = first_node(pol->w.user_nodemask); 366 367 if (node_isset(node, *nodes)) { 368 pol->v.preferred_node = node; 369 pol->flags &= ~MPOL_F_LOCAL; 370 } else 371 pol->flags |= MPOL_F_LOCAL; 372 } else if (pol->flags & MPOL_F_RELATIVE_NODES) { 373 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 374 pol->v.preferred_node = first_node(tmp); 375 } else if (!(pol->flags & MPOL_F_LOCAL)) { 376 pol->v.preferred_node = node_remap(pol->v.preferred_node, 377 pol->w.cpuset_mems_allowed, 378 *nodes); 379 pol->w.cpuset_mems_allowed = *nodes; 380 } 381 } 382 383 /* 384 * mpol_rebind_policy - Migrate a policy to a different set of nodes 385 * 386 * If read-side task has no lock to protect task->mempolicy, write-side 387 * task will rebind the task->mempolicy by two step. 
The first step is 388 * setting all the newly nodes, and the second step is cleaning all the 389 * disallowed nodes. In this way, we can avoid finding no node to alloc 390 * page. 391 * If we have a lock to protect task->mempolicy in read-side, we do 392 * rebind directly. 393 * 394 * step: 395 * MPOL_REBIND_ONCE - do rebind work at once 396 * MPOL_REBIND_STEP1 - set all the newly nodes 397 * MPOL_REBIND_STEP2 - clean all the disallowed nodes 398 */ 399 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask, 400 enum mpol_rebind_step step) 401 { 402 if (!pol) 403 return; 404 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE && 405 nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) 406 return; 407 408 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING)) 409 return; 410 411 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING)) 412 BUG(); 413 414 if (step == MPOL_REBIND_STEP1) 415 pol->flags |= MPOL_F_REBINDING; 416 else if (step == MPOL_REBIND_STEP2) 417 pol->flags &= ~MPOL_F_REBINDING; 418 else if (step >= MPOL_REBIND_NSTEP) 419 BUG(); 420 421 mpol_ops[pol->mode].rebind(pol, newmask, step); 422 } 423 424 /* 425 * Wrapper for mpol_rebind_policy() that just requires task 426 * pointer, and updates task mempolicy. 427 * 428 * Called with task's alloc_lock held. 429 */ 430 431 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new, 432 enum mpol_rebind_step step) 433 { 434 mpol_rebind_policy(tsk->mempolicy, new, step); 435 } 436 437 /* 438 * Rebind each vma in mm to new nodemask. 439 * 440 * Call holding a reference to mm. Takes mm->mmap_sem during call. 441 */ 442 443 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) 444 { 445 struct vm_area_struct *vma; 446 447 down_write(&mm->mmap_sem); 448 for (vma = mm->mmap; vma; vma = vma->vm_next) 449 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); 450 up_write(&mm->mmap_sem); 451 } 452 453 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { 454 [MPOL_DEFAULT] = { 455 .rebind = mpol_rebind_default, 456 }, 457 [MPOL_INTERLEAVE] = { 458 .create = mpol_new_interleave, 459 .rebind = mpol_rebind_nodemask, 460 }, 461 [MPOL_PREFERRED] = { 462 .create = mpol_new_preferred, 463 .rebind = mpol_rebind_preferred, 464 }, 465 [MPOL_BIND] = { 466 .create = mpol_new_bind, 467 .rebind = mpol_rebind_nodemask, 468 }, 469 }; 470 471 static void migrate_page_add(struct page *page, struct list_head *pagelist, 472 unsigned long flags); 473 474 struct queue_pages { 475 struct list_head *pagelist; 476 unsigned long flags; 477 nodemask_t *nmask; 478 struct vm_area_struct *prev; 479 }; 480 481 /* 482 * Scan through pages checking if pages follow certain conditions, 483 * and move them to the pagelist if they do. 
484 */ 485 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, 486 unsigned long end, struct mm_walk *walk) 487 { 488 struct vm_area_struct *vma = walk->vma; 489 struct page *page; 490 struct queue_pages *qp = walk->private; 491 unsigned long flags = qp->flags; 492 int nid, ret; 493 pte_t *pte; 494 spinlock_t *ptl; 495 496 if (pmd_trans_huge(*pmd)) { 497 ptl = pmd_lock(walk->mm, pmd); 498 if (pmd_trans_huge(*pmd)) { 499 page = pmd_page(*pmd); 500 if (is_huge_zero_page(page)) { 501 spin_unlock(ptl); 502 __split_huge_pmd(vma, pmd, addr, false, NULL); 503 } else { 504 get_page(page); 505 spin_unlock(ptl); 506 lock_page(page); 507 ret = split_huge_page(page); 508 unlock_page(page); 509 put_page(page); 510 if (ret) 511 return 0; 512 } 513 } else { 514 spin_unlock(ptl); 515 } 516 } 517 518 if (pmd_trans_unstable(pmd)) 519 return 0; 520 retry: 521 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 522 for (; addr != end; pte++, addr += PAGE_SIZE) { 523 if (!pte_present(*pte)) 524 continue; 525 page = vm_normal_page(vma, addr, *pte); 526 if (!page) 527 continue; 528 /* 529 * vm_normal_page() filters out zero pages, but there might 530 * still be PageReserved pages to skip, perhaps in a VDSO. 531 */ 532 if (PageReserved(page)) 533 continue; 534 nid = page_to_nid(page); 535 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT)) 536 continue; 537 if (PageTransCompound(page)) { 538 get_page(page); 539 pte_unmap_unlock(pte, ptl); 540 lock_page(page); 541 ret = split_huge_page(page); 542 unlock_page(page); 543 put_page(page); 544 /* Failed to split -- skip. */ 545 if (ret) { 546 pte = pte_offset_map_lock(walk->mm, pmd, 547 addr, &ptl); 548 continue; 549 } 550 goto retry; 551 } 552 553 migrate_page_add(page, qp->pagelist, flags); 554 } 555 pte_unmap_unlock(pte - 1, ptl); 556 cond_resched(); 557 return 0; 558 } 559 560 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, 561 unsigned long addr, unsigned long end, 562 struct mm_walk *walk) 563 { 564 #ifdef CONFIG_HUGETLB_PAGE 565 struct queue_pages *qp = walk->private; 566 unsigned long flags = qp->flags; 567 int nid; 568 struct page *page; 569 spinlock_t *ptl; 570 pte_t entry; 571 572 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); 573 entry = huge_ptep_get(pte); 574 if (!pte_present(entry)) 575 goto unlock; 576 page = pte_page(entry); 577 nid = page_to_nid(page); 578 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT)) 579 goto unlock; 580 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */ 581 if (flags & (MPOL_MF_MOVE_ALL) || 582 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) 583 isolate_huge_page(page, qp->pagelist); 584 unlock: 585 spin_unlock(ptl); 586 #else 587 BUG(); 588 #endif 589 return 0; 590 } 591 592 #ifdef CONFIG_NUMA_BALANCING 593 /* 594 * This is used to mark a range of virtual addresses to be inaccessible. 595 * These are later cleared by a NUMA hinting fault. Depending on these 596 * faults, pages may be migrated for better NUMA placement. 597 * 598 * This is assuming that NUMA faults are handled using PROT_NONE. If 599 * an architecture makes a different choice, it will need further 600 * changes to the core. 
601 */ 602 unsigned long change_prot_numa(struct vm_area_struct *vma, 603 unsigned long addr, unsigned long end) 604 { 605 int nr_updated; 606 607 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); 608 if (nr_updated) 609 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); 610 611 return nr_updated; 612 } 613 #else 614 static unsigned long change_prot_numa(struct vm_area_struct *vma, 615 unsigned long addr, unsigned long end) 616 { 617 return 0; 618 } 619 #endif /* CONFIG_NUMA_BALANCING */ 620 621 static int queue_pages_test_walk(unsigned long start, unsigned long end, 622 struct mm_walk *walk) 623 { 624 struct vm_area_struct *vma = walk->vma; 625 struct queue_pages *qp = walk->private; 626 unsigned long endvma = vma->vm_end; 627 unsigned long flags = qp->flags; 628 629 if (!vma_migratable(vma)) 630 return 1; 631 632 if (endvma > end) 633 endvma = end; 634 if (vma->vm_start > start) 635 start = vma->vm_start; 636 637 if (!(flags & MPOL_MF_DISCONTIG_OK)) { 638 if (!vma->vm_next && vma->vm_end < end) 639 return -EFAULT; 640 if (qp->prev && qp->prev->vm_end < vma->vm_start) 641 return -EFAULT; 642 } 643 644 qp->prev = vma; 645 646 if (flags & MPOL_MF_LAZY) { 647 /* Similar to task_numa_work, skip inaccessible VMAs */ 648 if (!is_vm_hugetlb_page(vma) && 649 (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) && 650 !(vma->vm_flags & VM_MIXEDMAP)) 651 change_prot_numa(vma, start, endvma); 652 return 1; 653 } 654 655 /* queue pages from current vma */ 656 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 657 return 0; 658 return 1; 659 } 660 661 /* 662 * Walk through page tables and collect pages to be migrated. 663 * 664 * If pages found in a given range are on a set of nodes (determined by 665 * @nodes and @flags,) it's isolated and queued to the pagelist which is 666 * passed via @private.) 667 */ 668 static int 669 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 670 nodemask_t *nodes, unsigned long flags, 671 struct list_head *pagelist) 672 { 673 struct queue_pages qp = { 674 .pagelist = pagelist, 675 .flags = flags, 676 .nmask = nodes, 677 .prev = NULL, 678 }; 679 struct mm_walk queue_pages_walk = { 680 .hugetlb_entry = queue_pages_hugetlb, 681 .pmd_entry = queue_pages_pte_range, 682 .test_walk = queue_pages_test_walk, 683 .mm = mm, 684 .private = &qp, 685 }; 686 687 return walk_page_range(start, end, &queue_pages_walk); 688 } 689 690 /* 691 * Apply policy to a single VMA 692 * This must be called with the mmap_sem held for writing. 693 */ 694 static int vma_replace_policy(struct vm_area_struct *vma, 695 struct mempolicy *pol) 696 { 697 int err; 698 struct mempolicy *old; 699 struct mempolicy *new; 700 701 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", 702 vma->vm_start, vma->vm_end, vma->vm_pgoff, 703 vma->vm_ops, vma->vm_file, 704 vma->vm_ops ? vma->vm_ops->set_policy : NULL); 705 706 new = mpol_dup(pol); 707 if (IS_ERR(new)) 708 return PTR_ERR(new); 709 710 if (vma->vm_ops && vma->vm_ops->set_policy) { 711 err = vma->vm_ops->set_policy(vma, new); 712 if (err) 713 goto err_out; 714 } 715 716 old = vma->vm_policy; 717 vma->vm_policy = new; /* protected by mmap_sem */ 718 mpol_put(old); 719 720 return 0; 721 err_out: 722 mpol_put(new); 723 return err; 724 } 725 726 /* Step 2: apply policy to a range and do splits. 
*/ 727 static int mbind_range(struct mm_struct *mm, unsigned long start, 728 unsigned long end, struct mempolicy *new_pol) 729 { 730 struct vm_area_struct *next; 731 struct vm_area_struct *prev; 732 struct vm_area_struct *vma; 733 int err = 0; 734 pgoff_t pgoff; 735 unsigned long vmstart; 736 unsigned long vmend; 737 738 vma = find_vma(mm, start); 739 if (!vma || vma->vm_start > start) 740 return -EFAULT; 741 742 prev = vma->vm_prev; 743 if (start > vma->vm_start) 744 prev = vma; 745 746 for (; vma && vma->vm_start < end; prev = vma, vma = next) { 747 next = vma->vm_next; 748 vmstart = max(start, vma->vm_start); 749 vmend = min(end, vma->vm_end); 750 751 if (mpol_equal(vma_policy(vma), new_pol)) 752 continue; 753 754 pgoff = vma->vm_pgoff + 755 ((vmstart - vma->vm_start) >> PAGE_SHIFT); 756 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, 757 vma->anon_vma, vma->vm_file, pgoff, 758 new_pol, vma->vm_userfaultfd_ctx); 759 if (prev) { 760 vma = prev; 761 next = vma->vm_next; 762 if (mpol_equal(vma_policy(vma), new_pol)) 763 continue; 764 /* vma_merge() joined vma && vma->next, case 8 */ 765 goto replace; 766 } 767 if (vma->vm_start != vmstart) { 768 err = split_vma(vma->vm_mm, vma, vmstart, 1); 769 if (err) 770 goto out; 771 } 772 if (vma->vm_end != vmend) { 773 err = split_vma(vma->vm_mm, vma, vmend, 0); 774 if (err) 775 goto out; 776 } 777 replace: 778 err = vma_replace_policy(vma, new_pol); 779 if (err) 780 goto out; 781 } 782 783 out: 784 return err; 785 } 786 787 /* Set the process memory policy */ 788 static long do_set_mempolicy(unsigned short mode, unsigned short flags, 789 nodemask_t *nodes) 790 { 791 struct mempolicy *new, *old; 792 NODEMASK_SCRATCH(scratch); 793 int ret; 794 795 if (!scratch) 796 return -ENOMEM; 797 798 new = mpol_new(mode, flags, nodes); 799 if (IS_ERR(new)) { 800 ret = PTR_ERR(new); 801 goto out; 802 } 803 804 task_lock(current); 805 ret = mpol_set_nodemask(new, nodes, scratch); 806 if (ret) { 807 task_unlock(current); 808 mpol_put(new); 809 goto out; 810 } 811 old = current->mempolicy; 812 current->mempolicy = new; 813 if (new && new->mode == MPOL_INTERLEAVE && 814 nodes_weight(new->v.nodes)) 815 current->il_next = first_node(new->v.nodes); 816 task_unlock(current); 817 mpol_put(old); 818 ret = 0; 819 out: 820 NODEMASK_SCRATCH_FREE(scratch); 821 return ret; 822 } 823 824 /* 825 * Return nodemask for policy for get_mempolicy() query 826 * 827 * Called with task's alloc_lock held 828 */ 829 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) 830 { 831 nodes_clear(*nodes); 832 if (p == &default_policy) 833 return; 834 835 switch (p->mode) { 836 case MPOL_BIND: 837 /* Fall through */ 838 case MPOL_INTERLEAVE: 839 *nodes = p->v.nodes; 840 break; 841 case MPOL_PREFERRED: 842 if (!(p->flags & MPOL_F_LOCAL)) 843 node_set(p->v.preferred_node, *nodes); 844 /* else return empty node mask for local allocation */ 845 break; 846 default: 847 BUG(); 848 } 849 } 850 851 static int lookup_node(unsigned long addr) 852 { 853 struct page *p; 854 int err; 855 856 err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL); 857 if (err >= 0) { 858 err = page_to_nid(p); 859 put_page(p); 860 } 861 return err; 862 } 863 864 /* Retrieve NUMA policy */ 865 static long do_get_mempolicy(int *policy, nodemask_t *nmask, 866 unsigned long addr, unsigned long flags) 867 { 868 int err; 869 struct mm_struct *mm = current->mm; 870 struct vm_area_struct *vma = NULL; 871 struct mempolicy *pol = current->mempolicy; 872 873 if (flags & 874 ~(unsigned 
long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_node_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1002 */ 1003 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); 1004 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, 1005 flags | MPOL_MF_DISCONTIG_OK, &pagelist); 1006 1007 if (!list_empty(&pagelist)) { 1008 err = migrate_pages(&pagelist, new_node_page, NULL, dest, 1009 MIGRATE_SYNC, MR_SYSCALL); 1010 if (err) 1011 putback_movable_pages(&pagelist); 1012 } 1013 1014 return err; 1015 } 1016 1017 /* 1018 * Move pages between the two nodesets so as to preserve the physical 1019 * layout as much as possible. 1020 * 1021 * Returns the number of page that could not be moved. 1022 */ 1023 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 1024 const nodemask_t *to, int flags) 1025 { 1026 int busy = 0; 1027 int err; 1028 nodemask_t tmp; 1029 1030 err = migrate_prep(); 1031 if (err) 1032 return err; 1033 1034 down_read(&mm->mmap_sem); 1035 1036 /* 1037 * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 1038 * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 1039 * bit in 'tmp', and return that <source, dest> pair for migration. 1040 * The pair of nodemasks 'to' and 'from' define the map. 1041 * 1042 * If no pair of bits is found that way, fallback to picking some 1043 * pair of 'source' and 'dest' bits that are not the same. If the 1044 * 'source' and 'dest' bits are the same, this represents a node 1045 * that will be migrating to itself, so no pages need move. 1046 * 1047 * If no bits are left in 'tmp', or if all remaining bits left 1048 * in 'tmp' correspond to the same bit in 'to', return false 1049 * (nothing left to migrate). 1050 * 1051 * This lets us pick a pair of nodes to migrate between, such that 1052 * if possible the dest node is not already occupied by some other 1053 * source node, minimizing the risk of overloading the memory on a 1054 * node that would happen if we migrated incoming memory to a node 1055 * before migrating outgoing memory source that same node. 1056 * 1057 * A single scan of tmp is sufficient. As we go, we remember the 1058 * most recent <s, d> pair that moved (s != d). If we find a pair 1059 * that not only moved, but what's better, moved to an empty slot 1060 * (d is not set in tmp), then we break out then, with that pair. 1061 * Otherwise when we finish scanning from_tmp, we at least have the 1062 * most recent <s, d> pair that moved. If we get all the way through 1063 * the scan of tmp without finding any node that moved, much less 1064 * moved to an empty node, then there is nothing left worth migrating. 1065 */ 1066 1067 tmp = *from; 1068 while (!nodes_empty(tmp)) { 1069 int s,d; 1070 int source = NUMA_NO_NODE; 1071 int dest = 0; 1072 1073 for_each_node_mask(s, tmp) { 1074 1075 /* 1076 * do_migrate_pages() tries to maintain the relative 1077 * node relationship of the pages established between 1078 * threads and memory areas. 1079 * 1080 * However if the number of source nodes is not equal to 1081 * the number of destination nodes we can not preserve 1082 * this node relative relationship. In that case, skip 1083 * copying memory from a node that is in the destination 1084 * mask. 1085 * 1086 * Example: [2,3,4] -> [3,4,5] moves everything. 1087 * [0-7] - > [3,4,5] moves only 0,1,2,6,7. 1088 */ 1089 1090 if ((nodes_weight(*from) != nodes_weight(*to)) && 1091 (node_isset(s, *to))) 1092 continue; 1093 1094 d = node_remap(s, *from, *to); 1095 if (s == d) 1096 continue; 1097 1098 source = s; /* Node moved. Memorize */ 1099 dest = d; 1100 1101 /* dest not in remaining from nodes? 
*/ 1102 if (!node_isset(dest, tmp)) 1103 break; 1104 } 1105 if (source == NUMA_NO_NODE) 1106 break; 1107 1108 node_clear(source, tmp); 1109 err = migrate_to_node(mm, source, dest, flags); 1110 if (err > 0) 1111 busy += err; 1112 if (err < 0) 1113 break; 1114 } 1115 up_read(&mm->mmap_sem); 1116 if (err < 0) 1117 return err; 1118 return busy; 1119 1120 } 1121 1122 /* 1123 * Allocate a new page for page migration based on vma policy. 1124 * Start by assuming the page is mapped by the same vma as contains @start. 1125 * Search forward from there, if not. N.B., this assumes that the 1126 * list of pages handed to migrate_pages()--which is how we get here-- 1127 * is in virtual address order. 1128 */ 1129 static struct page *new_page(struct page *page, unsigned long start, int **x) 1130 { 1131 struct vm_area_struct *vma; 1132 unsigned long uninitialized_var(address); 1133 1134 vma = find_vma(current->mm, start); 1135 while (vma) { 1136 address = page_address_in_vma(page, vma); 1137 if (address != -EFAULT) 1138 break; 1139 vma = vma->vm_next; 1140 } 1141 1142 if (PageHuge(page)) { 1143 BUG_ON(!vma); 1144 return alloc_huge_page_noerr(vma, address, 1); 1145 } 1146 /* 1147 * if !vma, alloc_page_vma() will use task or system default policy 1148 */ 1149 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 1150 } 1151 #else 1152 1153 static void migrate_page_add(struct page *page, struct list_head *pagelist, 1154 unsigned long flags) 1155 { 1156 } 1157 1158 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 1159 const nodemask_t *to, int flags) 1160 { 1161 return -ENOSYS; 1162 } 1163 1164 static struct page *new_page(struct page *page, unsigned long start, int **x) 1165 { 1166 return NULL; 1167 } 1168 #endif 1169 1170 static long do_mbind(unsigned long start, unsigned long len, 1171 unsigned short mode, unsigned short mode_flags, 1172 nodemask_t *nmask, unsigned long flags) 1173 { 1174 struct mm_struct *mm = current->mm; 1175 struct mempolicy *new; 1176 unsigned long end; 1177 int err; 1178 LIST_HEAD(pagelist); 1179 1180 if (flags & ~(unsigned long)MPOL_MF_VALID) 1181 return -EINVAL; 1182 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 1183 return -EPERM; 1184 1185 if (start & ~PAGE_MASK) 1186 return -EINVAL; 1187 1188 if (mode == MPOL_DEFAULT) 1189 flags &= ~MPOL_MF_STRICT; 1190 1191 len = (len + PAGE_SIZE - 1) & PAGE_MASK; 1192 end = start + len; 1193 1194 if (end < start) 1195 return -EINVAL; 1196 if (end == start) 1197 return 0; 1198 1199 new = mpol_new(mode, mode_flags, nmask); 1200 if (IS_ERR(new)) 1201 return PTR_ERR(new); 1202 1203 if (flags & MPOL_MF_LAZY) 1204 new->flags |= MPOL_F_MOF; 1205 1206 /* 1207 * If we are using the default policy then operation 1208 * on discontinuous address spaces is okay after all 1209 */ 1210 if (!new) 1211 flags |= MPOL_MF_DISCONTIG_OK; 1212 1213 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1214 start, start + len, mode, mode_flags, 1215 nmask ? 
nodes_addr(*nmask)[0] : NUMA_NO_NODE); 1216 1217 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 1218 1219 err = migrate_prep(); 1220 if (err) 1221 goto mpol_out; 1222 } 1223 { 1224 NODEMASK_SCRATCH(scratch); 1225 if (scratch) { 1226 down_write(&mm->mmap_sem); 1227 task_lock(current); 1228 err = mpol_set_nodemask(new, nmask, scratch); 1229 task_unlock(current); 1230 if (err) 1231 up_write(&mm->mmap_sem); 1232 } else 1233 err = -ENOMEM; 1234 NODEMASK_SCRATCH_FREE(scratch); 1235 } 1236 if (err) 1237 goto mpol_out; 1238 1239 err = queue_pages_range(mm, start, end, nmask, 1240 flags | MPOL_MF_INVERT, &pagelist); 1241 if (!err) 1242 err = mbind_range(mm, start, end, new); 1243 1244 if (!err) { 1245 int nr_failed = 0; 1246 1247 if (!list_empty(&pagelist)) { 1248 WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1249 nr_failed = migrate_pages(&pagelist, new_page, NULL, 1250 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1251 if (nr_failed) 1252 putback_movable_pages(&pagelist); 1253 } 1254 1255 if (nr_failed && (flags & MPOL_MF_STRICT)) 1256 err = -EIO; 1257 } else 1258 putback_movable_pages(&pagelist); 1259 1260 up_write(&mm->mmap_sem); 1261 mpol_out: 1262 mpol_put(new); 1263 return err; 1264 } 1265 1266 /* 1267 * User space interface with variable sized bitmaps for nodelists. 1268 */ 1269 1270 /* Copy a node mask from user space. */ 1271 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 1272 unsigned long maxnode) 1273 { 1274 unsigned long k; 1275 unsigned long nlongs; 1276 unsigned long endmask; 1277 1278 --maxnode; 1279 nodes_clear(*nodes); 1280 if (maxnode == 0 || !nmask) 1281 return 0; 1282 if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1283 return -EINVAL; 1284 1285 nlongs = BITS_TO_LONGS(maxnode); 1286 if ((maxnode % BITS_PER_LONG) == 0) 1287 endmask = ~0UL; 1288 else 1289 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 1290 1291 /* When the user specified more nodes than supported just check 1292 if the non supported part is all zero. */ 1293 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 1294 if (nlongs > PAGE_SIZE/sizeof(long)) 1295 return -EINVAL; 1296 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 1297 unsigned long t; 1298 if (get_user(t, nmask + k)) 1299 return -EFAULT; 1300 if (k == nlongs - 1) { 1301 if (t & endmask) 1302 return -EINVAL; 1303 } else if (t) 1304 return -EINVAL; 1305 } 1306 nlongs = BITS_TO_LONGS(MAX_NUMNODES); 1307 endmask = ~0UL; 1308 } 1309 1310 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 1311 return -EFAULT; 1312 nodes_addr(*nodes)[nlongs-1] &= endmask; 1313 return 0; 1314 } 1315 1316 /* Copy a kernel node mask to user space */ 1317 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 1318 nodemask_t *nodes) 1319 { 1320 unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1321 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 1322 1323 if (copy > nbytes) { 1324 if (copy > PAGE_SIZE) 1325 return -EINVAL; 1326 if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 1327 return -EFAULT; 1328 copy = nbytes; 1329 } 1330 return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 1331 } 1332 1333 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1334 unsigned long, mode, const unsigned long __user *, nmask, 1335 unsigned long, maxnode, unsigned, flags) 1336 { 1337 nodemask_t nodes; 1338 int err; 1339 unsigned short mode_flags; 1340 1341 mode_flags = mode & MPOL_MODE_FLAGS; 1342 mode &= ~MPOL_MODE_FLAGS; 1343 if (mode >= MPOL_MAX) 1344 return -EINVAL; 1345 if ((mode_flags & MPOL_F_STATIC_NODES) && 1346 (mode_flags & MPOL_F_RELATIVE_NODES)) 1347 return -EINVAL; 1348 err = get_nodes(&nodes, nmask, maxnode); 1349 if (err) 1350 return err; 1351 return do_mbind(start, len, mode, mode_flags, &nodes, flags); 1352 } 1353 1354 /* Set the process memory policy */ 1355 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1356 unsigned long, maxnode) 1357 { 1358 int err; 1359 nodemask_t nodes; 1360 unsigned short flags; 1361 1362 flags = mode & MPOL_MODE_FLAGS; 1363 mode &= ~MPOL_MODE_FLAGS; 1364 if ((unsigned int)mode >= MPOL_MAX) 1365 return -EINVAL; 1366 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 1367 return -EINVAL; 1368 err = get_nodes(&nodes, nmask, maxnode); 1369 if (err) 1370 return err; 1371 return do_set_mempolicy(mode, flags, &nodes); 1372 } 1373 1374 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1375 const unsigned long __user *, old_nodes, 1376 const unsigned long __user *, new_nodes) 1377 { 1378 const struct cred *cred = current_cred(), *tcred; 1379 struct mm_struct *mm = NULL; 1380 struct task_struct *task; 1381 nodemask_t task_nodes; 1382 int err; 1383 nodemask_t *old; 1384 nodemask_t *new; 1385 NODEMASK_SCRATCH(scratch); 1386 1387 if (!scratch) 1388 return -ENOMEM; 1389 1390 old = &scratch->mask1; 1391 new = &scratch->mask2; 1392 1393 err = get_nodes(old, old_nodes, maxnode); 1394 if (err) 1395 goto out; 1396 1397 err = get_nodes(new, new_nodes, maxnode); 1398 if (err) 1399 goto out; 1400 1401 /* Find the mm_struct */ 1402 rcu_read_lock(); 1403 task = pid ? find_task_by_vpid(pid) : current; 1404 if (!task) { 1405 rcu_read_unlock(); 1406 err = -ESRCH; 1407 goto out; 1408 } 1409 get_task_struct(task); 1410 1411 err = -EINVAL; 1412 1413 /* 1414 * Check if this process has the right to modify the specified 1415 * process. The right exists if the process has administrative 1416 * capabilities, superuser privileges or the same 1417 * userid as the target process. 1418 */ 1419 tcred = __task_cred(task); 1420 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && 1421 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && 1422 !capable(CAP_SYS_NICE)) { 1423 rcu_read_unlock(); 1424 err = -EPERM; 1425 goto out_put; 1426 } 1427 rcu_read_unlock(); 1428 1429 task_nodes = cpuset_mems_allowed(task); 1430 /* Is the user allowed to access the target nodes? */ 1431 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 1432 err = -EPERM; 1433 goto out_put; 1434 } 1435 1436 if (!nodes_subset(*new, node_states[N_MEMORY])) { 1437 err = -EINVAL; 1438 goto out_put; 1439 } 1440 1441 err = security_task_movememory(task); 1442 if (err) 1443 goto out_put; 1444 1445 mm = get_task_mm(task); 1446 put_task_struct(task); 1447 1448 if (!mm) { 1449 err = -EINVAL; 1450 goto out; 1451 } 1452 1453 err = do_migrate_pages(mm, old, new, 1454 capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 1455 1456 mmput(mm); 1457 out: 1458 NODEMASK_SCRATCH_FREE(scratch); 1459 1460 return err; 1461 1462 out_put: 1463 put_task_struct(task); 1464 goto out; 1465 1466 } 1467 1468 1469 /* Retrieve NUMA policy */ 1470 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1471 unsigned long __user *, nmask, unsigned long, maxnode, 1472 unsigned long, addr, unsigned long, flags) 1473 { 1474 int err; 1475 int uninitialized_var(pval); 1476 nodemask_t nodes; 1477 1478 if (nmask != NULL && maxnode < MAX_NUMNODES) 1479 return -EINVAL; 1480 1481 err = do_get_mempolicy(&pval, &nodes, addr, flags); 1482 1483 if (err) 1484 return err; 1485 1486 if (policy && put_user(pval, policy)) 1487 return -EFAULT; 1488 1489 if (nmask) 1490 err = copy_nodes_to_user(nmask, maxnode, &nodes); 1491 1492 return err; 1493 } 1494 1495 #ifdef CONFIG_COMPAT 1496 1497 COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1498 compat_ulong_t __user *, nmask, 1499 compat_ulong_t, maxnode, 1500 compat_ulong_t, addr, compat_ulong_t, flags) 1501 { 1502 long err; 1503 unsigned long __user *nm = NULL; 1504 unsigned long nr_bits, alloc_size; 1505 DECLARE_BITMAP(bm, MAX_NUMNODES); 1506 1507 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 1508 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1509 1510 if (nmask) 1511 nm = compat_alloc_user_space(alloc_size); 1512 1513 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 1514 1515 if (!err && nmask) { 1516 unsigned long copy_size; 1517 copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 1518 err = copy_from_user(bm, nm, copy_size); 1519 /* ensure entire bitmap is zeroed */ 1520 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 1521 err |= compat_put_bitmap(nmask, bm, nr_bits); 1522 } 1523 1524 return err; 1525 } 1526 1527 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1528 compat_ulong_t, maxnode) 1529 { 1530 long err = 0; 1531 unsigned long __user *nm = NULL; 1532 unsigned long nr_bits, alloc_size; 1533 DECLARE_BITMAP(bm, MAX_NUMNODES); 1534 1535 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 1536 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1537 1538 if (nmask) { 1539 err = compat_get_bitmap(bm, nmask, nr_bits); 1540 nm = compat_alloc_user_space(alloc_size); 1541 err |= copy_to_user(nm, bm, alloc_size); 1542 } 1543 1544 if (err) 1545 return -EFAULT; 1546 1547 return sys_set_mempolicy(mode, nm, nr_bits+1); 1548 } 1549 1550 COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1551 compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1552 compat_ulong_t, maxnode, compat_ulong_t, flags) 1553 { 1554 long err = 0; 1555 unsigned long __user *nm = NULL; 1556 unsigned long nr_bits, alloc_size; 1557 nodemask_t bm; 1558 1559 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 1560 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1561 1562 if (nmask) { 1563 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); 1564 nm = compat_alloc_user_space(alloc_size); 1565 err |= copy_to_user(nm, nodes_addr(bm), alloc_size); 1566 } 1567 1568 if (err) 1569 return -EFAULT; 1570 1571 return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 1572 } 1573 1574 #endif 1575 1576 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 1577 unsigned long addr) 1578 { 1579 struct mempolicy *pol = NULL; 1580 1581 if (vma) { 1582 if (vma->vm_ops && vma->vm_ops->get_policy) { 1583 pol = vma->vm_ops->get_policy(vma, addr); 1584 } else if (vma->vm_policy) { 1585 pol = vma->vm_policy; 
1586 1587 /* 1588 * shmem_alloc_page() passes MPOL_F_SHARED policy with 1589 * a pseudo vma whose vma->vm_ops=NULL. Take a reference 1590 * count on these policies which will be dropped by 1591 * mpol_cond_put() later 1592 */ 1593 if (mpol_needs_cond_ref(pol)) 1594 mpol_get(pol); 1595 } 1596 } 1597 1598 return pol; 1599 } 1600 1601 /* 1602 * get_vma_policy(@vma, @addr) 1603 * @vma: virtual memory area whose policy is sought 1604 * @addr: address in @vma for shared policy lookup 1605 * 1606 * Returns effective policy for a VMA at specified address. 1607 * Falls back to current->mempolicy or system default policy, as necessary. 1608 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 1609 * count--added by the get_policy() vm_op, as appropriate--to protect against 1610 * freeing by another task. It is the caller's responsibility to free the 1611 * extra reference for shared policies. 1612 */ 1613 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1614 unsigned long addr) 1615 { 1616 struct mempolicy *pol = __get_vma_policy(vma, addr); 1617 1618 if (!pol) 1619 pol = get_task_policy(current); 1620 1621 return pol; 1622 } 1623 1624 bool vma_policy_mof(struct vm_area_struct *vma) 1625 { 1626 struct mempolicy *pol; 1627 1628 if (vma->vm_ops && vma->vm_ops->get_policy) { 1629 bool ret = false; 1630 1631 pol = vma->vm_ops->get_policy(vma, vma->vm_start); 1632 if (pol && (pol->flags & MPOL_F_MOF)) 1633 ret = true; 1634 mpol_cond_put(pol); 1635 1636 return ret; 1637 } 1638 1639 pol = vma->vm_policy; 1640 if (!pol) 1641 pol = get_task_policy(current); 1642 1643 return pol->flags & MPOL_F_MOF; 1644 } 1645 1646 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) 1647 { 1648 enum zone_type dynamic_policy_zone = policy_zone; 1649 1650 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); 1651 1652 /* 1653 * if policy->v.nodes has movable memory only, 1654 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. 1655 * 1656 * policy->v.nodes is intersect with node_states[N_MEMORY]. 1657 * so if the following test faile, it implies 1658 * policy->v.nodes has movable memory only. 1659 */ 1660 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1661 dynamic_policy_zone = ZONE_MOVABLE; 1662 1663 return zone >= dynamic_policy_zone; 1664 } 1665 1666 /* 1667 * Return a nodemask representing a mempolicy for filtering nodes for 1668 * page allocation 1669 */ 1670 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 1671 { 1672 /* Lower zones don't get a nodemask applied for MPOL_BIND */ 1673 if (unlikely(policy->mode == MPOL_BIND) && 1674 apply_policy_zone(policy, gfp_zone(gfp)) && 1675 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 1676 return &policy->v.nodes; 1677 1678 return NULL; 1679 } 1680 1681 /* Return a zonelist indicated by gfp for node representing a mempolicy */ 1682 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, 1683 int nd) 1684 { 1685 if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL)) 1686 nd = policy->v.preferred_node; 1687 else { 1688 /* 1689 * __GFP_THISNODE shouldn't even be used with the bind policy 1690 * because we might easily break the expectation to stay on the 1691 * requested node and not break the policy. 
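		 *
		 * Concretely (an illustrative case, not taken from a bug
		 * report): a __GFP_THISNODE request aimed at node 0 while the
		 * policy is MPOL_BIND over nodes {1,2} would have to either
		 * leave the bound nodes or fail, hence the warning below.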
1692 */ 1693 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 1694 } 1695 1696 return node_zonelist(nd, gfp); 1697 } 1698 1699 /* Do dynamic interleaving for a process */ 1700 static unsigned interleave_nodes(struct mempolicy *policy) 1701 { 1702 unsigned nid, next; 1703 struct task_struct *me = current; 1704 1705 nid = me->il_next; 1706 next = next_node_in(nid, policy->v.nodes); 1707 if (next < MAX_NUMNODES) 1708 me->il_next = next; 1709 return nid; 1710 } 1711 1712 /* 1713 * Depending on the memory policy provide a node from which to allocate the 1714 * next slab entry. 1715 */ 1716 unsigned int mempolicy_slab_node(void) 1717 { 1718 struct mempolicy *policy; 1719 int node = numa_mem_id(); 1720 1721 if (in_interrupt()) 1722 return node; 1723 1724 policy = current->mempolicy; 1725 if (!policy || policy->flags & MPOL_F_LOCAL) 1726 return node; 1727 1728 switch (policy->mode) { 1729 case MPOL_PREFERRED: 1730 /* 1731 * handled MPOL_F_LOCAL above 1732 */ 1733 return policy->v.preferred_node; 1734 1735 case MPOL_INTERLEAVE: 1736 return interleave_nodes(policy); 1737 1738 case MPOL_BIND: { 1739 struct zoneref *z; 1740 1741 /* 1742 * Follow bind policy behavior and start allocation at the 1743 * first node. 1744 */ 1745 struct zonelist *zonelist; 1746 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1747 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1748 z = first_zones_zonelist(zonelist, highest_zoneidx, 1749 &policy->v.nodes); 1750 return z->zone ? z->zone->node : node; 1751 } 1752 1753 default: 1754 BUG(); 1755 } 1756 } 1757 1758 /* 1759 * Do static interleaving for a VMA with known offset @n. Returns the n'th 1760 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the 1761 * number of present nodes. 1762 */ 1763 static unsigned offset_il_node(struct mempolicy *pol, 1764 struct vm_area_struct *vma, unsigned long n) 1765 { 1766 unsigned nnodes = nodes_weight(pol->v.nodes); 1767 unsigned target; 1768 int i; 1769 int nid; 1770 1771 if (!nnodes) 1772 return numa_node_id(); 1773 target = (unsigned int)n % nnodes; 1774 nid = first_node(pol->v.nodes); 1775 for (i = 0; i < target; i++) 1776 nid = next_node(nid, pol->v.nodes); 1777 return nid; 1778 } 1779 1780 /* Determine a node number for interleave */ 1781 static inline unsigned interleave_nid(struct mempolicy *pol, 1782 struct vm_area_struct *vma, unsigned long addr, int shift) 1783 { 1784 if (vma) { 1785 unsigned long off; 1786 1787 /* 1788 * for small pages, there is no difference between 1789 * shift and PAGE_SHIFT, so the bit-shift is safe. 1790 * for huge pages, since vm_pgoff is in units of small 1791 * pages, we need to shift off the always 0 bits to get 1792 * a useful offset. 1793 */ 1794 BUG_ON(shift < PAGE_SHIFT); 1795 off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 1796 off += (addr - vma->vm_start) >> shift; 1797 return offset_il_node(pol, vma, off); 1798 } else 1799 return interleave_nodes(pol); 1800 } 1801 1802 #ifdef CONFIG_HUGETLBFS 1803 /* 1804 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol) 1805 * @vma: virtual memory area whose policy is sought 1806 * @addr: address in @vma for shared policy lookup and interleave policy 1807 * @gfp_flags: for requested zone 1808 * @mpol: pointer to mempolicy pointer for reference counted mempolicy 1809 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask 1810 * 1811 * Returns a zonelist suitable for a huge page allocation and a pointer 1812 * to the struct mempolicy for conditional unref after allocation. 
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 *
 * Must be protected by read_mems_allowed_begin()
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol,
				nodemask_t **nodemask)
{
	struct zonelist *zl;

	*mpol = get_vma_policy(vma, addr);
	*nodemask = NULL;	/* assume !MPOL_BIND */

	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
				huge_page_shift(hstate_vma(vma))), gfp_flags);
	} else {
		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
		if ((*mpol)->mode == MPOL_BIND)
			*nodemask = &(*mpol)->v.nodes;
	}
	return zl;
}

/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy.  Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	int nid;

	if (!(mask && current->mempolicy))
		return false;

	task_lock(current);
	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		if (mempolicy->flags & MPOL_F_LOCAL)
			nid = numa_node_id();
		else
			nid = mempolicy->v.preferred_node;
		init_nodemask_of_node(mask, nid);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*mask = mempolicy->v.nodes;
		break;

	default:
		BUG();
	}
	task_unlock(current);

	return true;
}
#endif

/*
 * mempolicy_nodemask_intersects
 *
 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
 * policy.  Otherwise, check for intersection between mask and the policy
 * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
 * policy, always return true since it may allocate elsewhere on fallback.
 *
 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
 */
bool mempolicy_nodemask_intersects(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	bool ret = true;

	if (!mask)
		return ret;
	task_lock(tsk);
	mempolicy = tsk->mempolicy;
	if (!mempolicy)
		goto out;

	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		/*
		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
		 * allocate from; they may fall back to other nodes when OOM.
		 * Thus, it's possible for tsk to have allocated memory from
		 * nodes in mask.
1918 */ 1919 break; 1920 case MPOL_BIND: 1921 case MPOL_INTERLEAVE: 1922 ret = nodes_intersects(mempolicy->v.nodes, *mask); 1923 break; 1924 default: 1925 BUG(); 1926 } 1927 out: 1928 task_unlock(tsk); 1929 return ret; 1930 } 1931 1932 /* Allocate a page in interleaved policy. 1933 Own path because it needs to do special accounting. */ 1934 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 1935 unsigned nid) 1936 { 1937 struct zonelist *zl; 1938 struct page *page; 1939 1940 zl = node_zonelist(nid, gfp); 1941 page = __alloc_pages(gfp, order, zl); 1942 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0])) 1943 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 1944 return page; 1945 } 1946 1947 /** 1948 * alloc_pages_vma - Allocate a page for a VMA. 1949 * 1950 * @gfp: 1951 * %GFP_USER user allocation. 1952 * %GFP_KERNEL kernel allocations, 1953 * %GFP_HIGHMEM highmem/user allocations, 1954 * %GFP_FS allocation should not call back into a file system. 1955 * %GFP_ATOMIC don't sleep. 1956 * 1957 * @order:Order of the GFP allocation. 1958 * @vma: Pointer to VMA or NULL if not available. 1959 * @addr: Virtual Address of the allocation. Must be inside the VMA. 1960 * @node: Which node to prefer for allocation (modulo policy). 1961 * @hugepage: for hugepages try only the preferred node if possible 1962 * 1963 * This function allocates a page from the kernel page pool and applies 1964 * a NUMA policy associated with the VMA or the current process. 1965 * When VMA is not NULL caller must hold down_read on the mmap_sem of the 1966 * mm_struct of the VMA to prevent it from going away. Should be used for 1967 * all allocations for pages that will be mapped into user space. Returns 1968 * NULL when no page can be allocated. 1969 */ 1970 struct page * 1971 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 1972 unsigned long addr, int node, bool hugepage) 1973 { 1974 struct mempolicy *pol; 1975 struct page *page; 1976 unsigned int cpuset_mems_cookie; 1977 struct zonelist *zl; 1978 nodemask_t *nmask; 1979 1980 retry_cpuset: 1981 pol = get_vma_policy(vma, addr); 1982 cpuset_mems_cookie = read_mems_allowed_begin(); 1983 1984 if (pol->mode == MPOL_INTERLEAVE) { 1985 unsigned nid; 1986 1987 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 1988 mpol_cond_put(pol); 1989 page = alloc_page_interleave(gfp, order, nid); 1990 goto out; 1991 } 1992 1993 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 1994 int hpage_node = node; 1995 1996 /* 1997 * For hugepage allocation and non-interleave policy which 1998 * allows the current node (or other explicitly preferred 1999 * node) we only try to allocate from the current/preferred 2000 * node and don't fall back to other nodes, as the cost of 2001 * remote accesses would likely offset THP benefits. 2002 * 2003 * If the policy is interleave, or does not allow the current 2004 * node in its nodemask, we allocate the standard way. 
		 */
		if (pol->mode == MPOL_PREFERRED &&
				!(pol->flags & MPOL_F_LOCAL))
			hpage_node = pol->v.preferred_node;

		nmask = policy_nodemask(gfp, pol);
		if (!nmask || node_isset(hpage_node, *nmask)) {
			mpol_cond_put(pol);
			page = __alloc_pages_node(hpage_node,
						gfp | __GFP_THISNODE, order);
			goto out;
		}
	}

	nmask = policy_nodemask(gfp, pol);
	zl = policy_zonelist(gfp, pol, node);
	page = __alloc_pages_nodemask(gfp, order, zl, nmask);
	mpol_cond_put(pol);
out:
	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;
	return page;
}

/**
 *	alloc_pages_current - Allocate pages.
 *
 *	@gfp:
 *		%GFP_USER	user allocation,
 *		%GFP_KERNEL	kernel allocation,
 *		%GFP_HIGHMEM	highmem allocation,
 *		%GFP_FS		don't call back into a file system.
 *		%GFP_ATOMIC	don't sleep.
 *	@order: Power of two of allocation size in pages. 0 is a single page.
 *
 *	Allocate a page from the kernel page pool.  When not in
 *	interrupt context, apply the current process NUMA policy.
 *	Returns NULL when no page can be allocated.
 *
 *	Don't call cpuset_update_task_memory_state() unless
 *	1) it's ok to take cpuset_sem (can WAIT), and
 *	2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = &default_policy;
	struct page *page;
	unsigned int cpuset_mems_cookie;

	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
		pol = get_task_policy(current);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();

	/*
	 * No reference counting needed for current->mempolicy
	 * nor system default_policy
	 */
	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
	else
		page = __alloc_pages_nodemask(gfp, order,
				policy_zonelist(gfp, pol, numa_node_id()),
				policy_nodemask(gfp, pol));

	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return page;
}
EXPORT_SYMBOL(alloc_pages_current);

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	struct mempolicy *pol = mpol_dup(vma_policy(src));

	if (IS_ERR(pol))
		return PTR_ERR(pol);
	dst->vm_policy = pol;
	return 0;
}

/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset-relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by another task (the task that changes
 * the cpuset's mems), so we needn't do rebind work for the current task.
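 * For example, if fork() duplicates current->mempolicy while the task's
 * cpuset is being moved from mems 0-1 to 2-3, __mpol_dup() below rebinds
 * the copy to 2-3, so the child never holds a policy that refers to nodes
 * it is no longer allowed to use.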
2097 */ 2098 2099 /* Slow path of a mempolicy duplicate */ 2100 struct mempolicy *__mpol_dup(struct mempolicy *old) 2101 { 2102 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 2103 2104 if (!new) 2105 return ERR_PTR(-ENOMEM); 2106 2107 /* task's mempolicy is protected by alloc_lock */ 2108 if (old == current->mempolicy) { 2109 task_lock(current); 2110 *new = *old; 2111 task_unlock(current); 2112 } else 2113 *new = *old; 2114 2115 if (current_cpuset_is_being_rebound()) { 2116 nodemask_t mems = cpuset_mems_allowed(current); 2117 if (new->flags & MPOL_F_REBINDING) 2118 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2); 2119 else 2120 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE); 2121 } 2122 atomic_set(&new->refcnt, 1); 2123 return new; 2124 } 2125 2126 /* Slow path of a mempolicy comparison */ 2127 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 2128 { 2129 if (!a || !b) 2130 return false; 2131 if (a->mode != b->mode) 2132 return false; 2133 if (a->flags != b->flags) 2134 return false; 2135 if (mpol_store_user_nodemask(a)) 2136 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2137 return false; 2138 2139 switch (a->mode) { 2140 case MPOL_BIND: 2141 /* Fall through */ 2142 case MPOL_INTERLEAVE: 2143 return !!nodes_equal(a->v.nodes, b->v.nodes); 2144 case MPOL_PREFERRED: 2145 return a->v.preferred_node == b->v.preferred_node; 2146 default: 2147 BUG(); 2148 return false; 2149 } 2150 } 2151 2152 /* 2153 * Shared memory backing store policy support. 2154 * 2155 * Remember policies even when nobody has shared memory mapped. 2156 * The policies are kept in Red-Black tree linked from the inode. 2157 * They are protected by the sp->lock rwlock, which should be held 2158 * for any accesses to the tree. 2159 */ 2160 2161 /* 2162 * lookup first element intersecting start-end. Caller holds sp->lock for 2163 * reading or for writing 2164 */ 2165 static struct sp_node * 2166 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 2167 { 2168 struct rb_node *n = sp->root.rb_node; 2169 2170 while (n) { 2171 struct sp_node *p = rb_entry(n, struct sp_node, nd); 2172 2173 if (start >= p->end) 2174 n = n->rb_right; 2175 else if (end <= p->start) 2176 n = n->rb_left; 2177 else 2178 break; 2179 } 2180 if (!n) 2181 return NULL; 2182 for (;;) { 2183 struct sp_node *w = NULL; 2184 struct rb_node *prev = rb_prev(n); 2185 if (!prev) 2186 break; 2187 w = rb_entry(prev, struct sp_node, nd); 2188 if (w->end <= start) 2189 break; 2190 n = prev; 2191 } 2192 return rb_entry(n, struct sp_node, nd); 2193 } 2194 2195 /* 2196 * Insert a new shared policy into the list. Caller holds sp->lock for 2197 * writing. 2198 */ 2199 static void sp_insert(struct shared_policy *sp, struct sp_node *new) 2200 { 2201 struct rb_node **p = &sp->root.rb_node; 2202 struct rb_node *parent = NULL; 2203 struct sp_node *nd; 2204 2205 while (*p) { 2206 parent = *p; 2207 nd = rb_entry(parent, struct sp_node, nd); 2208 if (new->start < nd->start) 2209 p = &(*p)->rb_left; 2210 else if (new->end > nd->end) 2211 p = &(*p)->rb_right; 2212 else 2213 BUG(); 2214 } 2215 rb_link_node(&new->nd, parent, p); 2216 rb_insert_color(&new->nd, &sp->root); 2217 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 2218 new->policy ? 
new->policy->mode : 0); 2219 } 2220 2221 /* Find shared policy intersecting idx */ 2222 struct mempolicy * 2223 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 2224 { 2225 struct mempolicy *pol = NULL; 2226 struct sp_node *sn; 2227 2228 if (!sp->root.rb_node) 2229 return NULL; 2230 read_lock(&sp->lock); 2231 sn = sp_lookup(sp, idx, idx+1); 2232 if (sn) { 2233 mpol_get(sn->policy); 2234 pol = sn->policy; 2235 } 2236 read_unlock(&sp->lock); 2237 return pol; 2238 } 2239 2240 static void sp_free(struct sp_node *n) 2241 { 2242 mpol_put(n->policy); 2243 kmem_cache_free(sn_cache, n); 2244 } 2245 2246 /** 2247 * mpol_misplaced - check whether current page node is valid in policy 2248 * 2249 * @page: page to be checked 2250 * @vma: vm area where page mapped 2251 * @addr: virtual address where page mapped 2252 * 2253 * Lookup current policy node id for vma,addr and "compare to" page's 2254 * node id. 2255 * 2256 * Returns: 2257 * -1 - not misplaced, page is in the right node 2258 * node - node id where the page should be 2259 * 2260 * Policy determination "mimics" alloc_page_vma(). 2261 * Called from fault path where we know the vma and faulting address. 2262 */ 2263 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2264 { 2265 struct mempolicy *pol; 2266 struct zoneref *z; 2267 int curnid = page_to_nid(page); 2268 unsigned long pgoff; 2269 int thiscpu = raw_smp_processor_id(); 2270 int thisnid = cpu_to_node(thiscpu); 2271 int polnid = -1; 2272 int ret = -1; 2273 2274 BUG_ON(!vma); 2275 2276 pol = get_vma_policy(vma, addr); 2277 if (!(pol->flags & MPOL_F_MOF)) 2278 goto out; 2279 2280 switch (pol->mode) { 2281 case MPOL_INTERLEAVE: 2282 BUG_ON(addr >= vma->vm_end); 2283 BUG_ON(addr < vma->vm_start); 2284 2285 pgoff = vma->vm_pgoff; 2286 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 2287 polnid = offset_il_node(pol, vma, pgoff); 2288 break; 2289 2290 case MPOL_PREFERRED: 2291 if (pol->flags & MPOL_F_LOCAL) 2292 polnid = numa_node_id(); 2293 else 2294 polnid = pol->v.preferred_node; 2295 break; 2296 2297 case MPOL_BIND: 2298 2299 /* 2300 * allows binding to multiple nodes. 2301 * use current page if in policy nodemask, 2302 * else select nearest allowed node, if any. 2303 * If no allowed nodes, use current [!misplaced]. 2304 */ 2305 if (node_isset(curnid, pol->v.nodes)) 2306 goto out; 2307 z = first_zones_zonelist( 2308 node_zonelist(numa_node_id(), GFP_HIGHUSER), 2309 gfp_zone(GFP_HIGHUSER), 2310 &pol->v.nodes); 2311 polnid = z->zone->node; 2312 break; 2313 2314 default: 2315 BUG(); 2316 } 2317 2318 /* Migrate the page towards the node whose CPU is referencing it */ 2319 if (pol->flags & MPOL_F_MORON) { 2320 polnid = thisnid; 2321 2322 if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2323 goto out; 2324 } 2325 2326 if (curnid != polnid) 2327 ret = polnid; 2328 out: 2329 mpol_cond_put(pol); 2330 2331 return ret; 2332 } 2333 2334 /* 2335 * Drop the (possibly final) reference to task->mempolicy. It needs to be 2336 * dropped after task->mempolicy is set to NULL so that any allocation done as 2337 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2338 * policy. 
 */
void mpol_put_task_policy(struct task_struct *task)
{
	struct mempolicy *pol;

	task_lock(task);
	pol = task->mempolicy;
	task->mempolicy = NULL;
	task_unlock(task);
	mpol_put(pol);
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	sp_free(n);
}

static void sp_node_init(struct sp_node *node, unsigned long start,
			unsigned long end, struct mempolicy *pol)
{
	node->start = start;
	node->end = end;
	node->policy = pol;
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n;
	struct mempolicy *newpol;

	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n)
		return NULL;

	newpol = mpol_dup(pol);
	if (IS_ERR(newpol)) {
		kmem_cache_free(sn_cache, n);
		return NULL;
	}
	newpol->flags |= MPOL_F_SHARED;
	sp_node_init(n, start, end, newpol);

	return n;
}

/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n;
	struct sp_node *n_new = NULL;
	struct mempolicy *mpol_new = NULL;
	int ret = 0;

restart:
	write_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!n_new)
					goto alloc_new;

				*mpol_new = *n->policy;
				atomic_set(&mpol_new->refcnt, 1);
				sp_node_init(n_new, end, n->end, mpol_new);
				n->end = start;
				sp_insert(sp, n_new);
				n_new = NULL;
				mpol_new = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	write_unlock(&sp->lock);
	ret = 0;

err_out:
	if (mpol_new)
		mpol_put(mpol_new);
	if (n_new)
		kmem_cache_free(sn_cache, n_new);

	return ret;

alloc_new:
	write_unlock(&sp->lock);
	ret = -ENOMEM;
	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n_new)
		goto err_out;
	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!mpol_new)
		goto err_out;
	goto restart;
}

/**
 * mpol_shared_policy_init - initialize shared policy for inode
 * @sp: pointer to inode shared policy
 * @mpol:  struct mempolicy to install
 *
 * Install non-NULL @mpol in inode's shared policy rb-tree.
 * On entry, the current task has a reference on a non-NULL @mpol.
 * This must be released on exit.
 * This is called at get_inode() time, so we can use GFP_KERNEL.
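 * For example, a tmpfs mount with "mpol=interleave:0-3" passes that
 * mount-option mempolicy in as @mpol; it is contextualized against the
 * caller's cpuset via mpol_set_nodemask() and then installed over the
 * whole file range through the pseudo-vma below.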
2462 */ 2463 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 2464 { 2465 int ret; 2466 2467 sp->root = RB_ROOT; /* empty tree == default mempolicy */ 2468 rwlock_init(&sp->lock); 2469 2470 if (mpol) { 2471 struct vm_area_struct pvma; 2472 struct mempolicy *new; 2473 NODEMASK_SCRATCH(scratch); 2474 2475 if (!scratch) 2476 goto put_mpol; 2477 /* contextualize the tmpfs mount point mempolicy */ 2478 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 2479 if (IS_ERR(new)) 2480 goto free_scratch; /* no valid nodemask intersection */ 2481 2482 task_lock(current); 2483 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 2484 task_unlock(current); 2485 if (ret) 2486 goto put_new; 2487 2488 /* Create pseudo-vma that contains just the policy */ 2489 memset(&pvma, 0, sizeof(struct vm_area_struct)); 2490 pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 2491 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 2492 2493 put_new: 2494 mpol_put(new); /* drop initial ref */ 2495 free_scratch: 2496 NODEMASK_SCRATCH_FREE(scratch); 2497 put_mpol: 2498 mpol_put(mpol); /* drop our incoming ref on sb mpol */ 2499 } 2500 } 2501 2502 int mpol_set_shared_policy(struct shared_policy *info, 2503 struct vm_area_struct *vma, struct mempolicy *npol) 2504 { 2505 int err; 2506 struct sp_node *new = NULL; 2507 unsigned long sz = vma_pages(vma); 2508 2509 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 2510 vma->vm_pgoff, 2511 sz, npol ? npol->mode : -1, 2512 npol ? npol->flags : -1, 2513 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 2514 2515 if (npol) { 2516 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 2517 if (!new) 2518 return -ENOMEM; 2519 } 2520 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 2521 if (err && new) 2522 sp_free(new); 2523 return err; 2524 } 2525 2526 /* Free a backing policy store on inode delete. */ 2527 void mpol_free_shared_policy(struct shared_policy *p) 2528 { 2529 struct sp_node *n; 2530 struct rb_node *next; 2531 2532 if (!p->root.rb_node) 2533 return; 2534 write_lock(&p->lock); 2535 next = rb_first(&p->root); 2536 while (next) { 2537 n = rb_entry(next, struct sp_node, nd); 2538 next = rb_next(&n->nd); 2539 sp_delete(p, n); 2540 } 2541 write_unlock(&p->lock); 2542 } 2543 2544 #ifdef CONFIG_NUMA_BALANCING 2545 static int __initdata numabalancing_override; 2546 2547 static void __init check_numabalancing_enable(void) 2548 { 2549 bool numabalancing_default = false; 2550 2551 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 2552 numabalancing_default = true; 2553 2554 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2555 if (numabalancing_override) 2556 set_numabalancing_state(numabalancing_override == 1); 2557 2558 if (num_online_nodes() > 1 && !numabalancing_override) { 2559 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2560 numabalancing_default ? 
"Enabling" : "Disabling"); 2561 set_numabalancing_state(numabalancing_default); 2562 } 2563 } 2564 2565 static int __init setup_numabalancing(char *str) 2566 { 2567 int ret = 0; 2568 if (!str) 2569 goto out; 2570 2571 if (!strcmp(str, "enable")) { 2572 numabalancing_override = 1; 2573 ret = 1; 2574 } else if (!strcmp(str, "disable")) { 2575 numabalancing_override = -1; 2576 ret = 1; 2577 } 2578 out: 2579 if (!ret) 2580 pr_warn("Unable to parse numa_balancing=\n"); 2581 2582 return ret; 2583 } 2584 __setup("numa_balancing=", setup_numabalancing); 2585 #else 2586 static inline void __init check_numabalancing_enable(void) 2587 { 2588 } 2589 #endif /* CONFIG_NUMA_BALANCING */ 2590 2591 /* assumes fs == KERNEL_DS */ 2592 void __init numa_policy_init(void) 2593 { 2594 nodemask_t interleave_nodes; 2595 unsigned long largest = 0; 2596 int nid, prefer = 0; 2597 2598 policy_cache = kmem_cache_create("numa_policy", 2599 sizeof(struct mempolicy), 2600 0, SLAB_PANIC, NULL); 2601 2602 sn_cache = kmem_cache_create("shared_policy_node", 2603 sizeof(struct sp_node), 2604 0, SLAB_PANIC, NULL); 2605 2606 for_each_node(nid) { 2607 preferred_node_policy[nid] = (struct mempolicy) { 2608 .refcnt = ATOMIC_INIT(1), 2609 .mode = MPOL_PREFERRED, 2610 .flags = MPOL_F_MOF | MPOL_F_MORON, 2611 .v = { .preferred_node = nid, }, 2612 }; 2613 } 2614 2615 /* 2616 * Set interleaving policy for system init. Interleaving is only 2617 * enabled across suitably sized nodes (default is >= 16MB), or 2618 * fall back to the largest node if they're all smaller. 2619 */ 2620 nodes_clear(interleave_nodes); 2621 for_each_node_state(nid, N_MEMORY) { 2622 unsigned long total_pages = node_present_pages(nid); 2623 2624 /* Preserve the largest node */ 2625 if (largest < total_pages) { 2626 largest = total_pages; 2627 prefer = nid; 2628 } 2629 2630 /* Interleave this node? */ 2631 if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2632 node_set(nid, interleave_nodes); 2633 } 2634 2635 /* All too small, use the largest */ 2636 if (unlikely(nodes_empty(interleave_nodes))) 2637 node_set(prefer, interleave_nodes); 2638 2639 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2640 pr_err("%s: interleaving failed\n", __func__); 2641 2642 check_numabalancing_enable(); 2643 } 2644 2645 /* Reset policy of current process to default */ 2646 void numa_default_policy(void) 2647 { 2648 do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 2649 } 2650 2651 /* 2652 * Parse and format mempolicy from/to strings 2653 */ 2654 2655 /* 2656 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. 2657 */ 2658 static const char * const policy_modes[] = 2659 { 2660 [MPOL_DEFAULT] = "default", 2661 [MPOL_PREFERRED] = "prefer", 2662 [MPOL_BIND] = "bind", 2663 [MPOL_INTERLEAVE] = "interleave", 2664 [MPOL_LOCAL] = "local", 2665 }; 2666 2667 2668 #ifdef CONFIG_TMPFS 2669 /** 2670 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2671 * @str: string containing mempolicy to parse 2672 * @mpol: pointer to struct mempolicy pointer, returned on success. 
2673 * 2674 * Format of input: 2675 * <mode>[=<flags>][:<nodelist>] 2676 * 2677 * On success, returns 0, else 1 2678 */ 2679 int mpol_parse_str(char *str, struct mempolicy **mpol) 2680 { 2681 struct mempolicy *new = NULL; 2682 unsigned short mode; 2683 unsigned short mode_flags; 2684 nodemask_t nodes; 2685 char *nodelist = strchr(str, ':'); 2686 char *flags = strchr(str, '='); 2687 int err = 1; 2688 2689 if (nodelist) { 2690 /* NUL-terminate mode or flags string */ 2691 *nodelist++ = '\0'; 2692 if (nodelist_parse(nodelist, nodes)) 2693 goto out; 2694 if (!nodes_subset(nodes, node_states[N_MEMORY])) 2695 goto out; 2696 } else 2697 nodes_clear(nodes); 2698 2699 if (flags) 2700 *flags++ = '\0'; /* terminate mode string */ 2701 2702 for (mode = 0; mode < MPOL_MAX; mode++) { 2703 if (!strcmp(str, policy_modes[mode])) { 2704 break; 2705 } 2706 } 2707 if (mode >= MPOL_MAX) 2708 goto out; 2709 2710 switch (mode) { 2711 case MPOL_PREFERRED: 2712 /* 2713 * Insist on a nodelist of one node only 2714 */ 2715 if (nodelist) { 2716 char *rest = nodelist; 2717 while (isdigit(*rest)) 2718 rest++; 2719 if (*rest) 2720 goto out; 2721 } 2722 break; 2723 case MPOL_INTERLEAVE: 2724 /* 2725 * Default to online nodes with memory if no nodelist 2726 */ 2727 if (!nodelist) 2728 nodes = node_states[N_MEMORY]; 2729 break; 2730 case MPOL_LOCAL: 2731 /* 2732 * Don't allow a nodelist; mpol_new() checks flags 2733 */ 2734 if (nodelist) 2735 goto out; 2736 mode = MPOL_PREFERRED; 2737 break; 2738 case MPOL_DEFAULT: 2739 /* 2740 * Insist on a empty nodelist 2741 */ 2742 if (!nodelist) 2743 err = 0; 2744 goto out; 2745 case MPOL_BIND: 2746 /* 2747 * Insist on a nodelist 2748 */ 2749 if (!nodelist) 2750 goto out; 2751 } 2752 2753 mode_flags = 0; 2754 if (flags) { 2755 /* 2756 * Currently, we only support two mutually exclusive 2757 * mode flags. 2758 */ 2759 if (!strcmp(flags, "static")) 2760 mode_flags |= MPOL_F_STATIC_NODES; 2761 else if (!strcmp(flags, "relative")) 2762 mode_flags |= MPOL_F_RELATIVE_NODES; 2763 else 2764 goto out; 2765 } 2766 2767 new = mpol_new(mode, mode_flags, &nodes); 2768 if (IS_ERR(new)) 2769 goto out; 2770 2771 /* 2772 * Save nodes for mpol_to_str() to show the tmpfs mount options 2773 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2774 */ 2775 if (mode != MPOL_PREFERRED) 2776 new->v.nodes = nodes; 2777 else if (nodelist) 2778 new->v.preferred_node = first_node(nodes); 2779 else 2780 new->flags |= MPOL_F_LOCAL; 2781 2782 /* 2783 * Save nodes for contextualization: this will be used to "clone" 2784 * the mempolicy in a specific context [cpuset] at a later time. 2785 */ 2786 new->w.user_nodemask = nodes; 2787 2788 err = 0; 2789 2790 out: 2791 /* Restore string for error message */ 2792 if (nodelist) 2793 *--nodelist = ':'; 2794 if (flags) 2795 *--flags = '='; 2796 if (!err) 2797 *mpol = new; 2798 return err; 2799 } 2800 #endif /* CONFIG_TMPFS */ 2801 2802 /** 2803 * mpol_to_str - format a mempolicy structure for printing 2804 * @buffer: to contain formatted mempolicy string 2805 * @maxlen: length of @buffer 2806 * @pol: pointer to mempolicy to be formatted 2807 * 2808 * Convert @pol into a string. If @buffer is too short, truncate the string. 2809 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 2810 * longest flag, "relative", and to display at least a few node ids. 
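 * Example outputs: "default", "local", "prefer:1", "prefer=static:1",
 * "bind:0-3" and "interleave=relative:0,2".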
2811 */ 2812 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 2813 { 2814 char *p = buffer; 2815 nodemask_t nodes = NODE_MASK_NONE; 2816 unsigned short mode = MPOL_DEFAULT; 2817 unsigned short flags = 0; 2818 2819 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 2820 mode = pol->mode; 2821 flags = pol->flags; 2822 } 2823 2824 switch (mode) { 2825 case MPOL_DEFAULT: 2826 break; 2827 case MPOL_PREFERRED: 2828 if (flags & MPOL_F_LOCAL) 2829 mode = MPOL_LOCAL; 2830 else 2831 node_set(pol->v.preferred_node, nodes); 2832 break; 2833 case MPOL_BIND: 2834 case MPOL_INTERLEAVE: 2835 nodes = pol->v.nodes; 2836 break; 2837 default: 2838 WARN_ON_ONCE(1); 2839 snprintf(p, maxlen, "unknown"); 2840 return; 2841 } 2842 2843 p += snprintf(p, maxlen, "%s", policy_modes[mode]); 2844 2845 if (flags & MPOL_MODE_FLAGS) { 2846 p += snprintf(p, buffer + maxlen - p, "="); 2847 2848 /* 2849 * Currently, the only defined flags are mutually exclusive 2850 */ 2851 if (flags & MPOL_F_STATIC_NODES) 2852 p += snprintf(p, buffer + maxlen - p, "static"); 2853 else if (flags & MPOL_F_RELATIVE_NODES) 2854 p += snprintf(p, buffer + maxlen - p, "relative"); 2855 } 2856 2857 if (!nodes_empty(nodes)) 2858 p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 2859 nodemask_pr_args(&nodes)); 2860 } 2861
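/*
 * Illustrative usage sketch (not kernel code, never compiled here): how a
 * userspace task would request the policies implemented above through the
 * set_mempolicy(2) and mbind(2) system calls.  Assumes the MPOL_* constants
 * and syscall wrappers from libnuma's <numaif.h> (link with -lnuma); error
 * handling is minimal.
 */
#if 0
#include <numaif.h>		/* set_mempolicy(), mbind(), MPOL_* */
#include <sys/mman.h>		/* mmap() */
#include <string.h>
#include <stdio.h>

int main(void)
{
	/* Interleave the task's future allocations across nodes 0 and 1. */
	unsigned long nodes = (1UL << 0) | (1UL << 1);

	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes)))
		perror("set_mempolicy");

	/* Bind one specific mapping (a VMA policy) to node 0 only. */
	size_t len = 4UL << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	unsigned long node0 = 1UL << 0;

	if (p != MAP_FAILED &&
	    mbind(p, len, MPOL_BIND, &node0, 8 * sizeof(node0), 0))
		perror("mbind");

	memset(p, 0, len);	/* faults allocate pages under the VMA policy */

	/* Back to the default (local) policy for the task. */
	set_mempolicy(MPOL_DEFAULT, NULL, 0);
	return 0;
}
#endif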