146aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 21da177e4SLinus Torvalds /* 31da177e4SLinus Torvalds * Simple NUMA memory policy for the Linux kernel. 41da177e4SLinus Torvalds * 51da177e4SLinus Torvalds * Copyright 2003,2004 Andi Kleen, SuSE Labs. 68bccd85fSChristoph Lameter * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc. 71da177e4SLinus Torvalds * 81da177e4SLinus Torvalds * NUMA policy allows the user to give hints about which node(s) memory should 91da177e4SLinus Torvalds * be allocated. 101da177e4SLinus Torvalds * 111da177e4SLinus Torvalds * Support four policies per VMA and per process: 121da177e4SLinus Torvalds * 131da177e4SLinus Torvalds * The VMA policy has priority over the process policy for a page fault. 141da177e4SLinus Torvalds * 151da177e4SLinus Torvalds * interleave Allocate memory interleaved over a set of nodes, 161da177e4SLinus Torvalds * with normal fallback if it fails. 171da177e4SLinus Torvalds * For VMA based allocations this interleaves based on the 181da177e4SLinus Torvalds * offset into the backing object or offset into the mapping 191da177e4SLinus Torvalds * for anonymous memory. For process policy a process counter 201da177e4SLinus Torvalds * is used. 218bccd85fSChristoph Lameter * 221da177e4SLinus Torvalds * bind Only allocate memory on a specific set of nodes, 231da177e4SLinus Torvalds * no fallback. 248bccd85fSChristoph Lameter * FIXME: memory is allocated starting with the first node 258bccd85fSChristoph Lameter * to the last. It would be better if bind would truly restrict 268bccd85fSChristoph Lameter * the allocation to memory nodes instead 278bccd85fSChristoph Lameter * 281da177e4SLinus Torvalds * preferred Try a specific node first before normal fallback. 2900ef2d2fSDavid Rientjes * As a special case NUMA_NO_NODE here means do the allocation 301da177e4SLinus Torvalds * on the local CPU. This is normally identical to default, 311da177e4SLinus Torvalds * but useful to set in a VMA when you have a non-default 321da177e4SLinus Torvalds * process policy. 338bccd85fSChristoph Lameter * 341da177e4SLinus Torvalds * default Allocate on the local node first, or when on a VMA 351da177e4SLinus Torvalds * use the process policy. This is what Linux always did 361da177e4SLinus Torvalds * in a NUMA aware kernel and still does by, ahem, default. 371da177e4SLinus Torvalds * 381da177e4SLinus Torvalds * The process policy is applied for most non-interrupt memory allocations 391da177e4SLinus Torvalds * in that process' context. Interrupts ignore the policies and always 401da177e4SLinus Torvalds * try to allocate on the local CPU. The VMA policy is only applied for memory 411da177e4SLinus Torvalds * allocations for a VMA in the VM. 421da177e4SLinus Torvalds * 431da177e4SLinus Torvalds * Currently there are a few corner cases in swapping where the policy 441da177e4SLinus Torvalds * is not applied, but the majority should be handled. When process policy 451da177e4SLinus Torvalds * is used it is not remembered over swap outs/swap ins. 461da177e4SLinus Torvalds * 471da177e4SLinus Torvalds * Only the highest zone in the zone hierarchy gets policied. Allocations 481da177e4SLinus Torvalds * requesting a lower zone just use default policy. This implies that 491da177e4SLinus Torvalds * on systems with highmem kernel lowmem allocations don't get policied. 501da177e4SLinus Torvalds * Same with GFP_DMA allocations.
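 *
 * For illustration, the process policy is typically requested from user
 * space roughly like this (a minimal sketch using the <numaif.h> wrappers
 * from libnuma; error handling omitted, and the mask layout assumes node
 * ids below 64):
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *
 * while per-VMA policies are installed with mbind(2) on a mapped range.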
511da177e4SLinus Torvalds * 521da177e4SLinus Torvalds * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between 531da177e4SLinus Torvalds * all users and remembered even when nobody has memory mapped. 541da177e4SLinus Torvalds */ 551da177e4SLinus Torvalds 561da177e4SLinus Torvalds /* Notebook: 571da177e4SLinus Torvalds fix mmap readahead to honour policy and enable policy for any page cache 581da177e4SLinus Torvalds object 591da177e4SLinus Torvalds statistics for bigpages 601da177e4SLinus Torvalds global policy for page cache? currently it uses process policy. Requires 611da177e4SLinus Torvalds first item above. 621da177e4SLinus Torvalds handle mremap for shared memory (currently ignored for the policy) 631da177e4SLinus Torvalds grows down? 641da177e4SLinus Torvalds make bind policy root only? It can trigger oom much faster and the 651da177e4SLinus Torvalds kernel is not always grateful with that. 661da177e4SLinus Torvalds */ 671da177e4SLinus Torvalds 68b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 69b1de0d13SMitchel Humpherys 701da177e4SLinus Torvalds #include <linux/mempolicy.h> 71a520110eSChristoph Hellwig #include <linux/pagewalk.h> 721da177e4SLinus Torvalds #include <linux/highmem.h> 731da177e4SLinus Torvalds #include <linux/hugetlb.h> 741da177e4SLinus Torvalds #include <linux/kernel.h> 751da177e4SLinus Torvalds #include <linux/sched.h> 766e84f315SIngo Molnar #include <linux/sched/mm.h> 776a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h> 78f719ff9bSIngo Molnar #include <linux/sched/task.h> 791da177e4SLinus Torvalds #include <linux/nodemask.h> 801da177e4SLinus Torvalds #include <linux/cpuset.h> 811da177e4SLinus Torvalds #include <linux/slab.h> 821da177e4SLinus Torvalds #include <linux/string.h> 83b95f1b31SPaul Gortmaker #include <linux/export.h> 84b488893aSPavel Emelyanov #include <linux/nsproxy.h> 851da177e4SLinus Torvalds #include <linux/interrupt.h> 861da177e4SLinus Torvalds #include <linux/init.h> 871da177e4SLinus Torvalds #include <linux/compat.h> 8831367466SOtto Ebeling #include <linux/ptrace.h> 89dc9aa5b9SChristoph Lameter #include <linux/swap.h> 901a75a6c8SChristoph Lameter #include <linux/seq_file.h> 911a75a6c8SChristoph Lameter #include <linux/proc_fs.h> 92b20a3503SChristoph Lameter #include <linux/migrate.h> 9362b61f61SHugh Dickins #include <linux/ksm.h> 9495a402c3SChristoph Lameter #include <linux/rmap.h> 9586c3a764SDavid Quigley #include <linux/security.h> 96dbcb0f19SAdrian Bunk #include <linux/syscalls.h> 97095f1fc4SLee Schermerhorn #include <linux/ctype.h> 986d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h> 99b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h> 100b1de0d13SMitchel Humpherys #include <linux/printk.h> 101c8633798SNaoya Horiguchi #include <linux/swapops.h> 102dc9aa5b9SChristoph Lameter 1031da177e4SLinus Torvalds #include <asm/tlbflush.h> 1047c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 1051da177e4SLinus Torvalds 10662695a84SNick Piggin #include "internal.h" 10762695a84SNick Piggin 10838e35860SChristoph Lameter /* Internal flags */ 109dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ 11038e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ 111dc9aa5b9SChristoph Lameter 112fcc234f8SPekka Enberg static struct kmem_cache *policy_cache; 113fcc234f8SPekka Enberg static struct kmem_cache *sn_cache; 1141da177e4SLinus Torvalds 1151da177e4SLinus Torvalds /* Highest zone. 
An specific allocation for a zone below that is not 1161da177e4SLinus Torvalds policied. */ 1176267276fSChristoph Lameter enum zone_type policy_zone = 0; 1181da177e4SLinus Torvalds 119bea904d5SLee Schermerhorn /* 120bea904d5SLee Schermerhorn * run-time system-wide default policy => local allocation 121bea904d5SLee Schermerhorn */ 122e754d79dSH Hartley Sweeten static struct mempolicy default_policy = { 1231da177e4SLinus Torvalds .refcnt = ATOMIC_INIT(1), /* never free it */ 124*7858d7bcSFeng Tang .mode = MPOL_LOCAL, 1251da177e4SLinus Torvalds }; 1261da177e4SLinus Torvalds 1275606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES]; 1285606e387SMel Gorman 129b2ca916cSDan Williams /** 130b2ca916cSDan Williams * numa_map_to_online_node - Find closest online node 131f6e92f40SKrzysztof Kozlowski * @node: Node id to start the search 132b2ca916cSDan Williams * 133b2ca916cSDan Williams * Lookup the next closest node by distance if @nid is not online. 134b2ca916cSDan Williams */ 135b2ca916cSDan Williams int numa_map_to_online_node(int node) 136b2ca916cSDan Williams { 1374fcbe96eSDan Williams int min_dist = INT_MAX, dist, n, min_node; 138b2ca916cSDan Williams 1394fcbe96eSDan Williams if (node == NUMA_NO_NODE || node_online(node)) 1404fcbe96eSDan Williams return node; 141b2ca916cSDan Williams 142b2ca916cSDan Williams min_node = node; 143b2ca916cSDan Williams for_each_online_node(n) { 144b2ca916cSDan Williams dist = node_distance(node, n); 145b2ca916cSDan Williams if (dist < min_dist) { 146b2ca916cSDan Williams min_dist = dist; 147b2ca916cSDan Williams min_node = n; 148b2ca916cSDan Williams } 149b2ca916cSDan Williams } 150b2ca916cSDan Williams 151b2ca916cSDan Williams return min_node; 152b2ca916cSDan Williams } 153b2ca916cSDan Williams EXPORT_SYMBOL_GPL(numa_map_to_online_node); 154b2ca916cSDan Williams 15574d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p) 1565606e387SMel Gorman { 1575606e387SMel Gorman struct mempolicy *pol = p->mempolicy; 158f15ca78eSOleg Nesterov int node; 1595606e387SMel Gorman 160f15ca78eSOleg Nesterov if (pol) 161f15ca78eSOleg Nesterov return pol; 1625606e387SMel Gorman 163f15ca78eSOleg Nesterov node = numa_node_id(); 1641da6f0e1SJianguo Wu if (node != NUMA_NO_NODE) { 1651da6f0e1SJianguo Wu pol = &preferred_node_policy[node]; 166f15ca78eSOleg Nesterov /* preferred_node_policy is not initialised early in boot */ 167f15ca78eSOleg Nesterov if (pol->mode) 168f15ca78eSOleg Nesterov return pol; 1691da6f0e1SJianguo Wu } 1705606e387SMel Gorman 171f15ca78eSOleg Nesterov return &default_policy; 1725606e387SMel Gorman } 1735606e387SMel Gorman 17437012946SDavid Rientjes static const struct mempolicy_operations { 17537012946SDavid Rientjes int (*create)(struct mempolicy *pol, const nodemask_t *nodes); 176213980c0SVlastimil Babka void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes); 17737012946SDavid Rientjes } mpol_ops[MPOL_MAX]; 17837012946SDavid Rientjes 179f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol) 180f5b087b5SDavid Rientjes { 1816d556294SBob Liu return pol->flags & MPOL_MODE_FLAGS; 1824c50bc01SDavid Rientjes } 1834c50bc01SDavid Rientjes 1844c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig, 1854c50bc01SDavid Rientjes const nodemask_t *rel) 1864c50bc01SDavid Rientjes { 1874c50bc01SDavid Rientjes nodemask_t tmp; 1884c50bc01SDavid Rientjes nodes_fold(tmp, *orig, nodes_weight(*rel)); 1894c50bc01SDavid Rientjes nodes_onto(*ret, tmp, 
*rel); 190f5b087b5SDavid Rientjes } 191f5b087b5SDavid Rientjes 19237012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes) 19337012946SDavid Rientjes { 19437012946SDavid Rientjes if (nodes_empty(*nodes)) 19537012946SDavid Rientjes return -EINVAL; 19637012946SDavid Rientjes pol->v.nodes = *nodes; 19737012946SDavid Rientjes return 0; 19837012946SDavid Rientjes } 19937012946SDavid Rientjes 20037012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) 20137012946SDavid Rientjes { 202*7858d7bcSFeng Tang if (nodes_empty(*nodes)) 203*7858d7bcSFeng Tang return -EINVAL; 20437012946SDavid Rientjes pol->v.preferred_node = first_node(*nodes); 20537012946SDavid Rientjes return 0; 20637012946SDavid Rientjes } 20737012946SDavid Rientjes 20837012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes) 20937012946SDavid Rientjes { 210859f7ef1SZhihui Zhang if (nodes_empty(*nodes)) 21137012946SDavid Rientjes return -EINVAL; 21237012946SDavid Rientjes pol->v.nodes = *nodes; 21337012946SDavid Rientjes return 0; 21437012946SDavid Rientjes } 21537012946SDavid Rientjes 21658568d2aSMiao Xie /* 21758568d2aSMiao Xie * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if 21858568d2aSMiao Xie * any, for the new policy. mpol_new() has already validated the nodes 219*7858d7bcSFeng Tang * parameter with respect to the policy mode and flags. 22058568d2aSMiao Xie * 22158568d2aSMiao Xie * Must be called holding task's alloc_lock to protect task's mems_allowed 222c1e8d7c6SMichel Lespinasse * and mempolicy. May also be called holding the mmap_lock for write. 22358568d2aSMiao Xie */ 2244bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol, 2254bfc4495SKAMEZAWA Hiroyuki const nodemask_t *nodes, struct nodemask_scratch *nsc) 22658568d2aSMiao Xie { 22758568d2aSMiao Xie int ret; 22858568d2aSMiao Xie 229*7858d7bcSFeng Tang /* 230*7858d7bcSFeng Tang * Default (pol==NULL) resp. local memory policies are not a 231*7858d7bcSFeng Tang * subject of any remapping. They also do not need any special 232*7858d7bcSFeng Tang * constructor. 233*7858d7bcSFeng Tang */ 234*7858d7bcSFeng Tang if (!pol || pol->mode == MPOL_LOCAL) 23558568d2aSMiao Xie return 0; 236*7858d7bcSFeng Tang 23701f13bd6SLai Jiangshan /* Check N_MEMORY */ 2384bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask1, 23901f13bd6SLai Jiangshan cpuset_current_mems_allowed, node_states[N_MEMORY]); 24058568d2aSMiao Xie 24158568d2aSMiao Xie VM_BUG_ON(!nodes); 242*7858d7bcSFeng Tang 24358568d2aSMiao Xie if (pol->flags & MPOL_F_RELATIVE_NODES) 2444bfc4495SKAMEZAWA Hiroyuki mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); 24558568d2aSMiao Xie else 2464bfc4495SKAMEZAWA Hiroyuki nodes_and(nsc->mask2, *nodes, nsc->mask1); 2474bfc4495SKAMEZAWA Hiroyuki 24858568d2aSMiao Xie if (mpol_store_user_nodemask(pol)) 24958568d2aSMiao Xie pol->w.user_nodemask = *nodes; 25058568d2aSMiao Xie else 251*7858d7bcSFeng Tang pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed; 25258568d2aSMiao Xie 2534bfc4495SKAMEZAWA Hiroyuki ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); 25458568d2aSMiao Xie return ret; 25558568d2aSMiao Xie } 25658568d2aSMiao Xie 25758568d2aSMiao Xie /* 25858568d2aSMiao Xie * This function just creates a new policy, does some check and simple 25958568d2aSMiao Xie * initialization. You must invoke mpol_set_nodemask() to set nodes. 
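 *
 * The usual pairing, as in do_set_mempolicy() and do_mbind() below, is
 * roughly:
 *
 *	NODEMASK_SCRATCH(scratch);
 *	new = mpol_new(mode, flags, nodes);
 *	if (!IS_ERR(new))
 *		ret = mpol_set_nodemask(new, nodes, scratch);
 *	NODEMASK_SCRATCH_FREE(scratch);
 *
 * mpol_set_nodemask() is where the mode flags take effect: a nodemask of
 * {0,1} with MPOL_F_RELATIVE_NODES and a cpuset allowing {4,5,6} folds
 * onto {4,5}, whereas with MPOL_F_STATIC_NODES the user's mask is kept
 * verbatim and only intersected with the currently allowed nodes.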
26058568d2aSMiao Xie */ 261028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, 262028fec41SDavid Rientjes nodemask_t *nodes) 2631da177e4SLinus Torvalds { 2641da177e4SLinus Torvalds struct mempolicy *policy; 2651da177e4SLinus Torvalds 266028fec41SDavid Rientjes pr_debug("setting mode %d flags %d nodes[0] %lx\n", 26700ef2d2fSDavid Rientjes mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); 268140d5a49SPaul Mundt 2693e1f0645SDavid Rientjes if (mode == MPOL_DEFAULT) { 2703e1f0645SDavid Rientjes if (nodes && !nodes_empty(*nodes)) 27137012946SDavid Rientjes return ERR_PTR(-EINVAL); 272d3a71033SLee Schermerhorn return NULL; 27337012946SDavid Rientjes } 2743e1f0645SDavid Rientjes VM_BUG_ON(!nodes); 2753e1f0645SDavid Rientjes 2763e1f0645SDavid Rientjes /* 2773e1f0645SDavid Rientjes * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or 2783e1f0645SDavid Rientjes * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). 2793e1f0645SDavid Rientjes * All other modes require a valid pointer to a non-empty nodemask. 2803e1f0645SDavid Rientjes */ 2813e1f0645SDavid Rientjes if (mode == MPOL_PREFERRED) { 2823e1f0645SDavid Rientjes if (nodes_empty(*nodes)) { 2833e1f0645SDavid Rientjes if (((flags & MPOL_F_STATIC_NODES) || 2843e1f0645SDavid Rientjes (flags & MPOL_F_RELATIVE_NODES))) 2853e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 286*7858d7bcSFeng Tang 287*7858d7bcSFeng Tang mode = MPOL_LOCAL; 2883e1f0645SDavid Rientjes } 289479e2802SPeter Zijlstra } else if (mode == MPOL_LOCAL) { 2908d303e44SPiotr Kwapulinski if (!nodes_empty(*nodes) || 2918d303e44SPiotr Kwapulinski (flags & MPOL_F_STATIC_NODES) || 2928d303e44SPiotr Kwapulinski (flags & MPOL_F_RELATIVE_NODES)) 293479e2802SPeter Zijlstra return ERR_PTR(-EINVAL); 2943e1f0645SDavid Rientjes } else if (nodes_empty(*nodes)) 2953e1f0645SDavid Rientjes return ERR_PTR(-EINVAL); 2961da177e4SLinus Torvalds policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); 2971da177e4SLinus Torvalds if (!policy) 2981da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2991da177e4SLinus Torvalds atomic_set(&policy->refcnt, 1); 30045c4745aSLee Schermerhorn policy->mode = mode; 30137012946SDavid Rientjes policy->flags = flags; 3023e1f0645SDavid Rientjes 30337012946SDavid Rientjes return policy; 30437012946SDavid Rientjes } 30537012946SDavid Rientjes 30652cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. 
*/ 30752cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p) 30852cd3b07SLee Schermerhorn { 30952cd3b07SLee Schermerhorn if (!atomic_dec_and_test(&p->refcnt)) 31052cd3b07SLee Schermerhorn return; 31152cd3b07SLee Schermerhorn kmem_cache_free(policy_cache, p); 31252cd3b07SLee Schermerhorn } 31352cd3b07SLee Schermerhorn 314213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) 31537012946SDavid Rientjes { 31637012946SDavid Rientjes } 31737012946SDavid Rientjes 318213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 3191d0d2680SDavid Rientjes { 3201d0d2680SDavid Rientjes nodemask_t tmp; 3211d0d2680SDavid Rientjes 32237012946SDavid Rientjes if (pol->flags & MPOL_F_STATIC_NODES) 32337012946SDavid Rientjes nodes_and(tmp, pol->w.user_nodemask, *nodes); 32437012946SDavid Rientjes else if (pol->flags & MPOL_F_RELATIVE_NODES) 32537012946SDavid Rientjes mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 3261d0d2680SDavid Rientjes else { 327213980c0SVlastimil Babka nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed, 328213980c0SVlastimil Babka *nodes); 32929b190faSzhong jiang pol->w.cpuset_mems_allowed = *nodes; 3301d0d2680SDavid Rientjes } 33137012946SDavid Rientjes 332708c1bbcSMiao Xie if (nodes_empty(tmp)) 333708c1bbcSMiao Xie tmp = *nodes; 334708c1bbcSMiao Xie 3351d0d2680SDavid Rientjes pol->v.nodes = tmp; 33637012946SDavid Rientjes } 33737012946SDavid Rientjes 33837012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol, 339213980c0SVlastimil Babka const nodemask_t *nodes) 34037012946SDavid Rientjes { 34137012946SDavid Rientjes pol->w.cpuset_mems_allowed = *nodes; 3421d0d2680SDavid Rientjes } 34337012946SDavid Rientjes 344708c1bbcSMiao Xie /* 345708c1bbcSMiao Xie * mpol_rebind_policy - Migrate a policy to a different set of nodes 346708c1bbcSMiao Xie * 347c1e8d7c6SMichel Lespinasse * Per-vma policies are protected by mmap_lock. Allocations using per-task 348213980c0SVlastimil Babka * policies are protected by task->mems_allowed_seq to prevent a premature 349213980c0SVlastimil Babka * OOM/allocation failure due to parallel nodemask modification. 350708c1bbcSMiao Xie */ 351213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) 35237012946SDavid Rientjes { 35337012946SDavid Rientjes if (!pol) 35437012946SDavid Rientjes return; 355*7858d7bcSFeng Tang if (!mpol_store_user_nodemask(pol) && 35637012946SDavid Rientjes nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) 35737012946SDavid Rientjes return; 358708c1bbcSMiao Xie 359213980c0SVlastimil Babka mpol_ops[pol->mode].rebind(pol, newmask); 3601d0d2680SDavid Rientjes } 3611d0d2680SDavid Rientjes 3621d0d2680SDavid Rientjes /* 3631d0d2680SDavid Rientjes * Wrapper for mpol_rebind_policy() that just requires task 3641d0d2680SDavid Rientjes * pointer, and updates task mempolicy. 36558568d2aSMiao Xie * 36658568d2aSMiao Xie * Called with task's alloc_lock held. 3671d0d2680SDavid Rientjes */ 3681d0d2680SDavid Rientjes 369213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) 3701d0d2680SDavid Rientjes { 371213980c0SVlastimil Babka mpol_rebind_policy(tsk->mempolicy, new); 3721d0d2680SDavid Rientjes } 3731d0d2680SDavid Rientjes 3741d0d2680SDavid Rientjes /* 3751d0d2680SDavid Rientjes * Rebind each vma in mm to new nodemask. 3761d0d2680SDavid Rientjes * 377c1e8d7c6SMichel Lespinasse * Call holding a reference to mm. 
Takes mm->mmap_lock during call. 3781d0d2680SDavid Rientjes */ 3791d0d2680SDavid Rientjes 3801d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) 3811d0d2680SDavid Rientjes { 3821d0d2680SDavid Rientjes struct vm_area_struct *vma; 3831d0d2680SDavid Rientjes 384d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 3851d0d2680SDavid Rientjes for (vma = mm->mmap; vma; vma = vma->vm_next) 386213980c0SVlastimil Babka mpol_rebind_policy(vma->vm_policy, new); 387d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 3881d0d2680SDavid Rientjes } 3891d0d2680SDavid Rientjes 39037012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { 39137012946SDavid Rientjes [MPOL_DEFAULT] = { 39237012946SDavid Rientjes .rebind = mpol_rebind_default, 39337012946SDavid Rientjes }, 39437012946SDavid Rientjes [MPOL_INTERLEAVE] = { 39537012946SDavid Rientjes .create = mpol_new_interleave, 39637012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 39737012946SDavid Rientjes }, 39837012946SDavid Rientjes [MPOL_PREFERRED] = { 39937012946SDavid Rientjes .create = mpol_new_preferred, 40037012946SDavid Rientjes .rebind = mpol_rebind_preferred, 40137012946SDavid Rientjes }, 40237012946SDavid Rientjes [MPOL_BIND] = { 40337012946SDavid Rientjes .create = mpol_new_bind, 40437012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 40537012946SDavid Rientjes }, 406*7858d7bcSFeng Tang [MPOL_LOCAL] = { 407*7858d7bcSFeng Tang .rebind = mpol_rebind_default, 408*7858d7bcSFeng Tang }, 40937012946SDavid Rientjes }; 41037012946SDavid Rientjes 411a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 412fc301289SChristoph Lameter unsigned long flags); 4131a75a6c8SChristoph Lameter 4146f4576e3SNaoya Horiguchi struct queue_pages { 4156f4576e3SNaoya Horiguchi struct list_head *pagelist; 4166f4576e3SNaoya Horiguchi unsigned long flags; 4176f4576e3SNaoya Horiguchi nodemask_t *nmask; 418f18da660SLi Xinhai unsigned long start; 419f18da660SLi Xinhai unsigned long end; 420f18da660SLi Xinhai struct vm_area_struct *first; 4216f4576e3SNaoya Horiguchi }; 4226f4576e3SNaoya Horiguchi 42398094945SNaoya Horiguchi /* 42488aaa2a1SNaoya Horiguchi * Check if the page's nid is in qp->nmask. 42588aaa2a1SNaoya Horiguchi * 42688aaa2a1SNaoya Horiguchi * If MPOL_MF_INVERT is set in qp->flags, check if the nid is 42788aaa2a1SNaoya Horiguchi * in the invert of qp->nmask. 42888aaa2a1SNaoya Horiguchi */ 42988aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page, 43088aaa2a1SNaoya Horiguchi struct queue_pages *qp) 43188aaa2a1SNaoya Horiguchi { 43288aaa2a1SNaoya Horiguchi int nid = page_to_nid(page); 43388aaa2a1SNaoya Horiguchi unsigned long flags = qp->flags; 43488aaa2a1SNaoya Horiguchi 43588aaa2a1SNaoya Horiguchi return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); 43688aaa2a1SNaoya Horiguchi } 43788aaa2a1SNaoya Horiguchi 438a7f40cfeSYang Shi /* 439d8835445SYang Shi * queue_pages_pmd() has four possible return values: 440d8835445SYang Shi * 0 - pages are placed on the right node or queued successfully. 441d8835445SYang Shi * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 442d8835445SYang Shi * specified. 443d8835445SYang Shi * 2 - THP was split. 444d8835445SYang Shi * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an 445d8835445SYang Shi * existing page was already on a node that does not follow the 446d8835445SYang Shi * policy. 
447a7f40cfeSYang Shi */ 448c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 449c8633798SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 450959a7e13SJules Irenge __releases(ptl) 451c8633798SNaoya Horiguchi { 452c8633798SNaoya Horiguchi int ret = 0; 453c8633798SNaoya Horiguchi struct page *page; 454c8633798SNaoya Horiguchi struct queue_pages *qp = walk->private; 455c8633798SNaoya Horiguchi unsigned long flags; 456c8633798SNaoya Horiguchi 457c8633798SNaoya Horiguchi if (unlikely(is_pmd_migration_entry(*pmd))) { 458a7f40cfeSYang Shi ret = -EIO; 459c8633798SNaoya Horiguchi goto unlock; 460c8633798SNaoya Horiguchi } 461c8633798SNaoya Horiguchi page = pmd_page(*pmd); 462c8633798SNaoya Horiguchi if (is_huge_zero_page(page)) { 463c8633798SNaoya Horiguchi spin_unlock(ptl); 464c8633798SNaoya Horiguchi __split_huge_pmd(walk->vma, pmd, addr, false, NULL); 465d8835445SYang Shi ret = 2; 466c8633798SNaoya Horiguchi goto out; 467c8633798SNaoya Horiguchi } 468d8835445SYang Shi if (!queue_pages_required(page, qp)) 469c8633798SNaoya Horiguchi goto unlock; 470c8633798SNaoya Horiguchi 471c8633798SNaoya Horiguchi flags = qp->flags; 472c8633798SNaoya Horiguchi /* go to thp migration */ 473a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 474a53190a4SYang Shi if (!vma_migratable(walk->vma) || 475a53190a4SYang Shi migrate_page_add(page, qp->pagelist, flags)) { 476d8835445SYang Shi ret = 1; 477a7f40cfeSYang Shi goto unlock; 478a7f40cfeSYang Shi } 479a7f40cfeSYang Shi } else 480a7f40cfeSYang Shi ret = -EIO; 481c8633798SNaoya Horiguchi unlock: 482c8633798SNaoya Horiguchi spin_unlock(ptl); 483c8633798SNaoya Horiguchi out: 484c8633798SNaoya Horiguchi return ret; 485c8633798SNaoya Horiguchi } 486c8633798SNaoya Horiguchi 48788aaa2a1SNaoya Horiguchi /* 48898094945SNaoya Horiguchi * Scan through pages checking if pages follow certain conditions, 48998094945SNaoya Horiguchi * and move them to the pagelist if they do. 490d8835445SYang Shi * 491d8835445SYang Shi * queue_pages_pte_range() has three possible return values: 492d8835445SYang Shi * 0 - pages are placed on the right node or queued successfully. 493d8835445SYang Shi * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 494d8835445SYang Shi * specified. 495d8835445SYang Shi * -EIO - only MPOL_MF_STRICT was specified and an existing page was already 496d8835445SYang Shi * on a node that does not follow the policy. 49798094945SNaoya Horiguchi */ 4986f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, 4996f4576e3SNaoya Horiguchi unsigned long end, struct mm_walk *walk) 5001da177e4SLinus Torvalds { 5016f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 5026f4576e3SNaoya Horiguchi struct page *page; 5036f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 5046f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 505c8633798SNaoya Horiguchi int ret; 506d8835445SYang Shi bool has_unmovable = false; 5073f088420SShijie Luo pte_t *pte, *mapped_pte; 508705e87c0SHugh Dickins spinlock_t *ptl; 509941150a3SHugh Dickins 510c8633798SNaoya Horiguchi ptl = pmd_trans_huge_lock(pmd, vma); 511c8633798SNaoya Horiguchi if (ptl) { 512c8633798SNaoya Horiguchi ret = queue_pages_pmd(pmd, ptl, addr, end, walk); 513d8835445SYang Shi if (ret != 2) 514a7f40cfeSYang Shi return ret; 515248db92dSKirill A. 
Shutemov } 516d8835445SYang Shi /* THP was split, fall through to pte walk */ 51791612e0dSHugh Dickins 518337d9abfSNaoya Horiguchi if (pmd_trans_unstable(pmd)) 519337d9abfSNaoya Horiguchi return 0; 52094723aafSMichal Hocko 5213f088420SShijie Luo mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 5226f4576e3SNaoya Horiguchi for (; addr != end; pte++, addr += PAGE_SIZE) { 52391612e0dSHugh Dickins if (!pte_present(*pte)) 52491612e0dSHugh Dickins continue; 5256aab341eSLinus Torvalds page = vm_normal_page(vma, addr, *pte); 5266aab341eSLinus Torvalds if (!page) 52791612e0dSHugh Dickins continue; 528053837fcSNick Piggin /* 52962b61f61SHugh Dickins * vm_normal_page() filters out zero pages, but there might 53062b61f61SHugh Dickins * still be PageReserved pages to skip, perhaps in a VDSO. 531053837fcSNick Piggin */ 532b79bc0a0SHugh Dickins if (PageReserved(page)) 533f4598c8bSChristoph Lameter continue; 53488aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 53538e35860SChristoph Lameter continue; 536a7f40cfeSYang Shi if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 537d8835445SYang Shi /* MPOL_MF_STRICT must be specified if we get here */ 538d8835445SYang Shi if (!vma_migratable(vma)) { 539d8835445SYang Shi has_unmovable = true; 540a7f40cfeSYang Shi break; 541d8835445SYang Shi } 542a53190a4SYang Shi 543a53190a4SYang Shi /* 544a53190a4SYang Shi * Do not abort immediately since there may be 545a53190a4SYang Shi * temporary off LRU pages in the range. Still 546a53190a4SYang Shi * need migrate other LRU pages. 547a53190a4SYang Shi */ 548a53190a4SYang Shi if (migrate_page_add(page, qp->pagelist, flags)) 549a53190a4SYang Shi has_unmovable = true; 550a7f40cfeSYang Shi } else 551a7f40cfeSYang Shi break; 5526f4576e3SNaoya Horiguchi } 5533f088420SShijie Luo pte_unmap_unlock(mapped_pte, ptl); 5546f4576e3SNaoya Horiguchi cond_resched(); 555d8835445SYang Shi 556d8835445SYang Shi if (has_unmovable) 557d8835445SYang Shi return 1; 558d8835445SYang Shi 559a7f40cfeSYang Shi return addr != end ? -EIO : 0; 56091612e0dSHugh Dickins } 56191612e0dSHugh Dickins 5626f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, 5636f4576e3SNaoya Horiguchi unsigned long addr, unsigned long end, 5646f4576e3SNaoya Horiguchi struct mm_walk *walk) 565e2d8cf40SNaoya Horiguchi { 566dcf17635SLi Xinhai int ret = 0; 567e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 5686f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 569dcf17635SLi Xinhai unsigned long flags = (qp->flags & MPOL_MF_VALID); 570e2d8cf40SNaoya Horiguchi struct page *page; 571cb900f41SKirill A. Shutemov spinlock_t *ptl; 572d4c54919SNaoya Horiguchi pte_t entry; 573e2d8cf40SNaoya Horiguchi 5746f4576e3SNaoya Horiguchi ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); 5756f4576e3SNaoya Horiguchi entry = huge_ptep_get(pte); 576d4c54919SNaoya Horiguchi if (!pte_present(entry)) 577d4c54919SNaoya Horiguchi goto unlock; 578d4c54919SNaoya Horiguchi page = pte_page(entry); 57988aaa2a1SNaoya Horiguchi if (!queue_pages_required(page, qp)) 580e2d8cf40SNaoya Horiguchi goto unlock; 581dcf17635SLi Xinhai 582dcf17635SLi Xinhai if (flags == MPOL_MF_STRICT) { 583dcf17635SLi Xinhai /* 584dcf17635SLi Xinhai * STRICT alone means only detecting misplaced page and no 585dcf17635SLi Xinhai * need to further check other vma. 
586dcf17635SLi Xinhai */ 587dcf17635SLi Xinhai ret = -EIO; 588dcf17635SLi Xinhai goto unlock; 589dcf17635SLi Xinhai } 590dcf17635SLi Xinhai 591dcf17635SLi Xinhai if (!vma_migratable(walk->vma)) { 592dcf17635SLi Xinhai /* 593dcf17635SLi Xinhai * Must be STRICT with MOVE*, otherwise .test_walk() have 594dcf17635SLi Xinhai * stopped walking current vma. 595dcf17635SLi Xinhai * Detecting misplaced page but allow migrating pages which 596dcf17635SLi Xinhai * have been queued. 597dcf17635SLi Xinhai */ 598dcf17635SLi Xinhai ret = 1; 599dcf17635SLi Xinhai goto unlock; 600dcf17635SLi Xinhai } 601dcf17635SLi Xinhai 602e2d8cf40SNaoya Horiguchi /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */ 603e2d8cf40SNaoya Horiguchi if (flags & (MPOL_MF_MOVE_ALL) || 604dcf17635SLi Xinhai (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) { 605dcf17635SLi Xinhai if (!isolate_huge_page(page, qp->pagelist) && 606dcf17635SLi Xinhai (flags & MPOL_MF_STRICT)) 607dcf17635SLi Xinhai /* 608dcf17635SLi Xinhai * Failed to isolate page but allow migrating pages 609dcf17635SLi Xinhai * which have been queued. 610dcf17635SLi Xinhai */ 611dcf17635SLi Xinhai ret = 1; 612dcf17635SLi Xinhai } 613e2d8cf40SNaoya Horiguchi unlock: 614cb900f41SKirill A. Shutemov spin_unlock(ptl); 615e2d8cf40SNaoya Horiguchi #else 616e2d8cf40SNaoya Horiguchi BUG(); 617e2d8cf40SNaoya Horiguchi #endif 618dcf17635SLi Xinhai return ret; 6191da177e4SLinus Torvalds } 6201da177e4SLinus Torvalds 6215877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING 622b24f53a0SLee Schermerhorn /* 6234b10e7d5SMel Gorman * This is used to mark a range of virtual addresses to be inaccessible. 6244b10e7d5SMel Gorman * These are later cleared by a NUMA hinting fault. Depending on these 6254b10e7d5SMel Gorman * faults, pages may be migrated for better NUMA placement. 6264b10e7d5SMel Gorman * 6274b10e7d5SMel Gorman * This is assuming that NUMA faults are handled using PROT_NONE. If 6284b10e7d5SMel Gorman * an architecture makes a different choice, it will need further 6294b10e7d5SMel Gorman * changes to the core. 
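 *
 * (The PROT_NONE hinting faults installed here are taken later in the
 * normal fault path, e.g. do_numa_page() for PTEs, which may then migrate
 * the page towards the node of the task that touched it.)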
630b24f53a0SLee Schermerhorn */ 6314b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma, 6324b10e7d5SMel Gorman unsigned long addr, unsigned long end) 633b24f53a0SLee Schermerhorn { 6344b10e7d5SMel Gorman int nr_updated; 635b24f53a0SLee Schermerhorn 63658705444SPeter Xu nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA); 63703c5a6e1SMel Gorman if (nr_updated) 63803c5a6e1SMel Gorman count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); 639b24f53a0SLee Schermerhorn 6404b10e7d5SMel Gorman return nr_updated; 641b24f53a0SLee Schermerhorn } 642b24f53a0SLee Schermerhorn #else 643b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma, 644b24f53a0SLee Schermerhorn unsigned long addr, unsigned long end) 645b24f53a0SLee Schermerhorn { 646b24f53a0SLee Schermerhorn return 0; 647b24f53a0SLee Schermerhorn } 6485877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */ 649b24f53a0SLee Schermerhorn 6506f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end, 6516f4576e3SNaoya Horiguchi struct mm_walk *walk) 6521da177e4SLinus Torvalds { 6536f4576e3SNaoya Horiguchi struct vm_area_struct *vma = walk->vma; 6546f4576e3SNaoya Horiguchi struct queue_pages *qp = walk->private; 6555b952b3cSAndi Kleen unsigned long endvma = vma->vm_end; 6566f4576e3SNaoya Horiguchi unsigned long flags = qp->flags; 657dc9aa5b9SChristoph Lameter 658a18b3ac2SLi Xinhai /* range check first */ 659ce33135cSMiaohe Lin VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma); 660f18da660SLi Xinhai 661f18da660SLi Xinhai if (!qp->first) { 662f18da660SLi Xinhai qp->first = vma; 663f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 664f18da660SLi Xinhai (qp->start < vma->vm_start)) 665f18da660SLi Xinhai /* hole at head side of range */ 666a18b3ac2SLi Xinhai return -EFAULT; 667a18b3ac2SLi Xinhai } 668f18da660SLi Xinhai if (!(flags & MPOL_MF_DISCONTIG_OK) && 669f18da660SLi Xinhai ((vma->vm_end < qp->end) && 670f18da660SLi Xinhai (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start))) 671f18da660SLi Xinhai /* hole at middle or tail of range */ 672f18da660SLi Xinhai return -EFAULT; 673a18b3ac2SLi Xinhai 674a7f40cfeSYang Shi /* 675a7f40cfeSYang Shi * Need check MPOL_MF_STRICT to return -EIO if possible 676a7f40cfeSYang Shi * regardless of vma_migratable 677a7f40cfeSYang Shi */ 678a7f40cfeSYang Shi if (!vma_migratable(vma) && 679a7f40cfeSYang Shi !(flags & MPOL_MF_STRICT)) 68048684a65SNaoya Horiguchi return 1; 68148684a65SNaoya Horiguchi 6825b952b3cSAndi Kleen if (endvma > end) 6835b952b3cSAndi Kleen endvma = end; 684b24f53a0SLee Schermerhorn 685b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) { 6862c0346a3SMel Gorman /* Similar to task_numa_work, skip inaccessible VMAs */ 6873122e80eSAnshuman Khandual if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) && 6884355c018SLiang Chen !(vma->vm_flags & VM_MIXEDMAP)) 689b24f53a0SLee Schermerhorn change_prot_numa(vma, start, endvma); 6906f4576e3SNaoya Horiguchi return 1; 691b24f53a0SLee Schermerhorn } 692b24f53a0SLee Schermerhorn 6936f4576e3SNaoya Horiguchi /* queue pages from current vma */ 694a7f40cfeSYang Shi if (flags & MPOL_MF_VALID) 6956f4576e3SNaoya Horiguchi return 0; 6966f4576e3SNaoya Horiguchi return 1; 6976f4576e3SNaoya Horiguchi } 698b24f53a0SLee Schermerhorn 6997b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = { 7007b86ac33SChristoph Hellwig .hugetlb_entry = queue_pages_hugetlb, 7017b86ac33SChristoph Hellwig .pmd_entry = 
queue_pages_pte_range, 7027b86ac33SChristoph Hellwig .test_walk = queue_pages_test_walk, 7037b86ac33SChristoph Hellwig }; 7047b86ac33SChristoph Hellwig 7056f4576e3SNaoya Horiguchi /* 7066f4576e3SNaoya Horiguchi * Walk through page tables and collect pages to be migrated. 7076f4576e3SNaoya Horiguchi * 7086f4576e3SNaoya Horiguchi * If pages found in a given range are on a set of nodes (determined by 7096f4576e3SNaoya Horiguchi * @nodes and @flags,) it's isolated and queued to the pagelist which is 710d8835445SYang Shi * passed via @private. 711d8835445SYang Shi * 712d8835445SYang Shi * queue_pages_range() has three possible return values: 713d8835445SYang Shi * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were 714d8835445SYang Shi * specified. 715d8835445SYang Shi * 0 - queue pages successfully or no misplaced page. 716a85dfc30SYang Shi * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or 717a85dfc30SYang Shi * memory range specified by nodemask and maxnode points outside 718a85dfc30SYang Shi * your accessible address space (-EFAULT) 7196f4576e3SNaoya Horiguchi */ 7206f4576e3SNaoya Horiguchi static int 7216f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 7226f4576e3SNaoya Horiguchi nodemask_t *nodes, unsigned long flags, 7236f4576e3SNaoya Horiguchi struct list_head *pagelist) 7246f4576e3SNaoya Horiguchi { 725f18da660SLi Xinhai int err; 7266f4576e3SNaoya Horiguchi struct queue_pages qp = { 7276f4576e3SNaoya Horiguchi .pagelist = pagelist, 7286f4576e3SNaoya Horiguchi .flags = flags, 7296f4576e3SNaoya Horiguchi .nmask = nodes, 730f18da660SLi Xinhai .start = start, 731f18da660SLi Xinhai .end = end, 732f18da660SLi Xinhai .first = NULL, 7336f4576e3SNaoya Horiguchi }; 7346f4576e3SNaoya Horiguchi 735f18da660SLi Xinhai err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); 736f18da660SLi Xinhai 737f18da660SLi Xinhai if (!qp.first) 738f18da660SLi Xinhai /* whole range in hole */ 739f18da660SLi Xinhai err = -EFAULT; 740f18da660SLi Xinhai 741f18da660SLi Xinhai return err; 7421da177e4SLinus Torvalds } 7431da177e4SLinus Torvalds 744869833f2SKOSAKI Motohiro /* 745869833f2SKOSAKI Motohiro * Apply policy to a single VMA 746c1e8d7c6SMichel Lespinasse * This must be called with the mmap_lock held for writing. 747869833f2SKOSAKI Motohiro */ 748869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma, 749869833f2SKOSAKI Motohiro struct mempolicy *pol) 7508d34694cSKOSAKI Motohiro { 751869833f2SKOSAKI Motohiro int err; 752869833f2SKOSAKI Motohiro struct mempolicy *old; 753869833f2SKOSAKI Motohiro struct mempolicy *new; 7548d34694cSKOSAKI Motohiro 7558d34694cSKOSAKI Motohiro pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", 7568d34694cSKOSAKI Motohiro vma->vm_start, vma->vm_end, vma->vm_pgoff, 7578d34694cSKOSAKI Motohiro vma->vm_ops, vma->vm_file, 7588d34694cSKOSAKI Motohiro vma->vm_ops ? 
vma->vm_ops->set_policy : NULL); 7598d34694cSKOSAKI Motohiro 760869833f2SKOSAKI Motohiro new = mpol_dup(pol); 761869833f2SKOSAKI Motohiro if (IS_ERR(new)) 762869833f2SKOSAKI Motohiro return PTR_ERR(new); 763869833f2SKOSAKI Motohiro 764869833f2SKOSAKI Motohiro if (vma->vm_ops && vma->vm_ops->set_policy) { 7658d34694cSKOSAKI Motohiro err = vma->vm_ops->set_policy(vma, new); 766869833f2SKOSAKI Motohiro if (err) 767869833f2SKOSAKI Motohiro goto err_out; 7688d34694cSKOSAKI Motohiro } 769869833f2SKOSAKI Motohiro 770869833f2SKOSAKI Motohiro old = vma->vm_policy; 771c1e8d7c6SMichel Lespinasse vma->vm_policy = new; /* protected by mmap_lock */ 772869833f2SKOSAKI Motohiro mpol_put(old); 773869833f2SKOSAKI Motohiro 774869833f2SKOSAKI Motohiro return 0; 775869833f2SKOSAKI Motohiro err_out: 776869833f2SKOSAKI Motohiro mpol_put(new); 7778d34694cSKOSAKI Motohiro return err; 7788d34694cSKOSAKI Motohiro } 7798d34694cSKOSAKI Motohiro 7801da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */ 7819d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start, 7829d8cebd4SKOSAKI Motohiro unsigned long end, struct mempolicy *new_pol) 7831da177e4SLinus Torvalds { 7841da177e4SLinus Torvalds struct vm_area_struct *next; 7859d8cebd4SKOSAKI Motohiro struct vm_area_struct *prev; 7869d8cebd4SKOSAKI Motohiro struct vm_area_struct *vma; 7879d8cebd4SKOSAKI Motohiro int err = 0; 788e26a5114SKOSAKI Motohiro pgoff_t pgoff; 7899d8cebd4SKOSAKI Motohiro unsigned long vmstart; 7909d8cebd4SKOSAKI Motohiro unsigned long vmend; 7911da177e4SLinus Torvalds 792097d5910SLinus Torvalds vma = find_vma(mm, start); 793f18da660SLi Xinhai VM_BUG_ON(!vma); 7949d8cebd4SKOSAKI Motohiro 795097d5910SLinus Torvalds prev = vma->vm_prev; 796e26a5114SKOSAKI Motohiro if (start > vma->vm_start) 797e26a5114SKOSAKI Motohiro prev = vma; 798e26a5114SKOSAKI Motohiro 7999d8cebd4SKOSAKI Motohiro for (; vma && vma->vm_start < end; prev = vma, vma = next) { 8001da177e4SLinus Torvalds next = vma->vm_next; 8019d8cebd4SKOSAKI Motohiro vmstart = max(start, vma->vm_start); 8029d8cebd4SKOSAKI Motohiro vmend = min(end, vma->vm_end); 8039d8cebd4SKOSAKI Motohiro 804e26a5114SKOSAKI Motohiro if (mpol_equal(vma_policy(vma), new_pol)) 805e26a5114SKOSAKI Motohiro continue; 806e26a5114SKOSAKI Motohiro 807e26a5114SKOSAKI Motohiro pgoff = vma->vm_pgoff + 808e26a5114SKOSAKI Motohiro ((vmstart - vma->vm_start) >> PAGE_SHIFT); 8099d8cebd4SKOSAKI Motohiro prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, 810e26a5114SKOSAKI Motohiro vma->anon_vma, vma->vm_file, pgoff, 81119a809afSAndrea Arcangeli new_pol, vma->vm_userfaultfd_ctx); 8129d8cebd4SKOSAKI Motohiro if (prev) { 8139d8cebd4SKOSAKI Motohiro vma = prev; 8149d8cebd4SKOSAKI Motohiro next = vma->vm_next; 8153964acd0SOleg Nesterov if (mpol_equal(vma_policy(vma), new_pol)) 8169d8cebd4SKOSAKI Motohiro continue; 8173964acd0SOleg Nesterov /* vma_merge() joined vma && vma->next, case 8 */ 8183964acd0SOleg Nesterov goto replace; 8191da177e4SLinus Torvalds } 8209d8cebd4SKOSAKI Motohiro if (vma->vm_start != vmstart) { 8219d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmstart, 1); 8229d8cebd4SKOSAKI Motohiro if (err) 8239d8cebd4SKOSAKI Motohiro goto out; 8249d8cebd4SKOSAKI Motohiro } 8259d8cebd4SKOSAKI Motohiro if (vma->vm_end != vmend) { 8269d8cebd4SKOSAKI Motohiro err = split_vma(vma->vm_mm, vma, vmend, 0); 8279d8cebd4SKOSAKI Motohiro if (err) 8289d8cebd4SKOSAKI Motohiro goto out; 8299d8cebd4SKOSAKI Motohiro } 8303964acd0SOleg Nesterov replace: 831869833f2SKOSAKI 
Motohiro err = vma_replace_policy(vma, new_pol); 8329d8cebd4SKOSAKI Motohiro if (err) 8339d8cebd4SKOSAKI Motohiro goto out; 8349d8cebd4SKOSAKI Motohiro } 8359d8cebd4SKOSAKI Motohiro 8369d8cebd4SKOSAKI Motohiro out: 8371da177e4SLinus Torvalds return err; 8381da177e4SLinus Torvalds } 8391da177e4SLinus Torvalds 8401da177e4SLinus Torvalds /* Set the process memory policy */ 841028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags, 842028fec41SDavid Rientjes nodemask_t *nodes) 8431da177e4SLinus Torvalds { 84458568d2aSMiao Xie struct mempolicy *new, *old; 8454bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 84658568d2aSMiao Xie int ret; 8471da177e4SLinus Torvalds 8484bfc4495SKAMEZAWA Hiroyuki if (!scratch) 8494bfc4495SKAMEZAWA Hiroyuki return -ENOMEM; 850f4e53d91SLee Schermerhorn 8514bfc4495SKAMEZAWA Hiroyuki new = mpol_new(mode, flags, nodes); 8524bfc4495SKAMEZAWA Hiroyuki if (IS_ERR(new)) { 8534bfc4495SKAMEZAWA Hiroyuki ret = PTR_ERR(new); 8544bfc4495SKAMEZAWA Hiroyuki goto out; 8554bfc4495SKAMEZAWA Hiroyuki } 8562c7c3a7dSOleg Nesterov 857bda420b9SHuang Ying if (flags & MPOL_F_NUMA_BALANCING) { 858bda420b9SHuang Ying if (new && new->mode == MPOL_BIND) { 859bda420b9SHuang Ying new->flags |= (MPOL_F_MOF | MPOL_F_MORON); 860bda420b9SHuang Ying } else { 861bda420b9SHuang Ying ret = -EINVAL; 862bda420b9SHuang Ying mpol_put(new); 863bda420b9SHuang Ying goto out; 864bda420b9SHuang Ying } 865bda420b9SHuang Ying } 866bda420b9SHuang Ying 8674bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, nodes, scratch); 86858568d2aSMiao Xie if (ret) { 86958568d2aSMiao Xie mpol_put(new); 8704bfc4495SKAMEZAWA Hiroyuki goto out; 87158568d2aSMiao Xie } 87278b132e9SWei Yang task_lock(current); 87358568d2aSMiao Xie old = current->mempolicy; 8741da177e4SLinus Torvalds current->mempolicy = new; 87545816682SVlastimil Babka if (new && new->mode == MPOL_INTERLEAVE) 87645816682SVlastimil Babka current->il_prev = MAX_NUMNODES-1; 87758568d2aSMiao Xie task_unlock(current); 87858568d2aSMiao Xie mpol_put(old); 8794bfc4495SKAMEZAWA Hiroyuki ret = 0; 8804bfc4495SKAMEZAWA Hiroyuki out: 8814bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 8824bfc4495SKAMEZAWA Hiroyuki return ret; 8831da177e4SLinus Torvalds } 8841da177e4SLinus Torvalds 885bea904d5SLee Schermerhorn /* 886bea904d5SLee Schermerhorn * Return nodemask for policy for get_mempolicy() query 88758568d2aSMiao Xie * 88858568d2aSMiao Xie * Called with task's alloc_lock held 889bea904d5SLee Schermerhorn */ 890bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) 8911da177e4SLinus Torvalds { 892dfcd3c0dSAndi Kleen nodes_clear(*nodes); 893bea904d5SLee Schermerhorn if (p == &default_policy) 894bea904d5SLee Schermerhorn return; 895bea904d5SLee Schermerhorn 89645c4745aSLee Schermerhorn switch (p->mode) { 89719770b32SMel Gorman case MPOL_BIND: 8981da177e4SLinus Torvalds case MPOL_INTERLEAVE: 899dfcd3c0dSAndi Kleen *nodes = p->v.nodes; 9001da177e4SLinus Torvalds break; 901*7858d7bcSFeng Tang case MPOL_LOCAL: 902*7858d7bcSFeng Tang /* return empty node mask for local allocation */ 903*7858d7bcSFeng Tang break; 904*7858d7bcSFeng Tang 9051da177e4SLinus Torvalds case MPOL_PREFERRED: 906dfcd3c0dSAndi Kleen node_set(p->v.preferred_node, *nodes); 9071da177e4SLinus Torvalds break; 9081da177e4SLinus Torvalds default: 9091da177e4SLinus Torvalds BUG(); 9101da177e4SLinus Torvalds } 9111da177e4SLinus Torvalds } 9121da177e4SLinus Torvalds 9133b9aadf7SAndrea Arcangeli static int lookup_node(struct 
mm_struct *mm, unsigned long addr) 9141da177e4SLinus Torvalds { 915ba841078SPeter Xu struct page *p = NULL; 9161da177e4SLinus Torvalds int err; 9171da177e4SLinus Torvalds 9183b9aadf7SAndrea Arcangeli int locked = 1; 9193b9aadf7SAndrea Arcangeli err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked); 9202d3a36a4SMichal Hocko if (err > 0) { 9211da177e4SLinus Torvalds err = page_to_nid(p); 9221da177e4SLinus Torvalds put_page(p); 9231da177e4SLinus Torvalds } 9243b9aadf7SAndrea Arcangeli if (locked) 925d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 9261da177e4SLinus Torvalds return err; 9271da177e4SLinus Torvalds } 9281da177e4SLinus Torvalds 9291da177e4SLinus Torvalds /* Retrieve NUMA policy */ 930dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask, 9311da177e4SLinus Torvalds unsigned long addr, unsigned long flags) 9321da177e4SLinus Torvalds { 9338bccd85fSChristoph Lameter int err; 9341da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 9351da177e4SLinus Torvalds struct vm_area_struct *vma = NULL; 9363b9aadf7SAndrea Arcangeli struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; 9371da177e4SLinus Torvalds 938754af6f5SLee Schermerhorn if (flags & 939754af6f5SLee Schermerhorn ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) 9401da177e4SLinus Torvalds return -EINVAL; 941754af6f5SLee Schermerhorn 942754af6f5SLee Schermerhorn if (flags & MPOL_F_MEMS_ALLOWED) { 943754af6f5SLee Schermerhorn if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) 944754af6f5SLee Schermerhorn return -EINVAL; 945754af6f5SLee Schermerhorn *policy = 0; /* just so it's initialized */ 94658568d2aSMiao Xie task_lock(current); 947754af6f5SLee Schermerhorn *nmask = cpuset_current_mems_allowed; 94858568d2aSMiao Xie task_unlock(current); 949754af6f5SLee Schermerhorn return 0; 950754af6f5SLee Schermerhorn } 951754af6f5SLee Schermerhorn 9521da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 953bea904d5SLee Schermerhorn /* 954bea904d5SLee Schermerhorn * Do NOT fall back to task policy if the 955bea904d5SLee Schermerhorn * vma/shared policy at addr is NULL. We 956bea904d5SLee Schermerhorn * want to return MPOL_DEFAULT in this case. 957bea904d5SLee Schermerhorn */ 958d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 95933e3575cSLiam Howlett vma = vma_lookup(mm, addr); 9601da177e4SLinus Torvalds if (!vma) { 961d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 9621da177e4SLinus Torvalds return -EFAULT; 9631da177e4SLinus Torvalds } 9641da177e4SLinus Torvalds if (vma->vm_ops && vma->vm_ops->get_policy) 9651da177e4SLinus Torvalds pol = vma->vm_ops->get_policy(vma, addr); 9661da177e4SLinus Torvalds else 9671da177e4SLinus Torvalds pol = vma->vm_policy; 9681da177e4SLinus Torvalds } else if (addr) 9691da177e4SLinus Torvalds return -EINVAL; 9701da177e4SLinus Torvalds 9711da177e4SLinus Torvalds if (!pol) 972bea904d5SLee Schermerhorn pol = &default_policy; /* indicates default behavior */ 9731da177e4SLinus Torvalds 9741da177e4SLinus Torvalds if (flags & MPOL_F_NODE) { 9751da177e4SLinus Torvalds if (flags & MPOL_F_ADDR) { 9763b9aadf7SAndrea Arcangeli /* 9773b9aadf7SAndrea Arcangeli * Take a refcount on the mpol, lookup_node() 978baf2f90bSLu Jialin * will drop the mmap_lock, so after calling 9793b9aadf7SAndrea Arcangeli * lookup_node() only "pol" remains valid, "vma" 9803b9aadf7SAndrea Arcangeli * is stale. 
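 *
 * For reference, the user-space view of this branch is roughly
 * (a sketch, error handling omitted):
 *
 *	int node;
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 * which, on success, returns in "node" the id of the node backing the
 * page at addr, faulting the page in first if need be.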
9813b9aadf7SAndrea Arcangeli */ 9823b9aadf7SAndrea Arcangeli pol_refcount = pol; 9833b9aadf7SAndrea Arcangeli vma = NULL; 9843b9aadf7SAndrea Arcangeli mpol_get(pol); 9853b9aadf7SAndrea Arcangeli err = lookup_node(mm, addr); 9861da177e4SLinus Torvalds if (err < 0) 9871da177e4SLinus Torvalds goto out; 9888bccd85fSChristoph Lameter *policy = err; 9891da177e4SLinus Torvalds } else if (pol == current->mempolicy && 99045c4745aSLee Schermerhorn pol->mode == MPOL_INTERLEAVE) { 99145816682SVlastimil Babka *policy = next_node_in(current->il_prev, pol->v.nodes); 9921da177e4SLinus Torvalds } else { 9931da177e4SLinus Torvalds err = -EINVAL; 9941da177e4SLinus Torvalds goto out; 9951da177e4SLinus Torvalds } 996bea904d5SLee Schermerhorn } else { 997bea904d5SLee Schermerhorn *policy = pol == &default_policy ? MPOL_DEFAULT : 998bea904d5SLee Schermerhorn pol->mode; 999d79df630SDavid Rientjes /* 1000d79df630SDavid Rientjes * Internal mempolicy flags must be masked off before exposing 1001d79df630SDavid Rientjes * the policy to userspace. 1002d79df630SDavid Rientjes */ 1003d79df630SDavid Rientjes *policy |= (pol->flags & MPOL_MODE_FLAGS); 1004bea904d5SLee Schermerhorn } 10051da177e4SLinus Torvalds 10061da177e4SLinus Torvalds err = 0; 100758568d2aSMiao Xie if (nmask) { 1008c6b6ef8bSLee Schermerhorn if (mpol_store_user_nodemask(pol)) { 1009c6b6ef8bSLee Schermerhorn *nmask = pol->w.user_nodemask; 1010c6b6ef8bSLee Schermerhorn } else { 101158568d2aSMiao Xie task_lock(current); 1012bea904d5SLee Schermerhorn get_policy_nodemask(pol, nmask); 101358568d2aSMiao Xie task_unlock(current); 101458568d2aSMiao Xie } 1015c6b6ef8bSLee Schermerhorn } 10161da177e4SLinus Torvalds 10171da177e4SLinus Torvalds out: 101852cd3b07SLee Schermerhorn mpol_cond_put(pol); 10191da177e4SLinus Torvalds if (vma) 1020d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 10213b9aadf7SAndrea Arcangeli if (pol_refcount) 10223b9aadf7SAndrea Arcangeli mpol_put(pol_refcount); 10231da177e4SLinus Torvalds return err; 10241da177e4SLinus Torvalds } 10251da177e4SLinus Torvalds 1026b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION 10278bccd85fSChristoph Lameter /* 1028c8633798SNaoya Horiguchi * page migration, thp tail pages can be passed. 10296ce3c4c0SChristoph Lameter */ 1030a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1031fc301289SChristoph Lameter unsigned long flags) 10326ce3c4c0SChristoph Lameter { 1033c8633798SNaoya Horiguchi struct page *head = compound_head(page); 10346ce3c4c0SChristoph Lameter /* 1035fc301289SChristoph Lameter * Avoid migrating a page that is shared with others. 10366ce3c4c0SChristoph Lameter */ 1037c8633798SNaoya Horiguchi if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) { 1038c8633798SNaoya Horiguchi if (!isolate_lru_page(head)) { 1039c8633798SNaoya Horiguchi list_add_tail(&head->lru, pagelist); 1040c8633798SNaoya Horiguchi mod_node_page_state(page_pgdat(head), 10419de4f22aSHuang Ying NR_ISOLATED_ANON + page_is_file_lru(head), 10426c357848SMatthew Wilcox (Oracle) thp_nr_pages(head)); 1043a53190a4SYang Shi } else if (flags & MPOL_MF_STRICT) { 1044a53190a4SYang Shi /* 1045a53190a4SYang Shi * Non-movable page may reach here. And, there may be 1046a53190a4SYang Shi * temporary off LRU pages or non-LRU movable pages. 1047a53190a4SYang Shi * Treat them as unmovable pages since they can't be 1048a53190a4SYang Shi * isolated, so they can't be moved at the moment. It 1049a53190a4SYang Shi * should return -EIO for this case too. 
1050a53190a4SYang Shi */ 1051a53190a4SYang Shi return -EIO; 105262695a84SNick Piggin } 105362695a84SNick Piggin } 1054a53190a4SYang Shi 1055a53190a4SYang Shi return 0; 10566ce3c4c0SChristoph Lameter } 10576ce3c4c0SChristoph Lameter 10586ce3c4c0SChristoph Lameter /* 10597e2ab150SChristoph Lameter * Migrate pages from one node to a target node. 10607e2ab150SChristoph Lameter * Returns error or the number of pages not migrated. 10617e2ab150SChristoph Lameter */ 1062dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest, 1063dbcb0f19SAdrian Bunk int flags) 10647e2ab150SChristoph Lameter { 10657e2ab150SChristoph Lameter nodemask_t nmask; 10667e2ab150SChristoph Lameter LIST_HEAD(pagelist); 10677e2ab150SChristoph Lameter int err = 0; 1068a0976311SJoonsoo Kim struct migration_target_control mtc = { 1069a0976311SJoonsoo Kim .nid = dest, 1070a0976311SJoonsoo Kim .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 1071a0976311SJoonsoo Kim }; 10727e2ab150SChristoph Lameter 10737e2ab150SChristoph Lameter nodes_clear(nmask); 10747e2ab150SChristoph Lameter node_set(source, nmask); 10757e2ab150SChristoph Lameter 107608270807SMinchan Kim /* 107708270807SMinchan Kim * This does not "check" the range but isolates all pages that 107808270807SMinchan Kim * need migration. Between passing in the full user address 107908270807SMinchan Kim * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. 108008270807SMinchan Kim */ 108108270807SMinchan Kim VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); 108298094945SNaoya Horiguchi queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, 10837e2ab150SChristoph Lameter flags | MPOL_MF_DISCONTIG_OK, &pagelist); 10847e2ab150SChristoph Lameter 1085cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1086a0976311SJoonsoo Kim err = migrate_pages(&pagelist, alloc_migration_target, NULL, 1087a0976311SJoonsoo Kim (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL); 1088cf608ac1SMinchan Kim if (err) 1089e2d8cf40SNaoya Horiguchi putback_movable_pages(&pagelist); 1090cf608ac1SMinchan Kim } 109195a402c3SChristoph Lameter 10927e2ab150SChristoph Lameter return err; 10937e2ab150SChristoph Lameter } 10947e2ab150SChristoph Lameter 10957e2ab150SChristoph Lameter /* 10967e2ab150SChristoph Lameter * Move pages between the two nodesets so as to preserve the physical 10977e2ab150SChristoph Lameter * layout as much as possible. 109839743889SChristoph Lameter * 109939743889SChristoph Lameter * Returns the number of page that could not be moved. 110039743889SChristoph Lameter */ 11010ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 11020ce72d4fSAndrew Morton const nodemask_t *to, int flags) 110339743889SChristoph Lameter { 11047e2ab150SChristoph Lameter int busy = 0; 1105f555befdSJan Stancek int err = 0; 11067e2ab150SChristoph Lameter nodemask_t tmp; 110739743889SChristoph Lameter 1108361a2a22SMinchan Kim lru_cache_disable(); 11090aedadf9SChristoph Lameter 1110d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 1111d4984711SChristoph Lameter 11127e2ab150SChristoph Lameter /* 11137e2ab150SChristoph Lameter * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 11147e2ab150SChristoph Lameter * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 11157e2ab150SChristoph Lameter * bit in 'tmp', and return that <source, dest> pair for migration. 11167e2ab150SChristoph Lameter * The pair of nodemasks 'to' and 'from' define the map. 
11177e2ab150SChristoph Lameter * 11187e2ab150SChristoph Lameter * If no pair of bits is found that way, fallback to picking some 11197e2ab150SChristoph Lameter * pair of 'source' and 'dest' bits that are not the same. If the 11207e2ab150SChristoph Lameter * 'source' and 'dest' bits are the same, this represents a node 11217e2ab150SChristoph Lameter * that will be migrating to itself, so no pages need move. 11227e2ab150SChristoph Lameter * 11237e2ab150SChristoph Lameter * If no bits are left in 'tmp', or if all remaining bits left 11247e2ab150SChristoph Lameter * in 'tmp' correspond to the same bit in 'to', return false 11257e2ab150SChristoph Lameter * (nothing left to migrate). 11267e2ab150SChristoph Lameter * 11277e2ab150SChristoph Lameter * This lets us pick a pair of nodes to migrate between, such that 11287e2ab150SChristoph Lameter * if possible the dest node is not already occupied by some other 11297e2ab150SChristoph Lameter * source node, minimizing the risk of overloading the memory on a 11307e2ab150SChristoph Lameter * node that would happen if we migrated incoming memory to a node 11317e2ab150SChristoph Lameter * before migrating outgoing memory source that same node. 11327e2ab150SChristoph Lameter * 11337e2ab150SChristoph Lameter * A single scan of tmp is sufficient. As we go, we remember the 11347e2ab150SChristoph Lameter * most recent <s, d> pair that moved (s != d). If we find a pair 11357e2ab150SChristoph Lameter * that not only moved, but what's better, moved to an empty slot 11367e2ab150SChristoph Lameter * (d is not set in tmp), then we break out then, with that pair. 1137ae0e47f0SJustin P. Mattock * Otherwise when we finish scanning from_tmp, we at least have the 11387e2ab150SChristoph Lameter * most recent <s, d> pair that moved. If we get all the way through 11397e2ab150SChristoph Lameter * the scan of tmp without finding any node that moved, much less 11407e2ab150SChristoph Lameter * moved to an empty node, then there is nothing left worth migrating. 11417e2ab150SChristoph Lameter */ 11427e2ab150SChristoph Lameter 11430ce72d4fSAndrew Morton tmp = *from; 11447e2ab150SChristoph Lameter while (!nodes_empty(tmp)) { 11457e2ab150SChristoph Lameter int s, d; 1146b76ac7e7SJianguo Wu int source = NUMA_NO_NODE; 11477e2ab150SChristoph Lameter int dest = 0; 11487e2ab150SChristoph Lameter 11497e2ab150SChristoph Lameter for_each_node_mask(s, tmp) { 11504a5b18ccSLarry Woodman 11514a5b18ccSLarry Woodman /* 11524a5b18ccSLarry Woodman * do_migrate_pages() tries to maintain the relative 11534a5b18ccSLarry Woodman * node relationship of the pages established between 11544a5b18ccSLarry Woodman * threads and memory areas. 11554a5b18ccSLarry Woodman * 11564a5b18ccSLarry Woodman * However if the number of source nodes is not equal to 11574a5b18ccSLarry Woodman * the number of destination nodes we can not preserve 11584a5b18ccSLarry Woodman * this node relative relationship. In that case, skip 11594a5b18ccSLarry Woodman * copying memory from a node that is in the destination 11604a5b18ccSLarry Woodman * mask. 11614a5b18ccSLarry Woodman * 11624a5b18ccSLarry Woodman * Example: [2,3,4] -> [3,4,5] moves everything. 11634a5b18ccSLarry Woodman * [0-7] - > [3,4,5] moves only 0,1,2,6,7. 
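 *
 * To make the pair selection above concrete: for [2,3,4] -> [3,4,5],
 * tmp starts as {2,3,4} and node_remap() gives 2->3, 3->4 and 4->5.
 * The first scan remembers (2,3) and (3,4) but breaks at (4,5) since
 * node 5 is an empty slot (not in tmp), so 4 is migrated to 5 first
 * and cleared from tmp. The following scans then pick (3,4) and
 * finally (2,3), i.e. each step here moves pages into a node that is
 * no longer a pending source.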
11644a5b18ccSLarry Woodman */ 11654a5b18ccSLarry Woodman 11660ce72d4fSAndrew Morton if ((nodes_weight(*from) != nodes_weight(*to)) && 11670ce72d4fSAndrew Morton (node_isset(s, *to))) 11684a5b18ccSLarry Woodman continue; 11694a5b18ccSLarry Woodman 11700ce72d4fSAndrew Morton d = node_remap(s, *from, *to); 11717e2ab150SChristoph Lameter if (s == d) 11727e2ab150SChristoph Lameter continue; 11737e2ab150SChristoph Lameter 11747e2ab150SChristoph Lameter source = s; /* Node moved. Memorize */ 11757e2ab150SChristoph Lameter dest = d; 11767e2ab150SChristoph Lameter 11777e2ab150SChristoph Lameter /* dest not in remaining from nodes? */ 11787e2ab150SChristoph Lameter if (!node_isset(dest, tmp)) 11797e2ab150SChristoph Lameter break; 11807e2ab150SChristoph Lameter } 1181b76ac7e7SJianguo Wu if (source == NUMA_NO_NODE) 11827e2ab150SChristoph Lameter break; 11837e2ab150SChristoph Lameter 11847e2ab150SChristoph Lameter node_clear(source, tmp); 11857e2ab150SChristoph Lameter err = migrate_to_node(mm, source, dest, flags); 11867e2ab150SChristoph Lameter if (err > 0) 11877e2ab150SChristoph Lameter busy += err; 11887e2ab150SChristoph Lameter if (err < 0) 11897e2ab150SChristoph Lameter break; 119039743889SChristoph Lameter } 1191d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 1192d479960eSMinchan Kim 1193361a2a22SMinchan Kim lru_cache_enable(); 11947e2ab150SChristoph Lameter if (err < 0) 11957e2ab150SChristoph Lameter return err; 11967e2ab150SChristoph Lameter return busy; 1197b20a3503SChristoph Lameter 119839743889SChristoph Lameter } 119939743889SChristoph Lameter 12003ad33b24SLee Schermerhorn /* 12013ad33b24SLee Schermerhorn * Allocate a new page for page migration based on vma policy. 1202d05f0cdcSHugh Dickins * Start by assuming the page is mapped by the same vma as contains @start. 12033ad33b24SLee Schermerhorn * Search forward from there, if not. N.B., this assumes that the 12043ad33b24SLee Schermerhorn * list of pages handed to migrate_pages()--which is how we get here-- 12053ad33b24SLee Schermerhorn * is in virtual address order. 
12063ad33b24SLee Schermerhorn */ 1207666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 120895a402c3SChristoph Lameter { 1209d05f0cdcSHugh Dickins struct vm_area_struct *vma; 12103f649ab7SKees Cook unsigned long address; 121195a402c3SChristoph Lameter 1212d05f0cdcSHugh Dickins vma = find_vma(current->mm, start); 12133ad33b24SLee Schermerhorn while (vma) { 12143ad33b24SLee Schermerhorn address = page_address_in_vma(page, vma); 12153ad33b24SLee Schermerhorn if (address != -EFAULT) 12163ad33b24SLee Schermerhorn break; 12173ad33b24SLee Schermerhorn vma = vma->vm_next; 12183ad33b24SLee Schermerhorn } 12193ad33b24SLee Schermerhorn 122011c731e8SWanpeng Li if (PageHuge(page)) { 1221389c8178SMichal Hocko return alloc_huge_page_vma(page_hstate(compound_head(page)), 1222389c8178SMichal Hocko vma, address); 122394723aafSMichal Hocko } else if (PageTransHuge(page)) { 1224c8633798SNaoya Horiguchi struct page *thp; 1225c8633798SNaoya Horiguchi 122619deb769SDavid Rientjes thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 122719deb769SDavid Rientjes HPAGE_PMD_ORDER); 1228c8633798SNaoya Horiguchi if (!thp) 1229c8633798SNaoya Horiguchi return NULL; 1230c8633798SNaoya Horiguchi prep_transhuge_page(thp); 1231c8633798SNaoya Horiguchi return thp; 123211c731e8SWanpeng Li } 123311c731e8SWanpeng Li /* 123411c731e8SWanpeng Li * if !vma, alloc_page_vma() will use task or system default policy 123511c731e8SWanpeng Li */ 12360f556856SMichal Hocko return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL, 12370f556856SMichal Hocko vma, address); 123895a402c3SChristoph Lameter } 1239b20a3503SChristoph Lameter #else 1240b20a3503SChristoph Lameter 1241a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist, 1242b20a3503SChristoph Lameter unsigned long flags) 1243b20a3503SChristoph Lameter { 1244a53190a4SYang Shi return -EIO; 1245b20a3503SChristoph Lameter } 1246b20a3503SChristoph Lameter 12470ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 12480ce72d4fSAndrew Morton const nodemask_t *to, int flags) 1249b20a3503SChristoph Lameter { 1250b20a3503SChristoph Lameter return -ENOSYS; 1251b20a3503SChristoph Lameter } 125295a402c3SChristoph Lameter 1253666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start) 125495a402c3SChristoph Lameter { 125595a402c3SChristoph Lameter return NULL; 125695a402c3SChristoph Lameter } 1257b20a3503SChristoph Lameter #endif 1258b20a3503SChristoph Lameter 1259dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len, 1260028fec41SDavid Rientjes unsigned short mode, unsigned short mode_flags, 1261028fec41SDavid Rientjes nodemask_t *nmask, unsigned long flags) 12626ce3c4c0SChristoph Lameter { 12636ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 12646ce3c4c0SChristoph Lameter struct mempolicy *new; 12656ce3c4c0SChristoph Lameter unsigned long end; 12666ce3c4c0SChristoph Lameter int err; 1267d8835445SYang Shi int ret; 12686ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 12696ce3c4c0SChristoph Lameter 1270b24f53a0SLee Schermerhorn if (flags & ~(unsigned long)MPOL_MF_VALID) 12716ce3c4c0SChristoph Lameter return -EINVAL; 127274c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 12736ce3c4c0SChristoph Lameter return -EPERM; 12746ce3c4c0SChristoph Lameter 12756ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 12766ce3c4c0SChristoph Lameter return -EINVAL; 12776ce3c4c0SChristoph Lameter 
12786ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 12796ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 12806ce3c4c0SChristoph Lameter 12816ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 12826ce3c4c0SChristoph Lameter end = start + len; 12836ce3c4c0SChristoph Lameter 12846ce3c4c0SChristoph Lameter if (end < start) 12856ce3c4c0SChristoph Lameter return -EINVAL; 12866ce3c4c0SChristoph Lameter if (end == start) 12876ce3c4c0SChristoph Lameter return 0; 12886ce3c4c0SChristoph Lameter 1289028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 12906ce3c4c0SChristoph Lameter if (IS_ERR(new)) 12916ce3c4c0SChristoph Lameter return PTR_ERR(new); 12926ce3c4c0SChristoph Lameter 1293b24f53a0SLee Schermerhorn if (flags & MPOL_MF_LAZY) 1294b24f53a0SLee Schermerhorn new->flags |= MPOL_F_MOF; 1295b24f53a0SLee Schermerhorn 12966ce3c4c0SChristoph Lameter /* 12976ce3c4c0SChristoph Lameter * If we are using the default policy then operation 12986ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 12996ce3c4c0SChristoph Lameter */ 13006ce3c4c0SChristoph Lameter if (!new) 13016ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 13026ce3c4c0SChristoph Lameter 1303028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1304028fec41SDavid Rientjes start, start + len, mode, mode_flags, 130500ef2d2fSDavid Rientjes nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 13066ce3c4c0SChristoph Lameter 13070aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 13080aedadf9SChristoph Lameter 1309361a2a22SMinchan Kim lru_cache_disable(); 13100aedadf9SChristoph Lameter } 13114bfc4495SKAMEZAWA Hiroyuki { 13124bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 13134bfc4495SKAMEZAWA Hiroyuki if (scratch) { 1314d8ed45c5SMichel Lespinasse mmap_write_lock(mm); 13154bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 13164bfc4495SKAMEZAWA Hiroyuki if (err) 1317d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 13184bfc4495SKAMEZAWA Hiroyuki } else 13194bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 13204bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 13214bfc4495SKAMEZAWA Hiroyuki } 1322b05ca738SKOSAKI Motohiro if (err) 1323b05ca738SKOSAKI Motohiro goto mpol_out; 1324b05ca738SKOSAKI Motohiro 1325d8835445SYang Shi ret = queue_pages_range(mm, start, end, nmask, 13266ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 1327d8835445SYang Shi 1328d8835445SYang Shi if (ret < 0) { 1329a85dfc30SYang Shi err = ret; 1330d8835445SYang Shi goto up_out; 1331d8835445SYang Shi } 1332d8835445SYang Shi 13339d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 13347e2ab150SChristoph Lameter 1335b24f53a0SLee Schermerhorn if (!err) { 1336b24f53a0SLee Schermerhorn int nr_failed = 0; 1337b24f53a0SLee Schermerhorn 1338cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 1339b24f53a0SLee Schermerhorn WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1340d05f0cdcSHugh Dickins nr_failed = migrate_pages(&pagelist, new_page, NULL, 1341d05f0cdcSHugh Dickins start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); 1342cf608ac1SMinchan Kim if (nr_failed) 134374060e4dSNaoya Horiguchi putback_movable_pages(&pagelist); 1344cf608ac1SMinchan Kim } 13456ce3c4c0SChristoph Lameter 1346d8835445SYang Shi if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) 13476ce3c4c0SChristoph Lameter err = -EIO; 1348a85dfc30SYang Shi } else { 1349d8835445SYang Shi up_out: 1350a85dfc30SYang Shi if (!list_empty(&pagelist)) 1351a85dfc30SYang Shi 
putback_movable_pages(&pagelist);
1352a53190a4SYang Shi }
1353a53190a4SYang Shi
1354d8ed45c5SMichel Lespinasse mmap_write_unlock(mm);
1355b05ca738SKOSAKI Motohiro mpol_out:
1356f0be3d32SLee Schermerhorn mpol_put(new);
1357d479960eSMinchan Kim if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1358361a2a22SMinchan Kim lru_cache_enable();
13596ce3c4c0SChristoph Lameter return err;
13606ce3c4c0SChristoph Lameter }
13616ce3c4c0SChristoph Lameter
136239743889SChristoph Lameter /*
13638bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists.
13648bccd85fSChristoph Lameter */
13658bccd85fSChristoph Lameter
13668bccd85fSChristoph Lameter /* Copy a node mask from user space. */
136739743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13688bccd85fSChristoph Lameter unsigned long maxnode)
13698bccd85fSChristoph Lameter {
13708bccd85fSChristoph Lameter unsigned long k;
137156521e7aSYisheng Xie unsigned long t;
13728bccd85fSChristoph Lameter unsigned long nlongs;
13738bccd85fSChristoph Lameter unsigned long endmask;
13748bccd85fSChristoph Lameter
13758bccd85fSChristoph Lameter --maxnode;
13768bccd85fSChristoph Lameter nodes_clear(*nodes);
13778bccd85fSChristoph Lameter if (maxnode == 0 || !nmask)
13788bccd85fSChristoph Lameter return 0;
1379a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1380636f13c1SChris Wright return -EINVAL;
13818bccd85fSChristoph Lameter
13828bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode);
13838bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0)
13848bccd85fSChristoph Lameter endmask = ~0UL;
13858bccd85fSChristoph Lameter else
13868bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
13878bccd85fSChristoph Lameter
138856521e7aSYisheng Xie /*
138956521e7aSYisheng Xie * When the user specified more nodes than supported, just check
139056521e7aSYisheng Xie * if the unsupported part is all zero.
139156521e7aSYisheng Xie *
139256521e7aSYisheng Xie * If maxnode has more longs than MAX_NUMNODES, check
139356521e7aSYisheng Xie * the bits in that area first. And then go through to
139456521e7aSYisheng Xie * check the rest of the bits, whose positions are equal to or bigger than MAX_NUMNODES.
139556521e7aSYisheng Xie * Otherwise, just check bits [MAX_NUMNODES, maxnode).
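 *
 * For instance, on a 64-bit kernel with MAX_NUMNODES == 64, a caller
 * passing maxnode == 257 ends up (after the decrement) with
 * maxnode == 256, nlongs == 4 and endmask == ~0UL; longs 1..3 of the
 * user bitmap must then be all zero before nlongs is clamped back to
 * BITS_TO_LONGS(MAX_NUMNODES).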
139656521e7aSYisheng Xie */ 13978bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 13988bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 13998bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 14008bccd85fSChristoph Lameter return -EFAULT; 14018bccd85fSChristoph Lameter if (k == nlongs - 1) { 14028bccd85fSChristoph Lameter if (t & endmask) 14038bccd85fSChristoph Lameter return -EINVAL; 14048bccd85fSChristoph Lameter } else if (t) 14058bccd85fSChristoph Lameter return -EINVAL; 14068bccd85fSChristoph Lameter } 14078bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 14088bccd85fSChristoph Lameter endmask = ~0UL; 14098bccd85fSChristoph Lameter } 14108bccd85fSChristoph Lameter 141156521e7aSYisheng Xie if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) { 141256521e7aSYisheng Xie unsigned long valid_mask = endmask; 141356521e7aSYisheng Xie 141456521e7aSYisheng Xie valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 141556521e7aSYisheng Xie if (get_user(t, nmask + nlongs - 1)) 141656521e7aSYisheng Xie return -EFAULT; 141756521e7aSYisheng Xie if (t & valid_mask) 141856521e7aSYisheng Xie return -EINVAL; 141956521e7aSYisheng Xie } 142056521e7aSYisheng Xie 14218bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 14228bccd85fSChristoph Lameter return -EFAULT; 14238bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 14248bccd85fSChristoph Lameter return 0; 14258bccd85fSChristoph Lameter } 14268bccd85fSChristoph Lameter 14278bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 14288bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 14298bccd85fSChristoph Lameter nodemask_t *nodes) 14308bccd85fSChristoph Lameter { 14318bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1432050c17f2SRalph Campbell unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 14338bccd85fSChristoph Lameter 14348bccd85fSChristoph Lameter if (copy > nbytes) { 14358bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 14368bccd85fSChristoph Lameter return -EINVAL; 14378bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 14388bccd85fSChristoph Lameter return -EFAULT; 14398bccd85fSChristoph Lameter copy = nbytes; 14408bccd85fSChristoph Lameter } 14418bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 14428bccd85fSChristoph Lameter } 14438bccd85fSChristoph Lameter 1444e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len, 1445e7dc9ad6SDominik Brodowski unsigned long mode, const unsigned long __user *nmask, 1446e7dc9ad6SDominik Brodowski unsigned long maxnode, unsigned int flags) 14478bccd85fSChristoph Lameter { 14488bccd85fSChristoph Lameter nodemask_t nodes; 14498bccd85fSChristoph Lameter int err; 1450028fec41SDavid Rientjes unsigned short mode_flags; 14518bccd85fSChristoph Lameter 1452057d3389SAndrey Konovalov start = untagged_addr(start); 1453028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1454028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1455a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1456a3b51e01SDavid Rientjes return -EINVAL; 14574c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 14584c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 14594c50bc01SDavid Rientjes return -EINVAL; 14608bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14618bccd85fSChristoph Lameter if (err) 14628bccd85fSChristoph Lameter return err; 1463028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 14648bccd85fSChristoph Lameter } 14658bccd85fSChristoph Lameter 1466e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1467e7dc9ad6SDominik Brodowski unsigned long, mode, const unsigned long __user *, nmask, 1468e7dc9ad6SDominik Brodowski unsigned long, maxnode, unsigned int, flags) 1469e7dc9ad6SDominik Brodowski { 1470e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1471e7dc9ad6SDominik Brodowski } 1472e7dc9ad6SDominik Brodowski 14738bccd85fSChristoph Lameter /* Set the process memory policy */ 1474af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1475af03c4acSDominik Brodowski unsigned long maxnode) 14768bccd85fSChristoph Lameter { 14778bccd85fSChristoph Lameter int err; 14788bccd85fSChristoph Lameter nodemask_t nodes; 1479028fec41SDavid Rientjes unsigned short flags; 14808bccd85fSChristoph Lameter 1481028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1482028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1483028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 14848bccd85fSChristoph Lameter return -EINVAL; 14854c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 14864c50bc01SDavid Rientjes return -EINVAL; 14878bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 14888bccd85fSChristoph Lameter if (err) 14898bccd85fSChristoph Lameter return err; 1490028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 14918bccd85fSChristoph Lameter } 14928bccd85fSChristoph Lameter 1493af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1494af03c4acSDominik Brodowski unsigned long, maxnode) 1495af03c4acSDominik Brodowski { 1496af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nmask, maxnode); 1497af03c4acSDominik Brodowski } 1498af03c4acSDominik Brodowski 1499b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1500b6e9b0baSDominik Brodowski const unsigned long __user *old_nodes, 1501b6e9b0baSDominik Brodowski const unsigned long __user *new_nodes) 150239743889SChristoph Lameter { 1503596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 150439743889SChristoph Lameter 
struct task_struct *task; 150539743889SChristoph Lameter nodemask_t task_nodes; 150639743889SChristoph Lameter int err; 1507596d7cfaSKOSAKI Motohiro nodemask_t *old; 1508596d7cfaSKOSAKI Motohiro nodemask_t *new; 1509596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 151039743889SChristoph Lameter 1511596d7cfaSKOSAKI Motohiro if (!scratch) 1512596d7cfaSKOSAKI Motohiro return -ENOMEM; 151339743889SChristoph Lameter 1514596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1515596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1516596d7cfaSKOSAKI Motohiro 1517596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 151839743889SChristoph Lameter if (err) 1519596d7cfaSKOSAKI Motohiro goto out; 1520596d7cfaSKOSAKI Motohiro 1521596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 1522596d7cfaSKOSAKI Motohiro if (err) 1523596d7cfaSKOSAKI Motohiro goto out; 152439743889SChristoph Lameter 152539743889SChristoph Lameter /* Find the mm_struct */ 152655cfaa3cSZeng Zhaoming rcu_read_lock(); 1527228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 152839743889SChristoph Lameter if (!task) { 152955cfaa3cSZeng Zhaoming rcu_read_unlock(); 1530596d7cfaSKOSAKI Motohiro err = -ESRCH; 1531596d7cfaSKOSAKI Motohiro goto out; 153239743889SChristoph Lameter } 15333268c63eSChristoph Lameter get_task_struct(task); 153439743889SChristoph Lameter 1535596d7cfaSKOSAKI Motohiro err = -EINVAL; 153639743889SChristoph Lameter 153739743889SChristoph Lameter /* 153831367466SOtto Ebeling * Check if this process has the right to modify the specified process. 153931367466SOtto Ebeling * Use the regular "ptrace_may_access()" checks. 154039743889SChristoph Lameter */ 154131367466SOtto Ebeling if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1542c69e8d9cSDavid Howells rcu_read_unlock(); 154339743889SChristoph Lameter err = -EPERM; 15443268c63eSChristoph Lameter goto out_put; 154539743889SChristoph Lameter } 1546c69e8d9cSDavid Howells rcu_read_unlock(); 154739743889SChristoph Lameter 154839743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 154939743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1550596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 155139743889SChristoph Lameter err = -EPERM; 15523268c63eSChristoph Lameter goto out_put; 155339743889SChristoph Lameter } 155439743889SChristoph Lameter 15550486a38bSYisheng Xie task_nodes = cpuset_mems_allowed(current); 15560486a38bSYisheng Xie nodes_and(*new, *new, task_nodes); 15570486a38bSYisheng Xie if (nodes_empty(*new)) 15583268c63eSChristoph Lameter goto out_put; 15590486a38bSYisheng Xie 156086c3a764SDavid Quigley err = security_task_movememory(task); 156186c3a764SDavid Quigley if (err) 15623268c63eSChristoph Lameter goto out_put; 156386c3a764SDavid Quigley 15643268c63eSChristoph Lameter mm = get_task_mm(task); 15653268c63eSChristoph Lameter put_task_struct(task); 1566f2a9ef88SSasha Levin 1567f2a9ef88SSasha Levin if (!mm) { 1568f2a9ef88SSasha Levin err = -EINVAL; 1569f2a9ef88SSasha Levin goto out; 1570f2a9ef88SSasha Levin } 1571f2a9ef88SSasha Levin 1572596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 157374c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 15743268c63eSChristoph Lameter 157539743889SChristoph Lameter mmput(mm); 15763268c63eSChristoph Lameter out: 1577596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1578596d7cfaSKOSAKI Motohiro 157939743889SChristoph Lameter return err; 15803268c63eSChristoph Lameter 15813268c63eSChristoph Lameter out_put: 15823268c63eSChristoph Lameter put_task_struct(task); 15833268c63eSChristoph Lameter goto out; 15843268c63eSChristoph Lameter 158539743889SChristoph Lameter } 158639743889SChristoph Lameter 1587b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1588b6e9b0baSDominik Brodowski const unsigned long __user *, old_nodes, 1589b6e9b0baSDominik Brodowski const unsigned long __user *, new_nodes) 1590b6e9b0baSDominik Brodowski { 1591b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1592b6e9b0baSDominik Brodowski } 1593b6e9b0baSDominik Brodowski 159439743889SChristoph Lameter 15958bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1596af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy, 1597af03c4acSDominik Brodowski unsigned long __user *nmask, 1598af03c4acSDominik Brodowski unsigned long maxnode, 1599af03c4acSDominik Brodowski unsigned long addr, 1600af03c4acSDominik Brodowski unsigned long flags) 16018bccd85fSChristoph Lameter { 1602dbcb0f19SAdrian Bunk int err; 16033f649ab7SKees Cook int pval; 16048bccd85fSChristoph Lameter nodemask_t nodes; 16058bccd85fSChristoph Lameter 1606050c17f2SRalph Campbell if (nmask != NULL && maxnode < nr_node_ids) 16078bccd85fSChristoph Lameter return -EINVAL; 16088bccd85fSChristoph Lameter 16094605f057SWenchao Hao addr = untagged_addr(addr); 16104605f057SWenchao Hao 16118bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 16128bccd85fSChristoph Lameter 16138bccd85fSChristoph Lameter if (err) 16148bccd85fSChristoph Lameter return err; 16158bccd85fSChristoph Lameter 16168bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 16178bccd85fSChristoph Lameter return -EFAULT; 16188bccd85fSChristoph Lameter 16198bccd85fSChristoph Lameter if (nmask) 16208bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 16218bccd85fSChristoph Lameter 16228bccd85fSChristoph Lameter return err; 16238bccd85fSChristoph Lameter } 16248bccd85fSChristoph Lameter 1625af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1626af03c4acSDominik Brodowski unsigned long __user *, nmask, unsigned long, maxnode, 1627af03c4acSDominik Brodowski unsigned long, addr, unsigned long, flags) 1628af03c4acSDominik Brodowski { 1629af03c4acSDominik Brodowski return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1630af03c4acSDominik Brodowski } 1631af03c4acSDominik Brodowski 16321da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 16331da177e4SLinus Torvalds 1634c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1635c93e0f6cSHeiko Carstens compat_ulong_t __user *, nmask, 1636c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, 1637c93e0f6cSHeiko Carstens compat_ulong_t, addr, compat_ulong_t, flags) 16381da177e4SLinus Torvalds { 16391da177e4SLinus Torvalds long err; 16401da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16411da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16421da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16431da177e4SLinus Torvalds 1644050c17f2SRalph Campbell nr_bits = min_t(unsigned long, maxnode-1, 
nr_node_ids); 16451da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16461da177e4SLinus Torvalds 16471da177e4SLinus Torvalds if (nmask) 16481da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 16491da177e4SLinus Torvalds 1650af03c4acSDominik Brodowski err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 16511da177e4SLinus Torvalds 16521da177e4SLinus Torvalds if (!err && nmask) { 16532bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 16542bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 16552bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 16561da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 16571da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 16581da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 16591da177e4SLinus Torvalds } 16601da177e4SLinus Torvalds 16611da177e4SLinus Torvalds return err; 16621da177e4SLinus Torvalds } 16631da177e4SLinus Torvalds 1664c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1665c93e0f6cSHeiko Carstens compat_ulong_t, maxnode) 16661da177e4SLinus Torvalds { 16671da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16681da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 16691da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 16701da177e4SLinus Torvalds 16711da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 16721da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16731da177e4SLinus Torvalds 16741da177e4SLinus Torvalds if (nmask) { 1675cf01fb99SChris Salls if (compat_get_bitmap(bm, nmask, nr_bits)) 16761da177e4SLinus Torvalds return -EFAULT; 1677cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1678cf01fb99SChris Salls if (copy_to_user(nm, bm, alloc_size)) 1679cf01fb99SChris Salls return -EFAULT; 1680cf01fb99SChris Salls } 16811da177e4SLinus Torvalds 1682af03c4acSDominik Brodowski return kernel_set_mempolicy(mode, nm, nr_bits+1); 16831da177e4SLinus Torvalds } 16841da177e4SLinus Torvalds 1685c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, 1686c93e0f6cSHeiko Carstens compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1687c93e0f6cSHeiko Carstens compat_ulong_t, maxnode, compat_ulong_t, flags) 16881da177e4SLinus Torvalds { 16891da177e4SLinus Torvalds unsigned long __user *nm = NULL; 16901da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1691dfcd3c0dSAndi Kleen nodemask_t bm; 16921da177e4SLinus Torvalds 16931da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 16941da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 16951da177e4SLinus Torvalds 16961da177e4SLinus Torvalds if (nmask) { 1697cf01fb99SChris Salls if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) 16981da177e4SLinus Torvalds return -EFAULT; 1699cf01fb99SChris Salls nm = compat_alloc_user_space(alloc_size); 1700cf01fb99SChris Salls if (copy_to_user(nm, nodes_addr(bm), alloc_size)) 1701cf01fb99SChris Salls return -EFAULT; 1702cf01fb99SChris Salls } 17031da177e4SLinus Torvalds 1704e7dc9ad6SDominik Brodowski return kernel_mbind(start, len, mode, nm, nr_bits+1, flags); 17051da177e4SLinus Torvalds } 17061da177e4SLinus Torvalds 1707b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid, 1708b6e9b0baSDominik Brodowski compat_ulong_t, maxnode, 1709b6e9b0baSDominik Brodowski const compat_ulong_t 
__user *, old_nodes, 1710b6e9b0baSDominik Brodowski const compat_ulong_t __user *, new_nodes) 1711b6e9b0baSDominik Brodowski { 1712b6e9b0baSDominik Brodowski unsigned long __user *old = NULL; 1713b6e9b0baSDominik Brodowski unsigned long __user *new = NULL; 1714b6e9b0baSDominik Brodowski nodemask_t tmp_mask; 1715b6e9b0baSDominik Brodowski unsigned long nr_bits; 1716b6e9b0baSDominik Brodowski unsigned long size; 1717b6e9b0baSDominik Brodowski 1718b6e9b0baSDominik Brodowski nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); 1719b6e9b0baSDominik Brodowski size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1720b6e9b0baSDominik Brodowski if (old_nodes) { 1721b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) 1722b6e9b0baSDominik Brodowski return -EFAULT; 1723b6e9b0baSDominik Brodowski old = compat_alloc_user_space(new_nodes ? size * 2 : size); 1724b6e9b0baSDominik Brodowski if (new_nodes) 1725b6e9b0baSDominik Brodowski new = old + size / sizeof(unsigned long); 1726b6e9b0baSDominik Brodowski if (copy_to_user(old, nodes_addr(tmp_mask), size)) 1727b6e9b0baSDominik Brodowski return -EFAULT; 1728b6e9b0baSDominik Brodowski } 1729b6e9b0baSDominik Brodowski if (new_nodes) { 1730b6e9b0baSDominik Brodowski if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) 1731b6e9b0baSDominik Brodowski return -EFAULT; 1732b6e9b0baSDominik Brodowski if (new == NULL) 1733b6e9b0baSDominik Brodowski new = compat_alloc_user_space(size); 1734b6e9b0baSDominik Brodowski if (copy_to_user(new, nodes_addr(tmp_mask), size)) 1735b6e9b0baSDominik Brodowski return -EFAULT; 1736b6e9b0baSDominik Brodowski } 1737b6e9b0baSDominik Brodowski return kernel_migrate_pages(pid, nr_bits + 1, old, new); 1738b6e9b0baSDominik Brodowski } 1739b6e9b0baSDominik Brodowski 1740b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */ 17411da177e4SLinus Torvalds 174220ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma) 174320ca87f2SLi Xinhai { 174420ca87f2SLi Xinhai if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 174520ca87f2SLi Xinhai return false; 174620ca87f2SLi Xinhai 174720ca87f2SLi Xinhai /* 174820ca87f2SLi Xinhai * DAX device mappings require predictable access latency, so avoid 174920ca87f2SLi Xinhai * incurring periodic faults. 175020ca87f2SLi Xinhai */ 175120ca87f2SLi Xinhai if (vma_is_dax(vma)) 175220ca87f2SLi Xinhai return false; 175320ca87f2SLi Xinhai 175420ca87f2SLi Xinhai if (is_vm_hugetlb_page(vma) && 175520ca87f2SLi Xinhai !hugepage_migration_supported(hstate_vma(vma))) 175620ca87f2SLi Xinhai return false; 175720ca87f2SLi Xinhai 175820ca87f2SLi Xinhai /* 175920ca87f2SLi Xinhai * Migration allocates pages in the highest zone. If we cannot 176020ca87f2SLi Xinhai * do so then migration (at least from node to node) is not 176120ca87f2SLi Xinhai * possible. 
176220ca87f2SLi Xinhai */ 176320ca87f2SLi Xinhai if (vma->vm_file && 176420ca87f2SLi Xinhai gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) 176520ca87f2SLi Xinhai < policy_zone) 176620ca87f2SLi Xinhai return false; 176720ca87f2SLi Xinhai return true; 176820ca87f2SLi Xinhai } 176920ca87f2SLi Xinhai 177074d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 177174d2c3a0SOleg Nesterov unsigned long addr) 17721da177e4SLinus Torvalds { 17738d90274bSOleg Nesterov struct mempolicy *pol = NULL; 17741da177e4SLinus Torvalds 17751da177e4SLinus Torvalds if (vma) { 1776480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 17778d90274bSOleg Nesterov pol = vma->vm_ops->get_policy(vma, addr); 177800442ad0SMel Gorman } else if (vma->vm_policy) { 17791da177e4SLinus Torvalds pol = vma->vm_policy; 178000442ad0SMel Gorman 178100442ad0SMel Gorman /* 178200442ad0SMel Gorman * shmem_alloc_page() passes MPOL_F_SHARED policy with 178300442ad0SMel Gorman * a pseudo vma whose vma->vm_ops=NULL. Take a reference 178400442ad0SMel Gorman * count on these policies which will be dropped by 178500442ad0SMel Gorman * mpol_cond_put() later 178600442ad0SMel Gorman */ 178700442ad0SMel Gorman if (mpol_needs_cond_ref(pol)) 178800442ad0SMel Gorman mpol_get(pol); 178900442ad0SMel Gorman } 17901da177e4SLinus Torvalds } 1791f15ca78eSOleg Nesterov 179274d2c3a0SOleg Nesterov return pol; 179374d2c3a0SOleg Nesterov } 179474d2c3a0SOleg Nesterov 179574d2c3a0SOleg Nesterov /* 1796dd6eecb9SOleg Nesterov * get_vma_policy(@vma, @addr) 179774d2c3a0SOleg Nesterov * @vma: virtual memory area whose policy is sought 179874d2c3a0SOleg Nesterov * @addr: address in @vma for shared policy lookup 179974d2c3a0SOleg Nesterov * 180074d2c3a0SOleg Nesterov * Returns effective policy for a VMA at specified address. 1801dd6eecb9SOleg Nesterov * Falls back to current->mempolicy or system default policy, as necessary. 180274d2c3a0SOleg Nesterov * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 180374d2c3a0SOleg Nesterov * count--added by the get_policy() vm_op, as appropriate--to protect against 180474d2c3a0SOleg Nesterov * freeing by another task. It is the caller's responsibility to free the 180574d2c3a0SOleg Nesterov * extra reference for shared policies. 
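 *
 * A typical calling pattern, as in the page allocation paths further
 * below, is roughly:
 *	pol = get_vma_policy(vma, addr);
 *	... use pol ...
 *	mpol_cond_put(pol);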
180674d2c3a0SOleg Nesterov */
1807ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1808dd6eecb9SOleg Nesterov unsigned long addr)
180974d2c3a0SOleg Nesterov {
181074d2c3a0SOleg Nesterov struct mempolicy *pol = __get_vma_policy(vma, addr);
181174d2c3a0SOleg Nesterov
18128d90274bSOleg Nesterov if (!pol)
1813dd6eecb9SOleg Nesterov pol = get_task_policy(current);
18148d90274bSOleg Nesterov
18151da177e4SLinus Torvalds return pol;
18161da177e4SLinus Torvalds }
18171da177e4SLinus Torvalds
18186b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1819fc314724SMel Gorman {
18206b6482bbSOleg Nesterov struct mempolicy *pol;
1821f15ca78eSOleg Nesterov
1822fc314724SMel Gorman if (vma->vm_ops && vma->vm_ops->get_policy) {
1823fc314724SMel Gorman bool ret = false;
1824fc314724SMel Gorman
1825fc314724SMel Gorman pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1826fc314724SMel Gorman if (pol && (pol->flags & MPOL_F_MOF))
1827fc314724SMel Gorman ret = true;
1828fc314724SMel Gorman mpol_cond_put(pol);
1829fc314724SMel Gorman
1830fc314724SMel Gorman return ret;
18318d90274bSOleg Nesterov }
18328d90274bSOleg Nesterov
1833fc314724SMel Gorman pol = vma->vm_policy;
18348d90274bSOleg Nesterov if (!pol)
18356b6482bbSOleg Nesterov pol = get_task_policy(current);
1836fc314724SMel Gorman
1837fc314724SMel Gorman return pol->flags & MPOL_F_MOF;
1838fc314724SMel Gorman }
1839fc314724SMel Gorman
1840d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1841d3eb1570SLai Jiangshan {
1842d3eb1570SLai Jiangshan enum zone_type dynamic_policy_zone = policy_zone;
1843d3eb1570SLai Jiangshan
1844d3eb1570SLai Jiangshan BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1845d3eb1570SLai Jiangshan
1846d3eb1570SLai Jiangshan /*
1847d3eb1570SLai Jiangshan * If policy->v.nodes has movable memory only,
1848d3eb1570SLai Jiangshan * we only apply the policy when gfp_zone(gfp) == ZONE_MOVABLE.
1849d3eb1570SLai Jiangshan *
1850d3eb1570SLai Jiangshan * policy->v.nodes has already been intersected with node_states[N_MEMORY],
1851f0953a1bSIngo Molnar * so if the following test fails, it implies
1852d3eb1570SLai Jiangshan * policy->v.nodes has movable memory only.
1853d3eb1570SLai Jiangshan */ 1854d3eb1570SLai Jiangshan if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) 1855d3eb1570SLai Jiangshan dynamic_policy_zone = ZONE_MOVABLE; 1856d3eb1570SLai Jiangshan 1857d3eb1570SLai Jiangshan return zone >= dynamic_policy_zone; 1858d3eb1570SLai Jiangshan } 1859d3eb1570SLai Jiangshan 186052cd3b07SLee Schermerhorn /* 186152cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 186252cd3b07SLee Schermerhorn * page allocation 186352cd3b07SLee Schermerhorn */ 18648ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 186519770b32SMel Gorman { 186619770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 186745c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 1868d3eb1570SLai Jiangshan apply_policy_zone(policy, gfp_zone(gfp)) && 186919770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 187019770b32SMel Gorman return &policy->v.nodes; 187119770b32SMel Gorman 187219770b32SMel Gorman return NULL; 187319770b32SMel Gorman } 187419770b32SMel Gorman 187504ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */ 1876f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) 18771da177e4SLinus Torvalds { 1878*7858d7bcSFeng Tang if (policy->mode == MPOL_PREFERRED) { 18791da177e4SLinus Torvalds nd = policy->v.preferred_node; 1880*7858d7bcSFeng Tang } else { 188119770b32SMel Gorman /* 18826d840958SMichal Hocko * __GFP_THISNODE shouldn't even be used with the bind policy 18836d840958SMichal Hocko * because we might easily break the expectation to stay on the 18846d840958SMichal Hocko * requested node and not break the policy. 188519770b32SMel Gorman */ 18866d840958SMichal Hocko WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 18871da177e4SLinus Torvalds } 18886d840958SMichal Hocko 188904ec6264SVlastimil Babka return nd; 18901da177e4SLinus Torvalds } 18911da177e4SLinus Torvalds 18921da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 18931da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 18941da177e4SLinus Torvalds { 189545816682SVlastimil Babka unsigned next; 18961da177e4SLinus Torvalds struct task_struct *me = current; 18971da177e4SLinus Torvalds 189845816682SVlastimil Babka next = next_node_in(me->il_prev, policy->v.nodes); 1899f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 190045816682SVlastimil Babka me->il_prev = next; 190145816682SVlastimil Babka return next; 19021da177e4SLinus Torvalds } 19031da177e4SLinus Torvalds 1904dc85da15SChristoph Lameter /* 1905dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1906dc85da15SChristoph Lameter * next slab entry. 
1907dc85da15SChristoph Lameter */ 19082a389610SDavid Rientjes unsigned int mempolicy_slab_node(void) 1909dc85da15SChristoph Lameter { 1910e7b691b0SAndi Kleen struct mempolicy *policy; 19112a389610SDavid Rientjes int node = numa_mem_id(); 1912e7b691b0SAndi Kleen 1913e7b691b0SAndi Kleen if (in_interrupt()) 19142a389610SDavid Rientjes return node; 1915e7b691b0SAndi Kleen 1916e7b691b0SAndi Kleen policy = current->mempolicy; 1917*7858d7bcSFeng Tang if (!policy) 19182a389610SDavid Rientjes return node; 1919765c4507SChristoph Lameter 1920bea904d5SLee Schermerhorn switch (policy->mode) { 1921bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1922bea904d5SLee Schermerhorn return policy->v.preferred_node; 1923bea904d5SLee Schermerhorn 1924dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1925dc85da15SChristoph Lameter return interleave_nodes(policy); 1926dc85da15SChristoph Lameter 1927dd1a239fSMel Gorman case MPOL_BIND: { 1928c33d6c06SMel Gorman struct zoneref *z; 1929c33d6c06SMel Gorman 1930dc85da15SChristoph Lameter /* 1931dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1932dc85da15SChristoph Lameter * first node. 1933dc85da15SChristoph Lameter */ 193419770b32SMel Gorman struct zonelist *zonelist; 193519770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1936c9634cf0SAneesh Kumar K.V zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1937c33d6c06SMel Gorman z = first_zones_zonelist(zonelist, highest_zoneidx, 1938c33d6c06SMel Gorman &policy->v.nodes); 1939c1093b74SPavel Tatashin return z->zone ? zone_to_nid(z->zone) : node; 1940dd1a239fSMel Gorman } 1941*7858d7bcSFeng Tang case MPOL_LOCAL: 1942*7858d7bcSFeng Tang return node; 1943dc85da15SChristoph Lameter 1944dc85da15SChristoph Lameter default: 1945bea904d5SLee Schermerhorn BUG(); 1946dc85da15SChristoph Lameter } 1947dc85da15SChristoph Lameter } 1948dc85da15SChristoph Lameter 1949fee83b3aSAndrew Morton /* 1950fee83b3aSAndrew Morton * Do static interleaving for a VMA with known offset @n. Returns the n'th 1951fee83b3aSAndrew Morton * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the 1952fee83b3aSAndrew Morton * number of present nodes. 
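 *
 * For example, with pol->v.nodes = {0,2,5} and n = 4, nnodes is 3, the
 * target offset is 4 % 3 == 1, and the node returned is 2 (the second
 * node in the mask).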
1953fee83b3aSAndrew Morton */ 195498c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) 19551da177e4SLinus Torvalds { 1956dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1957f5b087b5SDavid Rientjes unsigned target; 1958fee83b3aSAndrew Morton int i; 1959fee83b3aSAndrew Morton int nid; 19601da177e4SLinus Torvalds 1961f5b087b5SDavid Rientjes if (!nnodes) 1962f5b087b5SDavid Rientjes return numa_node_id(); 1963fee83b3aSAndrew Morton target = (unsigned int)n % nnodes; 1964fee83b3aSAndrew Morton nid = first_node(pol->v.nodes); 1965fee83b3aSAndrew Morton for (i = 0; i < target; i++) 1966dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 19671da177e4SLinus Torvalds return nid; 19681da177e4SLinus Torvalds } 19691da177e4SLinus Torvalds 19705da7ca86SChristoph Lameter /* Determine a node number for interleave */ 19715da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 19725da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 19735da7ca86SChristoph Lameter { 19745da7ca86SChristoph Lameter if (vma) { 19755da7ca86SChristoph Lameter unsigned long off; 19765da7ca86SChristoph Lameter 19773b98b087SNishanth Aravamudan /* 19783b98b087SNishanth Aravamudan * for small pages, there is no difference between 19793b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 19803b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 19813b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 19823b98b087SNishanth Aravamudan * a useful offset. 19833b98b087SNishanth Aravamudan */ 19843b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 19853b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 19865da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 198798c70baaSLaurent Dufour return offset_il_node(pol, off); 19885da7ca86SChristoph Lameter } else 19895da7ca86SChristoph Lameter return interleave_nodes(pol); 19905da7ca86SChristoph Lameter } 19915da7ca86SChristoph Lameter 199200ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1993480eccf9SLee Schermerhorn /* 199404ec6264SVlastimil Babka * huge_node(@vma, @addr, @gfp_flags, @mpol) 1995b46e14acSFabian Frederick * @vma: virtual memory area whose policy is sought 1996b46e14acSFabian Frederick * @addr: address in @vma for shared policy lookup and interleave policy 1997b46e14acSFabian Frederick * @gfp_flags: for requested zone 1998b46e14acSFabian Frederick * @mpol: pointer to mempolicy pointer for reference counted mempolicy 1999b46e14acSFabian Frederick * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask 2000480eccf9SLee Schermerhorn * 200104ec6264SVlastimil Babka * Returns a nid suitable for a huge page allocation and a pointer 200252cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 200352cd3b07SLee Schermerhorn * If the effective policy is 'BIND, returns a pointer to the mempolicy's 200452cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist. 
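 *
 * A rough sketch of the expected calling pattern, as in the hugetlb
 * allocation path, is:
 *	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *	... allocate a huge page from nid, filtered by nodemask ...
 *	mpol_cond_put(mpol);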
2005c0ff7453SMiao Xie * 2006d26914d1SMel Gorman * Must be protected by read_mems_allowed_begin() 2007480eccf9SLee Schermerhorn */ 200804ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, 200904ec6264SVlastimil Babka struct mempolicy **mpol, nodemask_t **nodemask) 20105da7ca86SChristoph Lameter { 201104ec6264SVlastimil Babka int nid; 20125da7ca86SChristoph Lameter 2013dd6eecb9SOleg Nesterov *mpol = get_vma_policy(vma, addr); 201419770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */ 20155da7ca86SChristoph Lameter 201652cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 201704ec6264SVlastimil Babka nid = interleave_nid(*mpol, vma, addr, 201804ec6264SVlastimil Babka huge_page_shift(hstate_vma(vma))); 201952cd3b07SLee Schermerhorn } else { 202004ec6264SVlastimil Babka nid = policy_node(gfp_flags, *mpol, numa_node_id()); 202152cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND) 202252cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes; 2023480eccf9SLee Schermerhorn } 202404ec6264SVlastimil Babka return nid; 20255da7ca86SChristoph Lameter } 202606808b08SLee Schermerhorn 202706808b08SLee Schermerhorn /* 202806808b08SLee Schermerhorn * init_nodemask_of_mempolicy 202906808b08SLee Schermerhorn * 203006808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 203106808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 203206808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 203306808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 203406808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 203506808b08SLee Schermerhorn * of non-default mempolicy. 203606808b08SLee Schermerhorn * 203706808b08SLee Schermerhorn * We don't bother with reference counting the mempolicy [mpol_get/put] 203806808b08SLee Schermerhorn * because the current task is examining it's own mempolicy and a task's 203906808b08SLee Schermerhorn * mempolicy is only ever changed by the task itself. 204006808b08SLee Schermerhorn * 204106808b08SLee Schermerhorn * N.B., it is the caller's responsibility to free a returned nodemask. 
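 *
 * For example, a task running with MPOL_INTERLEAVE across nodes 0-1
 * gets *mask set to {0,1} and a 'true' return, while a task with no
 * mempolicy gets 'false' and *mask is left untouched.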
204206808b08SLee Schermerhorn */ 204306808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask) 204406808b08SLee Schermerhorn { 204506808b08SLee Schermerhorn struct mempolicy *mempolicy; 204606808b08SLee Schermerhorn int nid; 204706808b08SLee Schermerhorn 204806808b08SLee Schermerhorn if (!(mask && current->mempolicy)) 204906808b08SLee Schermerhorn return false; 205006808b08SLee Schermerhorn 2051c0ff7453SMiao Xie task_lock(current); 205206808b08SLee Schermerhorn mempolicy = current->mempolicy; 205306808b08SLee Schermerhorn switch (mempolicy->mode) { 205406808b08SLee Schermerhorn case MPOL_PREFERRED: 205506808b08SLee Schermerhorn nid = mempolicy->v.preferred_node; 205606808b08SLee Schermerhorn init_nodemask_of_node(mask, nid); 205706808b08SLee Schermerhorn break; 205806808b08SLee Schermerhorn 205906808b08SLee Schermerhorn case MPOL_BIND: 206006808b08SLee Schermerhorn case MPOL_INTERLEAVE: 206106808b08SLee Schermerhorn *mask = mempolicy->v.nodes; 206206808b08SLee Schermerhorn break; 206306808b08SLee Schermerhorn 2064*7858d7bcSFeng Tang case MPOL_LOCAL: 2065*7858d7bcSFeng Tang nid = numa_node_id(); 2066*7858d7bcSFeng Tang init_nodemask_of_node(mask, nid); 2067*7858d7bcSFeng Tang break; 2068*7858d7bcSFeng Tang 206906808b08SLee Schermerhorn default: 207006808b08SLee Schermerhorn BUG(); 207106808b08SLee Schermerhorn } 2072c0ff7453SMiao Xie task_unlock(current); 207306808b08SLee Schermerhorn 207406808b08SLee Schermerhorn return true; 207506808b08SLee Schermerhorn } 207600ac59adSChen, Kenneth W #endif 20775da7ca86SChristoph Lameter 20786f48d0ebSDavid Rientjes /* 2079b26e517aSFeng Tang * mempolicy_in_oom_domain 20806f48d0ebSDavid Rientjes * 2081b26e517aSFeng Tang * If tsk's mempolicy is "bind", check for intersection between mask and 2082b26e517aSFeng Tang * the policy nodemask. Otherwise, return true for all other policies 2083b26e517aSFeng Tang * including "interleave", as a tsk with "interleave" policy may have 2084b26e517aSFeng Tang * memory allocated from all nodes in system. 20856f48d0ebSDavid Rientjes * 20866f48d0ebSDavid Rientjes * Takes task_lock(tsk) to prevent freeing of its mempolicy. 20876f48d0ebSDavid Rientjes */ 2088b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk, 20896f48d0ebSDavid Rientjes const nodemask_t *mask) 20906f48d0ebSDavid Rientjes { 20916f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 20926f48d0ebSDavid Rientjes bool ret = true; 20936f48d0ebSDavid Rientjes 20946f48d0ebSDavid Rientjes if (!mask) 20956f48d0ebSDavid Rientjes return ret; 2096b26e517aSFeng Tang 20976f48d0ebSDavid Rientjes task_lock(tsk); 20986f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 2099b26e517aSFeng Tang if (mempolicy && mempolicy->mode == MPOL_BIND) 21006f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask); 21016f48d0ebSDavid Rientjes task_unlock(tsk); 2102b26e517aSFeng Tang 21036f48d0ebSDavid Rientjes return ret; 21046f48d0ebSDavid Rientjes } 21056f48d0ebSDavid Rientjes 21061da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 21071da177e4SLinus Torvalds Own path because it needs to do special accounting. 
*/ 2108662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 2109662f3a0bSAndi Kleen unsigned nid) 21101da177e4SLinus Torvalds { 21111da177e4SLinus Torvalds struct page *page; 21121da177e4SLinus Torvalds 211384172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, nid, NULL); 21144518085eSKemi Wang /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ 21154518085eSKemi Wang if (!static_branch_likely(&vm_numa_stat_key)) 21164518085eSKemi Wang return page; 2117de55c8b2SAndrey Ryabinin if (page && page_to_nid(page) == nid) { 2118de55c8b2SAndrey Ryabinin preempt_disable(); 2119f19298b9SMel Gorman __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); 2120de55c8b2SAndrey Ryabinin preempt_enable(); 2121de55c8b2SAndrey Ryabinin } 21221da177e4SLinus Torvalds return page; 21231da177e4SLinus Torvalds } 21241da177e4SLinus Torvalds 21251da177e4SLinus Torvalds /** 21260bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 2127eb350739SMatthew Wilcox (Oracle) * @gfp: GFP flags. 21280bbbc0b3SAndrea Arcangeli * @order: Order of the GFP allocation. 21291da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 2130eb350739SMatthew Wilcox (Oracle) * @addr: Virtual address of the allocation. Must be inside @vma. 2131be97a41bSVlastimil Babka * @node: Which node to prefer for allocation (modulo policy). 2132eb350739SMatthew Wilcox (Oracle) * @hugepage: For hugepages try only the preferred node if possible. 21331da177e4SLinus Torvalds * 2134eb350739SMatthew Wilcox (Oracle) * Allocate a page for a specific address in @vma, using the appropriate 2135eb350739SMatthew Wilcox (Oracle) * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock 2136eb350739SMatthew Wilcox (Oracle) * of the mm_struct of the VMA to prevent it from going away. Should be 2137eb350739SMatthew Wilcox (Oracle) * used for all allocations for pages that will be mapped into user space. 2138eb350739SMatthew Wilcox (Oracle) * 2139eb350739SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
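 *
 * A minimal usage sketch for an order-0 user page (mirroring the
 * alloc_page_vma() wrapper) is:
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id(), false);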
21401da177e4SLinus Torvalds */ 2141eb350739SMatthew Wilcox (Oracle) struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 214219deb769SDavid Rientjes unsigned long addr, int node, bool hugepage) 21431da177e4SLinus Torvalds { 2144cc9a6c87SMel Gorman struct mempolicy *pol; 2145c0ff7453SMiao Xie struct page *page; 214604ec6264SVlastimil Babka int preferred_nid; 2147be97a41bSVlastimil Babka nodemask_t *nmask; 21481da177e4SLinus Torvalds 2149dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2150cc9a6c87SMel Gorman 2151be97a41bSVlastimil Babka if (pol->mode == MPOL_INTERLEAVE) { 21521da177e4SLinus Torvalds unsigned nid; 21535da7ca86SChristoph Lameter 21548eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 215552cd3b07SLee Schermerhorn mpol_cond_put(pol); 21560bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 2157be97a41bSVlastimil Babka goto out; 21581da177e4SLinus Torvalds } 21591da177e4SLinus Torvalds 216019deb769SDavid Rientjes if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 216119deb769SDavid Rientjes int hpage_node = node; 216219deb769SDavid Rientjes 216319deb769SDavid Rientjes /* 216419deb769SDavid Rientjes * For hugepage allocation and non-interleave policy which 216519deb769SDavid Rientjes * allows the current node (or other explicitly preferred 216619deb769SDavid Rientjes * node) we only try to allocate from the current/preferred 216719deb769SDavid Rientjes * node and don't fall back to other nodes, as the cost of 216819deb769SDavid Rientjes * remote accesses would likely offset THP benefits. 216919deb769SDavid Rientjes * 217019deb769SDavid Rientjes * If the policy is interleave, or does not allow the current 217119deb769SDavid Rientjes * node in its nodemask, we allocate the standard way. 217219deb769SDavid Rientjes */ 2173*7858d7bcSFeng Tang if (pol->mode == MPOL_PREFERRED) 217419deb769SDavid Rientjes hpage_node = pol->v.preferred_node; 217519deb769SDavid Rientjes 217619deb769SDavid Rientjes nmask = policy_nodemask(gfp, pol); 217719deb769SDavid Rientjes if (!nmask || node_isset(hpage_node, *nmask)) { 217819deb769SDavid Rientjes mpol_cond_put(pol); 2179cc638f32SVlastimil Babka /* 2180cc638f32SVlastimil Babka * First, try to allocate THP only on local node, but 2181cc638f32SVlastimil Babka * don't reclaim unnecessarily, just compact. 2182cc638f32SVlastimil Babka */ 218319deb769SDavid Rientjes page = __alloc_pages_node(hpage_node, 2184cc638f32SVlastimil Babka gfp | __GFP_THISNODE | __GFP_NORETRY, order); 218576e654ccSDavid Rientjes 218676e654ccSDavid Rientjes /* 218776e654ccSDavid Rientjes * If hugepage allocations are configured to always 218876e654ccSDavid Rientjes * synchronous compact or the vma has been madvised 218976e654ccSDavid Rientjes * to prefer hugepage backing, retry allowing remote 2190cc638f32SVlastimil Babka * memory with both reclaim and compact as well. 
219176e654ccSDavid Rientjes */ 219276e654ccSDavid Rientjes if (!page && (gfp & __GFP_DIRECT_RECLAIM)) 219376e654ccSDavid Rientjes page = __alloc_pages_node(hpage_node, 2194cc638f32SVlastimil Babka gfp, order); 219576e654ccSDavid Rientjes 219619deb769SDavid Rientjes goto out; 219719deb769SDavid Rientjes } 219819deb769SDavid Rientjes } 219919deb769SDavid Rientjes 2200077fcf11SAneesh Kumar K.V nmask = policy_nodemask(gfp, pol); 220104ec6264SVlastimil Babka preferred_nid = policy_node(gfp, pol, node); 220284172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order, preferred_nid, nmask); 2203d51e9894SVlastimil Babka mpol_cond_put(pol); 2204be97a41bSVlastimil Babka out: 2205077fcf11SAneesh Kumar K.V return page; 2206077fcf11SAneesh Kumar K.V } 220769262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma); 2208077fcf11SAneesh Kumar K.V 22091da177e4SLinus Torvalds /** 2210d7f946d0SMatthew Wilcox (Oracle) * alloc_pages - Allocate pages. 22116421ec76SMatthew Wilcox (Oracle) * @gfp: GFP flags. 22126421ec76SMatthew Wilcox (Oracle) * @order: Power of two of number of pages to allocate. 22131da177e4SLinus Torvalds * 22146421ec76SMatthew Wilcox (Oracle) * Allocate 1 << @order contiguous pages. The physical address of the 22156421ec76SMatthew Wilcox (Oracle) * first page is naturally aligned (eg an order-3 allocation will be aligned 22166421ec76SMatthew Wilcox (Oracle) * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current 22176421ec76SMatthew Wilcox (Oracle) * process is honoured when in process context. 22181da177e4SLinus Torvalds * 22196421ec76SMatthew Wilcox (Oracle) * Context: Can be called from any context, providing the appropriate GFP 22206421ec76SMatthew Wilcox (Oracle) * flags are used. 22216421ec76SMatthew Wilcox (Oracle) * Return: The page on success or NULL if allocation fails. 
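 *
 * A minimal usage sketch:
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	if (page) {
 *		void *buf = page_address(page);
 *		... use the four contiguous pages at buf ...
 *		__free_pages(page, 2);
 *	}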
22221da177e4SLinus Torvalds */
2223d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order)
22241da177e4SLinus Torvalds {
22258d90274bSOleg Nesterov struct mempolicy *pol = &default_policy;
2226c0ff7453SMiao Xie struct page *page;
22271da177e4SLinus Torvalds
22288d90274bSOleg Nesterov if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22298d90274bSOleg Nesterov pol = get_task_policy(current);
223052cd3b07SLee Schermerhorn
223152cd3b07SLee Schermerhorn /*
223252cd3b07SLee Schermerhorn * No reference counting needed for current->mempolicy
223352cd3b07SLee Schermerhorn * nor system default_policy
223452cd3b07SLee Schermerhorn */
223545c4745aSLee Schermerhorn if (pol->mode == MPOL_INTERLEAVE)
2236c0ff7453SMiao Xie page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2237c0ff7453SMiao Xie else
223884172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp, order,
223904ec6264SVlastimil Babka policy_node(gfp, pol, numa_node_id()),
22405c4b4be3SAndi Kleen policy_nodemask(gfp, pol));
2241cc9a6c87SMel Gorman
2242c0ff7453SMiao Xie return page;
22431da177e4SLinus Torvalds }
2244d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages);
22451da177e4SLinus Torvalds
2246ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2247ef0855d3SOleg Nesterov {
2248ef0855d3SOleg Nesterov struct mempolicy *pol = mpol_dup(vma_policy(src));
2249ef0855d3SOleg Nesterov
2250ef0855d3SOleg Nesterov if (IS_ERR(pol))
2251ef0855d3SOleg Nesterov return PTR_ERR(pol);
2252ef0855d3SOleg Nesterov dst->vm_policy = pol;
2253ef0855d3SOleg Nesterov return 0;
2254ef0855d3SOleg Nesterov }
2255ef0855d3SOleg Nesterov
22564225399aSPaul Jackson /*
2257846a16bfSLee Schermerhorn * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
22584225399aSPaul Jackson * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
22594225399aSPaul Jackson * with the mems_allowed returned by cpuset_mems_allowed(). This
22604225399aSPaul Jackson * keeps mempolicies cpuset relative after its cpuset moves. See
22614225399aSPaul Jackson * further kernel/cpuset.c update_nodemask().
2262708c1bbcSMiao Xie *
2263708c1bbcSMiao Xie * current's mempolicy may be rebound by another task (the task that changes
2264708c1bbcSMiao Xie * cpuset's mems), so we needn't do rebind work for the current task.
22654225399aSPaul Jackson */ 22664225399aSPaul Jackson 2267846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */ 2268846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old) 22691da177e4SLinus Torvalds { 22701da177e4SLinus Torvalds struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 22711da177e4SLinus Torvalds 22721da177e4SLinus Torvalds if (!new) 22731da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 2274708c1bbcSMiao Xie 2275708c1bbcSMiao Xie /* task's mempolicy is protected by alloc_lock */ 2276708c1bbcSMiao Xie if (old == current->mempolicy) { 2277708c1bbcSMiao Xie task_lock(current); 2278708c1bbcSMiao Xie *new = *old; 2279708c1bbcSMiao Xie task_unlock(current); 2280708c1bbcSMiao Xie } else 2281708c1bbcSMiao Xie *new = *old; 2282708c1bbcSMiao Xie 22834225399aSPaul Jackson if (current_cpuset_is_being_rebound()) { 22844225399aSPaul Jackson nodemask_t mems = cpuset_mems_allowed(current); 2285213980c0SVlastimil Babka mpol_rebind_policy(new, &mems); 22864225399aSPaul Jackson } 22871da177e4SLinus Torvalds atomic_set(&new->refcnt, 1); 22881da177e4SLinus Torvalds return new; 22891da177e4SLinus Torvalds } 22901da177e4SLinus Torvalds 22911da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2292fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 22931da177e4SLinus Torvalds { 22941da177e4SLinus Torvalds if (!a || !b) 2295fcfb4dccSKOSAKI Motohiro return false; 229645c4745aSLee Schermerhorn if (a->mode != b->mode) 2297fcfb4dccSKOSAKI Motohiro return false; 229819800502SBob Liu if (a->flags != b->flags) 2299fcfb4dccSKOSAKI Motohiro return false; 230019800502SBob Liu if (mpol_store_user_nodemask(a)) 230119800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2302fcfb4dccSKOSAKI Motohiro return false; 230319800502SBob Liu 230445c4745aSLee Schermerhorn switch (a->mode) { 230519770b32SMel Gorman case MPOL_BIND: 23061da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2307fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 23081da177e4SLinus Torvalds case MPOL_PREFERRED: 230975719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 2310*7858d7bcSFeng Tang case MPOL_LOCAL: 2311*7858d7bcSFeng Tang return true; 23121da177e4SLinus Torvalds default: 23131da177e4SLinus Torvalds BUG(); 2314fcfb4dccSKOSAKI Motohiro return false; 23151da177e4SLinus Torvalds } 23161da177e4SLinus Torvalds } 23171da177e4SLinus Torvalds 23181da177e4SLinus Torvalds /* 23191da177e4SLinus Torvalds * Shared memory backing store policy support. 23201da177e4SLinus Torvalds * 23211da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 23221da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 23234a8c7bb5SNathan Zimmer * They are protected by the sp->lock rwlock, which should be held 23241da177e4SLinus Torvalds * for any accesses to the tree. 23251da177e4SLinus Torvalds */ 23261da177e4SLinus Torvalds 23274a8c7bb5SNathan Zimmer /* 23284a8c7bb5SNathan Zimmer * lookup first element intersecting start-end. 
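 * (illustrative example: with ranges [0,2) and [4,8) in the tree,
 * sp_lookup(sp, 1, 6) walks back from [4,8) and returns the [0,2) node,
 * the lowest entry that intersects [1,6)).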
Caller holds sp->lock for 23294a8c7bb5SNathan Zimmer * reading or for writing 23304a8c7bb5SNathan Zimmer */ 23311da177e4SLinus Torvalds static struct sp_node * 23321da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 23331da177e4SLinus Torvalds { 23341da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 23351da177e4SLinus Torvalds 23361da177e4SLinus Torvalds while (n) { 23371da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 23381da177e4SLinus Torvalds 23391da177e4SLinus Torvalds if (start >= p->end) 23401da177e4SLinus Torvalds n = n->rb_right; 23411da177e4SLinus Torvalds else if (end <= p->start) 23421da177e4SLinus Torvalds n = n->rb_left; 23431da177e4SLinus Torvalds else 23441da177e4SLinus Torvalds break; 23451da177e4SLinus Torvalds } 23461da177e4SLinus Torvalds if (!n) 23471da177e4SLinus Torvalds return NULL; 23481da177e4SLinus Torvalds for (;;) { 23491da177e4SLinus Torvalds struct sp_node *w = NULL; 23501da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 23511da177e4SLinus Torvalds if (!prev) 23521da177e4SLinus Torvalds break; 23531da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 23541da177e4SLinus Torvalds if (w->end <= start) 23551da177e4SLinus Torvalds break; 23561da177e4SLinus Torvalds n = prev; 23571da177e4SLinus Torvalds } 23581da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 23591da177e4SLinus Torvalds } 23601da177e4SLinus Torvalds 23614a8c7bb5SNathan Zimmer /* 23624a8c7bb5SNathan Zimmer * Insert a new shared policy into the list. Caller holds sp->lock for 23634a8c7bb5SNathan Zimmer * writing. 23644a8c7bb5SNathan Zimmer */ 23651da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 23661da177e4SLinus Torvalds { 23671da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 23681da177e4SLinus Torvalds struct rb_node *parent = NULL; 23691da177e4SLinus Torvalds struct sp_node *nd; 23701da177e4SLinus Torvalds 23711da177e4SLinus Torvalds while (*p) { 23721da177e4SLinus Torvalds parent = *p; 23731da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 23741da177e4SLinus Torvalds if (new->start < nd->start) 23751da177e4SLinus Torvalds p = &(*p)->rb_left; 23761da177e4SLinus Torvalds else if (new->end > nd->end) 23771da177e4SLinus Torvalds p = &(*p)->rb_right; 23781da177e4SLinus Torvalds else 23791da177e4SLinus Torvalds BUG(); 23801da177e4SLinus Torvalds } 23811da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 23821da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2383140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 238445c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 23851da177e4SLinus Torvalds } 23861da177e4SLinus Torvalds 23871da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 23881da177e4SLinus Torvalds struct mempolicy * 23891da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 23901da177e4SLinus Torvalds { 23911da177e4SLinus Torvalds struct mempolicy *pol = NULL; 23921da177e4SLinus Torvalds struct sp_node *sn; 23931da177e4SLinus Torvalds 23941da177e4SLinus Torvalds if (!sp->root.rb_node) 23951da177e4SLinus Torvalds return NULL; 23964a8c7bb5SNathan Zimmer read_lock(&sp->lock); 23971da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 23981da177e4SLinus Torvalds if (sn) { 23991da177e4SLinus Torvalds mpol_get(sn->policy); 24001da177e4SLinus Torvalds pol = sn->policy; 24011da177e4SLinus Torvalds } 24024a8c7bb5SNathan Zimmer read_unlock(&sp->lock); 24031da177e4SLinus Torvalds return pol; 24041da177e4SLinus Torvalds } 24051da177e4SLinus Torvalds 240663f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n) 240763f74ca2SKOSAKI Motohiro { 240863f74ca2SKOSAKI Motohiro mpol_put(n->policy); 240963f74ca2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 241063f74ca2SKOSAKI Motohiro } 241163f74ca2SKOSAKI Motohiro 2412771fb4d8SLee Schermerhorn /** 2413771fb4d8SLee Schermerhorn * mpol_misplaced - check whether current page node is valid in policy 2414771fb4d8SLee Schermerhorn * 2415b46e14acSFabian Frederick * @page: page to be checked 2416b46e14acSFabian Frederick * @vma: vm area where page mapped 2417b46e14acSFabian Frederick * @addr: virtual address where page mapped 2418771fb4d8SLee Schermerhorn * 2419771fb4d8SLee Schermerhorn * Lookup current policy node id for vma,addr and "compare to" page's 24205f076944SMatthew Wilcox (Oracle) * node id. Policy determination "mimics" alloc_page_vma(). 2421771fb4d8SLee Schermerhorn * Called from fault path where we know the vma and faulting address. 24225f076944SMatthew Wilcox (Oracle) * 24235f076944SMatthew Wilcox (Oracle) * Return: -1 if the page is in a node that is valid for this policy, or a 24245f076944SMatthew Wilcox (Oracle) * suitable node ID to allocate a replacement page from. 
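 *
 * A hypothetical caller in the NUMA hinting fault path would use the
 * result roughly as follows (sketch only; the real fault handler also
 * isolates the page and deals with migration failure):
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != -1)
 *		migrate_misplaced_page(page, vma, target_nid);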
2425771fb4d8SLee Schermerhorn */ 2426771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) 2427771fb4d8SLee Schermerhorn { 2428771fb4d8SLee Schermerhorn struct mempolicy *pol; 2429c33d6c06SMel Gorman struct zoneref *z; 2430771fb4d8SLee Schermerhorn int curnid = page_to_nid(page); 2431771fb4d8SLee Schermerhorn unsigned long pgoff; 243290572890SPeter Zijlstra int thiscpu = raw_smp_processor_id(); 243390572890SPeter Zijlstra int thisnid = cpu_to_node(thiscpu); 243498fa15f3SAnshuman Khandual int polnid = NUMA_NO_NODE; 2435771fb4d8SLee Schermerhorn int ret = -1; 2436771fb4d8SLee Schermerhorn 2437dd6eecb9SOleg Nesterov pol = get_vma_policy(vma, addr); 2438771fb4d8SLee Schermerhorn if (!(pol->flags & MPOL_F_MOF)) 2439771fb4d8SLee Schermerhorn goto out; 2440771fb4d8SLee Schermerhorn 2441771fb4d8SLee Schermerhorn switch (pol->mode) { 2442771fb4d8SLee Schermerhorn case MPOL_INTERLEAVE: 2443771fb4d8SLee Schermerhorn pgoff = vma->vm_pgoff; 2444771fb4d8SLee Schermerhorn pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; 244598c70baaSLaurent Dufour polnid = offset_il_node(pol, pgoff); 2446771fb4d8SLee Schermerhorn break; 2447771fb4d8SLee Schermerhorn 2448771fb4d8SLee Schermerhorn case MPOL_PREFERRED: 2449771fb4d8SLee Schermerhorn polnid = pol->v.preferred_node; 2450771fb4d8SLee Schermerhorn break; 2451771fb4d8SLee Schermerhorn 2452*7858d7bcSFeng Tang case MPOL_LOCAL: 2453*7858d7bcSFeng Tang polnid = numa_node_id(); 2454*7858d7bcSFeng Tang break; 2455*7858d7bcSFeng Tang 2456771fb4d8SLee Schermerhorn case MPOL_BIND: 2457bda420b9SHuang Ying /* Optimize placement among multiple nodes via NUMA balancing */ 2458bda420b9SHuang Ying if (pol->flags & MPOL_F_MORON) { 2459bda420b9SHuang Ying if (node_isset(thisnid, pol->v.nodes)) 2460bda420b9SHuang Ying break; 2461bda420b9SHuang Ying goto out; 2462bda420b9SHuang Ying } 2463c33d6c06SMel Gorman 2464771fb4d8SLee Schermerhorn /* 2465771fb4d8SLee Schermerhorn * allows binding to multiple nodes. 2466771fb4d8SLee Schermerhorn * use current page if in policy nodemask, 2467771fb4d8SLee Schermerhorn * else select nearest allowed node, if any. 2468771fb4d8SLee Schermerhorn * If no allowed nodes, use current [!misplaced]. 
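 * (e.g. policy nodes = {1,3} and the page sits on node 0: node 0 is not
 * in the mask, so polnid becomes whichever of nodes 1 and 3 is nearest
 * in the local node's zonelist)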
2469771fb4d8SLee Schermerhorn */ 2470771fb4d8SLee Schermerhorn if (node_isset(curnid, pol->v.nodes)) 2471771fb4d8SLee Schermerhorn goto out; 2472c33d6c06SMel Gorman z = first_zones_zonelist( 2473771fb4d8SLee Schermerhorn node_zonelist(numa_node_id(), GFP_HIGHUSER), 2474771fb4d8SLee Schermerhorn gfp_zone(GFP_HIGHUSER), 2475c33d6c06SMel Gorman &pol->v.nodes); 2476c1093b74SPavel Tatashin polnid = zone_to_nid(z->zone); 2477771fb4d8SLee Schermerhorn break; 2478771fb4d8SLee Schermerhorn 2479771fb4d8SLee Schermerhorn default: 2480771fb4d8SLee Schermerhorn BUG(); 2481771fb4d8SLee Schermerhorn } 24825606e387SMel Gorman 24835606e387SMel Gorman /* Migrate the page towards the node whose CPU is referencing it */ 2484e42c8ff2SMel Gorman if (pol->flags & MPOL_F_MORON) { 248590572890SPeter Zijlstra polnid = thisnid; 24865606e387SMel Gorman 248710f39042SRik van Riel if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) 2488de1c9ce6SRik van Riel goto out; 2489de1c9ce6SRik van Riel } 2490e42c8ff2SMel Gorman 2491771fb4d8SLee Schermerhorn if (curnid != polnid) 2492771fb4d8SLee Schermerhorn ret = polnid; 2493771fb4d8SLee Schermerhorn out: 2494771fb4d8SLee Schermerhorn mpol_cond_put(pol); 2495771fb4d8SLee Schermerhorn 2496771fb4d8SLee Schermerhorn return ret; 2497771fb4d8SLee Schermerhorn } 2498771fb4d8SLee Schermerhorn 2499c11600e4SDavid Rientjes /* 2500c11600e4SDavid Rientjes * Drop the (possibly final) reference to task->mempolicy. It needs to be 2501c11600e4SDavid Rientjes * dropped after task->mempolicy is set to NULL so that any allocation done as 2502c11600e4SDavid Rientjes * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed 2503c11600e4SDavid Rientjes * policy. 2504c11600e4SDavid Rientjes */ 2505c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task) 2506c11600e4SDavid Rientjes { 2507c11600e4SDavid Rientjes struct mempolicy *pol; 2508c11600e4SDavid Rientjes 2509c11600e4SDavid Rientjes task_lock(task); 2510c11600e4SDavid Rientjes pol = task->mempolicy; 2511c11600e4SDavid Rientjes task->mempolicy = NULL; 2512c11600e4SDavid Rientjes task_unlock(task); 2513c11600e4SDavid Rientjes mpol_put(pol); 2514c11600e4SDavid Rientjes } 2515c11600e4SDavid Rientjes 25161da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 25171da177e4SLinus Torvalds { 2518140d5a49SPaul Mundt pr_debug("deleting %lx-%lx\n", n->start, n->end); 25191da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 252063f74ca2SKOSAKI Motohiro sp_free(n); 25211da177e4SLinus Torvalds } 25221da177e4SLinus Torvalds 252342288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start, 252342288fe3SMel Gorman unsigned long end, struct mempolicy *pol) 252542288fe3SMel Gorman { 252642288fe3SMel Gorman node->start = start; 252742288fe3SMel Gorman node->end = end; 252842288fe3SMel Gorman node->policy = pol; 252942288fe3SMel Gorman } 253042288fe3SMel Gorman 2531dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2532dbcb0f19SAdrian Bunk struct mempolicy *pol) 25331da177e4SLinus Torvalds { 2534869833f2SKOSAKI Motohiro struct sp_node *n; 2535869833f2SKOSAKI Motohiro struct mempolicy *newpol; 25361da177e4SLinus Torvalds 2537869833f2SKOSAKI Motohiro n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 25381da177e4SLinus Torvalds if (!n) 25391da177e4SLinus Torvalds return NULL; 2540869833f2SKOSAKI Motohiro 2541869833f2SKOSAKI Motohiro newpol = mpol_dup(pol); 2542869833f2SKOSAKI Motohiro if (IS_ERR(newpol)) {
2543869833f2SKOSAKI Motohiro kmem_cache_free(sn_cache, n); 2544869833f2SKOSAKI Motohiro return NULL; 2545869833f2SKOSAKI Motohiro } 2546869833f2SKOSAKI Motohiro newpol->flags |= MPOL_F_SHARED; 254742288fe3SMel Gorman sp_node_init(n, start, end, newpol); 2548869833f2SKOSAKI Motohiro 25491da177e4SLinus Torvalds return n; 25501da177e4SLinus Torvalds } 25511da177e4SLinus Torvalds 25521da177e4SLinus Torvalds /* Replace a policy range. */ 25531da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 25541da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 25551da177e4SLinus Torvalds { 2556b22d127aSMel Gorman struct sp_node *n; 255742288fe3SMel Gorman struct sp_node *n_new = NULL; 255842288fe3SMel Gorman struct mempolicy *mpol_new = NULL; 2559b22d127aSMel Gorman int ret = 0; 25601da177e4SLinus Torvalds 256142288fe3SMel Gorman restart: 25624a8c7bb5SNathan Zimmer write_lock(&sp->lock); 25631da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 25641da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 25651da177e4SLinus Torvalds while (n && n->start < end) { 25661da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 25671da177e4SLinus Torvalds if (n->start >= start) { 25681da177e4SLinus Torvalds if (n->end <= end) 25691da177e4SLinus Torvalds sp_delete(sp, n); 25701da177e4SLinus Torvalds else 25711da177e4SLinus Torvalds n->start = end; 25721da177e4SLinus Torvalds } else { 25731da177e4SLinus Torvalds /* Old policy spanning whole new range. */ 25741da177e4SLinus Torvalds if (n->end > end) { 257542288fe3SMel Gorman if (!n_new) 257642288fe3SMel Gorman goto alloc_new; 257742288fe3SMel Gorman 257842288fe3SMel Gorman *mpol_new = *n->policy; 257942288fe3SMel Gorman atomic_set(&mpol_new->refcnt, 1); 25807880639cSKOSAKI Motohiro sp_node_init(n_new, end, n->end, mpol_new); 25811da177e4SLinus Torvalds n->end = start; 25825ca39575SHillf Danton sp_insert(sp, n_new); 258342288fe3SMel Gorman n_new = NULL; 258442288fe3SMel Gorman mpol_new = NULL; 25851da177e4SLinus Torvalds break; 25861da177e4SLinus Torvalds } else 25871da177e4SLinus Torvalds n->end = start; 25881da177e4SLinus Torvalds } 25891da177e4SLinus Torvalds if (!next) 25901da177e4SLinus Torvalds break; 25911da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 25921da177e4SLinus Torvalds } 25931da177e4SLinus Torvalds if (new) 25941da177e4SLinus Torvalds sp_insert(sp, new); 25954a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 259642288fe3SMel Gorman ret = 0; 259742288fe3SMel Gorman 259842288fe3SMel Gorman err_out: 259942288fe3SMel Gorman if (mpol_new) 260042288fe3SMel Gorman mpol_put(mpol_new); 260142288fe3SMel Gorman if (n_new) 260242288fe3SMel Gorman kmem_cache_free(sn_cache, n_new); 260342288fe3SMel Gorman 2604b22d127aSMel Gorman return ret; 260542288fe3SMel Gorman 260642288fe3SMel Gorman alloc_new: 26074a8c7bb5SNathan Zimmer write_unlock(&sp->lock); 260842288fe3SMel Gorman ret = -ENOMEM; 260942288fe3SMel Gorman n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 261042288fe3SMel Gorman if (!n_new) 261142288fe3SMel Gorman goto err_out; 261242288fe3SMel Gorman mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 261342288fe3SMel Gorman if (!mpol_new) 261442288fe3SMel Gorman goto err_out; 261542288fe3SMel Gorman goto restart; 26161da177e4SLinus Torvalds } 26171da177e4SLinus Torvalds 261871fe804bSLee Schermerhorn /** 261971fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 262071fe804bSLee Schermerhorn * @sp: pointer to inode shared 
policy 262171fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 262271fe804bSLee Schermerhorn * 262371fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 262471fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 262571fe804bSLee Schermerhorn * This must be released on exit. 26264bfc4495SKAMEZAWA Hiroyuki * This is called at get_inode() calls and we can use GFP_KERNEL. 262771fe804bSLee Schermerhorn */ 262871fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 26297339ff83SRobin Holt { 263058568d2aSMiao Xie int ret; 263158568d2aSMiao Xie 263271fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 26334a8c7bb5SNathan Zimmer rwlock_init(&sp->lock); 26347339ff83SRobin Holt 263571fe804bSLee Schermerhorn if (mpol) { 26367339ff83SRobin Holt struct vm_area_struct pvma; 263771fe804bSLee Schermerhorn struct mempolicy *new; 26384bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 26397339ff83SRobin Holt 26404bfc4495SKAMEZAWA Hiroyuki if (!scratch) 26415c0c1654SLee Schermerhorn goto put_mpol; 264271fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 264371fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 264415d77835SLee Schermerhorn if (IS_ERR(new)) 26450cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 264658568d2aSMiao Xie 264758568d2aSMiao Xie task_lock(current); 26484bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 264958568d2aSMiao Xie task_unlock(current); 265015d77835SLee Schermerhorn if (ret) 26515c0c1654SLee Schermerhorn goto put_new; 265271fe804bSLee Schermerhorn 265371fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 26542c4541e2SKirill A. Shutemov vma_init(&pvma, NULL); 265571fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 265671fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 265715d77835SLee Schermerhorn 26585c0c1654SLee Schermerhorn put_new: 265971fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 26600cae3457SDan Carpenter free_scratch: 26614bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 26625c0c1654SLee Schermerhorn put_mpol: 26635c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 26647339ff83SRobin Holt } 26657339ff83SRobin Holt } 26667339ff83SRobin Holt 26671da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 26681da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 26691da177e4SLinus Torvalds { 26701da177e4SLinus Torvalds int err; 26711da177e4SLinus Torvalds struct sp_node *new = NULL; 26721da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 26731da177e4SLinus Torvalds 2674028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 26751da177e4SLinus Torvalds vma->vm_pgoff, 267645c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2677028fec41SDavid Rientjes npol ? npol->flags : -1, 267800ef2d2fSDavid Rientjes npol ? 
nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE); 26791da177e4SLinus Torvalds 26801da177e4SLinus Torvalds if (npol) { 26811da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 26821da177e4SLinus Torvalds if (!new) 26831da177e4SLinus Torvalds return -ENOMEM; 26841da177e4SLinus Torvalds } 26851da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 26861da177e4SLinus Torvalds if (err && new) 268763f74ca2SKOSAKI Motohiro sp_free(new); 26881da177e4SLinus Torvalds return err; 26891da177e4SLinus Torvalds } 26901da177e4SLinus Torvalds 26911da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */ 26921da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 26931da177e4SLinus Torvalds { 26941da177e4SLinus Torvalds struct sp_node *n; 26951da177e4SLinus Torvalds struct rb_node *next; 26961da177e4SLinus Torvalds 26971da177e4SLinus Torvalds if (!p->root.rb_node) 26981da177e4SLinus Torvalds return; 26994a8c7bb5SNathan Zimmer write_lock(&p->lock); 27001da177e4SLinus Torvalds next = rb_first(&p->root); 27011da177e4SLinus Torvalds while (next) { 27021da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 27031da177e4SLinus Torvalds next = rb_next(&n->nd); 270463f74ca2SKOSAKI Motohiro sp_delete(p, n); 27051da177e4SLinus Torvalds } 27064a8c7bb5SNathan Zimmer write_unlock(&p->lock); 27071da177e4SLinus Torvalds } 27081da177e4SLinus Torvalds 27091a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING 2710c297663cSMel Gorman static int __initdata numabalancing_override; 27111a687c2eSMel Gorman 27121a687c2eSMel Gorman static void __init check_numabalancing_enable(void) 27131a687c2eSMel Gorman { 27141a687c2eSMel Gorman bool numabalancing_default = false; 27151a687c2eSMel Gorman 27161a687c2eSMel Gorman if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 27171a687c2eSMel Gorman numabalancing_default = true; 27181a687c2eSMel Gorman 2719c297663cSMel Gorman /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2720c297663cSMel Gorman if (numabalancing_override) 2721c297663cSMel Gorman set_numabalancing_state(numabalancing_override == 1); 2722c297663cSMel Gorman 2723b0dc2b9bSMel Gorman if (num_online_nodes() > 1 && !numabalancing_override) { 2724756a025fSJoe Perches pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2725c297663cSMel Gorman numabalancing_default ? 
"Enabling" : "Disabling"); 27261a687c2eSMel Gorman set_numabalancing_state(numabalancing_default); 27271a687c2eSMel Gorman } 27281a687c2eSMel Gorman } 27291a687c2eSMel Gorman 27301a687c2eSMel Gorman static int __init setup_numabalancing(char *str) 27311a687c2eSMel Gorman { 27321a687c2eSMel Gorman int ret = 0; 27331a687c2eSMel Gorman if (!str) 27341a687c2eSMel Gorman goto out; 27351a687c2eSMel Gorman 27361a687c2eSMel Gorman if (!strcmp(str, "enable")) { 2737c297663cSMel Gorman numabalancing_override = 1; 27381a687c2eSMel Gorman ret = 1; 27391a687c2eSMel Gorman } else if (!strcmp(str, "disable")) { 2740c297663cSMel Gorman numabalancing_override = -1; 27411a687c2eSMel Gorman ret = 1; 27421a687c2eSMel Gorman } 27431a687c2eSMel Gorman out: 27441a687c2eSMel Gorman if (!ret) 27454a404beaSAndrew Morton pr_warn("Unable to parse numa_balancing=\n"); 27461a687c2eSMel Gorman 27471a687c2eSMel Gorman return ret; 27481a687c2eSMel Gorman } 27491a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing); 27501a687c2eSMel Gorman #else 27511a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void) 27521a687c2eSMel Gorman { 27531a687c2eSMel Gorman } 27541a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 27551a687c2eSMel Gorman 27561da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 27571da177e4SLinus Torvalds void __init numa_policy_init(void) 27581da177e4SLinus Torvalds { 2759b71636e2SPaul Mundt nodemask_t interleave_nodes; 2760b71636e2SPaul Mundt unsigned long largest = 0; 2761b71636e2SPaul Mundt int nid, prefer = 0; 2762b71636e2SPaul Mundt 27631da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 27641da177e4SLinus Torvalds sizeof(struct mempolicy), 276520c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 27661da177e4SLinus Torvalds 27671da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 27681da177e4SLinus Torvalds sizeof(struct sp_node), 276920c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 27701da177e4SLinus Torvalds 27715606e387SMel Gorman for_each_node(nid) { 27725606e387SMel Gorman preferred_node_policy[nid] = (struct mempolicy) { 27735606e387SMel Gorman .refcnt = ATOMIC_INIT(1), 27745606e387SMel Gorman .mode = MPOL_PREFERRED, 27755606e387SMel Gorman .flags = MPOL_F_MOF | MPOL_F_MORON, 27765606e387SMel Gorman .v = { .preferred_node = nid, }, 27775606e387SMel Gorman }; 27785606e387SMel Gorman } 27795606e387SMel Gorman 2780b71636e2SPaul Mundt /* 2781b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2782b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2783b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2784b71636e2SPaul Mundt */ 2785b71636e2SPaul Mundt nodes_clear(interleave_nodes); 278601f13bd6SLai Jiangshan for_each_node_state(nid, N_MEMORY) { 2787b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 27881da177e4SLinus Torvalds 2789b71636e2SPaul Mundt /* Preserve the largest node */ 2790b71636e2SPaul Mundt if (largest < total_pages) { 2791b71636e2SPaul Mundt largest = total_pages; 2792b71636e2SPaul Mundt prefer = nid; 2793b71636e2SPaul Mundt } 2794b71636e2SPaul Mundt 2795b71636e2SPaul Mundt /* Interleave this node? 
*/ 2796b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2797b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2798b71636e2SPaul Mundt } 2799b71636e2SPaul Mundt 2800b71636e2SPaul Mundt /* All too small, use the largest */ 2801b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2802b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2803b71636e2SPaul Mundt 2804028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2805b1de0d13SMitchel Humpherys pr_err("%s: interleaving failed\n", __func__); 28061a687c2eSMel Gorman 28071a687c2eSMel Gorman check_numabalancing_enable(); 28081da177e4SLinus Torvalds } 28091da177e4SLinus Torvalds 28108bccd85fSChristoph Lameter /* Reset policy of current process to default */ 28111da177e4SLinus Torvalds void numa_default_policy(void) 28121da177e4SLinus Torvalds { 2813028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 28141da177e4SLinus Torvalds } 281568860ec1SPaul Jackson 28164225399aSPaul Jackson /* 2817095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2818095f1fc4SLee Schermerhorn */ 2819095f1fc4SLee Schermerhorn 2820345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2821345ace9cSLee Schermerhorn { 2822345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2823345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2824345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2825345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2826d3a71033SLee Schermerhorn [MPOL_LOCAL] = "local", 2827345ace9cSLee Schermerhorn }; 28281a75a6c8SChristoph Lameter 2829095f1fc4SLee Schermerhorn 2830095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2831095f1fc4SLee Schermerhorn /** 2832f2a07f40SHugh Dickins * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2833095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 283471fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 
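 *
 * Illustrative inputs this parser accepts, as used for tmpfs "mpol="
 * mount options (assuming the named nodes have memory):
 *	"interleave:0-3", "bind=static:0,2", "prefer:1", "local"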
2835095f1fc4SLee Schermerhorn * 2836095f1fc4SLee Schermerhorn * Format of input: 2837095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2838095f1fc4SLee Schermerhorn * 283971fe804bSLee Schermerhorn * On success, returns 0, else 1 2840095f1fc4SLee Schermerhorn */ 2841a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol) 2842095f1fc4SLee Schermerhorn { 284371fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2844f2a07f40SHugh Dickins unsigned short mode_flags; 284571fe804bSLee Schermerhorn nodemask_t nodes; 2846095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2847095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2848dedf2c73Szhong jiang int err = 1, mode; 2849095f1fc4SLee Schermerhorn 2850c7a91bc7SDan Carpenter if (flags) 2851c7a91bc7SDan Carpenter *flags++ = '\0'; /* terminate mode string */ 2852c7a91bc7SDan Carpenter 2853095f1fc4SLee Schermerhorn if (nodelist) { 2854095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2855095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 285671fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2857095f1fc4SLee Schermerhorn goto out; 285801f13bd6SLai Jiangshan if (!nodes_subset(nodes, node_states[N_MEMORY])) 2859095f1fc4SLee Schermerhorn goto out; 286071fe804bSLee Schermerhorn } else 286171fe804bSLee Schermerhorn nodes_clear(nodes); 286271fe804bSLee Schermerhorn 2863dedf2c73Szhong jiang mode = match_string(policy_modes, MPOL_MAX, str); 2864dedf2c73Szhong jiang if (mode < 0) 2865095f1fc4SLee Schermerhorn goto out; 2866095f1fc4SLee Schermerhorn 286771fe804bSLee Schermerhorn switch (mode) { 2868095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 286971fe804bSLee Schermerhorn /* 2870aa9f7d51SRandy Dunlap * Insist on a nodelist of one node only, although later 2871aa9f7d51SRandy Dunlap * we use first_node(nodes) to grab a single node, so here 2872aa9f7d51SRandy Dunlap * nodelist (or nodes) cannot be empty. 
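 * (so "prefer:2" is accepted, while "prefer:2-3" is rejected by the
 * isdigit() scan below)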
287371fe804bSLee Schermerhorn */ 2874095f1fc4SLee Schermerhorn if (nodelist) { 2875095f1fc4SLee Schermerhorn char *rest = nodelist; 2876095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2877095f1fc4SLee Schermerhorn rest++; 2878926f2ae0SKOSAKI Motohiro if (*rest) 2879926f2ae0SKOSAKI Motohiro goto out; 2880aa9f7d51SRandy Dunlap if (nodes_empty(nodes)) 2881aa9f7d51SRandy Dunlap goto out; 2882095f1fc4SLee Schermerhorn } 2883095f1fc4SLee Schermerhorn break; 2884095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2885095f1fc4SLee Schermerhorn /* 2886095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2887095f1fc4SLee Schermerhorn */ 2888095f1fc4SLee Schermerhorn if (!nodelist) 288901f13bd6SLai Jiangshan nodes = node_states[N_MEMORY]; 28903f226aa1SLee Schermerhorn break; 289171fe804bSLee Schermerhorn case MPOL_LOCAL: 28923f226aa1SLee Schermerhorn /* 289371fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 28943f226aa1SLee Schermerhorn */ 289571fe804bSLee Schermerhorn if (nodelist) 28963f226aa1SLee Schermerhorn goto out; 28973f226aa1SLee Schermerhorn break; 2898413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2899413b43deSRavikiran G Thirumalai /* 2900413b43deSRavikiran G Thirumalai * Insist on an empty nodelist 2901413b43deSRavikiran G Thirumalai */ 2902413b43deSRavikiran G Thirumalai if (!nodelist) 2903413b43deSRavikiran G Thirumalai err = 0; 2904413b43deSRavikiran G Thirumalai goto out; 2905d69b2e63SKOSAKI Motohiro case MPOL_BIND: 290671fe804bSLee Schermerhorn /* 2907d69b2e63SKOSAKI Motohiro * Insist on a nodelist 290871fe804bSLee Schermerhorn */ 2909d69b2e63SKOSAKI Motohiro if (!nodelist) 2910d69b2e63SKOSAKI Motohiro goto out; 2911095f1fc4SLee Schermerhorn } 2912095f1fc4SLee Schermerhorn 291371fe804bSLee Schermerhorn mode_flags = 0; 2914095f1fc4SLee Schermerhorn if (flags) { 2915095f1fc4SLee Schermerhorn /* 2916095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2917095f1fc4SLee Schermerhorn * mode flags. 2918095f1fc4SLee Schermerhorn */ 2919095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 292071fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2921095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 292271fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2923095f1fc4SLee Schermerhorn else 2924926f2ae0SKOSAKI Motohiro goto out; 2925095f1fc4SLee Schermerhorn } 292671fe804bSLee Schermerhorn 292771fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 292871fe804bSLee Schermerhorn if (IS_ERR(new)) 2929926f2ae0SKOSAKI Motohiro goto out; 2930926f2ae0SKOSAKI Motohiro 2931f2a07f40SHugh Dickins /* 2932f2a07f40SHugh Dickins * Save nodes for mpol_to_str() to show the tmpfs mount options 2933f2a07f40SHugh Dickins * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 2934f2a07f40SHugh Dickins */ 2935f2a07f40SHugh Dickins if (mode != MPOL_PREFERRED) 2936f2a07f40SHugh Dickins new->v.nodes = nodes; 2937f2a07f40SHugh Dickins else if (nodelist) 2938f2a07f40SHugh Dickins new->v.preferred_node = first_node(nodes); 2939f2a07f40SHugh Dickins else 2940*7858d7bcSFeng Tang new->mode = MPOL_LOCAL; 2941f2a07f40SHugh Dickins 2942f2a07f40SHugh Dickins /* 2943f2a07f40SHugh Dickins * Save nodes for contextualization: this will be used to "clone" 2944f2a07f40SHugh Dickins * the mempolicy in a specific context [cpuset] at a later time.
2945f2a07f40SHugh Dickins */ 2946e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2947f2a07f40SHugh Dickins 2948926f2ae0SKOSAKI Motohiro err = 0; 294971fe804bSLee Schermerhorn 2950095f1fc4SLee Schermerhorn out: 2951095f1fc4SLee Schermerhorn /* Restore string for error message */ 2952095f1fc4SLee Schermerhorn if (nodelist) 2953095f1fc4SLee Schermerhorn *--nodelist = ':'; 2954095f1fc4SLee Schermerhorn if (flags) 2955095f1fc4SLee Schermerhorn *--flags = '='; 295671fe804bSLee Schermerhorn if (!err) 295771fe804bSLee Schermerhorn *mpol = new; 2958095f1fc4SLee Schermerhorn return err; 2959095f1fc4SLee Schermerhorn } 2960095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2961095f1fc4SLee Schermerhorn 296271fe804bSLee Schermerhorn /** 296371fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 296471fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 296571fe804bSLee Schermerhorn * @maxlen: length of @buffer 296671fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 296771fe804bSLee Schermerhorn * 2968948927eeSDavid Rientjes * Convert @pol into a string. If @buffer is too short, truncate the string. 2969948927eeSDavid Rientjes * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 2970948927eeSDavid Rientjes * longest flag, "relative", and to display at least a few node ids. 29711a75a6c8SChristoph Lameter */ 2972948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 29731a75a6c8SChristoph Lameter { 29741a75a6c8SChristoph Lameter char *p = buffer; 2975948927eeSDavid Rientjes nodemask_t nodes = NODE_MASK_NONE; 2976948927eeSDavid Rientjes unsigned short mode = MPOL_DEFAULT; 2977948927eeSDavid Rientjes unsigned short flags = 0; 29781a75a6c8SChristoph Lameter 29798790c71aSDavid Rientjes if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 2980bea904d5SLee Schermerhorn mode = pol->mode; 2981948927eeSDavid Rientjes flags = pol->flags; 2982948927eeSDavid Rientjes } 2983bea904d5SLee Schermerhorn 29841a75a6c8SChristoph Lameter switch (mode) { 29851a75a6c8SChristoph Lameter case MPOL_DEFAULT: 2986*7858d7bcSFeng Tang case MPOL_LOCAL: 29871a75a6c8SChristoph Lameter break; 29881a75a6c8SChristoph Lameter case MPOL_PREFERRED: 2989fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 29901a75a6c8SChristoph Lameter break; 29911a75a6c8SChristoph Lameter case MPOL_BIND: 29921a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 29931a75a6c8SChristoph Lameter nodes = pol->v.nodes; 29941a75a6c8SChristoph Lameter break; 29951a75a6c8SChristoph Lameter default: 2996948927eeSDavid Rientjes WARN_ON_ONCE(1); 2997948927eeSDavid Rientjes snprintf(p, maxlen, "unknown"); 2998948927eeSDavid Rientjes return; 29991a75a6c8SChristoph Lameter } 30001a75a6c8SChristoph Lameter 3001b7a9f420SDavid Rientjes p += snprintf(p, maxlen, "%s", policy_modes[mode]); 30021a75a6c8SChristoph Lameter 3003fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 3004948927eeSDavid Rientjes p += snprintf(p, buffer + maxlen - p, "="); 3005f5b087b5SDavid Rientjes 30062291990aSLee Schermerhorn /* 30072291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 30082291990aSLee Schermerhorn */ 3009f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 30102291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 30112291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 30122291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 
3013f5b087b5SDavid Rientjes } 3014f5b087b5SDavid Rientjes 30159e763e0fSTejun Heo if (!nodes_empty(nodes)) 30169e763e0fSTejun Heo p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 30179e763e0fSTejun Heo nodemask_pr_args(&nodes)); 30181a75a6c8SChristoph Lameter } 3019
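/*
 * Illustrative mpol_to_str() output matching the format built above: an
 * interleave policy over nodes 0-3 formats as "interleave:0-3", a static
 * bind to nodes 0 and 2 as "bind=static:0,2", and a local policy as just
 * "local".
 */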