/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints about in which node(s) memory
 * should be allocated.
 *
 * Four policies are supported, per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case node -1 here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non-default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
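/*
 * Illustrative userspace sketch (not part of this file): the policies
 * above are normally requested via the set_mempolicy(2) and mbind(2)
 * syscalls. Assuming the libnuma <numaif.h> wrappers and a machine with
 * at least two nodes, interleaving a task's future allocations over
 * nodes 0 and 1 might look like:
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *	// Hint: interleave all future allocations over nodes 0 and 1.
 *	if (set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask)))
 *		perror("set_mempolicy");
 */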
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy, the
	 * write-side task will rebind task->mempolicy in two steps. The
	 * first step sets all the newly allowed nodes, and the second step
	 * clears all the disallowed nodes. This way we can avoid finding no
	 * node to allocate a page from.
	 * If we have a lock to protect task->mempolicy on the read side, we
	 * rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
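/*
 * Worked example for mpol_relative_nodemask() (node numbers are
 * illustrative assumptions): with MPOL_F_RELATIVE_NODES, a user mask of
 * {0,2} taken relative to an allowed set of {3,5,7} is first folded onto
 * nodes_weight() == 3 bits, leaving {0,2}, and then mapped onto the set
 * bits of the allowed mask, yielding {3,7}. A bit beyond the weight
 * wraps around: a user mask of {4} folds to {1} and maps to {5}.
 */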
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags. But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding the task's alloc_lock to protect the task's
 * mems_allowed and mempolicy. May also be called holding the mmap_sem
 * for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_HIGH_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some sanity checking and
 * simple initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
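/*
 * Typical policy lifecycle, sketched from do_set_mempolicy() below
 * (error handling elided for brevity):
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new;
 *
 *	new = mpol_new(MPOL_BIND, 0, &nodes);	 // allocate and validate
 *	task_lock(current);
 *	mpol_set_nodemask(new, &nodes, scratch); // cpuset-relative setup
 *	task_unlock(current);
 *	...
 *	mpol_put(new);				 // drop the reference
 */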
/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}
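/*
 * Worked rebind example (node numbers are illustrative assumptions):
 * a task holds an MPOL_INTERLEAVE policy over {0,1}, created while its
 * cpuset allowed {0,1}, and the cpuset is then moved to {2,3}. The
 * default (remap) case above maps 0->2 and 1->3, so the policy becomes
 * {2,3}. With MPOL_F_STATIC_NODES the stored user mask {0,1} is ANDed
 * with the new allowed set; the intersection is empty here, so the code
 * falls back to the whole new mask. With MPOL_F_RELATIVE_NODES, {0,1}
 * is folded onto and mapped into the new allowed set, giving {2,3}.
 */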
/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the newly allowed nodes, and the second step clears all
 * the disallowed nodes. This way we can avoid finding no node to
 * allocate a page from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_sem during call.
 */
4301d0d2680SDavid Rientjes */ 4311d0d2680SDavid Rientjes 4321d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) 4331d0d2680SDavid Rientjes { 4341d0d2680SDavid Rientjes struct vm_area_struct *vma; 4351d0d2680SDavid Rientjes 4361d0d2680SDavid Rientjes down_write(&mm->mmap_sem); 4371d0d2680SDavid Rientjes for (vma = mm->mmap; vma; vma = vma->vm_next) 438708c1bbcSMiao Xie mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); 4391d0d2680SDavid Rientjes up_write(&mm->mmap_sem); 4401d0d2680SDavid Rientjes } 4411d0d2680SDavid Rientjes 44237012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { 44337012946SDavid Rientjes [MPOL_DEFAULT] = { 44437012946SDavid Rientjes .rebind = mpol_rebind_default, 44537012946SDavid Rientjes }, 44637012946SDavid Rientjes [MPOL_INTERLEAVE] = { 44737012946SDavid Rientjes .create = mpol_new_interleave, 44837012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 44937012946SDavid Rientjes }, 45037012946SDavid Rientjes [MPOL_PREFERRED] = { 45137012946SDavid Rientjes .create = mpol_new_preferred, 45237012946SDavid Rientjes .rebind = mpol_rebind_preferred, 45337012946SDavid Rientjes }, 45437012946SDavid Rientjes [MPOL_BIND] = { 45537012946SDavid Rientjes .create = mpol_new_bind, 45637012946SDavid Rientjes .rebind = mpol_rebind_nodemask, 45737012946SDavid Rientjes }, 45837012946SDavid Rientjes }; 45937012946SDavid Rientjes 460fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist, 461fc301289SChristoph Lameter unsigned long flags); 4621a75a6c8SChristoph Lameter 46338e35860SChristoph Lameter /* Scan through pages checking if pages follow certain conditions. */ 464b5810039SNick Piggin static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, 465dc9aa5b9SChristoph Lameter unsigned long addr, unsigned long end, 466dc9aa5b9SChristoph Lameter const nodemask_t *nodes, unsigned long flags, 46738e35860SChristoph Lameter void *private) 4681da177e4SLinus Torvalds { 46991612e0dSHugh Dickins pte_t *orig_pte; 47091612e0dSHugh Dickins pte_t *pte; 471705e87c0SHugh Dickins spinlock_t *ptl; 472941150a3SHugh Dickins 473705e87c0SHugh Dickins orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 47491612e0dSHugh Dickins do { 4756aab341eSLinus Torvalds struct page *page; 47625ba77c1SAndy Whitcroft int nid; 47791612e0dSHugh Dickins 47891612e0dSHugh Dickins if (!pte_present(*pte)) 47991612e0dSHugh Dickins continue; 4806aab341eSLinus Torvalds page = vm_normal_page(vma, addr, *pte); 4816aab341eSLinus Torvalds if (!page) 48291612e0dSHugh Dickins continue; 483053837fcSNick Piggin /* 48462b61f61SHugh Dickins * vm_normal_page() filters out zero pages, but there might 48562b61f61SHugh Dickins * still be PageReserved pages to skip, perhaps in a VDSO. 48662b61f61SHugh Dickins * And we cannot move PageKsm pages sensibly or safely yet. 
487053837fcSNick Piggin */ 48862b61f61SHugh Dickins if (PageReserved(page) || PageKsm(page)) 489f4598c8bSChristoph Lameter continue; 4906aab341eSLinus Torvalds nid = page_to_nid(page); 49138e35860SChristoph Lameter if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) 49238e35860SChristoph Lameter continue; 49338e35860SChristoph Lameter 494b1f72d18SStephen Wilson if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 495fc301289SChristoph Lameter migrate_page_add(page, private, flags); 496dc9aa5b9SChristoph Lameter else 4971da177e4SLinus Torvalds break; 49891612e0dSHugh Dickins } while (pte++, addr += PAGE_SIZE, addr != end); 499705e87c0SHugh Dickins pte_unmap_unlock(orig_pte, ptl); 50091612e0dSHugh Dickins return addr != end; 50191612e0dSHugh Dickins } 50291612e0dSHugh Dickins 503b5810039SNick Piggin static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud, 504dc9aa5b9SChristoph Lameter unsigned long addr, unsigned long end, 505dc9aa5b9SChristoph Lameter const nodemask_t *nodes, unsigned long flags, 50638e35860SChristoph Lameter void *private) 50791612e0dSHugh Dickins { 50891612e0dSHugh Dickins pmd_t *pmd; 50991612e0dSHugh Dickins unsigned long next; 51091612e0dSHugh Dickins 51191612e0dSHugh Dickins pmd = pmd_offset(pud, addr); 51291612e0dSHugh Dickins do { 51391612e0dSHugh Dickins next = pmd_addr_end(addr, end); 514bae9c19bSAndrea Arcangeli split_huge_page_pmd(vma->vm_mm, pmd); 5151a5a9906SAndrea Arcangeli if (pmd_none_or_trans_huge_or_clear_bad(pmd)) 51691612e0dSHugh Dickins continue; 517dc9aa5b9SChristoph Lameter if (check_pte_range(vma, pmd, addr, next, nodes, 51838e35860SChristoph Lameter flags, private)) 51991612e0dSHugh Dickins return -EIO; 52091612e0dSHugh Dickins } while (pmd++, addr = next, addr != end); 52191612e0dSHugh Dickins return 0; 52291612e0dSHugh Dickins } 52391612e0dSHugh Dickins 524b5810039SNick Piggin static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd, 525dc9aa5b9SChristoph Lameter unsigned long addr, unsigned long end, 526dc9aa5b9SChristoph Lameter const nodemask_t *nodes, unsigned long flags, 52738e35860SChristoph Lameter void *private) 52891612e0dSHugh Dickins { 52991612e0dSHugh Dickins pud_t *pud; 53091612e0dSHugh Dickins unsigned long next; 53191612e0dSHugh Dickins 53291612e0dSHugh Dickins pud = pud_offset(pgd, addr); 53391612e0dSHugh Dickins do { 53491612e0dSHugh Dickins next = pud_addr_end(addr, end); 53591612e0dSHugh Dickins if (pud_none_or_clear_bad(pud)) 53691612e0dSHugh Dickins continue; 537dc9aa5b9SChristoph Lameter if (check_pmd_range(vma, pud, addr, next, nodes, 53838e35860SChristoph Lameter flags, private)) 53991612e0dSHugh Dickins return -EIO; 54091612e0dSHugh Dickins } while (pud++, addr = next, addr != end); 54191612e0dSHugh Dickins return 0; 54291612e0dSHugh Dickins } 54391612e0dSHugh Dickins 544b5810039SNick Piggin static inline int check_pgd_range(struct vm_area_struct *vma, 545dc9aa5b9SChristoph Lameter unsigned long addr, unsigned long end, 546dc9aa5b9SChristoph Lameter const nodemask_t *nodes, unsigned long flags, 54738e35860SChristoph Lameter void *private) 54891612e0dSHugh Dickins { 54991612e0dSHugh Dickins pgd_t *pgd; 55091612e0dSHugh Dickins unsigned long next; 55191612e0dSHugh Dickins 552b5810039SNick Piggin pgd = pgd_offset(vma->vm_mm, addr); 55391612e0dSHugh Dickins do { 55491612e0dSHugh Dickins next = pgd_addr_end(addr, end); 55591612e0dSHugh Dickins if (pgd_none_or_clear_bad(pgd)) 55691612e0dSHugh Dickins continue; 557dc9aa5b9SChristoph Lameter if (check_pud_range(vma, pgd, addr, next, nodes, 
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;


	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_put(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			continue;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
		err = policy_vma(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
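/*
 * Illustrative walk-through of mbind_range() (addresses are
 * assumptions): suppose one VMA covers [0x1000, 0x9000) and a new
 * policy is applied to [0x3000, 0x5000). vma_merge() fails because the
 * policies differ, so split_vma() runs twice, producing
 * [0x1000, 0x3000), [0x3000, 0x5000) and [0x5000, 0x9000);
 * policy_vma() then installs new_pol on the middle piece only.
 */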
/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy. Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */
void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note: do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}
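/*
 * Illustrative userspace sketch (not part of this file): the lookup
 * above backs get_mempolicy(2) with MPOL_F_NODE | MPOL_F_ADDR, which
 * reports the node that the page backing a given address currently
 * resides on. Assuming the libnuma <numaif.h> wrapper:
 *
 *	int node;
 *
 *	if (get_mempolicy(&node, NULL, 0, ptr, MPOL_F_NODE | MPOL_F_ADDR))
 *		perror("get_mempolicy");
 *	else
 *		printf("page at %p is on node %d\n", ptr, node);
 */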
/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL. We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
873d79df630SDavid Rientjes */ 874d79df630SDavid Rientjes *policy |= (pol->flags & MPOL_MODE_FLAGS); 875bea904d5SLee Schermerhorn } 8761da177e4SLinus Torvalds 8771da177e4SLinus Torvalds if (vma) { 8781da177e4SLinus Torvalds up_read(¤t->mm->mmap_sem); 8791da177e4SLinus Torvalds vma = NULL; 8801da177e4SLinus Torvalds } 8811da177e4SLinus Torvalds 8821da177e4SLinus Torvalds err = 0; 88358568d2aSMiao Xie if (nmask) { 884c6b6ef8bSLee Schermerhorn if (mpol_store_user_nodemask(pol)) { 885c6b6ef8bSLee Schermerhorn *nmask = pol->w.user_nodemask; 886c6b6ef8bSLee Schermerhorn } else { 88758568d2aSMiao Xie task_lock(current); 888bea904d5SLee Schermerhorn get_policy_nodemask(pol, nmask); 88958568d2aSMiao Xie task_unlock(current); 89058568d2aSMiao Xie } 891c6b6ef8bSLee Schermerhorn } 8921da177e4SLinus Torvalds 8931da177e4SLinus Torvalds out: 89452cd3b07SLee Schermerhorn mpol_cond_put(pol); 8951da177e4SLinus Torvalds if (vma) 8961da177e4SLinus Torvalds up_read(¤t->mm->mmap_sem); 8971da177e4SLinus Torvalds return err; 8981da177e4SLinus Torvalds } 8991da177e4SLinus Torvalds 900b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION 9018bccd85fSChristoph Lameter /* 9026ce3c4c0SChristoph Lameter * page migration 9036ce3c4c0SChristoph Lameter */ 904fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist, 905fc301289SChristoph Lameter unsigned long flags) 9066ce3c4c0SChristoph Lameter { 9076ce3c4c0SChristoph Lameter /* 908fc301289SChristoph Lameter * Avoid migrating a page that is shared with others. 9096ce3c4c0SChristoph Lameter */ 91062695a84SNick Piggin if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) { 91162695a84SNick Piggin if (!isolate_lru_page(page)) { 91262695a84SNick Piggin list_add_tail(&page->lru, pagelist); 9136d9c285aSKOSAKI Motohiro inc_zone_page_state(page, NR_ISOLATED_ANON + 9146d9c285aSKOSAKI Motohiro page_is_file_cache(page)); 91562695a84SNick Piggin } 91662695a84SNick Piggin } 9176ce3c4c0SChristoph Lameter } 9186ce3c4c0SChristoph Lameter 919742755a1SChristoph Lameter static struct page *new_node_page(struct page *page, unsigned long node, int **x) 92095a402c3SChristoph Lameter { 9216484eb3eSMel Gorman return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0); 92295a402c3SChristoph Lameter } 92395a402c3SChristoph Lameter 9246ce3c4c0SChristoph Lameter /* 9257e2ab150SChristoph Lameter * Migrate pages from one node to a target node. 9267e2ab150SChristoph Lameter * Returns error or the number of pages not migrated. 
9277e2ab150SChristoph Lameter */ 928dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest, 929dbcb0f19SAdrian Bunk int flags) 9307e2ab150SChristoph Lameter { 9317e2ab150SChristoph Lameter nodemask_t nmask; 9327e2ab150SChristoph Lameter LIST_HEAD(pagelist); 9337e2ab150SChristoph Lameter int err = 0; 9340def08e3SVasiliy Kulikov struct vm_area_struct *vma; 9357e2ab150SChristoph Lameter 9367e2ab150SChristoph Lameter nodes_clear(nmask); 9377e2ab150SChristoph Lameter node_set(source, nmask); 9387e2ab150SChristoph Lameter 9390def08e3SVasiliy Kulikov vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, 9407e2ab150SChristoph Lameter flags | MPOL_MF_DISCONTIG_OK, &pagelist); 9410def08e3SVasiliy Kulikov if (IS_ERR(vma)) 9420def08e3SVasiliy Kulikov return PTR_ERR(vma); 9437e2ab150SChristoph Lameter 944cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 9457f0f2496SMel Gorman err = migrate_pages(&pagelist, new_node_page, dest, 946a6bc32b8SMel Gorman false, MIGRATE_SYNC); 947cf608ac1SMinchan Kim if (err) 948cf608ac1SMinchan Kim putback_lru_pages(&pagelist); 949cf608ac1SMinchan Kim } 95095a402c3SChristoph Lameter 9517e2ab150SChristoph Lameter return err; 9527e2ab150SChristoph Lameter } 9537e2ab150SChristoph Lameter 9547e2ab150SChristoph Lameter /* 9557e2ab150SChristoph Lameter * Move pages between the two nodesets so as to preserve the physical 9567e2ab150SChristoph Lameter * layout as much as possible. 95739743889SChristoph Lameter * 95839743889SChristoph Lameter * Returns the number of page that could not be moved. 95939743889SChristoph Lameter */ 9600ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 9610ce72d4fSAndrew Morton const nodemask_t *to, int flags) 96239743889SChristoph Lameter { 9637e2ab150SChristoph Lameter int busy = 0; 9640aedadf9SChristoph Lameter int err; 9657e2ab150SChristoph Lameter nodemask_t tmp; 96639743889SChristoph Lameter 9670aedadf9SChristoph Lameter err = migrate_prep(); 9680aedadf9SChristoph Lameter if (err) 9690aedadf9SChristoph Lameter return err; 9700aedadf9SChristoph Lameter 97139743889SChristoph Lameter down_read(&mm->mmap_sem); 972d4984711SChristoph Lameter 9730ce72d4fSAndrew Morton err = migrate_vmas(mm, from, to, flags); 9747b2259b3SChristoph Lameter if (err) 9757b2259b3SChristoph Lameter goto out; 9767b2259b3SChristoph Lameter 9777e2ab150SChristoph Lameter /* 9787e2ab150SChristoph Lameter * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 9797e2ab150SChristoph Lameter * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 9807e2ab150SChristoph Lameter * bit in 'tmp', and return that <source, dest> pair for migration. 9817e2ab150SChristoph Lameter * The pair of nodemasks 'to' and 'from' define the map. 9827e2ab150SChristoph Lameter * 9837e2ab150SChristoph Lameter * If no pair of bits is found that way, fallback to picking some 9847e2ab150SChristoph Lameter * pair of 'source' and 'dest' bits that are not the same. If the 9857e2ab150SChristoph Lameter * 'source' and 'dest' bits are the same, this represents a node 9867e2ab150SChristoph Lameter * that will be migrating to itself, so no pages need move. 9877e2ab150SChristoph Lameter * 9887e2ab150SChristoph Lameter * If no bits are left in 'tmp', or if all remaining bits left 9897e2ab150SChristoph Lameter * in 'tmp' correspond to the same bit in 'to', return false 9907e2ab150SChristoph Lameter * (nothing left to migrate). 
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory sourced from that same node.
	 *
	 * A single scan of tmp is sufficient. As we go, we remember the
	 * most recent <s, d> pair that moved (s != d). If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved. If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal
			 * to the number of destination nodes we can not
			 * preserve this node relative relationship. In that
			 * case, skip copying memory from a node that is in
			 * the destination mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7]   -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not. N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif
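/*
 * Illustrative userspace sketch (not part of this file): do_mbind()
 * below backs the mbind(2) syscall. Assuming the libnuma <numaif.h>
 * wrapper and <sys/mman.h>, binding an anonymous mapping to node 0 and
 * migrating any pages already placed elsewhere might look like:
 *
 *	unsigned long mask = 1UL << 0;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	if (mbind(buf, len, MPOL_BIND, &mask, 8 * sizeof(mask),
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT))
 *		perror("mbind");
 */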
11116ce3c4c0SChristoph Lameter struct vm_area_struct *vma; 11126ce3c4c0SChristoph Lameter struct mm_struct *mm = current->mm; 11136ce3c4c0SChristoph Lameter struct mempolicy *new; 11146ce3c4c0SChristoph Lameter unsigned long end; 11156ce3c4c0SChristoph Lameter int err; 11166ce3c4c0SChristoph Lameter LIST_HEAD(pagelist); 11176ce3c4c0SChristoph Lameter 1118a3b51e01SDavid Rientjes if (flags & ~(unsigned long)(MPOL_MF_STRICT | 11196ce3c4c0SChristoph Lameter MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 11206ce3c4c0SChristoph Lameter return -EINVAL; 112174c00241SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 11226ce3c4c0SChristoph Lameter return -EPERM; 11236ce3c4c0SChristoph Lameter 11246ce3c4c0SChristoph Lameter if (start & ~PAGE_MASK) 11256ce3c4c0SChristoph Lameter return -EINVAL; 11266ce3c4c0SChristoph Lameter 11276ce3c4c0SChristoph Lameter if (mode == MPOL_DEFAULT) 11286ce3c4c0SChristoph Lameter flags &= ~MPOL_MF_STRICT; 11296ce3c4c0SChristoph Lameter 11306ce3c4c0SChristoph Lameter len = (len + PAGE_SIZE - 1) & PAGE_MASK; 11316ce3c4c0SChristoph Lameter end = start + len; 11326ce3c4c0SChristoph Lameter 11336ce3c4c0SChristoph Lameter if (end < start) 11346ce3c4c0SChristoph Lameter return -EINVAL; 11356ce3c4c0SChristoph Lameter if (end == start) 11366ce3c4c0SChristoph Lameter return 0; 11376ce3c4c0SChristoph Lameter 1138028fec41SDavid Rientjes new = mpol_new(mode, mode_flags, nmask); 11396ce3c4c0SChristoph Lameter if (IS_ERR(new)) 11406ce3c4c0SChristoph Lameter return PTR_ERR(new); 11416ce3c4c0SChristoph Lameter 11426ce3c4c0SChristoph Lameter /* 11436ce3c4c0SChristoph Lameter * If we are using the default policy then operation 11446ce3c4c0SChristoph Lameter * on discontinuous address spaces is okay after all 11456ce3c4c0SChristoph Lameter */ 11466ce3c4c0SChristoph Lameter if (!new) 11476ce3c4c0SChristoph Lameter flags |= MPOL_MF_DISCONTIG_OK; 11486ce3c4c0SChristoph Lameter 1149028fec41SDavid Rientjes pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1150028fec41SDavid Rientjes start, start + len, mode, mode_flags, 1151028fec41SDavid Rientjes nmask ? 
nodes_addr(*nmask)[0] : -1); 11526ce3c4c0SChristoph Lameter 11530aedadf9SChristoph Lameter if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 11540aedadf9SChristoph Lameter 11550aedadf9SChristoph Lameter err = migrate_prep(); 11560aedadf9SChristoph Lameter if (err) 1157b05ca738SKOSAKI Motohiro goto mpol_out; 11580aedadf9SChristoph Lameter } 11594bfc4495SKAMEZAWA Hiroyuki { 11604bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 11614bfc4495SKAMEZAWA Hiroyuki if (scratch) { 11626ce3c4c0SChristoph Lameter down_write(&mm->mmap_sem); 116358568d2aSMiao Xie task_lock(current); 11644bfc4495SKAMEZAWA Hiroyuki err = mpol_set_nodemask(new, nmask, scratch); 116558568d2aSMiao Xie task_unlock(current); 11664bfc4495SKAMEZAWA Hiroyuki if (err) 116758568d2aSMiao Xie up_write(&mm->mmap_sem); 11684bfc4495SKAMEZAWA Hiroyuki } else 11694bfc4495SKAMEZAWA Hiroyuki err = -ENOMEM; 11704bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 11714bfc4495SKAMEZAWA Hiroyuki } 1172b05ca738SKOSAKI Motohiro if (err) 1173b05ca738SKOSAKI Motohiro goto mpol_out; 1174b05ca738SKOSAKI Motohiro 11756ce3c4c0SChristoph Lameter vma = check_range(mm, start, end, nmask, 11766ce3c4c0SChristoph Lameter flags | MPOL_MF_INVERT, &pagelist); 11776ce3c4c0SChristoph Lameter 11786ce3c4c0SChristoph Lameter err = PTR_ERR(vma); 11796ce3c4c0SChristoph Lameter if (!IS_ERR(vma)) { 11806ce3c4c0SChristoph Lameter int nr_failed = 0; 11816ce3c4c0SChristoph Lameter 11829d8cebd4SKOSAKI Motohiro err = mbind_range(mm, start, end, new); 11837e2ab150SChristoph Lameter 1184cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 118595a402c3SChristoph Lameter nr_failed = migrate_pages(&pagelist, new_vma_page, 11867f0f2496SMel Gorman (unsigned long)vma, 1187c4c0e9e5SDavid Rientjes false, MIGRATE_SYNC); 1188cf608ac1SMinchan Kim if (nr_failed) 1189cf608ac1SMinchan Kim putback_lru_pages(&pagelist); 1190cf608ac1SMinchan Kim } 11916ce3c4c0SChristoph Lameter 11926ce3c4c0SChristoph Lameter if (!err && nr_failed && (flags & MPOL_MF_STRICT)) 11936ce3c4c0SChristoph Lameter err = -EIO; 1194ab8a3e14SKOSAKI Motohiro } else 1195ab8a3e14SKOSAKI Motohiro putback_lru_pages(&pagelist); 1196b20a3503SChristoph Lameter 11976ce3c4c0SChristoph Lameter up_write(&mm->mmap_sem); 1198b05ca738SKOSAKI Motohiro mpol_out: 1199f0be3d32SLee Schermerhorn mpol_put(new); 12006ce3c4c0SChristoph Lameter return err; 12016ce3c4c0SChristoph Lameter } 12026ce3c4c0SChristoph Lameter 120339743889SChristoph Lameter /* 12048bccd85fSChristoph Lameter * User space interface with variable sized bitmaps for nodelists. 12058bccd85fSChristoph Lameter */ 12068bccd85fSChristoph Lameter 12078bccd85fSChristoph Lameter /* Copy a node mask from user space. 
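   Editor's note (illustrative, not in the original): maxnode counts bits,
   and get_nodes() below starts with --maxnode, so only maxnode-1 bits are
   honoured; e.g. on a kernel with MAX_NUMNODES == 64 a caller passes
   maxnode = 65 for all 64 bits of one unsigned long to be considered.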
*/ 120839743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 12098bccd85fSChristoph Lameter unsigned long maxnode) 12108bccd85fSChristoph Lameter { 12118bccd85fSChristoph Lameter unsigned long k; 12128bccd85fSChristoph Lameter unsigned long nlongs; 12138bccd85fSChristoph Lameter unsigned long endmask; 12148bccd85fSChristoph Lameter 12158bccd85fSChristoph Lameter --maxnode; 12168bccd85fSChristoph Lameter nodes_clear(*nodes); 12178bccd85fSChristoph Lameter if (maxnode == 0 || !nmask) 12188bccd85fSChristoph Lameter return 0; 1219a9c930baSAndi Kleen if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1220636f13c1SChris Wright return -EINVAL; 12218bccd85fSChristoph Lameter 12228bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(maxnode); 12238bccd85fSChristoph Lameter if ((maxnode % BITS_PER_LONG) == 0) 12248bccd85fSChristoph Lameter endmask = ~0UL; 12258bccd85fSChristoph Lameter else 12268bccd85fSChristoph Lameter endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; 12278bccd85fSChristoph Lameter 12288bccd85fSChristoph Lameter /* When the user specified more nodes than supported just check 12298bccd85fSChristoph Lameter if the non supported part is all zero. */ 12308bccd85fSChristoph Lameter if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { 12318bccd85fSChristoph Lameter if (nlongs > PAGE_SIZE/sizeof(long)) 12328bccd85fSChristoph Lameter return -EINVAL; 12338bccd85fSChristoph Lameter for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { 12348bccd85fSChristoph Lameter unsigned long t; 12358bccd85fSChristoph Lameter if (get_user(t, nmask + k)) 12368bccd85fSChristoph Lameter return -EFAULT; 12378bccd85fSChristoph Lameter if (k == nlongs - 1) { 12388bccd85fSChristoph Lameter if (t & endmask) 12398bccd85fSChristoph Lameter return -EINVAL; 12408bccd85fSChristoph Lameter } else if (t) 12418bccd85fSChristoph Lameter return -EINVAL; 12428bccd85fSChristoph Lameter } 12438bccd85fSChristoph Lameter nlongs = BITS_TO_LONGS(MAX_NUMNODES); 12448bccd85fSChristoph Lameter endmask = ~0UL; 12458bccd85fSChristoph Lameter } 12468bccd85fSChristoph Lameter 12478bccd85fSChristoph Lameter if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) 12488bccd85fSChristoph Lameter return -EFAULT; 12498bccd85fSChristoph Lameter nodes_addr(*nodes)[nlongs-1] &= endmask; 12508bccd85fSChristoph Lameter return 0; 12518bccd85fSChristoph Lameter } 12528bccd85fSChristoph Lameter 12538bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */ 12548bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 12558bccd85fSChristoph Lameter nodemask_t *nodes) 12568bccd85fSChristoph Lameter { 12578bccd85fSChristoph Lameter unsigned long copy = ALIGN(maxnode-1, 64) / 8; 12588bccd85fSChristoph Lameter const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); 12598bccd85fSChristoph Lameter 12608bccd85fSChristoph Lameter if (copy > nbytes) { 12618bccd85fSChristoph Lameter if (copy > PAGE_SIZE) 12628bccd85fSChristoph Lameter return -EINVAL; 12638bccd85fSChristoph Lameter if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 12648bccd85fSChristoph Lameter return -EFAULT; 12658bccd85fSChristoph Lameter copy = nbytes; 12668bccd85fSChristoph Lameter } 12678bccd85fSChristoph Lameter return copy_to_user(mask, nodes_addr(*nodes), copy) ? 
-EFAULT : 0; 12688bccd85fSChristoph Lameter } 12698bccd85fSChristoph Lameter 1270938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1271938bb9f5SHeiko Carstens unsigned long, mode, unsigned long __user *, nmask, 1272938bb9f5SHeiko Carstens unsigned long, maxnode, unsigned, flags) 12738bccd85fSChristoph Lameter { 12748bccd85fSChristoph Lameter nodemask_t nodes; 12758bccd85fSChristoph Lameter int err; 1276028fec41SDavid Rientjes unsigned short mode_flags; 12778bccd85fSChristoph Lameter 1278028fec41SDavid Rientjes mode_flags = mode & MPOL_MODE_FLAGS; 1279028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1280a3b51e01SDavid Rientjes if (mode >= MPOL_MAX) 1281a3b51e01SDavid Rientjes return -EINVAL; 12824c50bc01SDavid Rientjes if ((mode_flags & MPOL_F_STATIC_NODES) && 12834c50bc01SDavid Rientjes (mode_flags & MPOL_F_RELATIVE_NODES)) 12844c50bc01SDavid Rientjes return -EINVAL; 12858bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 12868bccd85fSChristoph Lameter if (err) 12878bccd85fSChristoph Lameter return err; 1288028fec41SDavid Rientjes return do_mbind(start, len, mode, mode_flags, &nodes, flags); 12898bccd85fSChristoph Lameter } 12908bccd85fSChristoph Lameter 12918bccd85fSChristoph Lameter /* Set the process memory policy */ 1292938bb9f5SHeiko Carstens SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask, 1293938bb9f5SHeiko Carstens unsigned long, maxnode) 12948bccd85fSChristoph Lameter { 12958bccd85fSChristoph Lameter int err; 12968bccd85fSChristoph Lameter nodemask_t nodes; 1297028fec41SDavid Rientjes unsigned short flags; 12988bccd85fSChristoph Lameter 1299028fec41SDavid Rientjes flags = mode & MPOL_MODE_FLAGS; 1300028fec41SDavid Rientjes mode &= ~MPOL_MODE_FLAGS; 1301028fec41SDavid Rientjes if ((unsigned int)mode >= MPOL_MAX) 13028bccd85fSChristoph Lameter return -EINVAL; 13034c50bc01SDavid Rientjes if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) 13044c50bc01SDavid Rientjes return -EINVAL; 13058bccd85fSChristoph Lameter err = get_nodes(&nodes, nmask, maxnode); 13068bccd85fSChristoph Lameter if (err) 13078bccd85fSChristoph Lameter return err; 1308028fec41SDavid Rientjes return do_set_mempolicy(mode, flags, &nodes); 13098bccd85fSChristoph Lameter } 13108bccd85fSChristoph Lameter 1311938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1312938bb9f5SHeiko Carstens const unsigned long __user *, old_nodes, 1313938bb9f5SHeiko Carstens const unsigned long __user *, new_nodes) 131439743889SChristoph Lameter { 1315c69e8d9cSDavid Howells const struct cred *cred = current_cred(), *tcred; 1316596d7cfaSKOSAKI Motohiro struct mm_struct *mm = NULL; 131739743889SChristoph Lameter struct task_struct *task; 131839743889SChristoph Lameter nodemask_t task_nodes; 131939743889SChristoph Lameter int err; 1320596d7cfaSKOSAKI Motohiro nodemask_t *old; 1321596d7cfaSKOSAKI Motohiro nodemask_t *new; 1322596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH(scratch); 132339743889SChristoph Lameter 1324596d7cfaSKOSAKI Motohiro if (!scratch) 1325596d7cfaSKOSAKI Motohiro return -ENOMEM; 132639743889SChristoph Lameter 1327596d7cfaSKOSAKI Motohiro old = &scratch->mask1; 1328596d7cfaSKOSAKI Motohiro new = &scratch->mask2; 1329596d7cfaSKOSAKI Motohiro 1330596d7cfaSKOSAKI Motohiro err = get_nodes(old, old_nodes, maxnode); 133139743889SChristoph Lameter if (err) 1332596d7cfaSKOSAKI Motohiro goto out; 1333596d7cfaSKOSAKI Motohiro 1334596d7cfaSKOSAKI Motohiro err = get_nodes(new, new_nodes, maxnode); 
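	/*
	 * Editor's note -- a minimal userspace sketch of this syscall;
	 * illustrative only, not part of the original source (link with
	 * -lnuma).  Move "pid"'s pages from node 0 to node 2; the call
	 * returns the number of pages it could not move, or a negative
	 * errno:
	 *
	 *	#include <numaif.h>
	 *
	 *	unsigned long old_nodes = 1UL << 0, new_nodes = 1UL << 2;
	 *	long left = migrate_pages(pid, 8 * sizeof(unsigned long) + 1,
	 *				  &old_nodes, &new_nodes);
	 */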
1335596d7cfaSKOSAKI Motohiro if (err) 1336596d7cfaSKOSAKI Motohiro goto out; 133739743889SChristoph Lameter 133839743889SChristoph Lameter /* Find the mm_struct */ 133955cfaa3cSZeng Zhaoming rcu_read_lock(); 1340228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 134139743889SChristoph Lameter if (!task) { 134255cfaa3cSZeng Zhaoming rcu_read_unlock(); 1343596d7cfaSKOSAKI Motohiro err = -ESRCH; 1344596d7cfaSKOSAKI Motohiro goto out; 134539743889SChristoph Lameter } 13463268c63eSChristoph Lameter get_task_struct(task); 134739743889SChristoph Lameter 1348596d7cfaSKOSAKI Motohiro err = -EINVAL; 134939743889SChristoph Lameter 135039743889SChristoph Lameter /* 135139743889SChristoph Lameter * Check if this process has the right to modify the specified 135239743889SChristoph Lameter * process. The right exists if the process has administrative 13537f927fccSAlexey Dobriyan * capabilities, superuser privileges or the same 135439743889SChristoph Lameter * userid as the target process. 135539743889SChristoph Lameter */ 1356c69e8d9cSDavid Howells tcred = __task_cred(task); 1357b38a86ebSEric W. Biederman if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && 1358b38a86ebSEric W. Biederman !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && 135974c00241SChristoph Lameter !capable(CAP_SYS_NICE)) { 1360c69e8d9cSDavid Howells rcu_read_unlock(); 136139743889SChristoph Lameter err = -EPERM; 13623268c63eSChristoph Lameter goto out_put; 136339743889SChristoph Lameter } 1364c69e8d9cSDavid Howells rcu_read_unlock(); 136539743889SChristoph Lameter 136639743889SChristoph Lameter task_nodes = cpuset_mems_allowed(task); 136739743889SChristoph Lameter /* Is the user allowed to access the target nodes? */ 1368596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 136939743889SChristoph Lameter err = -EPERM; 13703268c63eSChristoph Lameter goto out_put; 137139743889SChristoph Lameter } 137239743889SChristoph Lameter 1373596d7cfaSKOSAKI Motohiro if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) { 13743b42d28bSChristoph Lameter err = -EINVAL; 13753268c63eSChristoph Lameter goto out_put; 13763b42d28bSChristoph Lameter } 13773b42d28bSChristoph Lameter 137886c3a764SDavid Quigley err = security_task_movememory(task); 137986c3a764SDavid Quigley if (err) 13803268c63eSChristoph Lameter goto out_put; 138186c3a764SDavid Quigley 13823268c63eSChristoph Lameter mm = get_task_mm(task); 13833268c63eSChristoph Lameter put_task_struct(task); 1384f2a9ef88SSasha Levin 1385f2a9ef88SSasha Levin if (!mm) { 1386f2a9ef88SSasha Levin err = -EINVAL; 1387f2a9ef88SSasha Levin goto out; 1388f2a9ef88SSasha Levin } 1389f2a9ef88SSasha Levin 1390596d7cfaSKOSAKI Motohiro err = do_migrate_pages(mm, old, new, 139174c00241SChristoph Lameter capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 13923268c63eSChristoph Lameter 139339743889SChristoph Lameter mmput(mm); 13943268c63eSChristoph Lameter out: 1395596d7cfaSKOSAKI Motohiro NODEMASK_SCRATCH_FREE(scratch); 1396596d7cfaSKOSAKI Motohiro 139739743889SChristoph Lameter return err; 13983268c63eSChristoph Lameter 13993268c63eSChristoph Lameter out_put: 14003268c63eSChristoph Lameter put_task_struct(task); 14013268c63eSChristoph Lameter goto out; 14023268c63eSChristoph Lameter 140339743889SChristoph Lameter } 140439743889SChristoph Lameter 140539743889SChristoph Lameter 14068bccd85fSChristoph Lameter /* Retrieve NUMA policy */ 1407938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1408938bb9f5SHeiko Carstens unsigned long __user *, nmask, unsigned long, maxnode, 1409938bb9f5SHeiko Carstens unsigned long, addr, unsigned long, flags) 14108bccd85fSChristoph Lameter { 1411dbcb0f19SAdrian Bunk int err; 1412dbcb0f19SAdrian Bunk int uninitialized_var(pval); 14138bccd85fSChristoph Lameter nodemask_t nodes; 14148bccd85fSChristoph Lameter 14158bccd85fSChristoph Lameter if (nmask != NULL && maxnode < MAX_NUMNODES) 14168bccd85fSChristoph Lameter return -EINVAL; 14178bccd85fSChristoph Lameter 14188bccd85fSChristoph Lameter err = do_get_mempolicy(&pval, &nodes, addr, flags); 14198bccd85fSChristoph Lameter 14208bccd85fSChristoph Lameter if (err) 14218bccd85fSChristoph Lameter return err; 14228bccd85fSChristoph Lameter 14238bccd85fSChristoph Lameter if (policy && put_user(pval, policy)) 14248bccd85fSChristoph Lameter return -EFAULT; 14258bccd85fSChristoph Lameter 14268bccd85fSChristoph Lameter if (nmask) 14278bccd85fSChristoph Lameter err = copy_nodes_to_user(nmask, maxnode, &nodes); 14288bccd85fSChristoph Lameter 14298bccd85fSChristoph Lameter return err; 14308bccd85fSChristoph Lameter } 14318bccd85fSChristoph Lameter 14321da177e4SLinus Torvalds #ifdef CONFIG_COMPAT 14331da177e4SLinus Torvalds 14341da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy, 14351da177e4SLinus Torvalds compat_ulong_t __user *nmask, 14361da177e4SLinus Torvalds compat_ulong_t maxnode, 14371da177e4SLinus Torvalds compat_ulong_t addr, compat_ulong_t flags) 14381da177e4SLinus Torvalds { 14391da177e4SLinus Torvalds long err; 14401da177e4SLinus Torvalds unsigned long __user *nm = NULL; 14411da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 14421da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 14431da177e4SLinus Torvalds 14441da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 14451da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 14461da177e4SLinus Torvalds 14471da177e4SLinus Torvalds if (nmask) 14481da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 14491da177e4SLinus Torvalds 14501da177e4SLinus Torvalds err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 14511da177e4SLinus Torvalds 14521da177e4SLinus Torvalds if (!err && nmask) { 14532bbff6c7SKAMEZAWA Hiroyuki unsigned long copy_size; 14542bbff6c7SKAMEZAWA Hiroyuki copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 14552bbff6c7SKAMEZAWA Hiroyuki err = copy_from_user(bm, nm, copy_size); 14561da177e4SLinus Torvalds /* ensure entire bitmap is zeroed */ 14571da177e4SLinus Torvalds err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 14581da177e4SLinus Torvalds err |= compat_put_bitmap(nmask, bm, nr_bits); 14591da177e4SLinus Torvalds } 14601da177e4SLinus Torvalds 14611da177e4SLinus Torvalds return err; 14621da177e4SLinus Torvalds } 14631da177e4SLinus 
Torvalds 14641da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, 14651da177e4SLinus Torvalds compat_ulong_t maxnode) 14661da177e4SLinus Torvalds { 14671da177e4SLinus Torvalds long err = 0; 14681da177e4SLinus Torvalds unsigned long __user *nm = NULL; 14691da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 14701da177e4SLinus Torvalds DECLARE_BITMAP(bm, MAX_NUMNODES); 14711da177e4SLinus Torvalds 14721da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 14731da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 14741da177e4SLinus Torvalds 14751da177e4SLinus Torvalds if (nmask) { 14761da177e4SLinus Torvalds err = compat_get_bitmap(bm, nmask, nr_bits); 14771da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 14781da177e4SLinus Torvalds err |= copy_to_user(nm, bm, alloc_size); 14791da177e4SLinus Torvalds } 14801da177e4SLinus Torvalds 14811da177e4SLinus Torvalds if (err) 14821da177e4SLinus Torvalds return -EFAULT; 14831da177e4SLinus Torvalds 14841da177e4SLinus Torvalds return sys_set_mempolicy(mode, nm, nr_bits+1); 14851da177e4SLinus Torvalds } 14861da177e4SLinus Torvalds 14871da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, 14881da177e4SLinus Torvalds compat_ulong_t mode, compat_ulong_t __user *nmask, 14891da177e4SLinus Torvalds compat_ulong_t maxnode, compat_ulong_t flags) 14901da177e4SLinus Torvalds { 14911da177e4SLinus Torvalds long err = 0; 14921da177e4SLinus Torvalds unsigned long __user *nm = NULL; 14931da177e4SLinus Torvalds unsigned long nr_bits, alloc_size; 1494dfcd3c0dSAndi Kleen nodemask_t bm; 14951da177e4SLinus Torvalds 14961da177e4SLinus Torvalds nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); 14971da177e4SLinus Torvalds alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 14981da177e4SLinus Torvalds 14991da177e4SLinus Torvalds if (nmask) { 1500dfcd3c0dSAndi Kleen err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); 15011da177e4SLinus Torvalds nm = compat_alloc_user_space(alloc_size); 1502dfcd3c0dSAndi Kleen err |= copy_to_user(nm, nodes_addr(bm), alloc_size); 15031da177e4SLinus Torvalds } 15041da177e4SLinus Torvalds 15051da177e4SLinus Torvalds if (err) 15061da177e4SLinus Torvalds return -EFAULT; 15071da177e4SLinus Torvalds 15081da177e4SLinus Torvalds return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 15091da177e4SLinus Torvalds } 15101da177e4SLinus Torvalds 15111da177e4SLinus Torvalds #endif 15121da177e4SLinus Torvalds 1513480eccf9SLee Schermerhorn /* 1514480eccf9SLee Schermerhorn * get_vma_policy(@task, @vma, @addr) 1515480eccf9SLee Schermerhorn * @task - task for fallback if vma policy == default 1516480eccf9SLee Schermerhorn * @vma - virtual memory area whose policy is sought 1517480eccf9SLee Schermerhorn * @addr - address in @vma for shared policy lookup 1518480eccf9SLee Schermerhorn * 1519480eccf9SLee Schermerhorn * Returns effective policy for a VMA at specified address. 1520480eccf9SLee Schermerhorn * Falls back to @task or system default policy, as necessary. 152152cd3b07SLee Schermerhorn * Current or other task's task mempolicy and non-shared vma policies 152252cd3b07SLee Schermerhorn * are protected by the task's mmap_sem, which must be held for read by 152352cd3b07SLee Schermerhorn * the caller. 
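 * (Editor's note, illustrative summary of the lookup order coded below: a
 * non-NULL result from vma->vm_ops->get_policy() wins, else vma->vm_policy
 * for VMAs without a get_policy op, else @task->mempolicy, else
 * &default_policy.)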
152452cd3b07SLee Schermerhorn * Shared policies [those marked as MPOL_F_SHARED] require an extra reference 152552cd3b07SLee Schermerhorn * count--added by the get_policy() vm_op, as appropriate--to protect against 152652cd3b07SLee Schermerhorn * freeing by another task. It is the caller's responsibility to free the 152752cd3b07SLee Schermerhorn * extra reference for shared policies. 1528480eccf9SLee Schermerhorn */ 1529d98f6cb6SStephen Wilson struct mempolicy *get_vma_policy(struct task_struct *task, 153048fce342SChristoph Lameter struct vm_area_struct *vma, unsigned long addr) 15311da177e4SLinus Torvalds { 15326e21c8f1SChristoph Lameter struct mempolicy *pol = task->mempolicy; 15331da177e4SLinus Torvalds 15341da177e4SLinus Torvalds if (vma) { 1535480eccf9SLee Schermerhorn if (vma->vm_ops && vma->vm_ops->get_policy) { 1536ae4d8c16SLee Schermerhorn struct mempolicy *vpol = vma->vm_ops->get_policy(vma, 1537ae4d8c16SLee Schermerhorn addr); 1538ae4d8c16SLee Schermerhorn if (vpol) 1539ae4d8c16SLee Schermerhorn pol = vpol; 1540bea904d5SLee Schermerhorn } else if (vma->vm_policy) 15411da177e4SLinus Torvalds pol = vma->vm_policy; 15421da177e4SLinus Torvalds } 15431da177e4SLinus Torvalds if (!pol) 15441da177e4SLinus Torvalds pol = &default_policy; 15451da177e4SLinus Torvalds return pol; 15461da177e4SLinus Torvalds } 15471da177e4SLinus Torvalds 154852cd3b07SLee Schermerhorn /* 154952cd3b07SLee Schermerhorn * Return a nodemask representing a mempolicy for filtering nodes for 155052cd3b07SLee Schermerhorn * page allocation 155152cd3b07SLee Schermerhorn */ 155252cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) 155319770b32SMel Gorman { 155419770b32SMel Gorman /* Lower zones don't get a nodemask applied for MPOL_BIND */ 155545c4745aSLee Schermerhorn if (unlikely(policy->mode == MPOL_BIND) && 155619770b32SMel Gorman gfp_zone(gfp) >= policy_zone && 155719770b32SMel Gorman cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) 155819770b32SMel Gorman return &policy->v.nodes; 155919770b32SMel Gorman 156019770b32SMel Gorman return NULL; 156119770b32SMel Gorman } 156219770b32SMel Gorman 156352cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */ 15642f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, 15652f5f9486SAndi Kleen int nd) 15661da177e4SLinus Torvalds { 156745c4745aSLee Schermerhorn switch (policy->mode) { 15681da177e4SLinus Torvalds case MPOL_PREFERRED: 1569fc36b8d3SLee Schermerhorn if (!(policy->flags & MPOL_F_LOCAL)) 15701da177e4SLinus Torvalds nd = policy->v.preferred_node; 15711da177e4SLinus Torvalds break; 15721da177e4SLinus Torvalds case MPOL_BIND: 157319770b32SMel Gorman /* 157452cd3b07SLee Schermerhorn * Normally, MPOL_BIND allocations are node-local within the 157552cd3b07SLee Schermerhorn * allowed nodemask. However, if __GFP_THISNODE is set and the 15766eb27e1fSBob Liu * current node isn't part of the mask, we use the zonelist for 157752cd3b07SLee Schermerhorn * the first node in the mask instead. 
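	 * (Editor's note, illustrative: with an MPOL_BIND mask of {1,2}, a
	 * __GFP_THISNODE allocation made while running on node 0 is
	 * redirected to first_node({1,2}) == 1 instead of node 0.)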
157819770b32SMel Gorman */ 157919770b32SMel Gorman if (unlikely(gfp & __GFP_THISNODE) && 158019770b32SMel Gorman unlikely(!node_isset(nd, policy->v.nodes))) 158119770b32SMel Gorman nd = first_node(policy->v.nodes); 158219770b32SMel Gorman break; 15831da177e4SLinus Torvalds default: 15841da177e4SLinus Torvalds BUG(); 15851da177e4SLinus Torvalds } 15860e88460dSMel Gorman return node_zonelist(nd, gfp); 15871da177e4SLinus Torvalds } 15881da177e4SLinus Torvalds 15891da177e4SLinus Torvalds /* Do dynamic interleaving for a process */ 15901da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy) 15911da177e4SLinus Torvalds { 15921da177e4SLinus Torvalds unsigned nid, next; 15931da177e4SLinus Torvalds struct task_struct *me = current; 15941da177e4SLinus Torvalds 15951da177e4SLinus Torvalds nid = me->il_next; 1596dfcd3c0dSAndi Kleen next = next_node(nid, policy->v.nodes); 15971da177e4SLinus Torvalds if (next >= MAX_NUMNODES) 1598dfcd3c0dSAndi Kleen next = first_node(policy->v.nodes); 1599f5b087b5SDavid Rientjes if (next < MAX_NUMNODES) 16001da177e4SLinus Torvalds me->il_next = next; 16011da177e4SLinus Torvalds return nid; 16021da177e4SLinus Torvalds } 16031da177e4SLinus Torvalds 1604dc85da15SChristoph Lameter /* 1605dc85da15SChristoph Lameter * Depending on the memory policy provide a node from which to allocate the 1606dc85da15SChristoph Lameter * next slab entry. 160752cd3b07SLee Schermerhorn * @policy must be protected by freeing by the caller. If @policy is 160852cd3b07SLee Schermerhorn * the current task's mempolicy, this protection is implicit, as only the 160952cd3b07SLee Schermerhorn * task can change it's policy. The system default policy requires no 161052cd3b07SLee Schermerhorn * such protection. 1611dc85da15SChristoph Lameter */ 1612e7b691b0SAndi Kleen unsigned slab_node(void) 1613dc85da15SChristoph Lameter { 1614e7b691b0SAndi Kleen struct mempolicy *policy; 1615e7b691b0SAndi Kleen 1616e7b691b0SAndi Kleen if (in_interrupt()) 1617e7b691b0SAndi Kleen return numa_node_id(); 1618e7b691b0SAndi Kleen 1619e7b691b0SAndi Kleen policy = current->mempolicy; 1620fc36b8d3SLee Schermerhorn if (!policy || policy->flags & MPOL_F_LOCAL) 1621bea904d5SLee Schermerhorn return numa_node_id(); 1622765c4507SChristoph Lameter 1623bea904d5SLee Schermerhorn switch (policy->mode) { 1624bea904d5SLee Schermerhorn case MPOL_PREFERRED: 1625fc36b8d3SLee Schermerhorn /* 1626fc36b8d3SLee Schermerhorn * handled MPOL_F_LOCAL above 1627fc36b8d3SLee Schermerhorn */ 1628bea904d5SLee Schermerhorn return policy->v.preferred_node; 1629bea904d5SLee Schermerhorn 1630dc85da15SChristoph Lameter case MPOL_INTERLEAVE: 1631dc85da15SChristoph Lameter return interleave_nodes(policy); 1632dc85da15SChristoph Lameter 1633dd1a239fSMel Gorman case MPOL_BIND: { 1634dc85da15SChristoph Lameter /* 1635dc85da15SChristoph Lameter * Follow bind policy behavior and start allocation at the 1636dc85da15SChristoph Lameter * first node. 1637dc85da15SChristoph Lameter */ 163819770b32SMel Gorman struct zonelist *zonelist; 163919770b32SMel Gorman struct zone *zone; 164019770b32SMel Gorman enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 164119770b32SMel Gorman zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0]; 164219770b32SMel Gorman (void)first_zones_zonelist(zonelist, highest_zoneidx, 164319770b32SMel Gorman &policy->v.nodes, 164419770b32SMel Gorman &zone); 1645800416f7SEric Dumazet return zone ? 
zone->node : numa_node_id(); 1646dd1a239fSMel Gorman } 1647dc85da15SChristoph Lameter 1648dc85da15SChristoph Lameter default: 1649bea904d5SLee Schermerhorn BUG(); 1650dc85da15SChristoph Lameter } 1651dc85da15SChristoph Lameter } 1652dc85da15SChristoph Lameter 16531da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */ 16541da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol, 16551da177e4SLinus Torvalds struct vm_area_struct *vma, unsigned long off) 16561da177e4SLinus Torvalds { 1657dfcd3c0dSAndi Kleen unsigned nnodes = nodes_weight(pol->v.nodes); 1658f5b087b5SDavid Rientjes unsigned target; 16591da177e4SLinus Torvalds int c; 16601da177e4SLinus Torvalds int nid = -1; 16611da177e4SLinus Torvalds 1662f5b087b5SDavid Rientjes if (!nnodes) 1663f5b087b5SDavid Rientjes return numa_node_id(); 1664f5b087b5SDavid Rientjes target = (unsigned int)off % nnodes; 16651da177e4SLinus Torvalds c = 0; 16661da177e4SLinus Torvalds do { 1667dfcd3c0dSAndi Kleen nid = next_node(nid, pol->v.nodes); 16681da177e4SLinus Torvalds c++; 16691da177e4SLinus Torvalds } while (c <= target); 16701da177e4SLinus Torvalds return nid; 16711da177e4SLinus Torvalds } 16721da177e4SLinus Torvalds 16735da7ca86SChristoph Lameter /* Determine a node number for interleave */ 16745da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol, 16755da7ca86SChristoph Lameter struct vm_area_struct *vma, unsigned long addr, int shift) 16765da7ca86SChristoph Lameter { 16775da7ca86SChristoph Lameter if (vma) { 16785da7ca86SChristoph Lameter unsigned long off; 16795da7ca86SChristoph Lameter 16803b98b087SNishanth Aravamudan /* 16813b98b087SNishanth Aravamudan * for small pages, there is no difference between 16823b98b087SNishanth Aravamudan * shift and PAGE_SHIFT, so the bit-shift is safe. 16833b98b087SNishanth Aravamudan * for huge pages, since vm_pgoff is in units of small 16843b98b087SNishanth Aravamudan * pages, we need to shift off the always 0 bits to get 16853b98b087SNishanth Aravamudan * a useful offset. 16863b98b087SNishanth Aravamudan */ 16873b98b087SNishanth Aravamudan BUG_ON(shift < PAGE_SHIFT); 16883b98b087SNishanth Aravamudan off = vma->vm_pgoff >> (shift - PAGE_SHIFT); 16895da7ca86SChristoph Lameter off += (addr - vma->vm_start) >> shift; 16905da7ca86SChristoph Lameter return offset_il_node(pol, vma, off); 16915da7ca86SChristoph Lameter } else 16925da7ca86SChristoph Lameter return interleave_nodes(pol); 16935da7ca86SChristoph Lameter } 16945da7ca86SChristoph Lameter 1695778d3b0fSMichal Hocko /* 1696778d3b0fSMichal Hocko * Return the bit number of a random bit set in the nodemask. 
1697778d3b0fSMichal Hocko * (returns -1 if nodemask is empty) 1698778d3b0fSMichal Hocko */ 1699778d3b0fSMichal Hocko int node_random(const nodemask_t *maskp) 1700778d3b0fSMichal Hocko { 1701778d3b0fSMichal Hocko int w, bit = -1; 1702778d3b0fSMichal Hocko 1703778d3b0fSMichal Hocko w = nodes_weight(*maskp); 1704778d3b0fSMichal Hocko if (w) 1705778d3b0fSMichal Hocko bit = bitmap_ord_to_pos(maskp->bits, 1706778d3b0fSMichal Hocko get_random_int() % w, MAX_NUMNODES); 1707778d3b0fSMichal Hocko return bit; 1708778d3b0fSMichal Hocko } 1709778d3b0fSMichal Hocko 171000ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS 1711480eccf9SLee Schermerhorn /* 1712480eccf9SLee Schermerhorn * huge_zonelist(@vma, @addr, @gfp_flags, @mpol) 1713480eccf9SLee Schermerhorn * @vma = virtual memory area whose policy is sought 1714480eccf9SLee Schermerhorn * @addr = address in @vma for shared policy lookup and interleave policy 1715480eccf9SLee Schermerhorn * @gfp_flags = for requested zone 171619770b32SMel Gorman * @mpol = pointer to mempolicy pointer for reference counted mempolicy 171719770b32SMel Gorman * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask 1718480eccf9SLee Schermerhorn * 171952cd3b07SLee Schermerhorn * Returns a zonelist suitable for a huge page allocation and a pointer 172052cd3b07SLee Schermerhorn * to the struct mempolicy for conditional unref after allocation. 172152cd3b07SLee Schermerhorn * If the effective policy is 'BIND, returns a pointer to the mempolicy's 172252cd3b07SLee Schermerhorn * @nodemask for filtering the zonelist. 1723c0ff7453SMiao Xie * 1724c0ff7453SMiao Xie * Must be protected by get_mems_allowed() 1725480eccf9SLee Schermerhorn */ 1726396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, 172719770b32SMel Gorman gfp_t gfp_flags, struct mempolicy **mpol, 172819770b32SMel Gorman nodemask_t **nodemask) 17295da7ca86SChristoph Lameter { 1730480eccf9SLee Schermerhorn struct zonelist *zl; 17315da7ca86SChristoph Lameter 173252cd3b07SLee Schermerhorn *mpol = get_vma_policy(current, vma, addr); 173319770b32SMel Gorman *nodemask = NULL; /* assume !MPOL_BIND */ 17345da7ca86SChristoph Lameter 173552cd3b07SLee Schermerhorn if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { 173652cd3b07SLee Schermerhorn zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1737a5516438SAndi Kleen huge_page_shift(hstate_vma(vma))), gfp_flags); 173852cd3b07SLee Schermerhorn } else { 17392f5f9486SAndi Kleen zl = policy_zonelist(gfp_flags, *mpol, numa_node_id()); 174052cd3b07SLee Schermerhorn if ((*mpol)->mode == MPOL_BIND) 174152cd3b07SLee Schermerhorn *nodemask = &(*mpol)->v.nodes; 1742480eccf9SLee Schermerhorn } 1743480eccf9SLee Schermerhorn return zl; 17445da7ca86SChristoph Lameter } 174506808b08SLee Schermerhorn 174606808b08SLee Schermerhorn /* 174706808b08SLee Schermerhorn * init_nodemask_of_mempolicy 174806808b08SLee Schermerhorn * 174906808b08SLee Schermerhorn * If the current task's mempolicy is "default" [NULL], return 'false' 175006808b08SLee Schermerhorn * to indicate default policy. Otherwise, extract the policy nodemask 175106808b08SLee Schermerhorn * for 'bind' or 'interleave' policy into the argument nodemask, or 175206808b08SLee Schermerhorn * initialize the argument nodemask to contain the single node for 175306808b08SLee Schermerhorn * 'preferred' or 'local' policy and return 'true' to indicate presence 175406808b08SLee Schermerhorn * of non-default mempolicy. 
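 * (Editor's note, illustrative: MPOL_INTERLEAVE over {0,2} yields the mask
 * {0,2}; MPOL_PREFERRED with preferred_node == 1 yields the single-node
 * mask {1}; MPOL_PREFERRED with MPOL_F_LOCAL yields the node the task is
 * currently running on.)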
175506808b08SLee Schermerhorn  *
175606808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
175706808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
175806808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
175906808b08SLee Schermerhorn  *
176006808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
176106808b08SLee Schermerhorn  */
176206808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
176306808b08SLee Schermerhorn {
176406808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
176506808b08SLee Schermerhorn 	int nid;
176606808b08SLee Schermerhorn 
176706808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
176806808b08SLee Schermerhorn 		return false;
176906808b08SLee Schermerhorn 
1770c0ff7453SMiao Xie 	task_lock(current);
177106808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
177206808b08SLee Schermerhorn 	switch (mempolicy->mode) {
177306808b08SLee Schermerhorn 	case MPOL_PREFERRED:
177406808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
177506808b08SLee Schermerhorn 			nid = numa_node_id();
177606808b08SLee Schermerhorn 		else
177706808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
177806808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
177906808b08SLee Schermerhorn 		break;
178006808b08SLee Schermerhorn 
178106808b08SLee Schermerhorn 	case MPOL_BIND:
178206808b08SLee Schermerhorn 		/* Fall through */
178306808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
178406808b08SLee Schermerhorn 		*mask = mempolicy->v.nodes;
178506808b08SLee Schermerhorn 		break;
178606808b08SLee Schermerhorn 
178706808b08SLee Schermerhorn 	default:
178806808b08SLee Schermerhorn 		BUG();
178906808b08SLee Schermerhorn 	}
1790c0ff7453SMiao Xie 	task_unlock(current);
179106808b08SLee Schermerhorn 
179206808b08SLee Schermerhorn 	return true;
179306808b08SLee Schermerhorn }
179400ac59adSChen, Kenneth W #endif
17955da7ca86SChristoph Lameter 
17966f48d0ebSDavid Rientjes /*
17976f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
17986f48d0ebSDavid Rientjes  *
17996f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
18006f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
18016f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
18026f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
18036f48d0ebSDavid Rientjes  *
18046f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
18056f48d0ebSDavid Rientjes */ 18066f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk, 18076f48d0ebSDavid Rientjes const nodemask_t *mask) 18086f48d0ebSDavid Rientjes { 18096f48d0ebSDavid Rientjes struct mempolicy *mempolicy; 18106f48d0ebSDavid Rientjes bool ret = true; 18116f48d0ebSDavid Rientjes 18126f48d0ebSDavid Rientjes if (!mask) 18136f48d0ebSDavid Rientjes return ret; 18146f48d0ebSDavid Rientjes task_lock(tsk); 18156f48d0ebSDavid Rientjes mempolicy = tsk->mempolicy; 18166f48d0ebSDavid Rientjes if (!mempolicy) 18176f48d0ebSDavid Rientjes goto out; 18186f48d0ebSDavid Rientjes 18196f48d0ebSDavid Rientjes switch (mempolicy->mode) { 18206f48d0ebSDavid Rientjes case MPOL_PREFERRED: 18216f48d0ebSDavid Rientjes /* 18226f48d0ebSDavid Rientjes * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to 18236f48d0ebSDavid Rientjes * allocate from, they may fallback to other nodes when oom. 18246f48d0ebSDavid Rientjes * Thus, it's possible for tsk to have allocated memory from 18256f48d0ebSDavid Rientjes * nodes in mask. 18266f48d0ebSDavid Rientjes */ 18276f48d0ebSDavid Rientjes break; 18286f48d0ebSDavid Rientjes case MPOL_BIND: 18296f48d0ebSDavid Rientjes case MPOL_INTERLEAVE: 18306f48d0ebSDavid Rientjes ret = nodes_intersects(mempolicy->v.nodes, *mask); 18316f48d0ebSDavid Rientjes break; 18326f48d0ebSDavid Rientjes default: 18336f48d0ebSDavid Rientjes BUG(); 18346f48d0ebSDavid Rientjes } 18356f48d0ebSDavid Rientjes out: 18366f48d0ebSDavid Rientjes task_unlock(tsk); 18376f48d0ebSDavid Rientjes return ret; 18386f48d0ebSDavid Rientjes } 18396f48d0ebSDavid Rientjes 18401da177e4SLinus Torvalds /* Allocate a page in interleaved policy. 18411da177e4SLinus Torvalds Own path because it needs to do special accounting. */ 1842662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 1843662f3a0bSAndi Kleen unsigned nid) 18441da177e4SLinus Torvalds { 18451da177e4SLinus Torvalds struct zonelist *zl; 18461da177e4SLinus Torvalds struct page *page; 18471da177e4SLinus Torvalds 18480e88460dSMel Gorman zl = node_zonelist(nid, gfp); 18491da177e4SLinus Torvalds page = __alloc_pages(gfp, order, zl); 1850dd1a239fSMel Gorman if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0])) 1851ca889e6cSChristoph Lameter inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 18521da177e4SLinus Torvalds return page; 18531da177e4SLinus Torvalds } 18541da177e4SLinus Torvalds 18551da177e4SLinus Torvalds /** 18560bbbc0b3SAndrea Arcangeli * alloc_pages_vma - Allocate a page for a VMA. 18571da177e4SLinus Torvalds * 18581da177e4SLinus Torvalds * @gfp: 18591da177e4SLinus Torvalds * %GFP_USER user allocation. 18601da177e4SLinus Torvalds * %GFP_KERNEL kernel allocations, 18611da177e4SLinus Torvalds * %GFP_HIGHMEM highmem/user allocations, 18621da177e4SLinus Torvalds * %GFP_FS allocation should not call back into a file system. 18631da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 18641da177e4SLinus Torvalds * 18650bbbc0b3SAndrea Arcangeli * @order:Order of the GFP allocation. 18661da177e4SLinus Torvalds * @vma: Pointer to VMA or NULL if not available. 18671da177e4SLinus Torvalds * @addr: Virtual Address of the allocation. Must be inside the VMA. 18681da177e4SLinus Torvalds * 18691da177e4SLinus Torvalds * This function allocates a page from the kernel page pool and applies 18701da177e4SLinus Torvalds * a NUMA policy associated with the VMA or the current process. 
18711da177e4SLinus Torvalds * When VMA is not NULL caller must hold down_read on the mmap_sem of the 18721da177e4SLinus Torvalds * mm_struct of the VMA to prevent it from going away. Should be used for 18731da177e4SLinus Torvalds * all allocations for pages that will be mapped into 18741da177e4SLinus Torvalds * user space. Returns NULL when no page can be allocated. 18751da177e4SLinus Torvalds * 18761da177e4SLinus Torvalds * Should be called with the mm_sem of the vma hold. 18771da177e4SLinus Torvalds */ 18781da177e4SLinus Torvalds struct page * 18790bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 18802f5f9486SAndi Kleen unsigned long addr, int node) 18811da177e4SLinus Torvalds { 1882cc9a6c87SMel Gorman struct mempolicy *pol; 1883480eccf9SLee Schermerhorn struct zonelist *zl; 1884c0ff7453SMiao Xie struct page *page; 1885cc9a6c87SMel Gorman unsigned int cpuset_mems_cookie; 18861da177e4SLinus Torvalds 1887cc9a6c87SMel Gorman retry_cpuset: 1888cc9a6c87SMel Gorman pol = get_vma_policy(current, vma, addr); 1889cc9a6c87SMel Gorman cpuset_mems_cookie = get_mems_allowed(); 1890cc9a6c87SMel Gorman 189145c4745aSLee Schermerhorn if (unlikely(pol->mode == MPOL_INTERLEAVE)) { 18921da177e4SLinus Torvalds unsigned nid; 18935da7ca86SChristoph Lameter 18948eac563cSAndi Kleen nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 189552cd3b07SLee Schermerhorn mpol_cond_put(pol); 18960bbbc0b3SAndrea Arcangeli page = alloc_page_interleave(gfp, order, nid); 1897cc9a6c87SMel Gorman if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) 1898cc9a6c87SMel Gorman goto retry_cpuset; 1899cc9a6c87SMel Gorman 1900c0ff7453SMiao Xie return page; 19011da177e4SLinus Torvalds } 19022f5f9486SAndi Kleen zl = policy_zonelist(gfp, pol, node); 190352cd3b07SLee Schermerhorn if (unlikely(mpol_needs_cond_ref(pol))) { 1904480eccf9SLee Schermerhorn /* 190552cd3b07SLee Schermerhorn * slow path: ref counted shared policy 1906480eccf9SLee Schermerhorn */ 19070bbbc0b3SAndrea Arcangeli struct page *page = __alloc_pages_nodemask(gfp, order, 190852cd3b07SLee Schermerhorn zl, policy_nodemask(gfp, pol)); 1909f0be3d32SLee Schermerhorn __mpol_put(pol); 1910cc9a6c87SMel Gorman if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) 1911cc9a6c87SMel Gorman goto retry_cpuset; 1912480eccf9SLee Schermerhorn return page; 1913480eccf9SLee Schermerhorn } 1914480eccf9SLee Schermerhorn /* 1915480eccf9SLee Schermerhorn * fast path: default or task policy 1916480eccf9SLee Schermerhorn */ 19170bbbc0b3SAndrea Arcangeli page = __alloc_pages_nodemask(gfp, order, zl, 19180bbbc0b3SAndrea Arcangeli policy_nodemask(gfp, pol)); 1919cc9a6c87SMel Gorman if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) 1920cc9a6c87SMel Gorman goto retry_cpuset; 1921c0ff7453SMiao Xie return page; 19221da177e4SLinus Torvalds } 19231da177e4SLinus Torvalds 19241da177e4SLinus Torvalds /** 19251da177e4SLinus Torvalds * alloc_pages_current - Allocate pages. 19261da177e4SLinus Torvalds * 19271da177e4SLinus Torvalds * @gfp: 19281da177e4SLinus Torvalds * %GFP_USER user allocation, 19291da177e4SLinus Torvalds * %GFP_KERNEL kernel allocation, 19301da177e4SLinus Torvalds * %GFP_HIGHMEM highmem allocation, 19311da177e4SLinus Torvalds * %GFP_FS don't call back into a file system. 19321da177e4SLinus Torvalds * %GFP_ATOMIC don't sleep. 19331da177e4SLinus Torvalds * @order: Power of two of allocation size in pages. 0 is a single page. 
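 * (Editor's note: on CONFIG_NUMA kernels the generic alloc_pages() helper
 * resolves to this function, so an ordinary GFP_KERNEL, order-0 request
 * from process context is what exercises the policy logic below.  This is
 * an editorial addition, not part of the original comment.)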
19341da177e4SLinus Torvalds  *
19351da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool and, when not in
19361da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
19371da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
19381da177e4SLinus Torvalds  *
1939cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
19401da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
19411da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
19421da177e4SLinus Torvalds  */
1943dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
19441da177e4SLinus Torvalds {
19451da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
1946c0ff7453SMiao Xie 	struct page *page;
1947cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
19481da177e4SLinus Torvalds 
19499b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
19501da177e4SLinus Torvalds 		pol = &default_policy;
195152cd3b07SLee Schermerhorn 
1952cc9a6c87SMel Gorman retry_cpuset:
1953cc9a6c87SMel Gorman 	cpuset_mems_cookie = get_mems_allowed();
1954cc9a6c87SMel Gorman 
195552cd3b07SLee Schermerhorn 	/*
195652cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
195752cd3b07SLee Schermerhorn 	 * nor system default_policy
195852cd3b07SLee Schermerhorn 	 */
195945c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
1960c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1961c0ff7453SMiao Xie 	else
1962c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
19635c4b4be3SAndi Kleen 				policy_zonelist(gfp, pol, numa_node_id()),
19645c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
1965cc9a6c87SMel Gorman 
1966cc9a6c87SMel Gorman 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1967cc9a6c87SMel Gorman 		goto retry_cpuset;
1968cc9a6c87SMel Gorman 
1969c0ff7453SMiao Xie 	return page;
19701da177e4SLinus Torvalds }
19711da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
19721da177e4SLinus Torvalds 
19734225399aSPaul Jackson /*
1974846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
19754225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
19764225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
19774225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
19784225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
1979708c1bbcSMiao Xie  *
1980708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
1981708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do rebind work for the current task.
19824225399aSPaul Jackson  */
19834225399aSPaul Jackson 
1984846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
1985846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
19861da177e4SLinus Torvalds {
19871da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
19881da177e4SLinus Torvalds 
19891da177e4SLinus Torvalds 	if (!new)
19901da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
1991708c1bbcSMiao Xie 
1992708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
1993708c1bbcSMiao Xie 	if (old == current->mempolicy) {
1994708c1bbcSMiao Xie 		task_lock(current);
1995708c1bbcSMiao Xie 		*new = *old;
1996708c1bbcSMiao Xie 		task_unlock(current);
1997708c1bbcSMiao Xie 	} else
1998708c1bbcSMiao Xie 		*new = *old;
1999708c1bbcSMiao Xie 
200099ee4ca7SPaul E. McKenney 	rcu_read_lock();
20014225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
20024225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2003708c1bbcSMiao Xie 		if (new->flags & MPOL_F_REBINDING)
2004708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2005708c1bbcSMiao Xie 		else
2006708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
20074225399aSPaul Jackson 	}
200899ee4ca7SPaul E. McKenney 	rcu_read_unlock();
20091da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
20101da177e4SLinus Torvalds 	return new;
20111da177e4SLinus Torvalds }
20121da177e4SLinus Torvalds 
201352cd3b07SLee Schermerhorn /*
201452cd3b07SLee Schermerhorn  * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
201552cd3b07SLee Schermerhorn  * eliminate the MPOL_F_* flags that require conditional ref and
201652cd3b07SLee Schermerhorn  * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
201752cd3b07SLee Schermerhorn  * after return.  Use the returned value.
201852cd3b07SLee Schermerhorn  *
201952cd3b07SLee Schermerhorn  * Allows use of a mempolicy for, e.g., multiple allocations with a single
202052cd3b07SLee Schermerhorn  * policy lookup, even if the policy needs/has an extra ref on lookup.
202152cd3b07SLee Schermerhorn  * shmem_readahead needs this.
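 * (Editor's note -- an illustrative caller pattern, not from the original
 * source; "sp" and "idx" are assumed to come from a shared-policy lookup:
 *
 *	struct mempolicy tmp;
 *	struct mempolicy *pol;
 *
 *	pol = __mpol_cond_copy(&tmp, mpol_shared_policy_lookup(sp, idx));
 *	// pol now refers to the stack copy (or the original, unshared
 *	// policy) and can back several allocations with no further unref.
 * )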
202252cd3b07SLee Schermerhorn */ 202352cd3b07SLee Schermerhorn struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, 202452cd3b07SLee Schermerhorn struct mempolicy *frompol) 202552cd3b07SLee Schermerhorn { 202652cd3b07SLee Schermerhorn if (!mpol_needs_cond_ref(frompol)) 202752cd3b07SLee Schermerhorn return frompol; 202852cd3b07SLee Schermerhorn 202952cd3b07SLee Schermerhorn *tompol = *frompol; 203052cd3b07SLee Schermerhorn tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */ 203152cd3b07SLee Schermerhorn __mpol_put(frompol); 203252cd3b07SLee Schermerhorn return tompol; 203352cd3b07SLee Schermerhorn } 203452cd3b07SLee Schermerhorn 20351da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */ 2036fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) 20371da177e4SLinus Torvalds { 20381da177e4SLinus Torvalds if (!a || !b) 2039fcfb4dccSKOSAKI Motohiro return false; 204045c4745aSLee Schermerhorn if (a->mode != b->mode) 2041fcfb4dccSKOSAKI Motohiro return false; 204219800502SBob Liu if (a->flags != b->flags) 2043fcfb4dccSKOSAKI Motohiro return false; 204419800502SBob Liu if (mpol_store_user_nodemask(a)) 204519800502SBob Liu if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) 2046fcfb4dccSKOSAKI Motohiro return false; 204719800502SBob Liu 204845c4745aSLee Schermerhorn switch (a->mode) { 204919770b32SMel Gorman case MPOL_BIND: 205019770b32SMel Gorman /* Fall through */ 20511da177e4SLinus Torvalds case MPOL_INTERLEAVE: 2052fcfb4dccSKOSAKI Motohiro return !!nodes_equal(a->v.nodes, b->v.nodes); 20531da177e4SLinus Torvalds case MPOL_PREFERRED: 205475719661SNamhyung Kim return a->v.preferred_node == b->v.preferred_node; 20551da177e4SLinus Torvalds default: 20561da177e4SLinus Torvalds BUG(); 2057fcfb4dccSKOSAKI Motohiro return false; 20581da177e4SLinus Torvalds } 20591da177e4SLinus Torvalds } 20601da177e4SLinus Torvalds 20611da177e4SLinus Torvalds /* 20621da177e4SLinus Torvalds * Shared memory backing store policy support. 20631da177e4SLinus Torvalds * 20641da177e4SLinus Torvalds * Remember policies even when nobody has shared memory mapped. 20651da177e4SLinus Torvalds * The policies are kept in Red-Black tree linked from the inode. 20661da177e4SLinus Torvalds * They are protected by the sp->lock spinlock, which should be held 20671da177e4SLinus Torvalds * for any accesses to the tree. 
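 * (Editor's note, illustrative: tree nodes are non-overlapping
 * [start, end) page-offset ranges.  After installing one policy on
 * offsets [0,4) and another on [4,8) of a tmpfs file, sp_lookup(sp, 5, 6)
 * below returns the [4,8) node.)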
20681da177e4SLinus Torvalds */ 20691da177e4SLinus Torvalds 20701da177e4SLinus Torvalds /* lookup first element intersecting start-end */ 20711da177e4SLinus Torvalds /* Caller holds sp->lock */ 20721da177e4SLinus Torvalds static struct sp_node * 20731da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 20741da177e4SLinus Torvalds { 20751da177e4SLinus Torvalds struct rb_node *n = sp->root.rb_node; 20761da177e4SLinus Torvalds 20771da177e4SLinus Torvalds while (n) { 20781da177e4SLinus Torvalds struct sp_node *p = rb_entry(n, struct sp_node, nd); 20791da177e4SLinus Torvalds 20801da177e4SLinus Torvalds if (start >= p->end) 20811da177e4SLinus Torvalds n = n->rb_right; 20821da177e4SLinus Torvalds else if (end <= p->start) 20831da177e4SLinus Torvalds n = n->rb_left; 20841da177e4SLinus Torvalds else 20851da177e4SLinus Torvalds break; 20861da177e4SLinus Torvalds } 20871da177e4SLinus Torvalds if (!n) 20881da177e4SLinus Torvalds return NULL; 20891da177e4SLinus Torvalds for (;;) { 20901da177e4SLinus Torvalds struct sp_node *w = NULL; 20911da177e4SLinus Torvalds struct rb_node *prev = rb_prev(n); 20921da177e4SLinus Torvalds if (!prev) 20931da177e4SLinus Torvalds break; 20941da177e4SLinus Torvalds w = rb_entry(prev, struct sp_node, nd); 20951da177e4SLinus Torvalds if (w->end <= start) 20961da177e4SLinus Torvalds break; 20971da177e4SLinus Torvalds n = prev; 20981da177e4SLinus Torvalds } 20991da177e4SLinus Torvalds return rb_entry(n, struct sp_node, nd); 21001da177e4SLinus Torvalds } 21011da177e4SLinus Torvalds 21021da177e4SLinus Torvalds /* Insert a new shared policy into the list. */ 21031da177e4SLinus Torvalds /* Caller holds sp->lock */ 21041da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new) 21051da177e4SLinus Torvalds { 21061da177e4SLinus Torvalds struct rb_node **p = &sp->root.rb_node; 21071da177e4SLinus Torvalds struct rb_node *parent = NULL; 21081da177e4SLinus Torvalds struct sp_node *nd; 21091da177e4SLinus Torvalds 21101da177e4SLinus Torvalds while (*p) { 21111da177e4SLinus Torvalds parent = *p; 21121da177e4SLinus Torvalds nd = rb_entry(parent, struct sp_node, nd); 21131da177e4SLinus Torvalds if (new->start < nd->start) 21141da177e4SLinus Torvalds p = &(*p)->rb_left; 21151da177e4SLinus Torvalds else if (new->end > nd->end) 21161da177e4SLinus Torvalds p = &(*p)->rb_right; 21171da177e4SLinus Torvalds else 21181da177e4SLinus Torvalds BUG(); 21191da177e4SLinus Torvalds } 21201da177e4SLinus Torvalds rb_link_node(&new->nd, parent, p); 21211da177e4SLinus Torvalds rb_insert_color(&new->nd, &sp->root); 2122140d5a49SPaul Mundt pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 212345c4745aSLee Schermerhorn new->policy ? 
new->policy->mode : 0); 21241da177e4SLinus Torvalds } 21251da177e4SLinus Torvalds 21261da177e4SLinus Torvalds /* Find shared policy intersecting idx */ 21271da177e4SLinus Torvalds struct mempolicy * 21281da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 21291da177e4SLinus Torvalds { 21301da177e4SLinus Torvalds struct mempolicy *pol = NULL; 21311da177e4SLinus Torvalds struct sp_node *sn; 21321da177e4SLinus Torvalds 21331da177e4SLinus Torvalds if (!sp->root.rb_node) 21341da177e4SLinus Torvalds return NULL; 21351da177e4SLinus Torvalds spin_lock(&sp->lock); 21361da177e4SLinus Torvalds sn = sp_lookup(sp, idx, idx+1); 21371da177e4SLinus Torvalds if (sn) { 21381da177e4SLinus Torvalds mpol_get(sn->policy); 21391da177e4SLinus Torvalds pol = sn->policy; 21401da177e4SLinus Torvalds } 21411da177e4SLinus Torvalds spin_unlock(&sp->lock); 21421da177e4SLinus Torvalds return pol; 21431da177e4SLinus Torvalds } 21441da177e4SLinus Torvalds 21451da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n) 21461da177e4SLinus Torvalds { 2147140d5a49SPaul Mundt pr_debug("deleting %lx-l%lx\n", n->start, n->end); 21481da177e4SLinus Torvalds rb_erase(&n->nd, &sp->root); 2149f0be3d32SLee Schermerhorn mpol_put(n->policy); 21501da177e4SLinus Torvalds kmem_cache_free(sn_cache, n); 21511da177e4SLinus Torvalds } 21521da177e4SLinus Torvalds 2153dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end, 2154dbcb0f19SAdrian Bunk struct mempolicy *pol) 21551da177e4SLinus Torvalds { 21561da177e4SLinus Torvalds struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL); 21571da177e4SLinus Torvalds 21581da177e4SLinus Torvalds if (!n) 21591da177e4SLinus Torvalds return NULL; 21601da177e4SLinus Torvalds n->start = start; 21611da177e4SLinus Torvalds n->end = end; 21621da177e4SLinus Torvalds mpol_get(pol); 2163aab0b102SLee Schermerhorn pol->flags |= MPOL_F_SHARED; /* for unref */ 21641da177e4SLinus Torvalds n->policy = pol; 21651da177e4SLinus Torvalds return n; 21661da177e4SLinus Torvalds } 21671da177e4SLinus Torvalds 21681da177e4SLinus Torvalds /* Replace a policy range. */ 21691da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 21701da177e4SLinus Torvalds unsigned long end, struct sp_node *new) 21711da177e4SLinus Torvalds { 21721da177e4SLinus Torvalds struct sp_node *n, *new2 = NULL; 21731da177e4SLinus Torvalds 21741da177e4SLinus Torvalds restart: 21751da177e4SLinus Torvalds spin_lock(&sp->lock); 21761da177e4SLinus Torvalds n = sp_lookup(sp, start, end); 21771da177e4SLinus Torvalds /* Take care of old policies in the same range. */ 21781da177e4SLinus Torvalds while (n && n->start < end) { 21791da177e4SLinus Torvalds struct rb_node *next = rb_next(&n->nd); 21801da177e4SLinus Torvalds if (n->start >= start) { 21811da177e4SLinus Torvalds if (n->end <= end) 21821da177e4SLinus Torvalds sp_delete(sp, n); 21831da177e4SLinus Torvalds else 21841da177e4SLinus Torvalds n->start = end; 21851da177e4SLinus Torvalds } else { 21861da177e4SLinus Torvalds /* Old policy spanning whole new range. 
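			   Editor's note (illustrative): replacing [3,5)
			   inside an existing [0,10) node truncates the old
			   node to [0,3), and "new2" below becomes [5,10)
			   with the old policy, leaving [3,5) for the new one.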
*/ 21871da177e4SLinus Torvalds if (n->end > end) { 21881da177e4SLinus Torvalds if (!new2) { 21891da177e4SLinus Torvalds spin_unlock(&sp->lock); 21901da177e4SLinus Torvalds new2 = sp_alloc(end, n->end, n->policy); 21911da177e4SLinus Torvalds if (!new2) 21921da177e4SLinus Torvalds return -ENOMEM; 21931da177e4SLinus Torvalds goto restart; 21941da177e4SLinus Torvalds } 21951da177e4SLinus Torvalds n->end = start; 21961da177e4SLinus Torvalds sp_insert(sp, new2); 21971da177e4SLinus Torvalds new2 = NULL; 21981da177e4SLinus Torvalds break; 21991da177e4SLinus Torvalds } else 22001da177e4SLinus Torvalds n->end = start; 22011da177e4SLinus Torvalds } 22021da177e4SLinus Torvalds if (!next) 22031da177e4SLinus Torvalds break; 22041da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 22051da177e4SLinus Torvalds } 22061da177e4SLinus Torvalds if (new) 22071da177e4SLinus Torvalds sp_insert(sp, new); 22081da177e4SLinus Torvalds spin_unlock(&sp->lock); 22091da177e4SLinus Torvalds if (new2) { 2210f0be3d32SLee Schermerhorn mpol_put(new2->policy); 22111da177e4SLinus Torvalds kmem_cache_free(sn_cache, new2); 22121da177e4SLinus Torvalds } 22131da177e4SLinus Torvalds return 0; 22141da177e4SLinus Torvalds } 22151da177e4SLinus Torvalds 221671fe804bSLee Schermerhorn /** 221771fe804bSLee Schermerhorn * mpol_shared_policy_init - initialize shared policy for inode 221871fe804bSLee Schermerhorn * @sp: pointer to inode shared policy 221971fe804bSLee Schermerhorn * @mpol: struct mempolicy to install 222071fe804bSLee Schermerhorn * 222171fe804bSLee Schermerhorn * Install non-NULL @mpol in inode's shared policy rb-tree. 222271fe804bSLee Schermerhorn * On entry, the current task has a reference on a non-NULL @mpol. 222371fe804bSLee Schermerhorn * This must be released on exit. 22244bfc4495SKAMEZAWA Hiroyuki * This is called at get_inode() calls and we can use GFP_KERNEL. 
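 * (Editor's note, illustrative: this is the path by which a tmpfs mount
 * such as "mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt" gets the
 * mount's mempolicy attached to each new inode.)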
222571fe804bSLee Schermerhorn */ 222671fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 22277339ff83SRobin Holt { 222858568d2aSMiao Xie int ret; 222958568d2aSMiao Xie 223071fe804bSLee Schermerhorn sp->root = RB_ROOT; /* empty tree == default mempolicy */ 223171fe804bSLee Schermerhorn spin_lock_init(&sp->lock); 22327339ff83SRobin Holt 223371fe804bSLee Schermerhorn if (mpol) { 22347339ff83SRobin Holt struct vm_area_struct pvma; 223571fe804bSLee Schermerhorn struct mempolicy *new; 22364bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 22377339ff83SRobin Holt 22384bfc4495SKAMEZAWA Hiroyuki if (!scratch) 22395c0c1654SLee Schermerhorn goto put_mpol; 224071fe804bSLee Schermerhorn /* contextualize the tmpfs mount point mempolicy */ 224171fe804bSLee Schermerhorn new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 224215d77835SLee Schermerhorn if (IS_ERR(new)) 22430cae3457SDan Carpenter goto free_scratch; /* no valid nodemask intersection */ 224458568d2aSMiao Xie 224558568d2aSMiao Xie task_lock(current); 22464bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 224758568d2aSMiao Xie task_unlock(current); 224815d77835SLee Schermerhorn if (ret) 22495c0c1654SLee Schermerhorn goto put_new; 225071fe804bSLee Schermerhorn 225171fe804bSLee Schermerhorn /* Create pseudo-vma that contains just the policy */ 22527339ff83SRobin Holt memset(&pvma, 0, sizeof(struct vm_area_struct)); 225371fe804bSLee Schermerhorn pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 225471fe804bSLee Schermerhorn mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 225515d77835SLee Schermerhorn 22565c0c1654SLee Schermerhorn put_new: 225771fe804bSLee Schermerhorn mpol_put(new); /* drop initial ref */ 22580cae3457SDan Carpenter free_scratch: 22594bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 22605c0c1654SLee Schermerhorn put_mpol: 22615c0c1654SLee Schermerhorn mpol_put(mpol); /* drop our incoming ref on sb mpol */ 22627339ff83SRobin Holt } 22637339ff83SRobin Holt } 22647339ff83SRobin Holt 22651da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info, 22661da177e4SLinus Torvalds struct vm_area_struct *vma, struct mempolicy *npol) 22671da177e4SLinus Torvalds { 22681da177e4SLinus Torvalds int err; 22691da177e4SLinus Torvalds struct sp_node *new = NULL; 22701da177e4SLinus Torvalds unsigned long sz = vma_pages(vma); 22711da177e4SLinus Torvalds 2272028fec41SDavid Rientjes pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 22731da177e4SLinus Torvalds vma->vm_pgoff, 227445c4745aSLee Schermerhorn sz, npol ? npol->mode : -1, 2275028fec41SDavid Rientjes npol ? npol->flags : -1, 2276dfcd3c0dSAndi Kleen npol ? nodes_addr(npol->v.nodes)[0] : -1); 22771da177e4SLinus Torvalds 22781da177e4SLinus Torvalds if (npol) { 22791da177e4SLinus Torvalds new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 22801da177e4SLinus Torvalds if (!new) 22811da177e4SLinus Torvalds return -ENOMEM; 22821da177e4SLinus Torvalds } 22831da177e4SLinus Torvalds err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 22841da177e4SLinus Torvalds if (err && new) 22851da177e4SLinus Torvalds kmem_cache_free(sn_cache, new); 22861da177e4SLinus Torvalds return err; 22871da177e4SLinus Torvalds } 22881da177e4SLinus Torvalds 22891da177e4SLinus Torvalds /* Free a backing policy store on inode delete. 
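* Walks the entire rb-tree, erasing each sp_node, dropping its policy
* reference and returning the node to the slab cache.  By the time this
* runs the inode is being destroyed, so no lookups can race with the
* teardown.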
*/ 22901da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p) 22911da177e4SLinus Torvalds { 22921da177e4SLinus Torvalds struct sp_node *n; 22931da177e4SLinus Torvalds struct rb_node *next; 22941da177e4SLinus Torvalds 22951da177e4SLinus Torvalds if (!p->root.rb_node) 22961da177e4SLinus Torvalds return; 22971da177e4SLinus Torvalds spin_lock(&p->lock); 22981da177e4SLinus Torvalds next = rb_first(&p->root); 22991da177e4SLinus Torvalds while (next) { 23001da177e4SLinus Torvalds n = rb_entry(next, struct sp_node, nd); 23011da177e4SLinus Torvalds next = rb_next(&n->nd); 230290c5029eSAndi Kleen rb_erase(&n->nd, &p->root); 2303f0be3d32SLee Schermerhorn mpol_put(n->policy); 23041da177e4SLinus Torvalds kmem_cache_free(sn_cache, n); 23051da177e4SLinus Torvalds } 23061da177e4SLinus Torvalds spin_unlock(&p->lock); 23071da177e4SLinus Torvalds } 23081da177e4SLinus Torvalds 23091da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */ 23101da177e4SLinus Torvalds void __init numa_policy_init(void) 23111da177e4SLinus Torvalds { 2312b71636e2SPaul Mundt nodemask_t interleave_nodes; 2313b71636e2SPaul Mundt unsigned long largest = 0; 2314b71636e2SPaul Mundt int nid, prefer = 0; 2315b71636e2SPaul Mundt 23161da177e4SLinus Torvalds policy_cache = kmem_cache_create("numa_policy", 23171da177e4SLinus Torvalds sizeof(struct mempolicy), 231820c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 23191da177e4SLinus Torvalds 23201da177e4SLinus Torvalds sn_cache = kmem_cache_create("shared_policy_node", 23211da177e4SLinus Torvalds sizeof(struct sp_node), 232220c2df83SPaul Mundt 0, SLAB_PANIC, NULL); 23231da177e4SLinus Torvalds 2324b71636e2SPaul Mundt /* 2325b71636e2SPaul Mundt * Set interleaving policy for system init. Interleaving is only 2326b71636e2SPaul Mundt * enabled across suitably sized nodes (default is >= 16MB), or 2327b71636e2SPaul Mundt * fall back to the largest node if they're all smaller. 2328b71636e2SPaul Mundt */ 2329b71636e2SPaul Mundt nodes_clear(interleave_nodes); 233056bbd65dSChristoph Lameter for_each_node_state(nid, N_HIGH_MEMORY) { 2331b71636e2SPaul Mundt unsigned long total_pages = node_present_pages(nid); 23321da177e4SLinus Torvalds 2333b71636e2SPaul Mundt /* Preserve the largest node */ 2334b71636e2SPaul Mundt if (largest < total_pages) { 2335b71636e2SPaul Mundt largest = total_pages; 2336b71636e2SPaul Mundt prefer = nid; 2337b71636e2SPaul Mundt } 2338b71636e2SPaul Mundt 2339b71636e2SPaul Mundt /* Interleave this node? 
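* Only if it holds at least 16MB: with 4KB pages that means 4096 pages
* present, so e.g. a 2048-page (8MB) node is skipped and can only enter
* the mask via the largest-node fallback below.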
*/ 2340b71636e2SPaul Mundt if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2341b71636e2SPaul Mundt node_set(nid, interleave_nodes); 2342b71636e2SPaul Mundt } 2343b71636e2SPaul Mundt 2344b71636e2SPaul Mundt /* All too small, use the largest */ 2345b71636e2SPaul Mundt if (unlikely(nodes_empty(interleave_nodes))) 2346b71636e2SPaul Mundt node_set(prefer, interleave_nodes); 2347b71636e2SPaul Mundt 2348028fec41SDavid Rientjes if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 23491da177e4SLinus Torvalds printk(KERN_ERR "numa_policy_init: interleaving failed\n"); 23501da177e4SLinus Torvalds } 23511da177e4SLinus Torvalds 23528bccd85fSChristoph Lameter /* Reset policy of current process to default */ 23531da177e4SLinus Torvalds void numa_default_policy(void) 23541da177e4SLinus Torvalds { 2355028fec41SDavid Rientjes do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 23561da177e4SLinus Torvalds } 235768860ec1SPaul Jackson 23584225399aSPaul Jackson /* 2359095f1fc4SLee Schermerhorn * Parse and format mempolicy from/to strings 2360095f1fc4SLee Schermerhorn */ 2361095f1fc4SLee Schermerhorn 2362095f1fc4SLee Schermerhorn /* 2363fc36b8d3SLee Schermerhorn * "local" is a pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag 23643f226aa1SLee Schermerhorn * Used only for mpol_parse_str() and mpol_to_str() 23651a75a6c8SChristoph Lameter */ 2366345ace9cSLee Schermerhorn #define MPOL_LOCAL MPOL_MAX 2367345ace9cSLee Schermerhorn static const char * const policy_modes[] = 2368345ace9cSLee Schermerhorn { 2369345ace9cSLee Schermerhorn [MPOL_DEFAULT] = "default", 2370345ace9cSLee Schermerhorn [MPOL_PREFERRED] = "prefer", 2371345ace9cSLee Schermerhorn [MPOL_BIND] = "bind", 2372345ace9cSLee Schermerhorn [MPOL_INTERLEAVE] = "interleave", 2373345ace9cSLee Schermerhorn [MPOL_LOCAL] = "local" 2374345ace9cSLee Schermerhorn }; 23751a75a6c8SChristoph Lameter 2376095f1fc4SLee Schermerhorn 2377095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS 2378095f1fc4SLee Schermerhorn /** 2379095f1fc4SLee Schermerhorn * mpol_parse_str - parse string to mempolicy 2380095f1fc4SLee Schermerhorn * @str: string containing mempolicy to parse 238171fe804bSLee Schermerhorn * @mpol: pointer to struct mempolicy pointer, returned on success. 238271fe804bSLee Schermerhorn * @no_context: flag whether to "contextualize" the mempolicy 2383095f1fc4SLee Schermerhorn * 2384095f1fc4SLee Schermerhorn * Format of input: 2385095f1fc4SLee Schermerhorn * <mode>[=<flags>][:<nodelist>] 2386095f1fc4SLee Schermerhorn * 238771fe804bSLee Schermerhorn * If @no_context is true, save the input nodemask in w.user_nodemask in 238871fe804bSLee Schermerhorn * the returned mempolicy. This will be used to "clone" the mempolicy in 238971fe804bSLee Schermerhorn * a specific context [cpuset] at a later time. Used to parse tmpfs mpol 239071fe804bSLee Schermerhorn * mount option. Note that if 'static' or 'relative' mode flags were 239171fe804bSLee Schermerhorn * specified, the input nodemask will already have been saved. Saving 239271fe804bSLee Schermerhorn * it again is redundant, but safe.
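*
* Example inputs (illustrative):
*	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
*	"prefer=static:1"	MPOL_PREFERRED | MPOL_F_STATIC_NODES, node 1
*	"bind:0,2"		MPOL_BIND over nodes 0 and 2
*	"local"			MPOL_PREFERRED with MPOL_F_LOCAL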
239371fe804bSLee Schermerhorn * 239471fe804bSLee Schermerhorn * Returns 0 on success, else 1 2395095f1fc4SLee Schermerhorn */ 239671fe804bSLee Schermerhorn int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) 2397095f1fc4SLee Schermerhorn { 239871fe804bSLee Schermerhorn struct mempolicy *new = NULL; 2399b4652e84SLee Schermerhorn unsigned short mode; 240071fe804bSLee Schermerhorn unsigned short uninitialized_var(mode_flags); 240171fe804bSLee Schermerhorn nodemask_t nodes; 2402095f1fc4SLee Schermerhorn char *nodelist = strchr(str, ':'); 2403095f1fc4SLee Schermerhorn char *flags = strchr(str, '='); 2404095f1fc4SLee Schermerhorn int err = 1; 2405095f1fc4SLee Schermerhorn 2406095f1fc4SLee Schermerhorn if (nodelist) { 2407095f1fc4SLee Schermerhorn /* NUL-terminate mode or flags string */ 2408095f1fc4SLee Schermerhorn *nodelist++ = '\0'; 240971fe804bSLee Schermerhorn if (nodelist_parse(nodelist, nodes)) 2410095f1fc4SLee Schermerhorn goto out; 241171fe804bSLee Schermerhorn if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY])) 2412095f1fc4SLee Schermerhorn goto out; 241371fe804bSLee Schermerhorn } else 241471fe804bSLee Schermerhorn nodes_clear(nodes); 241571fe804bSLee Schermerhorn 2416095f1fc4SLee Schermerhorn if (flags) 2417095f1fc4SLee Schermerhorn *flags++ = '\0'; /* terminate mode string */ 2418095f1fc4SLee Schermerhorn 2419b4652e84SLee Schermerhorn for (mode = 0; mode <= MPOL_LOCAL; mode++) { 2420345ace9cSLee Schermerhorn if (!strcmp(str, policy_modes[mode])) { 2421095f1fc4SLee Schermerhorn break; 2422095f1fc4SLee Schermerhorn } 2423095f1fc4SLee Schermerhorn } 2424b4652e84SLee Schermerhorn if (mode > MPOL_LOCAL) 2425095f1fc4SLee Schermerhorn goto out; 2426095f1fc4SLee Schermerhorn 242771fe804bSLee Schermerhorn switch (mode) { 2428095f1fc4SLee Schermerhorn case MPOL_PREFERRED: 242971fe804bSLee Schermerhorn /* 243071fe804bSLee Schermerhorn * Insist on a nodelist of one node only 243171fe804bSLee Schermerhorn */ 2432095f1fc4SLee Schermerhorn if (nodelist) { 2433095f1fc4SLee Schermerhorn char *rest = nodelist; 2434095f1fc4SLee Schermerhorn while (isdigit(*rest)) 2435095f1fc4SLee Schermerhorn rest++; 2436926f2ae0SKOSAKI Motohiro if (*rest) 2437926f2ae0SKOSAKI Motohiro goto out; 2438095f1fc4SLee Schermerhorn } 2439095f1fc4SLee Schermerhorn break; 2440095f1fc4SLee Schermerhorn case MPOL_INTERLEAVE: 2441095f1fc4SLee Schermerhorn /* 2442095f1fc4SLee Schermerhorn * Default to online nodes with memory if no nodelist 2443095f1fc4SLee Schermerhorn */ 2444095f1fc4SLee Schermerhorn if (!nodelist) 244571fe804bSLee Schermerhorn nodes = node_states[N_HIGH_MEMORY]; 24463f226aa1SLee Schermerhorn break; 244771fe804bSLee Schermerhorn case MPOL_LOCAL: 24483f226aa1SLee Schermerhorn /* 244971fe804bSLee Schermerhorn * Don't allow a nodelist; mpol_new() checks flags 24503f226aa1SLee Schermerhorn */ 245171fe804bSLee Schermerhorn if (nodelist) 24523f226aa1SLee Schermerhorn goto out; 245371fe804bSLee Schermerhorn mode = MPOL_PREFERRED; 24543f226aa1SLee Schermerhorn break; 2455413b43deSRavikiran G Thirumalai case MPOL_DEFAULT: 2456413b43deSRavikiran G Thirumalai /* 2457413b43deSRavikiran G Thirumalai * Insist on an empty nodelist 2458413b43deSRavikiran G Thirumalai */ 2459413b43deSRavikiran G Thirumalai if (!nodelist) 2460413b43deSRavikiran G Thirumalai err = 0; 2461413b43deSRavikiran G Thirumalai goto out; 2462d69b2e63SKOSAKI Motohiro case MPOL_BIND: 246371fe804bSLee Schermerhorn /* 2464d69b2e63SKOSAKI Motohiro * Insist on a nodelist 246571fe804bSLee Schermerhorn */ 2466d69b2e63SKOSAKI Motohiro if
(!nodelist) 2467d69b2e63SKOSAKI Motohiro goto out; 2468095f1fc4SLee Schermerhorn } 2469095f1fc4SLee Schermerhorn 247071fe804bSLee Schermerhorn mode_flags = 0; 2471095f1fc4SLee Schermerhorn if (flags) { 2472095f1fc4SLee Schermerhorn /* 2473095f1fc4SLee Schermerhorn * Currently, we only support two mutually exclusive 2474095f1fc4SLee Schermerhorn * mode flags. 2475095f1fc4SLee Schermerhorn */ 2476095f1fc4SLee Schermerhorn if (!strcmp(flags, "static")) 247771fe804bSLee Schermerhorn mode_flags |= MPOL_F_STATIC_NODES; 2478095f1fc4SLee Schermerhorn else if (!strcmp(flags, "relative")) 247971fe804bSLee Schermerhorn mode_flags |= MPOL_F_RELATIVE_NODES; 2480095f1fc4SLee Schermerhorn else 2481926f2ae0SKOSAKI Motohiro goto out; 2482095f1fc4SLee Schermerhorn } 248371fe804bSLee Schermerhorn 248471fe804bSLee Schermerhorn new = mpol_new(mode, mode_flags, &nodes); 248571fe804bSLee Schermerhorn if (IS_ERR(new)) 2486926f2ae0SKOSAKI Motohiro goto out; 2487926f2ae0SKOSAKI Motohiro 2488e17f74afSLee Schermerhorn if (no_context) { 2489e17f74afSLee Schermerhorn /* save for contextualization */ 2490e17f74afSLee Schermerhorn new->w.user_nodemask = nodes; 2491e17f74afSLee Schermerhorn } else { 249258568d2aSMiao Xie int ret; 24934bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH(scratch); 24944bfc4495SKAMEZAWA Hiroyuki if (scratch) { 249558568d2aSMiao Xie task_lock(current); 24964bfc4495SKAMEZAWA Hiroyuki ret = mpol_set_nodemask(new, &nodes, scratch); 249758568d2aSMiao Xie task_unlock(current); 24984bfc4495SKAMEZAWA Hiroyuki } else 24994bfc4495SKAMEZAWA Hiroyuki ret = -ENOMEM; 25004bfc4495SKAMEZAWA Hiroyuki NODEMASK_SCRATCH_FREE(scratch); 25014bfc4495SKAMEZAWA Hiroyuki if (ret) { 25024bfc4495SKAMEZAWA Hiroyuki mpol_put(new); 2503926f2ae0SKOSAKI Motohiro goto out; 2504926f2ae0SKOSAKI Motohiro } 2505926f2ae0SKOSAKI Motohiro } 2506926f2ae0SKOSAKI Motohiro err = 0; 250771fe804bSLee Schermerhorn 2508095f1fc4SLee Schermerhorn out: 2509095f1fc4SLee Schermerhorn /* Restore string for error message */ 2510095f1fc4SLee Schermerhorn if (nodelist) 2511095f1fc4SLee Schermerhorn *--nodelist = ':'; 2512095f1fc4SLee Schermerhorn if (flags) 2513095f1fc4SLee Schermerhorn *--flags = '='; 251471fe804bSLee Schermerhorn if (!err) 251571fe804bSLee Schermerhorn *mpol = new; 2516095f1fc4SLee Schermerhorn return err; 2517095f1fc4SLee Schermerhorn } 2518095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */ 2519095f1fc4SLee Schermerhorn 252071fe804bSLee Schermerhorn /** 252171fe804bSLee Schermerhorn * mpol_to_str - format a mempolicy structure for printing 252271fe804bSLee Schermerhorn * @buffer: to contain formatted mempolicy string 252371fe804bSLee Schermerhorn * @maxlen: length of @buffer 252471fe804bSLee Schermerhorn * @pol: pointer to mempolicy to be formatted 252571fe804bSLee Schermerhorn * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask 252671fe804bSLee Schermerhorn * 25271a75a6c8SChristoph Lameter * Convert a mempolicy into a string. 25281a75a6c8SChristoph Lameter * Returns the number of characters in buffer (if positive) 25291a75a6c8SChristoph Lameter * or an error (negative) 25301a75a6c8SChristoph Lameter */ 253171fe804bSLee Schermerhorn int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) 25321a75a6c8SChristoph Lameter { 25331a75a6c8SChristoph Lameter char *p = buffer; 25341a75a6c8SChristoph Lameter int l; 25351a75a6c8SChristoph Lameter nodemask_t nodes; 2536bea904d5SLee Schermerhorn unsigned short mode; 2537f5b087b5SDavid Rientjes unsigned short flags = pol ? 
pol->flags : 0; 25381da177e4SLinus Torvalds 25392291990aSLee Schermerhorn /* 25402291990aSLee Schermerhorn * Sanity check: room for longest mode, flag and some nodes 25412291990aSLee Schermerhorn */ 25422291990aSLee Schermerhorn VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16); 25432291990aSLee Schermerhorn 2544bea904d5SLee Schermerhorn if (!pol || pol == &default_policy) 2545bea904d5SLee Schermerhorn mode = MPOL_DEFAULT; 2546bea904d5SLee Schermerhorn else 2547bea904d5SLee Schermerhorn mode = pol->mode; 2548bea904d5SLee Schermerhorn 25491a75a6c8SChristoph Lameter switch (mode) { 25501a75a6c8SChristoph Lameter case MPOL_DEFAULT: 25511a75a6c8SChristoph Lameter nodes_clear(nodes); 25521a75a6c8SChristoph Lameter break; 25531a75a6c8SChristoph Lameter 25541a75a6c8SChristoph Lameter case MPOL_PREFERRED: 25551a75a6c8SChristoph Lameter nodes_clear(nodes); 2556fc36b8d3SLee Schermerhorn if (flags & MPOL_F_LOCAL) 255753f2556bSLee Schermerhorn mode = MPOL_LOCAL; /* pseudo-policy */ 255853f2556bSLee Schermerhorn else 2559fc36b8d3SLee Schermerhorn node_set(pol->v.preferred_node, nodes); 25601a75a6c8SChristoph Lameter break; 25611a75a6c8SChristoph Lameter 25621a75a6c8SChristoph Lameter case MPOL_BIND: 256319770b32SMel Gorman /* Fall through */ 25641a75a6c8SChristoph Lameter case MPOL_INTERLEAVE: 256571fe804bSLee Schermerhorn if (no_context) 256671fe804bSLee Schermerhorn nodes = pol->w.user_nodemask; 256771fe804bSLee Schermerhorn else 25681a75a6c8SChristoph Lameter nodes = pol->v.nodes; 25691a75a6c8SChristoph Lameter break; 25701a75a6c8SChristoph Lameter 25711a75a6c8SChristoph Lameter default: 257280de7c31SDave Jones return -EINVAL; 25731a75a6c8SChristoph Lameter } 25741a75a6c8SChristoph Lameter 2575345ace9cSLee Schermerhorn l = strlen(policy_modes[mode]); 25761a75a6c8SChristoph Lameter if (buffer + maxlen < p + l + 1) 25771a75a6c8SChristoph Lameter return -ENOSPC; 25781a75a6c8SChristoph Lameter 2579345ace9cSLee Schermerhorn strcpy(p, policy_modes[mode]); 25801a75a6c8SChristoph Lameter p += l; 25811a75a6c8SChristoph Lameter 2582fc36b8d3SLee Schermerhorn if (flags & MPOL_MODE_FLAGS) { 2583f5b087b5SDavid Rientjes if (buffer + maxlen < p + 2) 2584f5b087b5SDavid Rientjes return -ENOSPC; 2585f5b087b5SDavid Rientjes *p++ = '='; 2586f5b087b5SDavid Rientjes 25872291990aSLee Schermerhorn /* 25882291990aSLee Schermerhorn * Currently, the only defined flags are mutually exclusive 25892291990aSLee Schermerhorn */ 2590f5b087b5SDavid Rientjes if (flags & MPOL_F_STATIC_NODES) 25912291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "static"); 25922291990aSLee Schermerhorn else if (flags & MPOL_F_RELATIVE_NODES) 25932291990aSLee Schermerhorn p += snprintf(p, buffer + maxlen - p, "relative"); 2594f5b087b5SDavid Rientjes } 2595f5b087b5SDavid Rientjes 25961a75a6c8SChristoph Lameter if (!nodes_empty(nodes)) { 25971a75a6c8SChristoph Lameter if (buffer + maxlen < p + 2) 25981a75a6c8SChristoph Lameter return -ENOSPC; 2599095f1fc4SLee Schermerhorn *p++ = ':'; 26001a75a6c8SChristoph Lameter p += nodelist_scnprintf(p, buffer + maxlen - p, nodes); 26011a75a6c8SChristoph Lameter } 26021a75a6c8SChristoph Lameter return p - buffer; 26031a75a6c8SChristoph Lameter }
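
/*
 * Illustrative usage sketch (an assumed caller, not part of this file):
 * round-trip a tmpfs-style policy string through mpol_parse_str() and
 * mpol_to_str().  Note that the parser modifies its input, so the
 * string must be writable; buffer sizing follows the sanity check in
 * mpol_to_str().
 *
 *	char str[] = "interleave:0-3";
 *	char buf[64];
 *	struct mempolicy *mpol;
 *
 *	if (!mpol_parse_str(str, &mpol, 1)) {
 *		if (mpol_to_str(buf, sizeof(buf), mpol, 1) > 0)
 *			pr_debug("parsed mpol: %s\n", buf);
 *		mpol_put(mpol);
 *	}
 */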