xref: /linux/mm/mempolicy.c (revision 6421ec764a62c51f810c5dc40cd45eeb15801ad9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA-aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When the process
 * policy is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
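
/*
 * Illustrative userspace sketch (not part of this file): the policies
 * above are selected through the set_mempolicy() and mbind() syscalls,
 * typically via libnuma's <numaif.h> wrappers (link with -lnuma).
 * Assuming a two-node system:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	// Interleave this task's future allocations over nodes 0-1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *
 *	// Give one mapping a per-VMA bind policy on node 0 only.
 *	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long node0 = 1UL << 0;
 *	mbind(buf, 1 << 20, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */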

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);
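
/*
 * Illustrative sketch (not part of this file): a caller holding a
 * possibly-offline node id, e.g. from firmware affinity tables, can
 * normalize it before allocating. Assuming node 2 is described but
 * offline:
 *
 *	int nid = numa_map_to_online_node(2);	// nearest online node
 *						// by node_distance()
 *	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 0);
 */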

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
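
/*
 * Worked example (not part of this file): with *orig = {0,2} and
 * *rel = {4,5,6}, nodes_fold() wraps *orig modulo nodes_weight(*rel) = 3,
 * giving tmp = {0,2}; nodes_onto() then maps the Nth set bit of tmp onto
 * the Nth set bit of *rel, so *ret = {4,6}. This is how
 * MPOL_F_RELATIVE_NODES masks are remapped onto the currently allowed
 * nodes.
 */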

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
								*nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->v.nodes = tmp;
}
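
/*
 * Worked example (not part of this file): suppose a policy was created
 * with user_nodemask = {0,1} and the cpuset is later moved so the new
 * allowed mask is {2,3}. MPOL_F_STATIC_NODES yields {0,1} & {2,3} =
 * empty, so tmp falls back to the whole new mask {2,3}.
 * MPOL_F_RELATIVE_NODES refolds {0,1} onto {2,3}, also giving {2,3}.
 * The default case remaps bit positions with nodes_remap(), so node 0
 * becomes node 2 and node 1 becomes node 3.
 */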

static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	mmap_write_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the complement of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}
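
/*
 * Illustrative truth table (not part of this file): the return value is
 * an XNOR of the two conditions:
 *
 *	node_isset(nid, *qp->nmask)	MPOL_MF_INVERT	queue the page?
 *	yes				clear		yes
 *	yes				set		no
 *	no				clear		no
 *	no				set		yes
 */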

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - the pmd is a migration entry, or only MPOL_MF_STRICT was
 *        specified and an existing page was already on a node that does
 *        not follow the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
		ret = 2;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporarily off-LRU pages in the range.  Still
			 * need to migrate other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting a misplaced page and no
		 * need to further check other vmas.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detect the misplaced page, but allow migrating pages which
		 * have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
		if (!isolate_huge_page(page, qp->pagelist) &&
			(flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate page but allow migrating pages
			 * which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
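
/*
 * Illustrative diagram (not part of this file): when the requested range
 * covers the middle of a vma and no merge is possible, split_vma() is
 * called at both edges and vma_replace_policy() then acts on the middle
 * piece:
 *
 *	before:	|------------------ vma ------------------|
 *	              start^                ^end
 *	after:	|--old--|-------- new --------|----old----|
 */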

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	if (flags & MPOL_F_NUMA_BALANCING) {
		if (new && new->mode == MPOL_BIND) {
			new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
		} else {
			ret = -EINVAL;
			mpol_put(new);
			goto out;
		}
	}

	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		mpol_put(new);
		goto out;
	}
	task_lock(current);
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}
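
/*
 * Illustrative example (not part of this file): the MPOL_F_NUMA_BALANCING
 * branch above corresponds to a userspace call such as
 *
 *	set_mempolicy(MPOL_BIND | MPOL_F_NUMA_BALANCING, &mask,
 *		      sizeof(mask) * 8);
 *
 * which sets MPOL_F_MOF | MPOL_F_MORON so NUMA balancing keeps optimizing
 * page placement among the bound nodes.
 */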

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int err;

	int locked = 1;
	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (err > 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		mmap_read_unlock(mm);
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, lookup_node()
			 * will drop the mmap_lock, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->v.nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}
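
/*
 * Illustrative userspace sketch (not part of this file): this routine
 * backs the get_mempolicy() syscall. Via <numaif.h>, assuming "buf" is a
 * mapped address:
 *
 *	int mode;
 *	unsigned long mask;
 *
 *	// Which node currently backs the page at buf?
 *	get_mempolicy(&mode, NULL, 0, buf, MPOL_F_NODE | MPOL_F_ADDR);
 *
 *	// Which policy and nodemask govern the calling task?
 *	get_mempolicy(&mode, &mask, sizeof(mask) * 8, NULL, 0);
 */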

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				thp_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * A non-movable page may reach here.  And, there may
			 * be temporarily off-LRU pages or non-LRU movable
			 * pages.  Treat them as unmovable pages since they
			 * can't be isolated, so they can't be moved at the
			 * moment.  It should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}
10766ce3c4c0SChristoph Lameter 
10776ce3c4c0SChristoph Lameter /*
10787e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10797e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10807e2ab150SChristoph Lameter  */
1081dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1082dbcb0f19SAdrian Bunk 			   int flags)
10837e2ab150SChristoph Lameter {
10847e2ab150SChristoph Lameter 	nodemask_t nmask;
10857e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10867e2ab150SChristoph Lameter 	int err = 0;
1087a0976311SJoonsoo Kim 	struct migration_target_control mtc = {
1088a0976311SJoonsoo Kim 		.nid = dest,
1089a0976311SJoonsoo Kim 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1090a0976311SJoonsoo Kim 	};
10917e2ab150SChristoph Lameter 
10927e2ab150SChristoph Lameter 	nodes_clear(nmask);
10937e2ab150SChristoph Lameter 	node_set(source, nmask);
10947e2ab150SChristoph Lameter 
109508270807SMinchan Kim 	/*
109608270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
109708270807SMinchan Kim 	 * need migration.  Between passing in the full user address
109808270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
109908270807SMinchan Kim 	 */
110008270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
110198094945SNaoya Horiguchi 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
11027e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
11037e2ab150SChristoph Lameter 
1104cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1105a0976311SJoonsoo Kim 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1106a0976311SJoonsoo Kim 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
1107cf608ac1SMinchan Kim 		if (err)
1108e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1109cf608ac1SMinchan Kim 	}
111095a402c3SChristoph Lameter 
11117e2ab150SChristoph Lameter 	return err;
11127e2ab150SChristoph Lameter }
11137e2ab150SChristoph Lameter 
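/*
 * Illustrative sketch (not from the kernel sources): draining node 0
 * onto node 2 for an mm whose caller already holds mmap_read_lock(mm)
 * would look like this; a positive return is the count of pages left
 * behind.
 *
 *	err = migrate_to_node(mm, 0, 2, MPOL_MF_MOVE);
 *
 * do_migrate_pages() below drives exactly this helper once per
 * <source, dest> node pair it derives from the two nodemasks.
 */
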
11147e2ab150SChristoph Lameter /*
11157e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
11167e2ab150SChristoph Lameter  * layout as much as possible.
111739743889SChristoph Lameter  *
111839743889SChristoph Lameter  * Returns the number of pages that could not be moved.
111939743889SChristoph Lameter  */
11200ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11210ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
112239743889SChristoph Lameter {
11237e2ab150SChristoph Lameter 	int busy = 0;
1124f555befdSJan Stancek 	int err = 0;
11257e2ab150SChristoph Lameter 	nodemask_t tmp;
112639743889SChristoph Lameter 
1127236c32ebSYang Shi 	migrate_prep();
11280aedadf9SChristoph Lameter 
1129d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
1130d4984711SChristoph Lameter 
11317e2ab150SChristoph Lameter 	/*
11327e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
11337e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
11347e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
11357e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
11367e2ab150SChristoph Lameter 	 *
11377e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
11387e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
11397e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
11407e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
11417e2ab150SChristoph Lameter 	 *
11427e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
11437e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
11447e2ab150SChristoph Lameter 	 * (nothing left to migrate).
11457e2ab150SChristoph Lameter 	 *
11467e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
11477e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
11487e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
11497e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
11507e2ab150SChristoph Lameter 	 * before migrating outgoing memory sourced from that same node.
11517e2ab150SChristoph Lameter 	 *
11527e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
11537e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
11547e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
11557e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out immediately with that pair.
1156ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning 'tmp', we at least have the
11577e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
11587e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
11597e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11607e2ab150SChristoph Lameter 	 */
11617e2ab150SChristoph Lameter 
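	/*
	 * Worked example (illustrative only): with *from = {0,1} and
	 * *to = {2,3}, node_remap(0, *from, *to) == 2 and
	 * node_remap(1, *from, *to) == 3.  The scan below first picks
	 * <0,2> (node 2 is an "empty slot", not set in tmp), clears
	 * node 0 from tmp and migrates, then rescans and picks <1,3>.
	 */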
11620ce72d4fSAndrew Morton 	tmp = *from;
11637e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11647e2ab150SChristoph Lameter 		int s, d;
1165b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11667e2ab150SChristoph Lameter 		int dest = 0;
11677e2ab150SChristoph Lameter 
11687e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11694a5b18ccSLarry Woodman 
11704a5b18ccSLarry Woodman 			/*
11714a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11724a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11734a5b18ccSLarry Woodman 			 * threads and memory areas.
11744a5b18ccSLarry Woodman 			 *
11754a5b18ccSLarry Woodman 			 * However if the number of source nodes is not equal to
11764a5b18ccSLarry Woodman 			 * the number of destination nodes we can not preserve
11774a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
11784a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11794a5b18ccSLarry Woodman 			 * mask.
11804a5b18ccSLarry Woodman 			 *
11814a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11824a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11834a5b18ccSLarry Woodman 			 */
11844a5b18ccSLarry Woodman 
11850ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11860ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11874a5b18ccSLarry Woodman 				continue;
11884a5b18ccSLarry Woodman 
11890ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11907e2ab150SChristoph Lameter 			if (s == d)
11917e2ab150SChristoph Lameter 				continue;
11927e2ab150SChristoph Lameter 
11937e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11947e2ab150SChristoph Lameter 			dest = d;
11957e2ab150SChristoph Lameter 
11967e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11977e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11987e2ab150SChristoph Lameter 				break;
11997e2ab150SChristoph Lameter 		}
1200b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
12017e2ab150SChristoph Lameter 			break;
12027e2ab150SChristoph Lameter 
12037e2ab150SChristoph Lameter 		node_clear(source, tmp);
12047e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
12057e2ab150SChristoph Lameter 		if (err > 0)
12067e2ab150SChristoph Lameter 			busy += err;
12077e2ab150SChristoph Lameter 		if (err < 0)
12087e2ab150SChristoph Lameter 			break;
120939743889SChristoph Lameter 	}
1210d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
12117e2ab150SChristoph Lameter 	if (err < 0)
12127e2ab150SChristoph Lameter 		return err;
12137e2ab150SChristoph Lameter 	return busy;
121539743889SChristoph Lameter }
121639743889SChristoph Lameter 
12173ad33b24SLee Schermerhorn /*
12183ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1219d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma that contains @start.
12203ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
12213ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
12223ad33b24SLee Schermerhorn  * is in virtual address order.
12233ad33b24SLee Schermerhorn  */
1224666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
122595a402c3SChristoph Lameter {
1226d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
12273f649ab7SKees Cook 	unsigned long address;
122895a402c3SChristoph Lameter 
1229d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
12303ad33b24SLee Schermerhorn 	while (vma) {
12313ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
12323ad33b24SLee Schermerhorn 		if (address != -EFAULT)
12333ad33b24SLee Schermerhorn 			break;
12343ad33b24SLee Schermerhorn 		vma = vma->vm_next;
12353ad33b24SLee Schermerhorn 	}
12363ad33b24SLee Schermerhorn 
123711c731e8SWanpeng Li 	if (PageHuge(page)) {
1238389c8178SMichal Hocko 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1239389c8178SMichal Hocko 				vma, address);
124094723aafSMichal Hocko 	} else if (PageTransHuge(page)) {
1241c8633798SNaoya Horiguchi 		struct page *thp;
1242c8633798SNaoya Horiguchi 
124319deb769SDavid Rientjes 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
124419deb769SDavid Rientjes 					 HPAGE_PMD_ORDER);
1245c8633798SNaoya Horiguchi 		if (!thp)
1246c8633798SNaoya Horiguchi 			return NULL;
1247c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
1248c8633798SNaoya Horiguchi 		return thp;
124911c731e8SWanpeng Li 	}
125011c731e8SWanpeng Li 	/*
125111c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
125211c731e8SWanpeng Li 	 */
12530f556856SMichal Hocko 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
12540f556856SMichal Hocko 			vma, address);
125595a402c3SChristoph Lameter }
1256b20a3503SChristoph Lameter #else
1257b20a3503SChristoph Lameter 
1258a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
1259b20a3503SChristoph Lameter 				unsigned long flags)
1260b20a3503SChristoph Lameter {
1261a53190a4SYang Shi 	return -EIO;
1262b20a3503SChristoph Lameter }
1263b20a3503SChristoph Lameter 
12640ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12650ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1266b20a3503SChristoph Lameter {
1267b20a3503SChristoph Lameter 	return -ENOSYS;
1268b20a3503SChristoph Lameter }
126995a402c3SChristoph Lameter 
1270666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
127195a402c3SChristoph Lameter {
127295a402c3SChristoph Lameter 	return NULL;
127395a402c3SChristoph Lameter }
1274b20a3503SChristoph Lameter #endif
1275b20a3503SChristoph Lameter 
1276dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1277028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1278028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12796ce3c4c0SChristoph Lameter {
12806ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
12816ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12826ce3c4c0SChristoph Lameter 	unsigned long end;
12836ce3c4c0SChristoph Lameter 	int err;
1284d8835445SYang Shi 	int ret;
12856ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12866ce3c4c0SChristoph Lameter 
1287b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12886ce3c4c0SChristoph Lameter 		return -EINVAL;
128974c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12906ce3c4c0SChristoph Lameter 		return -EPERM;
12916ce3c4c0SChristoph Lameter 
12926ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12936ce3c4c0SChristoph Lameter 		return -EINVAL;
12946ce3c4c0SChristoph Lameter 
12956ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12966ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12976ce3c4c0SChristoph Lameter 
12986ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
12996ce3c4c0SChristoph Lameter 	end = start + len;
13006ce3c4c0SChristoph Lameter 
13016ce3c4c0SChristoph Lameter 	if (end < start)
13026ce3c4c0SChristoph Lameter 		return -EINVAL;
13036ce3c4c0SChristoph Lameter 	if (end == start)
13046ce3c4c0SChristoph Lameter 		return 0;
13056ce3c4c0SChristoph Lameter 
1306028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
13076ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
13086ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
13096ce3c4c0SChristoph Lameter 
1310b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1311b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1312b24f53a0SLee Schermerhorn 
13136ce3c4c0SChristoph Lameter 	/*
13146ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
13156ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
13166ce3c4c0SChristoph Lameter 	 */
13176ce3c4c0SChristoph Lameter 	if (!new)
13186ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
13196ce3c4c0SChristoph Lameter 
1320028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1321028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
132200ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
13236ce3c4c0SChristoph Lameter 
13240aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1326236c32ebSYang Shi 		migrate_prep();
13284bfc4495SKAMEZAWA Hiroyuki 	{
13294bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
13304bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
1331d8ed45c5SMichel Lespinasse 			mmap_write_lock(mm);
13324bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
13334bfc4495SKAMEZAWA Hiroyuki 			if (err)
1334d8ed45c5SMichel Lespinasse 				mmap_write_unlock(mm);
13354bfc4495SKAMEZAWA Hiroyuki 		} else
13364bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
13374bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
13384bfc4495SKAMEZAWA Hiroyuki 	}
1339b05ca738SKOSAKI Motohiro 	if (err)
1340b05ca738SKOSAKI Motohiro 		goto mpol_out;
1341b05ca738SKOSAKI Motohiro 
1342d8835445SYang Shi 	ret = queue_pages_range(mm, start, end, nmask,
13436ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1344d8835445SYang Shi 
1345d8835445SYang Shi 	if (ret < 0) {
1346a85dfc30SYang Shi 		err = ret;
1347d8835445SYang Shi 		goto up_out;
1348d8835445SYang Shi 	}
1349d8835445SYang Shi 
13509d8cebd4SKOSAKI Motohiro 	err = mbind_range(mm, start, end, new);
13517e2ab150SChristoph Lameter 
1352b24f53a0SLee Schermerhorn 	if (!err) {
1353b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1354b24f53a0SLee Schermerhorn 
1355cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1356b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1357d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1358d05f0cdcSHugh Dickins 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1359cf608ac1SMinchan Kim 			if (nr_failed)
136074060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1361cf608ac1SMinchan Kim 		}
13626ce3c4c0SChristoph Lameter 
1363d8835445SYang Shi 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
13646ce3c4c0SChristoph Lameter 			err = -EIO;
1365a85dfc30SYang Shi 	} else {
1366d8835445SYang Shi up_out:
1367a85dfc30SYang Shi 		if (!list_empty(&pagelist))
1368a85dfc30SYang Shi 			putback_movable_pages(&pagelist);
1369a85dfc30SYang Shi 	}
1370a85dfc30SYang Shi 
1371d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1372b05ca738SKOSAKI Motohiro mpol_out:
1373f0be3d32SLee Schermerhorn 	mpol_put(new);
13746ce3c4c0SChristoph Lameter 	return err;
13756ce3c4c0SChristoph Lameter }
13766ce3c4c0SChristoph Lameter 
137739743889SChristoph Lameter /*
13788bccd85fSChristoph Lameter  * User space interface with variable-sized bitmaps for nodelists.
13798bccd85fSChristoph Lameter  */
13808bccd85fSChristoph Lameter 
13818bccd85fSChristoph Lameter /* Copy a node mask from user space. */
138239743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13838bccd85fSChristoph Lameter 		     unsigned long maxnode)
13848bccd85fSChristoph Lameter {
13858bccd85fSChristoph Lameter 	unsigned long k;
138656521e7aSYisheng Xie 	unsigned long t;
13878bccd85fSChristoph Lameter 	unsigned long nlongs;
13888bccd85fSChristoph Lameter 	unsigned long endmask;
13898bccd85fSChristoph Lameter 
13908bccd85fSChristoph Lameter 	--maxnode;
13918bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13928bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13938bccd85fSChristoph Lameter 		return 0;
1394a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1395636f13c1SChris Wright 		return -EINVAL;
13968bccd85fSChristoph Lameter 
13978bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
13988bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
13998bccd85fSChristoph Lameter 		endmask = ~0UL;
14008bccd85fSChristoph Lameter 	else
14018bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
14028bccd85fSChristoph Lameter 
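	/*
	 * Worked example (illustrative): a caller passing maxnode == 67
	 * means bits 0..65 are meaningful.  After the decrement above,
	 * maxnode == 66, so nlongs == 2 and endmask == 0x3, keeping
	 * only bits 64 and 65 of the final long.
	 */
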
140356521e7aSYisheng Xie 	/*
140456521e7aSYisheng Xie 	 * When the user specified more nodes than supported, just check
140556521e7aSYisheng Xie 	 * that the unsupported part is all zero.
140656521e7aSYisheng Xie 	 *
140756521e7aSYisheng Xie 	 * If maxnode spans more longs than MAX_NUMNODES, check the bits
140856521e7aSYisheng Xie 	 * in that area first, and then go on to check the remaining
140956521e7aSYisheng Xie 	 * bits at or above MAX_NUMNODES.  Otherwise, just check the
141056521e7aSYisheng Xie 	 * bits in [MAX_NUMNODES, maxnode).
141156521e7aSYisheng Xie 	 */
14128bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
14138bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
14148bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
14158bccd85fSChristoph Lameter 				return -EFAULT;
14168bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
14178bccd85fSChristoph Lameter 				if (t & endmask)
14188bccd85fSChristoph Lameter 					return -EINVAL;
14198bccd85fSChristoph Lameter 			} else if (t)
14208bccd85fSChristoph Lameter 				return -EINVAL;
14218bccd85fSChristoph Lameter 		}
14228bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
14238bccd85fSChristoph Lameter 		endmask = ~0UL;
14248bccd85fSChristoph Lameter 	}
14258bccd85fSChristoph Lameter 
142656521e7aSYisheng Xie 	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
142756521e7aSYisheng Xie 		unsigned long valid_mask = endmask;
142856521e7aSYisheng Xie 
142956521e7aSYisheng Xie 		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
143056521e7aSYisheng Xie 		if (get_user(t, nmask + nlongs - 1))
143156521e7aSYisheng Xie 			return -EFAULT;
143256521e7aSYisheng Xie 		if (t & valid_mask)
143356521e7aSYisheng Xie 			return -EINVAL;
143456521e7aSYisheng Xie 	}
143556521e7aSYisheng Xie 
14368bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
14378bccd85fSChristoph Lameter 		return -EFAULT;
14388bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
14398bccd85fSChristoph Lameter 	return 0;
14408bccd85fSChristoph Lameter }
14418bccd85fSChristoph Lameter 
14428bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
14438bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
14448bccd85fSChristoph Lameter 			      nodemask_t *nodes)
14458bccd85fSChristoph Lameter {
14468bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1447050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
14488bccd85fSChristoph Lameter 
14498bccd85fSChristoph Lameter 	if (copy > nbytes) {
14508bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
14518bccd85fSChristoph Lameter 			return -EINVAL;
14528bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
14538bccd85fSChristoph Lameter 			return -EFAULT;
14548bccd85fSChristoph Lameter 		copy = nbytes;
14558bccd85fSChristoph Lameter 	}
14568bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
14578bccd85fSChristoph Lameter }
14588bccd85fSChristoph Lameter 
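/*
 * Worked example (illustrative): with nr_node_ids == 64 the kernel
 * mask occupies nbytes == 8 bytes.  A caller asking for maxnode == 1025
 * makes copy == ALIGN(1024, 64) / 8 == 128 bytes, so the 120 user-space
 * bytes beyond the kernel mask are cleared and only the first 8 bytes
 * are actually copied out.
 */
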
1459e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1460e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1461e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
14628bccd85fSChristoph Lameter {
14638bccd85fSChristoph Lameter 	nodemask_t nodes;
14648bccd85fSChristoph Lameter 	int err;
1465028fec41SDavid Rientjes 	unsigned short mode_flags;
14668bccd85fSChristoph Lameter 
1467057d3389SAndrey Konovalov 	start = untagged_addr(start);
1468028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1469028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1470a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1471a3b51e01SDavid Rientjes 		return -EINVAL;
14724c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
14734c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
14744c50bc01SDavid Rientjes 		return -EINVAL;
14758bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14768bccd85fSChristoph Lameter 	if (err)
14778bccd85fSChristoph Lameter 		return err;
1478028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
14798bccd85fSChristoph Lameter }
14808bccd85fSChristoph Lameter 
1481e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1482e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1483e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1484e7dc9ad6SDominik Brodowski {
1485e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1486e7dc9ad6SDominik Brodowski }
1487e7dc9ad6SDominik Brodowski 
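/*
 * Hypothetical user-space sketch (not part of this file): interleave
 * a mapping across nodes 0 and 1 and move its existing pages to match.
 * The mode flags (MPOL_F_STATIC_NODES here) travel in the high bits of
 * @mode and are split back out by kernel_mbind() above.
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	mbind(addr, len, MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
 *	      &nodes, sizeof(nodes) * 8, MPOL_MF_MOVE);
 */
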
14888bccd85fSChristoph Lameter /* Set the process memory policy */
1489af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1490af03c4acSDominik Brodowski 				 unsigned long maxnode)
14918bccd85fSChristoph Lameter {
14928bccd85fSChristoph Lameter 	int err;
14938bccd85fSChristoph Lameter 	nodemask_t nodes;
1494028fec41SDavid Rientjes 	unsigned short flags;
14958bccd85fSChristoph Lameter 
1496028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1497028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1498028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
14998bccd85fSChristoph Lameter 		return -EINVAL;
15004c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
15014c50bc01SDavid Rientjes 		return -EINVAL;
15028bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
15038bccd85fSChristoph Lameter 	if (err)
15048bccd85fSChristoph Lameter 		return err;
1505028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
15068bccd85fSChristoph Lameter }
15078bccd85fSChristoph Lameter 
1508af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1509af03c4acSDominik Brodowski 		unsigned long, maxnode)
1510af03c4acSDominik Brodowski {
1511af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1512af03c4acSDominik Brodowski }
1513af03c4acSDominik Brodowski 
1514b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1515b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1516b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
151739743889SChristoph Lameter {
1518596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
151939743889SChristoph Lameter 	struct task_struct *task;
152039743889SChristoph Lameter 	nodemask_t task_nodes;
152139743889SChristoph Lameter 	int err;
1522596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1523596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1524596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
152539743889SChristoph Lameter 
1526596d7cfaSKOSAKI Motohiro 	if (!scratch)
1527596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
152839743889SChristoph Lameter 
1529596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1530596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1531596d7cfaSKOSAKI Motohiro 
1532596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
153339743889SChristoph Lameter 	if (err)
1534596d7cfaSKOSAKI Motohiro 		goto out;
1535596d7cfaSKOSAKI Motohiro 
1536596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1537596d7cfaSKOSAKI Motohiro 	if (err)
1538596d7cfaSKOSAKI Motohiro 		goto out;
153939743889SChristoph Lameter 
154039743889SChristoph Lameter 	/* Find the mm_struct */
154155cfaa3cSZeng Zhaoming 	rcu_read_lock();
1542228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
154339743889SChristoph Lameter 	if (!task) {
154455cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1545596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1546596d7cfaSKOSAKI Motohiro 		goto out;
154739743889SChristoph Lameter 	}
15483268c63eSChristoph Lameter 	get_task_struct(task);
154939743889SChristoph Lameter 
1550596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
155139743889SChristoph Lameter 
155239743889SChristoph Lameter 	/*
155331367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
155431367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
155539743889SChristoph Lameter 	 */
155631367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1557c69e8d9cSDavid Howells 		rcu_read_unlock();
155839743889SChristoph Lameter 		err = -EPERM;
15593268c63eSChristoph Lameter 		goto out_put;
156039743889SChristoph Lameter 	}
1561c69e8d9cSDavid Howells 	rcu_read_unlock();
156239743889SChristoph Lameter 
156339743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
156439743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1565596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
156639743889SChristoph Lameter 		err = -EPERM;
15673268c63eSChristoph Lameter 		goto out_put;
156839743889SChristoph Lameter 	}
156939743889SChristoph Lameter 
15700486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
15710486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
15720486a38bSYisheng Xie 	if (nodes_empty(*new))
15733268c63eSChristoph Lameter 		goto out_put;
15740486a38bSYisheng Xie 
157586c3a764SDavid Quigley 	err = security_task_movememory(task);
157686c3a764SDavid Quigley 	if (err)
15773268c63eSChristoph Lameter 		goto out_put;
157886c3a764SDavid Quigley 
15793268c63eSChristoph Lameter 	mm = get_task_mm(task);
15803268c63eSChristoph Lameter 	put_task_struct(task);
1581f2a9ef88SSasha Levin 
1582f2a9ef88SSasha Levin 	if (!mm) {
1583f2a9ef88SSasha Levin 		err = -EINVAL;
1584f2a9ef88SSasha Levin 		goto out;
1585f2a9ef88SSasha Levin 	}
1586f2a9ef88SSasha Levin 
1587596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
158874c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
15893268c63eSChristoph Lameter 
159039743889SChristoph Lameter 	mmput(mm);
15913268c63eSChristoph Lameter out:
1592596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1593596d7cfaSKOSAKI Motohiro 
159439743889SChristoph Lameter 	return err;
15953268c63eSChristoph Lameter 
15963268c63eSChristoph Lameter out_put:
15973268c63eSChristoph Lameter 	put_task_struct(task);
15983268c63eSChristoph Lameter 	goto out;
160039743889SChristoph Lameter }
160139743889SChristoph Lameter 
1602b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1603b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1604b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1605b6e9b0baSDominik Brodowski {
1606b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1607b6e9b0baSDominik Brodowski }
1608b6e9b0baSDominik Brodowski 
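/*
 * Hypothetical user-space sketch (not part of this file): ask the
 * kernel to move everything @pid has on node 0 over to node 1.  A
 * positive return is the number of pages that could not be moved.
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *
 *	syscall(__NR_migrate_pages, pid, sizeof(old) * 8 + 1, &old, &new);
 */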
160939743889SChristoph Lameter 
16108bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1611af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1612af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1613af03c4acSDominik Brodowski 				unsigned long maxnode,
1614af03c4acSDominik Brodowski 				unsigned long addr,
1615af03c4acSDominik Brodowski 				unsigned long flags)
16168bccd85fSChristoph Lameter {
1617dbcb0f19SAdrian Bunk 	int err;
16183f649ab7SKees Cook 	int pval;
16198bccd85fSChristoph Lameter 	nodemask_t nodes;
16208bccd85fSChristoph Lameter 
1621050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
16228bccd85fSChristoph Lameter 		return -EINVAL;
16238bccd85fSChristoph Lameter 
16244605f057SWenchao Hao 	addr = untagged_addr(addr);
16254605f057SWenchao Hao 
16268bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
16278bccd85fSChristoph Lameter 
16288bccd85fSChristoph Lameter 	if (err)
16298bccd85fSChristoph Lameter 		return err;
16308bccd85fSChristoph Lameter 
16318bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
16328bccd85fSChristoph Lameter 		return -EFAULT;
16338bccd85fSChristoph Lameter 
16348bccd85fSChristoph Lameter 	if (nmask)
16358bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
16368bccd85fSChristoph Lameter 
16378bccd85fSChristoph Lameter 	return err;
16388bccd85fSChristoph Lameter }
16398bccd85fSChristoph Lameter 
1640af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1641af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1642af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1643af03c4acSDominik Brodowski {
1644af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1645af03c4acSDominik Brodowski }
1646af03c4acSDominik Brodowski 
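/*
 * Hypothetical user-space sketch (not part of this file): query which
 * policy governs a given address in the calling task.
 *
 *	int mode;
 *	unsigned long nodes = 0;
 *
 *	get_mempolicy(&mode, &nodes, sizeof(nodes) * 8, addr, MPOL_F_ADDR);
 *
 * Without MPOL_F_ADDR the call reports the task policy instead, and
 * adding MPOL_F_NODE makes @mode report a node number rather than a
 * policy mode.
 */
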
16471da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
16481da177e4SLinus Torvalds 
1649c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1650c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1651c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1652c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
16531da177e4SLinus Torvalds {
16541da177e4SLinus Torvalds 	long err;
16551da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16561da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
16571da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
16581da177e4SLinus Torvalds 
1659050c17f2SRalph Campbell 	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
16601da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16611da177e4SLinus Torvalds 
16621da177e4SLinus Torvalds 	if (nmask)
16631da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
16641da177e4SLinus Torvalds 
1665af03c4acSDominik Brodowski 	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
16661da177e4SLinus Torvalds 
16671da177e4SLinus Torvalds 	if (!err && nmask) {
16682bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
16692bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
16702bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
16711da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
16721da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
16731da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
16741da177e4SLinus Torvalds 	}
16751da177e4SLinus Torvalds 
16761da177e4SLinus Torvalds 	return err;
16771da177e4SLinus Torvalds }
16781da177e4SLinus Torvalds 
1679c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1680c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
16811da177e4SLinus Torvalds {
16821da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16831da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
16841da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
16851da177e4SLinus Torvalds 
16861da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
16871da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16881da177e4SLinus Torvalds 
16891da177e4SLinus Torvalds 	if (nmask) {
1690cf01fb99SChris Salls 		if (compat_get_bitmap(bm, nmask, nr_bits))
16911da177e4SLinus Torvalds 			return -EFAULT;
1692cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1693cf01fb99SChris Salls 		if (copy_to_user(nm, bm, alloc_size))
1694cf01fb99SChris Salls 			return -EFAULT;
1695cf01fb99SChris Salls 	}
16961da177e4SLinus Torvalds 
1697af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nm, nr_bits+1);
16981da177e4SLinus Torvalds }
16991da177e4SLinus Torvalds 
1700c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1701c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1702c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
17031da177e4SLinus Torvalds {
17041da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
17051da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1706dfcd3c0dSAndi Kleen 	nodemask_t bm;
17071da177e4SLinus Torvalds 
17081da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
17091da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
17101da177e4SLinus Torvalds 
17111da177e4SLinus Torvalds 	if (nmask) {
1712cf01fb99SChris Salls 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
17131da177e4SLinus Torvalds 			return -EFAULT;
1714cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1715cf01fb99SChris Salls 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1716cf01fb99SChris Salls 			return -EFAULT;
1717cf01fb99SChris Salls 	}
17181da177e4SLinus Torvalds 
1719e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
17201da177e4SLinus Torvalds }
17211da177e4SLinus Torvalds 
1722b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1723b6e9b0baSDominik Brodowski 		       compat_ulong_t, maxnode,
1724b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, old_nodes,
1725b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, new_nodes)
1726b6e9b0baSDominik Brodowski {
1727b6e9b0baSDominik Brodowski 	unsigned long __user *old = NULL;
1728b6e9b0baSDominik Brodowski 	unsigned long __user *new = NULL;
1729b6e9b0baSDominik Brodowski 	nodemask_t tmp_mask;
1730b6e9b0baSDominik Brodowski 	unsigned long nr_bits;
1731b6e9b0baSDominik Brodowski 	unsigned long size;
1732b6e9b0baSDominik Brodowski 
1733b6e9b0baSDominik Brodowski 	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1734b6e9b0baSDominik Brodowski 	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1735b6e9b0baSDominik Brodowski 	if (old_nodes) {
1736b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1737b6e9b0baSDominik Brodowski 			return -EFAULT;
1738b6e9b0baSDominik Brodowski 		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1739b6e9b0baSDominik Brodowski 		if (new_nodes)
1740b6e9b0baSDominik Brodowski 			new = old + size / sizeof(unsigned long);
1741b6e9b0baSDominik Brodowski 		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1742b6e9b0baSDominik Brodowski 			return -EFAULT;
1743b6e9b0baSDominik Brodowski 	}
1744b6e9b0baSDominik Brodowski 	if (new_nodes) {
1745b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1746b6e9b0baSDominik Brodowski 			return -EFAULT;
1747b6e9b0baSDominik Brodowski 		if (new == NULL)
1748b6e9b0baSDominik Brodowski 			new = compat_alloc_user_space(size);
1749b6e9b0baSDominik Brodowski 		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1750b6e9b0baSDominik Brodowski 			return -EFAULT;
1751b6e9b0baSDominik Brodowski 	}
1752b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1753b6e9b0baSDominik Brodowski }
1754b6e9b0baSDominik Brodowski 
1755b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */
17561da177e4SLinus Torvalds 
175720ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma)
175820ca87f2SLi Xinhai {
175920ca87f2SLi Xinhai 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
176020ca87f2SLi Xinhai 		return false;
176120ca87f2SLi Xinhai 
176220ca87f2SLi Xinhai 	/*
176320ca87f2SLi Xinhai 	 * DAX device mappings require predictable access latency, so avoid
176420ca87f2SLi Xinhai 	 * incurring periodic faults.
176520ca87f2SLi Xinhai 	 */
176620ca87f2SLi Xinhai 	if (vma_is_dax(vma))
176720ca87f2SLi Xinhai 		return false;
176820ca87f2SLi Xinhai 
176920ca87f2SLi Xinhai 	if (is_vm_hugetlb_page(vma) &&
177020ca87f2SLi Xinhai 		!hugepage_migration_supported(hstate_vma(vma)))
177120ca87f2SLi Xinhai 		return false;
177220ca87f2SLi Xinhai 
177320ca87f2SLi Xinhai 	/*
177420ca87f2SLi Xinhai 	 * Migration allocates pages in the highest zone. If we cannot
177520ca87f2SLi Xinhai 	 * do so then migration (at least from node to node) is not
177620ca87f2SLi Xinhai 	 * possible.
177720ca87f2SLi Xinhai 	 */
177820ca87f2SLi Xinhai 	if (vma->vm_file &&
177920ca87f2SLi Xinhai 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
178020ca87f2SLi Xinhai 			< policy_zone)
178120ca87f2SLi Xinhai 		return false;
178220ca87f2SLi Xinhai 	return true;
178320ca87f2SLi Xinhai }
178420ca87f2SLi Xinhai 
178574d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
178674d2c3a0SOleg Nesterov 						unsigned long addr)
17871da177e4SLinus Torvalds {
17888d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
17891da177e4SLinus Torvalds 
17901da177e4SLinus Torvalds 	if (vma) {
1791480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
17928d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
179300442ad0SMel Gorman 		} else if (vma->vm_policy) {
17941da177e4SLinus Torvalds 			pol = vma->vm_policy;
179500442ad0SMel Gorman 
179600442ad0SMel Gorman 			/*
179700442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
179800442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
179900442ad0SMel Gorman 			 * count on these policies which will be dropped by
180000442ad0SMel Gorman 			 * mpol_cond_put() later
180100442ad0SMel Gorman 			 */
180200442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
180300442ad0SMel Gorman 				mpol_get(pol);
180400442ad0SMel Gorman 		}
18051da177e4SLinus Torvalds 	}
1806f15ca78eSOleg Nesterov 
180774d2c3a0SOleg Nesterov 	return pol;
180874d2c3a0SOleg Nesterov }
180974d2c3a0SOleg Nesterov 
181074d2c3a0SOleg Nesterov /*
1811dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
181274d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
181374d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
181474d2c3a0SOleg Nesterov  *
181574d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1816dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
181774d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
181874d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
181974d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
182074d2c3a0SOleg Nesterov  * extra reference for shared policies.
182174d2c3a0SOleg Nesterov  */
1822ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1823dd6eecb9SOleg Nesterov 						unsigned long addr)
182474d2c3a0SOleg Nesterov {
182574d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
182674d2c3a0SOleg Nesterov 
18278d90274bSOleg Nesterov 	if (!pol)
1828dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
18298d90274bSOleg Nesterov 
18301da177e4SLinus Torvalds 	return pol;
18311da177e4SLinus Torvalds }
18321da177e4SLinus Torvalds 
18336b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1834fc314724SMel Gorman {
18356b6482bbSOleg Nesterov 	struct mempolicy *pol;
1836f15ca78eSOleg Nesterov 
1837fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1838fc314724SMel Gorman 		bool ret = false;
1839fc314724SMel Gorman 
1840fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1841fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1842fc314724SMel Gorman 			ret = true;
1843fc314724SMel Gorman 		mpol_cond_put(pol);
1844fc314724SMel Gorman 
1845fc314724SMel Gorman 		return ret;
18468d90274bSOleg Nesterov 	}
18478d90274bSOleg Nesterov 
1848fc314724SMel Gorman 	pol = vma->vm_policy;
18498d90274bSOleg Nesterov 	if (!pol)
18506b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1851fc314724SMel Gorman 
1852fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1853fc314724SMel Gorman }
1854fc314724SMel Gorman 
1855d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1856d3eb1570SLai Jiangshan {
1857d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1858d3eb1570SLai Jiangshan 
1859d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1860d3eb1570SLai Jiangshan 
1861d3eb1570SLai Jiangshan 	/*
1862d3eb1570SLai Jiangshan 	 * If policy->v.nodes has movable memory only,
1863d3eb1570SLai Jiangshan 	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1864d3eb1570SLai Jiangshan 	 *
1865d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1866d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies
1867d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1868d3eb1570SLai Jiangshan 	 */
1869d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1870d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1871d3eb1570SLai Jiangshan 
1872d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1873d3eb1570SLai Jiangshan }
1874d3eb1570SLai Jiangshan 
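/*
 * Illustrative example: if an MPOL_BIND policy names only nodes whose
 * memory sits entirely in ZONE_MOVABLE, dynamic_policy_zone above is
 * raised to ZONE_MOVABLE, so only allocations with gfp_zone(gfp) ==
 * ZONE_MOVABLE have the policy's nodemask applied; allocations for
 * lower zones fall back to the default behaviour instead.
 */
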
187552cd3b07SLee Schermerhorn /*
187652cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
187752cd3b07SLee Schermerhorn  * page allocation
187852cd3b07SLee Schermerhorn  */
18798ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
188019770b32SMel Gorman {
188119770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
188245c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1883d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
188419770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
188519770b32SMel Gorman 		return &policy->v.nodes;
188619770b32SMel Gorman 
188719770b32SMel Gorman 	return NULL;
188819770b32SMel Gorman }
188919770b32SMel Gorman 
189004ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */
1891f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
18921da177e4SLinus Torvalds {
18936d840958SMichal Hocko 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
18941da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
18956d840958SMichal Hocko 	else {
189619770b32SMel Gorman 		/*
18976d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
18986d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
18996d840958SMichal Hocko 		 * requested node and not break the policy.
190019770b32SMel Gorman 		 */
19016d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
19021da177e4SLinus Torvalds 	}
19036d840958SMichal Hocko 
190404ec6264SVlastimil Babka 	return nd;
19051da177e4SLinus Torvalds }
19061da177e4SLinus Torvalds 
19071da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
19081da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
19091da177e4SLinus Torvalds {
191045816682SVlastimil Babka 	unsigned next;
19111da177e4SLinus Torvalds 	struct task_struct *me = current;
19121da177e4SLinus Torvalds 
191345816682SVlastimil Babka 	next = next_node_in(me->il_prev, policy->v.nodes);
1914f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
191545816682SVlastimil Babka 		me->il_prev = next;
191645816682SVlastimil Babka 	return next;
19171da177e4SLinus Torvalds }
19181da177e4SLinus Torvalds 
1919dc85da15SChristoph Lameter /*
1920dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1921dc85da15SChristoph Lameter  * next slab entry.
1922dc85da15SChristoph Lameter  */
19232a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1924dc85da15SChristoph Lameter {
1925e7b691b0SAndi Kleen 	struct mempolicy *policy;
19262a389610SDavid Rientjes 	int node = numa_mem_id();
1927e7b691b0SAndi Kleen 
1928e7b691b0SAndi Kleen 	if (in_interrupt())
19292a389610SDavid Rientjes 		return node;
1930e7b691b0SAndi Kleen 
1931e7b691b0SAndi Kleen 	policy = current->mempolicy;
1932fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
19332a389610SDavid Rientjes 		return node;
1934765c4507SChristoph Lameter 
1935bea904d5SLee Schermerhorn 	switch (policy->mode) {
1936bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1937fc36b8d3SLee Schermerhorn 		/*
1938fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1939fc36b8d3SLee Schermerhorn 		 */
1940bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1941bea904d5SLee Schermerhorn 
1942dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1943dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1944dc85da15SChristoph Lameter 
1945dd1a239fSMel Gorman 	case MPOL_BIND: {
1946c33d6c06SMel Gorman 		struct zoneref *z;
1947c33d6c06SMel Gorman 
1948dc85da15SChristoph Lameter 		/*
1949dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1950dc85da15SChristoph Lameter 		 * first node.
1951dc85da15SChristoph Lameter 		 */
195219770b32SMel Gorman 		struct zonelist *zonelist;
195319770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1954c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1955c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1956c33d6c06SMel Gorman 							&policy->v.nodes);
1957c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1958dd1a239fSMel Gorman 	}
1959dc85da15SChristoph Lameter 
1960dc85da15SChristoph Lameter 	default:
1961bea904d5SLee Schermerhorn 		BUG();
1962dc85da15SChristoph Lameter 	}
1963dc85da15SChristoph Lameter }
1964dc85da15SChristoph Lameter 
1965fee83b3aSAndrew Morton /*
1966fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1967fee83b3aSAndrew Morton  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1968fee83b3aSAndrew Morton  * number of present nodes.
1969fee83b3aSAndrew Morton  */
197098c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
19711da177e4SLinus Torvalds {
1972dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1973f5b087b5SDavid Rientjes 	unsigned target;
1974fee83b3aSAndrew Morton 	int i;
1975fee83b3aSAndrew Morton 	int nid;
19761da177e4SLinus Torvalds 
1977f5b087b5SDavid Rientjes 	if (!nnodes)
1978f5b087b5SDavid Rientjes 		return numa_node_id();
1979fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1980fee83b3aSAndrew Morton 	nid = first_node(pol->v.nodes);
1981fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1982dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
19831da177e4SLinus Torvalds 	return nid;
19841da177e4SLinus Torvalds }
19851da177e4SLinus Torvalds 
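/*
 * Worked example (illustrative): with pol->v.nodes == {1,3,5} and
 * n == 7, nnodes == 3 and target == 7 % 3 == 1, so the walk starts
 * at node 1 and takes one next_node() step, returning node 3.
 */
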
19865da7ca86SChristoph Lameter /* Determine a node number for interleave */
19875da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
19885da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
19895da7ca86SChristoph Lameter {
19905da7ca86SChristoph Lameter 	if (vma) {
19915da7ca86SChristoph Lameter 		unsigned long off;
19925da7ca86SChristoph Lameter 
19933b98b087SNishanth Aravamudan 		/*
19943b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
19953b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
19963b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
19973b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
19983b98b087SNishanth Aravamudan 		 * a useful offset.
19993b98b087SNishanth Aravamudan 		 */
20003b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
20013b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
20025da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
200398c70baaSLaurent Dufour 		return offset_il_node(pol, off);
20045da7ca86SChristoph Lameter 	} else
20055da7ca86SChristoph Lameter 		return interleave_nodes(pol);
20065da7ca86SChristoph Lameter }
20075da7ca86SChristoph Lameter 
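/*
 * Worked example (illustrative): for a 2MB huge page mapping, @shift
 * is PMD_SHIFT (21 on x86-64) while vm_pgoff counts 4KB pages, so the
 * code above computes off = vm_pgoff >> 9 plus the offset of @addr
 * within the VMA in 2MB units, i.e. one interleave step per huge page.
 */
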
200800ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
2009480eccf9SLee Schermerhorn /*
201004ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
2011b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
2012b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
2013b46e14acSFabian Frederick  * @gfp_flags: for requested zone
2014b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2015b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
2016480eccf9SLee Schermerhorn  *
201704ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
201852cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
201952cd3b07SLee Schermerhorn  * If the effective policy is 'BIND, returns a pointer to the mempolicy's
202052cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
2021c0ff7453SMiao Xie  *
2022d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
2023480eccf9SLee Schermerhorn  */
202404ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
202504ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
20265da7ca86SChristoph Lameter {
202704ec6264SVlastimil Babka 	int nid;
20285da7ca86SChristoph Lameter 
2029dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
203019770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
20315da7ca86SChristoph Lameter 
203252cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
203304ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
203404ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
203552cd3b07SLee Schermerhorn 	} else {
203604ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
203752cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
203852cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
2039480eccf9SLee Schermerhorn 	}
204004ec6264SVlastimil Babka 	return nid;
20415da7ca86SChristoph Lameter }
204206808b08SLee Schermerhorn 
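/*
 * Minimal usage sketch, assuming a hugetlb fault path that holds the
 * mmap_lock; dequeue_huge_page_nodemask() stands in for whatever
 * allocator the real caller uses:
 *
 *	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
 *	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
 *	mpol_cond_put(mpol);
 */
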
204306808b08SLee Schermerhorn /*
204406808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
204506808b08SLee Schermerhorn  *
204606808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
204706808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
204806808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
204906808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
205006808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
205106808b08SLee Schermerhorn  * of non-default mempolicy.
205206808b08SLee Schermerhorn  *
205306808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
205406808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
205506808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
205606808b08SLee Schermerhorn  *
205706808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
205806808b08SLee Schermerhorn  */
205906808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
206006808b08SLee Schermerhorn {
206106808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
206206808b08SLee Schermerhorn 	int nid;
206306808b08SLee Schermerhorn 
206406808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
206506808b08SLee Schermerhorn 		return false;
206606808b08SLee Schermerhorn 
2067c0ff7453SMiao Xie 	task_lock(current);
206806808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
206906808b08SLee Schermerhorn 	switch (mempolicy->mode) {
207006808b08SLee Schermerhorn 	case MPOL_PREFERRED:
207106808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
207206808b08SLee Schermerhorn 			nid = numa_node_id();
207306808b08SLee Schermerhorn 		else
207406808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
207506808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
207606808b08SLee Schermerhorn 		break;
207706808b08SLee Schermerhorn 
207806808b08SLee Schermerhorn 	case MPOL_BIND:
207906808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
208006808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
208106808b08SLee Schermerhorn 		break;
208206808b08SLee Schermerhorn 
208306808b08SLee Schermerhorn 	default:
208406808b08SLee Schermerhorn 		BUG();
208506808b08SLee Schermerhorn 	}
2086c0ff7453SMiao Xie 	task_unlock(current);
208706808b08SLee Schermerhorn 
208806808b08SLee Schermerhorn 	return true;
208906808b08SLee Schermerhorn }
209000ac59adSChen, Kenneth W #endif
20915da7ca86SChristoph Lameter 
20926f48d0ebSDavid Rientjes /*
20936f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
20946f48d0ebSDavid Rientjes  *
20956f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
20966f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
20976f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
20986f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
20996f48d0ebSDavid Rientjes  *
21006f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
21016f48d0ebSDavid Rientjes  */
21026f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
21036f48d0ebSDavid Rientjes 					const nodemask_t *mask)
21046f48d0ebSDavid Rientjes {
21056f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
21066f48d0ebSDavid Rientjes 	bool ret = true;
21076f48d0ebSDavid Rientjes 
21086f48d0ebSDavid Rientjes 	if (!mask)
21096f48d0ebSDavid Rientjes 		return ret;
21106f48d0ebSDavid Rientjes 	task_lock(tsk);
21116f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
21126f48d0ebSDavid Rientjes 	if (!mempolicy)
21136f48d0ebSDavid Rientjes 		goto out;
21146f48d0ebSDavid Rientjes 
21156f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
21166f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
21176f48d0ebSDavid Rientjes 		/*
21186f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
21196f48d0ebSDavid Rientjes 		 * allocate from; they may fall back to other nodes when OOM.
21206f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
21216f48d0ebSDavid Rientjes 		 * nodes in mask.
21226f48d0ebSDavid Rientjes 		 */
21236f48d0ebSDavid Rientjes 		break;
21246f48d0ebSDavid Rientjes 	case MPOL_BIND:
21256f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
21266f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
21276f48d0ebSDavid Rientjes 		break;
21286f48d0ebSDavid Rientjes 	default:
21296f48d0ebSDavid Rientjes 		BUG();
21306f48d0ebSDavid Rientjes 	}
21316f48d0ebSDavid Rientjes out:
21326f48d0ebSDavid Rientjes 	task_unlock(tsk);
21336f48d0ebSDavid Rientjes 	return ret;
21346f48d0ebSDavid Rientjes }
21356f48d0ebSDavid Rientjes 
21361da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
21371da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
2138662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2139662f3a0bSAndi Kleen 					unsigned nid)
21401da177e4SLinus Torvalds {
21411da177e4SLinus Torvalds 	struct page *page;
21421da177e4SLinus Torvalds 
214384172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp, order, nid, NULL);
21444518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats are disabled */
21454518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
21464518085eSKemi Wang 		return page;
2147de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2148de55c8b2SAndrey Ryabinin 		preempt_disable();
2149de55c8b2SAndrey Ryabinin 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2150de55c8b2SAndrey Ryabinin 		preempt_enable();
2151de55c8b2SAndrey Ryabinin 	}
21521da177e4SLinus Torvalds 	return page;
21531da177e4SLinus Torvalds }
21541da177e4SLinus Torvalds 
21551da177e4SLinus Torvalds /**
21560bbbc0b3SAndrea Arcangeli  * alloc_pages_vma - Allocate a page for a VMA.
21571da177e4SLinus Torvalds  *
21581da177e4SLinus Torvalds  * @gfp:
21591da177e4SLinus Torvalds  *      %GFP_USER    user allocation,
21601da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocation,
21611da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocation,
21621da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system,
21631da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
21641da177e4SLinus Torvalds  *
21650bbbc0b3SAndrea Arcangeli  * @order: Order of the GFP allocation.
21661da177e4SLinus Torvalds  * @vma: Pointer to VMA or NULL if not available.
21671da177e4SLinus Torvalds  * @addr: Virtual address of the allocation. Must be inside the VMA.
2168be97a41bSVlastimil Babka  * @node: Which node to prefer for allocation (modulo policy).
216919deb769SDavid Rientjes  * @hugepage: For hugepages, try only the preferred node if possible.
21701da177e4SLinus Torvalds  *
21711da177e4SLinus Torvalds  * This function allocates a page from the kernel page pool and applies
21721da177e4SLinus Torvalds  * the NUMA policy associated with the VMA or, failing that, the current
21733e4e28c5SMichel Lespinasse  * process. When @vma is not NULL, the caller must read-lock the mmap_lock
21741da177e4SLinus Torvalds  * of the VMA's mm_struct to prevent it from going away. Should be used for
2175be97a41bSVlastimil Babka  * all allocations of pages that will be mapped into user space. Returns
2176be97a41bSVlastimil Babka  * NULL when no page can be allocated.
21771da177e4SLinus Torvalds  */
21781da177e4SLinus Torvalds struct page *
21790bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
218019deb769SDavid Rientjes 		unsigned long addr, int node, bool hugepage)
21811da177e4SLinus Torvalds {
2182cc9a6c87SMel Gorman 	struct mempolicy *pol;
2183c0ff7453SMiao Xie 	struct page *page;
218404ec6264SVlastimil Babka 	int preferred_nid;
2185be97a41bSVlastimil Babka 	nodemask_t *nmask;
21861da177e4SLinus Torvalds 
2187dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2188cc9a6c87SMel Gorman 
2189be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
21901da177e4SLinus Torvalds 		unsigned nid;
21915da7ca86SChristoph Lameter 
21928eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
219352cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
21940bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2195be97a41bSVlastimil Babka 		goto out;
21961da177e4SLinus Torvalds 	}
21971da177e4SLinus Torvalds 
219819deb769SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
219919deb769SDavid Rientjes 		int hpage_node = node;
220019deb769SDavid Rientjes 
220119deb769SDavid Rientjes 		/*
220219deb769SDavid Rientjes 		 * For hugepage allocation with a non-interleave policy that
220319deb769SDavid Rientjes 		 * allows the current node (or another explicitly preferred
220419deb769SDavid Rientjes 		 * node), we only try to allocate from the current/preferred
220519deb769SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
220619deb769SDavid Rientjes 		 * remote accesses would likely offset THP benefits.
220719deb769SDavid Rientjes 		 *
220819deb769SDavid Rientjes 		 * If the policy is interleave, or does not allow the current
220919deb769SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
221019deb769SDavid Rientjes 		 */
221119deb769SDavid Rientjes 		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
221219deb769SDavid Rientjes 			hpage_node = pol->v.preferred_node;
221319deb769SDavid Rientjes 
221419deb769SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
221519deb769SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
221619deb769SDavid Rientjes 			mpol_cond_put(pol);
2217cc638f32SVlastimil Babka 			/*
2218cc638f32SVlastimil Babka 			 * First, try to allocate THP only on local node, but
2219cc638f32SVlastimil Babka 			 * don't reclaim unnecessarily, just compact.
2220cc638f32SVlastimil Babka 			 */
222119deb769SDavid Rientjes 			page = __alloc_pages_node(hpage_node,
2222cc638f32SVlastimil Babka 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
222376e654ccSDavid Rientjes 
222476e654ccSDavid Rientjes 			/*
222576e654ccSDavid Rientjes 			 * If hugepage allocations are configured to always
222676e654ccSDavid Rientjes 			 * synchronous compact or the vma has been madvised
222776e654ccSDavid Rientjes 			 * to prefer hugepage backing, retry allowing remote
2228cc638f32SVlastimil Babka 			 * memory with both reclaim and compact as well.
222976e654ccSDavid Rientjes 			 */
223076e654ccSDavid Rientjes 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
223176e654ccSDavid Rientjes 				page = __alloc_pages_node(hpage_node,
2232cc638f32SVlastimil Babka 								gfp, order);
223376e654ccSDavid Rientjes 
223419deb769SDavid Rientjes 			goto out;
223519deb769SDavid Rientjes 		}
223619deb769SDavid Rientjes 	}
223719deb769SDavid Rientjes 
2238077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
223904ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
224084172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp, order, preferred_nid, nmask);
2241d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2242be97a41bSVlastimil Babka out:
2243077fcf11SAneesh Kumar K.V 	return page;
2244077fcf11SAneesh Kumar K.V }
224569262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma);
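
A hedged sketch of a fault-path caller: allocate one zeroed page for a user
mapping under the VMA's policy. The wrapper name is hypothetical; @vma and
@addr are assumed to come from a fault handler holding mmap_lock for read:

	static struct page *demo_alloc_user_page(struct vm_area_struct *vma,
						 unsigned long addr)
	{
		/* order 0, prefer the local node, not a THP allocation */
		return alloc_pages_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0,
				       vma, addr, numa_node_id(), false);
	}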
2246077fcf11SAneesh Kumar K.V 
22471da177e4SLinus Torvalds /**
2248d7f946d0SMatthew Wilcox (Oracle)  * alloc_pages - Allocate pages.
2249*6421ec76SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
2250*6421ec76SMatthew Wilcox (Oracle)  * @order: Power of two of the number of pages to allocate.
22511da177e4SLinus Torvalds  *
2252*6421ec76SMatthew Wilcox (Oracle)  * Allocate 1 << @order contiguous pages.  The physical address of the
2253*6421ec76SMatthew Wilcox (Oracle)  * first page is naturally aligned (e.g. an order-3 allocation will be aligned
2254*6421ec76SMatthew Wilcox (Oracle)  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
2255*6421ec76SMatthew Wilcox (Oracle)  * process is honoured when in process context.
22561da177e4SLinus Torvalds  *
2257*6421ec76SMatthew Wilcox (Oracle)  * Context: Can be called from any context, providing the appropriate GFP
2258*6421ec76SMatthew Wilcox (Oracle)  * flags are used.
2259*6421ec76SMatthew Wilcox (Oracle)  * Return: The page on success or NULL if allocation fails.
22601da177e4SLinus Torvalds  */
2261d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order)
22621da177e4SLinus Torvalds {
22638d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2264c0ff7453SMiao Xie 	struct page *page;
22651da177e4SLinus Torvalds 
22668d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22678d90274bSOleg Nesterov 		pol = get_task_policy(current);
226852cd3b07SLee Schermerhorn 
226952cd3b07SLee Schermerhorn 	/*
227052cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
227152cd3b07SLee Schermerhorn 	 * nor system default_policy
227252cd3b07SLee Schermerhorn 	 */
227345c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2274c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2275c0ff7453SMiao Xie 	else
227684172f4bSMatthew Wilcox (Oracle) 		page = __alloc_pages(gfp, order,
227704ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
22785c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2279cc9a6c87SMel Gorman 
2280c0ff7453SMiao Xie 	return page;
22811da177e4SLinus Torvalds }
2282d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages);
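
A hedged usage sketch: an order-2 request through this policy-aware entry
point returns 4 contiguous pages, released with __free_pages(). The function
name is hypothetical:

	static int demo_policy_alloc(void)
	{
		struct page *page = alloc_pages(GFP_KERNEL, 2);

		if (!page)
			return -ENOMEM;
		/* ... use the 4 pages starting at page_address(page) ... */
		__free_pages(page, 2);
		return 0;
	}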
22831da177e4SLinus Torvalds 
2284ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2285ef0855d3SOleg Nesterov {
2286ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2287ef0855d3SOleg Nesterov 
2288ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2289ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2290ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2291ef0855d3SOleg Nesterov 	return 0;
2292ef0855d3SOleg Nesterov }
2293ef0855d3SOleg Nesterov 
22944225399aSPaul Jackson /*
2295846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
22964225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
22974225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
22984225399aSPaul Jackson  * keeps mempolicies cpuset relative after their cpuset moves.  See
22994225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2300708c1bbcSMiao Xie  *
2301708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2302708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do the rebind work for the current task.
23034225399aSPaul Jackson  */
23044225399aSPaul Jackson 
2305846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2306846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
23071da177e4SLinus Torvalds {
23081da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
23091da177e4SLinus Torvalds 
23101da177e4SLinus Torvalds 	if (!new)
23111da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2312708c1bbcSMiao Xie 
2313708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2314708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2315708c1bbcSMiao Xie 		task_lock(current);
2316708c1bbcSMiao Xie 		*new = *old;
2317708c1bbcSMiao Xie 		task_unlock(current);
2318708c1bbcSMiao Xie 	} else
2319708c1bbcSMiao Xie 		*new = *old;
2320708c1bbcSMiao Xie 
23214225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
23224225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2323213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
23244225399aSPaul Jackson 	}
23251da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
23261da177e4SLinus Torvalds 	return new;
23271da177e4SLinus Torvalds }
23281da177e4SLinus Torvalds 
23291da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2330fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
23311da177e4SLinus Torvalds {
23321da177e4SLinus Torvalds 	if (!a || !b)
2333fcfb4dccSKOSAKI Motohiro 		return false;
233445c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2335fcfb4dccSKOSAKI Motohiro 		return false;
233619800502SBob Liu 	if (a->flags != b->flags)
2337fcfb4dccSKOSAKI Motohiro 		return false;
233819800502SBob Liu 	if (mpol_store_user_nodemask(a))
233919800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2340fcfb4dccSKOSAKI Motohiro 			return false;
234119800502SBob Liu 
234245c4745aSLee Schermerhorn 	switch (a->mode) {
234319770b32SMel Gorman 	case MPOL_BIND:
23441da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2345fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
23461da177e4SLinus Torvalds 	case MPOL_PREFERRED:
23478970a63eSYisheng Xie 		/* a's ->flags is the same as b's */
23488970a63eSYisheng Xie 		if (a->flags & MPOL_F_LOCAL)
23498970a63eSYisheng Xie 			return true;
235075719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
23511da177e4SLinus Torvalds 	default:
23521da177e4SLinus Torvalds 		BUG();
2353fcfb4dccSKOSAKI Motohiro 		return false;
23541da177e4SLinus Torvalds 	}
23551da177e4SLinus Torvalds }
23561da177e4SLinus Torvalds 
23571da177e4SLinus Torvalds /*
23581da177e4SLinus Torvalds  * Shared memory backing store policy support.
23591da177e4SLinus Torvalds  *
23601da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
23611da177e4SLinus Torvalds  * The policies are kept in a red-black tree linked from the inode.
23624a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
23631da177e4SLinus Torvalds  * for any accesses to the tree.
23641da177e4SLinus Torvalds  */
23651da177e4SLinus Torvalds 
23664a8c7bb5SNathan Zimmer /*
23674a8c7bb5SNathan Zimmer  * Look up the first element intersecting start-end.  Caller holds sp->lock
23684a8c7bb5SNathan Zimmer  * for reading or for writing.
23694a8c7bb5SNathan Zimmer  */
23701da177e4SLinus Torvalds static struct sp_node *
23711da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
23721da177e4SLinus Torvalds {
23731da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
23741da177e4SLinus Torvalds 
23751da177e4SLinus Torvalds 	while (n) {
23761da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
23771da177e4SLinus Torvalds 
23781da177e4SLinus Torvalds 		if (start >= p->end)
23791da177e4SLinus Torvalds 			n = n->rb_right;
23801da177e4SLinus Torvalds 		else if (end <= p->start)
23811da177e4SLinus Torvalds 			n = n->rb_left;
23821da177e4SLinus Torvalds 		else
23831da177e4SLinus Torvalds 			break;
23841da177e4SLinus Torvalds 	}
23851da177e4SLinus Torvalds 	if (!n)
23861da177e4SLinus Torvalds 		return NULL;
23871da177e4SLinus Torvalds 	for (;;) {
23881da177e4SLinus Torvalds 		struct sp_node *w = NULL;
23891da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
23901da177e4SLinus Torvalds 		if (!prev)
23911da177e4SLinus Torvalds 			break;
23921da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
23931da177e4SLinus Torvalds 		if (w->end <= start)
23941da177e4SLinus Torvalds 			break;
23951da177e4SLinus Torvalds 		n = prev;
23961da177e4SLinus Torvalds 	}
23971da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
23981da177e4SLinus Torvalds }
23991da177e4SLinus Torvalds 
24004a8c7bb5SNathan Zimmer /*
24014a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
24024a8c7bb5SNathan Zimmer  * writing.
24034a8c7bb5SNathan Zimmer  */
24041da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
24051da177e4SLinus Torvalds {
24061da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
24071da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
24081da177e4SLinus Torvalds 	struct sp_node *nd;
24091da177e4SLinus Torvalds 
24101da177e4SLinus Torvalds 	while (*p) {
24111da177e4SLinus Torvalds 		parent = *p;
24121da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
24131da177e4SLinus Torvalds 		if (new->start < nd->start)
24141da177e4SLinus Torvalds 			p = &(*p)->rb_left;
24151da177e4SLinus Torvalds 		else if (new->end > nd->end)
24161da177e4SLinus Torvalds 			p = &(*p)->rb_right;
24171da177e4SLinus Torvalds 		else
24181da177e4SLinus Torvalds 			BUG();
24191da177e4SLinus Torvalds 	}
24201da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
24211da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2422140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
242345c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
24241da177e4SLinus Torvalds }
24251da177e4SLinus Torvalds 
24261da177e4SLinus Torvalds /* Find shared policy intersecting idx */
24271da177e4SLinus Torvalds struct mempolicy *
24281da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
24291da177e4SLinus Torvalds {
24301da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
24311da177e4SLinus Torvalds 	struct sp_node *sn;
24321da177e4SLinus Torvalds 
24331da177e4SLinus Torvalds 	if (!sp->root.rb_node)
24341da177e4SLinus Torvalds 		return NULL;
24354a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
24361da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
24371da177e4SLinus Torvalds 	if (sn) {
24381da177e4SLinus Torvalds 		mpol_get(sn->policy);
24391da177e4SLinus Torvalds 		pol = sn->policy;
24401da177e4SLinus Torvalds 	}
24414a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
24421da177e4SLinus Torvalds 	return pol;
24431da177e4SLinus Torvalds }
24441da177e4SLinus Torvalds 
244563f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
244663f74ca2SKOSAKI Motohiro {
244763f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
244863f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
244963f74ca2SKOSAKI Motohiro }
245063f74ca2SKOSAKI Motohiro 
2451771fb4d8SLee Schermerhorn /**
2452771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2453771fb4d8SLee Schermerhorn  *
2454b46e14acSFabian Frederick  * @page: page to be checked
2455b46e14acSFabian Frederick  * @vma: vm area where page mapped
2456b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2457771fb4d8SLee Schermerhorn  *
2458771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the
2459771fb4d8SLee Schermerhorn  * page's node id.
2460771fb4d8SLee Schermerhorn  *
2461771fb4d8SLee Schermerhorn  * Returns:
2462771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2463771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2464771fb4d8SLee Schermerhorn  *
2465771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2466771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2467771fb4d8SLee Schermerhorn  */
2468771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2469771fb4d8SLee Schermerhorn {
2470771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2471c33d6c06SMel Gorman 	struct zoneref *z;
2472771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2473771fb4d8SLee Schermerhorn 	unsigned long pgoff;
247490572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
247590572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
247698fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2477771fb4d8SLee Schermerhorn 	int ret = -1;
2478771fb4d8SLee Schermerhorn 
2479dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2480771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2481771fb4d8SLee Schermerhorn 		goto out;
2482771fb4d8SLee Schermerhorn 
2483771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2484771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2485771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2486771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
248798c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2488771fb4d8SLee Schermerhorn 		break;
2489771fb4d8SLee Schermerhorn 
2490771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2491771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2492771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2493771fb4d8SLee Schermerhorn 		else
2494771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2495771fb4d8SLee Schermerhorn 		break;
2496771fb4d8SLee Schermerhorn 
2497771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2498bda420b9SHuang Ying 		/* Optimize placement among multiple nodes via NUMA balancing */
2499bda420b9SHuang Ying 		if (pol->flags & MPOL_F_MORON) {
2500bda420b9SHuang Ying 			if (node_isset(thisnid, pol->v.nodes))
2501bda420b9SHuang Ying 				break;
2502bda420b9SHuang Ying 			goto out;
2503bda420b9SHuang Ying 		}
2504c33d6c06SMel Gorman 
2505771fb4d8SLee Schermerhorn 		/*
2506771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2507771fb4d8SLee Schermerhorn 		 * Use the current page if its node is in the policy nodemask;
2508771fb4d8SLee Schermerhorn 		 * else select the nearest allowed node, if any.
2509771fb4d8SLee Schermerhorn 		 * If there are no allowed nodes, use the current node [!misplaced].
2510771fb4d8SLee Schermerhorn 		 */
2511771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2512771fb4d8SLee Schermerhorn 			goto out;
2513c33d6c06SMel Gorman 		z = first_zones_zonelist(
2514771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2515771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2516c33d6c06SMel Gorman 				&pol->v.nodes);
2517c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2518771fb4d8SLee Schermerhorn 		break;
2519771fb4d8SLee Schermerhorn 
2520771fb4d8SLee Schermerhorn 	default:
2521771fb4d8SLee Schermerhorn 		BUG();
2522771fb4d8SLee Schermerhorn 	}
25235606e387SMel Gorman 
25245606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2525e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
252690572890SPeter Zijlstra 		polnid = thisnid;
25275606e387SMel Gorman 
252810f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2529de1c9ce6SRik van Riel 			goto out;
2530de1c9ce6SRik van Riel 	}
2531e42c8ff2SMel Gorman 
2532771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2533771fb4d8SLee Schermerhorn 		ret = polnid;
2534771fb4d8SLee Schermerhorn out:
2535771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2536771fb4d8SLee Schermerhorn 
2537771fb4d8SLee Schermerhorn 	return ret;
2538771fb4d8SLee Schermerhorn }
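
A hedged sketch of the fault-path usage described above, modelled on a NUMA
hinting fault handler; migrate_misplaced_page() lives in mm/migrate.c, and the
wrapper name is hypothetical:

	static void demo_numa_hint_fault(struct page *page,
					 struct vm_area_struct *vma,
					 unsigned long addr)
	{
		int target_nid = mpol_misplaced(page, vma, addr);

		/* -1 means the page is already on an acceptable node */
		if (target_nid != -1)
			migrate_misplaced_page(page, vma, target_nid);
	}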
2539771fb4d8SLee Schermerhorn 
2540c11600e4SDavid Rientjes /*
2541c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2542c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2543c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2544c11600e4SDavid Rientjes  * policy.
2545c11600e4SDavid Rientjes  */
2546c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2547c11600e4SDavid Rientjes {
2548c11600e4SDavid Rientjes 	struct mempolicy *pol;
2549c11600e4SDavid Rientjes 
2550c11600e4SDavid Rientjes 	task_lock(task);
2551c11600e4SDavid Rientjes 	pol = task->mempolicy;
2552c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2553c11600e4SDavid Rientjes 	task_unlock(task);
2554c11600e4SDavid Rientjes 	mpol_put(pol);
2555c11600e4SDavid Rientjes }
2556c11600e4SDavid Rientjes 
25571da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
25581da177e4SLinus Torvalds {
2559140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
25601da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
256163f74ca2SKOSAKI Motohiro 	sp_free(n);
25621da177e4SLinus Torvalds }
25631da177e4SLinus Torvalds 
256442288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
256542288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
256642288fe3SMel Gorman {
256742288fe3SMel Gorman 	node->start = start;
256842288fe3SMel Gorman 	node->end = end;
256942288fe3SMel Gorman 	node->policy = pol;
257042288fe3SMel Gorman }
257142288fe3SMel Gorman 
2572dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2573dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
25741da177e4SLinus Torvalds {
2575869833f2SKOSAKI Motohiro 	struct sp_node *n;
2576869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
25771da177e4SLinus Torvalds 
2578869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
25791da177e4SLinus Torvalds 	if (!n)
25801da177e4SLinus Torvalds 		return NULL;
2581869833f2SKOSAKI Motohiro 
2582869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2583869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2584869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2585869833f2SKOSAKI Motohiro 		return NULL;
2586869833f2SKOSAKI Motohiro 	}
2587869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
258842288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2589869833f2SKOSAKI Motohiro 
25901da177e4SLinus Torvalds 	return n;
25911da177e4SLinus Torvalds }
25921da177e4SLinus Torvalds 
25931da177e4SLinus Torvalds /* Replace a policy range. */
25941da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
25951da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
25961da177e4SLinus Torvalds {
2597b22d127aSMel Gorman 	struct sp_node *n;
259842288fe3SMel Gorman 	struct sp_node *n_new = NULL;
259942288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2600b22d127aSMel Gorman 	int ret = 0;
26011da177e4SLinus Torvalds 
260242288fe3SMel Gorman restart:
26034a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
26041da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
26051da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
26061da177e4SLinus Torvalds 	while (n && n->start < end) {
26071da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
26081da177e4SLinus Torvalds 		if (n->start >= start) {
26091da177e4SLinus Torvalds 			if (n->end <= end)
26101da177e4SLinus Torvalds 				sp_delete(sp, n);
26111da177e4SLinus Torvalds 			else
26121da177e4SLinus Torvalds 				n->start = end;
26131da177e4SLinus Torvalds 		} else {
26141da177e4SLinus Torvalds 			/* Old policy spanning the whole new range. */
26151da177e4SLinus Torvalds 			if (n->end > end) {
261642288fe3SMel Gorman 				if (!n_new)
261742288fe3SMel Gorman 					goto alloc_new;
261842288fe3SMel Gorman 
261942288fe3SMel Gorman 				*mpol_new = *n->policy;
262042288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
26217880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
26221da177e4SLinus Torvalds 				n->end = start;
26235ca39575SHillf Danton 				sp_insert(sp, n_new);
262442288fe3SMel Gorman 				n_new = NULL;
262542288fe3SMel Gorman 				mpol_new = NULL;
26261da177e4SLinus Torvalds 				break;
26271da177e4SLinus Torvalds 			} else
26281da177e4SLinus Torvalds 				n->end = start;
26291da177e4SLinus Torvalds 		}
26301da177e4SLinus Torvalds 		if (!next)
26311da177e4SLinus Torvalds 			break;
26321da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
26331da177e4SLinus Torvalds 	}
26341da177e4SLinus Torvalds 	if (new)
26351da177e4SLinus Torvalds 		sp_insert(sp, new);
26364a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
263742288fe3SMel Gorman 	ret = 0;
263842288fe3SMel Gorman 
263942288fe3SMel Gorman err_out:
264042288fe3SMel Gorman 	if (mpol_new)
264142288fe3SMel Gorman 		mpol_put(mpol_new);
264242288fe3SMel Gorman 	if (n_new)
264342288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
264442288fe3SMel Gorman 
2645b22d127aSMel Gorman 	return ret;
264642288fe3SMel Gorman 
264742288fe3SMel Gorman alloc_new:
26484a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
264942288fe3SMel Gorman 	ret = -ENOMEM;
265042288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
265142288fe3SMel Gorman 	if (!n_new)
265242288fe3SMel Gorman 		goto err_out;
265342288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
265442288fe3SMel Gorman 	if (!mpol_new)
265542288fe3SMel Gorman 		goto err_out;
265642288fe3SMel Gorman 	goto restart;
26571da177e4SLinus Torvalds }
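
A worked example of the splitting case handled above, a sketch of the tree
contents rather than runnable code:

	/*
	 * Old policy spanning the whole new range:
	 *
	 *   before:  [0, 100) -> polA
	 *   insert:  [40, 60) -> polB
	 *   after:   [0, 40) -> polA, [40, 60) -> polB, [60, 100) -> polA
	 *
	 * n_new/mpol_new supply the trailing [60, 100) node; because they
	 * are allocated with sp->lock dropped, the function restarts the
	 * lookup once the allocations succeed.
	 */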
26581da177e4SLinus Torvalds 
265971fe804bSLee Schermerhorn /**
266071fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
266171fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
266271fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
266371fe804bSLee Schermerhorn  *
266471fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
266571fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
266671fe804bSLee Schermerhorn  * This must be released on exit.
26674bfc4495SKAMEZAWA Hiroyuki  * This is called during get_inode() calls, so GFP_KERNEL can be used.
266871fe804bSLee Schermerhorn  */
266971fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
26707339ff83SRobin Holt {
267158568d2aSMiao Xie 	int ret;
267258568d2aSMiao Xie 
267371fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
26744a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
26757339ff83SRobin Holt 
267671fe804bSLee Schermerhorn 	if (mpol) {
26777339ff83SRobin Holt 		struct vm_area_struct pvma;
267871fe804bSLee Schermerhorn 		struct mempolicy *new;
26794bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
26807339ff83SRobin Holt 
26814bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
26825c0c1654SLee Schermerhorn 			goto put_mpol;
268371fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
268471fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
268515d77835SLee Schermerhorn 		if (IS_ERR(new))
26860cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
268758568d2aSMiao Xie 
268858568d2aSMiao Xie 		task_lock(current);
26894bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
269058568d2aSMiao Xie 		task_unlock(current);
269115d77835SLee Schermerhorn 		if (ret)
26925c0c1654SLee Schermerhorn 			goto put_new;
269371fe804bSLee Schermerhorn 
269471fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
26952c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
269671fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
269771fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
269815d77835SLee Schermerhorn 
26995c0c1654SLee Schermerhorn put_new:
270071fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
27010cae3457SDan Carpenter free_scratch:
27024bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
27035c0c1654SLee Schermerhorn put_mpol:
27045c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
27057339ff83SRobin Holt 	}
27067339ff83SRobin Holt }
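
A hedged sketch of an inode-creation caller, in the spirit of tmpfs: install
the mount's mempolicy (which may be NULL) into a per-inode tree. The
demo_inode_info structure is hypothetical, and the caller is assumed to pass
in a reference that this call consumes:

	struct demo_inode_info {
		struct shared_policy policy;
		/* ... other per-inode state ... */
	};

	static void demo_inode_init_policy(struct demo_inode_info *info,
					   struct mempolicy *mnt_mpol)
	{
		/* mpol_shared_policy_init() drops the reference on mnt_mpol */
		mpol_shared_policy_init(&info->policy, mnt_mpol);
	}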
27077339ff83SRobin Holt 
27081da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
27091da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
27101da177e4SLinus Torvalds {
27111da177e4SLinus Torvalds 	int err;
27121da177e4SLinus Torvalds 	struct sp_node *new = NULL;
27131da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
27141da177e4SLinus Torvalds 
2715028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
27161da177e4SLinus Torvalds 		 vma->vm_pgoff,
271745c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2718028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
271900ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
27201da177e4SLinus Torvalds 
27211da177e4SLinus Torvalds 	if (npol) {
27221da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
27231da177e4SLinus Torvalds 		if (!new)
27241da177e4SLinus Torvalds 			return -ENOMEM;
27251da177e4SLinus Torvalds 	}
27261da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
27271da177e4SLinus Torvalds 	if (err && new)
272863f74ca2SKOSAKI Motohiro 		sp_free(new);
27291da177e4SLinus Torvalds 	return err;
27301da177e4SLinus Torvalds }
27311da177e4SLinus Torvalds 
27321da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
27331da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
27341da177e4SLinus Torvalds {
27351da177e4SLinus Torvalds 	struct sp_node *n;
27361da177e4SLinus Torvalds 	struct rb_node *next;
27371da177e4SLinus Torvalds 
27381da177e4SLinus Torvalds 	if (!p->root.rb_node)
27391da177e4SLinus Torvalds 		return;
27404a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
27411da177e4SLinus Torvalds 	next = rb_first(&p->root);
27421da177e4SLinus Torvalds 	while (next) {
27431da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
27441da177e4SLinus Torvalds 		next = rb_next(&n->nd);
274563f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
27461da177e4SLinus Torvalds 	}
27474a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
27481da177e4SLinus Torvalds }
27491da177e4SLinus Torvalds 
27501a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2751c297663cSMel Gorman static int __initdata numabalancing_override;
27521a687c2eSMel Gorman 
27531a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
27541a687c2eSMel Gorman {
27551a687c2eSMel Gorman 	bool numabalancing_default = false;
27561a687c2eSMel Gorman 
27571a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
27581a687c2eSMel Gorman 		numabalancing_default = true;
27591a687c2eSMel Gorman 
2760c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2761c297663cSMel Gorman 	if (numabalancing_override)
2762c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2763c297663cSMel Gorman 
2764b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2765756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2766c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
27671a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
27681a687c2eSMel Gorman 	}
27691a687c2eSMel Gorman }
27701a687c2eSMel Gorman 
27711a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
27721a687c2eSMel Gorman {
27731a687c2eSMel Gorman 	int ret = 0;
27741a687c2eSMel Gorman 	if (!str)
27751a687c2eSMel Gorman 		goto out;
27761a687c2eSMel Gorman 
27771a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2778c297663cSMel Gorman 		numabalancing_override = 1;
27791a687c2eSMel Gorman 		ret = 1;
27801a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2781c297663cSMel Gorman 		numabalancing_override = -1;
27821a687c2eSMel Gorman 		ret = 1;
27831a687c2eSMel Gorman 	}
27841a687c2eSMel Gorman out:
27851a687c2eSMel Gorman 	if (!ret)
27864a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
27871a687c2eSMel Gorman 
27881a687c2eSMel Gorman 	return ret;
27891a687c2eSMel Gorman }
27901a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
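
The parser above accepts exactly two values on the kernel command line;
anything else triggers the pr_warn() and is ignored:

	numa_balancing=enable	# force balancing on, overriding the Kconfig default
	numa_balancing=disable	# force balancing off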
27911a687c2eSMel Gorman #else
27921a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
27931a687c2eSMel Gorman {
27941a687c2eSMel Gorman }
27951a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
27961a687c2eSMel Gorman 
27971da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
27981da177e4SLinus Torvalds void __init numa_policy_init(void)
27991da177e4SLinus Torvalds {
2800b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2801b71636e2SPaul Mundt 	unsigned long largest = 0;
2802b71636e2SPaul Mundt 	int nid, prefer = 0;
2803b71636e2SPaul Mundt 
28041da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
28051da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
280620c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
28071da177e4SLinus Torvalds 
28081da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
28091da177e4SLinus Torvalds 				     sizeof(struct sp_node),
281020c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
28111da177e4SLinus Torvalds 
28125606e387SMel Gorman 	for_each_node(nid) {
28135606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
28145606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
28155606e387SMel Gorman 			.mode = MPOL_PREFERRED,
28165606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
28175606e387SMel Gorman 			.v = { .preferred_node = nid, },
28185606e387SMel Gorman 		};
28195606e387SMel Gorman 	}
28205606e387SMel Gorman 
2821b71636e2SPaul Mundt 	/*
2822b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2823b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2824b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2825b71636e2SPaul Mundt 	 */
2826b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
282701f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2828b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
28291da177e4SLinus Torvalds 
2830b71636e2SPaul Mundt 		/* Preserve the largest node */
2831b71636e2SPaul Mundt 		if (largest < total_pages) {
2832b71636e2SPaul Mundt 			largest = total_pages;
2833b71636e2SPaul Mundt 			prefer = nid;
2834b71636e2SPaul Mundt 		}
2835b71636e2SPaul Mundt 
2836b71636e2SPaul Mundt 		/* Interleave this node? */
2837b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2838b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2839b71636e2SPaul Mundt 	}
2840b71636e2SPaul Mundt 
2841b71636e2SPaul Mundt 	/* All too small, use the largest */
2842b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2843b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2844b71636e2SPaul Mundt 
2845028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2846b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
28471a687c2eSMel Gorman 
28481a687c2eSMel Gorman 	check_numabalancing_enable();
28491da177e4SLinus Torvalds }
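
To make the boot-time interleave threshold above concrete, assuming
PAGE_SHIFT == 12 (4 KiB pages):

	/*
	 * (total_pages << PAGE_SHIFT) >= (16 << 20)
	 * total_pages >= (16 << 20) >> 12 = 4096 pages = 16 MiB,
	 * so a node joins interleave_nodes once it has 4096 present pages.
	 */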
28501da177e4SLinus Torvalds 
28518bccd85fSChristoph Lameter /* Reset policy of current process to default */
28521da177e4SLinus Torvalds void numa_default_policy(void)
28531da177e4SLinus Torvalds {
2854028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
28551da177e4SLinus Torvalds }
285668860ec1SPaul Jackson 
28574225399aSPaul Jackson /*
2858095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2859095f1fc4SLee Schermerhorn  */
2860095f1fc4SLee Schermerhorn 
2861095f1fc4SLee Schermerhorn /*
2862f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
28631a75a6c8SChristoph Lameter  */
2864345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2865345ace9cSLee Schermerhorn {
2866345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2867345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2868345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2869345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2870d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2871345ace9cSLee Schermerhorn };
28721a75a6c8SChristoph Lameter 
2873095f1fc4SLee Schermerhorn 
2874095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2875095f1fc4SLee Schermerhorn /**
2876f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2877095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
287871fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2879095f1fc4SLee Schermerhorn  *
2880095f1fc4SLee Schermerhorn  * Format of input:
2881095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2882095f1fc4SLee Schermerhorn  *
288371fe804bSLee Schermerhorn  * On success, returns 0, else 1
2884095f1fc4SLee Schermerhorn  */
2885a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2886095f1fc4SLee Schermerhorn {
288771fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2888f2a07f40SHugh Dickins 	unsigned short mode_flags;
288971fe804bSLee Schermerhorn 	nodemask_t nodes;
2890095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2891095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2892dedf2c73Szhong jiang 	int err = 1, mode;
2893095f1fc4SLee Schermerhorn 
2894c7a91bc7SDan Carpenter 	if (flags)
2895c7a91bc7SDan Carpenter 		*flags++ = '\0';	/* terminate mode string */
2896c7a91bc7SDan Carpenter 
2897095f1fc4SLee Schermerhorn 	if (nodelist) {
2898095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2899095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
290071fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2901095f1fc4SLee Schermerhorn 			goto out;
290201f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2903095f1fc4SLee Schermerhorn 			goto out;
290471fe804bSLee Schermerhorn 	} else
290571fe804bSLee Schermerhorn 		nodes_clear(nodes);
290671fe804bSLee Schermerhorn 
2907dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
2908dedf2c73Szhong jiang 	if (mode < 0)
2909095f1fc4SLee Schermerhorn 		goto out;
2910095f1fc4SLee Schermerhorn 
291171fe804bSLee Schermerhorn 	switch (mode) {
2912095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
291371fe804bSLee Schermerhorn 		/*
2914aa9f7d51SRandy Dunlap 		 * Insist on a nodelist of one node only; later we use
2915aa9f7d51SRandy Dunlap 		 * first_node(nodes) to grab that single node, so here the
2916aa9f7d51SRandy Dunlap 		 * nodelist (or nodes) cannot be empty.
291771fe804bSLee Schermerhorn 		 */
2918095f1fc4SLee Schermerhorn 		if (nodelist) {
2919095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2920095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2921095f1fc4SLee Schermerhorn 				rest++;
2922926f2ae0SKOSAKI Motohiro 			if (*rest)
2923926f2ae0SKOSAKI Motohiro 				goto out;
2924aa9f7d51SRandy Dunlap 			if (nodes_empty(nodes))
2925aa9f7d51SRandy Dunlap 				goto out;
2926095f1fc4SLee Schermerhorn 		}
2927095f1fc4SLee Schermerhorn 		break;
2928095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2929095f1fc4SLee Schermerhorn 		/*
2930095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2931095f1fc4SLee Schermerhorn 		 */
2932095f1fc4SLee Schermerhorn 		if (!nodelist)
293301f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
29343f226aa1SLee Schermerhorn 		break;
293571fe804bSLee Schermerhorn 	case MPOL_LOCAL:
29363f226aa1SLee Schermerhorn 		/*
293771fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
29383f226aa1SLee Schermerhorn 		 */
293971fe804bSLee Schermerhorn 		if (nodelist)
29403f226aa1SLee Schermerhorn 			goto out;
294171fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
29423f226aa1SLee Schermerhorn 		break;
2943413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2944413b43deSRavikiran G Thirumalai 		/*
2945413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2946413b43deSRavikiran G Thirumalai 		 */
2947413b43deSRavikiran G Thirumalai 		if (!nodelist)
2948413b43deSRavikiran G Thirumalai 			err = 0;
2949413b43deSRavikiran G Thirumalai 		goto out;
2950d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
295171fe804bSLee Schermerhorn 		/*
2952d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
295371fe804bSLee Schermerhorn 		 */
2954d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2955d69b2e63SKOSAKI Motohiro 			goto out;
2956095f1fc4SLee Schermerhorn 	}
2957095f1fc4SLee Schermerhorn 
295871fe804bSLee Schermerhorn 	mode_flags = 0;
2959095f1fc4SLee Schermerhorn 	if (flags) {
2960095f1fc4SLee Schermerhorn 		/*
2961095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2962095f1fc4SLee Schermerhorn 		 * mode flags.
2963095f1fc4SLee Schermerhorn 		 */
2964095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
296571fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2966095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
296771fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2968095f1fc4SLee Schermerhorn 		else
2969926f2ae0SKOSAKI Motohiro 			goto out;
2970095f1fc4SLee Schermerhorn 	}
297171fe804bSLee Schermerhorn 
297271fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
297371fe804bSLee Schermerhorn 	if (IS_ERR(new))
2974926f2ae0SKOSAKI Motohiro 		goto out;
2975926f2ae0SKOSAKI Motohiro 
2976f2a07f40SHugh Dickins 	/*
2977f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2978f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2979f2a07f40SHugh Dickins 	 */
2980f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2981f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2982f2a07f40SHugh Dickins 	else if (nodelist)
2983f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2984f2a07f40SHugh Dickins 	else
2985f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2986f2a07f40SHugh Dickins 
2987f2a07f40SHugh Dickins 	/*
2988f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2989f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2990f2a07f40SHugh Dickins 	 */
2991e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2992f2a07f40SHugh Dickins 
2993926f2ae0SKOSAKI Motohiro 	err = 0;
299471fe804bSLee Schermerhorn 
2995095f1fc4SLee Schermerhorn out:
2996095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2997095f1fc4SLee Schermerhorn 	if (nodelist)
2998095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2999095f1fc4SLee Schermerhorn 	if (flags)
3000095f1fc4SLee Schermerhorn 		*--flags = '=';
300171fe804bSLee Schermerhorn 	if (!err)
300271fe804bSLee Schermerhorn 		*mpol = new;
3003095f1fc4SLee Schermerhorn 	return err;
3004095f1fc4SLee Schermerhorn }
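
A hedged sketch of a mount-option consumer; the buffer must be writable
because the parser temporarily NUL-terminates the mode and flags substrings,
and the function name is hypothetical:

	static struct mempolicy *demo_parse_mpol(void)
	{
		struct mempolicy *mpol = NULL;
		char str[] = "interleave=static:0-3";

		if (mpol_parse_str(str, &mpol))
			return NULL;	/* non-zero means parse failure */
		return mpol;		/* caller owns one reference */
	}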
3005095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
3006095f1fc4SLee Schermerhorn 
300771fe804bSLee Schermerhorn /**
300871fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
300971fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
301071fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
301171fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
301271fe804bSLee Schermerhorn  *
3013948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3014948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3015948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
30161a75a6c8SChristoph Lameter  */
3017948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
30181a75a6c8SChristoph Lameter {
30191a75a6c8SChristoph Lameter 	char *p = buffer;
3020948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
3021948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
3022948927eeSDavid Rientjes 	unsigned short flags = 0;
30231a75a6c8SChristoph Lameter 
30248790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3025bea904d5SLee Schermerhorn 		mode = pol->mode;
3026948927eeSDavid Rientjes 		flags = pol->flags;
3027948927eeSDavid Rientjes 	}
3028bea904d5SLee Schermerhorn 
30291a75a6c8SChristoph Lameter 	switch (mode) {
30301a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
30311a75a6c8SChristoph Lameter 		break;
30321a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
3033fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
3034f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
303553f2556bSLee Schermerhorn 		else
3036fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
30371a75a6c8SChristoph Lameter 		break;
30381a75a6c8SChristoph Lameter 	case MPOL_BIND:
30391a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
30401a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
30411a75a6c8SChristoph Lameter 		break;
30421a75a6c8SChristoph Lameter 	default:
3043948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
3044948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
3045948927eeSDavid Rientjes 		return;
30461a75a6c8SChristoph Lameter 	}
30471a75a6c8SChristoph Lameter 
3048b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
30491a75a6c8SChristoph Lameter 
3050fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
3051948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
3052f5b087b5SDavid Rientjes 
30532291990aSLee Schermerhorn 		/*
30542291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
30552291990aSLee Schermerhorn 		 */
3056f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
30572291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
30582291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
30592291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
3060f5b087b5SDavid Rientjes 	}
3061f5b087b5SDavid Rientjes 
30629e763e0fSTejun Heo 	if (!nodes_empty(nodes))
30639e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
30649e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
30651a75a6c8SChristoph Lameter }
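
A hedged sketch of a /proc-style consumer, with a hypothetical function name
and <linux/seq_file.h> assumed: format the current task's policy into the
recommended 32-byte buffer. A NULL current->mempolicy prints as "default":

	static void demo_show_policy(struct seq_file *m)
	{
		char buf[32];

		task_lock(current);
		mpol_to_str(buf, sizeof(buf), current->mempolicy);
		task_unlock(current);
		seq_printf(m, "mempolicy: %s\n", buf);
	}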
3066