xref: /linux/mm/mempolicy.c (revision d883544515aae54842c21730b880172e7894fde9)
146aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
68bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy a per-process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                to the last. It would be better if bind would truly restrict
268bccd85fSChristoph Lameter  *                the allocation to memory nodes instead
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred       Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
341da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
351da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
361da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
371da177e4SLinus Torvalds  *
381da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
391da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
401da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
411da177e4SLinus Torvalds  * allocations for a VMA in the VM.
421da177e4SLinus Torvalds  *
431da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
441da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When process policy
451da177e4SLinus Torvalds  * is used it is not remembered over swap outs/swap ins.
461da177e4SLinus Torvalds  *
471da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
481da177e4SLinus Torvalds  * requesting a lower zone just use default policy. This implies that
491da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
501da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
511da177e4SLinus Torvalds  *
521da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
531da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
541da177e4SLinus Torvalds  */
551da177e4SLinus Torvalds 
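/*
 * Illustrative sketch (editorial addition, not part of this file): from
 * userspace the policies above are selected with the set_mempolicy(2) and
 * mbind(2) syscalls, e.g. through the <numaif.h> wrappers in libnuma.
 * The snippet assumes a machine with nodes 0 and 1 and omits error
 * handling:
 *
 *	unsigned long nodes = 0x3;	// nodemask covering nodes 0 and 1
 *
 *	// process policy: interleave further allocations over nodes 0-1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes) + 1);
 *
 *	// VMA policy: restrict an existing mapping to the same nodes
 *	mbind(buf, len, MPOL_BIND, &nodes, 8 * sizeof(nodes) + 1,
 *	      MPOL_MF_STRICT | MPOL_MF_MOVE);
 */
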
561da177e4SLinus Torvalds /* Notebook:
571da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
581da177e4SLinus Torvalds    object
591da177e4SLinus Torvalds    statistics for bigpages
601da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
611da177e4SLinus Torvalds    first item above.
621da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
631da177e4SLinus Torvalds    grows down?
641da177e4SLinus Torvalds    make bind policy root only? It can trigger oom much faster and the
651da177e4SLinus Torvalds    kernel is not always graceful about that.
661da177e4SLinus Torvalds */
671da177e4SLinus Torvalds 
68b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69b1de0d13SMitchel Humpherys 
701da177e4SLinus Torvalds #include <linux/mempolicy.h>
711da177e4SLinus Torvalds #include <linux/mm.h>
721da177e4SLinus Torvalds #include <linux/highmem.h>
731da177e4SLinus Torvalds #include <linux/hugetlb.h>
741da177e4SLinus Torvalds #include <linux/kernel.h>
751da177e4SLinus Torvalds #include <linux/sched.h>
766e84f315SIngo Molnar #include <linux/sched/mm.h>
776a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
78f719ff9bSIngo Molnar #include <linux/sched/task.h>
791da177e4SLinus Torvalds #include <linux/nodemask.h>
801da177e4SLinus Torvalds #include <linux/cpuset.h>
811da177e4SLinus Torvalds #include <linux/slab.h>
821da177e4SLinus Torvalds #include <linux/string.h>
83b95f1b31SPaul Gortmaker #include <linux/export.h>
84b488893aSPavel Emelyanov #include <linux/nsproxy.h>
851da177e4SLinus Torvalds #include <linux/interrupt.h>
861da177e4SLinus Torvalds #include <linux/init.h>
871da177e4SLinus Torvalds #include <linux/compat.h>
8831367466SOtto Ebeling #include <linux/ptrace.h>
89dc9aa5b9SChristoph Lameter #include <linux/swap.h>
901a75a6c8SChristoph Lameter #include <linux/seq_file.h>
911a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
92b20a3503SChristoph Lameter #include <linux/migrate.h>
9362b61f61SHugh Dickins #include <linux/ksm.h>
9495a402c3SChristoph Lameter #include <linux/rmap.h>
9586c3a764SDavid Quigley #include <linux/security.h>
96dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
97095f1fc4SLee Schermerhorn #include <linux/ctype.h>
986d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
99b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
100b1de0d13SMitchel Humpherys #include <linux/printk.h>
101c8633798SNaoya Horiguchi #include <linux/swapops.h>
102dc9aa5b9SChristoph Lameter 
1031da177e4SLinus Torvalds #include <asm/tlbflush.h>
1047c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1051da177e4SLinus Torvalds 
10662695a84SNick Piggin #include "internal.h"
10762695a84SNick Piggin 
10838e35860SChristoph Lameter /* Internal flags */
109dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
11038e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
111dc9aa5b9SChristoph Lameter 
112fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
113fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1141da177e4SLinus Torvalds 
1151da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1161da177e4SLinus Torvalds    policied. */
1176267276fSChristoph Lameter enum zone_type policy_zone = 0;
1181da177e4SLinus Torvalds 
119bea904d5SLee Schermerhorn /*
120bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
121bea904d5SLee Schermerhorn  */
122e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1231da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
124bea904d5SLee Schermerhorn 	.mode = MPOL_PREFERRED,
125fc36b8d3SLee Schermerhorn 	.flags = MPOL_F_LOCAL,
1261da177e4SLinus Torvalds };
1271da177e4SLinus Torvalds 
1285606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1295606e387SMel Gorman 
13074d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1315606e387SMel Gorman {
1325606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
133f15ca78eSOleg Nesterov 	int node;
1345606e387SMel Gorman 
135f15ca78eSOleg Nesterov 	if (pol)
136f15ca78eSOleg Nesterov 		return pol;
1375606e387SMel Gorman 
138f15ca78eSOleg Nesterov 	node = numa_node_id();
1391da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1401da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
141f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
142f15ca78eSOleg Nesterov 		if (pol->mode)
143f15ca78eSOleg Nesterov 			return pol;
1441da6f0e1SJianguo Wu 	}
1455606e387SMel Gorman 
146f15ca78eSOleg Nesterov 	return &default_policy;
1475606e387SMel Gorman }
1485606e387SMel Gorman 
14937012946SDavid Rientjes static const struct mempolicy_operations {
15037012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
151213980c0SVlastimil Babka 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
15237012946SDavid Rientjes } mpol_ops[MPOL_MAX];
15337012946SDavid Rientjes 
154f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
155f5b087b5SDavid Rientjes {
1566d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1574c50bc01SDavid Rientjes }
1584c50bc01SDavid Rientjes 
1594c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1604c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1614c50bc01SDavid Rientjes {
1624c50bc01SDavid Rientjes 	nodemask_t tmp;
1634c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1644c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
165f5b087b5SDavid Rientjes }
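/*
 * Worked example (editorial sketch of the MPOL_F_RELATIVE_NODES semantics
 * implemented above): the user nodemask indexes the set bits of the allowed
 * mask.  With *orig = {0,2} and *rel = {4,5,6}, nodes_fold() wraps orig
 * modulo nodes_weight(*rel) = 3, leaving {0,2}, and nodes_onto() then maps
 * bit 0 to the 1st node of rel and bit 2 to the 3rd, so *ret = {4,6}.
 */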
166f5b087b5SDavid Rientjes 
16737012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
16837012946SDavid Rientjes {
16937012946SDavid Rientjes 	if (nodes_empty(*nodes))
17037012946SDavid Rientjes 		return -EINVAL;
17137012946SDavid Rientjes 	pol->v.nodes = *nodes;
17237012946SDavid Rientjes 	return 0;
17337012946SDavid Rientjes }
17437012946SDavid Rientjes 
17537012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
17637012946SDavid Rientjes {
17737012946SDavid Rientjes 	if (!nodes)
178fc36b8d3SLee Schermerhorn 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
17937012946SDavid Rientjes 	else if (nodes_empty(*nodes))
18037012946SDavid Rientjes 		return -EINVAL;			/*  no allowed nodes */
18137012946SDavid Rientjes 	else
18237012946SDavid Rientjes 		pol->v.preferred_node = first_node(*nodes);
18337012946SDavid Rientjes 	return 0;
18437012946SDavid Rientjes }
18537012946SDavid Rientjes 
18637012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
18737012946SDavid Rientjes {
188859f7ef1SZhihui Zhang 	if (nodes_empty(*nodes))
18937012946SDavid Rientjes 		return -EINVAL;
19037012946SDavid Rientjes 	pol->v.nodes = *nodes;
19137012946SDavid Rientjes 	return 0;
19237012946SDavid Rientjes }
19337012946SDavid Rientjes 
19458568d2aSMiao Xie /*
19558568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
19658568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
19758568d2aSMiao Xie  * parameter with respect to the policy mode and flags.  But, we need to
19858568d2aSMiao Xie  * handle an empty nodemask with MPOL_PREFERRED here.
19958568d2aSMiao Xie  *
20058568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
20158568d2aSMiao Xie  * and mempolicy.  May also be called holding the mmap_semaphore for write.
20258568d2aSMiao Xie  */
2034bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2044bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
20558568d2aSMiao Xie {
20658568d2aSMiao Xie 	int ret;
20758568d2aSMiao Xie 
20858568d2aSMiao Xie 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
20958568d2aSMiao Xie 	if (pol == NULL)
21058568d2aSMiao Xie 		return 0;
21101f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2124bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
21301f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
21458568d2aSMiao Xie 
21558568d2aSMiao Xie 	VM_BUG_ON(!nodes);
21658568d2aSMiao Xie 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
21758568d2aSMiao Xie 		nodes = NULL;	/* explicit local allocation */
21858568d2aSMiao Xie 	else {
21958568d2aSMiao Xie 		if (pol->flags & MPOL_F_RELATIVE_NODES)
2204bfc4495SKAMEZAWA Hiroyuki 			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
22158568d2aSMiao Xie 		else
2224bfc4495SKAMEZAWA Hiroyuki 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
2234bfc4495SKAMEZAWA Hiroyuki 
22458568d2aSMiao Xie 		if (mpol_store_user_nodemask(pol))
22558568d2aSMiao Xie 			pol->w.user_nodemask = *nodes;
22658568d2aSMiao Xie 		else
22758568d2aSMiao Xie 			pol->w.cpuset_mems_allowed =
22858568d2aSMiao Xie 						cpuset_current_mems_allowed;
22958568d2aSMiao Xie 	}
23058568d2aSMiao Xie 
2314bfc4495SKAMEZAWA Hiroyuki 	if (nodes)
2324bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
2334bfc4495SKAMEZAWA Hiroyuki 	else
2344bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, NULL);
23558568d2aSMiao Xie 	return ret;
23658568d2aSMiao Xie }
23758568d2aSMiao Xie 
23858568d2aSMiao Xie /*
23958568d2aSMiao Xie  * This function just creates a new policy, does some checks and simple
24058568d2aSMiao Xie  * initialization. You must invoke mpol_set_nodemask() to set nodes.
24158568d2aSMiao Xie  */
242028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
243028fec41SDavid Rientjes 				  nodemask_t *nodes)
2441da177e4SLinus Torvalds {
2451da177e4SLinus Torvalds 	struct mempolicy *policy;
2461da177e4SLinus Torvalds 
247028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
24800ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
249140d5a49SPaul Mundt 
2503e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2513e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
25237012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
253d3a71033SLee Schermerhorn 		return NULL;
25437012946SDavid Rientjes 	}
2553e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2563e1f0645SDavid Rientjes 
2573e1f0645SDavid Rientjes 	/*
2583e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2593e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2603e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2613e1f0645SDavid Rientjes 	 */
2623e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2633e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2643e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2653e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2663e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2673e1f0645SDavid Rientjes 		}
268479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
2698d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
2708d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
2718d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
272479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
273479e2802SPeter Zijlstra 		mode = MPOL_PREFERRED;
2743e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2753e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2761da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2771da177e4SLinus Torvalds 	if (!policy)
2781da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2791da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
28045c4745aSLee Schermerhorn 	policy->mode = mode;
28137012946SDavid Rientjes 	policy->flags = flags;
2823e1f0645SDavid Rientjes 
28337012946SDavid Rientjes 	return policy;
28437012946SDavid Rientjes }
28537012946SDavid Rientjes 
28652cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
28752cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
28852cd3b07SLee Schermerhorn {
28952cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
29052cd3b07SLee Schermerhorn 		return;
29152cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
29252cd3b07SLee Schermerhorn }
29352cd3b07SLee Schermerhorn 
294213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
29537012946SDavid Rientjes {
29637012946SDavid Rientjes }
29737012946SDavid Rientjes 
298213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
2991d0d2680SDavid Rientjes {
3001d0d2680SDavid Rientjes 	nodemask_t tmp;
3011d0d2680SDavid Rientjes 
30237012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
30337012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
30437012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
30537012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3061d0d2680SDavid Rientjes 	else {
307213980c0SVlastimil Babka 		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
308213980c0SVlastimil Babka 								*nodes);
30929b190faSzhong jiang 		pol->w.cpuset_mems_allowed = *nodes;
3101d0d2680SDavid Rientjes 	}
31137012946SDavid Rientjes 
312708c1bbcSMiao Xie 	if (nodes_empty(tmp))
313708c1bbcSMiao Xie 		tmp = *nodes;
314708c1bbcSMiao Xie 
3151d0d2680SDavid Rientjes 	pol->v.nodes = tmp;
31637012946SDavid Rientjes }
31737012946SDavid Rientjes 
31837012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
319213980c0SVlastimil Babka 						const nodemask_t *nodes)
32037012946SDavid Rientjes {
32137012946SDavid Rientjes 	nodemask_t tmp;
32237012946SDavid Rientjes 
32337012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES) {
3241d0d2680SDavid Rientjes 		int node = first_node(pol->w.user_nodemask);
3251d0d2680SDavid Rientjes 
326fc36b8d3SLee Schermerhorn 		if (node_isset(node, *nodes)) {
3271d0d2680SDavid Rientjes 			pol->v.preferred_node = node;
328fc36b8d3SLee Schermerhorn 			pol->flags &= ~MPOL_F_LOCAL;
329fc36b8d3SLee Schermerhorn 		} else
330fc36b8d3SLee Schermerhorn 			pol->flags |= MPOL_F_LOCAL;
33137012946SDavid Rientjes 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
33237012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3331d0d2680SDavid Rientjes 		pol->v.preferred_node = first_node(tmp);
334fc36b8d3SLee Schermerhorn 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
3351d0d2680SDavid Rientjes 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
33637012946SDavid Rientjes 						   pol->w.cpuset_mems_allowed,
33737012946SDavid Rientjes 						   *nodes);
33837012946SDavid Rientjes 		pol->w.cpuset_mems_allowed = *nodes;
3391d0d2680SDavid Rientjes 	}
3401d0d2680SDavid Rientjes }
34137012946SDavid Rientjes 
342708c1bbcSMiao Xie /*
343708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
344708c1bbcSMiao Xie  *
345213980c0SVlastimil Babka  * Per-vma policies are protected by mmap_sem. Allocations using per-task
346213980c0SVlastimil Babka  * policies are protected by task->mems_allowed_seq to prevent a premature
347213980c0SVlastimil Babka  * OOM/allocation failure due to parallel nodemask modification.
348708c1bbcSMiao Xie  */
349213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
35037012946SDavid Rientjes {
35137012946SDavid Rientjes 	if (!pol)
35237012946SDavid Rientjes 		return;
3532e25644eSVlastimil Babka 	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
35437012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
35537012946SDavid Rientjes 		return;
356708c1bbcSMiao Xie 
357213980c0SVlastimil Babka 	mpol_ops[pol->mode].rebind(pol, newmask);
3581d0d2680SDavid Rientjes }
3591d0d2680SDavid Rientjes 
3601d0d2680SDavid Rientjes /*
3611d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires task
3621d0d2680SDavid Rientjes  * pointer, and updates task mempolicy.
36358568d2aSMiao Xie  *
36458568d2aSMiao Xie  * Called with task's alloc_lock held.
3651d0d2680SDavid Rientjes  */
3661d0d2680SDavid Rientjes 
367213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3681d0d2680SDavid Rientjes {
369213980c0SVlastimil Babka 	mpol_rebind_policy(tsk->mempolicy, new);
3701d0d2680SDavid Rientjes }
3711d0d2680SDavid Rientjes 
3721d0d2680SDavid Rientjes /*
3731d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
3741d0d2680SDavid Rientjes  *
3751d0d2680SDavid Rientjes  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
3761d0d2680SDavid Rientjes  */
3771d0d2680SDavid Rientjes 
3781d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
3791d0d2680SDavid Rientjes {
3801d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
3811d0d2680SDavid Rientjes 
3821d0d2680SDavid Rientjes 	down_write(&mm->mmap_sem);
3831d0d2680SDavid Rientjes 	for (vma = mm->mmap; vma; vma = vma->vm_next)
384213980c0SVlastimil Babka 		mpol_rebind_policy(vma->vm_policy, new);
3851d0d2680SDavid Rientjes 	up_write(&mm->mmap_sem);
3861d0d2680SDavid Rientjes }
3871d0d2680SDavid Rientjes 
38837012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
38937012946SDavid Rientjes 	[MPOL_DEFAULT] = {
39037012946SDavid Rientjes 		.rebind = mpol_rebind_default,
39137012946SDavid Rientjes 	},
39237012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
39337012946SDavid Rientjes 		.create = mpol_new_interleave,
39437012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
39537012946SDavid Rientjes 	},
39637012946SDavid Rientjes 	[MPOL_PREFERRED] = {
39737012946SDavid Rientjes 		.create = mpol_new_preferred,
39837012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
39937012946SDavid Rientjes 	},
40037012946SDavid Rientjes 	[MPOL_BIND] = {
40137012946SDavid Rientjes 		.create = mpol_new_bind,
40237012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
40337012946SDavid Rientjes 	},
40437012946SDavid Rientjes };
40537012946SDavid Rientjes 
406fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
407fc301289SChristoph Lameter 				unsigned long flags);
4081a75a6c8SChristoph Lameter 
4096f4576e3SNaoya Horiguchi struct queue_pages {
4106f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4116f4576e3SNaoya Horiguchi 	unsigned long flags;
4126f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
4136f4576e3SNaoya Horiguchi 	struct vm_area_struct *prev;
4146f4576e3SNaoya Horiguchi };
4156f4576e3SNaoya Horiguchi 
41698094945SNaoya Horiguchi /*
41788aaa2a1SNaoya Horiguchi  * Check if the page's nid is in qp->nmask.
41888aaa2a1SNaoya Horiguchi  *
41988aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
42088aaa2a1SNaoya Horiguchi  * in the invert of qp->nmask.
42188aaa2a1SNaoya Horiguchi  * in the inverse of qp->nmask.
42288aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page,
42388aaa2a1SNaoya Horiguchi 					struct queue_pages *qp)
42488aaa2a1SNaoya Horiguchi {
42588aaa2a1SNaoya Horiguchi 	int nid = page_to_nid(page);
42688aaa2a1SNaoya Horiguchi 	unsigned long flags = qp->flags;
42788aaa2a1SNaoya Horiguchi 
42888aaa2a1SNaoya Horiguchi 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
42988aaa2a1SNaoya Horiguchi }
43088aaa2a1SNaoya Horiguchi 
431a7f40cfeSYang Shi /*
432*d8835445SYang Shi  * queue_pages_pmd() has four possible return values:
433*d8835445SYang Shi  * 0 - pages are placed on the right node or queued successfully.
434*d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
435*d8835445SYang Shi  *     specified.
436*d8835445SYang Shi  * 2 - THP was split.
437*d8835445SYang Shi  * -EIO - the PMD is a migration entry, or only MPOL_MF_STRICT was
438*d8835445SYang Shi  *        specified and an existing page was already on a node that does
439*d8835445SYang Shi  *        not follow the policy.
440a7f40cfeSYang Shi  */
441c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
442c8633798SNaoya Horiguchi 				unsigned long end, struct mm_walk *walk)
443c8633798SNaoya Horiguchi {
444c8633798SNaoya Horiguchi 	int ret = 0;
445c8633798SNaoya Horiguchi 	struct page *page;
446c8633798SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
447c8633798SNaoya Horiguchi 	unsigned long flags;
448c8633798SNaoya Horiguchi 
449c8633798SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(*pmd))) {
450a7f40cfeSYang Shi 		ret = -EIO;
451c8633798SNaoya Horiguchi 		goto unlock;
452c8633798SNaoya Horiguchi 	}
453c8633798SNaoya Horiguchi 	page = pmd_page(*pmd);
454c8633798SNaoya Horiguchi 	if (is_huge_zero_page(page)) {
455c8633798SNaoya Horiguchi 		spin_unlock(ptl);
456c8633798SNaoya Horiguchi 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
457*d8835445SYang Shi 		ret = 2;
458c8633798SNaoya Horiguchi 		goto out;
459c8633798SNaoya Horiguchi 	}
460*d8835445SYang Shi 	if (!queue_pages_required(page, qp))
461c8633798SNaoya Horiguchi 		goto unlock;
462c8633798SNaoya Horiguchi 
463c8633798SNaoya Horiguchi 	flags = qp->flags;
464c8633798SNaoya Horiguchi 	/* go to thp migration */
465a7f40cfeSYang Shi 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
466a7f40cfeSYang Shi 		if (!vma_migratable(walk->vma)) {
467*d8835445SYang Shi 			ret = 1;
468a7f40cfeSYang Shi 			goto unlock;
469a7f40cfeSYang Shi 		}
470a7f40cfeSYang Shi 
471c8633798SNaoya Horiguchi 		migrate_page_add(page, qp->pagelist, flags);
472a7f40cfeSYang Shi 	} else
473a7f40cfeSYang Shi 		ret = -EIO;
474c8633798SNaoya Horiguchi unlock:
475c8633798SNaoya Horiguchi 	spin_unlock(ptl);
476c8633798SNaoya Horiguchi out:
477c8633798SNaoya Horiguchi 	return ret;
478c8633798SNaoya Horiguchi }
479c8633798SNaoya Horiguchi 
48088aaa2a1SNaoya Horiguchi /*
48198094945SNaoya Horiguchi  * Scan through the pages, checking if they satisfy the given conditions,
48298094945SNaoya Horiguchi  * and move them to the pagelist if they do.
483*d8835445SYang Shi  *
484*d8835445SYang Shi  * queue_pages_pte_range() has three possible return values:
485*d8835445SYang Shi  * 0 - pages are placed on the right node or queued successfully.
486*d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
487*d8835445SYang Shi  *     specified.
488*d8835445SYang Shi  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
489*d8835445SYang Shi  *        on a node that does not follow the policy.
49098094945SNaoya Horiguchi  */
4916f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
4926f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
4931da177e4SLinus Torvalds {
4946f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
4956f4576e3SNaoya Horiguchi 	struct page *page;
4966f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
4976f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
498c8633798SNaoya Horiguchi 	int ret;
499*d8835445SYang Shi 	bool has_unmovable = false;
50091612e0dSHugh Dickins 	pte_t *pte;
501705e87c0SHugh Dickins 	spinlock_t *ptl;
502941150a3SHugh Dickins 
503c8633798SNaoya Horiguchi 	ptl = pmd_trans_huge_lock(pmd, vma);
504c8633798SNaoya Horiguchi 	if (ptl) {
505c8633798SNaoya Horiguchi 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
506*d8835445SYang Shi 		if (ret != 2)
507a7f40cfeSYang Shi 			return ret;
508248db92dSKirill A. Shutemov 	}
509*d8835445SYang Shi 	/* THP was split, fall through to pte walk */
51091612e0dSHugh Dickins 
511337d9abfSNaoya Horiguchi 	if (pmd_trans_unstable(pmd))
512337d9abfSNaoya Horiguchi 		return 0;
51394723aafSMichal Hocko 
5146f4576e3SNaoya Horiguchi 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5156f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
51691612e0dSHugh Dickins 		if (!pte_present(*pte))
51791612e0dSHugh Dickins 			continue;
5186aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
5196aab341eSLinus Torvalds 		if (!page)
52091612e0dSHugh Dickins 			continue;
521053837fcSNick Piggin 		/*
52262b61f61SHugh Dickins 		 * vm_normal_page() filters out zero pages, but there might
52362b61f61SHugh Dickins 		 * still be PageReserved pages to skip, perhaps in a VDSO.
524053837fcSNick Piggin 		 */
525b79bc0a0SHugh Dickins 		if (PageReserved(page))
526f4598c8bSChristoph Lameter 			continue;
52788aaa2a1SNaoya Horiguchi 		if (!queue_pages_required(page, qp))
52838e35860SChristoph Lameter 			continue;
529a7f40cfeSYang Shi 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
530*d8835445SYang Shi 			/* MPOL_MF_STRICT must be specified if we get here */
531*d8835445SYang Shi 			if (!vma_migratable(vma)) {
532*d8835445SYang Shi 				has_unmovable = true;
533a7f40cfeSYang Shi 				break;
534*d8835445SYang Shi 			}
5356f4576e3SNaoya Horiguchi 			migrate_page_add(page, qp->pagelist, flags);
536a7f40cfeSYang Shi 		} else
537a7f40cfeSYang Shi 			break;
5386f4576e3SNaoya Horiguchi 	}
5396f4576e3SNaoya Horiguchi 	pte_unmap_unlock(pte - 1, ptl);
5406f4576e3SNaoya Horiguchi 	cond_resched();
541*d8835445SYang Shi 
542*d8835445SYang Shi 	if (has_unmovable)
543*d8835445SYang Shi 		return 1;
544*d8835445SYang Shi 
545a7f40cfeSYang Shi 	return addr != end ? -EIO : 0;
54691612e0dSHugh Dickins }
54791612e0dSHugh Dickins 
5486f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
5496f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5506f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
551e2d8cf40SNaoya Horiguchi {
552e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5536f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5546f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
555e2d8cf40SNaoya Horiguchi 	struct page *page;
556cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
557d4c54919SNaoya Horiguchi 	pte_t entry;
558e2d8cf40SNaoya Horiguchi 
5596f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5606f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
561d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
562d4c54919SNaoya Horiguchi 		goto unlock;
563d4c54919SNaoya Horiguchi 	page = pte_page(entry);
56488aaa2a1SNaoya Horiguchi 	if (!queue_pages_required(page, qp))
565e2d8cf40SNaoya Horiguchi 		goto unlock;
566e2d8cf40SNaoya Horiguchi 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
567e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
568e2d8cf40SNaoya Horiguchi 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
5696f4576e3SNaoya Horiguchi 		isolate_huge_page(page, qp->pagelist);
570e2d8cf40SNaoya Horiguchi unlock:
571cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
572e2d8cf40SNaoya Horiguchi #else
573e2d8cf40SNaoya Horiguchi 	BUG();
574e2d8cf40SNaoya Horiguchi #endif
57591612e0dSHugh Dickins 	return 0;
5761da177e4SLinus Torvalds }
5771da177e4SLinus Torvalds 
5785877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
579b24f53a0SLee Schermerhorn /*
5804b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses to be inaccessible.
5814b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
5824b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
5834b10e7d5SMel Gorman  *
5844b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
5854b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
5864b10e7d5SMel Gorman  * changes to the core.
587b24f53a0SLee Schermerhorn  */
5884b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
5894b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
590b24f53a0SLee Schermerhorn {
5914b10e7d5SMel Gorman 	int nr_updated;
592b24f53a0SLee Schermerhorn 
5934d942466SMel Gorman 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
59403c5a6e1SMel Gorman 	if (nr_updated)
59503c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
596b24f53a0SLee Schermerhorn 
5974b10e7d5SMel Gorman 	return nr_updated;
598b24f53a0SLee Schermerhorn }
599b24f53a0SLee Schermerhorn #else
600b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
601b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
602b24f53a0SLee Schermerhorn {
603b24f53a0SLee Schermerhorn 	return 0;
604b24f53a0SLee Schermerhorn }
6055877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
606b24f53a0SLee Schermerhorn 
6076f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
6086f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
6091da177e4SLinus Torvalds {
6106f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
6116f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
6125b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
6136f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
614dc9aa5b9SChristoph Lameter 
615a7f40cfeSYang Shi 	/*
616a7f40cfeSYang Shi 	 * Need check MPOL_MF_STRICT to return -EIO if possible
617a7f40cfeSYang Shi 	 * regardless of vma_migratable
618a7f40cfeSYang Shi 	 */
619a7f40cfeSYang Shi 	if (!vma_migratable(vma) &&
620a7f40cfeSYang Shi 	    !(flags & MPOL_MF_STRICT))
62148684a65SNaoya Horiguchi 		return 1;
62248684a65SNaoya Horiguchi 
6235b952b3cSAndi Kleen 	if (endvma > end)
6245b952b3cSAndi Kleen 		endvma = end;
6255b952b3cSAndi Kleen 	if (vma->vm_start > start)
6265b952b3cSAndi Kleen 		start = vma->vm_start;
627b24f53a0SLee Schermerhorn 
628b24f53a0SLee Schermerhorn 	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
629b24f53a0SLee Schermerhorn 		if (!vma->vm_next && vma->vm_end < end)
630d05f0cdcSHugh Dickins 			return -EFAULT;
6316f4576e3SNaoya Horiguchi 		if (qp->prev && qp->prev->vm_end < vma->vm_start)
632d05f0cdcSHugh Dickins 			return -EFAULT;
633b24f53a0SLee Schermerhorn 	}
634b24f53a0SLee Schermerhorn 
6356f4576e3SNaoya Horiguchi 	qp->prev = vma;
6366f4576e3SNaoya Horiguchi 
637b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
6382c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
6394355c018SLiang Chen 		if (!is_vm_hugetlb_page(vma) &&
6404355c018SLiang Chen 			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
6414355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
642b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
6436f4576e3SNaoya Horiguchi 		return 1;
644b24f53a0SLee Schermerhorn 	}
645b24f53a0SLee Schermerhorn 
6466f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
647a7f40cfeSYang Shi 	if (flags & MPOL_MF_VALID)
6486f4576e3SNaoya Horiguchi 		return 0;
6496f4576e3SNaoya Horiguchi 	return 1;
6506f4576e3SNaoya Horiguchi }
651b24f53a0SLee Schermerhorn 
6526f4576e3SNaoya Horiguchi /*
6536f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
6546f4576e3SNaoya Horiguchi  *
6556f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
6566f4576e3SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued to the pagelist, which
657*d8835445SYang Shi  * is passed via @private.
658*d8835445SYang Shi  *
659*d8835445SYang Shi  * queue_pages_range() has three possible return values:
660*d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
661*d8835445SYang Shi  *     specified.
662*d8835445SYang Shi  * 0 - queue pages successfully or no misplaced page.
663*d8835445SYang Shi  * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
6646f4576e3SNaoya Horiguchi  */
6656f4576e3SNaoya Horiguchi static int
6666f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
6676f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
6686f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
6696f4576e3SNaoya Horiguchi {
6706f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
6716f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
6726f4576e3SNaoya Horiguchi 		.flags = flags,
6736f4576e3SNaoya Horiguchi 		.nmask = nodes,
6746f4576e3SNaoya Horiguchi 		.prev = NULL,
6756f4576e3SNaoya Horiguchi 	};
6766f4576e3SNaoya Horiguchi 	struct mm_walk queue_pages_walk = {
6776f4576e3SNaoya Horiguchi 		.hugetlb_entry = queue_pages_hugetlb,
6786f4576e3SNaoya Horiguchi 		.pmd_entry = queue_pages_pte_range,
6796f4576e3SNaoya Horiguchi 		.test_walk = queue_pages_test_walk,
6806f4576e3SNaoya Horiguchi 		.mm = mm,
6816f4576e3SNaoya Horiguchi 		.private = &qp,
6826f4576e3SNaoya Horiguchi 	};
6836f4576e3SNaoya Horiguchi 
6846f4576e3SNaoya Horiguchi 	return walk_page_range(start, end, &queue_pages_walk);
6851da177e4SLinus Torvalds }
6861da177e4SLinus Torvalds 
687869833f2SKOSAKI Motohiro /*
688869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
689869833f2SKOSAKI Motohiro  * This must be called with the mmap_sem held for writing.
690869833f2SKOSAKI Motohiro  */
691869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
692869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
6938d34694cSKOSAKI Motohiro {
694869833f2SKOSAKI Motohiro 	int err;
695869833f2SKOSAKI Motohiro 	struct mempolicy *old;
696869833f2SKOSAKI Motohiro 	struct mempolicy *new;
6978d34694cSKOSAKI Motohiro 
6988d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
6998d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
7008d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
7018d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
7028d34694cSKOSAKI Motohiro 
703869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
704869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
705869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
706869833f2SKOSAKI Motohiro 
707869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
7088d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
709869833f2SKOSAKI Motohiro 		if (err)
710869833f2SKOSAKI Motohiro 			goto err_out;
7118d34694cSKOSAKI Motohiro 	}
712869833f2SKOSAKI Motohiro 
713869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
714869833f2SKOSAKI Motohiro 	vma->vm_policy = new; /* protected by mmap_sem */
715869833f2SKOSAKI Motohiro 	mpol_put(old);
716869833f2SKOSAKI Motohiro 
717869833f2SKOSAKI Motohiro 	return 0;
718869833f2SKOSAKI Motohiro  err_out:
719869833f2SKOSAKI Motohiro 	mpol_put(new);
7208d34694cSKOSAKI Motohiro 	return err;
7218d34694cSKOSAKI Motohiro }
7228d34694cSKOSAKI Motohiro 
7231da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
7249d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
7259d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
7261da177e4SLinus Torvalds {
7271da177e4SLinus Torvalds 	struct vm_area_struct *next;
7289d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
7299d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
7309d8cebd4SKOSAKI Motohiro 	int err = 0;
731e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
7329d8cebd4SKOSAKI Motohiro 	unsigned long vmstart;
7339d8cebd4SKOSAKI Motohiro 	unsigned long vmend;
7341da177e4SLinus Torvalds 
735097d5910SLinus Torvalds 	vma = find_vma(mm, start);
7369d8cebd4SKOSAKI Motohiro 	if (!vma || vma->vm_start > start)
7379d8cebd4SKOSAKI Motohiro 		return -EFAULT;
7389d8cebd4SKOSAKI Motohiro 
739097d5910SLinus Torvalds 	prev = vma->vm_prev;
740e26a5114SKOSAKI Motohiro 	if (start > vma->vm_start)
741e26a5114SKOSAKI Motohiro 		prev = vma;
742e26a5114SKOSAKI Motohiro 
7439d8cebd4SKOSAKI Motohiro 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
7441da177e4SLinus Torvalds 		next = vma->vm_next;
7459d8cebd4SKOSAKI Motohiro 		vmstart = max(start, vma->vm_start);
7469d8cebd4SKOSAKI Motohiro 		vmend   = min(end, vma->vm_end);
7479d8cebd4SKOSAKI Motohiro 
748e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
749e26a5114SKOSAKI Motohiro 			continue;
750e26a5114SKOSAKI Motohiro 
751e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
752e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
7539d8cebd4SKOSAKI Motohiro 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
754e26a5114SKOSAKI Motohiro 				 vma->anon_vma, vma->vm_file, pgoff,
75519a809afSAndrea Arcangeli 				 new_pol, vma->vm_userfaultfd_ctx);
7569d8cebd4SKOSAKI Motohiro 		if (prev) {
7579d8cebd4SKOSAKI Motohiro 			vma = prev;
7589d8cebd4SKOSAKI Motohiro 			next = vma->vm_next;
7593964acd0SOleg Nesterov 			if (mpol_equal(vma_policy(vma), new_pol))
7609d8cebd4SKOSAKI Motohiro 				continue;
7613964acd0SOleg Nesterov 			/* vma_merge() joined vma && vma->next, case 8 */
7623964acd0SOleg Nesterov 			goto replace;
7631da177e4SLinus Torvalds 		}
7649d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
7659d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
7669d8cebd4SKOSAKI Motohiro 			if (err)
7679d8cebd4SKOSAKI Motohiro 				goto out;
7689d8cebd4SKOSAKI Motohiro 		}
7699d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
7709d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmend, 0);
7719d8cebd4SKOSAKI Motohiro 			if (err)
7729d8cebd4SKOSAKI Motohiro 				goto out;
7739d8cebd4SKOSAKI Motohiro 		}
7743964acd0SOleg Nesterov  replace:
775869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
7769d8cebd4SKOSAKI Motohiro 		if (err)
7779d8cebd4SKOSAKI Motohiro 			goto out;
7789d8cebd4SKOSAKI Motohiro 	}
7799d8cebd4SKOSAKI Motohiro 
7809d8cebd4SKOSAKI Motohiro  out:
7811da177e4SLinus Torvalds 	return err;
7821da177e4SLinus Torvalds }
7831da177e4SLinus Torvalds 
7841da177e4SLinus Torvalds /* Set the process memory policy */
785028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
786028fec41SDavid Rientjes 			     nodemask_t *nodes)
7871da177e4SLinus Torvalds {
78858568d2aSMiao Xie 	struct mempolicy *new, *old;
7894bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
79058568d2aSMiao Xie 	int ret;
7911da177e4SLinus Torvalds 
7924bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
7934bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
794f4e53d91SLee Schermerhorn 
7954bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
7964bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
7974bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
7984bfc4495SKAMEZAWA Hiroyuki 		goto out;
7994bfc4495SKAMEZAWA Hiroyuki 	}
8002c7c3a7dSOleg Nesterov 
80158568d2aSMiao Xie 	task_lock(current);
8024bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
80358568d2aSMiao Xie 	if (ret) {
80458568d2aSMiao Xie 		task_unlock(current);
80558568d2aSMiao Xie 		mpol_put(new);
8064bfc4495SKAMEZAWA Hiroyuki 		goto out;
80758568d2aSMiao Xie 	}
80858568d2aSMiao Xie 	old = current->mempolicy;
8091da177e4SLinus Torvalds 	current->mempolicy = new;
81045816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
81145816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
81258568d2aSMiao Xie 	task_unlock(current);
81358568d2aSMiao Xie 	mpol_put(old);
8144bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8154bfc4495SKAMEZAWA Hiroyuki out:
8164bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
8174bfc4495SKAMEZAWA Hiroyuki 	return ret;
8181da177e4SLinus Torvalds }
8191da177e4SLinus Torvalds 
820bea904d5SLee Schermerhorn /*
821bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
82258568d2aSMiao Xie  *
82358568d2aSMiao Xie  * Called with task's alloc_lock held
824bea904d5SLee Schermerhorn  */
825bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8261da177e4SLinus Torvalds {
827dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
828bea904d5SLee Schermerhorn 	if (p == &default_policy)
829bea904d5SLee Schermerhorn 		return;
830bea904d5SLee Schermerhorn 
83145c4745aSLee Schermerhorn 	switch (p->mode) {
83219770b32SMel Gorman 	case MPOL_BIND:
83319770b32SMel Gorman 		/* Fall through */
8341da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
835dfcd3c0dSAndi Kleen 		*nodes = p->v.nodes;
8361da177e4SLinus Torvalds 		break;
8371da177e4SLinus Torvalds 	case MPOL_PREFERRED:
838fc36b8d3SLee Schermerhorn 		if (!(p->flags & MPOL_F_LOCAL))
839dfcd3c0dSAndi Kleen 			node_set(p->v.preferred_node, *nodes);
84053f2556bSLee Schermerhorn 		/* else return empty node mask for local allocation */
8411da177e4SLinus Torvalds 		break;
8421da177e4SLinus Torvalds 	default:
8431da177e4SLinus Torvalds 		BUG();
8441da177e4SLinus Torvalds 	}
8451da177e4SLinus Torvalds }
8461da177e4SLinus Torvalds 
8473b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr)
8481da177e4SLinus Torvalds {
8491da177e4SLinus Torvalds 	struct page *p;
8501da177e4SLinus Torvalds 	int err;
8511da177e4SLinus Torvalds 
8523b9aadf7SAndrea Arcangeli 	int locked = 1;
8533b9aadf7SAndrea Arcangeli 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
8541da177e4SLinus Torvalds 	if (err >= 0) {
8551da177e4SLinus Torvalds 		err = page_to_nid(p);
8561da177e4SLinus Torvalds 		put_page(p);
8571da177e4SLinus Torvalds 	}
8583b9aadf7SAndrea Arcangeli 	if (locked)
8593b9aadf7SAndrea Arcangeli 		up_read(&mm->mmap_sem);
8601da177e4SLinus Torvalds 	return err;
8611da177e4SLinus Torvalds }
8621da177e4SLinus Torvalds 
8631da177e4SLinus Torvalds /* Retrieve NUMA policy */
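/*
 * Illustrative userspace sketch (editorial addition, not kernel code): the
 * MPOL_F_NODE and MPOL_F_ADDR flags handled below let a process ask which
 * node currently backs a given address, e.g. with the <numaif.h> wrapper:
 *
 *	int node;
 *
 *	if (get_mempolicy(&node, NULL, 0, addr,
 *			  MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */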
864dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
8651da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
8661da177e4SLinus Torvalds {
8678bccd85fSChristoph Lameter 	int err;
8681da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
8691da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
8703b9aadf7SAndrea Arcangeli 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
8711da177e4SLinus Torvalds 
872754af6f5SLee Schermerhorn 	if (flags &
873754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
8741da177e4SLinus Torvalds 		return -EINVAL;
875754af6f5SLee Schermerhorn 
876754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
877754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
878754af6f5SLee Schermerhorn 			return -EINVAL;
879754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
88058568d2aSMiao Xie 		task_lock(current);
881754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
88258568d2aSMiao Xie 		task_unlock(current);
883754af6f5SLee Schermerhorn 		return 0;
884754af6f5SLee Schermerhorn 	}
885754af6f5SLee Schermerhorn 
8861da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
887bea904d5SLee Schermerhorn 		/*
888bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
889bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
890bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
891bea904d5SLee Schermerhorn 		 */
8921da177e4SLinus Torvalds 		down_read(&mm->mmap_sem);
8931da177e4SLinus Torvalds 		vma = find_vma_intersection(mm, addr, addr+1);
8941da177e4SLinus Torvalds 		if (!vma) {
8951da177e4SLinus Torvalds 			up_read(&mm->mmap_sem);
8961da177e4SLinus Torvalds 			return -EFAULT;
8971da177e4SLinus Torvalds 		}
8981da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
8991da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
9001da177e4SLinus Torvalds 		else
9011da177e4SLinus Torvalds 			pol = vma->vm_policy;
9021da177e4SLinus Torvalds 	} else if (addr)
9031da177e4SLinus Torvalds 		return -EINVAL;
9041da177e4SLinus Torvalds 
9051da177e4SLinus Torvalds 	if (!pol)
906bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9071da177e4SLinus Torvalds 
9081da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9091da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
9103b9aadf7SAndrea Arcangeli 			/*
9113b9aadf7SAndrea Arcangeli 			 * Take a refcount on the mpol, lookup_node()
9123b9aadf7SAndrea Arcangeli 			 * will drop the mmap_sem, so after calling
9133b9aadf7SAndrea Arcangeli 			 * lookup_node() only "pol" remains valid, "vma"
9143b9aadf7SAndrea Arcangeli 			 * is stale.
9153b9aadf7SAndrea Arcangeli 			 */
9163b9aadf7SAndrea Arcangeli 			pol_refcount = pol;
9173b9aadf7SAndrea Arcangeli 			vma = NULL;
9183b9aadf7SAndrea Arcangeli 			mpol_get(pol);
9193b9aadf7SAndrea Arcangeli 			err = lookup_node(mm, addr);
9201da177e4SLinus Torvalds 			if (err < 0)
9211da177e4SLinus Torvalds 				goto out;
9228bccd85fSChristoph Lameter 			*policy = err;
9231da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
92445c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
92545816682SVlastimil Babka 			*policy = next_node_in(current->il_prev, pol->v.nodes);
9261da177e4SLinus Torvalds 		} else {
9271da177e4SLinus Torvalds 			err = -EINVAL;
9281da177e4SLinus Torvalds 			goto out;
9291da177e4SLinus Torvalds 		}
930bea904d5SLee Schermerhorn 	} else {
931bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
932bea904d5SLee Schermerhorn 						pol->mode;
933d79df630SDavid Rientjes 		/*
934d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
935d79df630SDavid Rientjes 		 * the policy to userspace.
936d79df630SDavid Rientjes 		 */
937d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
938bea904d5SLee Schermerhorn 	}
9391da177e4SLinus Torvalds 
9401da177e4SLinus Torvalds 	err = 0;
94158568d2aSMiao Xie 	if (nmask) {
942c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
943c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
944c6b6ef8bSLee Schermerhorn 		} else {
94558568d2aSMiao Xie 			task_lock(current);
946bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
94758568d2aSMiao Xie 			task_unlock(current);
94858568d2aSMiao Xie 		}
949c6b6ef8bSLee Schermerhorn 	}
9501da177e4SLinus Torvalds 
9511da177e4SLinus Torvalds  out:
95252cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
9531da177e4SLinus Torvalds 	if (vma)
9543b9aadf7SAndrea Arcangeli 		up_read(&mm->mmap_sem);
9553b9aadf7SAndrea Arcangeli 	if (pol_refcount)
9563b9aadf7SAndrea Arcangeli 		mpol_put(pol_refcount);
9571da177e4SLinus Torvalds 	return err;
9581da177e4SLinus Torvalds }
9591da177e4SLinus Torvalds 
960b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
9618bccd85fSChristoph Lameter /*
962c8633798SNaoya Horiguchi  * page migration, thp tail pages can be passed.
9636ce3c4c0SChristoph Lameter  */
964fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
965fc301289SChristoph Lameter 				unsigned long flags)
9666ce3c4c0SChristoph Lameter {
967c8633798SNaoya Horiguchi 	struct page *head = compound_head(page);
9686ce3c4c0SChristoph Lameter 	/*
969fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
9706ce3c4c0SChristoph Lameter 	 */
971c8633798SNaoya Horiguchi 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
972c8633798SNaoya Horiguchi 		if (!isolate_lru_page(head)) {
973c8633798SNaoya Horiguchi 			list_add_tail(&head->lru, pagelist);
974c8633798SNaoya Horiguchi 			mod_node_page_state(page_pgdat(head),
975c8633798SNaoya Horiguchi 				NR_ISOLATED_ANON + page_is_file_cache(head),
976c8633798SNaoya Horiguchi 				hpage_nr_pages(head));
97762695a84SNick Piggin 		}
97862695a84SNick Piggin 	}
9796ce3c4c0SChristoph Lameter }
9806ce3c4c0SChristoph Lameter 
981a49bd4d7SMichal Hocko /* page allocation callback for NUMA node migration */
982666feb21SMichal Hocko struct page *alloc_new_node_page(struct page *page, unsigned long node)
98395a402c3SChristoph Lameter {
984e2d8cf40SNaoya Horiguchi 	if (PageHuge(page))
985e2d8cf40SNaoya Horiguchi 		return alloc_huge_page_node(page_hstate(compound_head(page)),
986e2d8cf40SNaoya Horiguchi 					node);
98794723aafSMichal Hocko 	else if (PageTransHuge(page)) {
988c8633798SNaoya Horiguchi 		struct page *thp;
989c8633798SNaoya Horiguchi 
990c8633798SNaoya Horiguchi 		thp = alloc_pages_node(node,
991c8633798SNaoya Horiguchi 			(GFP_TRANSHUGE | __GFP_THISNODE),
992c8633798SNaoya Horiguchi 			HPAGE_PMD_ORDER);
993c8633798SNaoya Horiguchi 		if (!thp)
994c8633798SNaoya Horiguchi 			return NULL;
995c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
996c8633798SNaoya Horiguchi 		return thp;
997c8633798SNaoya Horiguchi 	} else
99896db800fSVlastimil Babka 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
999b360edb4SDavid Rientjes 						    __GFP_THISNODE, 0);
100095a402c3SChristoph Lameter }
100195a402c3SChristoph Lameter 
10026ce3c4c0SChristoph Lameter /*
10037e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10047e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10057e2ab150SChristoph Lameter  */
1006dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1007dbcb0f19SAdrian Bunk 			   int flags)
10087e2ab150SChristoph Lameter {
10097e2ab150SChristoph Lameter 	nodemask_t nmask;
10107e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10117e2ab150SChristoph Lameter 	int err = 0;
10127e2ab150SChristoph Lameter 
10137e2ab150SChristoph Lameter 	nodes_clear(nmask);
10147e2ab150SChristoph Lameter 	node_set(source, nmask);
10157e2ab150SChristoph Lameter 
101608270807SMinchan Kim 	/*
101708270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
101808270807SMinchan Kim 	 * need migration.  Between passing in the full user address
101908270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
102008270807SMinchan Kim 	 */
102108270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
102298094945SNaoya Horiguchi 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
10237e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10247e2ab150SChristoph Lameter 
1025cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1026a49bd4d7SMichal Hocko 		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
10279c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_SYSCALL);
1028cf608ac1SMinchan Kim 		if (err)
1029e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1030cf608ac1SMinchan Kim 	}
103195a402c3SChristoph Lameter 
10327e2ab150SChristoph Lameter 	return err;
10337e2ab150SChristoph Lameter }
10347e2ab150SChristoph Lameter 
10357e2ab150SChristoph Lameter /*
10367e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10377e2ab150SChristoph Lameter  * layout as much as possible.
103839743889SChristoph Lameter  *
103939743889SChristoph Lameter  * Returns the number of pages that could not be moved.
104039743889SChristoph Lameter  */
10410ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
10420ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
104339743889SChristoph Lameter {
10447e2ab150SChristoph Lameter 	int busy = 0;
10450aedadf9SChristoph Lameter 	int err;
10467e2ab150SChristoph Lameter 	nodemask_t tmp;
104739743889SChristoph Lameter 
10480aedadf9SChristoph Lameter 	err = migrate_prep();
10490aedadf9SChristoph Lameter 	if (err)
10500aedadf9SChristoph Lameter 		return err;
10510aedadf9SChristoph Lameter 
105239743889SChristoph Lameter 	down_read(&mm->mmap_sem);
1053d4984711SChristoph Lameter 
10547e2ab150SChristoph Lameter 	/*
10557e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10567e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
10577e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
10587e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
10597e2ab150SChristoph Lameter 	 *
10607e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
10617e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
10627e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
10637e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
10647e2ab150SChristoph Lameter 	 *
10657e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
10667e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
10677e2ab150SChristoph Lameter 	 * (nothing left to migrate).
10687e2ab150SChristoph Lameter 	 *
10697e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
10707e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
10717e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
10727e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
10737e2ab150SChristoph Lameter 	 * before migrating outgoing memory source that same node.
10747e2ab150SChristoph Lameter 	 * before migrating outgoing memory sourced from that same node.
10757e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
10767e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
10777e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
10787e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out then, with that pair.
1079ae0e47f0SJustin P. Mattock 	 * Otherwise, when we finish scanning tmp, we at least have the
10807e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
10817e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
10827e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
10837e2ab150SChristoph Lameter 	 */
10847e2ab150SChristoph Lameter 
10850ce72d4fSAndrew Morton 	tmp = *from;
10867e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
10877e2ab150SChristoph Lameter 		int s, d;
1088b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
10897e2ab150SChristoph Lameter 		int dest = 0;
10907e2ab150SChristoph Lameter 
10917e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
10924a5b18ccSLarry Woodman 
10934a5b18ccSLarry Woodman 			/*
10944a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
10954a5b18ccSLarry Woodman 			 * node relationship of the pages established between
10964a5b18ccSLarry Woodman 			 * threads and memory areas.
10974a5b18ccSLarry Woodman 			 *
10984a5b18ccSLarry Woodman 			 * However, if the number of source nodes is not equal to
10994a5b18ccSLarry Woodman 			 * the number of destination nodes, we cannot preserve
11004a5b18ccSLarry Woodman 			 * this node-relative relationship.  In that case, skip
11014a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11024a5b18ccSLarry Woodman 			 * mask.
11034a5b18ccSLarry Woodman 			 *
11044a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11054a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11064a5b18ccSLarry Woodman 			 */
11074a5b18ccSLarry Woodman 
11080ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11090ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11104a5b18ccSLarry Woodman 				continue;
11114a5b18ccSLarry Woodman 
11120ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11137e2ab150SChristoph Lameter 			if (s == d)
11147e2ab150SChristoph Lameter 				continue;
11157e2ab150SChristoph Lameter 
11167e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11177e2ab150SChristoph Lameter 			dest = d;
11187e2ab150SChristoph Lameter 
11197e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11207e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11217e2ab150SChristoph Lameter 				break;
11227e2ab150SChristoph Lameter 		}
1123b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11247e2ab150SChristoph Lameter 			break;
11257e2ab150SChristoph Lameter 
11267e2ab150SChristoph Lameter 		node_clear(source, tmp);
11277e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11287e2ab150SChristoph Lameter 		if (err > 0)
11297e2ab150SChristoph Lameter 			busy += err;
11307e2ab150SChristoph Lameter 		if (err < 0)
11317e2ab150SChristoph Lameter 			break;
113239743889SChristoph Lameter 	}
113339743889SChristoph Lameter 	up_read(&mm->mmap_sem);
11347e2ab150SChristoph Lameter 	if (err < 0)
11357e2ab150SChristoph Lameter 		return err;
11367e2ab150SChristoph Lameter 	return busy;
1137b20a3503SChristoph Lameter 
113839743889SChristoph Lameter }
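/*
 * Worked example (illustration only): with *from = {0,1} and *to = {2,3},
 * node_remap() maps source 0 -> 2 and source 1 -> 3, so the loop above
 * first migrates node 0's pages to node 2, clears bit 0 from tmp, and then
 * migrates node 1's pages to node 3.  A hypothetical caller-side sketch:
 *
 *	nodemask_t from = NODE_MASK_NONE;
 *	nodemask_t to = NODE_MASK_NONE;
 *
 *	node_set(0, from);
 *	node_set(1, from);
 *	node_set(2, to);
 *	node_set(3, to);
 *	busy = do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
 *
 * The return value is the number of pages left behind, or a negative
 * errno if isolation or migration failed outright.
 */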
113939743889SChristoph Lameter 
11403ad33b24SLee Schermerhorn /*
11413ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1142d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
11433ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
11443ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
11453ad33b24SLee Schermerhorn  * is in virtual address order.
11463ad33b24SLee Schermerhorn  */
1147666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
114895a402c3SChristoph Lameter {
1149d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
11503ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
115195a402c3SChristoph Lameter 
1152d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
11533ad33b24SLee Schermerhorn 	while (vma) {
11543ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11553ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11563ad33b24SLee Schermerhorn 			break;
11573ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11583ad33b24SLee Schermerhorn 	}
11593ad33b24SLee Schermerhorn 
116011c731e8SWanpeng Li 	if (PageHuge(page)) {
1161389c8178SMichal Hocko 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1162389c8178SMichal Hocko 				vma, address);
116394723aafSMichal Hocko 	} else if (PageTransHuge(page)) {
1164c8633798SNaoya Horiguchi 		struct page *thp;
1165c8633798SNaoya Horiguchi 
1166356ff8a9SDavid Rientjes 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1167356ff8a9SDavid Rientjes 					 HPAGE_PMD_ORDER);
1168c8633798SNaoya Horiguchi 		if (!thp)
1169c8633798SNaoya Horiguchi 			return NULL;
1170c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
1171c8633798SNaoya Horiguchi 		return thp;
117211c731e8SWanpeng Li 	}
117311c731e8SWanpeng Li 	/*
117411c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
117511c731e8SWanpeng Li 	 */
11760f556856SMichal Hocko 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
11770f556856SMichal Hocko 			vma, address);
117895a402c3SChristoph Lameter }
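/*
 * Usage note (illustration only): new_page() is handed to migrate_pages()
 * as the allocation callback, e.g. in do_mbind() below:
 *
 *	nr_failed = migrate_pages(&pagelist, new_page, NULL,
 *				  start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
 *
 * @start is threaded through as the opaque "private" argument, which is
 * why the search for the owning VMA begins at that address.
 */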
1179b20a3503SChristoph Lameter #else
1180b20a3503SChristoph Lameter 
1181b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1182b20a3503SChristoph Lameter 				unsigned long flags)
1183b20a3503SChristoph Lameter {
1184b20a3503SChristoph Lameter }
1185b20a3503SChristoph Lameter 
11860ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11870ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1188b20a3503SChristoph Lameter {
1189b20a3503SChristoph Lameter 	return -ENOSYS;
1190b20a3503SChristoph Lameter }
119195a402c3SChristoph Lameter 
1192666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
119395a402c3SChristoph Lameter {
119495a402c3SChristoph Lameter 	return NULL;
119595a402c3SChristoph Lameter }
1196b20a3503SChristoph Lameter #endif
1197b20a3503SChristoph Lameter 
1198dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1199028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1200028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12016ce3c4c0SChristoph Lameter {
12026ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
12036ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12046ce3c4c0SChristoph Lameter 	unsigned long end;
12056ce3c4c0SChristoph Lameter 	int err;
1206*d8835445SYang Shi 	int ret;
12076ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12086ce3c4c0SChristoph Lameter 
1209b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12106ce3c4c0SChristoph Lameter 		return -EINVAL;
121174c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12126ce3c4c0SChristoph Lameter 		return -EPERM;
12136ce3c4c0SChristoph Lameter 
12146ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12156ce3c4c0SChristoph Lameter 		return -EINVAL;
12166ce3c4c0SChristoph Lameter 
12176ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12186ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12196ce3c4c0SChristoph Lameter 
12206ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
12216ce3c4c0SChristoph Lameter 	end = start + len;
12226ce3c4c0SChristoph Lameter 
12236ce3c4c0SChristoph Lameter 	if (end < start)
12246ce3c4c0SChristoph Lameter 		return -EINVAL;
12256ce3c4c0SChristoph Lameter 	if (end == start)
12266ce3c4c0SChristoph Lameter 		return 0;
12276ce3c4c0SChristoph Lameter 
1228028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12296ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12306ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12316ce3c4c0SChristoph Lameter 
1232b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1233b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1234b24f53a0SLee Schermerhorn 
12356ce3c4c0SChristoph Lameter 	/*
12366ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
12376ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
12386ce3c4c0SChristoph Lameter 	 */
12396ce3c4c0SChristoph Lameter 	if (!new)
12406ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12416ce3c4c0SChristoph Lameter 
1242028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1243028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
124400ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
12456ce3c4c0SChristoph Lameter 
12460aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
12470aedadf9SChristoph Lameter 
12480aedadf9SChristoph Lameter 		err = migrate_prep();
12490aedadf9SChristoph Lameter 		if (err)
1250b05ca738SKOSAKI Motohiro 			goto mpol_out;
12510aedadf9SChristoph Lameter 	}
12524bfc4495SKAMEZAWA Hiroyuki 	{
12534bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12544bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
12556ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
125658568d2aSMiao Xie 			task_lock(current);
12574bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
125858568d2aSMiao Xie 			task_unlock(current);
12594bfc4495SKAMEZAWA Hiroyuki 			if (err)
126058568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12614bfc4495SKAMEZAWA Hiroyuki 		} else
12624bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12634bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12644bfc4495SKAMEZAWA Hiroyuki 	}
1265b05ca738SKOSAKI Motohiro 	if (err)
1266b05ca738SKOSAKI Motohiro 		goto mpol_out;
1267b05ca738SKOSAKI Motohiro 
1268*d8835445SYang Shi 	ret = queue_pages_range(mm, start, end, nmask,
12696ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1270*d8835445SYang Shi 
1271*d8835445SYang Shi 	if (ret < 0) {
1272*d8835445SYang Shi 		err = -EIO;
1273*d8835445SYang Shi 		goto up_out;
1274*d8835445SYang Shi 	}
1275*d8835445SYang Shi 
12769d8cebd4SKOSAKI Motohiro 	err = mbind_range(mm, start, end, new);
12777e2ab150SChristoph Lameter 
1278b24f53a0SLee Schermerhorn 	if (!err) {
1279b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1280b24f53a0SLee Schermerhorn 
1281cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1282b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1283d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1284d05f0cdcSHugh Dickins 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1285cf608ac1SMinchan Kim 			if (nr_failed)
128674060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1287cf608ac1SMinchan Kim 		}
12886ce3c4c0SChristoph Lameter 
1289*d8835445SYang Shi 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
12906ce3c4c0SChristoph Lameter 			err = -EIO;
1291ab8a3e14SKOSAKI Motohiro 	} else
1292b0e5fd73SJoonsoo Kim 		putback_movable_pages(&pagelist);
1293b20a3503SChristoph Lameter 
1294*d8835445SYang Shi up_out:
12956ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1296b05ca738SKOSAKI Motohiro mpol_out:
1297f0be3d32SLee Schermerhorn 	mpol_put(new);
12986ce3c4c0SChristoph Lameter 	return err;
12996ce3c4c0SChristoph Lameter }
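/*
 * Userspace view (hypothetical sketch, not kernel code): do_mbind() is
 * what ultimately services the mbind(2) system call.  Binding an
 * anonymous mapping to node 1 with strict enforcement could look
 * roughly like this in a user program using libnuma's <numaif.h>:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long nmask = 1UL << 1;		// node 1 only
 *	size_t len = 16 * 4096;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mbind(p, len, MPOL_BIND, &nmask, sizeof(nmask) * 8 + 1,
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT))
 *		perror("mbind");
 *
 * maxnode is an upper bound on the number of node bits plus one, which
 * is why one extra bit is passed; get_nodes() below subtracts it again.
 */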
13006ce3c4c0SChristoph Lameter 
130139743889SChristoph Lameter /*
13028bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13038bccd85fSChristoph Lameter  */
13048bccd85fSChristoph Lameter 
13058bccd85fSChristoph Lameter /* Copy a node mask from user space. */
130639743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13078bccd85fSChristoph Lameter 		     unsigned long maxnode)
13088bccd85fSChristoph Lameter {
13098bccd85fSChristoph Lameter 	unsigned long k;
131056521e7aSYisheng Xie 	unsigned long t;
13118bccd85fSChristoph Lameter 	unsigned long nlongs;
13128bccd85fSChristoph Lameter 	unsigned long endmask;
13138bccd85fSChristoph Lameter 
13148bccd85fSChristoph Lameter 	--maxnode;
13158bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13168bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13178bccd85fSChristoph Lameter 		return 0;
1318a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1319636f13c1SChris Wright 		return -EINVAL;
13208bccd85fSChristoph Lameter 
13218bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
13228bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
13238bccd85fSChristoph Lameter 		endmask = ~0UL;
13248bccd85fSChristoph Lameter 	else
13258bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
13268bccd85fSChristoph Lameter 
132756521e7aSYisheng Xie 	/*
132856521e7aSYisheng Xie 	 * When the user specifies more nodes than supported, just check
132956521e7aSYisheng Xie 	 * that the unsupported part is all zero.
133056521e7aSYisheng Xie 	 *
133156521e7aSYisheng Xie 	 * If maxnode has more longs than MAX_NUMNODES, check
133256521e7aSYisheng Xie 	 * the bits in that area first, and then go on to check
133356521e7aSYisheng Xie 	 * the remaining bits, which are equal to or bigger than MAX_NUMNODES.
133456521e7aSYisheng Xie 	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
133556521e7aSYisheng Xie 	 */
13368bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
13378bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
13388bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
13398bccd85fSChristoph Lameter 				return -EFAULT;
13408bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
13418bccd85fSChristoph Lameter 				if (t & endmask)
13428bccd85fSChristoph Lameter 					return -EINVAL;
13438bccd85fSChristoph Lameter 			} else if (t)
13448bccd85fSChristoph Lameter 				return -EINVAL;
13458bccd85fSChristoph Lameter 		}
13468bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
13478bccd85fSChristoph Lameter 		endmask = ~0UL;
13488bccd85fSChristoph Lameter 	}
13498bccd85fSChristoph Lameter 
135056521e7aSYisheng Xie 	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
135156521e7aSYisheng Xie 		unsigned long valid_mask = endmask;
135256521e7aSYisheng Xie 
135356521e7aSYisheng Xie 		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
135456521e7aSYisheng Xie 		if (get_user(t, nmask + nlongs - 1))
135556521e7aSYisheng Xie 			return -EFAULT;
135656521e7aSYisheng Xie 		if (t & valid_mask)
135756521e7aSYisheng Xie 			return -EINVAL;
135856521e7aSYisheng Xie 	}
135956521e7aSYisheng Xie 
13608bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
13618bccd85fSChristoph Lameter 		return -EFAULT;
13628bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
13638bccd85fSChristoph Lameter 	return 0;
13648bccd85fSChristoph Lameter }
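/*
 * Worked example (illustration only): on a 64-bit kernel with
 * MAX_NUMNODES == 64, a caller passing maxnode == 65 yields maxnode = 64
 * after the decrement, nlongs = 1 and endmask = ~0UL, so exactly one
 * unsigned long is copied from user space and no bits are masked off.
 * With maxnode == 17 instead, endmask becomes (1UL << 16) - 1 and
 * everything above node 15 is cleared from the copied word.
 */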
13658bccd85fSChristoph Lameter 
13668bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
13678bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
13688bccd85fSChristoph Lameter 			      nodemask_t *nodes)
13698bccd85fSChristoph Lameter {
13708bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1371050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
13728bccd85fSChristoph Lameter 
13738bccd85fSChristoph Lameter 	if (copy > nbytes) {
13748bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
13758bccd85fSChristoph Lameter 			return -EINVAL;
13768bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
13778bccd85fSChristoph Lameter 			return -EFAULT;
13788bccd85fSChristoph Lameter 		copy = nbytes;
13798bccd85fSChristoph Lameter 	}
13808bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
13818bccd85fSChristoph Lameter }
13828bccd85fSChristoph Lameter 
1383e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1384e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1385e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
13868bccd85fSChristoph Lameter {
13878bccd85fSChristoph Lameter 	nodemask_t nodes;
13888bccd85fSChristoph Lameter 	int err;
1389028fec41SDavid Rientjes 	unsigned short mode_flags;
13908bccd85fSChristoph Lameter 
1391028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1392028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1393a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1394a3b51e01SDavid Rientjes 		return -EINVAL;
13954c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
13964c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
13974c50bc01SDavid Rientjes 		return -EINVAL;
13988bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13998bccd85fSChristoph Lameter 	if (err)
14008bccd85fSChristoph Lameter 		return err;
1401028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
14028bccd85fSChristoph Lameter }
14038bccd85fSChristoph Lameter 
1404e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1405e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1406e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1407e7dc9ad6SDominik Brodowski {
1408e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1409e7dc9ad6SDominik Brodowski }
1410e7dc9ad6SDominik Brodowski 
14118bccd85fSChristoph Lameter /* Set the process memory policy */
1412af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1413af03c4acSDominik Brodowski 				 unsigned long maxnode)
14148bccd85fSChristoph Lameter {
14158bccd85fSChristoph Lameter 	int err;
14168bccd85fSChristoph Lameter 	nodemask_t nodes;
1417028fec41SDavid Rientjes 	unsigned short flags;
14188bccd85fSChristoph Lameter 
1419028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1420028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1421028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
14228bccd85fSChristoph Lameter 		return -EINVAL;
14234c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
14244c50bc01SDavid Rientjes 		return -EINVAL;
14258bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14268bccd85fSChristoph Lameter 	if (err)
14278bccd85fSChristoph Lameter 		return err;
1428028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
14298bccd85fSChristoph Lameter }
14308bccd85fSChristoph Lameter 
1431af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1432af03c4acSDominik Brodowski 		unsigned long, maxnode)
1433af03c4acSDominik Brodowski {
1434af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1435af03c4acSDominik Brodowski }
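/*
 * Userspace view (hypothetical sketch): set_mempolicy(2) changes the
 * calling task's policy for future allocations.  Interleaving new pages
 * across nodes 0 and 1 could look like:
 *
 *	#include <numaif.h>
 *
 *	unsigned long nmask = (1UL << 0) | (1UL << 1);
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nmask, sizeof(nmask) * 8 + 1))
 *		perror("set_mempolicy");
 *
 * Mode flags such as MPOL_F_STATIC_NODES are ORed into the mode argument
 * and split back out by kernel_set_mempolicy() above.
 */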
1436af03c4acSDominik Brodowski 
1437b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1438b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1439b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
144039743889SChristoph Lameter {
1441596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
144239743889SChristoph Lameter 	struct task_struct *task;
144339743889SChristoph Lameter 	nodemask_t task_nodes;
144439743889SChristoph Lameter 	int err;
1445596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1446596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1447596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
144839743889SChristoph Lameter 
1449596d7cfaSKOSAKI Motohiro 	if (!scratch)
1450596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
145139743889SChristoph Lameter 
1452596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1453596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1454596d7cfaSKOSAKI Motohiro 
1455596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
145639743889SChristoph Lameter 	if (err)
1457596d7cfaSKOSAKI Motohiro 		goto out;
1458596d7cfaSKOSAKI Motohiro 
1459596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1460596d7cfaSKOSAKI Motohiro 	if (err)
1461596d7cfaSKOSAKI Motohiro 		goto out;
146239743889SChristoph Lameter 
146339743889SChristoph Lameter 	/* Find the mm_struct */
146455cfaa3cSZeng Zhaoming 	rcu_read_lock();
1465228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
146639743889SChristoph Lameter 	if (!task) {
146755cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1468596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1469596d7cfaSKOSAKI Motohiro 		goto out;
147039743889SChristoph Lameter 	}
14713268c63eSChristoph Lameter 	get_task_struct(task);
147239743889SChristoph Lameter 
1473596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
147439743889SChristoph Lameter 
147539743889SChristoph Lameter 	/*
147631367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
147731367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
147839743889SChristoph Lameter 	 */
147931367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1480c69e8d9cSDavid Howells 		rcu_read_unlock();
148139743889SChristoph Lameter 		err = -EPERM;
14823268c63eSChristoph Lameter 		goto out_put;
148339743889SChristoph Lameter 	}
1484c69e8d9cSDavid Howells 	rcu_read_unlock();
148539743889SChristoph Lameter 
148639743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
148739743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1488596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
148939743889SChristoph Lameter 		err = -EPERM;
14903268c63eSChristoph Lameter 		goto out_put;
149139743889SChristoph Lameter 	}
149239743889SChristoph Lameter 
14930486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
14940486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
14950486a38bSYisheng Xie 	if (nodes_empty(*new))
14963268c63eSChristoph Lameter 		goto out_put;
14970486a38bSYisheng Xie 
14980486a38bSYisheng Xie 	nodes_and(*new, *new, node_states[N_MEMORY]);
14990486a38bSYisheng Xie 	if (nodes_empty(*new))
15000486a38bSYisheng Xie 		goto out_put;
15013b42d28bSChristoph Lameter 
150286c3a764SDavid Quigley 	err = security_task_movememory(task);
150386c3a764SDavid Quigley 	if (err)
15043268c63eSChristoph Lameter 		goto out_put;
150586c3a764SDavid Quigley 
15063268c63eSChristoph Lameter 	mm = get_task_mm(task);
15073268c63eSChristoph Lameter 	put_task_struct(task);
1508f2a9ef88SSasha Levin 
1509f2a9ef88SSasha Levin 	if (!mm) {
1510f2a9ef88SSasha Levin 		err = -EINVAL;
1511f2a9ef88SSasha Levin 		goto out;
1512f2a9ef88SSasha Levin 	}
1513f2a9ef88SSasha Levin 
1514596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
151574c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
15163268c63eSChristoph Lameter 
151739743889SChristoph Lameter 	mmput(mm);
15183268c63eSChristoph Lameter out:
1519596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1520596d7cfaSKOSAKI Motohiro 
152139743889SChristoph Lameter 	return err;
15223268c63eSChristoph Lameter 
15233268c63eSChristoph Lameter out_put:
15243268c63eSChristoph Lameter 	put_task_struct(task);
15253268c63eSChristoph Lameter 	goto out;
15263268c63eSChristoph Lameter 
152739743889SChristoph Lameter }
152839743889SChristoph Lameter 
1529b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1530b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1531b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1532b6e9b0baSDominik Brodowski {
1533b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1534b6e9b0baSDominik Brodowski }
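/*
 * Userspace view (hypothetical sketch): migrate_pages(2) moves all pages
 * of another process from one node set to another, subject to the
 * permission checks in kernel_migrate_pages() above.  target_pid below
 * is a placeholder for the process being migrated:
 *
 *	#include <numaif.h>
 *
 *	unsigned long old = 1UL << 0;	// currently on node 0
 *	unsigned long new = 1UL << 1;	// move to node 1
 *	long left;
 *
 *	left = migrate_pages(target_pid, sizeof(old) * 8 + 1, &old, &new);
 *	// left > 0: pages that could not be moved; -1: error in errno
 */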
1535b6e9b0baSDominik Brodowski 
153639743889SChristoph Lameter 
15378bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1538af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1539af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1540af03c4acSDominik Brodowski 				unsigned long maxnode,
1541af03c4acSDominik Brodowski 				unsigned long addr,
1542af03c4acSDominik Brodowski 				unsigned long flags)
15438bccd85fSChristoph Lameter {
1544dbcb0f19SAdrian Bunk 	int err;
1545dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
15468bccd85fSChristoph Lameter 	nodemask_t nodes;
15478bccd85fSChristoph Lameter 
1548050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
15498bccd85fSChristoph Lameter 		return -EINVAL;
15508bccd85fSChristoph Lameter 
15518bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
15528bccd85fSChristoph Lameter 
15538bccd85fSChristoph Lameter 	if (err)
15548bccd85fSChristoph Lameter 		return err;
15558bccd85fSChristoph Lameter 
15568bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
15578bccd85fSChristoph Lameter 		return -EFAULT;
15588bccd85fSChristoph Lameter 
15598bccd85fSChristoph Lameter 	if (nmask)
15608bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
15618bccd85fSChristoph Lameter 
15628bccd85fSChristoph Lameter 	return err;
15638bccd85fSChristoph Lameter }
15648bccd85fSChristoph Lameter 
1565af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1566af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1567af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1568af03c4acSDominik Brodowski {
1569af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1570af03c4acSDominik Brodowski }
1571af03c4acSDominik Brodowski 
15721da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
15731da177e4SLinus Torvalds 
1574c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1575c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1576c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1577c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
15781da177e4SLinus Torvalds {
15791da177e4SLinus Torvalds 	long err;
15801da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15811da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15821da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15831da177e4SLinus Torvalds 
1584050c17f2SRalph Campbell 	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
15851da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15861da177e4SLinus Torvalds 
15871da177e4SLinus Torvalds 	if (nmask)
15881da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15891da177e4SLinus Torvalds 
1590af03c4acSDominik Brodowski 	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
15911da177e4SLinus Torvalds 
15921da177e4SLinus Torvalds 	if (!err && nmask) {
15932bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
15942bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
15952bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
15961da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
15971da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
15981da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
15991da177e4SLinus Torvalds 	}
16001da177e4SLinus Torvalds 
16011da177e4SLinus Torvalds 	return err;
16021da177e4SLinus Torvalds }
16031da177e4SLinus Torvalds 
1604c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1605c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
16061da177e4SLinus Torvalds {
16071da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16081da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
16091da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
16101da177e4SLinus Torvalds 
16111da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
16121da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16131da177e4SLinus Torvalds 
16141da177e4SLinus Torvalds 	if (nmask) {
1615cf01fb99SChris Salls 		if (compat_get_bitmap(bm, nmask, nr_bits))
16161da177e4SLinus Torvalds 			return -EFAULT;
1617cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1618cf01fb99SChris Salls 		if (copy_to_user(nm, bm, alloc_size))
1619cf01fb99SChris Salls 			return -EFAULT;
1620cf01fb99SChris Salls 	}
16211da177e4SLinus Torvalds 
1622af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nm, nr_bits+1);
16231da177e4SLinus Torvalds }
16241da177e4SLinus Torvalds 
1625c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1626c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1627c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
16281da177e4SLinus Torvalds {
16291da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16301da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1631dfcd3c0dSAndi Kleen 	nodemask_t bm;
16321da177e4SLinus Torvalds 
16331da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
16341da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16351da177e4SLinus Torvalds 
16361da177e4SLinus Torvalds 	if (nmask) {
1637cf01fb99SChris Salls 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
16381da177e4SLinus Torvalds 			return -EFAULT;
1639cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1640cf01fb99SChris Salls 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1641cf01fb99SChris Salls 			return -EFAULT;
1642cf01fb99SChris Salls 	}
16431da177e4SLinus Torvalds 
1644e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
16451da177e4SLinus Torvalds }
16461da177e4SLinus Torvalds 
1647b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1648b6e9b0baSDominik Brodowski 		       compat_ulong_t, maxnode,
1649b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, old_nodes,
1650b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, new_nodes)
1651b6e9b0baSDominik Brodowski {
1652b6e9b0baSDominik Brodowski 	unsigned long __user *old = NULL;
1653b6e9b0baSDominik Brodowski 	unsigned long __user *new = NULL;
1654b6e9b0baSDominik Brodowski 	nodemask_t tmp_mask;
1655b6e9b0baSDominik Brodowski 	unsigned long nr_bits;
1656b6e9b0baSDominik Brodowski 	unsigned long size;
1657b6e9b0baSDominik Brodowski 
1658b6e9b0baSDominik Brodowski 	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1659b6e9b0baSDominik Brodowski 	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1660b6e9b0baSDominik Brodowski 	if (old_nodes) {
1661b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1662b6e9b0baSDominik Brodowski 			return -EFAULT;
1663b6e9b0baSDominik Brodowski 		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1664b6e9b0baSDominik Brodowski 		if (new_nodes)
1665b6e9b0baSDominik Brodowski 			new = old + size / sizeof(unsigned long);
1666b6e9b0baSDominik Brodowski 		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1667b6e9b0baSDominik Brodowski 			return -EFAULT;
1668b6e9b0baSDominik Brodowski 	}
1669b6e9b0baSDominik Brodowski 	if (new_nodes) {
1670b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1671b6e9b0baSDominik Brodowski 			return -EFAULT;
1672b6e9b0baSDominik Brodowski 		if (new == NULL)
1673b6e9b0baSDominik Brodowski 			new = compat_alloc_user_space(size);
1674b6e9b0baSDominik Brodowski 		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1675b6e9b0baSDominik Brodowski 			return -EFAULT;
1676b6e9b0baSDominik Brodowski 	}
1677b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1678b6e9b0baSDominik Brodowski }
1679b6e9b0baSDominik Brodowski 
1680b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */
16811da177e4SLinus Torvalds 
168274d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
168374d2c3a0SOleg Nesterov 						unsigned long addr)
16841da177e4SLinus Torvalds {
16858d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
16861da177e4SLinus Torvalds 
16871da177e4SLinus Torvalds 	if (vma) {
1688480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
16898d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
169000442ad0SMel Gorman 		} else if (vma->vm_policy) {
16911da177e4SLinus Torvalds 			pol = vma->vm_policy;
169200442ad0SMel Gorman 
169300442ad0SMel Gorman 			/*
169400442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
169500442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
169600442ad0SMel Gorman 			 * count on these policies which will be dropped by
169700442ad0SMel Gorman 			 * mpol_cond_put() later
169800442ad0SMel Gorman 			 */
169900442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
170000442ad0SMel Gorman 				mpol_get(pol);
170100442ad0SMel Gorman 		}
17021da177e4SLinus Torvalds 	}
1703f15ca78eSOleg Nesterov 
170474d2c3a0SOleg Nesterov 	return pol;
170574d2c3a0SOleg Nesterov }
170674d2c3a0SOleg Nesterov 
170774d2c3a0SOleg Nesterov /*
1708dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
170974d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
171074d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
171174d2c3a0SOleg Nesterov  *
171274d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1713dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
171474d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
171574d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
171674d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
171774d2c3a0SOleg Nesterov  * extra reference for shared policies.
171874d2c3a0SOleg Nesterov  */
17192f0799a0SDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1720dd6eecb9SOleg Nesterov 						unsigned long addr)
172174d2c3a0SOleg Nesterov {
172274d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
172374d2c3a0SOleg Nesterov 
17248d90274bSOleg Nesterov 	if (!pol)
1725dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
17268d90274bSOleg Nesterov 
17271da177e4SLinus Torvalds 	return pol;
17281da177e4SLinus Torvalds }
17291da177e4SLinus Torvalds 
17306b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1731fc314724SMel Gorman {
17326b6482bbSOleg Nesterov 	struct mempolicy *pol;
1733f15ca78eSOleg Nesterov 
1734fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1735fc314724SMel Gorman 		bool ret = false;
1736fc314724SMel Gorman 
1737fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1738fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1739fc314724SMel Gorman 			ret = true;
1740fc314724SMel Gorman 		mpol_cond_put(pol);
1741fc314724SMel Gorman 
1742fc314724SMel Gorman 		return ret;
17438d90274bSOleg Nesterov 	}
17448d90274bSOleg Nesterov 
1745fc314724SMel Gorman 	pol = vma->vm_policy;
17468d90274bSOleg Nesterov 	if (!pol)
17476b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1748fc314724SMel Gorman 
1749fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1750fc314724SMel Gorman }
1751fc314724SMel Gorman 
1752d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1753d3eb1570SLai Jiangshan {
1754d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1755d3eb1570SLai Jiangshan 
1756d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1757d3eb1570SLai Jiangshan 
1758d3eb1570SLai Jiangshan 	/*
1759d3eb1570SLai Jiangshan 	 * If policy->v.nodes has movable memory only,
1760d3eb1570SLai Jiangshan 	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1761d3eb1570SLai Jiangshan 	 *
1762d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1763d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies
1764d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1765d3eb1570SLai Jiangshan 	 */
1766d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1767d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1768d3eb1570SLai Jiangshan 
1769d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1770d3eb1570SLai Jiangshan }
1771d3eb1570SLai Jiangshan 
177252cd3b07SLee Schermerhorn /*
177352cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
177452cd3b07SLee Schermerhorn  * page allocation
177552cd3b07SLee Schermerhorn  */
177652cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
177719770b32SMel Gorman {
177819770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
177945c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1780d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
178119770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
178219770b32SMel Gorman 		return &policy->v.nodes;
178319770b32SMel Gorman 
178419770b32SMel Gorman 	return NULL;
178519770b32SMel Gorman }
178619770b32SMel Gorman 
178704ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */
178804ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy,
17892f5f9486SAndi Kleen 								int nd)
17901da177e4SLinus Torvalds {
17916d840958SMichal Hocko 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
17921da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
17936d840958SMichal Hocko 	else {
179419770b32SMel Gorman 		/*
17956d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
17966d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
17976d840958SMichal Hocko 		 * requested node and not break the policy.
179819770b32SMel Gorman 		 */
17996d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
18001da177e4SLinus Torvalds 	}
18016d840958SMichal Hocko 
180204ec6264SVlastimil Babka 	return nd;
18031da177e4SLinus Torvalds }
18041da177e4SLinus Torvalds 
18051da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
18061da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
18071da177e4SLinus Torvalds {
180845816682SVlastimil Babka 	unsigned next;
18091da177e4SLinus Torvalds 	struct task_struct *me = current;
18101da177e4SLinus Torvalds 
181145816682SVlastimil Babka 	next = next_node_in(me->il_prev, policy->v.nodes);
1812f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
181345816682SVlastimil Babka 		me->il_prev = next;
181445816682SVlastimil Babka 	return next;
18151da177e4SLinus Torvalds }
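/*
 * Example (illustration only): with policy->v.nodes = {0,2,3} and
 * il_prev == 0, successive calls return 2, 3, 0, 2, ... because
 * next_node_in() wraps around the nodemask.  il_prev is only advanced
 * while the candidate is < MAX_NUMNODES, i.e. while the mask is
 * non-empty.
 */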
18161da177e4SLinus Torvalds 
1817dc85da15SChristoph Lameter /*
1818dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1819dc85da15SChristoph Lameter  * next slab entry.
1820dc85da15SChristoph Lameter  */
18212a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1822dc85da15SChristoph Lameter {
1823e7b691b0SAndi Kleen 	struct mempolicy *policy;
18242a389610SDavid Rientjes 	int node = numa_mem_id();
1825e7b691b0SAndi Kleen 
1826e7b691b0SAndi Kleen 	if (in_interrupt())
18272a389610SDavid Rientjes 		return node;
1828e7b691b0SAndi Kleen 
1829e7b691b0SAndi Kleen 	policy = current->mempolicy;
1830fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
18312a389610SDavid Rientjes 		return node;
1832765c4507SChristoph Lameter 
1833bea904d5SLee Schermerhorn 	switch (policy->mode) {
1834bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1835fc36b8d3SLee Schermerhorn 		/*
1836fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1837fc36b8d3SLee Schermerhorn 		 */
1838bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1839bea904d5SLee Schermerhorn 
1840dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1841dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1842dc85da15SChristoph Lameter 
1843dd1a239fSMel Gorman 	case MPOL_BIND: {
1844c33d6c06SMel Gorman 		struct zoneref *z;
1845c33d6c06SMel Gorman 
1846dc85da15SChristoph Lameter 		/*
1847dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1848dc85da15SChristoph Lameter 		 * first node.
1849dc85da15SChristoph Lameter 		 */
185019770b32SMel Gorman 		struct zonelist *zonelist;
185119770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1852c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1853c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1854c33d6c06SMel Gorman 							&policy->v.nodes);
1855c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1856dd1a239fSMel Gorman 	}
1857dc85da15SChristoph Lameter 
1858dc85da15SChristoph Lameter 	default:
1859bea904d5SLee Schermerhorn 		BUG();
1860dc85da15SChristoph Lameter 	}
1861dc85da15SChristoph Lameter }
1862dc85da15SChristoph Lameter 
1863fee83b3aSAndrew Morton /*
1864fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1865fee83b3aSAndrew Morton  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1866fee83b3aSAndrew Morton  * number of present nodes.
1867fee83b3aSAndrew Morton  */
186898c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
18691da177e4SLinus Torvalds {
1870dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1871f5b087b5SDavid Rientjes 	unsigned target;
1872fee83b3aSAndrew Morton 	int i;
1873fee83b3aSAndrew Morton 	int nid;
18741da177e4SLinus Torvalds 
1875f5b087b5SDavid Rientjes 	if (!nnodes)
1876f5b087b5SDavid Rientjes 		return numa_node_id();
1877fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1878fee83b3aSAndrew Morton 	nid = first_node(pol->v.nodes);
1879fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1880dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
18811da177e4SLinus Torvalds 	return nid;
18821da177e4SLinus Torvalds }
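/*
 * Worked example (illustration only): with pol->v.nodes = {1,3,5}
 * (nnodes == 3) and n == 7, target = 7 % 3 = 1, so the loop walks one
 * step from the first node and returns node 3.  The same offset always
 * maps to the same node, which is what gives MPOL_INTERLEAVE its
 * stable, per-offset placement for VMAs.
 */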
18831da177e4SLinus Torvalds 
18845da7ca86SChristoph Lameter /* Determine a node number for interleave */
18855da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
18865da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
18875da7ca86SChristoph Lameter {
18885da7ca86SChristoph Lameter 	if (vma) {
18895da7ca86SChristoph Lameter 		unsigned long off;
18905da7ca86SChristoph Lameter 
18913b98b087SNishanth Aravamudan 		/*
18923b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
18933b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
18943b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
18953b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
18963b98b087SNishanth Aravamudan 		 * a useful offset.
18973b98b087SNishanth Aravamudan 		 */
18983b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
18993b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
19005da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
190198c70baaSLaurent Dufour 		return offset_il_node(pol, off);
19025da7ca86SChristoph Lameter 	} else
19035da7ca86SChristoph Lameter 		return interleave_nodes(pol);
19045da7ca86SChristoph Lameter }
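/*
 * Example (illustration only): for a 2MB huge page mapping with
 * vm_pgoff == 0, shift == 21 (x86-64 HPAGE_SHIFT) and a fault at
 * vma->vm_start + 4MB, off evaluates to 2, so offset_il_node() picks
 * the third node of the interleave set (modulo its size).
 */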
19055da7ca86SChristoph Lameter 
190600ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1907480eccf9SLee Schermerhorn /*
190804ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1909b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1910b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1911b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1912b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1913b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1914480eccf9SLee Schermerhorn  *
191504ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
191652cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
191752cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
191852cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1919c0ff7453SMiao Xie  *
1920d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
1921480eccf9SLee Schermerhorn  */
192204ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
192304ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
19245da7ca86SChristoph Lameter {
192504ec6264SVlastimil Babka 	int nid;
19265da7ca86SChristoph Lameter 
1927dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
192819770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
19295da7ca86SChristoph Lameter 
193052cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
193104ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
193204ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
193352cd3b07SLee Schermerhorn 	} else {
193404ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
193552cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
193652cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1937480eccf9SLee Schermerhorn 	}
193804ec6264SVlastimil Babka 	return nid;
19395da7ca86SChristoph Lameter }
194006808b08SLee Schermerhorn 
194106808b08SLee Schermerhorn /*
194206808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
194306808b08SLee Schermerhorn  *
194406808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
194506808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
194606808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
194706808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
194806808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
194906808b08SLee Schermerhorn  * of non-default mempolicy.
195006808b08SLee Schermerhorn  *
195106808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
195206808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
195306808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
195406808b08SLee Schermerhorn  *
195506808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
195606808b08SLee Schermerhorn  */
195706808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
195806808b08SLee Schermerhorn {
195906808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
196006808b08SLee Schermerhorn 	int nid;
196106808b08SLee Schermerhorn 
196206808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
196306808b08SLee Schermerhorn 		return false;
196406808b08SLee Schermerhorn 
1965c0ff7453SMiao Xie 	task_lock(current);
196606808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
196706808b08SLee Schermerhorn 	switch (mempolicy->mode) {
196806808b08SLee Schermerhorn 	case MPOL_PREFERRED:
196906808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
197006808b08SLee Schermerhorn 			nid = numa_node_id();
197106808b08SLee Schermerhorn 		else
197206808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
197306808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
197406808b08SLee Schermerhorn 		break;
197506808b08SLee Schermerhorn 
197606808b08SLee Schermerhorn 	case MPOL_BIND:
197706808b08SLee Schermerhorn 		/* Fall through */
197806808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
197906808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
198006808b08SLee Schermerhorn 		break;
198106808b08SLee Schermerhorn 
198206808b08SLee Schermerhorn 	default:
198306808b08SLee Schermerhorn 		BUG();
198406808b08SLee Schermerhorn 	}
1985c0ff7453SMiao Xie 	task_unlock(current);
198606808b08SLee Schermerhorn 
198706808b08SLee Schermerhorn 	return true;
198806808b08SLee Schermerhorn }
198900ac59adSChen, Kenneth W #endif
19905da7ca86SChristoph Lameter 
19916f48d0ebSDavid Rientjes /*
19926f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
19936f48d0ebSDavid Rientjes  *
19946f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
19956f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
19966f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
19976f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
19986f48d0ebSDavid Rientjes  *
19996f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
20006f48d0ebSDavid Rientjes  */
20016f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
20026f48d0ebSDavid Rientjes 					const nodemask_t *mask)
20036f48d0ebSDavid Rientjes {
20046f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
20056f48d0ebSDavid Rientjes 	bool ret = true;
20066f48d0ebSDavid Rientjes 
20076f48d0ebSDavid Rientjes 	if (!mask)
20086f48d0ebSDavid Rientjes 		return ret;
20096f48d0ebSDavid Rientjes 	task_lock(tsk);
20106f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
20116f48d0ebSDavid Rientjes 	if (!mempolicy)
20126f48d0ebSDavid Rientjes 		goto out;
20136f48d0ebSDavid Rientjes 
20146f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
20156f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
20166f48d0ebSDavid Rientjes 		/*
20176f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
20186f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes
20196f48d0ebSDavid Rientjes 		 * to allocate from; they may fall back to other nodes when OOM.
20206f48d0ebSDavid Rientjes 		 * nodes in mask.
20216f48d0ebSDavid Rientjes 		 */
20226f48d0ebSDavid Rientjes 		break;
20236f48d0ebSDavid Rientjes 	case MPOL_BIND:
20246f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
20256f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
20266f48d0ebSDavid Rientjes 		break;
20276f48d0ebSDavid Rientjes 	default:
20286f48d0ebSDavid Rientjes 		BUG();
20296f48d0ebSDavid Rientjes 	}
20306f48d0ebSDavid Rientjes out:
20316f48d0ebSDavid Rientjes 	task_unlock(tsk);
20326f48d0ebSDavid Rientjes 	return ret;
20336f48d0ebSDavid Rientjes }
20346f48d0ebSDavid Rientjes 
20351da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
20361da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
2037662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2038662f3a0bSAndi Kleen 					unsigned nid)
20391da177e4SLinus Torvalds {
20401da177e4SLinus Torvalds 	struct page *page;
20411da177e4SLinus Torvalds 
204204ec6264SVlastimil Babka 	page = __alloc_pages(gfp, order, nid);
20434518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
20444518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
20454518085eSKemi Wang 		return page;
2046de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2047de55c8b2SAndrey Ryabinin 		preempt_disable();
2048de55c8b2SAndrey Ryabinin 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2049de55c8b2SAndrey Ryabinin 		preempt_enable();
2050de55c8b2SAndrey Ryabinin 	}
20511da177e4SLinus Torvalds 	return page;
20521da177e4SLinus Torvalds }
20531da177e4SLinus Torvalds 
20541da177e4SLinus Torvalds /**
20550bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
20561da177e4SLinus Torvalds  *
20571da177e4SLinus Torvalds  * 	@gfp:
20581da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
20591da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
20601da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
20611da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
20621da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
20631da177e4SLinus Torvalds  *
20640bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
20651da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
20661da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
2067be97a41bSVlastimil Babka  *	@node: Which node to prefer for allocation (modulo policy).
2068356ff8a9SDavid Rientjes  *	@hugepage: for hugepages try only the preferred node if possible
20691da177e4SLinus Torvalds  *
20701da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
20711da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
20721da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
20731da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
2074be97a41bSVlastimil Babka  *	all allocations for pages that will be mapped into user space. Returns
2075be97a41bSVlastimil Babka  *	NULL when no page can be allocated.
20761da177e4SLinus Torvalds  */
20771da177e4SLinus Torvalds struct page *
20780bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2079356ff8a9SDavid Rientjes 		unsigned long addr, int node, bool hugepage)
20801da177e4SLinus Torvalds {
2081cc9a6c87SMel Gorman 	struct mempolicy *pol;
2082c0ff7453SMiao Xie 	struct page *page;
208304ec6264SVlastimil Babka 	int preferred_nid;
2084be97a41bSVlastimil Babka 	nodemask_t *nmask;
20851da177e4SLinus Torvalds 
2086dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2087cc9a6c87SMel Gorman 
2088be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
20891da177e4SLinus Torvalds 		unsigned nid;
20905da7ca86SChristoph Lameter 
20918eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
209252cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
20930bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2094be97a41bSVlastimil Babka 		goto out;
20951da177e4SLinus Torvalds 	}
20961da177e4SLinus Torvalds 
2097356ff8a9SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2098356ff8a9SDavid Rientjes 		int hpage_node = node;
2099356ff8a9SDavid Rientjes 
2100356ff8a9SDavid Rientjes 		/*
2101356ff8a9SDavid Rientjes 		 * For hugepage allocation and non-interleave policy which
2102356ff8a9SDavid Rientjes 		 * allows the current node (or other explicitly preferred
2103356ff8a9SDavid Rientjes 		 * node) we only try to allocate from the current/preferred
2104356ff8a9SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
2105356ff8a9SDavid Rientjes 		 * remote accesses would likely offset THP benefits.
2106356ff8a9SDavid Rientjes 		 *
2107356ff8a9SDavid Rientjes 		 * If the policy is interleave, or does not allow the current
2108356ff8a9SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
2109356ff8a9SDavid Rientjes 		 */
2110356ff8a9SDavid Rientjes 		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2111356ff8a9SDavid Rientjes 			hpage_node = pol->v.preferred_node;
2112356ff8a9SDavid Rientjes 
2113356ff8a9SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
2114356ff8a9SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
2115356ff8a9SDavid Rientjes 			mpol_cond_put(pol);
2116356ff8a9SDavid Rientjes 			page = __alloc_pages_node(hpage_node,
2117356ff8a9SDavid Rientjes 						gfp | __GFP_THISNODE, order);
2118356ff8a9SDavid Rientjes 			goto out;
2119356ff8a9SDavid Rientjes 		}
2120356ff8a9SDavid Rientjes 	}
2121356ff8a9SDavid Rientjes 
2122077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
212304ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
212404ec6264SVlastimil Babka 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2125d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2126be97a41bSVlastimil Babka out:
2127077fcf11SAneesh Kumar K.V 	return page;
2128077fcf11SAneesh Kumar K.V }
212969262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma);
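/*
 * Illustrative sketch, not part of this file: a typical fault-path caller
 * already holding mmap_sem for read might use the interface above roughly
 * as below.  The gfp mask, the local variables and the choice of
 * numa_node_id() are assumptions about such a caller, not requirements of
 * alloc_pages_vma() itself.
 *
 *	struct page *page;
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, address,
 *			       numa_node_id(), false);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *
 * On success the page has been placed according to the VMA (or task)
 * mempolicy.
 */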
2130077fcf11SAneesh Kumar K.V 
21311da177e4SLinus Torvalds /**
21321da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
21331da177e4SLinus Torvalds  *
21341da177e4SLinus Torvalds  *	@gfp:
21351da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
21361da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
21371da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
21381da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
21391da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
21401da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
21411da177e4SLinus Torvalds  *
21421da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
21431da177e4SLinus Torvalds  *	interrupt context, the current process' NUMA policy is applied.
21441da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
21451da177e4SLinus Torvalds  */
2146dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
21471da177e4SLinus Torvalds {
21488d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2149c0ff7453SMiao Xie 	struct page *page;
21501da177e4SLinus Torvalds 
21518d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
21528d90274bSOleg Nesterov 		pol = get_task_policy(current);
215352cd3b07SLee Schermerhorn 
215452cd3b07SLee Schermerhorn 	/*
215552cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
215652cd3b07SLee Schermerhorn 	 * nor system default_policy
215752cd3b07SLee Schermerhorn 	 */
215845c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2159c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2160c0ff7453SMiao Xie 	else
2161c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
216204ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
21635c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2164cc9a6c87SMel Gorman 
2165c0ff7453SMiao Xie 	return page;
21661da177e4SLinus Torvalds }
21671da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
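/*
 * Illustrative sketch, not part of this file: on CONFIG_NUMA kernels the
 * generic alloc_pages() helper normally resolves to alloc_pages_current(),
 * so a plain kernel allocation that should honour the calling task's
 * mempolicy looks roughly like:
 *
 *	struct page *page = alloc_pages_current(GFP_KERNEL, 0);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 0);
 */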
21681da177e4SLinus Torvalds 
2169ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2170ef0855d3SOleg Nesterov {
2171ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2172ef0855d3SOleg Nesterov 
2173ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2174ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2175ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2176ef0855d3SOleg Nesterov 	return 0;
2177ef0855d3SOleg Nesterov }
2178ef0855d3SOleg Nesterov 
21794225399aSPaul Jackson /*
2180846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
21814225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
21824225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
21834225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
21844225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2185708c1bbcSMiao Xie  *
2186708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2187708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do the rebind work for the current task.
21884225399aSPaul Jackson  */
21894225399aSPaul Jackson 
2190846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2191846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
21921da177e4SLinus Torvalds {
21931da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
21941da177e4SLinus Torvalds 
21951da177e4SLinus Torvalds 	if (!new)
21961da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2197708c1bbcSMiao Xie 
2198708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2199708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2200708c1bbcSMiao Xie 		task_lock(current);
2201708c1bbcSMiao Xie 		*new = *old;
2202708c1bbcSMiao Xie 		task_unlock(current);
2203708c1bbcSMiao Xie 	} else
2204708c1bbcSMiao Xie 		*new = *old;
2205708c1bbcSMiao Xie 
22064225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
22074225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2208213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
22094225399aSPaul Jackson 	}
22101da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
22111da177e4SLinus Torvalds 	return new;
22121da177e4SLinus Torvalds }
22131da177e4SLinus Torvalds 
22141da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2215fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
22161da177e4SLinus Torvalds {
22171da177e4SLinus Torvalds 	if (!a || !b)
2218fcfb4dccSKOSAKI Motohiro 		return false;
221945c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2220fcfb4dccSKOSAKI Motohiro 		return false;
222119800502SBob Liu 	if (a->flags != b->flags)
2222fcfb4dccSKOSAKI Motohiro 		return false;
222319800502SBob Liu 	if (mpol_store_user_nodemask(a))
222419800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2225fcfb4dccSKOSAKI Motohiro 			return false;
222619800502SBob Liu 
222745c4745aSLee Schermerhorn 	switch (a->mode) {
222819770b32SMel Gorman 	case MPOL_BIND:
222919770b32SMel Gorman 		/* Fall through */
22301da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2231fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
22321da177e4SLinus Torvalds 	case MPOL_PREFERRED:
22338970a63eSYisheng Xie 		/* a's ->flags is the same as b's */
22348970a63eSYisheng Xie 		if (a->flags & MPOL_F_LOCAL)
22358970a63eSYisheng Xie 			return true;
223675719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
22371da177e4SLinus Torvalds 	default:
22381da177e4SLinus Torvalds 		BUG();
2239fcfb4dccSKOSAKI Motohiro 		return false;
22401da177e4SLinus Torvalds 	}
22411da177e4SLinus Torvalds }
22421da177e4SLinus Torvalds 
22431da177e4SLinus Torvalds /*
22441da177e4SLinus Torvalds  * Shared memory backing store policy support.
22451da177e4SLinus Torvalds  *
22461da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
22471da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
22484a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
22491da177e4SLinus Torvalds  * for any accesses to the tree.
22501da177e4SLinus Torvalds  */
22511da177e4SLinus Torvalds 
22524a8c7bb5SNathan Zimmer /*
22534a8c7bb5SNathan Zimmer  * lookup first element intersecting start-end.  Caller holds sp->lock for
22544a8c7bb5SNathan Zimmer  * reading or for writing
22554a8c7bb5SNathan Zimmer  */
22561da177e4SLinus Torvalds static struct sp_node *
22571da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
22581da177e4SLinus Torvalds {
22591da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
22601da177e4SLinus Torvalds 
22611da177e4SLinus Torvalds 	while (n) {
22621da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
22631da177e4SLinus Torvalds 
22641da177e4SLinus Torvalds 		if (start >= p->end)
22651da177e4SLinus Torvalds 			n = n->rb_right;
22661da177e4SLinus Torvalds 		else if (end <= p->start)
22671da177e4SLinus Torvalds 			n = n->rb_left;
22681da177e4SLinus Torvalds 		else
22691da177e4SLinus Torvalds 			break;
22701da177e4SLinus Torvalds 	}
22711da177e4SLinus Torvalds 	if (!n)
22721da177e4SLinus Torvalds 		return NULL;
22731da177e4SLinus Torvalds 	for (;;) {
22741da177e4SLinus Torvalds 		struct sp_node *w = NULL;
22751da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
22761da177e4SLinus Torvalds 		if (!prev)
22771da177e4SLinus Torvalds 			break;
22781da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
22791da177e4SLinus Torvalds 		if (w->end <= start)
22801da177e4SLinus Torvalds 			break;
22811da177e4SLinus Torvalds 		n = prev;
22821da177e4SLinus Torvalds 	}
22831da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
22841da177e4SLinus Torvalds }
22851da177e4SLinus Torvalds 
22864a8c7bb5SNathan Zimmer /*
22874a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
22884a8c7bb5SNathan Zimmer  * writing.
22894a8c7bb5SNathan Zimmer  */
22901da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
22911da177e4SLinus Torvalds {
22921da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
22931da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
22941da177e4SLinus Torvalds 	struct sp_node *nd;
22951da177e4SLinus Torvalds 
22961da177e4SLinus Torvalds 	while (*p) {
22971da177e4SLinus Torvalds 		parent = *p;
22981da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
22991da177e4SLinus Torvalds 		if (new->start < nd->start)
23001da177e4SLinus Torvalds 			p = &(*p)->rb_left;
23011da177e4SLinus Torvalds 		else if (new->end > nd->end)
23021da177e4SLinus Torvalds 			p = &(*p)->rb_right;
23031da177e4SLinus Torvalds 		else
23041da177e4SLinus Torvalds 			BUG();
23051da177e4SLinus Torvalds 	}
23061da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
23071da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2308140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
230945c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
23101da177e4SLinus Torvalds }
23111da177e4SLinus Torvalds 
23121da177e4SLinus Torvalds /* Find shared policy intersecting idx */
23131da177e4SLinus Torvalds struct mempolicy *
23141da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
23151da177e4SLinus Torvalds {
23161da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
23171da177e4SLinus Torvalds 	struct sp_node *sn;
23181da177e4SLinus Torvalds 
23191da177e4SLinus Torvalds 	if (!sp->root.rb_node)
23201da177e4SLinus Torvalds 		return NULL;
23214a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
23221da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
23231da177e4SLinus Torvalds 	if (sn) {
23241da177e4SLinus Torvalds 		mpol_get(sn->policy);
23251da177e4SLinus Torvalds 		pol = sn->policy;
23261da177e4SLinus Torvalds 	}
23274a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
23281da177e4SLinus Torvalds 	return pol;
23291da177e4SLinus Torvalds }
23301da177e4SLinus Torvalds 
233163f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
233263f74ca2SKOSAKI Motohiro {
233363f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
233463f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
233563f74ca2SKOSAKI Motohiro }
233663f74ca2SKOSAKI Motohiro 
2337771fb4d8SLee Schermerhorn /**
2338771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2339771fb4d8SLee Schermerhorn  *
2340b46e14acSFabian Frederick  * @page: page to be checked
2341b46e14acSFabian Frederick  * @vma: vm area where page mapped
2342b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2343771fb4d8SLee Schermerhorn  *
2344771fb4d8SLee Schermerhorn  * Lookup current policy node id for vma,addr and "compare to" page's
2345771fb4d8SLee Schermerhorn  * Lookup the current policy node id for vma, addr and "compare to" page's
2346771fb4d8SLee Schermerhorn  *
2347771fb4d8SLee Schermerhorn  * Returns:
2348771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2349771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2350771fb4d8SLee Schermerhorn  *
2351771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2352771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2353771fb4d8SLee Schermerhorn  */
2354771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2355771fb4d8SLee Schermerhorn {
2356771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2357c33d6c06SMel Gorman 	struct zoneref *z;
2358771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2359771fb4d8SLee Schermerhorn 	unsigned long pgoff;
236090572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
236190572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
236298fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2363771fb4d8SLee Schermerhorn 	int ret = -1;
2364771fb4d8SLee Schermerhorn 
2365dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2366771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2367771fb4d8SLee Schermerhorn 		goto out;
2368771fb4d8SLee Schermerhorn 
2369771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2370771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2371771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2372771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
237398c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2374771fb4d8SLee Schermerhorn 		break;
2375771fb4d8SLee Schermerhorn 
2376771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2377771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2378771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2379771fb4d8SLee Schermerhorn 		else
2380771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2381771fb4d8SLee Schermerhorn 		break;
2382771fb4d8SLee Schermerhorn 
2383771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2384c33d6c06SMel Gorman 
2385771fb4d8SLee Schermerhorn 		/*
2386771fb4d8SLee Schermerhorn 		 * allows binding to multiple nodes.
2387771fb4d8SLee Schermerhorn 		 * use current page if in policy nodemask,
2388771fb4d8SLee Schermerhorn 		 * else select nearest allowed node, if any.
2389771fb4d8SLee Schermerhorn 		 * If no allowed nodes, use current [!misplaced].
2390771fb4d8SLee Schermerhorn 		 */
2391771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2392771fb4d8SLee Schermerhorn 			goto out;
2393c33d6c06SMel Gorman 		z = first_zones_zonelist(
2394771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2395771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2396c33d6c06SMel Gorman 				&pol->v.nodes);
2397c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2398771fb4d8SLee Schermerhorn 		break;
2399771fb4d8SLee Schermerhorn 
2400771fb4d8SLee Schermerhorn 	default:
2401771fb4d8SLee Schermerhorn 		BUG();
2402771fb4d8SLee Schermerhorn 	}
24035606e387SMel Gorman 
24045606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2405e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
240690572890SPeter Zijlstra 		polnid = thisnid;
24075606e387SMel Gorman 
240810f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2409de1c9ce6SRik van Riel 			goto out;
2410de1c9ce6SRik van Riel 	}
2411e42c8ff2SMel Gorman 
2412771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2413771fb4d8SLee Schermerhorn 		ret = polnid;
2414771fb4d8SLee Schermerhorn out:
2415771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2416771fb4d8SLee Schermerhorn 
2417771fb4d8SLee Schermerhorn 	return ret;
2418771fb4d8SLee Schermerhorn }
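/*
 * Illustrative sketch, not part of this file: a NUMA hinting fault handler
 * could consume the return value of mpol_misplaced() roughly as below
 * (reference counting and error handling are simplified, and page_nid is a
 * local of the hypothetical caller).  -1 means the page is already on an
 * allowed node; otherwise the page is pushed towards the returned node,
 * mirroring what the real NUMA fault path does.
 *
 *	int target_nid = mpol_misplaced(page, vma, addr);
 *
 *	if (target_nid != -1 && migrate_misplaced_page(page, vma, target_nid))
 *		page_nid = target_nid;
 */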
2419771fb4d8SLee Schermerhorn 
2420c11600e4SDavid Rientjes /*
2421c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2422c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2423c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2424c11600e4SDavid Rientjes  * policy.
2425c11600e4SDavid Rientjes  */
2426c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2427c11600e4SDavid Rientjes {
2428c11600e4SDavid Rientjes 	struct mempolicy *pol;
2429c11600e4SDavid Rientjes 
2430c11600e4SDavid Rientjes 	task_lock(task);
2431c11600e4SDavid Rientjes 	pol = task->mempolicy;
2432c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2433c11600e4SDavid Rientjes 	task_unlock(task);
2434c11600e4SDavid Rientjes 	mpol_put(pol);
2435c11600e4SDavid Rientjes }
2436c11600e4SDavid Rientjes 
24371da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
24381da177e4SLinus Torvalds {
2439140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
24401da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
244163f74ca2SKOSAKI Motohiro 	sp_free(n);
24421da177e4SLinus Torvalds }
24431da177e4SLinus Torvalds 
244442288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
244542288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
244642288fe3SMel Gorman {
244742288fe3SMel Gorman 	node->start = start;
244842288fe3SMel Gorman 	node->end = end;
244942288fe3SMel Gorman 	node->policy = pol;
245042288fe3SMel Gorman }
245142288fe3SMel Gorman 
2452dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2453dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
24541da177e4SLinus Torvalds {
2455869833f2SKOSAKI Motohiro 	struct sp_node *n;
2456869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
24571da177e4SLinus Torvalds 
2458869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
24591da177e4SLinus Torvalds 	if (!n)
24601da177e4SLinus Torvalds 		return NULL;
2461869833f2SKOSAKI Motohiro 
2462869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2463869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2464869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2465869833f2SKOSAKI Motohiro 		return NULL;
2466869833f2SKOSAKI Motohiro 	}
2467869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
246842288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2469869833f2SKOSAKI Motohiro 
24701da177e4SLinus Torvalds 	return n;
24711da177e4SLinus Torvalds }
24721da177e4SLinus Torvalds 
24731da177e4SLinus Torvalds /* Replace a policy range. */
24741da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
24751da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
24761da177e4SLinus Torvalds {
2477b22d127aSMel Gorman 	struct sp_node *n;
247842288fe3SMel Gorman 	struct sp_node *n_new = NULL;
247942288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2480b22d127aSMel Gorman 	int ret = 0;
24811da177e4SLinus Torvalds 
248242288fe3SMel Gorman restart:
24834a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
24841da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
24851da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
24861da177e4SLinus Torvalds 	while (n && n->start < end) {
24871da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
24881da177e4SLinus Torvalds 		if (n->start >= start) {
24891da177e4SLinus Torvalds 			if (n->end <= end)
24901da177e4SLinus Torvalds 				sp_delete(sp, n);
24911da177e4SLinus Torvalds 			else
24921da177e4SLinus Torvalds 				n->start = end;
24931da177e4SLinus Torvalds 		} else {
24941da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
24951da177e4SLinus Torvalds 			if (n->end > end) {
249642288fe3SMel Gorman 				if (!n_new)
249742288fe3SMel Gorman 					goto alloc_new;
249842288fe3SMel Gorman 
249942288fe3SMel Gorman 				*mpol_new = *n->policy;
250042288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
25017880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
25021da177e4SLinus Torvalds 				n->end = start;
25035ca39575SHillf Danton 				sp_insert(sp, n_new);
250442288fe3SMel Gorman 				n_new = NULL;
250542288fe3SMel Gorman 				mpol_new = NULL;
25061da177e4SLinus Torvalds 				break;
25071da177e4SLinus Torvalds 			} else
25081da177e4SLinus Torvalds 				n->end = start;
25091da177e4SLinus Torvalds 		}
25101da177e4SLinus Torvalds 		if (!next)
25111da177e4SLinus Torvalds 			break;
25121da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25131da177e4SLinus Torvalds 	}
25141da177e4SLinus Torvalds 	if (new)
25151da177e4SLinus Torvalds 		sp_insert(sp, new);
25164a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
251742288fe3SMel Gorman 	ret = 0;
251842288fe3SMel Gorman 
251942288fe3SMel Gorman err_out:
252042288fe3SMel Gorman 	if (mpol_new)
252142288fe3SMel Gorman 		mpol_put(mpol_new);
252242288fe3SMel Gorman 	if (n_new)
252342288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
252442288fe3SMel Gorman 
2525b22d127aSMel Gorman 	return ret;
252642288fe3SMel Gorman 
252742288fe3SMel Gorman alloc_new:
25284a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
252942288fe3SMel Gorman 	ret = -ENOMEM;
253042288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
253142288fe3SMel Gorman 	if (!n_new)
253242288fe3SMel Gorman 		goto err_out;
253342288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
253442288fe3SMel Gorman 	if (!mpol_new)
253542288fe3SMel Gorman 		goto err_out;
253642288fe3SMel Gorman 	goto restart;
25371da177e4SLinus Torvalds }
25381da177e4SLinus Torvalds 
253971fe804bSLee Schermerhorn /**
254071fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
254171fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
254271fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
254371fe804bSLee Schermerhorn  *
254471fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
254571fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
254671fe804bSLee Schermerhorn  * This must be released on exit.
25474bfc4495SKAMEZAWA Hiroyuki  * This is called during get_inode() calls, so we can use GFP_KERNEL.
254871fe804bSLee Schermerhorn  */
254971fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
25507339ff83SRobin Holt {
255158568d2aSMiao Xie 	int ret;
255258568d2aSMiao Xie 
255371fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
25544a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
25557339ff83SRobin Holt 
255671fe804bSLee Schermerhorn 	if (mpol) {
25577339ff83SRobin Holt 		struct vm_area_struct pvma;
255871fe804bSLee Schermerhorn 		struct mempolicy *new;
25594bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
25607339ff83SRobin Holt 
25614bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
25625c0c1654SLee Schermerhorn 			goto put_mpol;
256371fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
256471fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
256515d77835SLee Schermerhorn 		if (IS_ERR(new))
25660cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
256758568d2aSMiao Xie 
256858568d2aSMiao Xie 		task_lock(current);
25694bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
257058568d2aSMiao Xie 		task_unlock(current);
257115d77835SLee Schermerhorn 		if (ret)
25725c0c1654SLee Schermerhorn 			goto put_new;
257371fe804bSLee Schermerhorn 
257471fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
25752c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
257671fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
257771fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
257815d77835SLee Schermerhorn 
25795c0c1654SLee Schermerhorn put_new:
258071fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
25810cae3457SDan Carpenter free_scratch:
25824bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
25835c0c1654SLee Schermerhorn put_mpol:
25845c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
25857339ff83SRobin Holt 	}
25867339ff83SRobin Holt }
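/*
 * Illustrative sketch, not part of this file: a tmpfs-like filesystem
 * typically calls the function above from its inode constructor, passing
 * along a reference on the superblock's "mpol=" mount-option policy (which
 * may be NULL).  "info" stands for the filesystem's per-inode data and
 * "sbmpol" for that mount-option policy; both are assumptions of this
 * sketch:
 *
 *	mpol_shared_policy_init(&info->policy, sbmpol);
 *
 * Page allocations for that inode then resolve their policy through
 * mpol_shared_policy_lookup(&info->policy, index).
 */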
25877339ff83SRobin Holt 
25881da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
25891da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
25901da177e4SLinus Torvalds {
25911da177e4SLinus Torvalds 	int err;
25921da177e4SLinus Torvalds 	struct sp_node *new = NULL;
25931da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
25941da177e4SLinus Torvalds 
2595028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
25961da177e4SLinus Torvalds 		 vma->vm_pgoff,
259745c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2598028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
259900ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
26001da177e4SLinus Torvalds 
26011da177e4SLinus Torvalds 	if (npol) {
26021da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
26031da177e4SLinus Torvalds 		if (!new)
26041da177e4SLinus Torvalds 			return -ENOMEM;
26051da177e4SLinus Torvalds 	}
26061da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
26071da177e4SLinus Torvalds 	if (err && new)
260863f74ca2SKOSAKI Motohiro 		sp_free(new);
26091da177e4SLinus Torvalds 	return err;
26101da177e4SLinus Torvalds }
26111da177e4SLinus Torvalds 
26121da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
26131da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
26141da177e4SLinus Torvalds {
26151da177e4SLinus Torvalds 	struct sp_node *n;
26161da177e4SLinus Torvalds 	struct rb_node *next;
26171da177e4SLinus Torvalds 
26181da177e4SLinus Torvalds 	if (!p->root.rb_node)
26191da177e4SLinus Torvalds 		return;
26204a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
26211da177e4SLinus Torvalds 	next = rb_first(&p->root);
26221da177e4SLinus Torvalds 	while (next) {
26231da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
26241da177e4SLinus Torvalds 		next = rb_next(&n->nd);
262563f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
26261da177e4SLinus Torvalds 	}
26274a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
26281da177e4SLinus Torvalds }
26291da177e4SLinus Torvalds 
26301a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2631c297663cSMel Gorman static int __initdata numabalancing_override;
26321a687c2eSMel Gorman 
26331a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
26341a687c2eSMel Gorman {
26351a687c2eSMel Gorman 	bool numabalancing_default = false;
26361a687c2eSMel Gorman 
26371a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
26381a687c2eSMel Gorman 		numabalancing_default = true;
26391a687c2eSMel Gorman 
2640c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2641c297663cSMel Gorman 	if (numabalancing_override)
2642c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2643c297663cSMel Gorman 
2644b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2645756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2646c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
26471a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
26481a687c2eSMel Gorman 	}
26491a687c2eSMel Gorman }
26501a687c2eSMel Gorman 
26511a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
26521a687c2eSMel Gorman {
26531a687c2eSMel Gorman 	int ret = 0;
26541a687c2eSMel Gorman 	if (!str)
26551a687c2eSMel Gorman 		goto out;
26561a687c2eSMel Gorman 
26571a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2658c297663cSMel Gorman 		numabalancing_override = 1;
26591a687c2eSMel Gorman 		ret = 1;
26601a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2661c297663cSMel Gorman 		numabalancing_override = -1;
26621a687c2eSMel Gorman 		ret = 1;
26631a687c2eSMel Gorman 	}
26641a687c2eSMel Gorman out:
26651a687c2eSMel Gorman 	if (!ret)
26664a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
26671a687c2eSMel Gorman 
26681a687c2eSMel Gorman 	return ret;
26691a687c2eSMel Gorman }
26701a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
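/*
 * Illustrative examples, not part of this file: the parser above accepts
 * exactly these two forms on the kernel command line:
 *
 *	numa_balancing=enable
 *	numa_balancing=disable
 *
 * Any other value makes setup_numabalancing() return 0, which prints the
 * "Unable to parse numa_balancing=" warning and leaves the Kconfig default
 * (plus the node-count heuristic above) in effect.
 */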
26711a687c2eSMel Gorman #else
26721a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
26731a687c2eSMel Gorman {
26741a687c2eSMel Gorman }
26751a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
26761a687c2eSMel Gorman 
26771da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
26781da177e4SLinus Torvalds void __init numa_policy_init(void)
26791da177e4SLinus Torvalds {
2680b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2681b71636e2SPaul Mundt 	unsigned long largest = 0;
2682b71636e2SPaul Mundt 	int nid, prefer = 0;
2683b71636e2SPaul Mundt 
26841da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
26851da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
268620c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
26871da177e4SLinus Torvalds 
26881da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
26891da177e4SLinus Torvalds 				     sizeof(struct sp_node),
269020c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
26911da177e4SLinus Torvalds 
26925606e387SMel Gorman 	for_each_node(nid) {
26935606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
26945606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
26955606e387SMel Gorman 			.mode = MPOL_PREFERRED,
26965606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
26975606e387SMel Gorman 			.v = { .preferred_node = nid, },
26985606e387SMel Gorman 		};
26995606e387SMel Gorman 	}
27005606e387SMel Gorman 
2701b71636e2SPaul Mundt 	/*
2702b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2703b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2704b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2705b71636e2SPaul Mundt 	 */
2706b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
270701f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2708b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
27091da177e4SLinus Torvalds 
2710b71636e2SPaul Mundt 		/* Preserve the largest node */
2711b71636e2SPaul Mundt 		if (largest < total_pages) {
2712b71636e2SPaul Mundt 			largest = total_pages;
2713b71636e2SPaul Mundt 			prefer = nid;
2714b71636e2SPaul Mundt 		}
2715b71636e2SPaul Mundt 
2716b71636e2SPaul Mundt 		/* Interleave this node? */
2717b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2718b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2719b71636e2SPaul Mundt 	}
2720b71636e2SPaul Mundt 
2721b71636e2SPaul Mundt 	/* All too small, use the largest */
2722b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2723b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2724b71636e2SPaul Mundt 
2725028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2726b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
27271a687c2eSMel Gorman 
27281a687c2eSMel Gorman 	check_numabalancing_enable();
27291da177e4SLinus Torvalds }
27301da177e4SLinus Torvalds 
27318bccd85fSChristoph Lameter /* Reset policy of current process to default */
27321da177e4SLinus Torvalds void numa_default_policy(void)
27331da177e4SLinus Torvalds {
2734028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
27351da177e4SLinus Torvalds }
273668860ec1SPaul Jackson 
27374225399aSPaul Jackson /*
2738095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2739095f1fc4SLee Schermerhorn  */
2740095f1fc4SLee Schermerhorn 
2741095f1fc4SLee Schermerhorn /*
2742f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
27431a75a6c8SChristoph Lameter  */
2744345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2745345ace9cSLee Schermerhorn {
2746345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2747345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2748345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2749345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2750d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2751345ace9cSLee Schermerhorn };
27521a75a6c8SChristoph Lameter 
2753095f1fc4SLee Schermerhorn 
2754095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2755095f1fc4SLee Schermerhorn /**
2756f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2757095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
275871fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2759095f1fc4SLee Schermerhorn  *
2760095f1fc4SLee Schermerhorn  * Format of input:
2761095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2762095f1fc4SLee Schermerhorn  *
276371fe804bSLee Schermerhorn  * On success, returns 0, else 1
2764095f1fc4SLee Schermerhorn  */
2765a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2766095f1fc4SLee Schermerhorn {
276771fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2768f2a07f40SHugh Dickins 	unsigned short mode_flags;
276971fe804bSLee Schermerhorn 	nodemask_t nodes;
2770095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2771095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2772dedf2c73Szhong jiang 	int err = 1, mode;
2773095f1fc4SLee Schermerhorn 
2774095f1fc4SLee Schermerhorn 	if (nodelist) {
2775095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2776095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
277771fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2778095f1fc4SLee Schermerhorn 			goto out;
277901f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2780095f1fc4SLee Schermerhorn 			goto out;
278171fe804bSLee Schermerhorn 	} else
278271fe804bSLee Schermerhorn 		nodes_clear(nodes);
278371fe804bSLee Schermerhorn 
2784095f1fc4SLee Schermerhorn 	if (flags)
2785095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2786095f1fc4SLee Schermerhorn 
2787dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
2788dedf2c73Szhong jiang 	if (mode < 0)
2789095f1fc4SLee Schermerhorn 		goto out;
2790095f1fc4SLee Schermerhorn 
279171fe804bSLee Schermerhorn 	switch (mode) {
2792095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
279371fe804bSLee Schermerhorn 		/*
279471fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
279571fe804bSLee Schermerhorn 		 */
2796095f1fc4SLee Schermerhorn 		if (nodelist) {
2797095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2798095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2799095f1fc4SLee Schermerhorn 				rest++;
2800926f2ae0SKOSAKI Motohiro 			if (*rest)
2801926f2ae0SKOSAKI Motohiro 				goto out;
2802095f1fc4SLee Schermerhorn 		}
2803095f1fc4SLee Schermerhorn 		break;
2804095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2805095f1fc4SLee Schermerhorn 		/*
2806095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2807095f1fc4SLee Schermerhorn 		 */
2808095f1fc4SLee Schermerhorn 		if (!nodelist)
280901f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
28103f226aa1SLee Schermerhorn 		break;
281171fe804bSLee Schermerhorn 	case MPOL_LOCAL:
28123f226aa1SLee Schermerhorn 		/*
281371fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
28143f226aa1SLee Schermerhorn 		 */
281571fe804bSLee Schermerhorn 		if (nodelist)
28163f226aa1SLee Schermerhorn 			goto out;
281771fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
28183f226aa1SLee Schermerhorn 		break;
2819413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2820413b43deSRavikiran G Thirumalai 		/*
2821413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2822413b43deSRavikiran G Thirumalai 		 */
2823413b43deSRavikiran G Thirumalai 		if (!nodelist)
2824413b43deSRavikiran G Thirumalai 			err = 0;
2825413b43deSRavikiran G Thirumalai 		goto out;
2826d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
282771fe804bSLee Schermerhorn 		/*
2828d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
282971fe804bSLee Schermerhorn 		 */
2830d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2831d69b2e63SKOSAKI Motohiro 			goto out;
2832095f1fc4SLee Schermerhorn 	}
2833095f1fc4SLee Schermerhorn 
283471fe804bSLee Schermerhorn 	mode_flags = 0;
2835095f1fc4SLee Schermerhorn 	if (flags) {
2836095f1fc4SLee Schermerhorn 		/*
2837095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2838095f1fc4SLee Schermerhorn 		 * mode flags.
2839095f1fc4SLee Schermerhorn 		 */
2840095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
284171fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2842095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
284371fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2844095f1fc4SLee Schermerhorn 		else
2845926f2ae0SKOSAKI Motohiro 			goto out;
2846095f1fc4SLee Schermerhorn 	}
284771fe804bSLee Schermerhorn 
284871fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
284971fe804bSLee Schermerhorn 	if (IS_ERR(new))
2850926f2ae0SKOSAKI Motohiro 		goto out;
2851926f2ae0SKOSAKI Motohiro 
2852f2a07f40SHugh Dickins 	/*
2853f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2854f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2855f2a07f40SHugh Dickins 	 */
2856f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2857f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2858f2a07f40SHugh Dickins 	else if (nodelist)
2859f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2860f2a07f40SHugh Dickins 	else
2861f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2862f2a07f40SHugh Dickins 
2863f2a07f40SHugh Dickins 	/*
2864f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2865f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2866f2a07f40SHugh Dickins 	 */
2867e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2868f2a07f40SHugh Dickins 
2869926f2ae0SKOSAKI Motohiro 	err = 0;
287071fe804bSLee Schermerhorn 
2871095f1fc4SLee Schermerhorn out:
2872095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2873095f1fc4SLee Schermerhorn 	if (nodelist)
2874095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2875095f1fc4SLee Schermerhorn 	if (flags)
2876095f1fc4SLee Schermerhorn 		*--flags = '=';
287771fe804bSLee Schermerhorn 	if (!err)
287871fe804bSLee Schermerhorn 		*mpol = new;
2879095f1fc4SLee Schermerhorn 	return err;
2880095f1fc4SLee Schermerhorn }
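/*
 * Illustrative examples, not part of this file, of strings accepted by
 * mpol_parse_str() above, e.g. via the tmpfs "mpol=" mount option:
 *
 *	default
 *	prefer=static:2
 *	bind:0,2
 *	interleave=relative:0-3
 *	local
 *
 * A nodelist is mandatory for "bind", must name exactly one node for
 * "prefer", and must be absent for "local" and "default".
 */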
2881095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2882095f1fc4SLee Schermerhorn 
288371fe804bSLee Schermerhorn /**
288471fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
288571fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
288671fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
288771fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
288871fe804bSLee Schermerhorn  *
2889948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2890948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2891948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
28921a75a6c8SChristoph Lameter  */
2893948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
28941a75a6c8SChristoph Lameter {
28951a75a6c8SChristoph Lameter 	char *p = buffer;
2896948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
2897948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
2898948927eeSDavid Rientjes 	unsigned short flags = 0;
28991a75a6c8SChristoph Lameter 
29008790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2901bea904d5SLee Schermerhorn 		mode = pol->mode;
2902948927eeSDavid Rientjes 		flags = pol->flags;
2903948927eeSDavid Rientjes 	}
2904bea904d5SLee Schermerhorn 
29051a75a6c8SChristoph Lameter 	switch (mode) {
29061a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
29071a75a6c8SChristoph Lameter 		break;
29081a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
2909fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2910f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
291153f2556bSLee Schermerhorn 		else
2912fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
29131a75a6c8SChristoph Lameter 		break;
29141a75a6c8SChristoph Lameter 	case MPOL_BIND:
29151a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
29161a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
29171a75a6c8SChristoph Lameter 		break;
29181a75a6c8SChristoph Lameter 	default:
2919948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
2920948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
2921948927eeSDavid Rientjes 		return;
29221a75a6c8SChristoph Lameter 	}
29231a75a6c8SChristoph Lameter 
2924b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
29251a75a6c8SChristoph Lameter 
2926fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2927948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
2928f5b087b5SDavid Rientjes 
29292291990aSLee Schermerhorn 		/*
29302291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
29312291990aSLee Schermerhorn 		 */
2932f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
29332291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
29342291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
29352291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2936f5b087b5SDavid Rientjes 	}
2937f5b087b5SDavid Rientjes 
29389e763e0fSTejun Heo 	if (!nodes_empty(nodes))
29399e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
29409e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
29411a75a6c8SChristoph Lameter }
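/*
 * Illustrative examples, not part of this file, of the strings produced by
 * mpol_to_str() above, as they appear e.g. in /proc/<pid>/numa_maps:
 *
 *	default
 *	prefer:1
 *	bind=static:0-3
 *	interleave:0,2
 *	local
 */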
2942