xref: /linux/mm/mempolicy.c (revision ec4858e07ed62eceb60bac2ded3c0d6e2471c66b)
146aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
68bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support the following policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA-based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy a process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                to the last. It would be better if bind truly restricted
268bccd85fSChristoph Lameter  *                the allocation to the given memory nodes instead.
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred       Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case, NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non-default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
34b27abaccSDave Hansen  * preferred many Try a set of nodes first before normal fallback. This is
35b27abaccSDave Hansen  *                similar to preferred, without the NUMA_NO_NODE special case.
36b27abaccSDave Hansen  *
371da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
381da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
391da177e4SLinus Torvalds  *		  in a NUMA-aware kernel and still does by, ahem, default.
401da177e4SLinus Torvalds  *
411da177e4SLinus Torvalds  * The process policy is applied to most non-interrupt memory allocations
421da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
431da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
441da177e4SLinus Torvalds  * allocations for a VMA in the VM.
451da177e4SLinus Torvalds  *
461da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
471da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When the process
481da177e4SLinus Torvalds  * policy is used, it is not remembered across swap out/swap in.
491da177e4SLinus Torvalds  *
501da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
511da177e4SLinus Torvalds  * requesting a lower zone just use the default policy. This implies that
521da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
531da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
541da177e4SLinus Torvalds  *
551da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
561da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
571da177e4SLinus Torvalds  */
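
/*
 * Illustrative userspace sketch (editor's example, not part of the kernel
 * sources): the modes above are selected with set_mempolicy(2) or mbind(2).
 * This assumes libnuma's <numaif.h> for the syscall wrappers and interleaves
 * the calling task's future allocations across nodes 0 and 1:
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *			  sizeof(nodemask) * 8) == -1)
 *		perror("set_mempolicy");
 */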
581da177e4SLinus Torvalds 
591da177e4SLinus Torvalds /* Notebook:
601da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
611da177e4SLinus Torvalds    object
621da177e4SLinus Torvalds    statistics for bigpages
631da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
641da177e4SLinus Torvalds    first item above.
651da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
661da177e4SLinus Torvalds    grows down?
671da177e4SLinus Torvalds    make bind policy root only? It can trigger oom much faster and the
681da177e4SLinus Torvalds    kernel does not always handle that gracefully.
691da177e4SLinus Torvalds */
701da177e4SLinus Torvalds 
71b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72b1de0d13SMitchel Humpherys 
731da177e4SLinus Torvalds #include <linux/mempolicy.h>
74a520110eSChristoph Hellwig #include <linux/pagewalk.h>
751da177e4SLinus Torvalds #include <linux/highmem.h>
761da177e4SLinus Torvalds #include <linux/hugetlb.h>
771da177e4SLinus Torvalds #include <linux/kernel.h>
781da177e4SLinus Torvalds #include <linux/sched.h>
796e84f315SIngo Molnar #include <linux/sched/mm.h>
806a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
81f719ff9bSIngo Molnar #include <linux/sched/task.h>
821da177e4SLinus Torvalds #include <linux/nodemask.h>
831da177e4SLinus Torvalds #include <linux/cpuset.h>
841da177e4SLinus Torvalds #include <linux/slab.h>
851da177e4SLinus Torvalds #include <linux/string.h>
86b95f1b31SPaul Gortmaker #include <linux/export.h>
87b488893aSPavel Emelyanov #include <linux/nsproxy.h>
881da177e4SLinus Torvalds #include <linux/interrupt.h>
891da177e4SLinus Torvalds #include <linux/init.h>
901da177e4SLinus Torvalds #include <linux/compat.h>
9131367466SOtto Ebeling #include <linux/ptrace.h>
92dc9aa5b9SChristoph Lameter #include <linux/swap.h>
931a75a6c8SChristoph Lameter #include <linux/seq_file.h>
941a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
95b20a3503SChristoph Lameter #include <linux/migrate.h>
9662b61f61SHugh Dickins #include <linux/ksm.h>
9795a402c3SChristoph Lameter #include <linux/rmap.h>
9886c3a764SDavid Quigley #include <linux/security.h>
99dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
100095f1fc4SLee Schermerhorn #include <linux/ctype.h>
1016d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
102b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
103b1de0d13SMitchel Humpherys #include <linux/printk.h>
104c8633798SNaoya Horiguchi #include <linux/swapops.h>
105dc9aa5b9SChristoph Lameter 
1061da177e4SLinus Torvalds #include <asm/tlbflush.h>
1077c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1081da177e4SLinus Torvalds 
10962695a84SNick Piggin #include "internal.h"
11062695a84SNick Piggin 
11138e35860SChristoph Lameter /* Internal flags */
112dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous vmas */
11338e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
114dc9aa5b9SChristoph Lameter 
115fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
116fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1171da177e4SLinus Torvalds 
1181da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1191da177e4SLinus Torvalds    policied. */
1206267276fSChristoph Lameter enum zone_type policy_zone = 0;
1211da177e4SLinus Torvalds 
122bea904d5SLee Schermerhorn /*
123bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
124bea904d5SLee Schermerhorn  */
125e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1261da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
1277858d7bcSFeng Tang 	.mode = MPOL_LOCAL,
1281da177e4SLinus Torvalds };
1291da177e4SLinus Torvalds 
1305606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1315606e387SMel Gorman 
132b2ca916cSDan Williams /**
133b2ca916cSDan Williams  * numa_map_to_online_node - Find closest online node
134f6e92f40SKrzysztof Kozlowski  * @node: Node id to start the search
135b2ca916cSDan Williams  *
136b2ca916cSDan Williams  * Look up the closest online node by distance if @node is not online.
137dad5b023SRandy Dunlap  *
138dad5b023SRandy Dunlap  * Return: this @node if it is online, otherwise the closest node by distance
139b2ca916cSDan Williams  */
140b2ca916cSDan Williams int numa_map_to_online_node(int node)
141b2ca916cSDan Williams {
1424fcbe96eSDan Williams 	int min_dist = INT_MAX, dist, n, min_node;
143b2ca916cSDan Williams 
1444fcbe96eSDan Williams 	if (node == NUMA_NO_NODE || node_online(node))
1454fcbe96eSDan Williams 		return node;
146b2ca916cSDan Williams 
147b2ca916cSDan Williams 	min_node = node;
148b2ca916cSDan Williams 	for_each_online_node(n) {
149b2ca916cSDan Williams 		dist = node_distance(node, n);
150b2ca916cSDan Williams 		if (dist < min_dist) {
151b2ca916cSDan Williams 			min_dist = dist;
152b2ca916cSDan Williams 			min_node = n;
153b2ca916cSDan Williams 		}
154b2ca916cSDan Williams 	}
155b2ca916cSDan Williams 
156b2ca916cSDan Williams 	return min_node;
157b2ca916cSDan Williams }
158b2ca916cSDan Williams EXPORT_SYMBOL_GPL(numa_map_to_online_node);
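
/*
 * Example (editor's sketch): a typical caller maps a possibly-offline
 * device node to the nearest online node before allocating from it:
 *
 *	int nid = numa_map_to_online_node(dev_to_node(dev));
 *	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 0);
 */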
159b2ca916cSDan Williams 
16074d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1615606e387SMel Gorman {
1625606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
163f15ca78eSOleg Nesterov 	int node;
1645606e387SMel Gorman 
165f15ca78eSOleg Nesterov 	if (pol)
166f15ca78eSOleg Nesterov 		return pol;
1675606e387SMel Gorman 
168f15ca78eSOleg Nesterov 	node = numa_node_id();
1691da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1701da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
171f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
172f15ca78eSOleg Nesterov 		if (pol->mode)
173f15ca78eSOleg Nesterov 			return pol;
1741da6f0e1SJianguo Wu 	}
1755606e387SMel Gorman 
176f15ca78eSOleg Nesterov 	return &default_policy;
1775606e387SMel Gorman }
1785606e387SMel Gorman 
17937012946SDavid Rientjes static const struct mempolicy_operations {
18037012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
181213980c0SVlastimil Babka 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
18237012946SDavid Rientjes } mpol_ops[MPOL_MAX];
18337012946SDavid Rientjes 
184f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
185f5b087b5SDavid Rientjes {
1866d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1874c50bc01SDavid Rientjes }
1884c50bc01SDavid Rientjes 
1894c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1904c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1914c50bc01SDavid Rientjes {
1924c50bc01SDavid Rientjes 	nodemask_t tmp;
1934c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1944c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
195f5b087b5SDavid Rientjes }
196f5b087b5SDavid Rientjes 
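/*
 * Worked example for mpol_relative_nodemask() above (editor's note): with
 * *orig = {0,2} and *rel = {1,3}, nodes_weight(*rel) == 2, so nodes_fold()
 * maps both bits of *orig onto bit 0 (0 % 2 == 0 and 2 % 2 == 0), and
 * nodes_onto() then maps bit 0 to the first node of *rel, giving *ret = {1}.
 */
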
197be897d48SFeng Tang static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
19837012946SDavid Rientjes {
19937012946SDavid Rientjes 	if (nodes_empty(*nodes))
20037012946SDavid Rientjes 		return -EINVAL;
201269fbe72SBen Widawsky 	pol->nodes = *nodes;
20237012946SDavid Rientjes 	return 0;
20337012946SDavid Rientjes }
20437012946SDavid Rientjes 
20537012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
20637012946SDavid Rientjes {
2077858d7bcSFeng Tang 	if (nodes_empty(*nodes))
2087858d7bcSFeng Tang 		return -EINVAL;
209269fbe72SBen Widawsky 
210269fbe72SBen Widawsky 	nodes_clear(pol->nodes);
211269fbe72SBen Widawsky 	node_set(first_node(*nodes), pol->nodes);
21237012946SDavid Rientjes 	return 0;
21337012946SDavid Rientjes }
21437012946SDavid Rientjes 
21558568d2aSMiao Xie /*
21658568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
21758568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
2187858d7bcSFeng Tang  * parameter with respect to the policy mode and flags.
21958568d2aSMiao Xie  *
22058568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
221c1e8d7c6SMichel Lespinasse  * and mempolicy.  May also be called holding the mmap_lock for write.
22258568d2aSMiao Xie  */
2234bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2244bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
22558568d2aSMiao Xie {
22658568d2aSMiao Xie 	int ret;
22758568d2aSMiao Xie 
2287858d7bcSFeng Tang 	/*
2297858d7bcSFeng Tang 	 * Default (pol==NULL) and local memory policies are not
2307858d7bcSFeng Tang 	 * subject to any remapping. They also do not need any special
2317858d7bcSFeng Tang 	 * constructor.
2327858d7bcSFeng Tang 	 */
2337858d7bcSFeng Tang 	if (!pol || pol->mode == MPOL_LOCAL)
23458568d2aSMiao Xie 		return 0;
2357858d7bcSFeng Tang 
23601f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2374bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
23801f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
23958568d2aSMiao Xie 
24058568d2aSMiao Xie 	VM_BUG_ON(!nodes);
2417858d7bcSFeng Tang 
24258568d2aSMiao Xie 	if (pol->flags & MPOL_F_RELATIVE_NODES)
2434bfc4495SKAMEZAWA Hiroyuki 		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
24458568d2aSMiao Xie 	else
2454bfc4495SKAMEZAWA Hiroyuki 		nodes_and(nsc->mask2, *nodes, nsc->mask1);
2464bfc4495SKAMEZAWA Hiroyuki 
24758568d2aSMiao Xie 	if (mpol_store_user_nodemask(pol))
24858568d2aSMiao Xie 		pol->w.user_nodemask = *nodes;
24958568d2aSMiao Xie 	else
2507858d7bcSFeng Tang 		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
25158568d2aSMiao Xie 
2524bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
25358568d2aSMiao Xie 	return ret;
25458568d2aSMiao Xie }
25558568d2aSMiao Xie 
25658568d2aSMiao Xie /*
25758568d2aSMiao Xie  * This function just creates a new policy, does some checks and simple
25858568d2aSMiao Xie  * initialization. You must invoke mpol_set_nodemask() to set nodes.
25958568d2aSMiao Xie  */
260028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
261028fec41SDavid Rientjes 				  nodemask_t *nodes)
2621da177e4SLinus Torvalds {
2631da177e4SLinus Torvalds 	struct mempolicy *policy;
2641da177e4SLinus Torvalds 
265028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
26600ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
267140d5a49SPaul Mundt 
2683e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2693e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
27037012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
271d3a71033SLee Schermerhorn 		return NULL;
27237012946SDavid Rientjes 	}
2733e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2743e1f0645SDavid Rientjes 
2753e1f0645SDavid Rientjes 	/*
2763e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2773e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2783e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2793e1f0645SDavid Rientjes 	 */
2803e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2813e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2823e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2833e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2843e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2857858d7bcSFeng Tang 
2867858d7bcSFeng Tang 			mode = MPOL_LOCAL;
2873e1f0645SDavid Rientjes 		}
288479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
2898d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
2908d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
2918d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
292479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
2933e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2943e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2951da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2961da177e4SLinus Torvalds 	if (!policy)
2971da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2981da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
29945c4745aSLee Schermerhorn 	policy->mode = mode;
30037012946SDavid Rientjes 	policy->flags = flags;
301c6018b4bSAneesh Kumar K.V 	policy->home_node = NUMA_NO_NODE;
3023e1f0645SDavid Rientjes 
30337012946SDavid Rientjes 	return policy;
30437012946SDavid Rientjes }
30537012946SDavid Rientjes 
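/*
 * Editor's sketch of the two-step construction described above (error
 * handling abbreviated; see do_set_mempolicy() below for the real flow):
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(MPOL_BIND, 0, &nodes);
 *
 *	if (!IS_ERR_OR_NULL(new) && scratch) {
 *		task_lock(current);
 *		err = mpol_set_nodemask(new, &nodes, scratch);
 *		task_unlock(current);
 *	}
 *	NODEMASK_SCRATCH_FREE(scratch);
 */
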
30652cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
30752cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
30852cd3b07SLee Schermerhorn {
30952cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
31052cd3b07SLee Schermerhorn 		return;
31152cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
31252cd3b07SLee Schermerhorn }
31352cd3b07SLee Schermerhorn 
314213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
31537012946SDavid Rientjes {
31637012946SDavid Rientjes }
31737012946SDavid Rientjes 
318213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
3191d0d2680SDavid Rientjes {
3201d0d2680SDavid Rientjes 	nodemask_t tmp;
3211d0d2680SDavid Rientjes 
32237012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
32337012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
32437012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
32537012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3261d0d2680SDavid Rientjes 	else {
327269fbe72SBen Widawsky 		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
328213980c0SVlastimil Babka 								*nodes);
32929b190faSzhong jiang 		pol->w.cpuset_mems_allowed = *nodes;
3301d0d2680SDavid Rientjes 	}
33137012946SDavid Rientjes 
332708c1bbcSMiao Xie 	if (nodes_empty(tmp))
333708c1bbcSMiao Xie 		tmp = *nodes;
334708c1bbcSMiao Xie 
335269fbe72SBen Widawsky 	pol->nodes = tmp;
33637012946SDavid Rientjes }
33737012946SDavid Rientjes 
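/*
 * Worked example (editor's note): suppose the user originally asked for
 * nodes {0,1} and the cpuset now allows *nodes = {2,3}.  With
 * MPOL_F_STATIC_NODES the intersection is empty, so tmp falls back to
 * {2,3}; with MPOL_F_RELATIVE_NODES the user mask is remapped onto the
 * new set and likewise becomes {2,3}, but it will keep tracking future
 * cpuset changes.
 */
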
33837012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
339213980c0SVlastimil Babka 						const nodemask_t *nodes)
34037012946SDavid Rientjes {
34137012946SDavid Rientjes 	pol->w.cpuset_mems_allowed = *nodes;
3421d0d2680SDavid Rientjes }
34337012946SDavid Rientjes 
344708c1bbcSMiao Xie /*
345708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
346708c1bbcSMiao Xie  *
347c1e8d7c6SMichel Lespinasse  * Per-vma policies are protected by mmap_lock. Allocations using per-task
348213980c0SVlastimil Babka  * policies are protected by task->mems_allowed_seq to prevent a premature
349213980c0SVlastimil Babka  * OOM/allocation failure due to parallel nodemask modification.
350708c1bbcSMiao Xie  */
351213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
35237012946SDavid Rientjes {
35337012946SDavid Rientjes 	if (!pol)
35437012946SDavid Rientjes 		return;
3557858d7bcSFeng Tang 	if (!mpol_store_user_nodemask(pol) &&
35637012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
35737012946SDavid Rientjes 		return;
358708c1bbcSMiao Xie 
359213980c0SVlastimil Babka 	mpol_ops[pol->mode].rebind(pol, newmask);
3601d0d2680SDavid Rientjes }
3611d0d2680SDavid Rientjes 
3621d0d2680SDavid Rientjes /*
3631d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires task
3641d0d2680SDavid Rientjes  * pointer, and updates task mempolicy.
36558568d2aSMiao Xie  *
36658568d2aSMiao Xie  * Called with task's alloc_lock held.
3671d0d2680SDavid Rientjes  */
3681d0d2680SDavid Rientjes 
369213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3701d0d2680SDavid Rientjes {
371213980c0SVlastimil Babka 	mpol_rebind_policy(tsk->mempolicy, new);
3721d0d2680SDavid Rientjes }
3731d0d2680SDavid Rientjes 
3741d0d2680SDavid Rientjes /*
3751d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
3761d0d2680SDavid Rientjes  *
377c1e8d7c6SMichel Lespinasse  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
3781d0d2680SDavid Rientjes  */
3791d0d2680SDavid Rientjes 
3801d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
3811d0d2680SDavid Rientjes {
3821d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
3831d0d2680SDavid Rientjes 
384d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
3851d0d2680SDavid Rientjes 	for (vma = mm->mmap; vma; vma = vma->vm_next)
386213980c0SVlastimil Babka 		mpol_rebind_policy(vma->vm_policy, new);
387d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
3881d0d2680SDavid Rientjes }
3891d0d2680SDavid Rientjes 
39037012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
39137012946SDavid Rientjes 	[MPOL_DEFAULT] = {
39237012946SDavid Rientjes 		.rebind = mpol_rebind_default,
39337012946SDavid Rientjes 	},
39437012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
395be897d48SFeng Tang 		.create = mpol_new_nodemask,
39637012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
39737012946SDavid Rientjes 	},
39837012946SDavid Rientjes 	[MPOL_PREFERRED] = {
39937012946SDavid Rientjes 		.create = mpol_new_preferred,
40037012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
40137012946SDavid Rientjes 	},
40237012946SDavid Rientjes 	[MPOL_BIND] = {
403be897d48SFeng Tang 		.create = mpol_new_nodemask,
40437012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
40537012946SDavid Rientjes 	},
4067858d7bcSFeng Tang 	[MPOL_LOCAL] = {
4077858d7bcSFeng Tang 		.rebind = mpol_rebind_default,
4087858d7bcSFeng Tang 	},
409b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY] = {
410be897d48SFeng Tang 		.create = mpol_new_nodemask,
411b27abaccSDave Hansen 		.rebind = mpol_rebind_preferred,
412b27abaccSDave Hansen 	},
41337012946SDavid Rientjes };
41437012946SDavid Rientjes 
415a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
416fc301289SChristoph Lameter 				unsigned long flags);
4171a75a6c8SChristoph Lameter 
4186f4576e3SNaoya Horiguchi struct queue_pages {
4196f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4206f4576e3SNaoya Horiguchi 	unsigned long flags;
4216f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
422f18da660SLi Xinhai 	unsigned long start;
423f18da660SLi Xinhai 	unsigned long end;
424f18da660SLi Xinhai 	struct vm_area_struct *first;
4256f4576e3SNaoya Horiguchi };
4266f4576e3SNaoya Horiguchi 
42798094945SNaoya Horiguchi /*
42888aaa2a1SNaoya Horiguchi  * Check if the page's nid is in qp->nmask.
42988aaa2a1SNaoya Horiguchi  *
43088aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, the check is inverted: the
43188aaa2a1SNaoya Horiguchi  * page matches only if its nid is *not* in qp->nmask.
43288aaa2a1SNaoya Horiguchi  */
43388aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page,
43488aaa2a1SNaoya Horiguchi 					struct queue_pages *qp)
43588aaa2a1SNaoya Horiguchi {
43688aaa2a1SNaoya Horiguchi 	int nid = page_to_nid(page);
43788aaa2a1SNaoya Horiguchi 	unsigned long flags = qp->flags;
43888aaa2a1SNaoya Horiguchi 
43988aaa2a1SNaoya Horiguchi 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
44088aaa2a1SNaoya Horiguchi }
44188aaa2a1SNaoya Horiguchi 
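/*
 * E.g. (editor's note): a page on node 1 with qp->nmask = {0,1} matches
 * and is a candidate for queueing; with MPOL_MF_INVERT set, only pages
 * on nodes *outside* the mask match.
 */
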
442a7f40cfeSYang Shi /*
443d8835445SYang Shi  * queue_pages_pmd() has four possible return values:
444e5947d23SYang Shi  * 0 - pages are placed on the right node or queued successfully, or
445e5947d23SYang Shi  *     a special page is met, i.e. the huge zero page.
446d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
447d8835445SYang Shi  *     specified.
448d8835445SYang Shi  * 2 - THP was split.
449d8835445SYang Shi  * -EIO - the PMD is a migration entry, or only MPOL_MF_STRICT was
450d8835445SYang Shi  *        specified and an existing page was already on a node that does
451d8835445SYang Shi  *        not follow the policy.
452a7f40cfeSYang Shi  */
453c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
454c8633798SNaoya Horiguchi 				unsigned long end, struct mm_walk *walk)
455959a7e13SJules Irenge 	__releases(ptl)
456c8633798SNaoya Horiguchi {
457c8633798SNaoya Horiguchi 	int ret = 0;
458c8633798SNaoya Horiguchi 	struct page *page;
459c8633798SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
460c8633798SNaoya Horiguchi 	unsigned long flags;
461c8633798SNaoya Horiguchi 
462c8633798SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(*pmd))) {
463a7f40cfeSYang Shi 		ret = -EIO;
464c8633798SNaoya Horiguchi 		goto unlock;
465c8633798SNaoya Horiguchi 	}
466c8633798SNaoya Horiguchi 	page = pmd_page(*pmd);
467c8633798SNaoya Horiguchi 	if (is_huge_zero_page(page)) {
468c8633798SNaoya Horiguchi 		spin_unlock(ptl);
469e5947d23SYang Shi 		walk->action = ACTION_CONTINUE;
470c8633798SNaoya Horiguchi 		goto out;
471c8633798SNaoya Horiguchi 	}
472d8835445SYang Shi 	if (!queue_pages_required(page, qp))
473c8633798SNaoya Horiguchi 		goto unlock;
474c8633798SNaoya Horiguchi 
475c8633798SNaoya Horiguchi 	flags = qp->flags;
476c8633798SNaoya Horiguchi 	/* go to thp migration */
477a7f40cfeSYang Shi 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
478a53190a4SYang Shi 		if (!vma_migratable(walk->vma) ||
479a53190a4SYang Shi 		    migrate_page_add(page, qp->pagelist, flags)) {
480d8835445SYang Shi 			ret = 1;
481a7f40cfeSYang Shi 			goto unlock;
482a7f40cfeSYang Shi 		}
483a7f40cfeSYang Shi 	} else
484a7f40cfeSYang Shi 		ret = -EIO;
485c8633798SNaoya Horiguchi unlock:
486c8633798SNaoya Horiguchi 	spin_unlock(ptl);
487c8633798SNaoya Horiguchi out:
488c8633798SNaoya Horiguchi 	return ret;
489c8633798SNaoya Horiguchi }
490c8633798SNaoya Horiguchi 
49188aaa2a1SNaoya Horiguchi /*
49298094945SNaoya Horiguchi  * Scan through the pages, checking if they satisfy the given conditions,
49398094945SNaoya Horiguchi  * and move them to the pagelist if they do.
494d8835445SYang Shi  *
495d8835445SYang Shi  * queue_pages_pte_range() has three possible return values:
496e5947d23SYang Shi  * 0 - pages are placed on the right node or queued successfully, or
497e5947d23SYang Shi  *     a special page is met, i.e. the zero page.
498d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
499d8835445SYang Shi  *     specified.
500d8835445SYang Shi  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
501d8835445SYang Shi  *        on a node that does not follow the policy.
50298094945SNaoya Horiguchi  */
5036f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
5046f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
5051da177e4SLinus Torvalds {
5066f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
5076f4576e3SNaoya Horiguchi 	struct page *page;
5086f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5096f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
510c8633798SNaoya Horiguchi 	int ret;
511d8835445SYang Shi 	bool has_unmovable = false;
5123f088420SShijie Luo 	pte_t *pte, *mapped_pte;
513705e87c0SHugh Dickins 	spinlock_t *ptl;
514941150a3SHugh Dickins 
515c8633798SNaoya Horiguchi 	ptl = pmd_trans_huge_lock(pmd, vma);
516c8633798SNaoya Horiguchi 	if (ptl) {
517c8633798SNaoya Horiguchi 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
518d8835445SYang Shi 		if (ret != 2)
519a7f40cfeSYang Shi 			return ret;
520248db92dSKirill A. Shutemov 	}
521d8835445SYang Shi 	/* THP was split, fall through to pte walk */
52291612e0dSHugh Dickins 
523337d9abfSNaoya Horiguchi 	if (pmd_trans_unstable(pmd))
524337d9abfSNaoya Horiguchi 		return 0;
52594723aafSMichal Hocko 
5263f088420SShijie Luo 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5276f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
52891612e0dSHugh Dickins 		if (!pte_present(*pte))
52991612e0dSHugh Dickins 			continue;
5306aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
5316aab341eSLinus Torvalds 		if (!page)
53291612e0dSHugh Dickins 			continue;
533053837fcSNick Piggin 		/*
53462b61f61SHugh Dickins 		 * vm_normal_page() filters out zero pages, but there might
53562b61f61SHugh Dickins 		 * still be PageReserved pages to skip, perhaps in a VDSO.
536053837fcSNick Piggin 		 */
537b79bc0a0SHugh Dickins 		if (PageReserved(page))
538f4598c8bSChristoph Lameter 			continue;
53988aaa2a1SNaoya Horiguchi 		if (!queue_pages_required(page, qp))
54038e35860SChristoph Lameter 			continue;
541a7f40cfeSYang Shi 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
542d8835445SYang Shi 			/* MPOL_MF_STRICT must be specified if we get here */
543d8835445SYang Shi 			if (!vma_migratable(vma)) {
544d8835445SYang Shi 				has_unmovable = true;
545a7f40cfeSYang Shi 				break;
546d8835445SYang Shi 			}
547a53190a4SYang Shi 
548a53190a4SYang Shi 			/*
549a53190a4SYang Shi 			 * Do not abort immediately since there may be
550a53190a4SYang Shi 			 * temporarily off-LRU pages in the range.  We still
551a53190a4SYang Shi 			 * need to migrate the other LRU pages.
552a53190a4SYang Shi 			 */
553a53190a4SYang Shi 			if (migrate_page_add(page, qp->pagelist, flags))
554a53190a4SYang Shi 				has_unmovable = true;
555a7f40cfeSYang Shi 		} else
556a7f40cfeSYang Shi 			break;
5576f4576e3SNaoya Horiguchi 	}
5583f088420SShijie Luo 	pte_unmap_unlock(mapped_pte, ptl);
5596f4576e3SNaoya Horiguchi 	cond_resched();
560d8835445SYang Shi 
561d8835445SYang Shi 	if (has_unmovable)
562d8835445SYang Shi 		return 1;
563d8835445SYang Shi 
564a7f40cfeSYang Shi 	return addr != end ? -EIO : 0;
56591612e0dSHugh Dickins }
56691612e0dSHugh Dickins 
5676f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
5686f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5696f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
570e2d8cf40SNaoya Horiguchi {
571dcf17635SLi Xinhai 	int ret = 0;
572e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5736f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
574dcf17635SLi Xinhai 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
575e2d8cf40SNaoya Horiguchi 	struct page *page;
576cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
577d4c54919SNaoya Horiguchi 	pte_t entry;
578e2d8cf40SNaoya Horiguchi 
5796f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5806f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
581d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
582d4c54919SNaoya Horiguchi 		goto unlock;
583d4c54919SNaoya Horiguchi 	page = pte_page(entry);
58488aaa2a1SNaoya Horiguchi 	if (!queue_pages_required(page, qp))
585e2d8cf40SNaoya Horiguchi 		goto unlock;
586dcf17635SLi Xinhai 
587dcf17635SLi Xinhai 	if (flags == MPOL_MF_STRICT) {
588dcf17635SLi Xinhai 		/*
589dcf17635SLi Xinhai 		 * STRICT alone means only detecting misplaced pages and no
590dcf17635SLi Xinhai 		 * need to further check other vmas.
591dcf17635SLi Xinhai 		 */
592dcf17635SLi Xinhai 		ret = -EIO;
593dcf17635SLi Xinhai 		goto unlock;
594dcf17635SLi Xinhai 	}
595dcf17635SLi Xinhai 
596dcf17635SLi Xinhai 	if (!vma_migratable(walk->vma)) {
597dcf17635SLi Xinhai 		/*
598dcf17635SLi Xinhai 		 * Must be STRICT with MOVE*, otherwise .test_walk() would
599dcf17635SLi Xinhai 		 * have stopped walking the current vma.
600dcf17635SLi Xinhai 		 * Detect the misplaced page, but allow migrating pages which
601dcf17635SLi Xinhai 		 * have already been queued.
602dcf17635SLi Xinhai 		 */
603dcf17635SLi Xinhai 		ret = 1;
604dcf17635SLi Xinhai 		goto unlock;
605dcf17635SLi Xinhai 	}
606dcf17635SLi Xinhai 
607e2d8cf40SNaoya Horiguchi 	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
608e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
609dcf17635SLi Xinhai 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
610dcf17635SLi Xinhai 		if (!isolate_huge_page(page, qp->pagelist) &&
611dcf17635SLi Xinhai 			(flags & MPOL_MF_STRICT))
612dcf17635SLi Xinhai 			/*
613dcf17635SLi Xinhai 			 * Failed to isolate this page, but allow migrating
614dcf17635SLi Xinhai 			 * pages which have already been queued.
615dcf17635SLi Xinhai 			 */
616dcf17635SLi Xinhai 			ret = 1;
617dcf17635SLi Xinhai 	}
618e2d8cf40SNaoya Horiguchi unlock:
619cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
620e2d8cf40SNaoya Horiguchi #else
621e2d8cf40SNaoya Horiguchi 	BUG();
622e2d8cf40SNaoya Horiguchi #endif
623dcf17635SLi Xinhai 	return ret;
6241da177e4SLinus Torvalds }
6251da177e4SLinus Torvalds 
6265877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
627b24f53a0SLee Schermerhorn /*
6284b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses to be inaccessible.
6294b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
6304b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
6314b10e7d5SMel Gorman  *
6324b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
6334b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
6344b10e7d5SMel Gorman  * changes to the core.
635b24f53a0SLee Schermerhorn  */
6364b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
6374b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
638b24f53a0SLee Schermerhorn {
6394b10e7d5SMel Gorman 	unsigned long nr_updated;
640b24f53a0SLee Schermerhorn 
64158705444SPeter Xu 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
64203c5a6e1SMel Gorman 	if (nr_updated)
64303c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
644b24f53a0SLee Schermerhorn 
6454b10e7d5SMel Gorman 	return nr_updated;
646b24f53a0SLee Schermerhorn }
647b24f53a0SLee Schermerhorn #else
648b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
649b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
650b24f53a0SLee Schermerhorn {
651b24f53a0SLee Schermerhorn 	return 0;
652b24f53a0SLee Schermerhorn }
6535877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
654b24f53a0SLee Schermerhorn 
6556f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
6566f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
6571da177e4SLinus Torvalds {
6586f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
6596f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
6605b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
6616f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
662dc9aa5b9SChristoph Lameter 
663a18b3ac2SLi Xinhai 	/* range check first */
664ce33135cSMiaohe Lin 	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
665f18da660SLi Xinhai 
666f18da660SLi Xinhai 	if (!qp->first) {
667f18da660SLi Xinhai 		qp->first = vma;
668f18da660SLi Xinhai 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
669f18da660SLi Xinhai 			(qp->start < vma->vm_start))
670f18da660SLi Xinhai 			/* hole at head side of range */
671a18b3ac2SLi Xinhai 			return -EFAULT;
672a18b3ac2SLi Xinhai 	}
673f18da660SLi Xinhai 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
674f18da660SLi Xinhai 		((vma->vm_end < qp->end) &&
675f18da660SLi Xinhai 		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
676f18da660SLi Xinhai 		/* hole at middle or tail of range */
677f18da660SLi Xinhai 		return -EFAULT;
678a18b3ac2SLi Xinhai 
679a7f40cfeSYang Shi 	/*
680a7f40cfeSYang Shi 	 * Need check MPOL_MF_STRICT to return -EIO if possible
681a7f40cfeSYang Shi 	 * regardless of vma_migratable
682a7f40cfeSYang Shi 	 */
683a7f40cfeSYang Shi 	if (!vma_migratable(vma) &&
684a7f40cfeSYang Shi 	    !(flags & MPOL_MF_STRICT))
68548684a65SNaoya Horiguchi 		return 1;
68648684a65SNaoya Horiguchi 
6875b952b3cSAndi Kleen 	if (endvma > end)
6885b952b3cSAndi Kleen 		endvma = end;
689b24f53a0SLee Schermerhorn 
690b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
6912c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
6923122e80eSAnshuman Khandual 		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
6934355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
694b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
6956f4576e3SNaoya Horiguchi 		return 1;
696b24f53a0SLee Schermerhorn 	}
697b24f53a0SLee Schermerhorn 
6986f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
699a7f40cfeSYang Shi 	if (flags & MPOL_MF_VALID)
7006f4576e3SNaoya Horiguchi 		return 0;
7016f4576e3SNaoya Horiguchi 	return 1;
7026f4576e3SNaoya Horiguchi }
703b24f53a0SLee Schermerhorn 
7047b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = {
7057b86ac33SChristoph Hellwig 	.hugetlb_entry		= queue_pages_hugetlb,
7067b86ac33SChristoph Hellwig 	.pmd_entry		= queue_pages_pte_range,
7077b86ac33SChristoph Hellwig 	.test_walk		= queue_pages_test_walk,
7087b86ac33SChristoph Hellwig };
7097b86ac33SChristoph Hellwig 
7106f4576e3SNaoya Horiguchi /*
7116f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
7126f4576e3SNaoya Horiguchi  *
7136f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
7146f4576e3SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued on the pagelist, which
715d8835445SYang Shi  * is passed via @private.
716d8835445SYang Shi  *
717d8835445SYang Shi  * queue_pages_range() has three possible return values:
718d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
719d8835445SYang Shi  *     specified.
720d8835445SYang Shi  * 0 - queue pages successfully or no misplaced page.
721a85dfc30SYang Shi  * errno - e.g. misplaced pages with MPOL_MF_STRICT specified (-EIO) or the
722a85dfc30SYang Shi  *         memory range specified by nodemask and maxnode points outside
723a85dfc30SYang Shi  *         your accessible address space (-EFAULT)
7246f4576e3SNaoya Horiguchi  */
7256f4576e3SNaoya Horiguchi static int
7266f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
7276f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
7286f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
7296f4576e3SNaoya Horiguchi {
730f18da660SLi Xinhai 	int err;
7316f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
7326f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
7336f4576e3SNaoya Horiguchi 		.flags = flags,
7346f4576e3SNaoya Horiguchi 		.nmask = nodes,
735f18da660SLi Xinhai 		.start = start,
736f18da660SLi Xinhai 		.end = end,
737f18da660SLi Xinhai 		.first = NULL,
7386f4576e3SNaoya Horiguchi 	};
7396f4576e3SNaoya Horiguchi 
740f18da660SLi Xinhai 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
741f18da660SLi Xinhai 
742f18da660SLi Xinhai 	if (!qp.first)
743f18da660SLi Xinhai 		/* whole range in hole */
744f18da660SLi Xinhai 		err = -EFAULT;
745f18da660SLi Xinhai 
746f18da660SLi Xinhai 	return err;
7471da177e4SLinus Torvalds }
7481da177e4SLinus Torvalds 
749869833f2SKOSAKI Motohiro /*
750869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
751c1e8d7c6SMichel Lespinasse  * This must be called with the mmap_lock held for writing.
752869833f2SKOSAKI Motohiro  */
753869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
754869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
7558d34694cSKOSAKI Motohiro {
756869833f2SKOSAKI Motohiro 	int err;
757869833f2SKOSAKI Motohiro 	struct mempolicy *old;
758869833f2SKOSAKI Motohiro 	struct mempolicy *new;
7598d34694cSKOSAKI Motohiro 
7608d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
7618d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
7628d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
7638d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
7648d34694cSKOSAKI Motohiro 
765869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
766869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
767869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
768869833f2SKOSAKI Motohiro 
769869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
7708d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
771869833f2SKOSAKI Motohiro 		if (err)
772869833f2SKOSAKI Motohiro 			goto err_out;
7738d34694cSKOSAKI Motohiro 	}
774869833f2SKOSAKI Motohiro 
775869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
776c1e8d7c6SMichel Lespinasse 	vma->vm_policy = new; /* protected by mmap_lock */
777869833f2SKOSAKI Motohiro 	mpol_put(old);
778869833f2SKOSAKI Motohiro 
779869833f2SKOSAKI Motohiro 	return 0;
780869833f2SKOSAKI Motohiro  err_out:
781869833f2SKOSAKI Motohiro 	mpol_put(new);
7828d34694cSKOSAKI Motohiro 	return err;
7838d34694cSKOSAKI Motohiro }
7848d34694cSKOSAKI Motohiro 
7851da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
7869d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
7879d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
7881da177e4SLinus Torvalds {
7899d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
7909d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
7919d8cebd4SKOSAKI Motohiro 	int err = 0;
792e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
7939d8cebd4SKOSAKI Motohiro 	unsigned long vmstart;
7949d8cebd4SKOSAKI Motohiro 	unsigned long vmend;
7951da177e4SLinus Torvalds 
796097d5910SLinus Torvalds 	vma = find_vma(mm, start);
797f18da660SLi Xinhai 	VM_BUG_ON(!vma);
7989d8cebd4SKOSAKI Motohiro 
799097d5910SLinus Torvalds 	prev = vma->vm_prev;
800e26a5114SKOSAKI Motohiro 	if (start > vma->vm_start)
801e26a5114SKOSAKI Motohiro 		prev = vma;
802e26a5114SKOSAKI Motohiro 
8034e090600SHugh Dickins 	for (; vma && vma->vm_start < end; prev = vma, vma = vma->vm_next) {
8049d8cebd4SKOSAKI Motohiro 		vmstart = max(start, vma->vm_start);
8059d8cebd4SKOSAKI Motohiro 		vmend   = min(end, vma->vm_end);
8069d8cebd4SKOSAKI Motohiro 
807e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
808e26a5114SKOSAKI Motohiro 			continue;
809e26a5114SKOSAKI Motohiro 
810e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
811e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
8129d8cebd4SKOSAKI Motohiro 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
813e26a5114SKOSAKI Motohiro 				 vma->anon_vma, vma->vm_file, pgoff,
8149a10064fSColin Cross 				 new_pol, vma->vm_userfaultfd_ctx,
8155c26f6acSSuren Baghdasaryan 				 anon_vma_name(vma));
8169d8cebd4SKOSAKI Motohiro 		if (prev) {
8179d8cebd4SKOSAKI Motohiro 			vma = prev;
8183964acd0SOleg Nesterov 			goto replace;
8191da177e4SLinus Torvalds 		}
8209d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
8219d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
8229d8cebd4SKOSAKI Motohiro 			if (err)
8239d8cebd4SKOSAKI Motohiro 				goto out;
8249d8cebd4SKOSAKI Motohiro 		}
8259d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
8269d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmend, 0);
8279d8cebd4SKOSAKI Motohiro 			if (err)
8289d8cebd4SKOSAKI Motohiro 				goto out;
8299d8cebd4SKOSAKI Motohiro 		}
8303964acd0SOleg Nesterov  replace:
831869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
8329d8cebd4SKOSAKI Motohiro 		if (err)
8339d8cebd4SKOSAKI Motohiro 			goto out;
8349d8cebd4SKOSAKI Motohiro 	}
8359d8cebd4SKOSAKI Motohiro 
8369d8cebd4SKOSAKI Motohiro  out:
8371da177e4SLinus Torvalds 	return err;
8381da177e4SLinus Torvalds }
8391da177e4SLinus Torvalds 
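/*
 * Worked example (editor's note): applying a new policy to a range that
 * covers only the middle of one vma splits it at vmstart and again at
 * vmend, leaving three vmas of which only the middle one receives the
 * new policy; vma_merge() above re-merges neighbours whenever all
 * attributes, including the policy, end up equal.
 */
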
8401da177e4SLinus Torvalds /* Set the process memory policy */
841028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
842028fec41SDavid Rientjes 			     nodemask_t *nodes)
8431da177e4SLinus Torvalds {
84458568d2aSMiao Xie 	struct mempolicy *new, *old;
8454bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
84658568d2aSMiao Xie 	int ret;
8471da177e4SLinus Torvalds 
8484bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
8494bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
850f4e53d91SLee Schermerhorn 
8514bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
8524bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
8534bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
8544bfc4495SKAMEZAWA Hiroyuki 		goto out;
8554bfc4495SKAMEZAWA Hiroyuki 	}
8562c7c3a7dSOleg Nesterov 
8574bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
85858568d2aSMiao Xie 	if (ret) {
85958568d2aSMiao Xie 		mpol_put(new);
8604bfc4495SKAMEZAWA Hiroyuki 		goto out;
86158568d2aSMiao Xie 	}
86278b132e9SWei Yang 	task_lock(current);
86358568d2aSMiao Xie 	old = current->mempolicy;
8641da177e4SLinus Torvalds 	current->mempolicy = new;
86545816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
86645816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
86758568d2aSMiao Xie 	task_unlock(current);
86858568d2aSMiao Xie 	mpol_put(old);
8694bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8704bfc4495SKAMEZAWA Hiroyuki out:
8714bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
8724bfc4495SKAMEZAWA Hiroyuki 	return ret;
8731da177e4SLinus Torvalds }
8741da177e4SLinus Torvalds 
875bea904d5SLee Schermerhorn /*
876bea904d5SLee Schermerhorn  * Return the nodemask of a policy for a get_mempolicy() query
87758568d2aSMiao Xie  *
87858568d2aSMiao Xie  * Called with task's alloc_lock held
879bea904d5SLee Schermerhorn  */
880bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8811da177e4SLinus Torvalds {
882dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
883bea904d5SLee Schermerhorn 	if (p == &default_policy)
884bea904d5SLee Schermerhorn 		return;
885bea904d5SLee Schermerhorn 
88645c4745aSLee Schermerhorn 	switch (p->mode) {
88719770b32SMel Gorman 	case MPOL_BIND:
8881da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
889269fbe72SBen Widawsky 	case MPOL_PREFERRED:
890b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
891269fbe72SBen Widawsky 		*nodes = p->nodes;
8921da177e4SLinus Torvalds 		break;
8937858d7bcSFeng Tang 	case MPOL_LOCAL:
8947858d7bcSFeng Tang 		/* return empty node mask for local allocation */
8957858d7bcSFeng Tang 		break;
8961da177e4SLinus Torvalds 	default:
8971da177e4SLinus Torvalds 		BUG();
8981da177e4SLinus Torvalds 	}
8991da177e4SLinus Torvalds }
9001da177e4SLinus Torvalds 
9013b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr)
9021da177e4SLinus Torvalds {
903ba841078SPeter Xu 	struct page *p = NULL;
904f728b9c4SJohn Hubbard 	int ret;
9051da177e4SLinus Torvalds 
906f728b9c4SJohn Hubbard 	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
907f728b9c4SJohn Hubbard 	if (ret > 0) {
908f728b9c4SJohn Hubbard 		ret = page_to_nid(p);
9091da177e4SLinus Torvalds 		put_page(p);
9101da177e4SLinus Torvalds 	}
911f728b9c4SJohn Hubbard 	return ret;
9121da177e4SLinus Torvalds }
9131da177e4SLinus Torvalds 
9141da177e4SLinus Torvalds /* Retrieve NUMA policy */
915dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
9161da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
9171da177e4SLinus Torvalds {
9188bccd85fSChristoph Lameter 	int err;
9191da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
9201da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
9213b9aadf7SAndrea Arcangeli 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
9221da177e4SLinus Torvalds 
923754af6f5SLee Schermerhorn 	if (flags &
924754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
9251da177e4SLinus Torvalds 		return -EINVAL;
926754af6f5SLee Schermerhorn 
927754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
928754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
929754af6f5SLee Schermerhorn 			return -EINVAL;
930754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
93158568d2aSMiao Xie 		task_lock(current);
932754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
93358568d2aSMiao Xie 		task_unlock(current);
934754af6f5SLee Schermerhorn 		return 0;
935754af6f5SLee Schermerhorn 	}
936754af6f5SLee Schermerhorn 
9371da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
938bea904d5SLee Schermerhorn 		/*
939bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
940bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
941bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
942bea904d5SLee Schermerhorn 		 */
943d8ed45c5SMichel Lespinasse 		mmap_read_lock(mm);
94433e3575cSLiam Howlett 		vma = vma_lookup(mm, addr);
9451da177e4SLinus Torvalds 		if (!vma) {
946d8ed45c5SMichel Lespinasse 			mmap_read_unlock(mm);
9471da177e4SLinus Torvalds 			return -EFAULT;
9481da177e4SLinus Torvalds 		}
9491da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
9501da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
9511da177e4SLinus Torvalds 		else
9521da177e4SLinus Torvalds 			pol = vma->vm_policy;
9531da177e4SLinus Torvalds 	} else if (addr)
9541da177e4SLinus Torvalds 		return -EINVAL;
9551da177e4SLinus Torvalds 
9561da177e4SLinus Torvalds 	if (!pol)
957bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9581da177e4SLinus Torvalds 
9591da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9601da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
9613b9aadf7SAndrea Arcangeli 			/*
962f728b9c4SJohn Hubbard 			 * Take a refcount on the mpol, because we are about to
963f728b9c4SJohn Hubbard 			 * drop the mmap_lock, after which only "pol" remains
964f728b9c4SJohn Hubbard 			 * valid and "vma" is stale.
9653b9aadf7SAndrea Arcangeli 			 */
9663b9aadf7SAndrea Arcangeli 			pol_refcount = pol;
9673b9aadf7SAndrea Arcangeli 			vma = NULL;
9683b9aadf7SAndrea Arcangeli 			mpol_get(pol);
969f728b9c4SJohn Hubbard 			mmap_read_unlock(mm);
9703b9aadf7SAndrea Arcangeli 			err = lookup_node(mm, addr);
9711da177e4SLinus Torvalds 			if (err < 0)
9721da177e4SLinus Torvalds 				goto out;
9738bccd85fSChristoph Lameter 			*policy = err;
9741da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
97545c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
976269fbe72SBen Widawsky 			*policy = next_node_in(current->il_prev, pol->nodes);
9771da177e4SLinus Torvalds 		} else {
9781da177e4SLinus Torvalds 			err = -EINVAL;
9791da177e4SLinus Torvalds 			goto out;
9801da177e4SLinus Torvalds 		}
981bea904d5SLee Schermerhorn 	} else {
982bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
983bea904d5SLee Schermerhorn 						pol->mode;
984d79df630SDavid Rientjes 		/*
985d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
986d79df630SDavid Rientjes 		 * the policy to userspace.
987d79df630SDavid Rientjes 		 */
988d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
989bea904d5SLee Schermerhorn 	}
9901da177e4SLinus Torvalds 
9911da177e4SLinus Torvalds 	err = 0;
99258568d2aSMiao Xie 	if (nmask) {
993c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
994c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
995c6b6ef8bSLee Schermerhorn 		} else {
99658568d2aSMiao Xie 			task_lock(current);
997bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
99858568d2aSMiao Xie 			task_unlock(current);
99958568d2aSMiao Xie 		}
1000c6b6ef8bSLee Schermerhorn 	}
10011da177e4SLinus Torvalds 
10021da177e4SLinus Torvalds  out:
100352cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
10041da177e4SLinus Torvalds 	if (vma)
1005d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
10063b9aadf7SAndrea Arcangeli 	if (pol_refcount)
10073b9aadf7SAndrea Arcangeli 		mpol_put(pol_refcount);
10081da177e4SLinus Torvalds 	return err;
10091da177e4SLinus Torvalds }
10101da177e4SLinus Torvalds 
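/*
 * Illustrative userspace counterpart (editor's sketch, assumes libnuma's
 * <numaif.h>): query which policy governs a given mapped address:
 *
 *	int mode;
 *	unsigned long nodemask;
 *
 *	if (get_mempolicy(&mode, &nodemask, sizeof(nodemask) * 8,
 *			  addr, MPOL_F_ADDR) == 0)
 *		printf("mode %d\n", mode);
 */
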
1011b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
10128bccd85fSChristoph Lameter /*
1013c8633798SNaoya Horiguchi  * Page migration; THP tail pages can be passed.
10146ce3c4c0SChristoph Lameter  */
1015a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
1016fc301289SChristoph Lameter 				unsigned long flags)
10176ce3c4c0SChristoph Lameter {
1018c8633798SNaoya Horiguchi 	struct page *head = compound_head(page);
10196ce3c4c0SChristoph Lameter 	/*
1020fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
10216ce3c4c0SChristoph Lameter 	 */
1022c8633798SNaoya Horiguchi 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1023c8633798SNaoya Horiguchi 		if (!isolate_lru_page(head)) {
1024c8633798SNaoya Horiguchi 			list_add_tail(&head->lru, pagelist);
1025c8633798SNaoya Horiguchi 			mod_node_page_state(page_pgdat(head),
10269de4f22aSHuang Ying 				NR_ISOLATED_ANON + page_is_file_lru(head),
10276c357848SMatthew Wilcox (Oracle) 				thp_nr_pages(head));
1028a53190a4SYang Shi 		} else if (flags & MPOL_MF_STRICT) {
1029a53190a4SYang Shi 			/*
1030a53190a4SYang Shi 			 * Non-movable page may reach here.  And, there may be
1031a53190a4SYang Shi 			 * temporary off LRU pages or non-LRU movable pages.
1032a53190a4SYang Shi 			 * Treat them as unmovable pages since they can't be
1033a53190a4SYang Shi 			 * isolated, so they can't be moved at the moment.  It
1034a53190a4SYang Shi 			 * should return -EIO for this case too.
1035a53190a4SYang Shi 			 */
1036a53190a4SYang Shi 			return -EIO;
103762695a84SNick Piggin 		}
103862695a84SNick Piggin 	}
1039a53190a4SYang Shi 
1040a53190a4SYang Shi 	return 0;
10416ce3c4c0SChristoph Lameter }
10426ce3c4c0SChristoph Lameter 
10436ce3c4c0SChristoph Lameter /*
10447e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10457e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10467e2ab150SChristoph Lameter  */
1047dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1048dbcb0f19SAdrian Bunk 			   int flags)
10497e2ab150SChristoph Lameter {
10507e2ab150SChristoph Lameter 	nodemask_t nmask;
10517e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10527e2ab150SChristoph Lameter 	int err = 0;
1053a0976311SJoonsoo Kim 	struct migration_target_control mtc = {
1054a0976311SJoonsoo Kim 		.nid = dest,
1055a0976311SJoonsoo Kim 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1056a0976311SJoonsoo Kim 	};
10577e2ab150SChristoph Lameter 
10587e2ab150SChristoph Lameter 	nodes_clear(nmask);
10597e2ab150SChristoph Lameter 	node_set(source, nmask);
10607e2ab150SChristoph Lameter 
106108270807SMinchan Kim 	/*
106208270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
106308270807SMinchan Kim 	 * need migration.  Between passing in the full user address
106408270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
106508270807SMinchan Kim 	 */
106608270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
106798094945SNaoya Horiguchi 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
10687e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10697e2ab150SChristoph Lameter 
1070cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1071a0976311SJoonsoo Kim 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
10725ac95884SYang Shi 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1073cf608ac1SMinchan Kim 		if (err)
1074e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1075cf608ac1SMinchan Kim 	}
107695a402c3SChristoph Lameter 
10777e2ab150SChristoph Lameter 	return err;
10787e2ab150SChristoph Lameter }
10797e2ab150SChristoph Lameter 
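/*
 * Userspace reaches the migration code below through migrate_pages(2)
 * (editor's sketch, assumes libnuma's <numaif.h>): move a task's pages
 * from node 0 to node 1:
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *
 *	if (migrate_pages(pid, sizeof(from) * 8, &from, &to) == -1)
 *		perror("migrate_pages");
 */
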
10807e2ab150SChristoph Lameter /*
10817e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10827e2ab150SChristoph Lameter  * layout as much as possible.
108339743889SChristoph Lameter  *
108439743889SChristoph Lameter  * Returns the number of pages that could not be moved.
108539743889SChristoph Lameter  */
10860ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
10870ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
108839743889SChristoph Lameter {
10897e2ab150SChristoph Lameter 	int busy = 0;
1090f555befdSJan Stancek 	int err = 0;
10917e2ab150SChristoph Lameter 	nodemask_t tmp;
109239743889SChristoph Lameter 
1093361a2a22SMinchan Kim 	lru_cache_disable();
10940aedadf9SChristoph Lameter 
1095d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
1096d4984711SChristoph Lameter 
10977e2ab150SChristoph Lameter 	/*
10987e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10997e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
11007e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
11017e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
11027e2ab150SChristoph Lameter 	 *
11037e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fall back to picking some
11047e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
11057e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
11067e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need to move.
11077e2ab150SChristoph Lameter 	 *
11087e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
11097e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
11107e2ab150SChristoph Lameter 	 * (nothing left to migrate).
11117e2ab150SChristoph Lameter 	 *
11127e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
11137e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
11147e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
11157e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
11167e2ab150SChristoph Lameter 	 * before migrating outgoing memory from that same node.
11177e2ab150SChristoph Lameter 	 *
11187e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
11197e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
11207e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
11217e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out immediately with that
1122ae0e47f0SJustin P. Mattock 	 * pair.  Otherwise when we finish scanning tmp, we at least have the
11237e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
11247e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
11257e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11267e2ab150SChristoph Lameter 	 */
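
/*
 * Worked example (illustrative): from = {0,1}, to = {1,2}, so
 * node_remap() maps 0 -> 1 and 1 -> 2.  Scanning tmp = {0,1}:
 * <0,1> moves, but dest 1 is still set in tmp, so keep looking;
 * <1,2> moves to an empty slot, so break out and migrate 1 -> 2
 * first.  On the next pass tmp = {0} and <0,1> wins.  Node 1 is
 * thus drained into node 2 before node 0's pages land on node 1.
 */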
11277e2ab150SChristoph Lameter 
11280ce72d4fSAndrew Morton 	tmp = *from;
11297e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11307e2ab150SChristoph Lameter 		int s, d;
1131b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11327e2ab150SChristoph Lameter 		int dest = 0;
11337e2ab150SChristoph Lameter 
11347e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11354a5b18ccSLarry Woodman 
11364a5b18ccSLarry Woodman 			/*
11374a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11384a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11394a5b18ccSLarry Woodman 			 * threads and memory areas.
11404a5b18ccSLarry Woodman 			 *
11414a5b18ccSLarry Woodman 			 * However, if the number of source nodes is not equal
11424a5b18ccSLarry Woodman 			 * to the number of destination nodes, we cannot
11434a5b18ccSLarry Woodman 			 * preserve this node-relative relationship.  In that
11444a5b18ccSLarry Woodman 			 * case, skip copying memory from a node that is in the
11454a5b18ccSLarry Woodman 			 * destination mask.
11464a5b18ccSLarry Woodman 			 *
11474a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11484a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11494a5b18ccSLarry Woodman 			 */
11504a5b18ccSLarry Woodman 
11510ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11520ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11534a5b18ccSLarry Woodman 				continue;
11544a5b18ccSLarry Woodman 
11550ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11567e2ab150SChristoph Lameter 			if (s == d)
11577e2ab150SChristoph Lameter 				continue;
11587e2ab150SChristoph Lameter 
11597e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11607e2ab150SChristoph Lameter 			dest = d;
11617e2ab150SChristoph Lameter 
11627e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11637e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11647e2ab150SChristoph Lameter 				break;
11657e2ab150SChristoph Lameter 		}
1166b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11677e2ab150SChristoph Lameter 			break;
11687e2ab150SChristoph Lameter 
11697e2ab150SChristoph Lameter 		node_clear(source, tmp);
11707e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11717e2ab150SChristoph Lameter 		if (err > 0)
11727e2ab150SChristoph Lameter 			busy += err;
11737e2ab150SChristoph Lameter 		if (err < 0)
11747e2ab150SChristoph Lameter 			break;
117539743889SChristoph Lameter 	}
1176d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1177d479960eSMinchan Kim 
1178361a2a22SMinchan Kim 	lru_cache_enable();
11797e2ab150SChristoph Lameter 	if (err < 0)
11807e2ab150SChristoph Lameter 		return err;
11817e2ab150SChristoph Lameter 	return busy;
118339743889SChristoph Lameter }
118439743889SChristoph Lameter 
11853ad33b24SLee Schermerhorn /*
11863ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1187d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
11883ad33b24SLee Schermerhorn  * Search forward from there if it is not.  N.B., this assumes that the
11893ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
11903ad33b24SLee Schermerhorn  * is in virtual address order.
11913ad33b24SLee Schermerhorn  */
1192666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
119395a402c3SChristoph Lameter {
1194*ec4858e0SMatthew Wilcox (Oracle) 	struct folio *dst, *src = page_folio(page);
1195d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
11963f649ab7SKees Cook 	unsigned long address;
1197*ec4858e0SMatthew Wilcox (Oracle) 	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
119895a402c3SChristoph Lameter 
1199d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
12003ad33b24SLee Schermerhorn 	while (vma) {
12013ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
12023ad33b24SLee Schermerhorn 		if (address != -EFAULT)
12033ad33b24SLee Schermerhorn 			break;
12043ad33b24SLee Schermerhorn 		vma = vma->vm_next;
12053ad33b24SLee Schermerhorn 	}
12063ad33b24SLee Schermerhorn 
1207*ec4858e0SMatthew Wilcox (Oracle) 	if (folio_test_hugetlb(src))
1208*ec4858e0SMatthew Wilcox (Oracle) 		return alloc_huge_page_vma(page_hstate(&src->page),
1209389c8178SMichal Hocko 				vma, address);
1210c8633798SNaoya Horiguchi 
1211*ec4858e0SMatthew Wilcox (Oracle) 	if (folio_test_large(src))
1212*ec4858e0SMatthew Wilcox (Oracle) 		gfp = GFP_TRANSHUGE;
1213*ec4858e0SMatthew Wilcox (Oracle) 
121411c731e8SWanpeng Li 	/*
1215*ec4858e0SMatthew Wilcox (Oracle) 	 * If !vma, vma_alloc_folio() will use the task or system default policy.
121611c731e8SWanpeng Li 	 */
1217*ec4858e0SMatthew Wilcox (Oracle) 	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
1218*ec4858e0SMatthew Wilcox (Oracle) 			folio_test_large(src));
1219*ec4858e0SMatthew Wilcox (Oracle) 	return &dst->page;
122095a402c3SChristoph Lameter }
1221b20a3503SChristoph Lameter #else
1222b20a3503SChristoph Lameter 
1223a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
1224b20a3503SChristoph Lameter 				unsigned long flags)
1225b20a3503SChristoph Lameter {
1226a53190a4SYang Shi 	return -EIO;
1227b20a3503SChristoph Lameter }
1228b20a3503SChristoph Lameter 
12290ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12300ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1231b20a3503SChristoph Lameter {
1232b20a3503SChristoph Lameter 	return -ENOSYS;
1233b20a3503SChristoph Lameter }
123495a402c3SChristoph Lameter 
1235666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
123695a402c3SChristoph Lameter {
123795a402c3SChristoph Lameter 	return NULL;
123895a402c3SChristoph Lameter }
1239b20a3503SChristoph Lameter #endif
1240b20a3503SChristoph Lameter 
1241dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1242028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1243028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12446ce3c4c0SChristoph Lameter {
12456ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
12466ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12476ce3c4c0SChristoph Lameter 	unsigned long end;
12486ce3c4c0SChristoph Lameter 	int err;
1249d8835445SYang Shi 	int ret;
12506ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12516ce3c4c0SChristoph Lameter 
1252b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12536ce3c4c0SChristoph Lameter 		return -EINVAL;
125474c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12556ce3c4c0SChristoph Lameter 		return -EPERM;
12566ce3c4c0SChristoph Lameter 
12576ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12586ce3c4c0SChristoph Lameter 		return -EINVAL;
12596ce3c4c0SChristoph Lameter 
12606ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12616ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12626ce3c4c0SChristoph Lameter 
12636ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
12646ce3c4c0SChristoph Lameter 	end = start + len;
12656ce3c4c0SChristoph Lameter 
12666ce3c4c0SChristoph Lameter 	if (end < start)
12676ce3c4c0SChristoph Lameter 		return -EINVAL;
12686ce3c4c0SChristoph Lameter 	if (end == start)
12696ce3c4c0SChristoph Lameter 		return 0;
12706ce3c4c0SChristoph Lameter 
1271028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12726ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12736ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12746ce3c4c0SChristoph Lameter 
1275b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1276b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1277b24f53a0SLee Schermerhorn 
12786ce3c4c0SChristoph Lameter 	/*
12796ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operating
12806ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all.
12816ce3c4c0SChristoph Lameter 	 */
12826ce3c4c0SChristoph Lameter 	if (!new)
12836ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12846ce3c4c0SChristoph Lameter 
1285028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1286028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
128700ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
12886ce3c4c0SChristoph Lameter 
12890aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1291361a2a22SMinchan Kim 		lru_cache_disable();
12934bfc4495SKAMEZAWA Hiroyuki 	{
12944bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12954bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
1296d8ed45c5SMichel Lespinasse 			mmap_write_lock(mm);
12974bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
12984bfc4495SKAMEZAWA Hiroyuki 			if (err)
1299d8ed45c5SMichel Lespinasse 				mmap_write_unlock(mm);
13004bfc4495SKAMEZAWA Hiroyuki 		} else
13014bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
13024bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
13034bfc4495SKAMEZAWA Hiroyuki 	}
1304b05ca738SKOSAKI Motohiro 	if (err)
1305b05ca738SKOSAKI Motohiro 		goto mpol_out;
1306b05ca738SKOSAKI Motohiro 
1307d8835445SYang Shi 	ret = queue_pages_range(mm, start, end, nmask,
13086ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1309d8835445SYang Shi 
1310d8835445SYang Shi 	if (ret < 0) {
1311a85dfc30SYang Shi 		err = ret;
1312d8835445SYang Shi 		goto up_out;
1313d8835445SYang Shi 	}
1314d8835445SYang Shi 
13159d8cebd4SKOSAKI Motohiro 	err = mbind_range(mm, start, end, new);
13167e2ab150SChristoph Lameter 
1317b24f53a0SLee Schermerhorn 	if (!err) {
1318b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1319b24f53a0SLee Schermerhorn 
1320cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1321b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1322d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
13235ac95884SYang Shi 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1324cf608ac1SMinchan Kim 			if (nr_failed)
132574060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1326cf608ac1SMinchan Kim 		}
13276ce3c4c0SChristoph Lameter 
1328d8835445SYang Shi 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
13296ce3c4c0SChristoph Lameter 			err = -EIO;
1330a85dfc30SYang Shi 	} else {
1331d8835445SYang Shi up_out:
1332a85dfc30SYang Shi 		if (!list_empty(&pagelist))
1333a85dfc30SYang Shi 			putback_movable_pages(&pagelist);
1334a85dfc30SYang Shi 	}
1335a85dfc30SYang Shi 
1336d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1337b05ca738SKOSAKI Motohiro mpol_out:
1338f0be3d32SLee Schermerhorn 	mpol_put(new);
1339d479960eSMinchan Kim 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1340361a2a22SMinchan Kim 		lru_cache_enable();
13416ce3c4c0SChristoph Lameter 	return err;
13426ce3c4c0SChristoph Lameter }
13436ce3c4c0SChristoph Lameter 
134439743889SChristoph Lameter /*
13458bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13468bccd85fSChristoph Lameter  */
1347e130242dSArnd Bergmann static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1348e130242dSArnd Bergmann 		      unsigned long maxnode)
1349e130242dSArnd Bergmann {
1350e130242dSArnd Bergmann 	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1351e130242dSArnd Bergmann 	int ret;
1352e130242dSArnd Bergmann 
1353e130242dSArnd Bergmann 	if (in_compat_syscall())
1354e130242dSArnd Bergmann 		ret = compat_get_bitmap(mask,
1355e130242dSArnd Bergmann 					(const compat_ulong_t __user *)nmask,
1356e130242dSArnd Bergmann 					maxnode);
1357e130242dSArnd Bergmann 	else
1358e130242dSArnd Bergmann 		ret = copy_from_user(mask, nmask,
1359e130242dSArnd Bergmann 				     nlongs * sizeof(unsigned long));
1360e130242dSArnd Bergmann 
1361e130242dSArnd Bergmann 	if (ret)
1362e130242dSArnd Bergmann 		return -EFAULT;
1363e130242dSArnd Bergmann 
1364e130242dSArnd Bergmann 	if (maxnode % BITS_PER_LONG)
1365e130242dSArnd Bergmann 		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1366e130242dSArnd Bergmann 
1367e130242dSArnd Bergmann 	return 0;
1368e130242dSArnd Bergmann }
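
/*
 * Example (illustrative numbers): on a 64-bit kernel, maxnode = 40
 * gives nlongs = 1, and after the copy mask[0] &= (1UL << 40) - 1,
 * so stray bits above the width the caller advertised are discarded.
 */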
13698bccd85fSChristoph Lameter 
13708bccd85fSChristoph Lameter /* Copy a node mask from user space. */
137139743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13728bccd85fSChristoph Lameter 		     unsigned long maxnode)
13738bccd85fSChristoph Lameter {
13748bccd85fSChristoph Lameter 	--maxnode;
13758bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13768bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13778bccd85fSChristoph Lameter 		return 0;
1378a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1379636f13c1SChris Wright 		return -EINVAL;
13808bccd85fSChristoph Lameter 
138156521e7aSYisheng Xie 	/*
138256521e7aSYisheng Xie 	 * When the user specifies more nodes than supported, just check
1383e130242dSArnd Bergmann 	 * that the unsupported part is all zero, one word at a time,
1384e130242dSArnd Bergmann 	 * starting at the end.
138556521e7aSYisheng Xie 	 */
1386e130242dSArnd Bergmann 	while (maxnode > MAX_NUMNODES) {
1387e130242dSArnd Bergmann 		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1388e130242dSArnd Bergmann 		unsigned long t;
13898bccd85fSChristoph Lameter 
1390e130242dSArnd Bergmann 		if (get_bitmap(&t, &nmask[maxnode / BITS_PER_LONG], bits))
139156521e7aSYisheng Xie 			return -EFAULT;
1392e130242dSArnd Bergmann 
1393e130242dSArnd Bergmann 		if (maxnode - bits >= MAX_NUMNODES) {
1394e130242dSArnd Bergmann 			maxnode -= bits;
1395e130242dSArnd Bergmann 		} else {
1396e130242dSArnd Bergmann 			maxnode = MAX_NUMNODES;
1397e130242dSArnd Bergmann 			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1398e130242dSArnd Bergmann 		}
1399e130242dSArnd Bergmann 		if (t)
140056521e7aSYisheng Xie 			return -EINVAL;
140156521e7aSYisheng Xie 	}
140256521e7aSYisheng Xie 
1403e130242dSArnd Bergmann 	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
14048bccd85fSChristoph Lameter }
14058bccd85fSChristoph Lameter 
14068bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
14078bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
14088bccd85fSChristoph Lameter 			      nodemask_t *nodes)
14098bccd85fSChristoph Lameter {
14108bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1411050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1412e130242dSArnd Bergmann 	bool compat = in_compat_syscall();
1413e130242dSArnd Bergmann 
1414e130242dSArnd Bergmann 	if (compat)
1415e130242dSArnd Bergmann 		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
14168bccd85fSChristoph Lameter 
14178bccd85fSChristoph Lameter 	if (copy > nbytes) {
14188bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
14198bccd85fSChristoph Lameter 			return -EINVAL;
14208bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
14218bccd85fSChristoph Lameter 			return -EFAULT;
14228bccd85fSChristoph Lameter 		copy = nbytes;
1423e130242dSArnd Bergmann 		maxnode = nr_node_ids;
14248bccd85fSChristoph Lameter 	}
1425e130242dSArnd Bergmann 
1426e130242dSArnd Bergmann 	if (compat)
1427e130242dSArnd Bergmann 		return compat_put_bitmap((compat_ulong_t __user *)mask,
1428e130242dSArnd Bergmann 					 nodes_addr(*nodes), maxnode);
1429e130242dSArnd Bergmann 
14308bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
14318bccd85fSChristoph Lameter }
14328bccd85fSChristoph Lameter 
143395837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
143495837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
143595837924SFeng Tang {
143695837924SFeng Tang 	*flags = *mode & MPOL_MODE_FLAGS;
143795837924SFeng Tang 	*mode &= ~MPOL_MODE_FLAGS;
1438b27abaccSDave Hansen 
1439a38a59fdSBen Widawsky 	if ((unsigned int)(*mode) >= MPOL_MAX)
144095837924SFeng Tang 		return -EINVAL;
144195837924SFeng Tang 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
144295837924SFeng Tang 		return -EINVAL;
14436d2aec9eSEric Dumazet 	if (*flags & MPOL_F_NUMA_BALANCING) {
14446d2aec9eSEric Dumazet 		if (*mode != MPOL_BIND)
14456d2aec9eSEric Dumazet 			return -EINVAL;
14466d2aec9eSEric Dumazet 		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
14476d2aec9eSEric Dumazet 	}
144895837924SFeng Tang 	return 0;
144995837924SFeng Tang }
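
/*
 * Example (illustrative): userspace passes any mode flags OR'ed into
 * the mode argument, e.g. mode = MPOL_BIND | MPOL_F_STATIC_NODES.
 * sanitize_mpol_flags() splits that into *mode = MPOL_BIND and
 * *flags = MPOL_F_STATIC_NODES; combining MPOL_F_STATIC_NODES with
 * MPOL_F_RELATIVE_NODES is rejected with -EINVAL.
 */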
145095837924SFeng Tang 
1451e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1452e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1453e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
14548bccd85fSChristoph Lameter {
1455028fec41SDavid Rientjes 	unsigned short mode_flags;
145695837924SFeng Tang 	nodemask_t nodes;
145795837924SFeng Tang 	int lmode = mode;
145895837924SFeng Tang 	int err;
14598bccd85fSChristoph Lameter 
1460057d3389SAndrey Konovalov 	start = untagged_addr(start);
146195837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
146295837924SFeng Tang 	if (err)
146395837924SFeng Tang 		return err;
146495837924SFeng Tang 
14658bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14668bccd85fSChristoph Lameter 	if (err)
14678bccd85fSChristoph Lameter 		return err;
146895837924SFeng Tang 
146995837924SFeng Tang 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
14708bccd85fSChristoph Lameter }
14718bccd85fSChristoph Lameter 
1472c6018b4bSAneesh Kumar K.V SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1473c6018b4bSAneesh Kumar K.V 		unsigned long, home_node, unsigned long, flags)
1474c6018b4bSAneesh Kumar K.V {
1475c6018b4bSAneesh Kumar K.V 	struct mm_struct *mm = current->mm;
1476c6018b4bSAneesh Kumar K.V 	struct vm_area_struct *vma;
1477c6018b4bSAneesh Kumar K.V 	struct mempolicy *new;
1478c6018b4bSAneesh Kumar K.V 	unsigned long vmstart;
1479c6018b4bSAneesh Kumar K.V 	unsigned long vmend;
1480c6018b4bSAneesh Kumar K.V 	unsigned long end;
1481c6018b4bSAneesh Kumar K.V 	int err = -ENOENT;
1482c6018b4bSAneesh Kumar K.V 
1483c6018b4bSAneesh Kumar K.V 	start = untagged_addr(start);
1484c6018b4bSAneesh Kumar K.V 	if (start & ~PAGE_MASK)
1485c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1486c6018b4bSAneesh Kumar K.V 	/*
1487c6018b4bSAneesh Kumar K.V 	 * flags is reserved for future extensions and must be zero for now.
1488c6018b4bSAneesh Kumar K.V 	 */
1489c6018b4bSAneesh Kumar K.V 	if (flags != 0)
1490c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1491c6018b4bSAneesh Kumar K.V 
1492c6018b4bSAneesh Kumar K.V 	/*
1493c6018b4bSAneesh Kumar K.V 	 * Check home_node is online to avoid accessing uninitialized
1494c6018b4bSAneesh Kumar K.V 	 * NODE_DATA.
1495c6018b4bSAneesh Kumar K.V 	 */
1496c6018b4bSAneesh Kumar K.V 	if (home_node >= MAX_NUMNODES || !node_online(home_node))
1497c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1498c6018b4bSAneesh Kumar K.V 
1499c6018b4bSAneesh Kumar K.V 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1500c6018b4bSAneesh Kumar K.V 	end = start + len;
1501c6018b4bSAneesh Kumar K.V 
1502c6018b4bSAneesh Kumar K.V 	if (end < start)
1503c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1504c6018b4bSAneesh Kumar K.V 	if (end == start)
1505c6018b4bSAneesh Kumar K.V 		return 0;
1506c6018b4bSAneesh Kumar K.V 	mmap_write_lock(mm);
1507c6018b4bSAneesh Kumar K.V 	vma = find_vma(mm, start);
1508c6018b4bSAneesh Kumar K.V 	for (; vma && vma->vm_start < end; vma = vma->vm_next) {
1510c6018b4bSAneesh Kumar K.V 		vmstart = max(start, vma->vm_start);
1511c6018b4bSAneesh Kumar K.V 		vmend   = min(end, vma->vm_end);
1512c6018b4bSAneesh Kumar K.V 		new = mpol_dup(vma_policy(vma));
1513c6018b4bSAneesh Kumar K.V 		if (IS_ERR(new)) {
1514c6018b4bSAneesh Kumar K.V 			err = PTR_ERR(new);
1515c6018b4bSAneesh Kumar K.V 			break;
1516c6018b4bSAneesh Kumar K.V 		}
1517c6018b4bSAneesh Kumar K.V 		/*
1518c6018b4bSAneesh Kumar K.V 		 * Only update home node if there is an existing vma policy
1519c6018b4bSAneesh Kumar K.V 		 */
1520c6018b4bSAneesh Kumar K.V 		if (!new)
1521c6018b4bSAneesh Kumar K.V 			continue;
1522c6018b4bSAneesh Kumar K.V 
1523c6018b4bSAneesh Kumar K.V 		/*
1524c6018b4bSAneesh Kumar K.V 		 * If any vma in the range has a policy other than MPOL_BIND
1525c6018b4bSAneesh Kumar K.V 		 * or MPOL_PREFERRED_MANY, return an error.  We do not reset
1526c6018b4bSAneesh Kumar K.V 		 * the home node for vmas we have already updated.
1527c6018b4bSAneesh Kumar K.V 		 */
1528c6018b4bSAneesh Kumar K.V 		if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
1529c6018b4bSAneesh Kumar K.V 			err = -EOPNOTSUPP;
1530c6018b4bSAneesh Kumar K.V 			break;
1531c6018b4bSAneesh Kumar K.V 		}
1532c6018b4bSAneesh Kumar K.V 
1533c6018b4bSAneesh Kumar K.V 		new->home_node = home_node;
1534c6018b4bSAneesh Kumar K.V 		err = mbind_range(mm, vmstart, vmend, new);
1535c6018b4bSAneesh Kumar K.V 		mpol_put(new);
1536c6018b4bSAneesh Kumar K.V 		if (err)
1537c6018b4bSAneesh Kumar K.V 			break;
1538c6018b4bSAneesh Kumar K.V 	}
1539c6018b4bSAneesh Kumar K.V 	mmap_write_unlock(mm);
1540c6018b4bSAneesh Kumar K.V 	return err;
1541c6018b4bSAneesh Kumar K.V }
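
/*
 * Illustrative userspace sketch (assumes a raw syscall(2) invocation,
 * as a libc wrapper may not exist; error handling elided):
 *
 *	#include <sys/syscall.h>
 *	syscall(__NR_set_mempolicy_home_node,
 *		(unsigned long)addr, len, 1UL, 0UL);
 *
 * sets node 1 as the home node for the MPOL_BIND/MPOL_PREFERRED_MANY
 * vmas covering [addr, addr + len).
 */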
1542c6018b4bSAneesh Kumar K.V 
1543e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1544e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1545e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1546e7dc9ad6SDominik Brodowski {
1547e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1548e7dc9ad6SDominik Brodowski }
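
/*
 * Illustrative userspace sketch (error handling elided; nodemask width
 * conventions per mbind(2)): bind a fresh mapping to node 0 and move
 * any pages already faulted in elsewhere.
 *
 *	unsigned long nmask = 1UL << 0;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(p, len, MPOL_BIND, &nmask, 8 * sizeof(nmask),
 *	      MPOL_MF_MOVE);
 */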
1549e7dc9ad6SDominik Brodowski 
15508bccd85fSChristoph Lameter /* Set the process memory policy */
1551af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1552af03c4acSDominik Brodowski 				 unsigned long maxnode)
15538bccd85fSChristoph Lameter {
155495837924SFeng Tang 	unsigned short mode_flags;
15558bccd85fSChristoph Lameter 	nodemask_t nodes;
155695837924SFeng Tang 	int lmode = mode;
155795837924SFeng Tang 	int err;
15588bccd85fSChristoph Lameter 
155995837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
156095837924SFeng Tang 	if (err)
156195837924SFeng Tang 		return err;
156295837924SFeng Tang 
15638bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
15648bccd85fSChristoph Lameter 	if (err)
15658bccd85fSChristoph Lameter 		return err;
156695837924SFeng Tang 
156795837924SFeng Tang 	return do_set_mempolicy(lmode, mode_flags, &nodes);
15688bccd85fSChristoph Lameter }
15698bccd85fSChristoph Lameter 
1570af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1571af03c4acSDominik Brodowski 		unsigned long, maxnode)
1572af03c4acSDominik Brodowski {
1573af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1574af03c4acSDominik Brodowski }
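
/*
 * Illustrative userspace sketch (error handling elided): interleave
 * all future allocations of the calling task across nodes 0 and 1.
 *
 *	unsigned long nmask = (1UL << 0) | (1UL << 1);
 *	set_mempolicy(MPOL_INTERLEAVE, &nmask, 8 * sizeof(nmask));
 */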
1575af03c4acSDominik Brodowski 
1576b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1577b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1578b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
157939743889SChristoph Lameter {
1580596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
158139743889SChristoph Lameter 	struct task_struct *task;
158239743889SChristoph Lameter 	nodemask_t task_nodes;
158339743889SChristoph Lameter 	int err;
1584596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1585596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1586596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
158739743889SChristoph Lameter 
1588596d7cfaSKOSAKI Motohiro 	if (!scratch)
1589596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
159039743889SChristoph Lameter 
1591596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1592596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1593596d7cfaSKOSAKI Motohiro 
1594596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
159539743889SChristoph Lameter 	if (err)
1596596d7cfaSKOSAKI Motohiro 		goto out;
1597596d7cfaSKOSAKI Motohiro 
1598596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1599596d7cfaSKOSAKI Motohiro 	if (err)
1600596d7cfaSKOSAKI Motohiro 		goto out;
160139743889SChristoph Lameter 
160239743889SChristoph Lameter 	/* Find the mm_struct */
160355cfaa3cSZeng Zhaoming 	rcu_read_lock();
1604228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
160539743889SChristoph Lameter 	if (!task) {
160655cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1607596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1608596d7cfaSKOSAKI Motohiro 		goto out;
160939743889SChristoph Lameter 	}
16103268c63eSChristoph Lameter 	get_task_struct(task);
161139743889SChristoph Lameter 
1612596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
161339743889SChristoph Lameter 
161439743889SChristoph Lameter 	/*
161531367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
161631367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
161739743889SChristoph Lameter 	 */
161831367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1619c69e8d9cSDavid Howells 		rcu_read_unlock();
162039743889SChristoph Lameter 		err = -EPERM;
16213268c63eSChristoph Lameter 		goto out_put;
162239743889SChristoph Lameter 	}
1623c69e8d9cSDavid Howells 	rcu_read_unlock();
162439743889SChristoph Lameter 
162539743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
162639743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1627596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
162839743889SChristoph Lameter 		err = -EPERM;
16293268c63eSChristoph Lameter 		goto out_put;
163039743889SChristoph Lameter 	}
163139743889SChristoph Lameter 
16320486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
16330486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
16340486a38bSYisheng Xie 	if (nodes_empty(*new))
16353268c63eSChristoph Lameter 		goto out_put;
16360486a38bSYisheng Xie 
163786c3a764SDavid Quigley 	err = security_task_movememory(task);
163886c3a764SDavid Quigley 	if (err)
16393268c63eSChristoph Lameter 		goto out_put;
164086c3a764SDavid Quigley 
16413268c63eSChristoph Lameter 	mm = get_task_mm(task);
16423268c63eSChristoph Lameter 	put_task_struct(task);
1643f2a9ef88SSasha Levin 
1644f2a9ef88SSasha Levin 	if (!mm) {
1645f2a9ef88SSasha Levin 		err = -EINVAL;
1646f2a9ef88SSasha Levin 		goto out;
1647f2a9ef88SSasha Levin 	}
1648f2a9ef88SSasha Levin 
1649596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
165074c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
16513268c63eSChristoph Lameter 
165239743889SChristoph Lameter 	mmput(mm);
16533268c63eSChristoph Lameter out:
1654596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1655596d7cfaSKOSAKI Motohiro 
165639743889SChristoph Lameter 	return err;
16573268c63eSChristoph Lameter 
16583268c63eSChristoph Lameter out_put:
16593268c63eSChristoph Lameter 	put_task_struct(task);
16603268c63eSChristoph Lameter 	goto out;
166239743889SChristoph Lameter }
166339743889SChristoph Lameter 
1664b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1665b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1666b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1667b6e9b0baSDominik Brodowski {
1668b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1669b6e9b0baSDominik Brodowski }
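
/*
 * Illustrative userspace sketch (requires CAP_SYS_NICE or matching
 * credentials, see the checks above; error handling elided): move a
 * task's pages from node 0 to node 1.
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *	long left = migrate_pages(pid, 8 * sizeof(old), &old, &new);
 *
 * A positive return value counts pages that could not be moved.
 */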
1670b6e9b0baSDominik Brodowski 
16728bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1673af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1674af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1675af03c4acSDominik Brodowski 				unsigned long maxnode,
1676af03c4acSDominik Brodowski 				unsigned long addr,
1677af03c4acSDominik Brodowski 				unsigned long flags)
16788bccd85fSChristoph Lameter {
1679dbcb0f19SAdrian Bunk 	int err;
16803f649ab7SKees Cook 	int pval;
16818bccd85fSChristoph Lameter 	nodemask_t nodes;
16828bccd85fSChristoph Lameter 
1683050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
16848bccd85fSChristoph Lameter 		return -EINVAL;
16858bccd85fSChristoph Lameter 
16864605f057SWenchao Hao 	addr = untagged_addr(addr);
16874605f057SWenchao Hao 
16888bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
16898bccd85fSChristoph Lameter 
16908bccd85fSChristoph Lameter 	if (err)
16918bccd85fSChristoph Lameter 		return err;
16928bccd85fSChristoph Lameter 
16938bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
16948bccd85fSChristoph Lameter 		return -EFAULT;
16958bccd85fSChristoph Lameter 
16968bccd85fSChristoph Lameter 	if (nmask)
16978bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
16988bccd85fSChristoph Lameter 
16998bccd85fSChristoph Lameter 	return err;
17008bccd85fSChristoph Lameter }
17018bccd85fSChristoph Lameter 
1702af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1703af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1704af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1705af03c4acSDominik Brodowski {
1706af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1707af03c4acSDominik Brodowski }
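
/*
 * Illustrative userspace sketch (maxnode must cover nr_node_ids, as
 * kernel_get_mempolicy() checks; error handling elided): query the
 * policy governing the page at addr.
 *
 *	int mode;
 *	unsigned long nmask = 0;
 *	get_mempolicy(&mode, &nmask, 8 * sizeof(nmask), addr,
 *		      MPOL_F_ADDR);
 */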
1708af03c4acSDominik Brodowski 
170920ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma)
171020ca87f2SLi Xinhai {
171120ca87f2SLi Xinhai 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
171220ca87f2SLi Xinhai 		return false;
171320ca87f2SLi Xinhai 
171420ca87f2SLi Xinhai 	/*
171520ca87f2SLi Xinhai 	 * DAX device mappings require predictable access latency, so avoid
171620ca87f2SLi Xinhai 	 * incurring periodic faults.
171720ca87f2SLi Xinhai 	 */
171820ca87f2SLi Xinhai 	if (vma_is_dax(vma))
171920ca87f2SLi Xinhai 		return false;
172020ca87f2SLi Xinhai 
172120ca87f2SLi Xinhai 	if (is_vm_hugetlb_page(vma) &&
172220ca87f2SLi Xinhai 		!hugepage_migration_supported(hstate_vma(vma)))
172320ca87f2SLi Xinhai 		return false;
172420ca87f2SLi Xinhai 
172520ca87f2SLi Xinhai 	/*
172620ca87f2SLi Xinhai 	 * Migration allocates pages in the highest zone. If we cannot
172720ca87f2SLi Xinhai 	 * do so then migration (at least from node to node) is not
172820ca87f2SLi Xinhai 	 * possible.
172920ca87f2SLi Xinhai 	 */
173020ca87f2SLi Xinhai 	if (vma->vm_file &&
173120ca87f2SLi Xinhai 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
173220ca87f2SLi Xinhai 			< policy_zone)
173320ca87f2SLi Xinhai 		return false;
173420ca87f2SLi Xinhai 	return true;
173520ca87f2SLi Xinhai }
173620ca87f2SLi Xinhai 
173774d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
173874d2c3a0SOleg Nesterov 						unsigned long addr)
17391da177e4SLinus Torvalds {
17408d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
17411da177e4SLinus Torvalds 
17421da177e4SLinus Torvalds 	if (vma) {
1743480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
17448d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
174500442ad0SMel Gorman 		} else if (vma->vm_policy) {
17461da177e4SLinus Torvalds 			pol = vma->vm_policy;
174700442ad0SMel Gorman 
174800442ad0SMel Gorman 			/*
174900442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
175000442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
175100442ad0SMel Gorman 			 * count on these policies which will be dropped by
175200442ad0SMel Gorman 			 * mpol_cond_put() later
175300442ad0SMel Gorman 			 */
175400442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
175500442ad0SMel Gorman 				mpol_get(pol);
175600442ad0SMel Gorman 		}
17571da177e4SLinus Torvalds 	}
1758f15ca78eSOleg Nesterov 
175974d2c3a0SOleg Nesterov 	return pol;
176074d2c3a0SOleg Nesterov }
176174d2c3a0SOleg Nesterov 
176274d2c3a0SOleg Nesterov /*
1763dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
176474d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
176574d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
176674d2c3a0SOleg Nesterov  *
176774d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1768dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
176974d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
177074d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
177174d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
177274d2c3a0SOleg Nesterov  * extra reference for shared policies.
177374d2c3a0SOleg Nesterov  */
1774ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1775dd6eecb9SOleg Nesterov 						unsigned long addr)
177674d2c3a0SOleg Nesterov {
177774d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
177874d2c3a0SOleg Nesterov 
17798d90274bSOleg Nesterov 	if (!pol)
1780dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
17818d90274bSOleg Nesterov 
17821da177e4SLinus Torvalds 	return pol;
17831da177e4SLinus Torvalds }
17841da177e4SLinus Torvalds 
17856b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1786fc314724SMel Gorman {
17876b6482bbSOleg Nesterov 	struct mempolicy *pol;
1788f15ca78eSOleg Nesterov 
1789fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1790fc314724SMel Gorman 		bool ret = false;
1791fc314724SMel Gorman 
1792fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1793fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1794fc314724SMel Gorman 			ret = true;
1795fc314724SMel Gorman 		mpol_cond_put(pol);
1796fc314724SMel Gorman 
1797fc314724SMel Gorman 		return ret;
17988d90274bSOleg Nesterov 	}
17998d90274bSOleg Nesterov 
1800fc314724SMel Gorman 	pol = vma->vm_policy;
18018d90274bSOleg Nesterov 	if (!pol)
18026b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1803fc314724SMel Gorman 
1804fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1805fc314724SMel Gorman }
1806fc314724SMel Gorman 
1807d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1808d3eb1570SLai Jiangshan {
1809d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1810d3eb1570SLai Jiangshan 
1811d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1812d3eb1570SLai Jiangshan 
1813d3eb1570SLai Jiangshan 	/*
1814269fbe72SBen Widawsky 	 * If policy->nodes has movable memory only,
1815d3eb1570SLai Jiangshan 	 * we apply policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1816d3eb1570SLai Jiangshan 	 *
1817269fbe72SBen Widawsky 	 * policy->nodes is intersected with node_states[N_MEMORY],
1818f0953a1bSIngo Molnar 	 * so if the following test fails, it implies
1819269fbe72SBen Widawsky 	 * policy->nodes has movable memory only.
1820d3eb1570SLai Jiangshan 	 */
1821269fbe72SBen Widawsky 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1822d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1823d3eb1570SLai Jiangshan 
1824d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1825d3eb1570SLai Jiangshan }
1826d3eb1570SLai Jiangshan 
182752cd3b07SLee Schermerhorn /*
182852cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
182952cd3b07SLee Schermerhorn  * page allocation
183052cd3b07SLee Schermerhorn  */
18318ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
183219770b32SMel Gorman {
1833b27abaccSDave Hansen 	int mode = policy->mode;
1834b27abaccSDave Hansen 
183519770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1836b27abaccSDave Hansen 	if (unlikely(mode == MPOL_BIND) &&
1837d3eb1570SLai Jiangshan 		apply_policy_zone(policy, gfp_zone(gfp)) &&
1838269fbe72SBen Widawsky 		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1839269fbe72SBen Widawsky 		return &policy->nodes;
184019770b32SMel Gorman 
1841b27abaccSDave Hansen 	if (mode == MPOL_PREFERRED_MANY)
1842b27abaccSDave Hansen 		return &policy->nodes;
1843b27abaccSDave Hansen 
184419770b32SMel Gorman 	return NULL;
184519770b32SMel Gorman }
184619770b32SMel Gorman 
1847b27abaccSDave Hansen /*
1848b27abaccSDave Hansen  * Return the preferred node id for 'prefer' mempolicy, and return
1849b27abaccSDave Hansen  * the given id for all other policies.
1850b27abaccSDave Hansen  *
1851b27abaccSDave Hansen  * policy_node() is always coupled with policy_nodemask(), which
1852b27abaccSDave Hansen  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1853b27abaccSDave Hansen  */
1854f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
18551da177e4SLinus Torvalds {
18567858d7bcSFeng Tang 	if (policy->mode == MPOL_PREFERRED) {
1857269fbe72SBen Widawsky 		nd = first_node(policy->nodes);
18587858d7bcSFeng Tang 	} else {
185919770b32SMel Gorman 		/*
18606d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
18616d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
18626d840958SMichal Hocko 		 * requested node and not break the policy.
186319770b32SMel Gorman 		 */
18646d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
18651da177e4SLinus Torvalds 	}
18666d840958SMichal Hocko 
1867c6018b4bSAneesh Kumar K.V 	if ((policy->mode == MPOL_BIND ||
1868c6018b4bSAneesh Kumar K.V 	     policy->mode == MPOL_PREFERRED_MANY) &&
1869c6018b4bSAneesh Kumar K.V 	    policy->home_node != NUMA_NO_NODE)
1870c6018b4bSAneesh Kumar K.V 		return policy->home_node;
1871c6018b4bSAneesh Kumar K.V 
187204ec6264SVlastimil Babka 	return nd;
18731da177e4SLinus Torvalds }
18741da177e4SLinus Torvalds 
18751da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
18761da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
18771da177e4SLinus Torvalds {
187845816682SVlastimil Babka 	unsigned next;
18791da177e4SLinus Torvalds 	struct task_struct *me = current;
18801da177e4SLinus Torvalds 
1881269fbe72SBen Widawsky 	next = next_node_in(me->il_prev, policy->nodes);
1882f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
188345816682SVlastimil Babka 		me->il_prev = next;
188445816682SVlastimil Babka 	return next;
18851da177e4SLinus Torvalds }
18861da177e4SLinus Torvalds 
1887dc85da15SChristoph Lameter /*
1888dc85da15SChristoph Lameter  * Depending on the memory policy, provide a node from which to allocate the
1889dc85da15SChristoph Lameter  * next slab entry.
1890dc85da15SChristoph Lameter  */
18912a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1892dc85da15SChristoph Lameter {
1893e7b691b0SAndi Kleen 	struct mempolicy *policy;
18942a389610SDavid Rientjes 	int node = numa_mem_id();
1895e7b691b0SAndi Kleen 
189638b031ddSVasily Averin 	if (!in_task())
18972a389610SDavid Rientjes 		return node;
1898e7b691b0SAndi Kleen 
1899e7b691b0SAndi Kleen 	policy = current->mempolicy;
19007858d7bcSFeng Tang 	if (!policy)
19012a389610SDavid Rientjes 		return node;
1902765c4507SChristoph Lameter 
1903bea904d5SLee Schermerhorn 	switch (policy->mode) {
1904bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1905269fbe72SBen Widawsky 		return first_node(policy->nodes);
1906bea904d5SLee Schermerhorn 
1907dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1908dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1909dc85da15SChristoph Lameter 
1910b27abaccSDave Hansen 	case MPOL_BIND:
1911b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
1912b27abaccSDave Hansen 	{
1913c33d6c06SMel Gorman 		struct zoneref *z;
1914c33d6c06SMel Gorman 
1915dc85da15SChristoph Lameter 		/*
1916dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1917dc85da15SChristoph Lameter 		 * first node.
1918dc85da15SChristoph Lameter 		 */
191919770b32SMel Gorman 		struct zonelist *zonelist;
192019770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1921c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1922c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1923269fbe72SBen Widawsky 							&policy->nodes);
1924c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1925dd1a239fSMel Gorman 	}
19267858d7bcSFeng Tang 	case MPOL_LOCAL:
19277858d7bcSFeng Tang 		return node;
1928dc85da15SChristoph Lameter 
1929dc85da15SChristoph Lameter 	default:
1930bea904d5SLee Schermerhorn 		BUG();
1931dc85da15SChristoph Lameter 	}
1932dc85da15SChristoph Lameter }
1933dc85da15SChristoph Lameter 
1934fee83b3aSAndrew Morton /*
1935fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1936269fbe72SBen Widawsky  * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1937fee83b3aSAndrew Morton  * number of present nodes.
1938fee83b3aSAndrew Morton  */
193998c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
19401da177e4SLinus Torvalds {
1941276aeee1Syanghui 	nodemask_t nodemask = pol->nodes;
1942276aeee1Syanghui 	unsigned int target, nnodes;
1943fee83b3aSAndrew Morton 	int i;
1944fee83b3aSAndrew Morton 	int nid;
1945276aeee1Syanghui 	/*
1946276aeee1Syanghui 	 * The barrier will stabilize the nodemask in a register or on
1947276aeee1Syanghui 	 * the stack so that it will stop changing under the code.
1948276aeee1Syanghui 	 *
1949276aeee1Syanghui 	 * Between first_node() and next_node(), pol->nodes could be changed
1950276aeee1Syanghui 	 * by other threads.  So we copy pol->nodes onto the local stack.
1951276aeee1Syanghui 	 */
1952276aeee1Syanghui 	barrier();
19531da177e4SLinus Torvalds 
1954276aeee1Syanghui 	nnodes = nodes_weight(nodemask);
1955f5b087b5SDavid Rientjes 	if (!nnodes)
1956f5b087b5SDavid Rientjes 		return numa_node_id();
1957fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1958276aeee1Syanghui 	nid = first_node(nodemask);
1959fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1960276aeee1Syanghui 		nid = next_node(nid, nodemask);
19611da177e4SLinus Torvalds 	return nid;
19621da177e4SLinus Torvalds }
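
/*
 * Worked example (illustrative): with pol->nodes = {0,2,5} and n = 7,
 * nnodes = 3 and target = 7 % 3 = 1, so a single next_node() step past
 * first_node() yields nid = 2.
 */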
19631da177e4SLinus Torvalds 
19645da7ca86SChristoph Lameter /* Determine a node number for interleave */
19655da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
19665da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
19675da7ca86SChristoph Lameter {
19685da7ca86SChristoph Lameter 	if (vma) {
19695da7ca86SChristoph Lameter 		unsigned long off;
19705da7ca86SChristoph Lameter 
19713b98b087SNishanth Aravamudan 		/*
19723b98b087SNishanth Aravamudan 		 * For small pages, there is no difference between
19733b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
19743b98b087SNishanth Aravamudan 		 * For huge pages, since vm_pgoff is in units of small
19753b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always-0 bits to get
19763b98b087SNishanth Aravamudan 		 * a useful offset.
19773b98b087SNishanth Aravamudan 		 */
19783b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
19793b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
19805da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
198198c70baaSLaurent Dufour 		return offset_il_node(pol, off);
19825da7ca86SChristoph Lameter 	} else
19835da7ca86SChristoph Lameter 		return interleave_nodes(pol);
19845da7ca86SChristoph Lameter }
19855da7ca86SChristoph Lameter 
198600ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1987480eccf9SLee Schermerhorn /*
198804ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1989b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1990b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1991b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1992b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1993b27abaccSDave Hansen  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
1994480eccf9SLee Schermerhorn  *
199504ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
199652cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
1997b27abaccSDave Hansen  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
1998b27abaccSDave Hansen  * to the mempolicy's @nodemask for filtering the zonelist.
1999c0ff7453SMiao Xie  *
2000d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
2001480eccf9SLee Schermerhorn  */
200204ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
200304ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
20045da7ca86SChristoph Lameter {
200504ec6264SVlastimil Babka 	int nid;
2006b27abaccSDave Hansen 	int mode;
20075da7ca86SChristoph Lameter 
2008dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
2009b27abaccSDave Hansen 	*nodemask = NULL;
2010b27abaccSDave Hansen 	mode = (*mpol)->mode;
20115da7ca86SChristoph Lameter 
2012b27abaccSDave Hansen 	if (unlikely(mode == MPOL_INTERLEAVE)) {
201304ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
201404ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
201552cd3b07SLee Schermerhorn 	} else {
201604ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
2017b27abaccSDave Hansen 		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
2018269fbe72SBen Widawsky 			*nodemask = &(*mpol)->nodes;
2019480eccf9SLee Schermerhorn 	}
202004ec6264SVlastimil Babka 	return nid;
20215da7ca86SChristoph Lameter }
202206808b08SLee Schermerhorn 
202306808b08SLee Schermerhorn /*
202406808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
202506808b08SLee Schermerhorn  *
202606808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
202706808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
202806808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
202906808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
203006808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
203106808b08SLee Schermerhorn  * of non-default mempolicy.
203206808b08SLee Schermerhorn  *
203306808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
203406808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
203506808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
203606808b08SLee Schermerhorn  *
203706808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
203806808b08SLee Schermerhorn  */
203906808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
204006808b08SLee Schermerhorn {
204106808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
204206808b08SLee Schermerhorn 
204306808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
204406808b08SLee Schermerhorn 		return false;
204506808b08SLee Schermerhorn 
2046c0ff7453SMiao Xie 	task_lock(current);
204706808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
204806808b08SLee Schermerhorn 	switch (mempolicy->mode) {
204906808b08SLee Schermerhorn 	case MPOL_PREFERRED:
2050b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
205106808b08SLee Schermerhorn 	case MPOL_BIND:
205206808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
2053269fbe72SBen Widawsky 		*mask = mempolicy->nodes;
205406808b08SLee Schermerhorn 		break;
205506808b08SLee Schermerhorn 
20567858d7bcSFeng Tang 	case MPOL_LOCAL:
2057269fbe72SBen Widawsky 		init_nodemask_of_node(mask, numa_node_id());
20587858d7bcSFeng Tang 		break;
20597858d7bcSFeng Tang 
206006808b08SLee Schermerhorn 	default:
206106808b08SLee Schermerhorn 		BUG();
206206808b08SLee Schermerhorn 	}
2063c0ff7453SMiao Xie 	task_unlock(current);
206406808b08SLee Schermerhorn 
206506808b08SLee Schermerhorn 	return true;
206606808b08SLee Schermerhorn }
206700ac59adSChen, Kenneth W #endif
20685da7ca86SChristoph Lameter 
20696f48d0ebSDavid Rientjes /*
2070b26e517aSFeng Tang  * mempolicy_in_oom_domain
20716f48d0ebSDavid Rientjes  *
2072b26e517aSFeng Tang  * If tsk's mempolicy is "bind", check for intersection between mask and
2073b26e517aSFeng Tang  * the policy nodemask. Otherwise, return true for all other policies
2074b26e517aSFeng Tang  * including "interleave", as a tsk with "interleave" policy may have
2075b26e517aSFeng Tang  * memory allocated from all nodes in system.
20766f48d0ebSDavid Rientjes  *
20776f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
20786f48d0ebSDavid Rientjes  */
2079b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk,
20806f48d0ebSDavid Rientjes 					const nodemask_t *mask)
20816f48d0ebSDavid Rientjes {
20826f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
20836f48d0ebSDavid Rientjes 	bool ret = true;
20846f48d0ebSDavid Rientjes 
20856f48d0ebSDavid Rientjes 	if (!mask)
20866f48d0ebSDavid Rientjes 		return ret;
2087b26e517aSFeng Tang 
20886f48d0ebSDavid Rientjes 	task_lock(tsk);
20896f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
2090b26e517aSFeng Tang 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2091269fbe72SBen Widawsky 		ret = nodes_intersects(mempolicy->nodes, *mask);
20926f48d0ebSDavid Rientjes 	task_unlock(tsk);
2093b26e517aSFeng Tang 
20946f48d0ebSDavid Rientjes 	return ret;
20956f48d0ebSDavid Rientjes }
20966f48d0ebSDavid Rientjes 
20971da177e4SLinus Torvalds /*
20971da177e4SLinus Torvalds  * Allocate a page in interleaved policy.
20981da177e4SLinus Torvalds  * Own path because it needs to do special accounting.
20981da177e4SLinus Torvalds  */
2099662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2100662f3a0bSAndi Kleen 					unsigned nid)
21011da177e4SLinus Torvalds {
21021da177e4SLinus Torvalds 	struct page *page;
21031da177e4SLinus Torvalds 
210484172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp, order, nid, NULL);
21054518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
21064518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
21074518085eSKemi Wang 		return page;
2108de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2109de55c8b2SAndrey Ryabinin 		preempt_disable();
2110f19298b9SMel Gorman 		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2111de55c8b2SAndrey Ryabinin 		preempt_enable();
2112de55c8b2SAndrey Ryabinin 	}
21131da177e4SLinus Torvalds 	return page;
21141da177e4SLinus Torvalds }
21151da177e4SLinus Torvalds 
21164c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
21174c54d949SFeng Tang 						int nid, struct mempolicy *pol)
21184c54d949SFeng Tang {
21194c54d949SFeng Tang 	struct page *page;
21204c54d949SFeng Tang 	gfp_t preferred_gfp;
21214c54d949SFeng Tang 
21224c54d949SFeng Tang 	/*
21234c54d949SFeng Tang 	 * This is a two-pass approach.  The first pass will only try the
21244c54d949SFeng Tang 	 * preferred nodes but skip direct reclaim and allow the
21254c54d949SFeng Tang 	 * allocation to fail, while the second pass will try all the
21264c54d949SFeng Tang 	 * nodes in the system.
21274c54d949SFeng Tang 	 */
21284c54d949SFeng Tang 	preferred_gfp = gfp | __GFP_NOWARN;
21294c54d949SFeng Tang 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
21304c54d949SFeng Tang 	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
21314c54d949SFeng Tang 	if (!page)
2132c0455116SAneesh Kumar K.V 		page = __alloc_pages(gfp, order, nid, NULL);
21334c54d949SFeng Tang 
21344c54d949SFeng Tang 	return page;
21354c54d949SFeng Tang }
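
/*
 * Example (illustrative): for gfp = GFP_HIGHUSER_MOVABLE, the first
 * pass runs with __GFP_NOWARN added and __GFP_DIRECT_RECLAIM cleared,
 * probing pol->nodes without reclaiming; only if that fails is the
 * original gfp retried with a NULL nodemask, i.e. across all nodes.
 */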
21364c54d949SFeng Tang 
21371da177e4SLinus Torvalds /**
21380bbbc0b3SAndrea Arcangeli  * alloc_pages_vma - Allocate a page for a VMA.
2139eb350739SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
21400bbbc0b3SAndrea Arcangeli  * @order: Order of the GFP allocation.
21411da177e4SLinus Torvalds  * @vma: Pointer to VMA or NULL if not available.
2142eb350739SMatthew Wilcox (Oracle)  * @addr: Virtual address of the allocation.  Must be inside @vma.
2143eb350739SMatthew Wilcox (Oracle)  * @hugepage: For hugepages try only the preferred node if possible.
21441da177e4SLinus Torvalds  *
2145eb350739SMatthew Wilcox (Oracle)  * Allocate a page for a specific address in @vma, using the appropriate
2146eb350739SMatthew Wilcox (Oracle)  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2147eb350739SMatthew Wilcox (Oracle)  * of the mm_struct of the VMA to prevent it from going away.  Should be
2148eb350739SMatthew Wilcox (Oracle)  * used for all allocations for pages that will be mapped into user space.
2149eb350739SMatthew Wilcox (Oracle)  *
2150eb350739SMatthew Wilcox (Oracle)  * Return: The page on success or NULL if allocation fails.
21511da177e4SLinus Torvalds  */
2152eb350739SMatthew Wilcox (Oracle) struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2153be1a13ebSMichal Hocko 		unsigned long addr, bool hugepage)
21541da177e4SLinus Torvalds {
2155cc9a6c87SMel Gorman 	struct mempolicy *pol;
2156be1a13ebSMichal Hocko 	int node = numa_node_id();
2157c0ff7453SMiao Xie 	struct page *page;
215804ec6264SVlastimil Babka 	int preferred_nid;
2159be97a41bSVlastimil Babka 	nodemask_t *nmask;
21601da177e4SLinus Torvalds 
2161dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2162cc9a6c87SMel Gorman 
2163be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
21641da177e4SLinus Torvalds 		unsigned nid;
21655da7ca86SChristoph Lameter 
21668eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
216752cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
21680bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2169be97a41bSVlastimil Babka 		goto out;
21701da177e4SLinus Torvalds 	}
21711da177e4SLinus Torvalds 
21724c54d949SFeng Tang 	if (pol->mode == MPOL_PREFERRED_MANY) {
2173c0455116SAneesh Kumar K.V 		node = policy_node(gfp, pol, node);
21744c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order, node, pol);
21754c54d949SFeng Tang 		mpol_cond_put(pol);
21764c54d949SFeng Tang 		goto out;
21774c54d949SFeng Tang 	}
21784c54d949SFeng Tang 
217919deb769SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
218019deb769SDavid Rientjes 		int hpage_node = node;
218119deb769SDavid Rientjes 
218219deb769SDavid Rientjes 		/*
218319deb769SDavid Rientjes 		 * For hugepage allocation with a non-interleave policy that
218419deb769SDavid Rientjes 		 * allows the current node (or another explicitly preferred
218519deb769SDavid Rientjes 		 * node), we only try to allocate from that current/preferred
218619deb769SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
218719deb769SDavid Rientjes 		 * remote accesses would likely offset the THP benefits.
218819deb769SDavid Rientjes 		 *
2189b27abaccSDave Hansen 		 * If the policy is interleave or does not allow the current
219019deb769SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
219119deb769SDavid Rientjes 		 */
21927858d7bcSFeng Tang 		if (pol->mode == MPOL_PREFERRED)
2193269fbe72SBen Widawsky 			hpage_node = first_node(pol->nodes);
219419deb769SDavid Rientjes 
219519deb769SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
219619deb769SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
219719deb769SDavid Rientjes 			mpol_cond_put(pol);
2198cc638f32SVlastimil Babka 			/*
2199cc638f32SVlastimil Babka 			 * First, try to allocate THP only on local node, but
2200cc638f32SVlastimil Babka 			 * don't reclaim unnecessarily, just compact.
2201cc638f32SVlastimil Babka 			 */
220219deb769SDavid Rientjes 			page = __alloc_pages_node(hpage_node,
2203cc638f32SVlastimil Babka 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
220476e654ccSDavid Rientjes 
220576e654ccSDavid Rientjes 			/*
220676e654ccSDavid Rientjes 			 * If hugepage allocations are configured to always
220776e654ccSDavid Rientjes 			 * use synchronous compaction, or the vma has been
220876e654ccSDavid Rientjes 			 * madvised to prefer hugepage backing, retry allowing
2209cc638f32SVlastimil Babka 			 * remote memory with both reclaim and compaction.
221076e654ccSDavid Rientjes 			 */
221176e654ccSDavid Rientjes 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
221233863534SAndrey Ryabinin 				page = __alloc_pages(gfp, order, hpage_node, nmask);
221376e654ccSDavid Rientjes 
221419deb769SDavid Rientjes 			goto out;
221519deb769SDavid Rientjes 		}
221619deb769SDavid Rientjes 	}
221719deb769SDavid Rientjes 
2218077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
221904ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
222084172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp, order, preferred_nid, nmask);
2221d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2222be97a41bSVlastimil Babka out:
2223077fcf11SAneesh Kumar K.V 	return page;
2224077fcf11SAneesh Kumar K.V }
222569262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma);
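/*
 * Example usage (an illustrative sketch, not a caller in this file): a
 * typical fault-path caller already holds the mmap_lock for @vma, as
 * required above.
 *
 *	struct page *page;
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */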
2226077fcf11SAneesh Kumar K.V 
2227f584b680SMatthew Wilcox (Oracle) struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2228f584b680SMatthew Wilcox (Oracle) 		unsigned long addr, bool hugepage)
2229f584b680SMatthew Wilcox (Oracle) {
2230f584b680SMatthew Wilcox (Oracle) 	struct folio *folio;
2231f584b680SMatthew Wilcox (Oracle) 
2232f584b680SMatthew Wilcox (Oracle) 	folio = (struct folio *)alloc_pages_vma(gfp, order, vma, addr,
2233f584b680SMatthew Wilcox (Oracle) 			hugepage);
2234f584b680SMatthew Wilcox (Oracle) 	if (folio && order > 1)
2235f584b680SMatthew Wilcox (Oracle) 		prep_transhuge_page(&folio->page);
2236f584b680SMatthew Wilcox (Oracle) 
2237f584b680SMatthew Wilcox (Oracle) 	return folio;
2238f584b680SMatthew Wilcox (Oracle) }
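/*
 * Example usage (illustrative sketch): the folio variant of the above;
 * release the reference with folio_put() when the caller is done.
 *
 *	struct folio *folio;
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 *	if (!folio)
 *		return VM_FAULT_OOM;
 */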
2239f584b680SMatthew Wilcox (Oracle) 
22401da177e4SLinus Torvalds /**
2241d7f946d0SMatthew Wilcox (Oracle)  * alloc_pages - Allocate pages.
22426421ec76SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
22436421ec76SMatthew Wilcox (Oracle)  * @order: Power of two of number of pages to allocate.
22441da177e4SLinus Torvalds  *
22456421ec76SMatthew Wilcox (Oracle)  * Allocate 1 << @order contiguous pages.  The physical address of the
22466421ec76SMatthew Wilcox (Oracle)  * first page is naturally aligned (e.g. an order-3 allocation will be aligned
22476421ec76SMatthew Wilcox (Oracle)  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
22486421ec76SMatthew Wilcox (Oracle)  * process is honoured when in process context.
22491da177e4SLinus Torvalds  *
22506421ec76SMatthew Wilcox (Oracle)  * Context: Can be called from any context, providing the appropriate GFP
22516421ec76SMatthew Wilcox (Oracle)  * flags are used.
22526421ec76SMatthew Wilcox (Oracle)  * Return: The page on success or NULL if allocation fails.
22531da177e4SLinus Torvalds  */
2254d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order)
22551da177e4SLinus Torvalds {
22568d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2257c0ff7453SMiao Xie 	struct page *page;
22581da177e4SLinus Torvalds 
22598d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22608d90274bSOleg Nesterov 		pol = get_task_policy(current);
226152cd3b07SLee Schermerhorn 
226252cd3b07SLee Schermerhorn 	/*
226352cd3b07SLee Schermerhorn 	 * No reference counting is needed for current->mempolicy
226452cd3b07SLee Schermerhorn 	 * or the system default_policy.
226552cd3b07SLee Schermerhorn 	 */
226645c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2267c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
22684c54d949SFeng Tang 	else if (pol->mode == MPOL_PREFERRED_MANY)
22694c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order,
2270c0455116SAneesh Kumar K.V 				  policy_node(gfp, pol, numa_node_id()), pol);
2271c0ff7453SMiao Xie 	else
227284172f4bSMatthew Wilcox (Oracle) 		page = __alloc_pages(gfp, order,
227304ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
22745c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2275cc9a6c87SMel Gorman 
2276c0ff7453SMiao Xie 	return page;
22771da177e4SLinus Torvalds }
2278d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages);
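/*
 * Example usage (illustrative sketch): an order-2 request returns four
 * contiguous pages whose first page is aligned to 4 * PAGE_SIZE.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 */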
22791da177e4SLinus Torvalds 
2280cc09cb13SMatthew Wilcox (Oracle) struct folio *folio_alloc(gfp_t gfp, unsigned order)
2281cc09cb13SMatthew Wilcox (Oracle) {
2282cc09cb13SMatthew Wilcox (Oracle) 	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2283cc09cb13SMatthew Wilcox (Oracle) 
2284cc09cb13SMatthew Wilcox (Oracle) 	if (page && order > 1)
2285cc09cb13SMatthew Wilcox (Oracle) 		prep_transhuge_page(page);
2286cc09cb13SMatthew Wilcox (Oracle) 	return (struct folio *)page;
2287cc09cb13SMatthew Wilcox (Oracle) }
2288cc09cb13SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_alloc);
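/*
 * Example usage (illustrative sketch): folio_alloc() ORs in __GFP_COMP,
 * so the result is always a single compound unit; release it with
 * folio_put().
 *
 *	struct folio *folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 0);
 *
 *	if (!folio)
 *		return -ENOMEM;
 *	...
 *	folio_put(folio);
 */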
2289cc09cb13SMatthew Wilcox (Oracle) 
2290c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2291c00b6b96SChen Wandun 		struct mempolicy *pol, unsigned long nr_pages,
2292c00b6b96SChen Wandun 		struct page **page_array)
2293c00b6b96SChen Wandun {
2294c00b6b96SChen Wandun 	int nodes;
2295c00b6b96SChen Wandun 	unsigned long nr_pages_per_node;
2296c00b6b96SChen Wandun 	int delta;
2297c00b6b96SChen Wandun 	int i;
2298c00b6b96SChen Wandun 	unsigned long nr_allocated;
2299c00b6b96SChen Wandun 	unsigned long total_allocated = 0;
2300c00b6b96SChen Wandun 
2301c00b6b96SChen Wandun 	nodes = nodes_weight(pol->nodes);
2302c00b6b96SChen Wandun 	nr_pages_per_node = nr_pages / nodes;
2303c00b6b96SChen Wandun 	delta = nr_pages - nodes * nr_pages_per_node;
2304c00b6b96SChen Wandun 
2305c00b6b96SChen Wandun 	for (i = 0; i < nodes; i++) {
2306c00b6b96SChen Wandun 		if (delta) {
2307c00b6b96SChen Wandun 			nr_allocated = __alloc_pages_bulk(gfp,
2308c00b6b96SChen Wandun 					interleave_nodes(pol), NULL,
2309c00b6b96SChen Wandun 					nr_pages_per_node + 1, NULL,
2310c00b6b96SChen Wandun 					page_array);
2311c00b6b96SChen Wandun 			delta--;
2312c00b6b96SChen Wandun 		} else {
2313c00b6b96SChen Wandun 			nr_allocated = __alloc_pages_bulk(gfp,
2314c00b6b96SChen Wandun 					interleave_nodes(pol), NULL,
2315c00b6b96SChen Wandun 					nr_pages_per_node, NULL, page_array);
2316c00b6b96SChen Wandun 		}
2317c00b6b96SChen Wandun 
2318c00b6b96SChen Wandun 		page_array += nr_allocated;
2319c00b6b96SChen Wandun 		total_allocated += nr_allocated;
2320c00b6b96SChen Wandun 	}
2321c00b6b96SChen Wandun 
2322c00b6b96SChen Wandun 	return total_allocated;
2323c00b6b96SChen Wandun }
2324c00b6b96SChen Wandun 
2325c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2326c00b6b96SChen Wandun 		struct mempolicy *pol, unsigned long nr_pages,
2327c00b6b96SChen Wandun 		struct page **page_array)
2328c00b6b96SChen Wandun {
2329c00b6b96SChen Wandun 	gfp_t preferred_gfp;
2330c00b6b96SChen Wandun 	unsigned long nr_allocated = 0;
2331c00b6b96SChen Wandun 
2332c00b6b96SChen Wandun 	preferred_gfp = gfp | __GFP_NOWARN;
2333c00b6b96SChen Wandun 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2334c00b6b96SChen Wandun 
2335c00b6b96SChen Wandun 	nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2336c00b6b96SChen Wandun 					   nr_pages, NULL, page_array);
2337c00b6b96SChen Wandun 
2338c00b6b96SChen Wandun 	if (nr_allocated < nr_pages)
2339c00b6b96SChen Wandun 		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2340c00b6b96SChen Wandun 				nr_pages - nr_allocated, NULL,
2341c00b6b96SChen Wandun 				page_array + nr_allocated);
2342c00b6b96SChen Wandun 	return nr_allocated;
2343c00b6b96SChen Wandun }
2344c00b6b96SChen Wandun 
2345c00b6b96SChen Wandun /* Bulk page allocation and the mempolicy must be considered
2346c00b6b96SChen Wandun  * together in some situations, such as vmalloc.
2347c00b6b96SChen Wandun  *
2348c00b6b96SChen Wandun  * This can accelerate memory allocation, especially for
2349c00b6b96SChen Wandun  * interleaved allocations.
2350c00b6b96SChen Wandun  */
2351c00b6b96SChen Wandun unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2352c00b6b96SChen Wandun 		unsigned long nr_pages, struct page **page_array)
2353c00b6b96SChen Wandun {
2354c00b6b96SChen Wandun 	struct mempolicy *pol = &default_policy;
2355c00b6b96SChen Wandun 
2356c00b6b96SChen Wandun 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2357c00b6b96SChen Wandun 		pol = get_task_policy(current);
2358c00b6b96SChen Wandun 
2359c00b6b96SChen Wandun 	if (pol->mode == MPOL_INTERLEAVE)
2360c00b6b96SChen Wandun 		return alloc_pages_bulk_array_interleave(gfp, pol,
2361c00b6b96SChen Wandun 							 nr_pages, page_array);
2362c00b6b96SChen Wandun 
2363c00b6b96SChen Wandun 	if (pol->mode == MPOL_PREFERRED_MANY)
2364c00b6b96SChen Wandun 		return alloc_pages_bulk_array_preferred_many(gfp,
2365c00b6b96SChen Wandun 				numa_node_id(), pol, nr_pages, page_array);
2366c00b6b96SChen Wandun 
2367c00b6b96SChen Wandun 	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2368c00b6b96SChen Wandun 				  policy_nodemask(gfp, pol), nr_pages, NULL,
2369c00b6b96SChen Wandun 				  page_array);
2370c00b6b96SChen Wandun }
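/*
 * Example usage (illustrative sketch; nr_pages and the recovery path are
 * hypothetical): a vmalloc-style caller filling a page array, which may
 * be satisfied only partially.
 *
 *	struct page **pages;
 *	unsigned long nr;
 *
 *	pages = kvcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
 *	if (!pages)
 *		return -ENOMEM;
 *	nr = alloc_pages_bulk_array_mempolicy(GFP_KERNEL, nr_pages, pages);
 *	if (nr < nr_pages)
 *		... allocate the remainder one page at a time, or unwind ...
 */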
2371c00b6b96SChen Wandun 
2372ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2373ef0855d3SOleg Nesterov {
2374ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2375ef0855d3SOleg Nesterov 
2376ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2377ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2378ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2379ef0855d3SOleg Nesterov 	return 0;
2380ef0855d3SOleg Nesterov }
2381ef0855d3SOleg Nesterov 
23824225399aSPaul Jackson /*
2383846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
23844225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
23854225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
23864225399aSPaul Jackson  * keeps mempolicies cpuset-relative after their cpuset moves.  See
23874225399aSPaul Jackson  * also update_nodemask() in kernel/cpuset.c.
2388708c1bbcSMiao Xie  *
2389708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the one changing the
2390708c1bbcSMiao Xie  * cpuset's mems), so we need not do the rebind work for the current task.
23914225399aSPaul Jackson  */
23924225399aSPaul Jackson 
2393846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2394846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
23951da177e4SLinus Torvalds {
23961da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
23971da177e4SLinus Torvalds 
23981da177e4SLinus Torvalds 	if (!new)
23991da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2400708c1bbcSMiao Xie 
2401708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2402708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2403708c1bbcSMiao Xie 		task_lock(current);
2404708c1bbcSMiao Xie 		*new = *old;
2405708c1bbcSMiao Xie 		task_unlock(current);
2406708c1bbcSMiao Xie 	} else
2407708c1bbcSMiao Xie 		*new = *old;
2408708c1bbcSMiao Xie 
24094225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
24104225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2411213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
24124225399aSPaul Jackson 	}
24131da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
24141da177e4SLinus Torvalds 	return new;
24151da177e4SLinus Torvalds }
24161da177e4SLinus Torvalds 
24171da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2418fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
24191da177e4SLinus Torvalds {
24201da177e4SLinus Torvalds 	if (!a || !b)
2421fcfb4dccSKOSAKI Motohiro 		return false;
242245c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2423fcfb4dccSKOSAKI Motohiro 		return false;
242419800502SBob Liu 	if (a->flags != b->flags)
2425fcfb4dccSKOSAKI Motohiro 		return false;
2426c6018b4bSAneesh Kumar K.V 	if (a->home_node != b->home_node)
2427c6018b4bSAneesh Kumar K.V 		return false;
242819800502SBob Liu 	if (mpol_store_user_nodemask(a))
242919800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2430fcfb4dccSKOSAKI Motohiro 			return false;
243119800502SBob Liu 
243245c4745aSLee Schermerhorn 	switch (a->mode) {
243319770b32SMel Gorman 	case MPOL_BIND:
24341da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
24351da177e4SLinus Torvalds 	case MPOL_PREFERRED:
2436b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2437269fbe72SBen Widawsky 		return !!nodes_equal(a->nodes, b->nodes);
24387858d7bcSFeng Tang 	case MPOL_LOCAL:
24397858d7bcSFeng Tang 		return true;
24401da177e4SLinus Torvalds 	default:
24411da177e4SLinus Torvalds 		BUG();
2442fcfb4dccSKOSAKI Motohiro 		return false;
24431da177e4SLinus Torvalds 	}
24441da177e4SLinus Torvalds }
24451da177e4SLinus Torvalds 
24461da177e4SLinus Torvalds /*
24471da177e4SLinus Torvalds  * Shared memory backing store policy support.
24481da177e4SLinus Torvalds  *
24491da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
24501da177e4SLinus Torvalds  * The policies are kept in a red-black tree linked from the inode.
24514a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
24521da177e4SLinus Torvalds  * for any accesses to the tree.
24531da177e4SLinus Torvalds  */
24541da177e4SLinus Torvalds 
24554a8c7bb5SNathan Zimmer /*
24564a8c7bb5SNathan Zimmer  * Look up the first element intersecting start-end.  The caller must hold
24574a8c7bb5SNathan Zimmer  * sp->lock for reading or for writing.
24584a8c7bb5SNathan Zimmer  */
24591da177e4SLinus Torvalds static struct sp_node *
24601da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
24611da177e4SLinus Torvalds {
24621da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
24631da177e4SLinus Torvalds 
24641da177e4SLinus Torvalds 	while (n) {
24651da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
24661da177e4SLinus Torvalds 
24671da177e4SLinus Torvalds 		if (start >= p->end)
24681da177e4SLinus Torvalds 			n = n->rb_right;
24691da177e4SLinus Torvalds 		else if (end <= p->start)
24701da177e4SLinus Torvalds 			n = n->rb_left;
24711da177e4SLinus Torvalds 		else
24721da177e4SLinus Torvalds 			break;
24731da177e4SLinus Torvalds 	}
24741da177e4SLinus Torvalds 	if (!n)
24751da177e4SLinus Torvalds 		return NULL;
24761da177e4SLinus Torvalds 	for (;;) {
24771da177e4SLinus Torvalds 		struct sp_node *w = NULL;
24781da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
24791da177e4SLinus Torvalds 		if (!prev)
24801da177e4SLinus Torvalds 			break;
24811da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
24821da177e4SLinus Torvalds 		if (w->end <= start)
24831da177e4SLinus Torvalds 			break;
24841da177e4SLinus Torvalds 		n = prev;
24851da177e4SLinus Torvalds 	}
24861da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
24871da177e4SLinus Torvalds }
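/*
 * Worked example (assumed tree contents): with ranges [2,5) and [5,9)
 * in the tree, sp_lookup(sp, 4, 7) first stops on an intersecting node,
 * then walks rb_prev() while the previous node still overlaps [4,7),
 * and returns [2,5), the intersecting node with the lowest start.
 */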
24881da177e4SLinus Torvalds 
24894a8c7bb5SNathan Zimmer /*
24904a8c7bb5SNathan Zimmer  * Insert a new shared policy into the tree.  The caller must hold
24914a8c7bb5SNathan Zimmer  * sp->lock for writing.
24924a8c7bb5SNathan Zimmer  */
24931da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
24941da177e4SLinus Torvalds {
24951da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
24961da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
24971da177e4SLinus Torvalds 	struct sp_node *nd;
24981da177e4SLinus Torvalds 
24991da177e4SLinus Torvalds 	while (*p) {
25001da177e4SLinus Torvalds 		parent = *p;
25011da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
25021da177e4SLinus Torvalds 		if (new->start < nd->start)
25031da177e4SLinus Torvalds 			p = &(*p)->rb_left;
25041da177e4SLinus Torvalds 		else if (new->end > nd->end)
25051da177e4SLinus Torvalds 			p = &(*p)->rb_right;
25061da177e4SLinus Torvalds 		else
25071da177e4SLinus Torvalds 			BUG();
25081da177e4SLinus Torvalds 	}
25091da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
25101da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2511140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
251245c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
25131da177e4SLinus Torvalds }
25141da177e4SLinus Torvalds 
25151da177e4SLinus Torvalds /* Find shared policy intersecting idx */
25161da177e4SLinus Torvalds struct mempolicy *
25171da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
25181da177e4SLinus Torvalds {
25191da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
25201da177e4SLinus Torvalds 	struct sp_node *sn;
25211da177e4SLinus Torvalds 
25221da177e4SLinus Torvalds 	if (!sp->root.rb_node)
25231da177e4SLinus Torvalds 		return NULL;
25244a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
25251da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
25261da177e4SLinus Torvalds 	if (sn) {
25271da177e4SLinus Torvalds 		mpol_get(sn->policy);
25281da177e4SLinus Torvalds 		pol = sn->policy;
25291da177e4SLinus Torvalds 	}
25304a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
25311da177e4SLinus Torvalds 	return pol;
25321da177e4SLinus Torvalds }
25331da177e4SLinus Torvalds 
253463f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
253563f74ca2SKOSAKI Motohiro {
253663f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
253763f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
253863f74ca2SKOSAKI Motohiro }
253963f74ca2SKOSAKI Motohiro 
2540771fb4d8SLee Schermerhorn /**
2541771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether the current page's node is valid in policy
2542771fb4d8SLee Schermerhorn  *
2543b46e14acSFabian Frederick  * @page: page to be checked
2544b46e14acSFabian Frederick  * @vma: vm area where the page is mapped
2545b46e14acSFabian Frederick  * @addr: virtual address where the page is mapped
2546771fb4d8SLee Schermerhorn  *
2547771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the
25485f076944SMatthew Wilcox (Oracle)  * node id of the page.  Policy determination "mimics" alloc_page_vma().
2549771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
25505f076944SMatthew Wilcox (Oracle)  *
2551062db293SBaolin Wang  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2552062db293SBaolin Wang  * policy, or a suitable node ID to allocate a replacement page from.
2553771fb4d8SLee Schermerhorn  */
2554771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2555771fb4d8SLee Schermerhorn {
2556771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2557c33d6c06SMel Gorman 	struct zoneref *z;
2558771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2559771fb4d8SLee Schermerhorn 	unsigned long pgoff;
256090572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
256190572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
256298fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2563062db293SBaolin Wang 	int ret = NUMA_NO_NODE;
2564771fb4d8SLee Schermerhorn 
2565dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2566771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2567771fb4d8SLee Schermerhorn 		goto out;
2568771fb4d8SLee Schermerhorn 
2569771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2570771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2571771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2572771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
257398c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2574771fb4d8SLee Schermerhorn 		break;
2575771fb4d8SLee Schermerhorn 
2576771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2577b27abaccSDave Hansen 		if (node_isset(curnid, pol->nodes))
2578b27abaccSDave Hansen 			goto out;
2579269fbe72SBen Widawsky 		polnid = first_node(pol->nodes);
2580771fb4d8SLee Schermerhorn 		break;
2581771fb4d8SLee Schermerhorn 
25827858d7bcSFeng Tang 	case MPOL_LOCAL:
25837858d7bcSFeng Tang 		polnid = numa_node_id();
25847858d7bcSFeng Tang 		break;
25857858d7bcSFeng Tang 
2586771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2587bda420b9SHuang Ying 		/* Optimize placement among multiple nodes via NUMA balancing */
2588bda420b9SHuang Ying 		if (pol->flags & MPOL_F_MORON) {
2589269fbe72SBen Widawsky 			if (node_isset(thisnid, pol->nodes))
2590bda420b9SHuang Ying 				break;
2591bda420b9SHuang Ying 			goto out;
2592bda420b9SHuang Ying 		}
2593b27abaccSDave Hansen 		fallthrough;
2594c33d6c06SMel Gorman 
2595b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2596771fb4d8SLee Schermerhorn 		/*
2597771fb4d8SLee Schermerhorn 		 * Use the current page if it is in the policy nodemask;
2598771fb4d8SLee Schermerhorn 		 * else select the nearest allowed node, if any.
2599771fb4d8SLee Schermerhorn 		 * If there are no allowed nodes, use the current [!misplaced].
2600771fb4d8SLee Schermerhorn 		 */
2601269fbe72SBen Widawsky 		if (node_isset(curnid, pol->nodes))
2602771fb4d8SLee Schermerhorn 			goto out;
2603c33d6c06SMel Gorman 		z = first_zones_zonelist(
2604771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2605771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2606269fbe72SBen Widawsky 				&pol->nodes);
2607c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2608771fb4d8SLee Schermerhorn 		break;
2609771fb4d8SLee Schermerhorn 
2610771fb4d8SLee Schermerhorn 	default:
2611771fb4d8SLee Schermerhorn 		BUG();
2612771fb4d8SLee Schermerhorn 	}
26135606e387SMel Gorman 
26145606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2615e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
261690572890SPeter Zijlstra 		polnid = thisnid;
26175606e387SMel Gorman 
261810f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2619de1c9ce6SRik van Riel 			goto out;
2620de1c9ce6SRik van Riel 	}
2621e42c8ff2SMel Gorman 
2622771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2623771fb4d8SLee Schermerhorn 		ret = polnid;
2624771fb4d8SLee Schermerhorn out:
2625771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2626771fb4d8SLee Schermerhorn 
2627771fb4d8SLee Schermerhorn 	return ret;
2628771fb4d8SLee Schermerhorn }
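/*
 * Example usage (illustrative sketch of a NUMA-hinting-fault caller,
 * along the lines of do_numa_page()):
 *
 *	int target = mpol_misplaced(page, vma, addr);
 *
 *	if (target != NUMA_NO_NODE)
 *		migrate_misplaced_page(page, vma, target);
 */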
2629771fb4d8SLee Schermerhorn 
2630c11600e4SDavid Rientjes /*
2631c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2632c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2633c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2634c11600e4SDavid Rientjes  * policy.
2635c11600e4SDavid Rientjes  */
2636c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2637c11600e4SDavid Rientjes {
2638c11600e4SDavid Rientjes 	struct mempolicy *pol;
2639c11600e4SDavid Rientjes 
2640c11600e4SDavid Rientjes 	task_lock(task);
2641c11600e4SDavid Rientjes 	pol = task->mempolicy;
2642c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2643c11600e4SDavid Rientjes 	task_unlock(task);
2644c11600e4SDavid Rientjes 	mpol_put(pol);
2645c11600e4SDavid Rientjes }
2646c11600e4SDavid Rientjes 
26471da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
26481da177e4SLinus Torvalds {
2649140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
26501da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
265163f74ca2SKOSAKI Motohiro 	sp_free(n);
26521da177e4SLinus Torvalds }
26531da177e4SLinus Torvalds 
265442288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
265542288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
265642288fe3SMel Gorman {
265742288fe3SMel Gorman 	node->start = start;
265842288fe3SMel Gorman 	node->end = end;
265942288fe3SMel Gorman 	node->policy = pol;
266042288fe3SMel Gorman }
266142288fe3SMel Gorman 
2662dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2663dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
26641da177e4SLinus Torvalds {
2665869833f2SKOSAKI Motohiro 	struct sp_node *n;
2666869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
26671da177e4SLinus Torvalds 
2668869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
26691da177e4SLinus Torvalds 	if (!n)
26701da177e4SLinus Torvalds 		return NULL;
2671869833f2SKOSAKI Motohiro 
2672869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2673869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2674869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2675869833f2SKOSAKI Motohiro 		return NULL;
2676869833f2SKOSAKI Motohiro 	}
2677869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
267842288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2679869833f2SKOSAKI Motohiro 
26801da177e4SLinus Torvalds 	return n;
26811da177e4SLinus Torvalds }
26821da177e4SLinus Torvalds 
26831da177e4SLinus Torvalds /* Replace a policy range. */
26841da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
26851da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
26861da177e4SLinus Torvalds {
2687b22d127aSMel Gorman 	struct sp_node *n;
268842288fe3SMel Gorman 	struct sp_node *n_new = NULL;
268942288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2690b22d127aSMel Gorman 	int ret = 0;
26911da177e4SLinus Torvalds 
269242288fe3SMel Gorman restart:
26934a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
26941da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
26951da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
26961da177e4SLinus Torvalds 	while (n && n->start < end) {
26971da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
26981da177e4SLinus Torvalds 		if (n->start >= start) {
26991da177e4SLinus Torvalds 			if (n->end <= end)
27001da177e4SLinus Torvalds 				sp_delete(sp, n);
27011da177e4SLinus Torvalds 			else
27021da177e4SLinus Torvalds 				n->start = end;
27031da177e4SLinus Torvalds 		} else {
27041da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
27051da177e4SLinus Torvalds 			if (n->end > end) {
270642288fe3SMel Gorman 				if (!n_new)
270742288fe3SMel Gorman 					goto alloc_new;
270842288fe3SMel Gorman 
270942288fe3SMel Gorman 				*mpol_new = *n->policy;
271042288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
27117880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
27121da177e4SLinus Torvalds 				n->end = start;
27135ca39575SHillf Danton 				sp_insert(sp, n_new);
271442288fe3SMel Gorman 				n_new = NULL;
271542288fe3SMel Gorman 				mpol_new = NULL;
27161da177e4SLinus Torvalds 				break;
27171da177e4SLinus Torvalds 			} else
27181da177e4SLinus Torvalds 				n->end = start;
27191da177e4SLinus Torvalds 		}
27201da177e4SLinus Torvalds 		if (!next)
27211da177e4SLinus Torvalds 			break;
27221da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
27231da177e4SLinus Torvalds 	}
27241da177e4SLinus Torvalds 	if (new)
27251da177e4SLinus Torvalds 		sp_insert(sp, new);
27264a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
272742288fe3SMel Gorman 	ret = 0;
272842288fe3SMel Gorman 
272942288fe3SMel Gorman err_out:
273042288fe3SMel Gorman 	if (mpol_new)
273142288fe3SMel Gorman 		mpol_put(mpol_new);
273242288fe3SMel Gorman 	if (n_new)
273342288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
273442288fe3SMel Gorman 
2735b22d127aSMel Gorman 	return ret;
273642288fe3SMel Gorman 
273742288fe3SMel Gorman alloc_new:
27384a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
273942288fe3SMel Gorman 	ret = -ENOMEM;
274042288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
274142288fe3SMel Gorman 	if (!n_new)
274242288fe3SMel Gorman 		goto err_out;
274342288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
274442288fe3SMel Gorman 	if (!mpol_new)
274542288fe3SMel Gorman 		goto err_out;
274642288fe3SMel Gorman 	goto restart;
27471da177e4SLinus Torvalds }
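/*
 * Worked example (assumed ranges): replacing [3,5) inside an existing
 * node [0,10) requires a split: the old node is trimmed to [0,3), the
 * new node covers [3,5), and the preallocated n_new/mpol_new pair
 * becomes [5,10) carrying a copy of the old policy.  The preallocation
 * happens under the alloc_new: label with sp->lock dropped, hence the
 * restart.
 */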
27481da177e4SLinus Torvalds 
274971fe804bSLee Schermerhorn /**
275071fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
275171fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
275271fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
275371fe804bSLee Schermerhorn  *
275471fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
275571fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
275671fe804bSLee Schermerhorn  * This must be released on exit.
27574bfc4495SKAMEZAWA Hiroyuki  * This is called from get_inode() calls, so we can use GFP_KERNEL.
275871fe804bSLee Schermerhorn  */
275971fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
27607339ff83SRobin Holt {
276158568d2aSMiao Xie 	int ret;
276258568d2aSMiao Xie 
276371fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
27644a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
27657339ff83SRobin Holt 
276671fe804bSLee Schermerhorn 	if (mpol) {
27677339ff83SRobin Holt 		struct vm_area_struct pvma;
276871fe804bSLee Schermerhorn 		struct mempolicy *new;
27694bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
27707339ff83SRobin Holt 
27714bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
27725c0c1654SLee Schermerhorn 			goto put_mpol;
277371fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
277471fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
277515d77835SLee Schermerhorn 		if (IS_ERR(new))
27760cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
277758568d2aSMiao Xie 
277858568d2aSMiao Xie 		task_lock(current);
27794bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
278058568d2aSMiao Xie 		task_unlock(current);
278115d77835SLee Schermerhorn 		if (ret)
27825c0c1654SLee Schermerhorn 			goto put_new;
278371fe804bSLee Schermerhorn 
278471fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
27852c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
278671fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
278771fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
278815d77835SLee Schermerhorn 
27895c0c1654SLee Schermerhorn put_new:
279071fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
27910cae3457SDan Carpenter free_scratch:
27924bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
27935c0c1654SLee Schermerhorn put_mpol:
27945c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
27957339ff83SRobin Holt 	}
27967339ff83SRobin Holt }
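/*
 * Example caller (sketch; tmpfs is the main user, and info/sbinfo are
 * shmem's names): shmem inode setup does roughly
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 *
 * handing over the superblock's mount-option mempolicy reference.
 */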
27977339ff83SRobin Holt 
27981da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
27991da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
28001da177e4SLinus Torvalds {
28011da177e4SLinus Torvalds 	int err;
28021da177e4SLinus Torvalds 	struct sp_node *new = NULL;
28031da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
28041da177e4SLinus Torvalds 
2805028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
28061da177e4SLinus Torvalds 		 vma->vm_pgoff,
280745c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2808028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
2809269fbe72SBen Widawsky 		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
28101da177e4SLinus Torvalds 
28111da177e4SLinus Torvalds 	if (npol) {
28121da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
28131da177e4SLinus Torvalds 		if (!new)
28141da177e4SLinus Torvalds 			return -ENOMEM;
28151da177e4SLinus Torvalds 	}
28161da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
28171da177e4SLinus Torvalds 	if (err && new)
281863f74ca2SKOSAKI Motohiro 		sp_free(new);
28191da177e4SLinus Torvalds 	return err;
28201da177e4SLinus Torvalds }
28211da177e4SLinus Torvalds 
28221da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
28231da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
28241da177e4SLinus Torvalds {
28251da177e4SLinus Torvalds 	struct sp_node *n;
28261da177e4SLinus Torvalds 	struct rb_node *next;
28271da177e4SLinus Torvalds 
28281da177e4SLinus Torvalds 	if (!p->root.rb_node)
28291da177e4SLinus Torvalds 		return;
28304a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
28311da177e4SLinus Torvalds 	next = rb_first(&p->root);
28321da177e4SLinus Torvalds 	while (next) {
28331da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
28341da177e4SLinus Torvalds 		next = rb_next(&n->nd);
283563f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
28361da177e4SLinus Torvalds 	}
28374a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
28381da177e4SLinus Torvalds }
28391da177e4SLinus Torvalds 
28401a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2841c297663cSMel Gorman static int __initdata numabalancing_override;
28421a687c2eSMel Gorman 
28431a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
28441a687c2eSMel Gorman {
28451a687c2eSMel Gorman 	bool numabalancing_default = false;
28461a687c2eSMel Gorman 
28471a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
28481a687c2eSMel Gorman 		numabalancing_default = true;
28491a687c2eSMel Gorman 
2850c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2851c297663cSMel Gorman 	if (numabalancing_override)
2852c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2853c297663cSMel Gorman 
2854b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2855756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2856c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
28571a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
28581a687c2eSMel Gorman 	}
28591a687c2eSMel Gorman }
28601a687c2eSMel Gorman 
28611a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
28621a687c2eSMel Gorman {
28631a687c2eSMel Gorman 	int ret = 0;
28641a687c2eSMel Gorman 	if (!str)
28651a687c2eSMel Gorman 		goto out;
28661a687c2eSMel Gorman 
28671a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2868c297663cSMel Gorman 		numabalancing_override = 1;
28691a687c2eSMel Gorman 		ret = 1;
28701a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2871c297663cSMel Gorman 		numabalancing_override = -1;
28721a687c2eSMel Gorman 		ret = 1;
28731a687c2eSMel Gorman 	}
28741a687c2eSMel Gorman out:
28751a687c2eSMel Gorman 	if (!ret)
28764a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
28771a687c2eSMel Gorman 
28781a687c2eSMel Gorman 	return ret;
28791a687c2eSMel Gorman }
28801a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
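/*
 * Example (kernel command line): "numa_balancing=enable" or
 * "numa_balancing=disable" overrides the built-in default; any other
 * value logs "Unable to parse numa_balancing=" and is ignored.
 */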
28811a687c2eSMel Gorman #else
28821a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
28831a687c2eSMel Gorman {
28841a687c2eSMel Gorman }
28851a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
28861a687c2eSMel Gorman 
28871da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
28881da177e4SLinus Torvalds void __init numa_policy_init(void)
28891da177e4SLinus Torvalds {
2890b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2891b71636e2SPaul Mundt 	unsigned long largest = 0;
2892b71636e2SPaul Mundt 	int nid, prefer = 0;
2893b71636e2SPaul Mundt 
28941da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
28951da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
289620c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
28971da177e4SLinus Torvalds 
28981da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
28991da177e4SLinus Torvalds 				     sizeof(struct sp_node),
290020c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
29011da177e4SLinus Torvalds 
29025606e387SMel Gorman 	for_each_node(nid) {
29035606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
29045606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
29055606e387SMel Gorman 			.mode = MPOL_PREFERRED,
29065606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2907269fbe72SBen Widawsky 			.nodes = nodemask_of_node(nid),
29085606e387SMel Gorman 		};
29095606e387SMel Gorman 	}
29105606e387SMel Gorman 
2911b71636e2SPaul Mundt 	/*
2912b71636e2SPaul Mundt 	 * Set the interleaving policy for system init. Interleaving is
2913b71636e2SPaul Mundt 	 * only enabled across suitably sized nodes (default is >= 16MB);
2914b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2915b71636e2SPaul Mundt 	 */
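	/*
	 * Worked example: with 4 KiB pages, the 16MB cutoff below is
	 * 4096 present pages per node.
	 */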
2916b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
291701f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2918b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
29191da177e4SLinus Torvalds 
2920b71636e2SPaul Mundt 		/* Preserve the largest node */
2921b71636e2SPaul Mundt 		if (largest < total_pages) {
2922b71636e2SPaul Mundt 			largest = total_pages;
2923b71636e2SPaul Mundt 			prefer = nid;
2924b71636e2SPaul Mundt 		}
2925b71636e2SPaul Mundt 
2926b71636e2SPaul Mundt 		/* Interleave this node? */
2927b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2928b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2929b71636e2SPaul Mundt 	}
2930b71636e2SPaul Mundt 
2931b71636e2SPaul Mundt 	/* All too small, use the largest */
2932b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2933b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2934b71636e2SPaul Mundt 
2935028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2936b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
29371a687c2eSMel Gorman 
29381a687c2eSMel Gorman 	check_numabalancing_enable();
29391da177e4SLinus Torvalds }
29401da177e4SLinus Torvalds 
29418bccd85fSChristoph Lameter /* Reset policy of current process to default */
29421da177e4SLinus Torvalds void numa_default_policy(void)
29431da177e4SLinus Torvalds {
2944028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
29451da177e4SLinus Torvalds }
294668860ec1SPaul Jackson 
29474225399aSPaul Jackson /*
2948095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2949095f1fc4SLee Schermerhorn  */
2950095f1fc4SLee Schermerhorn 
2951345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2952345ace9cSLee Schermerhorn {
2953345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2954345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2955345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2956345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2957d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2958b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2959345ace9cSLee Schermerhorn };
29601a75a6c8SChristoph Lameter 
2961095f1fc4SLee Schermerhorn 
2962095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2963095f1fc4SLee Schermerhorn /**
2964f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2965095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
296671fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2967095f1fc4SLee Schermerhorn  *
2968095f1fc4SLee Schermerhorn  * Format of input:
2969095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2970095f1fc4SLee Schermerhorn  *
2971dad5b023SRandy Dunlap  * Return: %0 on success, else %1
2972095f1fc4SLee Schermerhorn  */
2973a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2974095f1fc4SLee Schermerhorn {
297571fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2976f2a07f40SHugh Dickins 	unsigned short mode_flags;
297771fe804bSLee Schermerhorn 	nodemask_t nodes;
2978095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2979095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2980dedf2c73Szhong jiang 	int err = 1, mode;
2981095f1fc4SLee Schermerhorn 
2982c7a91bc7SDan Carpenter 	if (flags)
2983c7a91bc7SDan Carpenter 		*flags++ = '\0';	/* terminate mode string */
2984c7a91bc7SDan Carpenter 
2985095f1fc4SLee Schermerhorn 	if (nodelist) {
2986095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2987095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
298871fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2989095f1fc4SLee Schermerhorn 			goto out;
299001f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2991095f1fc4SLee Schermerhorn 			goto out;
299271fe804bSLee Schermerhorn 	} else
299371fe804bSLee Schermerhorn 		nodes_clear(nodes);
299471fe804bSLee Schermerhorn 
2995dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
2996dedf2c73Szhong jiang 	if (mode < 0)
2997095f1fc4SLee Schermerhorn 		goto out;
2998095f1fc4SLee Schermerhorn 
299971fe804bSLee Schermerhorn 	switch (mode) {
3000095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
300171fe804bSLee Schermerhorn 		/*
3002aa9f7d51SRandy Dunlap 		 * Insist on a nodelist of one node only; later we use
3003aa9f7d51SRandy Dunlap 		 * first_node(nodes) to grab that single node, so here the
3004aa9f7d51SRandy Dunlap 		 * nodelist (or nodes) cannot be empty.
300571fe804bSLee Schermerhorn 		 */
3006095f1fc4SLee Schermerhorn 		if (nodelist) {
3007095f1fc4SLee Schermerhorn 			char *rest = nodelist;
3008095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
3009095f1fc4SLee Schermerhorn 				rest++;
3010926f2ae0SKOSAKI Motohiro 			if (*rest)
3011926f2ae0SKOSAKI Motohiro 				goto out;
3012aa9f7d51SRandy Dunlap 			if (nodes_empty(nodes))
3013aa9f7d51SRandy Dunlap 				goto out;
3014095f1fc4SLee Schermerhorn 		}
3015095f1fc4SLee Schermerhorn 		break;
3016095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
3017095f1fc4SLee Schermerhorn 		/*
3018095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
3019095f1fc4SLee Schermerhorn 		 */
3020095f1fc4SLee Schermerhorn 		if (!nodelist)
302101f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
30223f226aa1SLee Schermerhorn 		break;
302371fe804bSLee Schermerhorn 	case MPOL_LOCAL:
30243f226aa1SLee Schermerhorn 		/*
302571fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
30263f226aa1SLee Schermerhorn 		 */
302771fe804bSLee Schermerhorn 		if (nodelist)
30283f226aa1SLee Schermerhorn 			goto out;
30293f226aa1SLee Schermerhorn 		break;
3030413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
3031413b43deSRavikiran G Thirumalai 		/*
3032413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
3033413b43deSRavikiran G Thirumalai 		 */
3034413b43deSRavikiran G Thirumalai 		if (!nodelist)
3035413b43deSRavikiran G Thirumalai 			err = 0;
3036413b43deSRavikiran G Thirumalai 		goto out;
3037b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
3038d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
303971fe804bSLee Schermerhorn 		/*
3040d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
304171fe804bSLee Schermerhorn 		 */
3042d69b2e63SKOSAKI Motohiro 		if (!nodelist)
3043d69b2e63SKOSAKI Motohiro 			goto out;
3044095f1fc4SLee Schermerhorn 	}
3045095f1fc4SLee Schermerhorn 
304671fe804bSLee Schermerhorn 	mode_flags = 0;
3047095f1fc4SLee Schermerhorn 	if (flags) {
3048095f1fc4SLee Schermerhorn 		/*
3049095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
3050095f1fc4SLee Schermerhorn 		 * mode flags.
3051095f1fc4SLee Schermerhorn 		 */
3052095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
305371fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
3054095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
305571fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
3056095f1fc4SLee Schermerhorn 		else
3057926f2ae0SKOSAKI Motohiro 			goto out;
3058095f1fc4SLee Schermerhorn 	}
305971fe804bSLee Schermerhorn 
306071fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
306171fe804bSLee Schermerhorn 	if (IS_ERR(new))
3062926f2ae0SKOSAKI Motohiro 		goto out;
3063926f2ae0SKOSAKI Motohiro 
3064f2a07f40SHugh Dickins 	/*
3065f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
3066f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3067f2a07f40SHugh Dickins 	 */
3068269fbe72SBen Widawsky 	if (mode != MPOL_PREFERRED) {
3069269fbe72SBen Widawsky 		new->nodes = nodes;
3070269fbe72SBen Widawsky 	} else if (nodelist) {
3071269fbe72SBen Widawsky 		nodes_clear(new->nodes);
3072269fbe72SBen Widawsky 		node_set(first_node(nodes), new->nodes);
3073269fbe72SBen Widawsky 	} else {
30747858d7bcSFeng Tang 		new->mode = MPOL_LOCAL;
3075269fbe72SBen Widawsky 	}
3076f2a07f40SHugh Dickins 
3077f2a07f40SHugh Dickins 	/*
3078f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
3079f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
3080f2a07f40SHugh Dickins 	 */
3081e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
3082f2a07f40SHugh Dickins 
3083926f2ae0SKOSAKI Motohiro 	err = 0;
308471fe804bSLee Schermerhorn 
3085095f1fc4SLee Schermerhorn out:
3086095f1fc4SLee Schermerhorn 	/* Restore string for error message */
3087095f1fc4SLee Schermerhorn 	if (nodelist)
3088095f1fc4SLee Schermerhorn 		*--nodelist = ':';
3089095f1fc4SLee Schermerhorn 	if (flags)
3090095f1fc4SLee Schermerhorn 		*--flags = '=';
309171fe804bSLee Schermerhorn 	if (!err)
309271fe804bSLee Schermerhorn 		*mpol = new;
3093095f1fc4SLee Schermerhorn 	return err;
3094095f1fc4SLee Schermerhorn }
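/*
 * Example usage (illustrative sketch): parsing a tmpfs-style mount
 * option; the string is modified in place and restored before return.
 *
 *	char str[] = "interleave:0-3";
 *	struct mempolicy *mpol;
 *
 *	if (!mpol_parse_str(str, &mpol)) {
 *		... use mpol ...
 *		mpol_put(mpol);
 *	}
 */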
3095095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
3096095f1fc4SLee Schermerhorn 
309771fe804bSLee Schermerhorn /**
309871fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
309971fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
310071fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
310171fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
310271fe804bSLee Schermerhorn  *
3103948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3104948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3105948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
31061a75a6c8SChristoph Lameter  */
3107948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
31081a75a6c8SChristoph Lameter {
31091a75a6c8SChristoph Lameter 	char *p = buffer;
3110948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
3111948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
3112948927eeSDavid Rientjes 	unsigned short flags = 0;
31131a75a6c8SChristoph Lameter 
31148790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3115bea904d5SLee Schermerhorn 		mode = pol->mode;
3116948927eeSDavid Rientjes 		flags = pol->flags;
3117948927eeSDavid Rientjes 	}
3118bea904d5SLee Schermerhorn 
31191a75a6c8SChristoph Lameter 	switch (mode) {
31201a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
31217858d7bcSFeng Tang 	case MPOL_LOCAL:
31221a75a6c8SChristoph Lameter 		break;
31231a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
3124b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
31251a75a6c8SChristoph Lameter 	case MPOL_BIND:
31261a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
3127269fbe72SBen Widawsky 		nodes = pol->nodes;
31281a75a6c8SChristoph Lameter 		break;
31291a75a6c8SChristoph Lameter 	default:
3130948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
3131948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
3132948927eeSDavid Rientjes 		return;
31331a75a6c8SChristoph Lameter 	}
31341a75a6c8SChristoph Lameter 
3135b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
31361a75a6c8SChristoph Lameter 
3137fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
3138948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
3139f5b087b5SDavid Rientjes 
31402291990aSLee Schermerhorn 		/*
31412291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
31422291990aSLee Schermerhorn 		 */
3143f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
31442291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
31452291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
31462291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
3147f5b087b5SDavid Rientjes 	}
3148f5b087b5SDavid Rientjes 
31499e763e0fSTejun Heo 	if (!nodes_empty(nodes))
31509e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
31519e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
31521a75a6c8SChristoph Lameter }
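/*
 * Example usage (illustrative sketch, as in procfs show methods):
 *
 *	char buffer[64];
 *
 *	mpol_to_str(buffer, sizeof(buffer), pol);
 *	seq_printf(m, "mpol=%s\n", buffer);
 *
 * For MPOL_INTERLEAVE over nodes 0-3 with the "static" flag this yields
 * "interleave=static:0-3".
 */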
3153