xref: /linux/mm/mempolicy.c (revision 0dd9ac63ce26ec87b080ca9c3e6efed33c23ace6)
1 /*
2  * Simple NUMA memory policy for the Linux kernel.
3  *
4  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6  * Subject to the GNU Public License, version 2.
7  *
8  * NUMA policy allows the user to give hints in which node(s) memory should
9  * be allocated.
10  *
11  * Support four policies per VMA and per process:
12  *
13  * The VMA policy has priority over the process policy for a page fault.
14  *
15  * interleave     Allocate memory interleaved over a set of nodes,
16  *                with normal fallback if it fails.
17  *                For VMA based allocations this interleaves based on the
18  *                offset into the backing object or offset into the mapping
19  *                for anonymous memory. For process policy a per-process
20  *                counter is used.
21  *
22  * bind           Only allocate memory on a specific set of nodes,
23  *                no fallback.
24  *                FIXME: memory is allocated starting with the first node
25  *                and proceeding to the last. It would be better if bind
26  *                truly restricted the allocation to the given nodes instead.
27  *
28  * preferred       Try a specific node first before normal fallback.
29  *                As a special case node -1 here means do the allocation
30  *                on the local CPU. This is normally identical to default,
31  *                but useful to set in a VMA when you have a non default
32  *                process policy.
33  *
34  * default        Allocate on the local node first, or when on a VMA
35  *                use the process policy. This is what Linux always did
36  *		  in a NUMA aware kernel and still does by, ahem, default.
37  *
38  * The process policy is applied for most non-interrupt memory allocations
39  * in that process' context. Interrupts ignore the policies and always
40  * try to allocate on the local CPU. The VMA policy is only applied for memory
41  * allocations for a VMA in the VM.
42  *
43  * Currently there are a few corner cases in swapping where the policy
44  * is not applied, but the majority should be handled. When process policy
45  * is used it is not remembered over swap outs/swap ins.
46  *
47  * Only the highest zone in the zone hierarchy gets policied. Allocations
48  * requesting a lower zone just use default policy. This implies that
49  * on systems with highmem, kernel lowmem allocations don't get policied.
50  * Same with GFP_DMA allocations.
51  *
52  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53  * all users and remembered even when nobody has memory mapped.
54  */
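/*
 * Illustrative userspace sketch (not part of this file): how the policies
 * described above are typically requested through the syscalls implemented
 * below, using the raw <numaif.h> wrappers.  'addr' and 'length' are assumed
 * to come from an earlier mmap(); error handling is omitted.
 *
 *	#include <numaif.h>
 *
 *	unsigned long both  = (1UL << 0) | (1UL << 1);
 *	unsigned long node0 = 1UL << 0;
 *
 *	// interleave this process's future allocations across nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &both, sizeof(both) * 8);
 *
 *	// bind an existing mapping to node 0, moving misplaced pages
 *	mbind(addr, length, MPOL_BIND, &node0, sizeof(node0) * 8,
 *	      MPOL_MF_MOVE | MPOL_MF_STRICT);
 */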
55 
56 /* Notebook:
57    fix mmap readahead to honour policy and enable policy for any page cache
58    object
59    statistics for bigpages
60    global policy for page cache? currently it uses process policy. Requires
61    first item above.
62    handle mremap for shared memory (currently ignored for the policy)
63    grows down?
64    make bind policy root only? It can trigger oom much faster and the
65    kernel does not always handle that gracefully.
66 */
67 
68 #include <linux/mempolicy.h>
69 #include <linux/mm.h>
70 #include <linux/highmem.h>
71 #include <linux/hugetlb.h>
72 #include <linux/kernel.h>
73 #include <linux/sched.h>
74 #include <linux/nodemask.h>
75 #include <linux/cpuset.h>
76 #include <linux/slab.h>
77 #include <linux/string.h>
78 #include <linux/module.h>
79 #include <linux/nsproxy.h>
80 #include <linux/interrupt.h>
81 #include <linux/init.h>
82 #include <linux/compat.h>
83 #include <linux/swap.h>
84 #include <linux/seq_file.h>
85 #include <linux/proc_fs.h>
86 #include <linux/migrate.h>
87 #include <linux/ksm.h>
88 #include <linux/rmap.h>
89 #include <linux/security.h>
90 #include <linux/syscalls.h>
91 #include <linux/ctype.h>
92 #include <linux/mm_inline.h>
93 
94 #include <asm/tlbflush.h>
95 #include <asm/uaccess.h>
96 
97 #include "internal.h"
98 
99 /* Internal flags */
100 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
101 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
102 #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */
103 
104 static struct kmem_cache *policy_cache;
105 static struct kmem_cache *sn_cache;
106 
107 /* Highest zone. A specific allocation for a zone below that is not
108    policied. */
109 enum zone_type policy_zone = 0;
110 
111 /*
112  * run-time system-wide default policy => local allocation
113  */
114 struct mempolicy default_policy = {
115 	.refcnt = ATOMIC_INIT(1), /* never free it */
116 	.mode = MPOL_PREFERRED,
117 	.flags = MPOL_F_LOCAL,
118 };
119 
120 static const struct mempolicy_operations {
121 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
122 	/*
123 	 * If the read-side task has no lock protecting task->mempolicy, the
124 	 * write-side task rebinds task->mempolicy in two steps. The first step
125 	 * sets all the newly allowed nodes, and the second step clears all the
126 	 * disallowed nodes. This way we never end up with no node from which
127 	 * to allocate a page.
128 	 * If the read side holds a lock protecting task->mempolicy, we rebind
129 	 * directly.
130 	 *
131 	 * step:
132 	 * 	MPOL_REBIND_ONCE  - do the rebind work at once
133 	 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
134 	 * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
135 	 */
136 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
137 			enum mpol_rebind_step step);
138 } mpol_ops[MPOL_MAX];
139 
140 /* Check that the nodemask contains at least one populated zone */
141 static int is_valid_nodemask(const nodemask_t *nodemask)
142 {
143 	int nd, k;
144 
145 	for_each_node_mask(nd, *nodemask) {
146 		struct zone *z;
147 
148 		for (k = 0; k <= policy_zone; k++) {
149 			z = &NODE_DATA(nd)->node_zones[k];
150 			if (z->present_pages > 0)
151 				return 1;
152 		}
153 	}
154 
155 	return 0;
156 }
157 
158 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
159 {
160 	return pol->flags & MPOL_MODE_FLAGS;
161 }
162 
163 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
164 				   const nodemask_t *rel)
165 {
166 	nodemask_t tmp;
167 	nodes_fold(tmp, *orig, nodes_weight(*rel));
168 	nodes_onto(*ret, tmp, *rel);
169 }
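/*
 * Worked example (illustrative): with a user-supplied relative nodemask
 * *orig = {0,2} and an allowed set *rel = {4,5,6} (weight 3), nodes_fold()
 * wraps {0,2} modulo 3 (still {0,2}) and nodes_onto() maps those positions
 * onto the set bits of *rel, so *ret becomes {4,6}, i.e. the 0th and 2nd
 * allowed nodes.
 */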
170 
171 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
172 {
173 	if (nodes_empty(*nodes))
174 		return -EINVAL;
175 	pol->v.nodes = *nodes;
176 	return 0;
177 }
178 
179 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
180 {
181 	if (!nodes)
182 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
183 	else if (nodes_empty(*nodes))
184 		return -EINVAL;			/*  no allowed nodes */
185 	else
186 		pol->v.preferred_node = first_node(*nodes);
187 	return 0;
188 }
189 
190 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
191 {
192 	if (!is_valid_nodemask(nodes))
193 		return -EINVAL;
194 	pol->v.nodes = *nodes;
195 	return 0;
196 }
197 
198 /*
199  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
200  * any, for the new policy.  mpol_new() has already validated the nodes
201  * parameter with respect to the policy mode and flags.  But, we need to
202  * handle an empty nodemask with MPOL_PREFERRED here.
203  *
204  * Must be called holding task's alloc_lock to protect task's mems_allowed
205  * and mempolicy.  May also be called holding mmap_sem for write.
206  */
207 static int mpol_set_nodemask(struct mempolicy *pol,
208 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
209 {
210 	int ret;
211 
212 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
213 	if (pol == NULL)
214 		return 0;
215 	/* Check N_HIGH_MEMORY */
216 	nodes_and(nsc->mask1,
217 		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);
218 
219 	VM_BUG_ON(!nodes);
220 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
221 		nodes = NULL;	/* explicit local allocation */
222 	else {
223 		if (pol->flags & MPOL_F_RELATIVE_NODES)
224 			mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
225 		else
226 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
227 
228 		if (mpol_store_user_nodemask(pol))
229 			pol->w.user_nodemask = *nodes;
230 		else
231 			pol->w.cpuset_mems_allowed =
232 						cpuset_current_mems_allowed;
233 	}
234 
235 	if (nodes)
236 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
237 	else
238 		ret = mpol_ops[pol->mode].create(pol, NULL);
239 	return ret;
240 }
241 
242 /*
243  * This function just creates a new policy, does some checks and simple
244  * initialization. You must invoke mpol_set_nodemask() to set nodes.
245  */
246 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
247 				  nodemask_t *nodes)
248 {
249 	struct mempolicy *policy;
250 
251 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
252 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
253 
254 	if (mode == MPOL_DEFAULT) {
255 		if (nodes && !nodes_empty(*nodes))
256 			return ERR_PTR(-EINVAL);
257 		return NULL;	/* simply delete any existing policy */
258 	}
259 	VM_BUG_ON(!nodes);
260 
261 	/*
262 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
263 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
264 	 * All other modes require a valid pointer to a non-empty nodemask.
265 	 */
266 	if (mode == MPOL_PREFERRED) {
267 		if (nodes_empty(*nodes)) {
268 			if (((flags & MPOL_F_STATIC_NODES) ||
269 			     (flags & MPOL_F_RELATIVE_NODES)))
270 				return ERR_PTR(-EINVAL);
271 		}
272 	} else if (nodes_empty(*nodes))
273 		return ERR_PTR(-EINVAL);
274 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
275 	if (!policy)
276 		return ERR_PTR(-ENOMEM);
277 	atomic_set(&policy->refcnt, 1);
278 	policy->mode = mode;
279 	policy->flags = flags;
280 
281 	return policy;
282 }
283 
284 /* Slow path of a mpol destructor. */
285 void __mpol_put(struct mempolicy *p)
286 {
287 	if (!atomic_dec_and_test(&p->refcnt))
288 		return;
289 	kmem_cache_free(policy_cache, p);
290 }
291 
292 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
293 				enum mpol_rebind_step step)
294 {
295 }
296 
297 /*
298  * step:
299  * 	MPOL_REBIND_ONCE  - do rebind work at once
300  * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
301  * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
302  */
303 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
304 				 enum mpol_rebind_step step)
305 {
306 	nodemask_t tmp;
307 
308 	if (pol->flags & MPOL_F_STATIC_NODES)
309 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
310 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
311 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
312 	else {
313 		/*
314 		 * if step == MPOL_REBIND_STEP1, we use ->w.cpuset_mems_allowed
315 		 * to cache the intermediate result
316 		 */
317 		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
318 			nodes_remap(tmp, pol->v.nodes,
319 					pol->w.cpuset_mems_allowed, *nodes);
320 			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
321 		} else if (step == MPOL_REBIND_STEP2) {
322 			tmp = pol->w.cpuset_mems_allowed;
323 			pol->w.cpuset_mems_allowed = *nodes;
324 		} else
325 			BUG();
326 	}
327 
328 	if (nodes_empty(tmp))
329 		tmp = *nodes;
330 
331 	if (step == MPOL_REBIND_STEP1)
332 		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
333 	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
334 		pol->v.nodes = tmp;
335 	else
336 		BUG();
337 
338 	if (!node_isset(current->il_next, tmp)) {
339 		current->il_next = next_node(current->il_next, tmp);
340 		if (current->il_next >= MAX_NUMNODES)
341 			current->il_next = first_node(tmp);
342 		if (current->il_next >= MAX_NUMNODES)
343 			current->il_next = numa_node_id();
344 	}
345 }
346 
347 static void mpol_rebind_preferred(struct mempolicy *pol,
348 				  const nodemask_t *nodes,
349 				  enum mpol_rebind_step step)
350 {
351 	nodemask_t tmp;
352 
353 	if (pol->flags & MPOL_F_STATIC_NODES) {
354 		int node = first_node(pol->w.user_nodemask);
355 
356 		if (node_isset(node, *nodes)) {
357 			pol->v.preferred_node = node;
358 			pol->flags &= ~MPOL_F_LOCAL;
359 		} else
360 			pol->flags |= MPOL_F_LOCAL;
361 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
362 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
363 		pol->v.preferred_node = first_node(tmp);
364 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
365 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
366 						   pol->w.cpuset_mems_allowed,
367 						   *nodes);
368 		pol->w.cpuset_mems_allowed = *nodes;
369 	}
370 }
371 
372 /*
373  * mpol_rebind_policy - Migrate a policy to a different set of nodes
374  *
375  * If the read-side task has no lock protecting task->mempolicy, the
376  * write-side task rebinds task->mempolicy in two steps. The first step
377  * sets all the newly allowed nodes, and the second step clears all the
378  * disallowed nodes. This way we never end up with no node from which
379  * to allocate a page.
380  * If the read side holds a lock protecting task->mempolicy, we rebind
381  * directly.
382  *
383  * step:
384  * 	MPOL_REBIND_ONCE  - do the rebind work at once
385  * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
386  * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
387  */
388 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
389 				enum mpol_rebind_step step)
390 {
391 	if (!pol)
392 		return;
393 	if (!mpol_store_user_nodemask(pol) && step == 0 &&
394 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
395 		return;
396 
397 	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
398 		return;
399 
400 	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
401 		BUG();
402 
403 	if (step == MPOL_REBIND_STEP1)
404 		pol->flags |= MPOL_F_REBINDING;
405 	else if (step == MPOL_REBIND_STEP2)
406 		pol->flags &= ~MPOL_F_REBINDING;
407 	else if (step >= MPOL_REBIND_NSTEP)
408 		BUG();
409 
410 	mpol_ops[pol->mode].rebind(pol, newmask, step);
411 }
412 
413 /*
414  * Wrapper for mpol_rebind_policy() that just requires task
415  * pointer, and updates task mempolicy.
416  *
417  * Called with task's alloc_lock held.
418  */
419 
420 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
421 			enum mpol_rebind_step step)
422 {
423 	mpol_rebind_policy(tsk->mempolicy, new, step);
424 }
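/*
 * Illustrative caller sketch (the real sequence lives in kernel/cpuset.c;
 * details here are assumed, shown only to make the two-step protocol
 * concrete):
 *
 *	task_lock(tsk);
 *	mpol_rebind_task(tsk, &newmems, MPOL_REBIND_STEP1);
 *	tsk->mems_allowed = newmems;
 *	mpol_rebind_task(tsk, &newmems, MPOL_REBIND_STEP2);
 *	task_unlock(tsk);
 *
 * Between the two steps the policy holds the union of the old and new
 * nodes, so a lockless reader never sees an empty nodemask.
 */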
425 
426 /*
427  * Rebind each vma in mm to new nodemask.
428  *
429  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
430  */
431 
432 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
433 {
434 	struct vm_area_struct *vma;
435 
436 	down_write(&mm->mmap_sem);
437 	for (vma = mm->mmap; vma; vma = vma->vm_next)
438 		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
439 	up_write(&mm->mmap_sem);
440 }
441 
442 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
443 	[MPOL_DEFAULT] = {
444 		.rebind = mpol_rebind_default,
445 	},
446 	[MPOL_INTERLEAVE] = {
447 		.create = mpol_new_interleave,
448 		.rebind = mpol_rebind_nodemask,
449 	},
450 	[MPOL_PREFERRED] = {
451 		.create = mpol_new_preferred,
452 		.rebind = mpol_rebind_preferred,
453 	},
454 	[MPOL_BIND] = {
455 		.create = mpol_new_bind,
456 		.rebind = mpol_rebind_nodemask,
457 	},
458 };
459 
460 static void gather_stats(struct page *, void *, int pte_dirty);
461 static void migrate_page_add(struct page *page, struct list_head *pagelist,
462 				unsigned long flags);
463 
464 /* Scan through the pages, checking whether they satisfy certain conditions. */
465 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
466 		unsigned long addr, unsigned long end,
467 		const nodemask_t *nodes, unsigned long flags,
468 		void *private)
469 {
470 	pte_t *orig_pte;
471 	pte_t *pte;
472 	spinlock_t *ptl;
473 
474 	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
475 	do {
476 		struct page *page;
477 		int nid;
478 
479 		if (!pte_present(*pte))
480 			continue;
481 		page = vm_normal_page(vma, addr, *pte);
482 		if (!page)
483 			continue;
484 		/*
485 		 * vm_normal_page() filters out zero pages, but there might
486 		 * still be PageReserved pages to skip, perhaps in a VDSO.
487 		 * And we cannot move PageKsm pages sensibly or safely yet.
488 		 */
489 		if (PageReserved(page) || PageKsm(page))
490 			continue;
491 		nid = page_to_nid(page);
492 		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
493 			continue;
494 
495 		if (flags & MPOL_MF_STATS)
496 			gather_stats(page, private, pte_dirty(*pte));
497 		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
498 			migrate_page_add(page, private, flags);
499 		else
500 			break;
501 	} while (pte++, addr += PAGE_SIZE, addr != end);
502 	pte_unmap_unlock(orig_pte, ptl);
503 	return addr != end;
504 }
505 
506 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
507 		unsigned long addr, unsigned long end,
508 		const nodemask_t *nodes, unsigned long flags,
509 		void *private)
510 {
511 	pmd_t *pmd;
512 	unsigned long next;
513 
514 	pmd = pmd_offset(pud, addr);
515 	do {
516 		next = pmd_addr_end(addr, end);
517 		if (pmd_none_or_clear_bad(pmd))
518 			continue;
519 		if (check_pte_range(vma, pmd, addr, next, nodes,
520 				    flags, private))
521 			return -EIO;
522 	} while (pmd++, addr = next, addr != end);
523 	return 0;
524 }
525 
526 static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
527 		unsigned long addr, unsigned long end,
528 		const nodemask_t *nodes, unsigned long flags,
529 		void *private)
530 {
531 	pud_t *pud;
532 	unsigned long next;
533 
534 	pud = pud_offset(pgd, addr);
535 	do {
536 		next = pud_addr_end(addr, end);
537 		if (pud_none_or_clear_bad(pud))
538 			continue;
539 		if (check_pmd_range(vma, pud, addr, next, nodes,
540 				    flags, private))
541 			return -EIO;
542 	} while (pud++, addr = next, addr != end);
543 	return 0;
544 }
545 
546 static inline int check_pgd_range(struct vm_area_struct *vma,
547 		unsigned long addr, unsigned long end,
548 		const nodemask_t *nodes, unsigned long flags,
549 		void *private)
550 {
551 	pgd_t *pgd;
552 	unsigned long next;
553 
554 	pgd = pgd_offset(vma->vm_mm, addr);
555 	do {
556 		next = pgd_addr_end(addr, end);
557 		if (pgd_none_or_clear_bad(pgd))
558 			continue;
559 		if (check_pud_range(vma, pgd, addr, next, nodes,
560 				    flags, private))
561 			return -EIO;
562 	} while (pgd++, addr = next, addr != end);
563 	return 0;
564 }
565 
566 /*
567  * Check if all pages in a range are on a set of nodes.
568  * If pagelist != NULL then isolate pages from the LRU and
569  * put them on the pagelist.
570  */
571 static struct vm_area_struct *
572 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
573 		const nodemask_t *nodes, unsigned long flags, void *private)
574 {
575 	int err;
576 	struct vm_area_struct *first, *vma, *prev;
577 
578 
579 	first = find_vma(mm, start);
580 	if (!first)
581 		return ERR_PTR(-EFAULT);
582 	prev = NULL;
583 	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
584 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
585 			if (!vma->vm_next && vma->vm_end < end)
586 				return ERR_PTR(-EFAULT);
587 			if (prev && prev->vm_end < vma->vm_start)
588 				return ERR_PTR(-EFAULT);
589 		}
590 		if (!is_vm_hugetlb_page(vma) &&
591 		    ((flags & MPOL_MF_STRICT) ||
592 		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
593 				vma_migratable(vma)))) {
594 			unsigned long endvma = vma->vm_end;
595 
596 			if (endvma > end)
597 				endvma = end;
598 			if (vma->vm_start > start)
599 				start = vma->vm_start;
600 			err = check_pgd_range(vma, start, endvma, nodes,
601 						flags, private);
602 			if (err) {
603 				first = ERR_PTR(err);
604 				break;
605 			}
606 		}
607 		prev = vma;
608 	}
609 	return first;
610 }
611 
612 /* Apply policy to a single VMA */
613 static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
614 {
615 	int err = 0;
616 	struct mempolicy *old = vma->vm_policy;
617 
618 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
619 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
620 		 vma->vm_ops, vma->vm_file,
621 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
622 
623 	if (vma->vm_ops && vma->vm_ops->set_policy)
624 		err = vma->vm_ops->set_policy(vma, new);
625 	if (!err) {
626 		mpol_get(new);
627 		vma->vm_policy = new;
628 		mpol_put(old);
629 	}
630 	return err;
631 }
632 
633 /* Step 2: apply policy to a range and do splits. */
634 static int mbind_range(struct mm_struct *mm, unsigned long start,
635 		       unsigned long end, struct mempolicy *new_pol)
636 {
637 	struct vm_area_struct *next;
638 	struct vm_area_struct *prev;
639 	struct vm_area_struct *vma;
640 	int err = 0;
641 	pgoff_t pgoff;
642 	unsigned long vmstart;
643 	unsigned long vmend;
644 
645 	vma = find_vma_prev(mm, start, &prev);
646 	if (!vma || vma->vm_start > start)
647 		return -EFAULT;
648 
649 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
650 		next = vma->vm_next;
651 		vmstart = max(start, vma->vm_start);
652 		vmend   = min(end, vma->vm_end);
653 
654 		pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
655 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
656 				  vma->anon_vma, vma->vm_file, pgoff, new_pol);
657 		if (prev) {
658 			vma = prev;
659 			next = vma->vm_next;
660 			continue;
661 		}
662 		if (vma->vm_start != vmstart) {
663 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
664 			if (err)
665 				goto out;
666 		}
667 		if (vma->vm_end != vmend) {
668 			err = split_vma(vma->vm_mm, vma, vmend, 0);
669 			if (err)
670 				goto out;
671 		}
672 		err = policy_vma(vma, new_pol);
673 		if (err)
674 			goto out;
675 	}
676 
677  out:
678 	return err;
679 }
680 
681 /*
682  * Update task->flags PF_MEMPOLICY bit: set iff non-default
683  * mempolicy.  Allows more rapid checking of this (combined perhaps
684  * with other PF_* flag bits) on memory allocation hot code paths.
685  *
686  * If called from outside this file, the task 'p' should -only- be
687  * a newly forked child not yet visible on the task list, because
688  * manipulating the task flags of a visible task is not safe.
689  *
690  * The above limitation is why this routine has the funny name
691  * mpol_fix_fork_child_flag().
692  *
693  * It is also safe to call this with a task pointer of current,
694  * which the static wrapper mpol_set_task_struct_flag() does,
695  * for use within this file.
696  */
697 
698 void mpol_fix_fork_child_flag(struct task_struct *p)
699 {
700 	if (p->mempolicy)
701 		p->flags |= PF_MEMPOLICY;
702 	else
703 		p->flags &= ~PF_MEMPOLICY;
704 }
705 
706 static void mpol_set_task_struct_flag(void)
707 {
708 	mpol_fix_fork_child_flag(current);
709 }
710 
711 /* Set the process memory policy */
712 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
713 			     nodemask_t *nodes)
714 {
715 	struct mempolicy *new, *old;
716 	struct mm_struct *mm = current->mm;
717 	NODEMASK_SCRATCH(scratch);
718 	int ret;
719 
720 	if (!scratch)
721 		return -ENOMEM;
722 
723 	new = mpol_new(mode, flags, nodes);
724 	if (IS_ERR(new)) {
725 		ret = PTR_ERR(new);
726 		goto out;
727 	}
728 	/*
729 	 * prevent changing our mempolicy while show_numa_maps()
730 	 * is using it.
731 	 * Note:  do_set_mempolicy() can be called at init time
732 	 * with no 'mm'.
733 	 */
734 	if (mm)
735 		down_write(&mm->mmap_sem);
736 	task_lock(current);
737 	ret = mpol_set_nodemask(new, nodes, scratch);
738 	if (ret) {
739 		task_unlock(current);
740 		if (mm)
741 			up_write(&mm->mmap_sem);
742 		mpol_put(new);
743 		goto out;
744 	}
745 	old = current->mempolicy;
746 	current->mempolicy = new;
747 	mpol_set_task_struct_flag();
748 	if (new && new->mode == MPOL_INTERLEAVE &&
749 	    nodes_weight(new->v.nodes))
750 		current->il_next = first_node(new->v.nodes);
751 	task_unlock(current);
752 	if (mm)
753 		up_write(&mm->mmap_sem);
754 
755 	mpol_put(old);
756 	ret = 0;
757 out:
758 	NODEMASK_SCRATCH_FREE(scratch);
759 	return ret;
760 }
761 
762 /*
763  * Return nodemask for policy for get_mempolicy() query
764  *
765  * Called with task's alloc_lock held
766  */
767 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
768 {
769 	nodes_clear(*nodes);
770 	if (p == &default_policy)
771 		return;
772 
773 	switch (p->mode) {
774 	case MPOL_BIND:
775 		/* Fall through */
776 	case MPOL_INTERLEAVE:
777 		*nodes = p->v.nodes;
778 		break;
779 	case MPOL_PREFERRED:
780 		if (!(p->flags & MPOL_F_LOCAL))
781 			node_set(p->v.preferred_node, *nodes);
782 		/* else return empty node mask for local allocation */
783 		break;
784 	default:
785 		BUG();
786 	}
787 }
788 
789 static int lookup_node(struct mm_struct *mm, unsigned long addr)
790 {
791 	struct page *p;
792 	int err;
793 
794 	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
795 	if (err >= 0) {
796 		err = page_to_nid(p);
797 		put_page(p);
798 	}
799 	return err;
800 }
801 
802 /* Retrieve NUMA policy */
803 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
804 			     unsigned long addr, unsigned long flags)
805 {
806 	int err;
807 	struct mm_struct *mm = current->mm;
808 	struct vm_area_struct *vma = NULL;
809 	struct mempolicy *pol = current->mempolicy;
810 
811 	if (flags &
812 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
813 		return -EINVAL;
814 
815 	if (flags & MPOL_F_MEMS_ALLOWED) {
816 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
817 			return -EINVAL;
818 		*policy = 0;	/* just so it's initialized */
819 		task_lock(current);
820 		*nmask  = cpuset_current_mems_allowed;
821 		task_unlock(current);
822 		return 0;
823 	}
824 
825 	if (flags & MPOL_F_ADDR) {
826 		/*
827 		 * Do NOT fall back to task policy if the
828 		 * vma/shared policy at addr is NULL.  We
829 		 * want to return MPOL_DEFAULT in this case.
830 		 */
831 		down_read(&mm->mmap_sem);
832 		vma = find_vma_intersection(mm, addr, addr+1);
833 		if (!vma) {
834 			up_read(&mm->mmap_sem);
835 			return -EFAULT;
836 		}
837 		if (vma->vm_ops && vma->vm_ops->get_policy)
838 			pol = vma->vm_ops->get_policy(vma, addr);
839 		else
840 			pol = vma->vm_policy;
841 	} else if (addr)
842 		return -EINVAL;
843 
844 	if (!pol)
845 		pol = &default_policy;	/* indicates default behavior */
846 
847 	if (flags & MPOL_F_NODE) {
848 		if (flags & MPOL_F_ADDR) {
849 			err = lookup_node(mm, addr);
850 			if (err < 0)
851 				goto out;
852 			*policy = err;
853 		} else if (pol == current->mempolicy &&
854 				pol->mode == MPOL_INTERLEAVE) {
855 			*policy = current->il_next;
856 		} else {
857 			err = -EINVAL;
858 			goto out;
859 		}
860 	} else {
861 		*policy = pol == &default_policy ? MPOL_DEFAULT :
862 						pol->mode;
863 		/*
864 		 * Internal mempolicy flags must be masked off before exposing
865 		 * the policy to userspace.
866 		 */
867 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
868 	}
869 
870 	if (vma) {
871 		up_read(&current->mm->mmap_sem);
872 		vma = NULL;
873 	}
874 
875 	err = 0;
876 	if (nmask) {
877 		if (mpol_store_user_nodemask(pol)) {
878 			*nmask = pol->w.user_nodemask;
879 		} else {
880 			task_lock(current);
881 			get_policy_nodemask(pol, nmask);
882 			task_unlock(current);
883 		}
884 	}
885 
886  out:
887 	mpol_cond_put(pol);
888 	if (vma)
889 		up_read(&current->mm->mmap_sem);
890 	return err;
891 }
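/*
 * Illustrative userspace sketch (not part of this file): the MPOL_F_NODE |
 * MPOL_F_ADDR combination handled above is commonly used to ask which node
 * currently backs a given address, e.g. via the <numaif.h> wrapper:
 *
 *	int node = -1;
 *
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 *
 * 'addr' is assumed to point into a mapped region; lookup_node() above will
 * fault the page in through get_user_pages() if necessary.
 */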
892 
893 #ifdef CONFIG_MIGRATION
894 /*
895  * page migration
896  */
897 static void migrate_page_add(struct page *page, struct list_head *pagelist,
898 				unsigned long flags)
899 {
900 	/*
901 	 * Avoid migrating a page that is shared with others.
902 	 */
903 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
904 		if (!isolate_lru_page(page)) {
905 			list_add_tail(&page->lru, pagelist);
906 			inc_zone_page_state(page, NR_ISOLATED_ANON +
907 					    page_is_file_cache(page));
908 		}
909 	}
910 }
911 
912 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
913 {
914 	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
915 }
916 
917 /*
918  * Migrate pages from one node to a target node.
919  * Returns error or the number of pages not migrated.
920  */
921 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
922 			   int flags)
923 {
924 	nodemask_t nmask;
925 	LIST_HEAD(pagelist);
926 	int err = 0;
927 
928 	nodes_clear(nmask);
929 	node_set(source, nmask);
930 
931 	check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
932 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
933 
934 	if (!list_empty(&pagelist))
935 		err = migrate_pages(&pagelist, new_node_page, dest, 0);
936 
937 	return err;
938 }
939 
940 /*
941  * Move pages between the two nodesets so as to preserve the physical
942  * layout as much as possible.
943  *
944  * Returns the number of pages that could not be moved.
945  */
946 int do_migrate_pages(struct mm_struct *mm,
947 	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
948 {
949 	int busy = 0;
950 	int err;
951 	nodemask_t tmp;
952 
953 	err = migrate_prep();
954 	if (err)
955 		return err;
956 
957 	down_read(&mm->mmap_sem);
958 
959 	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
960 	if (err)
961 		goto out;
962 
963 	/*
964 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
965 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
966 	 * bit in 'tmp', and return that <source, dest> pair for migration.
967 	 * The pair of nodemasks 'to' and 'from' define the map.
968 	 *
969 	 * If no pair of bits is found that way, fall back to picking some
970 	 * pair of 'source' and 'dest' bits that are not the same.  If the
971 	 * 'source' and 'dest' bits are the same, this represents a node
972 	 * that will be migrating to itself, so no pages need move.
973 	 *
974 	 * If no bits are left in 'tmp', or if all remaining bits left
975 	 * in 'tmp' correspond to the same bit in 'to', return false
976 	 * (nothing left to migrate).
977 	 *
978 	 * This lets us pick a pair of nodes to migrate between, such that
979 	 * if possible the dest node is not already occupied by some other
980 	 * source node, minimizing the risk of overloading the memory on a
981 	 * node that would happen if we migrated incoming memory to a node
982 	 * before migrating the outgoing memory from that same node.
983 	 *
984 	 * A single scan of tmp is sufficient.  As we go, we remember the
985 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
986 	 * that not only moved, but what's better, moved to an empty slot
987 	 * (d is not set in tmp), then we break out immediately with that pair.
988 	 * Otherwise, when we finish scanning tmp, we at least have the
989 	 * most recent <s, d> pair that moved.  If we get all the way through
990 	 * the scan of tmp without finding any node that moved, much less
991 	 * moved to an empty node, then there is nothing left worth migrating.
992 	 */
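	/*
	 * Worked example (illustrative): from_nodes = {0,1}, to_nodes = {1,2}.
	 * tmp starts as {0,1}.  The first scan visits s=0 -> d=1 (d is still
	 * set in tmp, so keep looking), then s=1 -> d=2 (d is not in tmp), so
	 * we break out and migrate node 1 to node 2 first, clearing 1 from
	 * tmp.  The second scan then moves node 0 -> node 1, whose outgoing
	 * pages have already left.
	 */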
993 
994 	tmp = *from_nodes;
995 	while (!nodes_empty(tmp)) {
996 		int s,d;
997 		int source = -1;
998 		int dest = 0;
999 
1000 		for_each_node_mask(s, tmp) {
1001 			d = node_remap(s, *from_nodes, *to_nodes);
1002 			if (s == d)
1003 				continue;
1004 
1005 			source = s;	/* Node moved. Memorize */
1006 			dest = d;
1007 
1008 			/* dest not in remaining from nodes? */
1009 			if (!node_isset(dest, tmp))
1010 				break;
1011 		}
1012 		if (source == -1)
1013 			break;
1014 
1015 		node_clear(source, tmp);
1016 		err = migrate_to_node(mm, source, dest, flags);
1017 		if (err > 0)
1018 			busy += err;
1019 		if (err < 0)
1020 			break;
1021 	}
1022 out:
1023 	up_read(&mm->mmap_sem);
1024 	if (err < 0)
1025 		return err;
1026 	return busy;
1027 
1028 }
1029 
1030 /*
1031  * Allocate a new page for page migration based on vma policy.
1032  * Start assuming that page is mapped by vma pointed to by @private.
1033  * Search forward from there, if not.  N.B., this assumes that the
1034  * list of pages handed to migrate_pages()--which is how we get here--
1035  * is in virtual address order.
1036  */
1037 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1038 {
1039 	struct vm_area_struct *vma = (struct vm_area_struct *)private;
1040 	unsigned long uninitialized_var(address);
1041 
1042 	while (vma) {
1043 		address = page_address_in_vma(page, vma);
1044 		if (address != -EFAULT)
1045 			break;
1046 		vma = vma->vm_next;
1047 	}
1048 
1049 	/*
1050 	 * if !vma, alloc_page_vma() will use task or system default policy
1051 	 */
1052 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1053 }
1054 #else
1055 
1056 static void migrate_page_add(struct page *page, struct list_head *pagelist,
1057 				unsigned long flags)
1058 {
1059 }
1060 
1061 int do_migrate_pages(struct mm_struct *mm,
1062 	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
1063 {
1064 	return -ENOSYS;
1065 }
1066 
1067 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1068 {
1069 	return NULL;
1070 }
1071 #endif
1072 
1073 static long do_mbind(unsigned long start, unsigned long len,
1074 		     unsigned short mode, unsigned short mode_flags,
1075 		     nodemask_t *nmask, unsigned long flags)
1076 {
1077 	struct vm_area_struct *vma;
1078 	struct mm_struct *mm = current->mm;
1079 	struct mempolicy *new;
1080 	unsigned long end;
1081 	int err;
1082 	LIST_HEAD(pagelist);
1083 
1084 	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
1085 				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1086 		return -EINVAL;
1087 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1088 		return -EPERM;
1089 
1090 	if (start & ~PAGE_MASK)
1091 		return -EINVAL;
1092 
1093 	if (mode == MPOL_DEFAULT)
1094 		flags &= ~MPOL_MF_STRICT;
1095 
1096 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1097 	end = start + len;
1098 
1099 	if (end < start)
1100 		return -EINVAL;
1101 	if (end == start)
1102 		return 0;
1103 
1104 	new = mpol_new(mode, mode_flags, nmask);
1105 	if (IS_ERR(new))
1106 		return PTR_ERR(new);
1107 
1108 	/*
1109 	 * If we are using the default policy then operation
1110 	 * on discontinuous address spaces is okay after all
1111 	 */
1112 	if (!new)
1113 		flags |= MPOL_MF_DISCONTIG_OK;
1114 
1115 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1116 		 start, start + len, mode, mode_flags,
1117 		 nmask ? nodes_addr(*nmask)[0] : -1);
1118 
1119 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1120 
1121 		err = migrate_prep();
1122 		if (err)
1123 			goto mpol_out;
1124 	}
1125 	{
1126 		NODEMASK_SCRATCH(scratch);
1127 		if (scratch) {
1128 			down_write(&mm->mmap_sem);
1129 			task_lock(current);
1130 			err = mpol_set_nodemask(new, nmask, scratch);
1131 			task_unlock(current);
1132 			if (err)
1133 				up_write(&mm->mmap_sem);
1134 		} else
1135 			err = -ENOMEM;
1136 		NODEMASK_SCRATCH_FREE(scratch);
1137 	}
1138 	if (err)
1139 		goto mpol_out;
1140 
1141 	vma = check_range(mm, start, end, nmask,
1142 			  flags | MPOL_MF_INVERT, &pagelist);
1143 
1144 	err = PTR_ERR(vma);
1145 	if (!IS_ERR(vma)) {
1146 		int nr_failed = 0;
1147 
1148 		err = mbind_range(mm, start, end, new);
1149 
1150 		if (!list_empty(&pagelist))
1151 			nr_failed = migrate_pages(&pagelist, new_vma_page,
1152 						(unsigned long)vma, 0);
1153 
1154 		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
1155 			err = -EIO;
1156 	} else
1157 		putback_lru_pages(&pagelist);
1158 
1159 	up_write(&mm->mmap_sem);
1160  mpol_out:
1161 	mpol_put(new);
1162 	return err;
1163 }
1164 
1165 /*
1166  * User space interface with variable sized bitmaps for nodelists.
1167  */
1168 
1169 /* Copy a node mask from user space. */
1170 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1171 		     unsigned long maxnode)
1172 {
1173 	unsigned long k;
1174 	unsigned long nlongs;
1175 	unsigned long endmask;
1176 
1177 	--maxnode;
1178 	nodes_clear(*nodes);
1179 	if (maxnode == 0 || !nmask)
1180 		return 0;
1181 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1182 		return -EINVAL;
1183 
1184 	nlongs = BITS_TO_LONGS(maxnode);
1185 	if ((maxnode % BITS_PER_LONG) == 0)
1186 		endmask = ~0UL;
1187 	else
1188 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1189 
1190 	/* When the user specified more nodes than supported, just check
1191 	   that the unsupported part is all zero. */
1192 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1193 		if (nlongs > PAGE_SIZE/sizeof(long))
1194 			return -EINVAL;
1195 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1196 			unsigned long t;
1197 			if (get_user(t, nmask + k))
1198 				return -EFAULT;
1199 			if (k == nlongs - 1) {
1200 				if (t & endmask)
1201 					return -EINVAL;
1202 			} else if (t)
1203 				return -EINVAL;
1204 		}
1205 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1206 		endmask = ~0UL;
1207 	}
1208 
1209 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1210 		return -EFAULT;
1211 	nodes_addr(*nodes)[nlongs-1] &= endmask;
1212 	return 0;
1213 }
1214 
1215 /* Copy a kernel node mask to user space */
1216 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1217 			      nodemask_t *nodes)
1218 {
1219 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1220 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1221 
1222 	if (copy > nbytes) {
1223 		if (copy > PAGE_SIZE)
1224 			return -EINVAL;
1225 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1226 			return -EFAULT;
1227 		copy = nbytes;
1228 	}
1229 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1230 }
1231 
1232 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1233 		unsigned long, mode, unsigned long __user *, nmask,
1234 		unsigned long, maxnode, unsigned, flags)
1235 {
1236 	nodemask_t nodes;
1237 	int err;
1238 	unsigned short mode_flags;
1239 
1240 	mode_flags = mode & MPOL_MODE_FLAGS;
1241 	mode &= ~MPOL_MODE_FLAGS;
1242 	if (mode >= MPOL_MAX)
1243 		return -EINVAL;
1244 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
1245 	    (mode_flags & MPOL_F_RELATIVE_NODES))
1246 		return -EINVAL;
1247 	err = get_nodes(&nodes, nmask, maxnode);
1248 	if (err)
1249 		return err;
1250 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1251 }
1252 
1253 /* Set the process memory policy */
1254 SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1255 		unsigned long, maxnode)
1256 {
1257 	int err;
1258 	nodemask_t nodes;
1259 	unsigned short flags;
1260 
1261 	flags = mode & MPOL_MODE_FLAGS;
1262 	mode &= ~MPOL_MODE_FLAGS;
1263 	if ((unsigned int)mode >= MPOL_MAX)
1264 		return -EINVAL;
1265 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1266 		return -EINVAL;
1267 	err = get_nodes(&nodes, nmask, maxnode);
1268 	if (err)
1269 		return err;
1270 	return do_set_mempolicy(mode, flags, &nodes);
1271 }
1272 
1273 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1274 		const unsigned long __user *, old_nodes,
1275 		const unsigned long __user *, new_nodes)
1276 {
1277 	const struct cred *cred = current_cred(), *tcred;
1278 	struct mm_struct *mm;
1279 	struct task_struct *task;
1280 	nodemask_t old;
1281 	nodemask_t new;
1282 	nodemask_t task_nodes;
1283 	int err;
1284 
1285 	err = get_nodes(&old, old_nodes, maxnode);
1286 	if (err)
1287 		return err;
1288 
1289 	err = get_nodes(&new, new_nodes, maxnode);
1290 	if (err)
1291 		return err;
1292 
1293 	/* Find the mm_struct */
1294 	read_lock(&tasklist_lock);
1295 	task = pid ? find_task_by_vpid(pid) : current;
1296 	if (!task) {
1297 		read_unlock(&tasklist_lock);
1298 		return -ESRCH;
1299 	}
1300 	mm = get_task_mm(task);
1301 	read_unlock(&tasklist_lock);
1302 
1303 	if (!mm)
1304 		return -EINVAL;
1305 
1306 	/*
1307 	 * Check if this process has the right to modify the specified
1308 	 * process. The right exists if the process has administrative
1309 	 * capabilities, superuser privileges or the same
1310 	 * userid as the target process.
1311 	 */
1312 	rcu_read_lock();
1313 	tcred = __task_cred(task);
1314 	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
1315 	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
1316 	    !capable(CAP_SYS_NICE)) {
1317 		rcu_read_unlock();
1318 		err = -EPERM;
1319 		goto out;
1320 	}
1321 	rcu_read_unlock();
1322 
1323 	task_nodes = cpuset_mems_allowed(task);
1324 	/* Is the user allowed to access the target nodes? */
1325 	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
1326 		err = -EPERM;
1327 		goto out;
1328 	}
1329 
1330 	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
1331 		err = -EINVAL;
1332 		goto out;
1333 	}
1334 
1335 	err = security_task_movememory(task);
1336 	if (err)
1337 		goto out;
1338 
1339 	err = do_migrate_pages(mm, &old, &new,
1340 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1341 out:
1342 	mmput(mm);
1343 	return err;
1344 }
1345 
1346 
1347 /* Retrieve NUMA policy */
1348 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1349 		unsigned long __user *, nmask, unsigned long, maxnode,
1350 		unsigned long, addr, unsigned long, flags)
1351 {
1352 	int err;
1353 	int uninitialized_var(pval);
1354 	nodemask_t nodes;
1355 
1356 	if (nmask != NULL && maxnode < MAX_NUMNODES)
1357 		return -EINVAL;
1358 
1359 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1360 
1361 	if (err)
1362 		return err;
1363 
1364 	if (policy && put_user(pval, policy))
1365 		return -EFAULT;
1366 
1367 	if (nmask)
1368 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1369 
1370 	return err;
1371 }
1372 
1373 #ifdef CONFIG_COMPAT
1374 
1375 asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1376 				     compat_ulong_t __user *nmask,
1377 				     compat_ulong_t maxnode,
1378 				     compat_ulong_t addr, compat_ulong_t flags)
1379 {
1380 	long err;
1381 	unsigned long __user *nm = NULL;
1382 	unsigned long nr_bits, alloc_size;
1383 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1384 
1385 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1386 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1387 
1388 	if (nmask)
1389 		nm = compat_alloc_user_space(alloc_size);
1390 
1391 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1392 
1393 	if (!err && nmask) {
1394 		err = copy_from_user(bm, nm, alloc_size);
1395 		/* ensure entire bitmap is zeroed */
1396 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1397 		err |= compat_put_bitmap(nmask, bm, nr_bits);
1398 	}
1399 
1400 	return err;
1401 }
1402 
1403 asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1404 				     compat_ulong_t maxnode)
1405 {
1406 	long err = 0;
1407 	unsigned long __user *nm = NULL;
1408 	unsigned long nr_bits, alloc_size;
1409 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1410 
1411 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1412 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1413 
1414 	if (nmask) {
1415 		err = compat_get_bitmap(bm, nmask, nr_bits);
1416 		nm = compat_alloc_user_space(alloc_size);
1417 		err |= copy_to_user(nm, bm, alloc_size);
1418 	}
1419 
1420 	if (err)
1421 		return -EFAULT;
1422 
1423 	return sys_set_mempolicy(mode, nm, nr_bits+1);
1424 }
1425 
1426 asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1427 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
1428 			     compat_ulong_t maxnode, compat_ulong_t flags)
1429 {
1430 	long err = 0;
1431 	unsigned long __user *nm = NULL;
1432 	unsigned long nr_bits, alloc_size;
1433 	nodemask_t bm;
1434 
1435 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1436 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1437 
1438 	if (nmask) {
1439 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1440 		nm = compat_alloc_user_space(alloc_size);
1441 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1442 	}
1443 
1444 	if (err)
1445 		return -EFAULT;
1446 
1447 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1448 }
1449 
1450 #endif
1451 
1452 /*
1453  * get_vma_policy(@task, @vma, @addr)
1454  * @task - task for fallback if vma policy == default
1455  * @vma   - virtual memory area whose policy is sought
1456  * @addr  - address in @vma for shared policy lookup
1457  *
1458  * Returns effective policy for a VMA at specified address.
1459  * Falls back to @task or system default policy, as necessary.
1460  * Current or other task's task mempolicy and non-shared vma policies
1461  * are protected by the task's mmap_sem, which must be held for read by
1462  * the caller.
1463  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1464  * count--added by the get_policy() vm_op, as appropriate--to protect against
1465  * freeing by another task.  It is the caller's responsibility to free the
1466  * extra reference for shared policies.
1467  */
1468 static struct mempolicy *get_vma_policy(struct task_struct *task,
1469 		struct vm_area_struct *vma, unsigned long addr)
1470 {
1471 	struct mempolicy *pol = task->mempolicy;
1472 
1473 	if (vma) {
1474 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1475 			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1476 									addr);
1477 			if (vpol)
1478 				pol = vpol;
1479 		} else if (vma->vm_policy)
1480 			pol = vma->vm_policy;
1481 	}
1482 	if (!pol)
1483 		pol = &default_policy;
1484 	return pol;
1485 }
1486 
1487 /*
1488  * Return a nodemask representing a mempolicy for filtering nodes for
1489  * page allocation
1490  */
1491 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1492 {
1493 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1494 	if (unlikely(policy->mode == MPOL_BIND) &&
1495 			gfp_zone(gfp) >= policy_zone &&
1496 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1497 		return &policy->v.nodes;
1498 
1499 	return NULL;
1500 }
1501 
1502 /* Return a zonelist indicated by gfp for node representing a mempolicy */
1503 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
1504 {
1505 	int nd = numa_node_id();
1506 
1507 	switch (policy->mode) {
1508 	case MPOL_PREFERRED:
1509 		if (!(policy->flags & MPOL_F_LOCAL))
1510 			nd = policy->v.preferred_node;
1511 		break;
1512 	case MPOL_BIND:
1513 		/*
1514 		 * Normally, MPOL_BIND allocations are node-local within the
1515 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
1516 		 * current node isn't part of the mask, we use the zonelist for
1517 		 * the first node in the mask instead.
1518 		 */
1519 		if (unlikely(gfp & __GFP_THISNODE) &&
1520 				unlikely(!node_isset(nd, policy->v.nodes)))
1521 			nd = first_node(policy->v.nodes);
1522 		break;
1523 	default:
1524 		BUG();
1525 	}
1526 	return node_zonelist(nd, gfp);
1527 }
1528 
1529 /* Do dynamic interleaving for a process */
1530 static unsigned interleave_nodes(struct mempolicy *policy)
1531 {
1532 	unsigned nid, next;
1533 	struct task_struct *me = current;
1534 
1535 	nid = me->il_next;
1536 	next = next_node(nid, policy->v.nodes);
1537 	if (next >= MAX_NUMNODES)
1538 		next = first_node(policy->v.nodes);
1539 	if (next < MAX_NUMNODES)
1540 		me->il_next = next;
1541 	return nid;
1542 }
1543 
1544 /*
1545  * Depending on the memory policy provide a node from which to allocate the
1546  * next slab entry.
1547  * @policy must be protected from freeing by the caller.  If @policy is
1548  * the current task's mempolicy, this protection is implicit, as only the
1549  * task can change its policy.  The system default policy requires no
1550  * such protection.
1551  */
1552 unsigned slab_node(struct mempolicy *policy)
1553 {
1554 	if (!policy || policy->flags & MPOL_F_LOCAL)
1555 		return numa_node_id();
1556 
1557 	switch (policy->mode) {
1558 	case MPOL_PREFERRED:
1559 		/*
1560 		 * handled MPOL_F_LOCAL above
1561 		 */
1562 		return policy->v.preferred_node;
1563 
1564 	case MPOL_INTERLEAVE:
1565 		return interleave_nodes(policy);
1566 
1567 	case MPOL_BIND: {
1568 		/*
1569 		 * Follow bind policy behavior and start allocation at the
1570 		 * first node.
1571 		 */
1572 		struct zonelist *zonelist;
1573 		struct zone *zone;
1574 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1575 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1576 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
1577 							&policy->v.nodes,
1578 							&zone);
1579 		return zone->node;
1580 	}
1581 
1582 	default:
1583 		BUG();
1584 	}
1585 }
1586 
1587 /* Do static interleaving for a VMA with known offset. */
1588 static unsigned offset_il_node(struct mempolicy *pol,
1589 		struct vm_area_struct *vma, unsigned long off)
1590 {
1591 	unsigned nnodes = nodes_weight(pol->v.nodes);
1592 	unsigned target;
1593 	int c;
1594 	int nid = -1;
1595 
1596 	if (!nnodes)
1597 		return numa_node_id();
1598 	target = (unsigned int)off % nnodes;
1599 	c = 0;
1600 	do {
1601 		nid = next_node(nid, pol->v.nodes);
1602 		c++;
1603 	} while (c <= target);
1604 	return nid;
1605 }
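/*
 * Worked example (illustrative): with pol->v.nodes = {1,3,5} and off = 7,
 * nnodes = 3 and target = 7 % 3 = 1, so the loop stops at the second set
 * node and returns node 3.  A given page offset therefore always maps to
 * the same node, independent of allocation history.
 */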
1606 
1607 /* Determine a node number for interleave */
1608 static inline unsigned interleave_nid(struct mempolicy *pol,
1609 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1610 {
1611 	if (vma) {
1612 		unsigned long off;
1613 
1614 		/*
1615 		 * for small pages, there is no difference between
1616 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1617 		 * for huge pages, since vm_pgoff is in units of small
1618 		 * pages, we need to shift off the always 0 bits to get
1619 		 * a useful offset.
1620 		 */
1621 		BUG_ON(shift < PAGE_SHIFT);
1622 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1623 		off += (addr - vma->vm_start) >> shift;
1624 		return offset_il_node(pol, vma, off);
1625 	} else
1626 		return interleave_nodes(pol);
1627 }
1628 
1629 #ifdef CONFIG_HUGETLBFS
1630 /*
1631  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol, @nodemask)
1632  * @vma = virtual memory area whose policy is sought
1633  * @addr = address in @vma for shared policy lookup and interleave policy
1634  * @gfp_flags = for requested zone
1635  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1636  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1637  *
1638  * Returns a zonelist suitable for a huge page allocation and a pointer
1639  * to the struct mempolicy for conditional unref after allocation.
1640  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1641  * @nodemask for filtering the zonelist.
1642  *
1643  * Must be protected by get_mems_allowed()
1644  */
1645 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1646 				gfp_t gfp_flags, struct mempolicy **mpol,
1647 				nodemask_t **nodemask)
1648 {
1649 	struct zonelist *zl;
1650 
1651 	*mpol = get_vma_policy(current, vma, addr);
1652 	*nodemask = NULL;	/* assume !MPOL_BIND */
1653 
1654 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1655 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1656 				huge_page_shift(hstate_vma(vma))), gfp_flags);
1657 	} else {
1658 		zl = policy_zonelist(gfp_flags, *mpol);
1659 		if ((*mpol)->mode == MPOL_BIND)
1660 			*nodemask = &(*mpol)->v.nodes;
1661 	}
1662 	return zl;
1663 }
1664 
1665 /*
1666  * init_nodemask_of_mempolicy
1667  *
1668  * If the current task's mempolicy is "default" [NULL], return 'false'
1669  * to indicate default policy.  Otherwise, extract the policy nodemask
1670  * for 'bind' or 'interleave' policy into the argument nodemask, or
1671  * initialize the argument nodemask to contain the single node for
1672  * 'preferred' or 'local' policy and return 'true' to indicate presence
1673  * of non-default mempolicy.
1674  *
1675  * We don't bother with reference counting the mempolicy [mpol_get/put]
1676  * because the current task is examining its own mempolicy and a task's
1677  * mempolicy is only ever changed by the task itself.
1678  *
1679  * N.B., it is the caller's responsibility to free a returned nodemask.
1680  */
1681 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1682 {
1683 	struct mempolicy *mempolicy;
1684 	int nid;
1685 
1686 	if (!(mask && current->mempolicy))
1687 		return false;
1688 
1689 	task_lock(current);
1690 	mempolicy = current->mempolicy;
1691 	switch (mempolicy->mode) {
1692 	case MPOL_PREFERRED:
1693 		if (mempolicy->flags & MPOL_F_LOCAL)
1694 			nid = numa_node_id();
1695 		else
1696 			nid = mempolicy->v.preferred_node;
1697 		init_nodemask_of_node(mask, nid);
1698 		break;
1699 
1700 	case MPOL_BIND:
1701 		/* Fall through */
1702 	case MPOL_INTERLEAVE:
1703 		*mask =  mempolicy->v.nodes;
1704 		break;
1705 
1706 	default:
1707 		BUG();
1708 	}
1709 	task_unlock(current);
1710 
1711 	return true;
1712 }
1713 #endif
1714 
1715 /* Allocate a page using the interleave policy.
1716    Own path because it needs to do special accounting. */
1717 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1718 					unsigned nid)
1719 {
1720 	struct zonelist *zl;
1721 	struct page *page;
1722 
1723 	zl = node_zonelist(nid, gfp);
1724 	page = __alloc_pages(gfp, order, zl);
1725 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1726 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1727 	return page;
1728 }
1729 
1730 /**
1731  * 	alloc_page_vma	- Allocate a page for a VMA.
1732  *
1733  * 	@gfp:
1734  *      %GFP_USER    user allocation.
1735  *      %GFP_KERNEL  kernel allocations,
1736  *      %GFP_HIGHMEM highmem/user allocations,
1737  *      %GFP_FS      allocation should not call back into a file system.
1738  *      %GFP_ATOMIC  don't sleep.
1739  *
1740  * 	@vma:  Pointer to VMA or NULL if not available.
1741  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
1742  *
1743  * 	This function allocates a page from the kernel page pool and applies
1744  *	a NUMA policy associated with the VMA or the current process.
1745  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
1746  *	When VMA is not NULL, the caller must hold down_read on the mmap_sem of the
1747  *	all allocations for pages that will be mapped into
1748  * 	user space. Returns NULL when no page can be allocated.
1749  *
1750  *	Should be called with the mm_sem of the vma hold.
1751  *	Should be called with the mmap_sem of the vma held.
1752 struct page *
1753 alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1754 {
1755 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1756 	struct zonelist *zl;
1757 	struct page *page;
1758 
1759 	get_mems_allowed();
1760 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1761 		unsigned nid;
1762 
1763 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1764 		mpol_cond_put(pol);
1765 		page = alloc_page_interleave(gfp, 0, nid);
1766 		put_mems_allowed();
1767 		return page;
1768 	}
1769 	zl = policy_zonelist(gfp, pol);
1770 	if (unlikely(mpol_needs_cond_ref(pol))) {
1771 		/*
1772 		 * slow path: ref counted shared policy
1773 		 */
1774 		struct page *page =  __alloc_pages_nodemask(gfp, 0,
1775 						zl, policy_nodemask(gfp, pol));
1776 		__mpol_put(pol);
1777 		put_mems_allowed();
1778 		return page;
1779 	}
1780 	/*
1781 	 * fast path:  default or task policy
1782 	 */
1783 	page = __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
1784 	put_mems_allowed();
1785 	return page;
1786 }
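/*
 * Illustrative caller sketch (hypothetical fault-path snippet, not taken
 * from this file): a typical user allocates a movable, zeroed highmem page
 * for an address inside @vma while holding mmap_sem for read:
 *
 *	struct page *page;
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */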
1787 
1788 /**
1789  * 	alloc_pages_current - Allocate pages.
1790  *
1791  *	@gfp:
1792  *		%GFP_USER   user allocation,
1793  *      	%GFP_KERNEL kernel allocation,
1794  *      	%GFP_HIGHMEM highmem allocation,
1795  *      	%GFP_FS     don't call back into a file system.
1796  *      	%GFP_ATOMIC don't sleep.
1797  *	@order: Power of two of allocation size in pages. 0 is a single page.
1798  *
1799  *	Allocate a page from the kernel page pool and, when not in
1800  *	interrupt context, apply the current process's NUMA policy.
1801  *	Returns NULL when no page can be allocated.
1802  *
1803  *	Don't call cpuset_update_task_memory_state() unless
1804  *	1) it's ok to take cpuset_sem (can WAIT), and
1805  *	2) allocating for current task (not interrupt).
1806  */
1807 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1808 {
1809 	struct mempolicy *pol = current->mempolicy;
1810 	struct page *page;
1811 
1812 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1813 		pol = &default_policy;
1814 
1815 	get_mems_allowed();
1816 	/*
1817 	 * No reference counting needed for current->mempolicy
1818 	 * nor system default_policy
1819 	 */
1820 	if (pol->mode == MPOL_INTERLEAVE)
1821 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1822 	else
1823 		page = __alloc_pages_nodemask(gfp, order,
1824 			policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
1825 	put_mems_allowed();
1826 	return page;
1827 }
1828 EXPORT_SYMBOL(alloc_pages_current);
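/*
 * Illustrative note (assumption about the gfp.h wrapper): on CONFIG_NUMA
 * kernels the alloc_pages() macro resolves to alloc_pages_current(), so an
 * ordinary allocation such as
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 * (order 2, i.e. four contiguous pages) picks up the calling task's
 * mempolicy, while interrupt-context and __GFP_THISNODE callers fall back
 * to default_policy as handled above.
 */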
1829 
1830 /*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset-relative after their cpuset moves.  See
 * also kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by another task (the task that changes
 * the cpuset's mems), so we need not do the rebind work for the current task.
1839  */
1840 
1841 /* Slow path of a mempolicy duplicate */
1842 struct mempolicy *__mpol_dup(struct mempolicy *old)
1843 {
1844 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1845 
1846 	if (!new)
1847 		return ERR_PTR(-ENOMEM);
1848 
1849 	/* task's mempolicy is protected by alloc_lock */
1850 	if (old == current->mempolicy) {
1851 		task_lock(current);
1852 		*new = *old;
1853 		task_unlock(current);
1854 	} else
1855 		*new = *old;
1856 
1857 	rcu_read_lock();
1858 	if (current_cpuset_is_being_rebound()) {
1859 		nodemask_t mems = cpuset_mems_allowed(current);
1860 		if (new->flags & MPOL_F_REBINDING)
1861 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
1862 		else
1863 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
1864 	}
1865 	rcu_read_unlock();
1866 	atomic_set(&new->refcnt, 1);
1867 	return new;
1868 }
1869 
1870 /*
 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
 * eliminate the MPOL_F_* flags that require a conditional ref and
 * [NOTE!!!] drop the extra ref.  It is not safe to reference *frompol
 * directly after return; use the returned value instead.
 *
 * Allows use of a mempolicy for, e.g., multiple allocations with a single
 * policy lookup, even if the policy needs/has an extra ref on lookup.
 * shmem_readahead needs this.
1879  */
1880 struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1881 						struct mempolicy *frompol)
1882 {
1883 	if (!mpol_needs_cond_ref(frompol))
1884 		return frompol;
1885 
1886 	*tompol = *frompol;
1887 	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
1888 	__mpol_put(frompol);
1889 	return tompol;
1890 }
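/*
 * Illustrative sketch, modelled on the shmem readahead case mentioned
 * above (info, idx and the surrounding allocation loop belong to the
 * caller and are not defined here): look the shared policy up once, copy
 * it onto the stack, then reuse it for a batch of allocations without any
 * further reference counting:
 *
 *	struct mempolicy mpol, *spol;
 *
 *	spol = mpol_cond_copy(&mpol,
 *			mpol_shared_policy_lookup(&info->policy, idx));
 *	... allocate several pages using spol ...
 */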
1891 
1892 /* Slow path of a mempolicy comparison */
1893 int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1894 {
1895 	if (!a || !b)
1896 		return 0;
1897 	if (a->mode != b->mode)
1898 		return 0;
1899 	if (a->flags != b->flags)
1900 		return 0;
1901 	if (mpol_store_user_nodemask(a))
1902 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
1903 			return 0;
1904 
1905 	switch (a->mode) {
1906 	case MPOL_BIND:
1907 		/* Fall through */
1908 	case MPOL_INTERLEAVE:
1909 		return nodes_equal(a->v.nodes, b->v.nodes);
1910 	case MPOL_PREFERRED:
1911 		return a->v.preferred_node == b->v.preferred_node &&
1912 			a->flags == b->flags;
1913 	default:
1914 		BUG();
1915 		return 0;
1916 	}
1917 }
1918 
1919 /*
1920  * Shared memory backing store policy support.
1921  *
1922  * Remember policies even when nobody has shared memory mapped.
1923  * The policies are kept in Red-Black tree linked from the inode.
1924  * They are protected by the sp->lock spinlock, which should be held
1925  * for any accesses to the tree.
1926  */
1927 
1928 /* lookup first element intersecting start-end */
1929 /* Caller holds sp->lock */
1930 static struct sp_node *
1931 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1932 {
1933 	struct rb_node *n = sp->root.rb_node;
1934 
1935 	while (n) {
1936 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
1937 
1938 		if (start >= p->end)
1939 			n = n->rb_right;
1940 		else if (end <= p->start)
1941 			n = n->rb_left;
1942 		else
1943 			break;
1944 	}
1945 	if (!n)
1946 		return NULL;
1947 	for (;;) {
1948 		struct sp_node *w = NULL;
1949 		struct rb_node *prev = rb_prev(n);
1950 		if (!prev)
1951 			break;
1952 		w = rb_entry(prev, struct sp_node, nd);
1953 		if (w->end <= start)
1954 			break;
1955 		n = prev;
1956 	}
1957 	return rb_entry(n, struct sp_node, nd);
1958 }
1959 
1960 /* Insert a new shared policy into the list. */
1961 /* Caller holds sp->lock */
1962 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1963 {
1964 	struct rb_node **p = &sp->root.rb_node;
1965 	struct rb_node *parent = NULL;
1966 	struct sp_node *nd;
1967 
1968 	while (*p) {
1969 		parent = *p;
1970 		nd = rb_entry(parent, struct sp_node, nd);
1971 		if (new->start < nd->start)
1972 			p = &(*p)->rb_left;
1973 		else if (new->end > nd->end)
1974 			p = &(*p)->rb_right;
1975 		else
1976 			BUG();
1977 	}
1978 	rb_link_node(&new->nd, parent, p);
1979 	rb_insert_color(&new->nd, &sp->root);
1980 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
1981 		 new->policy ? new->policy->mode : 0);
1982 }
1983 
1984 /* Find shared policy intersecting idx */
1985 struct mempolicy *
1986 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1987 {
1988 	struct mempolicy *pol = NULL;
1989 	struct sp_node *sn;
1990 
1991 	if (!sp->root.rb_node)
1992 		return NULL;
1993 	spin_lock(&sp->lock);
1994 	sn = sp_lookup(sp, idx, idx+1);
1995 	if (sn) {
1996 		mpol_get(sn->policy);
1997 		pol = sn->policy;
1998 	}
1999 	spin_unlock(&sp->lock);
2000 	return pol;
2001 }
2002 
2003 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2004 {
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2006 	rb_erase(&n->nd, &sp->root);
2007 	mpol_put(n->policy);
2008 	kmem_cache_free(sn_cache, n);
2009 }
2010 
2011 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2012 				struct mempolicy *pol)
2013 {
2014 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2015 
2016 	if (!n)
2017 		return NULL;
2018 	n->start = start;
2019 	n->end = end;
2020 	mpol_get(pol);
2021 	pol->flags |= MPOL_F_SHARED;	/* for unref */
2022 	n->policy = pol;
2023 	return n;
2024 }
2025 
2026 /* Replace a policy range. */
2027 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2028 				 unsigned long end, struct sp_node *new)
2029 {
2030 	struct sp_node *n, *new2 = NULL;
2031 
2032 restart:
2033 	spin_lock(&sp->lock);
2034 	n = sp_lookup(sp, start, end);
2035 	/* Take care of old policies in the same range. */
2036 	while (n && n->start < end) {
2037 		struct rb_node *next = rb_next(&n->nd);
2038 		if (n->start >= start) {
2039 			if (n->end <= end)
2040 				sp_delete(sp, n);
2041 			else
2042 				n->start = end;
2043 		} else {
2044 			/* Old policy spanning whole new range. */
2045 			if (n->end > end) {
2046 				if (!new2) {
2047 					spin_unlock(&sp->lock);
2048 					new2 = sp_alloc(end, n->end, n->policy);
2049 					if (!new2)
2050 						return -ENOMEM;
2051 					goto restart;
2052 				}
2053 				n->end = start;
2054 				sp_insert(sp, new2);
2055 				new2 = NULL;
2056 				break;
2057 			} else
2058 				n->end = start;
2059 		}
2060 		if (!next)
2061 			break;
2062 		n = rb_entry(next, struct sp_node, nd);
2063 	}
2064 	if (new)
2065 		sp_insert(sp, new);
2066 	spin_unlock(&sp->lock);
2067 	if (new2) {
2068 		mpol_put(new2->policy);
2069 		kmem_cache_free(sn_cache, new2);
2070 	}
2071 	return 0;
2072 }
2073 
2074 /**
2075  * mpol_shared_policy_init - initialize shared policy for inode
2076  * @sp: pointer to inode shared policy
2077  * @mpol:  struct mempolicy to install
2078  *
2079  * Install non-NULL @mpol in inode's shared policy rb-tree.
2080  * On entry, the current task has a reference on a non-NULL @mpol.
2081  * This must be released on exit.
 * This is called at inode creation time (from get_inode()), so we can use GFP_KERNEL.
2083  */
2084 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2085 {
2086 	int ret;
2087 
2088 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2089 	spin_lock_init(&sp->lock);
2090 
2091 	if (mpol) {
2092 		struct vm_area_struct pvma;
2093 		struct mempolicy *new;
2094 		NODEMASK_SCRATCH(scratch);
2095 
2096 		if (!scratch)
2097 			goto put_mpol;
2098 		/* contextualize the tmpfs mount point mempolicy */
2099 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2100 		if (IS_ERR(new))
2101 			goto free_scratch; /* no valid nodemask intersection */
2102 
2103 		task_lock(current);
2104 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2105 		task_unlock(current);
2106 		if (ret)
2107 			goto put_new;
2108 
2109 		/* Create pseudo-vma that contains just the policy */
2110 		memset(&pvma, 0, sizeof(struct vm_area_struct));
2111 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2112 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2113 
2114 put_new:
2115 		mpol_put(new);			/* drop initial ref */
2116 free_scratch:
2117 		NODEMASK_SCRATCH_FREE(scratch);
2118 put_mpol:
2119 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2120 	}
2121 }
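/*
 * Illustrative sketch (assumed tmpfs caller, names from mm/shmem.c): the
 * per-inode shared policy is initialized from the superblock's mount-option
 * mempolicy roughly like this when an inode is created:
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 *
 * where shmem_get_sbmpol() is assumed to return a referenced copy of the
 * sb mpol (or NULL), matching the "drop our incoming ref" convention above.
 */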
2122 
2123 int mpol_set_shared_policy(struct shared_policy *info,
2124 			struct vm_area_struct *vma, struct mempolicy *npol)
2125 {
2126 	int err;
2127 	struct sp_node *new = NULL;
2128 	unsigned long sz = vma_pages(vma);
2129 
2130 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2131 		 vma->vm_pgoff,
2132 		 sz, npol ? npol->mode : -1,
2133 		 npol ? npol->flags : -1,
2134 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
2135 
2136 	if (npol) {
2137 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2138 		if (!new)
2139 			return -ENOMEM;
2140 	}
2141 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2142 	if (err && new)
2143 		kmem_cache_free(sn_cache, new);
2144 	return err;
2145 }
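/*
 * Illustrative sketch (hypothetical caller): a shared-memory vm_ops
 * ->set_policy handler would apply an mbind() request for a mapped range
 * by passing the vma and new policy straight through, e.g.
 *
 *	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, new);
 *
 * so that only the file offsets covered by the vma receive the new policy.
 */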
2146 
2147 /* Free a backing policy store on inode delete. */
2148 void mpol_free_shared_policy(struct shared_policy *p)
2149 {
2150 	struct sp_node *n;
2151 	struct rb_node *next;
2152 
2153 	if (!p->root.rb_node)
2154 		return;
2155 	spin_lock(&p->lock);
2156 	next = rb_first(&p->root);
2157 	while (next) {
2158 		n = rb_entry(next, struct sp_node, nd);
2159 		next = rb_next(&n->nd);
2160 		rb_erase(&n->nd, &p->root);
2161 		mpol_put(n->policy);
2162 		kmem_cache_free(sn_cache, n);
2163 	}
2164 	spin_unlock(&p->lock);
2165 }
2166 
2167 /* assumes fs == KERNEL_DS */
2168 void __init numa_policy_init(void)
2169 {
2170 	nodemask_t interleave_nodes;
2171 	unsigned long largest = 0;
2172 	int nid, prefer = 0;
2173 
2174 	policy_cache = kmem_cache_create("numa_policy",
2175 					 sizeof(struct mempolicy),
2176 					 0, SLAB_PANIC, NULL);
2177 
2178 	sn_cache = kmem_cache_create("shared_policy_node",
2179 				     sizeof(struct sp_node),
2180 				     0, SLAB_PANIC, NULL);
2181 
2182 	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB); if all
	 * nodes are smaller, fall back to the largest node.
2186 	 */
2187 	nodes_clear(interleave_nodes);
2188 	for_each_node_state(nid, N_HIGH_MEMORY) {
2189 		unsigned long total_pages = node_present_pages(nid);
2190 
2191 		/* Preserve the largest node */
2192 		if (largest < total_pages) {
2193 			largest = total_pages;
2194 			prefer = nid;
2195 		}
2196 
2197 		/* Interleave this node? */
2198 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2199 			node_set(nid, interleave_nodes);
2200 	}
2201 
2202 	/* All too small, use the largest */
2203 	if (unlikely(nodes_empty(interleave_nodes)))
2204 		node_set(prefer, interleave_nodes);
2205 
2206 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		printk(KERN_ERR "numa_policy_init: interleaving failed\n");
2208 }
2209 
2210 /* Reset policy of current process to default */
2211 void numa_default_policy(void)
2212 {
2213 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2214 }
2215 
2216 /*
2217  * Parse and format mempolicy from/to strings
2218  */
2219 
2220 /*
 * "local" is a pseudo-policy:  MPOL_PREFERRED with the MPOL_F_LOCAL flag
2222  * Used only for mpol_parse_str() and mpol_to_str()
2223  */
2224 #define MPOL_LOCAL MPOL_MAX
2225 static const char * const policy_modes[] =
2226 {
2227 	[MPOL_DEFAULT]    = "default",
2228 	[MPOL_PREFERRED]  = "prefer",
2229 	[MPOL_BIND]       = "bind",
2230 	[MPOL_INTERLEAVE] = "interleave",
2231 	[MPOL_LOCAL]      = "local"
2232 };
2233 
2234 
2235 #ifdef CONFIG_TMPFS
2236 /**
2237  * mpol_parse_str - parse string to mempolicy
2238  * @str:  string containing mempolicy to parse
2239  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2240  * @no_context:  flag whether to "contextualize" the mempolicy
2241  *
2242  * Format of input:
2243  *	<mode>[=<flags>][:<nodelist>]
2244  *
2245  * if @no_context is true, save the input nodemask in w.user_nodemask in
2246  * the returned mempolicy.  This will be used to "clone" the mempolicy in
2247  * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
2248  * mount option.  Note that if 'static' or 'relative' mode flags were
2249  * specified, the input nodemask will already have been saved.  Saving
2250  * it again is redundant, but safe.
2251  *
2252  * On success, returns 0, else 1
2253  */
2254 int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2255 {
2256 	struct mempolicy *new = NULL;
2257 	unsigned short mode;
2258 	unsigned short uninitialized_var(mode_flags);
2259 	nodemask_t nodes;
2260 	char *nodelist = strchr(str, ':');
2261 	char *flags = strchr(str, '=');
2262 	int err = 1;
2263 
2264 	if (nodelist) {
2265 		/* NUL-terminate mode or flags string */
2266 		*nodelist++ = '\0';
2267 		if (nodelist_parse(nodelist, nodes))
2268 			goto out;
2269 		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2270 			goto out;
2271 	} else
2272 		nodes_clear(nodes);
2273 
2274 	if (flags)
2275 		*flags++ = '\0';	/* terminate mode string */
2276 
2277 	for (mode = 0; mode <= MPOL_LOCAL; mode++) {
2278 		if (!strcmp(str, policy_modes[mode])) {
2279 			break;
2280 		}
2281 	}
2282 	if (mode > MPOL_LOCAL)
2283 		goto out;
2284 
2285 	switch (mode) {
2286 	case MPOL_PREFERRED:
2287 		/*
2288 		 * Insist on a nodelist of one node only
2289 		 */
2290 		if (nodelist) {
2291 			char *rest = nodelist;
2292 			while (isdigit(*rest))
2293 				rest++;
2294 			if (*rest)
2295 				goto out;
2296 		}
2297 		break;
2298 	case MPOL_INTERLEAVE:
2299 		/*
2300 		 * Default to online nodes with memory if no nodelist
2301 		 */
2302 		if (!nodelist)
2303 			nodes = node_states[N_HIGH_MEMORY];
2304 		break;
2305 	case MPOL_LOCAL:
2306 		/*
2307 		 * Don't allow a nodelist;  mpol_new() checks flags
2308 		 */
2309 		if (nodelist)
2310 			goto out;
2311 		mode = MPOL_PREFERRED;
2312 		break;
2313 	case MPOL_DEFAULT:
2314 		/*
		 * Insist on an empty nodelist
2316 		 */
2317 		if (!nodelist)
2318 			err = 0;
2319 		goto out;
2320 	case MPOL_BIND:
2321 		/*
2322 		 * Insist on a nodelist
2323 		 */
2324 		if (!nodelist)
2325 			goto out;
2326 	}
2327 
2328 	mode_flags = 0;
2329 	if (flags) {
2330 		/*
2331 		 * Currently, we only support two mutually exclusive
2332 		 * mode flags.
2333 		 */
2334 		if (!strcmp(flags, "static"))
2335 			mode_flags |= MPOL_F_STATIC_NODES;
2336 		else if (!strcmp(flags, "relative"))
2337 			mode_flags |= MPOL_F_RELATIVE_NODES;
2338 		else
2339 			goto out;
2340 	}
2341 
2342 	new = mpol_new(mode, mode_flags, &nodes);
2343 	if (IS_ERR(new))
2344 		goto out;
2345 
2346 	if (no_context) {
2347 		/* save for contextualization */
2348 		new->w.user_nodemask = nodes;
2349 	} else {
2350 		int ret;
2351 		NODEMASK_SCRATCH(scratch);
2352 		if (scratch) {
2353 			task_lock(current);
2354 			ret = mpol_set_nodemask(new, &nodes, scratch);
2355 			task_unlock(current);
2356 		} else
2357 			ret = -ENOMEM;
2358 		NODEMASK_SCRATCH_FREE(scratch);
2359 		if (ret) {
2360 			mpol_put(new);
2361 			goto out;
2362 		}
2363 	}
2364 	err = 0;
2365 
2366 out:
2367 	/* Restore string for error message */
2368 	if (nodelist)
2369 		*--nodelist = ':';
2370 	if (flags)
2371 		*--flags = '=';
2372 	if (!err)
2373 		*mpol = new;
2374 	return err;
2375 }
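/*
 * Illustrative example (option string and variable names are made up):
 * parsing the tmpfs mount option "interleave=static:0-3" with @no_context
 * set yields an MPOL_INTERLEAVE policy with MPOL_F_STATIC_NODES and nodes
 * 0-3 saved in w.user_nodemask for later contextualization:
 *
 *	struct mempolicy *mpol;
 *
 *	if (!mpol_parse_str(optstr, &mpol, 1))
 *		sbinfo->mpol = mpol;
 */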
2376 #endif /* CONFIG_TMPFS */
2377 
2378 /**
2379  * mpol_to_str - format a mempolicy structure for printing
2380  * @buffer:  to contain formatted mempolicy string
2381  * @maxlen:  length of @buffer
2382  * @pol:  pointer to mempolicy to be formatted
2383  * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
2384  *
2385  * Convert a mempolicy into a string.
2386  * Returns the number of characters in buffer (if positive)
2387  * or an error (negative)
2388  */
2389 int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2390 {
2391 	char *p = buffer;
2392 	int l;
2393 	nodemask_t nodes;
2394 	unsigned short mode;
2395 	unsigned short flags = pol ? pol->flags : 0;
2396 
2397 	/*
2398 	 * Sanity check:  room for longest mode, flag and some nodes
2399 	 */
2400 	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2401 
2402 	if (!pol || pol == &default_policy)
2403 		mode = MPOL_DEFAULT;
2404 	else
2405 		mode = pol->mode;
2406 
2407 	switch (mode) {
2408 	case MPOL_DEFAULT:
2409 		nodes_clear(nodes);
2410 		break;
2411 
2412 	case MPOL_PREFERRED:
2413 		nodes_clear(nodes);
2414 		if (flags & MPOL_F_LOCAL)
2415 			mode = MPOL_LOCAL;	/* pseudo-policy */
2416 		else
2417 			node_set(pol->v.preferred_node, nodes);
2418 		break;
2419 
2420 	case MPOL_BIND:
2421 		/* Fall through */
2422 	case MPOL_INTERLEAVE:
2423 		if (no_context)
2424 			nodes = pol->w.user_nodemask;
2425 		else
2426 			nodes = pol->v.nodes;
2427 		break;
2428 
2429 	default:
2430 		BUG();
2431 	}
2432 
2433 	l = strlen(policy_modes[mode]);
2434 	if (buffer + maxlen < p + l + 1)
2435 		return -ENOSPC;
2436 
2437 	strcpy(p, policy_modes[mode]);
2438 	p += l;
2439 
2440 	if (flags & MPOL_MODE_FLAGS) {
2441 		if (buffer + maxlen < p + 2)
2442 			return -ENOSPC;
2443 		*p++ = '=';
2444 
2445 		/*
2446 		 * Currently, the only defined flags are mutually exclusive
2447 		 */
2448 		if (flags & MPOL_F_STATIC_NODES)
2449 			p += snprintf(p, buffer + maxlen - p, "static");
2450 		else if (flags & MPOL_F_RELATIVE_NODES)
2451 			p += snprintf(p, buffer + maxlen - p, "relative");
2452 	}
2453 
2454 	if (!nodes_empty(nodes)) {
2455 		if (buffer + maxlen < p + 2)
2456 			return -ENOSPC;
2457 		*p++ = ':';
2458 	 	p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2459 	}
2460 	return p - buffer;
2461 }
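/*
 * Illustrative output (assuming a large enough buffer): a policy of
 * MPOL_INTERLEAVE over nodes 0-3 with MPOL_F_STATIC_NODES formats as
 *
 *	char buf[64];
 *	mpol_to_str(buf, sizeof(buf), pol, 0);    buf == "interleave=static:0-3"
 *
 * while the default policy formats as "default" and an MPOL_F_LOCAL
 * preferred policy as "local".
 */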
2462 
2463 struct numa_maps {
2464 	unsigned long pages;
2465 	unsigned long anon;
2466 	unsigned long active;
2467 	unsigned long writeback;
2468 	unsigned long mapcount_max;
2469 	unsigned long dirty;
2470 	unsigned long swapcache;
2471 	unsigned long node[MAX_NUMNODES];
2472 };
2473 
2474 static void gather_stats(struct page *page, void *private, int pte_dirty)
2475 {
2476 	struct numa_maps *md = private;
2477 	int count = page_mapcount(page);
2478 
2479 	md->pages++;
2480 	if (pte_dirty || PageDirty(page))
2481 		md->dirty++;
2482 
2483 	if (PageSwapCache(page))
2484 		md->swapcache++;
2485 
2486 	if (PageActive(page) || PageUnevictable(page))
2487 		md->active++;
2488 
2489 	if (PageWriteback(page))
2490 		md->writeback++;
2491 
2492 	if (PageAnon(page))
2493 		md->anon++;
2494 
2495 	if (count > md->mapcount_max)
2496 		md->mapcount_max = count;
2497 
2498 	md->node[page_to_nid(page)]++;
2499 }
2500 
2501 #ifdef CONFIG_HUGETLB_PAGE
2502 static void check_huge_range(struct vm_area_struct *vma,
2503 		unsigned long start, unsigned long end,
2504 		struct numa_maps *md)
2505 {
2506 	unsigned long addr;
2507 	struct page *page;
2508 	struct hstate *h = hstate_vma(vma);
2509 	unsigned long sz = huge_page_size(h);
2510 
2511 	for (addr = start; addr < end; addr += sz) {
2512 		pte_t *ptep = huge_pte_offset(vma->vm_mm,
2513 						addr & huge_page_mask(h));
2514 		pte_t pte;
2515 
2516 		if (!ptep)
2517 			continue;
2518 
2519 		pte = *ptep;
2520 		if (pte_none(pte))
2521 			continue;
2522 
2523 		page = pte_page(pte);
2524 		if (!page)
2525 			continue;
2526 
2527 		gather_stats(page, md, pte_dirty(*ptep));
2528 	}
2529 }
2530 #else
2531 static inline void check_huge_range(struct vm_area_struct *vma,
2532 		unsigned long start, unsigned long end,
2533 		struct numa_maps *md)
2534 {
2535 }
2536 #endif
2537 
2538 /*
2539  * Display pages allocated per node and memory policy via /proc.
2540  */
2541 int show_numa_map(struct seq_file *m, void *v)
2542 {
2543 	struct proc_maps_private *priv = m->private;
2544 	struct vm_area_struct *vma = v;
2545 	struct numa_maps *md;
2546 	struct file *file = vma->vm_file;
2547 	struct mm_struct *mm = vma->vm_mm;
2548 	struct mempolicy *pol;
2549 	int n;
2550 	char buffer[50];
2551 
2552 	if (!mm)
2553 		return 0;
2554 
2555 	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
2556 	if (!md)
2557 		return 0;
2558 
2559 	pol = get_vma_policy(priv->task, vma, vma->vm_start);
2560 	mpol_to_str(buffer, sizeof(buffer), pol, 0);
2561 	mpol_cond_put(pol);
2562 
2563 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2564 
2565 	if (file) {
2566 		seq_printf(m, " file=");
2567 		seq_path(m, &file->f_path, "\n\t= ");
2568 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2569 		seq_printf(m, " heap");
2570 	} else if (vma->vm_start <= mm->start_stack &&
2571 			vma->vm_end >= mm->start_stack) {
2572 		seq_printf(m, " stack");
2573 	}
2574 
2575 	if (is_vm_hugetlb_page(vma)) {
2576 		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2577 		seq_printf(m, " huge");
2578 	} else {
2579 		check_pgd_range(vma, vma->vm_start, vma->vm_end,
2580 			&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2581 	}
2582 
2583 	if (!md->pages)
2584 		goto out;
2585 
	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);
2606 
2607 	for_each_node_state(n, N_HIGH_MEMORY)
2608 		if (md->node[n])
2609 			seq_printf(m, " N%d=%lu", n, md->node[n]);
2610 out:
2611 	seq_putc(m, '\n');
2612 	kfree(md);
2613 
2614 	if (m->count < m->size)
2615 		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
2616 	return 0;
2617 }
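/*
 * Illustrative /proc/<pid>/numa_maps line produced by the code above
 * (address and counts are made up):
 *
 *	00400000 default file=/bin/cat mapped=7 mapmax=2 N0=4 N1=3
 *
 * i.e. start address, policy string, backing object, then only the
 * non-zero counters, followed by the per-node page counts.
 */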
2618