xref: /linux/mm/mempolicy.c (revision 100c85421b52e41269ada88f7d71a6b8a06c7a11)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Simple NUMA memory policy for the Linux kernel.
4  *
5  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
6  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
7  *
8  * NUMA policy allows the user to give hints in which node(s) memory should
9  * be allocated.
10  *
11  * Support four policies per VMA and per process:
12  *
13  * The VMA policy has priority over the process policy for a page fault.
14  *
15  * interleave     Allocate memory interleaved over a set of nodes,
16  *                with normal fallback if it fails.
17  *                For VMA based allocations this interleaves based on the
18  *                offset into the backing object or offset into the mapping
19  *                for anonymous memory. For process policy an process counter
20  *                is used.
21  *
22  * weighted interleave
23  *                Allocate memory interleaved over a set of nodes based on
24  *                a set of weights (per-node), with normal fallback if it
25  *                fails.  Otherwise operates the same as interleave.
26  *                Example: nodeset(0,1) & weights (2,1) - 2 pages allocated
27  *                on node 0 for every 1 page allocated on node 1.
28  *
29  * bind           Only allocate memory on a specific set of nodes,
30  *                no fallback.
31  *                FIXME: memory is allocated starting with the first node
32  *                to the last. It would be better if bind would truly restrict
33  *                the allocation to memory nodes instead
34  *
35  * preferred      Try a specific node first before normal fallback.
36  *                As a special case NUMA_NO_NODE here means do the allocation
37  *                on the local CPU. This is normally identical to default,
38  *                but useful to set in a VMA when you have a non default
39  *                process policy.
40  *
41  * preferred many Try a set of nodes first before normal fallback. This is
42  *                similar to preferred without the special case.
43  *
44  * default        Allocate on the local node first, or when on a VMA
45  *                use the process policy. This is what Linux always did
46  *		  in a NUMA aware kernel and still does by, ahem, default.
47  *
48  * The process policy is applied for most non interrupt memory allocations
49  * in that process' context. Interrupts ignore the policies and always
50  * try to allocate on the local CPU. The VMA policy is only applied for memory
51  * allocations for a VMA in the VM.
52  *
53  * Currently there are a few corner cases in swapping where the policy
54  * is not applied, but the majority should be handled. When process policy
55  * is used it is not remembered over swap outs/swap ins.
56  *
57  * Only the highest zone in the zone hierarchy gets policied. Allocations
58  * requesting a lower zone just use default policy. This implies that
59  * on systems with highmem kernel lowmem allocation don't get policied.
60  * Same with GFP_DMA allocations.
61  *
62  * For shmem/tmpfs shared memory the policy is shared between
63  * all users and remembered even when nobody has memory mapped.
64  */
65 
66 /* Notebook:
67    fix mmap readahead to honour policy and enable policy for any page cache
68    object
69    statistics for bigpages
70    global policy for page cache? currently it uses process policy. Requires
71    first item above.
72    handle mremap for shared memory (currently ignored for the policy)
73    grows down?
74    make bind policy root only? It can trigger oom much faster and the
75    kernel is not always grateful with that.
76 */
77 
78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
79 
80 #include <linux/mempolicy.h>
81 #include <linux/pagewalk.h>
82 #include <linux/highmem.h>
83 #include <linux/hugetlb.h>
84 #include <linux/kernel.h>
85 #include <linux/sched.h>
86 #include <linux/sched/mm.h>
87 #include <linux/sched/numa_balancing.h>
88 #include <linux/sched/task.h>
89 #include <linux/nodemask.h>
90 #include <linux/cpuset.h>
91 #include <linux/slab.h>
92 #include <linux/string.h>
93 #include <linux/export.h>
94 #include <linux/nsproxy.h>
95 #include <linux/interrupt.h>
96 #include <linux/init.h>
97 #include <linux/compat.h>
98 #include <linux/ptrace.h>
99 #include <linux/swap.h>
100 #include <linux/seq_file.h>
101 #include <linux/proc_fs.h>
102 #include <linux/migrate.h>
103 #include <linux/ksm.h>
104 #include <linux/rmap.h>
105 #include <linux/security.h>
106 #include <linux/syscalls.h>
107 #include <linux/ctype.h>
108 #include <linux/mm_inline.h>
109 #include <linux/mmu_notifier.h>
110 #include <linux/printk.h>
111 #include <linux/swapops.h>
112 
113 #include <asm/tlbflush.h>
114 #include <asm/tlb.h>
115 #include <linux/uaccess.h>
116 
117 #include "internal.h"
118 
119 /* Internal flags */
120 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
121 #define MPOL_MF_INVERT       (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */
122 #define MPOL_MF_WRLOCK       (MPOL_MF_INTERNAL << 2)	/* Write-lock walked vmas */
123 
124 static struct kmem_cache *policy_cache;
125 static struct kmem_cache *sn_cache;
126 
127 /* Highest zone. An specific allocation for a zone below that is not
128    policied. */
129 enum zone_type policy_zone = 0;
130 
131 /*
132  * run-time system-wide default policy => local allocation
133  */
134 static struct mempolicy default_policy = {
135 	.refcnt = ATOMIC_INIT(1), /* never free it */
136 	.mode = MPOL_LOCAL,
137 };
138 
139 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
140 
141 /*
142  * iw_table is the sysfs-set interleave weight table, a value of 0 denotes
143  * system-default value should be used. A NULL iw_table also denotes that
144  * system-default values should be used. Until the system-default table
145  * is implemented, the system-default is always 1.
146  *
147  * iw_table is RCU protected
148  */
149 static u8 __rcu *iw_table;
150 static DEFINE_MUTEX(iw_table_lock);
151 
152 static u8 get_il_weight(int node)
153 {
154 	u8 *table;
155 	u8 weight;
156 
157 	rcu_read_lock();
158 	table = rcu_dereference(iw_table);
159 	/* if no iw_table, use system default */
160 	weight = table ? table[node] : 1;
161 	/* if value in iw_table is 0, use system default */
162 	weight = weight ? weight : 1;
163 	rcu_read_unlock();
164 	return weight;
165 }
166 
167 /**
168  * numa_nearest_node - Find nearest node by state
169  * @node: Node id to start the search
170  * @state: State to filter the search
171  *
172  * Lookup the closest node by distance if @nid is not in state.
173  *
174  * Return: this @node if it is in state, otherwise the closest node by distance
175  */
176 int numa_nearest_node(int node, unsigned int state)
177 {
178 	int min_dist = INT_MAX, dist, n, min_node;
179 
180 	if (state >= NR_NODE_STATES)
181 		return -EINVAL;
182 
183 	if (node == NUMA_NO_NODE || node_state(node, state))
184 		return node;
185 
186 	min_node = node;
187 	for_each_node_state(n, state) {
188 		dist = node_distance(node, n);
189 		if (dist < min_dist) {
190 			min_dist = dist;
191 			min_node = n;
192 		}
193 	}
194 
195 	return min_node;
196 }
197 EXPORT_SYMBOL_GPL(numa_nearest_node);
198 
199 struct mempolicy *get_task_policy(struct task_struct *p)
200 {
201 	struct mempolicy *pol = p->mempolicy;
202 	int node;
203 
204 	if (pol)
205 		return pol;
206 
207 	node = numa_node_id();
208 	if (node != NUMA_NO_NODE) {
209 		pol = &preferred_node_policy[node];
210 		/* preferred_node_policy is not initialised early in boot */
211 		if (pol->mode)
212 			return pol;
213 	}
214 
215 	return &default_policy;
216 }
217 
218 static const struct mempolicy_operations {
219 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
220 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
221 } mpol_ops[MPOL_MAX];
222 
223 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
224 {
225 	return pol->flags & MPOL_MODE_FLAGS;
226 }
227 
228 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
229 				   const nodemask_t *rel)
230 {
231 	nodemask_t tmp;
232 	nodes_fold(tmp, *orig, nodes_weight(*rel));
233 	nodes_onto(*ret, tmp, *rel);
234 }
235 
236 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
237 {
238 	if (nodes_empty(*nodes))
239 		return -EINVAL;
240 	pol->nodes = *nodes;
241 	return 0;
242 }
243 
244 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
245 {
246 	if (nodes_empty(*nodes))
247 		return -EINVAL;
248 
249 	nodes_clear(pol->nodes);
250 	node_set(first_node(*nodes), pol->nodes);
251 	return 0;
252 }
253 
254 /*
255  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
256  * any, for the new policy.  mpol_new() has already validated the nodes
257  * parameter with respect to the policy mode and flags.
258  *
259  * Must be called holding task's alloc_lock to protect task's mems_allowed
260  * and mempolicy.  May also be called holding the mmap_lock for write.
261  */
262 static int mpol_set_nodemask(struct mempolicy *pol,
263 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
264 {
265 	int ret;
266 
267 	/*
268 	 * Default (pol==NULL) resp. local memory policies are not a
269 	 * subject of any remapping. They also do not need any special
270 	 * constructor.
271 	 */
272 	if (!pol || pol->mode == MPOL_LOCAL)
273 		return 0;
274 
275 	/* Check N_MEMORY */
276 	nodes_and(nsc->mask1,
277 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
278 
279 	VM_BUG_ON(!nodes);
280 
281 	if (pol->flags & MPOL_F_RELATIVE_NODES)
282 		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
283 	else
284 		nodes_and(nsc->mask2, *nodes, nsc->mask1);
285 
286 	if (mpol_store_user_nodemask(pol))
287 		pol->w.user_nodemask = *nodes;
288 	else
289 		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
290 
291 	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
292 	return ret;
293 }
294 
295 /*
296  * This function just creates a new policy, does some check and simple
297  * initialization. You must invoke mpol_set_nodemask() to set nodes.
298  */
299 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
300 				  nodemask_t *nodes)
301 {
302 	struct mempolicy *policy;
303 
304 	if (mode == MPOL_DEFAULT) {
305 		if (nodes && !nodes_empty(*nodes))
306 			return ERR_PTR(-EINVAL);
307 		return NULL;
308 	}
309 	VM_BUG_ON(!nodes);
310 
311 	/*
312 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
313 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
314 	 * All other modes require a valid pointer to a non-empty nodemask.
315 	 */
316 	if (mode == MPOL_PREFERRED) {
317 		if (nodes_empty(*nodes)) {
318 			if (((flags & MPOL_F_STATIC_NODES) ||
319 			     (flags & MPOL_F_RELATIVE_NODES)))
320 				return ERR_PTR(-EINVAL);
321 
322 			mode = MPOL_LOCAL;
323 		}
324 	} else if (mode == MPOL_LOCAL) {
325 		if (!nodes_empty(*nodes) ||
326 		    (flags & MPOL_F_STATIC_NODES) ||
327 		    (flags & MPOL_F_RELATIVE_NODES))
328 			return ERR_PTR(-EINVAL);
329 	} else if (nodes_empty(*nodes))
330 		return ERR_PTR(-EINVAL);
331 
332 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
333 	if (!policy)
334 		return ERR_PTR(-ENOMEM);
335 	atomic_set(&policy->refcnt, 1);
336 	policy->mode = mode;
337 	policy->flags = flags;
338 	policy->home_node = NUMA_NO_NODE;
339 
340 	return policy;
341 }
342 
343 /* Slow path of a mpol destructor. */
344 void __mpol_put(struct mempolicy *pol)
345 {
346 	if (!atomic_dec_and_test(&pol->refcnt))
347 		return;
348 	kmem_cache_free(policy_cache, pol);
349 }
350 
351 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
352 {
353 }
354 
355 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
356 {
357 	nodemask_t tmp;
358 
359 	if (pol->flags & MPOL_F_STATIC_NODES)
360 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
361 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
362 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
363 	else {
364 		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
365 								*nodes);
366 		pol->w.cpuset_mems_allowed = *nodes;
367 	}
368 
369 	if (nodes_empty(tmp))
370 		tmp = *nodes;
371 
372 	pol->nodes = tmp;
373 }
374 
375 static void mpol_rebind_preferred(struct mempolicy *pol,
376 						const nodemask_t *nodes)
377 {
378 	pol->w.cpuset_mems_allowed = *nodes;
379 }
380 
381 /*
382  * mpol_rebind_policy - Migrate a policy to a different set of nodes
383  *
384  * Per-vma policies are protected by mmap_lock. Allocations using per-task
385  * policies are protected by task->mems_allowed_seq to prevent a premature
386  * OOM/allocation failure due to parallel nodemask modification.
387  */
388 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
389 {
390 	if (!pol || pol->mode == MPOL_LOCAL)
391 		return;
392 	if (!mpol_store_user_nodemask(pol) &&
393 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
394 		return;
395 
396 	mpol_ops[pol->mode].rebind(pol, newmask);
397 }
398 
399 /*
400  * Wrapper for mpol_rebind_policy() that just requires task
401  * pointer, and updates task mempolicy.
402  *
403  * Called with task's alloc_lock held.
404  */
405 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
406 {
407 	mpol_rebind_policy(tsk->mempolicy, new);
408 }
409 
410 /*
411  * Rebind each vma in mm to new nodemask.
412  *
413  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
414  */
415 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
416 {
417 	struct vm_area_struct *vma;
418 	VMA_ITERATOR(vmi, mm, 0);
419 
420 	mmap_write_lock(mm);
421 	for_each_vma(vmi, vma) {
422 		vma_start_write(vma);
423 		mpol_rebind_policy(vma->vm_policy, new);
424 	}
425 	mmap_write_unlock(mm);
426 }
427 
428 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
429 	[MPOL_DEFAULT] = {
430 		.rebind = mpol_rebind_default,
431 	},
432 	[MPOL_INTERLEAVE] = {
433 		.create = mpol_new_nodemask,
434 		.rebind = mpol_rebind_nodemask,
435 	},
436 	[MPOL_PREFERRED] = {
437 		.create = mpol_new_preferred,
438 		.rebind = mpol_rebind_preferred,
439 	},
440 	[MPOL_BIND] = {
441 		.create = mpol_new_nodemask,
442 		.rebind = mpol_rebind_nodemask,
443 	},
444 	[MPOL_LOCAL] = {
445 		.rebind = mpol_rebind_default,
446 	},
447 	[MPOL_PREFERRED_MANY] = {
448 		.create = mpol_new_nodemask,
449 		.rebind = mpol_rebind_preferred,
450 	},
451 	[MPOL_WEIGHTED_INTERLEAVE] = {
452 		.create = mpol_new_nodemask,
453 		.rebind = mpol_rebind_nodemask,
454 	},
455 };
456 
457 static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
458 				unsigned long flags);
459 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
460 				pgoff_t ilx, int *nid);
461 
462 static bool strictly_unmovable(unsigned long flags)
463 {
464 	/*
465 	 * STRICT without MOVE flags lets do_mbind() fail immediately with -EIO
466 	 * if any misplaced page is found.
467 	 */
468 	return (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ==
469 			 MPOL_MF_STRICT;
470 }
471 
472 struct migration_mpol {		/* for alloc_migration_target_by_mpol() */
473 	struct mempolicy *pol;
474 	pgoff_t ilx;
475 };
476 
477 struct queue_pages {
478 	struct list_head *pagelist;
479 	unsigned long flags;
480 	nodemask_t *nmask;
481 	unsigned long start;
482 	unsigned long end;
483 	struct vm_area_struct *first;
484 	struct folio *large;		/* note last large folio encountered */
485 	long nr_failed;			/* could not be isolated at this time */
486 };
487 
488 /*
489  * Check if the folio's nid is in qp->nmask.
490  *
491  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
492  * in the invert of qp->nmask.
493  */
494 static inline bool queue_folio_required(struct folio *folio,
495 					struct queue_pages *qp)
496 {
497 	int nid = folio_nid(folio);
498 	unsigned long flags = qp->flags;
499 
500 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
501 }
502 
503 static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
504 {
505 	struct folio *folio;
506 	struct queue_pages *qp = walk->private;
507 
508 	if (unlikely(is_pmd_migration_entry(*pmd))) {
509 		qp->nr_failed++;
510 		return;
511 	}
512 	folio = pfn_folio(pmd_pfn(*pmd));
513 	if (is_huge_zero_page(&folio->page)) {
514 		walk->action = ACTION_CONTINUE;
515 		return;
516 	}
517 	if (!queue_folio_required(folio, qp))
518 		return;
519 	if (!(qp->flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
520 	    !vma_migratable(walk->vma) ||
521 	    !migrate_folio_add(folio, qp->pagelist, qp->flags))
522 		qp->nr_failed++;
523 }
524 
525 /*
526  * Scan through folios, checking if they satisfy the required conditions,
527  * moving them from LRU to local pagelist for migration if they do (or not).
528  *
529  * queue_folios_pte_range() has two possible return values:
530  * 0 - continue walking to scan for more, even if an existing folio on the
531  *     wrong node could not be isolated and queued for migration.
532  * -EIO - only MPOL_MF_STRICT was specified, without MPOL_MF_MOVE or ..._ALL,
533  *        and an existing folio was on a node that does not follow the policy.
534  */
535 static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
536 			unsigned long end, struct mm_walk *walk)
537 {
538 	struct vm_area_struct *vma = walk->vma;
539 	struct folio *folio;
540 	struct queue_pages *qp = walk->private;
541 	unsigned long flags = qp->flags;
542 	pte_t *pte, *mapped_pte;
543 	pte_t ptent;
544 	spinlock_t *ptl;
545 
546 	ptl = pmd_trans_huge_lock(pmd, vma);
547 	if (ptl) {
548 		queue_folios_pmd(pmd, walk);
549 		spin_unlock(ptl);
550 		goto out;
551 	}
552 
553 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
554 	if (!pte) {
555 		walk->action = ACTION_AGAIN;
556 		return 0;
557 	}
558 	for (; addr != end; pte++, addr += PAGE_SIZE) {
559 		ptent = ptep_get(pte);
560 		if (pte_none(ptent))
561 			continue;
562 		if (!pte_present(ptent)) {
563 			if (is_migration_entry(pte_to_swp_entry(ptent)))
564 				qp->nr_failed++;
565 			continue;
566 		}
567 		folio = vm_normal_folio(vma, addr, ptent);
568 		if (!folio || folio_is_zone_device(folio))
569 			continue;
570 		/*
571 		 * vm_normal_folio() filters out zero pages, but there might
572 		 * still be reserved folios to skip, perhaps in a VDSO.
573 		 */
574 		if (folio_test_reserved(folio))
575 			continue;
576 		if (!queue_folio_required(folio, qp))
577 			continue;
578 		if (folio_test_large(folio)) {
579 			/*
580 			 * A large folio can only be isolated from LRU once,
581 			 * but may be mapped by many PTEs (and Copy-On-Write may
582 			 * intersperse PTEs of other, order 0, folios).  This is
583 			 * a common case, so don't mistake it for failure (but
584 			 * there can be other cases of multi-mapped pages which
585 			 * this quick check does not help to filter out - and a
586 			 * search of the pagelist might grow to be prohibitive).
587 			 *
588 			 * migrate_pages(&pagelist) returns nr_failed folios, so
589 			 * check "large" now so that queue_pages_range() returns
590 			 * a comparable nr_failed folios.  This does imply that
591 			 * if folio could not be isolated for some racy reason
592 			 * at its first PTE, later PTEs will not give it another
593 			 * chance of isolation; but keeps the accounting simple.
594 			 */
595 			if (folio == qp->large)
596 				continue;
597 			qp->large = folio;
598 		}
599 		if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
600 		    !vma_migratable(vma) ||
601 		    !migrate_folio_add(folio, qp->pagelist, flags)) {
602 			qp->nr_failed++;
603 			if (strictly_unmovable(flags))
604 				break;
605 		}
606 	}
607 	pte_unmap_unlock(mapped_pte, ptl);
608 	cond_resched();
609 out:
610 	if (qp->nr_failed && strictly_unmovable(flags))
611 		return -EIO;
612 	return 0;
613 }
614 
615 static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
616 			       unsigned long addr, unsigned long end,
617 			       struct mm_walk *walk)
618 {
619 #ifdef CONFIG_HUGETLB_PAGE
620 	struct queue_pages *qp = walk->private;
621 	unsigned long flags = qp->flags;
622 	struct folio *folio;
623 	spinlock_t *ptl;
624 	pte_t entry;
625 
626 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
627 	entry = huge_ptep_get(pte);
628 	if (!pte_present(entry)) {
629 		if (unlikely(is_hugetlb_entry_migration(entry)))
630 			qp->nr_failed++;
631 		goto unlock;
632 	}
633 	folio = pfn_folio(pte_pfn(entry));
634 	if (!queue_folio_required(folio, qp))
635 		goto unlock;
636 	if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
637 	    !vma_migratable(walk->vma)) {
638 		qp->nr_failed++;
639 		goto unlock;
640 	}
641 	/*
642 	 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
643 	 * Choosing not to migrate a shared folio is not counted as a failure.
644 	 *
645 	 * To check if the folio is shared, ideally we want to make sure
646 	 * every page is mapped to the same process. Doing that is very
647 	 * expensive, so check the estimated sharers of the folio instead.
648 	 */
649 	if ((flags & MPOL_MF_MOVE_ALL) ||
650 	    (folio_estimated_sharers(folio) == 1 && !hugetlb_pmd_shared(pte)))
651 		if (!isolate_hugetlb(folio, qp->pagelist))
652 			qp->nr_failed++;
653 unlock:
654 	spin_unlock(ptl);
655 	if (qp->nr_failed && strictly_unmovable(flags))
656 		return -EIO;
657 #endif
658 	return 0;
659 }
660 
661 #ifdef CONFIG_NUMA_BALANCING
662 /*
663  * This is used to mark a range of virtual addresses to be inaccessible.
664  * These are later cleared by a NUMA hinting fault. Depending on these
665  * faults, pages may be migrated for better NUMA placement.
666  *
667  * This is assuming that NUMA faults are handled using PROT_NONE. If
668  * an architecture makes a different choice, it will need further
669  * changes to the core.
670  */
671 unsigned long change_prot_numa(struct vm_area_struct *vma,
672 			unsigned long addr, unsigned long end)
673 {
674 	struct mmu_gather tlb;
675 	long nr_updated;
676 
677 	tlb_gather_mmu(&tlb, vma->vm_mm);
678 
679 	nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
680 	if (nr_updated > 0)
681 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
682 
683 	tlb_finish_mmu(&tlb);
684 
685 	return nr_updated;
686 }
687 #endif /* CONFIG_NUMA_BALANCING */
688 
689 static int queue_pages_test_walk(unsigned long start, unsigned long end,
690 				struct mm_walk *walk)
691 {
692 	struct vm_area_struct *next, *vma = walk->vma;
693 	struct queue_pages *qp = walk->private;
694 	unsigned long flags = qp->flags;
695 
696 	/* range check first */
697 	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
698 
699 	if (!qp->first) {
700 		qp->first = vma;
701 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
702 			(qp->start < vma->vm_start))
703 			/* hole at head side of range */
704 			return -EFAULT;
705 	}
706 	next = find_vma(vma->vm_mm, vma->vm_end);
707 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
708 		((vma->vm_end < qp->end) &&
709 		(!next || vma->vm_end < next->vm_start)))
710 		/* hole at middle or tail of range */
711 		return -EFAULT;
712 
713 	/*
714 	 * Need check MPOL_MF_STRICT to return -EIO if possible
715 	 * regardless of vma_migratable
716 	 */
717 	if (!vma_migratable(vma) &&
718 	    !(flags & MPOL_MF_STRICT))
719 		return 1;
720 
721 	/*
722 	 * Check page nodes, and queue pages to move, in the current vma.
723 	 * But if no moving, and no strict checking, the scan can be skipped.
724 	 */
725 	if (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
726 		return 0;
727 	return 1;
728 }
729 
730 static const struct mm_walk_ops queue_pages_walk_ops = {
731 	.hugetlb_entry		= queue_folios_hugetlb,
732 	.pmd_entry		= queue_folios_pte_range,
733 	.test_walk		= queue_pages_test_walk,
734 	.walk_lock		= PGWALK_RDLOCK,
735 };
736 
737 static const struct mm_walk_ops queue_pages_lock_vma_walk_ops = {
738 	.hugetlb_entry		= queue_folios_hugetlb,
739 	.pmd_entry		= queue_folios_pte_range,
740 	.test_walk		= queue_pages_test_walk,
741 	.walk_lock		= PGWALK_WRLOCK,
742 };
743 
744 /*
745  * Walk through page tables and collect pages to be migrated.
746  *
747  * If pages found in a given range are not on the required set of @nodes,
748  * and migration is allowed, they are isolated and queued to @pagelist.
749  *
750  * queue_pages_range() may return:
751  * 0 - all pages already on the right node, or successfully queued for moving
752  *     (or neither strict checking nor moving requested: only range checking).
753  * >0 - this number of misplaced folios could not be queued for moving
754  *      (a hugetlbfs page or a transparent huge page being counted as 1).
755  * -EIO - a misplaced page found, when MPOL_MF_STRICT specified without MOVEs.
756  * -EFAULT - a hole in the memory range, when MPOL_MF_DISCONTIG_OK unspecified.
757  */
758 static long
759 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
760 		nodemask_t *nodes, unsigned long flags,
761 		struct list_head *pagelist)
762 {
763 	int err;
764 	struct queue_pages qp = {
765 		.pagelist = pagelist,
766 		.flags = flags,
767 		.nmask = nodes,
768 		.start = start,
769 		.end = end,
770 		.first = NULL,
771 	};
772 	const struct mm_walk_ops *ops = (flags & MPOL_MF_WRLOCK) ?
773 			&queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops;
774 
775 	err = walk_page_range(mm, start, end, ops, &qp);
776 
777 	if (!qp.first)
778 		/* whole range in hole */
779 		err = -EFAULT;
780 
781 	return err ? : qp.nr_failed;
782 }
783 
784 /*
785  * Apply policy to a single VMA
786  * This must be called with the mmap_lock held for writing.
787  */
788 static int vma_replace_policy(struct vm_area_struct *vma,
789 				struct mempolicy *pol)
790 {
791 	int err;
792 	struct mempolicy *old;
793 	struct mempolicy *new;
794 
795 	vma_assert_write_locked(vma);
796 
797 	new = mpol_dup(pol);
798 	if (IS_ERR(new))
799 		return PTR_ERR(new);
800 
801 	if (vma->vm_ops && vma->vm_ops->set_policy) {
802 		err = vma->vm_ops->set_policy(vma, new);
803 		if (err)
804 			goto err_out;
805 	}
806 
807 	old = vma->vm_policy;
808 	vma->vm_policy = new; /* protected by mmap_lock */
809 	mpol_put(old);
810 
811 	return 0;
812  err_out:
813 	mpol_put(new);
814 	return err;
815 }
816 
817 /* Split or merge the VMA (if required) and apply the new policy */
818 static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
819 		struct vm_area_struct **prev, unsigned long start,
820 		unsigned long end, struct mempolicy *new_pol)
821 {
822 	unsigned long vmstart, vmend;
823 
824 	vmend = min(end, vma->vm_end);
825 	if (start > vma->vm_start) {
826 		*prev = vma;
827 		vmstart = start;
828 	} else {
829 		vmstart = vma->vm_start;
830 	}
831 
832 	if (mpol_equal(vma->vm_policy, new_pol)) {
833 		*prev = vma;
834 		return 0;
835 	}
836 
837 	vma =  vma_modify_policy(vmi, *prev, vma, vmstart, vmend, new_pol);
838 	if (IS_ERR(vma))
839 		return PTR_ERR(vma);
840 
841 	*prev = vma;
842 	return vma_replace_policy(vma, new_pol);
843 }
844 
845 /* Set the process memory policy */
846 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
847 			     nodemask_t *nodes)
848 {
849 	struct mempolicy *new, *old;
850 	NODEMASK_SCRATCH(scratch);
851 	int ret;
852 
853 	if (!scratch)
854 		return -ENOMEM;
855 
856 	new = mpol_new(mode, flags, nodes);
857 	if (IS_ERR(new)) {
858 		ret = PTR_ERR(new);
859 		goto out;
860 	}
861 
862 	task_lock(current);
863 	ret = mpol_set_nodemask(new, nodes, scratch);
864 	if (ret) {
865 		task_unlock(current);
866 		mpol_put(new);
867 		goto out;
868 	}
869 
870 	old = current->mempolicy;
871 	current->mempolicy = new;
872 	if (new && (new->mode == MPOL_INTERLEAVE ||
873 		    new->mode == MPOL_WEIGHTED_INTERLEAVE)) {
874 		current->il_prev = MAX_NUMNODES-1;
875 		current->il_weight = 0;
876 	}
877 	task_unlock(current);
878 	mpol_put(old);
879 	ret = 0;
880 out:
881 	NODEMASK_SCRATCH_FREE(scratch);
882 	return ret;
883 }
884 
885 /*
886  * Return nodemask for policy for get_mempolicy() query
887  *
888  * Called with task's alloc_lock held
889  */
890 static void get_policy_nodemask(struct mempolicy *pol, nodemask_t *nodes)
891 {
892 	nodes_clear(*nodes);
893 	if (pol == &default_policy)
894 		return;
895 
896 	switch (pol->mode) {
897 	case MPOL_BIND:
898 	case MPOL_INTERLEAVE:
899 	case MPOL_PREFERRED:
900 	case MPOL_PREFERRED_MANY:
901 	case MPOL_WEIGHTED_INTERLEAVE:
902 		*nodes = pol->nodes;
903 		break;
904 	case MPOL_LOCAL:
905 		/* return empty node mask for local allocation */
906 		break;
907 	default:
908 		BUG();
909 	}
910 }
911 
912 static int lookup_node(struct mm_struct *mm, unsigned long addr)
913 {
914 	struct page *p = NULL;
915 	int ret;
916 
917 	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
918 	if (ret > 0) {
919 		ret = page_to_nid(p);
920 		put_page(p);
921 	}
922 	return ret;
923 }
924 
925 /* Retrieve NUMA policy */
926 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
927 			     unsigned long addr, unsigned long flags)
928 {
929 	int err;
930 	struct mm_struct *mm = current->mm;
931 	struct vm_area_struct *vma = NULL;
932 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
933 
934 	if (flags &
935 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
936 		return -EINVAL;
937 
938 	if (flags & MPOL_F_MEMS_ALLOWED) {
939 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
940 			return -EINVAL;
941 		*policy = 0;	/* just so it's initialized */
942 		task_lock(current);
943 		*nmask  = cpuset_current_mems_allowed;
944 		task_unlock(current);
945 		return 0;
946 	}
947 
948 	if (flags & MPOL_F_ADDR) {
949 		pgoff_t ilx;		/* ignored here */
950 		/*
951 		 * Do NOT fall back to task policy if the
952 		 * vma/shared policy at addr is NULL.  We
953 		 * want to return MPOL_DEFAULT in this case.
954 		 */
955 		mmap_read_lock(mm);
956 		vma = vma_lookup(mm, addr);
957 		if (!vma) {
958 			mmap_read_unlock(mm);
959 			return -EFAULT;
960 		}
961 		pol = __get_vma_policy(vma, addr, &ilx);
962 	} else if (addr)
963 		return -EINVAL;
964 
965 	if (!pol)
966 		pol = &default_policy;	/* indicates default behavior */
967 
968 	if (flags & MPOL_F_NODE) {
969 		if (flags & MPOL_F_ADDR) {
970 			/*
971 			 * Take a refcount on the mpol, because we are about to
972 			 * drop the mmap_lock, after which only "pol" remains
973 			 * valid, "vma" is stale.
974 			 */
975 			pol_refcount = pol;
976 			vma = NULL;
977 			mpol_get(pol);
978 			mmap_read_unlock(mm);
979 			err = lookup_node(mm, addr);
980 			if (err < 0)
981 				goto out;
982 			*policy = err;
983 		} else if (pol == current->mempolicy &&
984 				pol->mode == MPOL_INTERLEAVE) {
985 			*policy = next_node_in(current->il_prev, pol->nodes);
986 		} else if (pol == current->mempolicy &&
987 				pol->mode == MPOL_WEIGHTED_INTERLEAVE) {
988 			if (current->il_weight)
989 				*policy = current->il_prev;
990 			else
991 				*policy = next_node_in(current->il_prev,
992 						       pol->nodes);
993 		} else {
994 			err = -EINVAL;
995 			goto out;
996 		}
997 	} else {
998 		*policy = pol == &default_policy ? MPOL_DEFAULT :
999 						pol->mode;
1000 		/*
1001 		 * Internal mempolicy flags must be masked off before exposing
1002 		 * the policy to userspace.
1003 		 */
1004 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
1005 	}
1006 
1007 	err = 0;
1008 	if (nmask) {
1009 		if (mpol_store_user_nodemask(pol)) {
1010 			*nmask = pol->w.user_nodemask;
1011 		} else {
1012 			task_lock(current);
1013 			get_policy_nodemask(pol, nmask);
1014 			task_unlock(current);
1015 		}
1016 	}
1017 
1018  out:
1019 	mpol_cond_put(pol);
1020 	if (vma)
1021 		mmap_read_unlock(mm);
1022 	if (pol_refcount)
1023 		mpol_put(pol_refcount);
1024 	return err;
1025 }
1026 
1027 #ifdef CONFIG_MIGRATION
1028 static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1029 				unsigned long flags)
1030 {
1031 	/*
1032 	 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
1033 	 * Choosing not to migrate a shared folio is not counted as a failure.
1034 	 *
1035 	 * To check if the folio is shared, ideally we want to make sure
1036 	 * every page is mapped to the same process. Doing that is very
1037 	 * expensive, so check the estimated sharers of the folio instead.
1038 	 */
1039 	if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) {
1040 		if (folio_isolate_lru(folio)) {
1041 			list_add_tail(&folio->lru, foliolist);
1042 			node_stat_mod_folio(folio,
1043 				NR_ISOLATED_ANON + folio_is_file_lru(folio),
1044 				folio_nr_pages(folio));
1045 		} else {
1046 			/*
1047 			 * Non-movable folio may reach here.  And, there may be
1048 			 * temporary off LRU folios or non-LRU movable folios.
1049 			 * Treat them as unmovable folios since they can't be
1050 			 * isolated, so they can't be moved at the moment.
1051 			 */
1052 			return false;
1053 		}
1054 	}
1055 	return true;
1056 }
1057 
1058 /*
1059  * Migrate pages from one node to a target node.
1060  * Returns error or the number of pages not migrated.
1061  */
1062 static long migrate_to_node(struct mm_struct *mm, int source, int dest,
1063 			    int flags)
1064 {
1065 	nodemask_t nmask;
1066 	struct vm_area_struct *vma;
1067 	LIST_HEAD(pagelist);
1068 	long nr_failed;
1069 	long err = 0;
1070 	struct migration_target_control mtc = {
1071 		.nid = dest,
1072 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1073 	};
1074 
1075 	nodes_clear(nmask);
1076 	node_set(source, nmask);
1077 
1078 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1079 
1080 	mmap_read_lock(mm);
1081 	vma = find_vma(mm, 0);
1082 
1083 	/*
1084 	 * This does not migrate the range, but isolates all pages that
1085 	 * need migration.  Between passing in the full user address
1086 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail,
1087 	 * but passes back the count of pages which could not be isolated.
1088 	 */
1089 	nr_failed = queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
1090 				      flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1091 	mmap_read_unlock(mm);
1092 
1093 	if (!list_empty(&pagelist)) {
1094 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1095 			(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1096 		if (err)
1097 			putback_movable_pages(&pagelist);
1098 	}
1099 
1100 	if (err >= 0)
1101 		err += nr_failed;
1102 	return err;
1103 }
1104 
1105 /*
1106  * Move pages between the two nodesets so as to preserve the physical
1107  * layout as much as possible.
1108  *
1109  * Returns the number of page that could not be moved.
1110  */
1111 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1112 		     const nodemask_t *to, int flags)
1113 {
1114 	long nr_failed = 0;
1115 	long err = 0;
1116 	nodemask_t tmp;
1117 
1118 	lru_cache_disable();
1119 
1120 	/*
1121 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1122 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1123 	 * bit in 'tmp', and return that <source, dest> pair for migration.
1124 	 * The pair of nodemasks 'to' and 'from' define the map.
1125 	 *
1126 	 * If no pair of bits is found that way, fallback to picking some
1127 	 * pair of 'source' and 'dest' bits that are not the same.  If the
1128 	 * 'source' and 'dest' bits are the same, this represents a node
1129 	 * that will be migrating to itself, so no pages need move.
1130 	 *
1131 	 * If no bits are left in 'tmp', or if all remaining bits left
1132 	 * in 'tmp' correspond to the same bit in 'to', return false
1133 	 * (nothing left to migrate).
1134 	 *
1135 	 * This lets us pick a pair of nodes to migrate between, such that
1136 	 * if possible the dest node is not already occupied by some other
1137 	 * source node, minimizing the risk of overloading the memory on a
1138 	 * node that would happen if we migrated incoming memory to a node
1139 	 * before migrating outgoing memory source that same node.
1140 	 *
1141 	 * A single scan of tmp is sufficient.  As we go, we remember the
1142 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1143 	 * that not only moved, but what's better, moved to an empty slot
1144 	 * (d is not set in tmp), then we break out then, with that pair.
1145 	 * Otherwise when we finish scanning from_tmp, we at least have the
1146 	 * most recent <s, d> pair that moved.  If we get all the way through
1147 	 * the scan of tmp without finding any node that moved, much less
1148 	 * moved to an empty node, then there is nothing left worth migrating.
1149 	 */
1150 
1151 	tmp = *from;
1152 	while (!nodes_empty(tmp)) {
1153 		int s, d;
1154 		int source = NUMA_NO_NODE;
1155 		int dest = 0;
1156 
1157 		for_each_node_mask(s, tmp) {
1158 
1159 			/*
1160 			 * do_migrate_pages() tries to maintain the relative
1161 			 * node relationship of the pages established between
1162 			 * threads and memory areas.
1163                          *
1164 			 * However if the number of source nodes is not equal to
1165 			 * the number of destination nodes we can not preserve
1166 			 * this node relative relationship.  In that case, skip
1167 			 * copying memory from a node that is in the destination
1168 			 * mask.
1169 			 *
1170 			 * Example: [2,3,4] -> [3,4,5] moves everything.
1171 			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1172 			 */
1173 
1174 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1175 						(node_isset(s, *to)))
1176 				continue;
1177 
1178 			d = node_remap(s, *from, *to);
1179 			if (s == d)
1180 				continue;
1181 
1182 			source = s;	/* Node moved. Memorize */
1183 			dest = d;
1184 
1185 			/* dest not in remaining from nodes? */
1186 			if (!node_isset(dest, tmp))
1187 				break;
1188 		}
1189 		if (source == NUMA_NO_NODE)
1190 			break;
1191 
1192 		node_clear(source, tmp);
1193 		err = migrate_to_node(mm, source, dest, flags);
1194 		if (err > 0)
1195 			nr_failed += err;
1196 		if (err < 0)
1197 			break;
1198 	}
1199 
1200 	lru_cache_enable();
1201 	if (err < 0)
1202 		return err;
1203 	return (nr_failed < INT_MAX) ? nr_failed : INT_MAX;
1204 }
1205 
1206 /*
1207  * Allocate a new folio for page migration, according to NUMA mempolicy.
1208  */
1209 static struct folio *alloc_migration_target_by_mpol(struct folio *src,
1210 						    unsigned long private)
1211 {
1212 	struct migration_mpol *mmpol = (struct migration_mpol *)private;
1213 	struct mempolicy *pol = mmpol->pol;
1214 	pgoff_t ilx = mmpol->ilx;
1215 	struct page *page;
1216 	unsigned int order;
1217 	int nid = numa_node_id();
1218 	gfp_t gfp;
1219 
1220 	order = folio_order(src);
1221 	ilx += src->index >> order;
1222 
1223 	if (folio_test_hugetlb(src)) {
1224 		nodemask_t *nodemask;
1225 		struct hstate *h;
1226 
1227 		h = folio_hstate(src);
1228 		gfp = htlb_alloc_mask(h);
1229 		nodemask = policy_nodemask(gfp, pol, ilx, &nid);
1230 		return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp);
1231 	}
1232 
1233 	if (folio_test_large(src))
1234 		gfp = GFP_TRANSHUGE;
1235 	else
1236 		gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL | __GFP_COMP;
1237 
1238 	page = alloc_pages_mpol(gfp, order, pol, ilx, nid);
1239 	return page_rmappable_folio(page);
1240 }
1241 #else
1242 
1243 static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1244 				unsigned long flags)
1245 {
1246 	return false;
1247 }
1248 
1249 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1250 		     const nodemask_t *to, int flags)
1251 {
1252 	return -ENOSYS;
1253 }
1254 
1255 static struct folio *alloc_migration_target_by_mpol(struct folio *src,
1256 						    unsigned long private)
1257 {
1258 	return NULL;
1259 }
1260 #endif
1261 
1262 static long do_mbind(unsigned long start, unsigned long len,
1263 		     unsigned short mode, unsigned short mode_flags,
1264 		     nodemask_t *nmask, unsigned long flags)
1265 {
1266 	struct mm_struct *mm = current->mm;
1267 	struct vm_area_struct *vma, *prev;
1268 	struct vma_iterator vmi;
1269 	struct migration_mpol mmpol;
1270 	struct mempolicy *new;
1271 	unsigned long end;
1272 	long err;
1273 	long nr_failed;
1274 	LIST_HEAD(pagelist);
1275 
1276 	if (flags & ~(unsigned long)MPOL_MF_VALID)
1277 		return -EINVAL;
1278 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1279 		return -EPERM;
1280 
1281 	if (start & ~PAGE_MASK)
1282 		return -EINVAL;
1283 
1284 	if (mode == MPOL_DEFAULT)
1285 		flags &= ~MPOL_MF_STRICT;
1286 
1287 	len = PAGE_ALIGN(len);
1288 	end = start + len;
1289 
1290 	if (end < start)
1291 		return -EINVAL;
1292 	if (end == start)
1293 		return 0;
1294 
1295 	new = mpol_new(mode, mode_flags, nmask);
1296 	if (IS_ERR(new))
1297 		return PTR_ERR(new);
1298 
1299 	/*
1300 	 * If we are using the default policy then operation
1301 	 * on discontinuous address spaces is okay after all
1302 	 */
1303 	if (!new)
1304 		flags |= MPOL_MF_DISCONTIG_OK;
1305 
1306 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1307 		lru_cache_disable();
1308 	{
1309 		NODEMASK_SCRATCH(scratch);
1310 		if (scratch) {
1311 			mmap_write_lock(mm);
1312 			err = mpol_set_nodemask(new, nmask, scratch);
1313 			if (err)
1314 				mmap_write_unlock(mm);
1315 		} else
1316 			err = -ENOMEM;
1317 		NODEMASK_SCRATCH_FREE(scratch);
1318 	}
1319 	if (err)
1320 		goto mpol_out;
1321 
1322 	/*
1323 	 * Lock the VMAs before scanning for pages to migrate,
1324 	 * to ensure we don't miss a concurrently inserted page.
1325 	 */
1326 	nr_failed = queue_pages_range(mm, start, end, nmask,
1327 			flags | MPOL_MF_INVERT | MPOL_MF_WRLOCK, &pagelist);
1328 
1329 	if (nr_failed < 0) {
1330 		err = nr_failed;
1331 		nr_failed = 0;
1332 	} else {
1333 		vma_iter_init(&vmi, mm, start);
1334 		prev = vma_prev(&vmi);
1335 		for_each_vma_range(vmi, vma, end) {
1336 			err = mbind_range(&vmi, vma, &prev, start, end, new);
1337 			if (err)
1338 				break;
1339 		}
1340 	}
1341 
1342 	if (!err && !list_empty(&pagelist)) {
1343 		/* Convert MPOL_DEFAULT's NULL to task or default policy */
1344 		if (!new) {
1345 			new = get_task_policy(current);
1346 			mpol_get(new);
1347 		}
1348 		mmpol.pol = new;
1349 		mmpol.ilx = 0;
1350 
1351 		/*
1352 		 * In the interleaved case, attempt to allocate on exactly the
1353 		 * targeted nodes, for the first VMA to be migrated; for later
1354 		 * VMAs, the nodes will still be interleaved from the targeted
1355 		 * nodemask, but one by one may be selected differently.
1356 		 */
1357 		if (new->mode == MPOL_INTERLEAVE ||
1358 		    new->mode == MPOL_WEIGHTED_INTERLEAVE) {
1359 			struct folio *folio;
1360 			unsigned int order;
1361 			unsigned long addr = -EFAULT;
1362 
1363 			list_for_each_entry(folio, &pagelist, lru) {
1364 				if (!folio_test_ksm(folio))
1365 					break;
1366 			}
1367 			if (!list_entry_is_head(folio, &pagelist, lru)) {
1368 				vma_iter_init(&vmi, mm, start);
1369 				for_each_vma_range(vmi, vma, end) {
1370 					addr = page_address_in_vma(
1371 						folio_page(folio, 0), vma);
1372 					if (addr != -EFAULT)
1373 						break;
1374 				}
1375 			}
1376 			if (addr != -EFAULT) {
1377 				order = folio_order(folio);
1378 				/* We already know the pol, but not the ilx */
1379 				mpol_cond_put(get_vma_policy(vma, addr, order,
1380 							     &mmpol.ilx));
1381 				/* Set base from which to increment by index */
1382 				mmpol.ilx -= folio->index >> order;
1383 			}
1384 		}
1385 	}
1386 
1387 	mmap_write_unlock(mm);
1388 
1389 	if (!err && !list_empty(&pagelist)) {
1390 		nr_failed |= migrate_pages(&pagelist,
1391 				alloc_migration_target_by_mpol, NULL,
1392 				(unsigned long)&mmpol, MIGRATE_SYNC,
1393 				MR_MEMPOLICY_MBIND, NULL);
1394 	}
1395 
1396 	if (nr_failed && (flags & MPOL_MF_STRICT))
1397 		err = -EIO;
1398 	if (!list_empty(&pagelist))
1399 		putback_movable_pages(&pagelist);
1400 mpol_out:
1401 	mpol_put(new);
1402 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1403 		lru_cache_enable();
1404 	return err;
1405 }
1406 
1407 /*
1408  * User space interface with variable sized bitmaps for nodelists.
1409  */
1410 static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1411 		      unsigned long maxnode)
1412 {
1413 	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1414 	int ret;
1415 
1416 	if (in_compat_syscall())
1417 		ret = compat_get_bitmap(mask,
1418 					(const compat_ulong_t __user *)nmask,
1419 					maxnode);
1420 	else
1421 		ret = copy_from_user(mask, nmask,
1422 				     nlongs * sizeof(unsigned long));
1423 
1424 	if (ret)
1425 		return -EFAULT;
1426 
1427 	if (maxnode % BITS_PER_LONG)
1428 		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1429 
1430 	return 0;
1431 }
1432 
1433 /* Copy a node mask from user space. */
1434 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1435 		     unsigned long maxnode)
1436 {
1437 	--maxnode;
1438 	nodes_clear(*nodes);
1439 	if (maxnode == 0 || !nmask)
1440 		return 0;
1441 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1442 		return -EINVAL;
1443 
1444 	/*
1445 	 * When the user specified more nodes than supported just check
1446 	 * if the non supported part is all zero, one word at a time,
1447 	 * starting at the end.
1448 	 */
1449 	while (maxnode > MAX_NUMNODES) {
1450 		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1451 		unsigned long t;
1452 
1453 		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
1454 			return -EFAULT;
1455 
1456 		if (maxnode - bits >= MAX_NUMNODES) {
1457 			maxnode -= bits;
1458 		} else {
1459 			maxnode = MAX_NUMNODES;
1460 			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1461 		}
1462 		if (t)
1463 			return -EINVAL;
1464 	}
1465 
1466 	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1467 }
1468 
1469 /* Copy a kernel node mask to user space */
1470 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1471 			      nodemask_t *nodes)
1472 {
1473 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1474 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1475 	bool compat = in_compat_syscall();
1476 
1477 	if (compat)
1478 		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
1479 
1480 	if (copy > nbytes) {
1481 		if (copy > PAGE_SIZE)
1482 			return -EINVAL;
1483 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1484 			return -EFAULT;
1485 		copy = nbytes;
1486 		maxnode = nr_node_ids;
1487 	}
1488 
1489 	if (compat)
1490 		return compat_put_bitmap((compat_ulong_t __user *)mask,
1491 					 nodes_addr(*nodes), maxnode);
1492 
1493 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1494 }
1495 
1496 /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1497 static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1498 {
1499 	*flags = *mode & MPOL_MODE_FLAGS;
1500 	*mode &= ~MPOL_MODE_FLAGS;
1501 
1502 	if ((unsigned int)(*mode) >=  MPOL_MAX)
1503 		return -EINVAL;
1504 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1505 		return -EINVAL;
1506 	if (*flags & MPOL_F_NUMA_BALANCING) {
1507 		if (*mode != MPOL_BIND)
1508 			return -EINVAL;
1509 		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
1510 	}
1511 	return 0;
1512 }
1513 
1514 static long kernel_mbind(unsigned long start, unsigned long len,
1515 			 unsigned long mode, const unsigned long __user *nmask,
1516 			 unsigned long maxnode, unsigned int flags)
1517 {
1518 	unsigned short mode_flags;
1519 	nodemask_t nodes;
1520 	int lmode = mode;
1521 	int err;
1522 
1523 	start = untagged_addr(start);
1524 	err = sanitize_mpol_flags(&lmode, &mode_flags);
1525 	if (err)
1526 		return err;
1527 
1528 	err = get_nodes(&nodes, nmask, maxnode);
1529 	if (err)
1530 		return err;
1531 
1532 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1533 }
1534 
1535 SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1536 		unsigned long, home_node, unsigned long, flags)
1537 {
1538 	struct mm_struct *mm = current->mm;
1539 	struct vm_area_struct *vma, *prev;
1540 	struct mempolicy *new, *old;
1541 	unsigned long end;
1542 	int err = -ENOENT;
1543 	VMA_ITERATOR(vmi, mm, start);
1544 
1545 	start = untagged_addr(start);
1546 	if (start & ~PAGE_MASK)
1547 		return -EINVAL;
1548 	/*
1549 	 * flags is used for future extension if any.
1550 	 */
1551 	if (flags != 0)
1552 		return -EINVAL;
1553 
1554 	/*
1555 	 * Check home_node is online to avoid accessing uninitialized
1556 	 * NODE_DATA.
1557 	 */
1558 	if (home_node >= MAX_NUMNODES || !node_online(home_node))
1559 		return -EINVAL;
1560 
1561 	len = PAGE_ALIGN(len);
1562 	end = start + len;
1563 
1564 	if (end < start)
1565 		return -EINVAL;
1566 	if (end == start)
1567 		return 0;
1568 	mmap_write_lock(mm);
1569 	prev = vma_prev(&vmi);
1570 	for_each_vma_range(vmi, vma, end) {
1571 		/*
1572 		 * If any vma in the range got policy other than MPOL_BIND
1573 		 * or MPOL_PREFERRED_MANY we return error. We don't reset
1574 		 * the home node for vmas we already updated before.
1575 		 */
1576 		old = vma_policy(vma);
1577 		if (!old) {
1578 			prev = vma;
1579 			continue;
1580 		}
1581 		if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
1582 			err = -EOPNOTSUPP;
1583 			break;
1584 		}
1585 		new = mpol_dup(old);
1586 		if (IS_ERR(new)) {
1587 			err = PTR_ERR(new);
1588 			break;
1589 		}
1590 
1591 		vma_start_write(vma);
1592 		new->home_node = home_node;
1593 		err = mbind_range(&vmi, vma, &prev, start, end, new);
1594 		mpol_put(new);
1595 		if (err)
1596 			break;
1597 	}
1598 	mmap_write_unlock(mm);
1599 	return err;
1600 }
1601 
1602 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1603 		unsigned long, mode, const unsigned long __user *, nmask,
1604 		unsigned long, maxnode, unsigned int, flags)
1605 {
1606 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1607 }
1608 
1609 /* Set the process memory policy */
1610 static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1611 				 unsigned long maxnode)
1612 {
1613 	unsigned short mode_flags;
1614 	nodemask_t nodes;
1615 	int lmode = mode;
1616 	int err;
1617 
1618 	err = sanitize_mpol_flags(&lmode, &mode_flags);
1619 	if (err)
1620 		return err;
1621 
1622 	err = get_nodes(&nodes, nmask, maxnode);
1623 	if (err)
1624 		return err;
1625 
1626 	return do_set_mempolicy(lmode, mode_flags, &nodes);
1627 }
1628 
1629 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1630 		unsigned long, maxnode)
1631 {
1632 	return kernel_set_mempolicy(mode, nmask, maxnode);
1633 }
1634 
1635 static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1636 				const unsigned long __user *old_nodes,
1637 				const unsigned long __user *new_nodes)
1638 {
1639 	struct mm_struct *mm = NULL;
1640 	struct task_struct *task;
1641 	nodemask_t task_nodes;
1642 	int err;
1643 	nodemask_t *old;
1644 	nodemask_t *new;
1645 	NODEMASK_SCRATCH(scratch);
1646 
1647 	if (!scratch)
1648 		return -ENOMEM;
1649 
1650 	old = &scratch->mask1;
1651 	new = &scratch->mask2;
1652 
1653 	err = get_nodes(old, old_nodes, maxnode);
1654 	if (err)
1655 		goto out;
1656 
1657 	err = get_nodes(new, new_nodes, maxnode);
1658 	if (err)
1659 		goto out;
1660 
1661 	/* Find the mm_struct */
1662 	rcu_read_lock();
1663 	task = pid ? find_task_by_vpid(pid) : current;
1664 	if (!task) {
1665 		rcu_read_unlock();
1666 		err = -ESRCH;
1667 		goto out;
1668 	}
1669 	get_task_struct(task);
1670 
1671 	err = -EINVAL;
1672 
1673 	/*
1674 	 * Check if this process has the right to modify the specified process.
1675 	 * Use the regular "ptrace_may_access()" checks.
1676 	 */
1677 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1678 		rcu_read_unlock();
1679 		err = -EPERM;
1680 		goto out_put;
1681 	}
1682 	rcu_read_unlock();
1683 
1684 	task_nodes = cpuset_mems_allowed(task);
1685 	/* Is the user allowed to access the target nodes? */
1686 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1687 		err = -EPERM;
1688 		goto out_put;
1689 	}
1690 
1691 	task_nodes = cpuset_mems_allowed(current);
1692 	nodes_and(*new, *new, task_nodes);
1693 	if (nodes_empty(*new))
1694 		goto out_put;
1695 
1696 	err = security_task_movememory(task);
1697 	if (err)
1698 		goto out_put;
1699 
1700 	mm = get_task_mm(task);
1701 	put_task_struct(task);
1702 
1703 	if (!mm) {
1704 		err = -EINVAL;
1705 		goto out;
1706 	}
1707 
1708 	err = do_migrate_pages(mm, old, new,
1709 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1710 
1711 	mmput(mm);
1712 out:
1713 	NODEMASK_SCRATCH_FREE(scratch);
1714 
1715 	return err;
1716 
1717 out_put:
1718 	put_task_struct(task);
1719 	goto out;
1720 }
1721 
1722 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1723 		const unsigned long __user *, old_nodes,
1724 		const unsigned long __user *, new_nodes)
1725 {
1726 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1727 }
1728 
1729 /* Retrieve NUMA policy */
1730 static int kernel_get_mempolicy(int __user *policy,
1731 				unsigned long __user *nmask,
1732 				unsigned long maxnode,
1733 				unsigned long addr,
1734 				unsigned long flags)
1735 {
1736 	int err;
1737 	int pval;
1738 	nodemask_t nodes;
1739 
1740 	if (nmask != NULL && maxnode < nr_node_ids)
1741 		return -EINVAL;
1742 
1743 	addr = untagged_addr(addr);
1744 
1745 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1746 
1747 	if (err)
1748 		return err;
1749 
1750 	if (policy && put_user(pval, policy))
1751 		return -EFAULT;
1752 
1753 	if (nmask)
1754 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1755 
1756 	return err;
1757 }
1758 
1759 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1760 		unsigned long __user *, nmask, unsigned long, maxnode,
1761 		unsigned long, addr, unsigned long, flags)
1762 {
1763 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1764 }
1765 
1766 bool vma_migratable(struct vm_area_struct *vma)
1767 {
1768 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1769 		return false;
1770 
1771 	/*
1772 	 * DAX device mappings require predictable access latency, so avoid
1773 	 * incurring periodic faults.
1774 	 */
1775 	if (vma_is_dax(vma))
1776 		return false;
1777 
1778 	if (is_vm_hugetlb_page(vma) &&
1779 		!hugepage_migration_supported(hstate_vma(vma)))
1780 		return false;
1781 
1782 	/*
1783 	 * Migration allocates pages in the highest zone. If we cannot
1784 	 * do so then migration (at least from node to node) is not
1785 	 * possible.
1786 	 */
1787 	if (vma->vm_file &&
1788 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1789 			< policy_zone)
1790 		return false;
1791 	return true;
1792 }
1793 
1794 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1795 				   unsigned long addr, pgoff_t *ilx)
1796 {
1797 	*ilx = 0;
1798 	return (vma->vm_ops && vma->vm_ops->get_policy) ?
1799 		vma->vm_ops->get_policy(vma, addr, ilx) : vma->vm_policy;
1800 }
1801 
1802 /*
1803  * get_vma_policy(@vma, @addr, @order, @ilx)
1804  * @vma: virtual memory area whose policy is sought
1805  * @addr: address in @vma for shared policy lookup
1806  * @order: 0, or appropriate huge_page_order for interleaving
1807  * @ilx: interleave index (output), for use only when MPOL_INTERLEAVE or
1808  *       MPOL_WEIGHTED_INTERLEAVE
1809  *
1810  * Returns effective policy for a VMA at specified address.
1811  * Falls back to current->mempolicy or system default policy, as necessary.
1812  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1813  * count--added by the get_policy() vm_op, as appropriate--to protect against
1814  * freeing by another task.  It is the caller's responsibility to free the
1815  * extra reference for shared policies.
1816  */
1817 struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1818 				 unsigned long addr, int order, pgoff_t *ilx)
1819 {
1820 	struct mempolicy *pol;
1821 
1822 	pol = __get_vma_policy(vma, addr, ilx);
1823 	if (!pol)
1824 		pol = get_task_policy(current);
1825 	if (pol->mode == MPOL_INTERLEAVE ||
1826 	    pol->mode == MPOL_WEIGHTED_INTERLEAVE) {
1827 		*ilx += vma->vm_pgoff >> order;
1828 		*ilx += (addr - vma->vm_start) >> (PAGE_SHIFT + order);
1829 	}
1830 	return pol;
1831 }
1832 
1833 bool vma_policy_mof(struct vm_area_struct *vma)
1834 {
1835 	struct mempolicy *pol;
1836 
1837 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1838 		bool ret = false;
1839 		pgoff_t ilx;		/* ignored here */
1840 
1841 		pol = vma->vm_ops->get_policy(vma, vma->vm_start, &ilx);
1842 		if (pol && (pol->flags & MPOL_F_MOF))
1843 			ret = true;
1844 		mpol_cond_put(pol);
1845 
1846 		return ret;
1847 	}
1848 
1849 	pol = vma->vm_policy;
1850 	if (!pol)
1851 		pol = get_task_policy(current);
1852 
1853 	return pol->flags & MPOL_F_MOF;
1854 }
1855 
1856 bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1857 {
1858 	enum zone_type dynamic_policy_zone = policy_zone;
1859 
1860 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1861 
1862 	/*
1863 	 * if policy->nodes has movable memory only,
1864 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1865 	 *
1866 	 * policy->nodes is intersect with node_states[N_MEMORY].
1867 	 * so if the following test fails, it implies
1868 	 * policy->nodes has movable memory only.
1869 	 */
1870 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1871 		dynamic_policy_zone = ZONE_MOVABLE;
1872 
1873 	return zone >= dynamic_policy_zone;
1874 }
1875 
1876 static unsigned int weighted_interleave_nodes(struct mempolicy *policy)
1877 {
1878 	unsigned int node;
1879 	unsigned int cpuset_mems_cookie;
1880 
1881 retry:
1882 	/* to prevent miscount use tsk->mems_allowed_seq to detect rebind */
1883 	cpuset_mems_cookie = read_mems_allowed_begin();
1884 	node = current->il_prev;
1885 	if (!current->il_weight || !node_isset(node, policy->nodes)) {
1886 		node = next_node_in(node, policy->nodes);
1887 		if (read_mems_allowed_retry(cpuset_mems_cookie))
1888 			goto retry;
1889 		if (node == MAX_NUMNODES)
1890 			return node;
1891 		current->il_prev = node;
1892 		current->il_weight = get_il_weight(node);
1893 	}
1894 	current->il_weight--;
1895 	return node;
1896 }
1897 
1898 /* Do dynamic interleaving for a process */
1899 static unsigned int interleave_nodes(struct mempolicy *policy)
1900 {
1901 	unsigned int nid;
1902 	unsigned int cpuset_mems_cookie;
1903 
1904 	/* to prevent miscount, use tsk->mems_allowed_seq to detect rebind */
1905 	do {
1906 		cpuset_mems_cookie = read_mems_allowed_begin();
1907 		nid = next_node_in(current->il_prev, policy->nodes);
1908 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
1909 
1910 	if (nid < MAX_NUMNODES)
1911 		current->il_prev = nid;
1912 	return nid;
1913 }
1914 
1915 /*
1916  * Depending on the memory policy provide a node from which to allocate the
1917  * next slab entry.
1918  */
1919 unsigned int mempolicy_slab_node(void)
1920 {
1921 	struct mempolicy *policy;
1922 	int node = numa_mem_id();
1923 
1924 	if (!in_task())
1925 		return node;
1926 
1927 	policy = current->mempolicy;
1928 	if (!policy)
1929 		return node;
1930 
1931 	switch (policy->mode) {
1932 	case MPOL_PREFERRED:
1933 		return first_node(policy->nodes);
1934 
1935 	case MPOL_INTERLEAVE:
1936 		return interleave_nodes(policy);
1937 
1938 	case MPOL_WEIGHTED_INTERLEAVE:
1939 		return weighted_interleave_nodes(policy);
1940 
1941 	case MPOL_BIND:
1942 	case MPOL_PREFERRED_MANY:
1943 	{
1944 		struct zoneref *z;
1945 
1946 		/*
1947 		 * Follow bind policy behavior and start allocation at the
1948 		 * first node.
1949 		 */
1950 		struct zonelist *zonelist;
1951 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1952 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1953 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1954 							&policy->nodes);
1955 		return z->zone ? zone_to_nid(z->zone) : node;
1956 	}
1957 	case MPOL_LOCAL:
1958 		return node;
1959 
1960 	default:
1961 		BUG();
1962 	}
1963 }
1964 
1965 static unsigned int read_once_policy_nodemask(struct mempolicy *pol,
1966 					      nodemask_t *mask)
1967 {
1968 	/*
1969 	 * barrier stabilizes the nodemask locally so that it can be iterated
1970 	 * over safely without concern for changes. Allocators validate node
1971 	 * selection does not violate mems_allowed, so this is safe.
1972 	 */
1973 	barrier();
1974 	memcpy(mask, &pol->nodes, sizeof(nodemask_t));
1975 	barrier();
1976 	return nodes_weight(*mask);
1977 }
1978 
1979 static unsigned int weighted_interleave_nid(struct mempolicy *pol, pgoff_t ilx)
1980 {
1981 	nodemask_t nodemask;
1982 	unsigned int target, nr_nodes;
1983 	u8 *table;
1984 	unsigned int weight_total = 0;
1985 	u8 weight;
1986 	int nid;
1987 
1988 	nr_nodes = read_once_policy_nodemask(pol, &nodemask);
1989 	if (!nr_nodes)
1990 		return numa_node_id();
1991 
1992 	rcu_read_lock();
1993 	table = rcu_dereference(iw_table);
1994 	/* calculate the total weight */
1995 	for_each_node_mask(nid, nodemask) {
1996 		/* detect system default usage */
1997 		weight = table ? table[nid] : 1;
1998 		weight = weight ? weight : 1;
1999 		weight_total += weight;
2000 	}
2001 
2002 	/* Calculate the node offset based on totals */
2003 	target = ilx % weight_total;
2004 	nid = first_node(nodemask);
2005 	while (target) {
2006 		/* detect system default usage */
2007 		weight = table ? table[nid] : 1;
2008 		weight = weight ? weight : 1;
2009 		if (target < weight)
2010 			break;
2011 		target -= weight;
2012 		nid = next_node_in(nid, nodemask);
2013 	}
2014 	rcu_read_unlock();
2015 	return nid;
2016 }
2017 
2018 /*
2019  * Do static interleaving for interleave index @ilx.  Returns the ilx'th
2020  * node in pol->nodes (starting from ilx=0), wrapping around if ilx
2021  * exceeds the number of present nodes.
2022  */
2023 static unsigned int interleave_nid(struct mempolicy *pol, pgoff_t ilx)
2024 {
2025 	nodemask_t nodemask;
2026 	unsigned int target, nnodes;
2027 	int i;
2028 	int nid;
2029 
2030 	nnodes = read_once_policy_nodemask(pol, &nodemask);
2031 	if (!nnodes)
2032 		return numa_node_id();
2033 	target = ilx % nnodes;
2034 	nid = first_node(nodemask);
2035 	for (i = 0; i < target; i++)
2036 		nid = next_node(nid, nodemask);
2037 	return nid;
2038 }
2039 
2040 /*
2041  * Return a nodemask representing a mempolicy for filtering nodes for
2042  * page allocation, together with preferred node id (or the input node id).
2043  */
2044 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
2045 				   pgoff_t ilx, int *nid)
2046 {
2047 	nodemask_t *nodemask = NULL;
2048 
2049 	switch (pol->mode) {
2050 	case MPOL_PREFERRED:
2051 		/* Override input node id */
2052 		*nid = first_node(pol->nodes);
2053 		break;
2054 	case MPOL_PREFERRED_MANY:
2055 		nodemask = &pol->nodes;
2056 		if (pol->home_node != NUMA_NO_NODE)
2057 			*nid = pol->home_node;
2058 		break;
2059 	case MPOL_BIND:
2060 		/* Restrict to nodemask (but not on lower zones) */
2061 		if (apply_policy_zone(pol, gfp_zone(gfp)) &&
2062 		    cpuset_nodemask_valid_mems_allowed(&pol->nodes))
2063 			nodemask = &pol->nodes;
2064 		if (pol->home_node != NUMA_NO_NODE)
2065 			*nid = pol->home_node;
2066 		/*
2067 		 * __GFP_THISNODE shouldn't even be used with the bind policy
2068 		 * because we might easily break the expectation to stay on the
2069 		 * requested node and not break the policy.
2070 		 */
2071 		WARN_ON_ONCE(gfp & __GFP_THISNODE);
2072 		break;
2073 	case MPOL_INTERLEAVE:
2074 		/* Override input node id */
2075 		*nid = (ilx == NO_INTERLEAVE_INDEX) ?
2076 			interleave_nodes(pol) : interleave_nid(pol, ilx);
2077 		break;
2078 	case MPOL_WEIGHTED_INTERLEAVE:
2079 		*nid = (ilx == NO_INTERLEAVE_INDEX) ?
2080 			weighted_interleave_nodes(pol) :
2081 			weighted_interleave_nid(pol, ilx);
2082 		break;
2083 	}
2084 
2085 	return nodemask;
2086 }
2087 
2088 #ifdef CONFIG_HUGETLBFS
2089 /*
2090  * huge_node(@vma, @addr, @gfp_flags, @mpol)
2091  * @vma: virtual memory area whose policy is sought
2092  * @addr: address in @vma for shared policy lookup and interleave policy
2093  * @gfp_flags: for requested zone
2094  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2095  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2096  *
2097  * Returns a nid suitable for a huge page allocation and a pointer
2098  * to the struct mempolicy for conditional unref after allocation.
2099  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2100  * to the mempolicy's @nodemask for filtering the zonelist.
2101  */
2102 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2103 		struct mempolicy **mpol, nodemask_t **nodemask)
2104 {
2105 	pgoff_t ilx;
2106 	int nid;
2107 
2108 	nid = numa_node_id();
2109 	*mpol = get_vma_policy(vma, addr, hstate_vma(vma)->order, &ilx);
2110 	*nodemask = policy_nodemask(gfp_flags, *mpol, ilx, &nid);
2111 	return nid;
2112 }
2113 
2114 /*
2115  * init_nodemask_of_mempolicy
2116  *
2117  * If the current task's mempolicy is "default" [NULL], return 'false'
2118  * to indicate default policy.  Otherwise, extract the policy nodemask
2119  * for 'bind' or 'interleave' policy into the argument nodemask, or
2120  * initialize the argument nodemask to contain the single node for
2121  * 'preferred' or 'local' policy and return 'true' to indicate presence
2122  * of non-default mempolicy.
2123  *
2124  * We don't bother with reference counting the mempolicy [mpol_get/put]
2125  * because the current task is examining it's own mempolicy and a task's
2126  * mempolicy is only ever changed by the task itself.
2127  *
2128  * N.B., it is the caller's responsibility to free a returned nodemask.
2129  */
2130 bool init_nodemask_of_mempolicy(nodemask_t *mask)
2131 {
2132 	struct mempolicy *mempolicy;
2133 
2134 	if (!(mask && current->mempolicy))
2135 		return false;
2136 
2137 	task_lock(current);
2138 	mempolicy = current->mempolicy;
2139 	switch (mempolicy->mode) {
2140 	case MPOL_PREFERRED:
2141 	case MPOL_PREFERRED_MANY:
2142 	case MPOL_BIND:
2143 	case MPOL_INTERLEAVE:
2144 	case MPOL_WEIGHTED_INTERLEAVE:
2145 		*mask = mempolicy->nodes;
2146 		break;
2147 
2148 	case MPOL_LOCAL:
2149 		init_nodemask_of_node(mask, numa_node_id());
2150 		break;
2151 
2152 	default:
2153 		BUG();
2154 	}
2155 	task_unlock(current);
2156 
2157 	return true;
2158 }
2159 #endif
2160 
2161 /*
2162  * mempolicy_in_oom_domain
2163  *
2164  * If tsk's mempolicy is "bind", check for intersection between mask and
2165  * the policy nodemask. Otherwise, return true for all other policies
2166  * including "interleave", as a tsk with "interleave" policy may have
2167  * memory allocated from all nodes in system.
2168  *
2169  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2170  */
2171 bool mempolicy_in_oom_domain(struct task_struct *tsk,
2172 					const nodemask_t *mask)
2173 {
2174 	struct mempolicy *mempolicy;
2175 	bool ret = true;
2176 
2177 	if (!mask)
2178 		return ret;
2179 
2180 	task_lock(tsk);
2181 	mempolicy = tsk->mempolicy;
2182 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2183 		ret = nodes_intersects(mempolicy->nodes, *mask);
2184 	task_unlock(tsk);
2185 
2186 	return ret;
2187 }
2188 
2189 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2190 						int nid, nodemask_t *nodemask)
2191 {
2192 	struct page *page;
2193 	gfp_t preferred_gfp;
2194 
2195 	/*
2196 	 * This is a two pass approach. The first pass will only try the
2197 	 * preferred nodes but skip the direct reclaim and allow the
2198 	 * allocation to fail, while the second pass will try all the
2199 	 * nodes in system.
2200 	 */
2201 	preferred_gfp = gfp | __GFP_NOWARN;
2202 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2203 	page = __alloc_pages(preferred_gfp, order, nid, nodemask);
2204 	if (!page)
2205 		page = __alloc_pages(gfp, order, nid, NULL);
2206 
2207 	return page;
2208 }
2209 
2210 /**
2211  * alloc_pages_mpol - Allocate pages according to NUMA mempolicy.
2212  * @gfp: GFP flags.
2213  * @order: Order of the page allocation.
2214  * @pol: Pointer to the NUMA mempolicy.
2215  * @ilx: Index for interleave mempolicy (also distinguishes alloc_pages()).
2216  * @nid: Preferred node (usually numa_node_id() but @mpol may override it).
2217  *
2218  * Return: The page on success or NULL if allocation fails.
2219  */
2220 struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
2221 		struct mempolicy *pol, pgoff_t ilx, int nid)
2222 {
2223 	nodemask_t *nodemask;
2224 	struct page *page;
2225 
2226 	nodemask = policy_nodemask(gfp, pol, ilx, &nid);
2227 
2228 	if (pol->mode == MPOL_PREFERRED_MANY)
2229 		return alloc_pages_preferred_many(gfp, order, nid, nodemask);
2230 
2231 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2232 	    /* filter "hugepage" allocation, unless from alloc_pages() */
2233 	    order == HPAGE_PMD_ORDER && ilx != NO_INTERLEAVE_INDEX) {
2234 		/*
2235 		 * For hugepage allocation and non-interleave policy which
2236 		 * allows the current node (or other explicitly preferred
2237 		 * node) we only try to allocate from the current/preferred
2238 		 * node and don't fall back to other nodes, as the cost of
2239 		 * remote accesses would likely offset THP benefits.
2240 		 *
2241 		 * If the policy is interleave or does not allow the current
2242 		 * node in its nodemask, we allocate the standard way.
2243 		 */
2244 		if (pol->mode != MPOL_INTERLEAVE &&
2245 		    pol->mode != MPOL_WEIGHTED_INTERLEAVE &&
2246 		    (!nodemask || node_isset(nid, *nodemask))) {
2247 			/*
2248 			 * First, try to allocate THP only on local node, but
2249 			 * don't reclaim unnecessarily, just compact.
2250 			 */
2251 			page = __alloc_pages_node(nid,
2252 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2253 			if (page || !(gfp & __GFP_DIRECT_RECLAIM))
2254 				return page;
2255 			/*
2256 			 * If hugepage allocations are configured to always
2257 			 * synchronous compact or the vma has been madvised
2258 			 * to prefer hugepage backing, retry allowing remote
2259 			 * memory with both reclaim and compact as well.
2260 			 */
2261 		}
2262 	}
2263 
2264 	page = __alloc_pages(gfp, order, nid, nodemask);
2265 
2266 	if (unlikely(pol->mode == MPOL_INTERLEAVE) && page) {
2267 		/* skip NUMA_INTERLEAVE_HIT update if numa stats is disabled */
2268 		if (static_branch_likely(&vm_numa_stat_key) &&
2269 		    page_to_nid(page) == nid) {
2270 			preempt_disable();
2271 			__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2272 			preempt_enable();
2273 		}
2274 	}
2275 
2276 	return page;
2277 }
2278 
2279 /**
2280  * vma_alloc_folio - Allocate a folio for a VMA.
2281  * @gfp: GFP flags.
2282  * @order: Order of the folio.
2283  * @vma: Pointer to VMA.
2284  * @addr: Virtual address of the allocation.  Must be inside @vma.
2285  * @hugepage: Unused (was: For hugepages try only preferred node if possible).
2286  *
2287  * Allocate a folio for a specific address in @vma, using the appropriate
2288  * NUMA policy.  The caller must hold the mmap_lock of the mm_struct of the
2289  * VMA to prevent it from going away.  Should be used for all allocations
2290  * for folios that will be mapped into user space, excepting hugetlbfs, and
2291  * excepting where direct use of alloc_pages_mpol() is more appropriate.
2292  *
2293  * Return: The folio on success or NULL if allocation fails.
2294  */
2295 struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2296 		unsigned long addr, bool hugepage)
2297 {
2298 	struct mempolicy *pol;
2299 	pgoff_t ilx;
2300 	struct page *page;
2301 
2302 	pol = get_vma_policy(vma, addr, order, &ilx);
2303 	page = alloc_pages_mpol(gfp | __GFP_COMP, order,
2304 				pol, ilx, numa_node_id());
2305 	mpol_cond_put(pol);
2306 	return page_rmappable_folio(page);
2307 }
2308 EXPORT_SYMBOL(vma_alloc_folio);
2309 
2310 /**
2311  * alloc_pages - Allocate pages.
2312  * @gfp: GFP flags.
2313  * @order: Power of two of number of pages to allocate.
2314  *
2315  * Allocate 1 << @order contiguous pages.  The physical address of the
2316  * first page is naturally aligned (eg an order-3 allocation will be aligned
2317  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
2318  * process is honoured when in process context.
2319  *
2320  * Context: Can be called from any context, providing the appropriate GFP
2321  * flags are used.
2322  * Return: The page on success or NULL if allocation fails.
2323  */
2324 struct page *alloc_pages(gfp_t gfp, unsigned int order)
2325 {
2326 	struct mempolicy *pol = &default_policy;
2327 
2328 	/*
2329 	 * No reference counting needed for current->mempolicy
2330 	 * nor system default_policy
2331 	 */
2332 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2333 		pol = get_task_policy(current);
2334 
2335 	return alloc_pages_mpol(gfp, order,
2336 				pol, NO_INTERLEAVE_INDEX, numa_node_id());
2337 }
2338 EXPORT_SYMBOL(alloc_pages);
2339 
2340 struct folio *folio_alloc(gfp_t gfp, unsigned int order)
2341 {
2342 	return page_rmappable_folio(alloc_pages(gfp | __GFP_COMP, order));
2343 }
2344 EXPORT_SYMBOL(folio_alloc);
2345 
2346 static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2347 		struct mempolicy *pol, unsigned long nr_pages,
2348 		struct page **page_array)
2349 {
2350 	int nodes;
2351 	unsigned long nr_pages_per_node;
2352 	int delta;
2353 	int i;
2354 	unsigned long nr_allocated;
2355 	unsigned long total_allocated = 0;
2356 
2357 	nodes = nodes_weight(pol->nodes);
2358 	nr_pages_per_node = nr_pages / nodes;
2359 	delta = nr_pages - nodes * nr_pages_per_node;
2360 
2361 	for (i = 0; i < nodes; i++) {
2362 		if (delta) {
2363 			nr_allocated = __alloc_pages_bulk(gfp,
2364 					interleave_nodes(pol), NULL,
2365 					nr_pages_per_node + 1, NULL,
2366 					page_array);
2367 			delta--;
2368 		} else {
2369 			nr_allocated = __alloc_pages_bulk(gfp,
2370 					interleave_nodes(pol), NULL,
2371 					nr_pages_per_node, NULL, page_array);
2372 		}
2373 
2374 		page_array += nr_allocated;
2375 		total_allocated += nr_allocated;
2376 	}
2377 
2378 	return total_allocated;
2379 }
2380 
2381 static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
2382 		struct mempolicy *pol, unsigned long nr_pages,
2383 		struct page **page_array)
2384 {
2385 	struct task_struct *me = current;
2386 	unsigned int cpuset_mems_cookie;
2387 	unsigned long total_allocated = 0;
2388 	unsigned long nr_allocated = 0;
2389 	unsigned long rounds;
2390 	unsigned long node_pages, delta;
2391 	u8 *table, *weights, weight;
2392 	unsigned int weight_total = 0;
2393 	unsigned long rem_pages = nr_pages;
2394 	nodemask_t nodes;
2395 	int nnodes, node;
2396 	int resume_node = MAX_NUMNODES - 1;
2397 	u8 resume_weight = 0;
2398 	int prev_node;
2399 	int i;
2400 
2401 	if (!nr_pages)
2402 		return 0;
2403 
2404 	/* read the nodes onto the stack, retry if done during rebind */
2405 	do {
2406 		cpuset_mems_cookie = read_mems_allowed_begin();
2407 		nnodes = read_once_policy_nodemask(pol, &nodes);
2408 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
2409 
2410 	/* if the nodemask has become invalid, we cannot do anything */
2411 	if (!nnodes)
2412 		return 0;
2413 
2414 	/* Continue allocating from most recent node and adjust the nr_pages */
2415 	node = me->il_prev;
2416 	weight = me->il_weight;
2417 	if (weight && node_isset(node, nodes)) {
2418 		node_pages = min(rem_pages, weight);
2419 		nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
2420 						  NULL, page_array);
2421 		page_array += nr_allocated;
2422 		total_allocated += nr_allocated;
2423 		/* if that's all the pages, no need to interleave */
2424 		if (rem_pages <= weight) {
2425 			me->il_weight -= rem_pages;
2426 			return total_allocated;
2427 		}
2428 		/* Otherwise we adjust remaining pages, continue from there */
2429 		rem_pages -= weight;
2430 	}
2431 	/* clear active weight in case of an allocation failure */
2432 	me->il_weight = 0;
2433 	prev_node = node;
2434 
2435 	/* create a local copy of node weights to operate on outside rcu */
2436 	weights = kzalloc(nr_node_ids, GFP_KERNEL);
2437 	if (!weights)
2438 		return total_allocated;
2439 
2440 	rcu_read_lock();
2441 	table = rcu_dereference(iw_table);
2442 	if (table)
2443 		memcpy(weights, table, nr_node_ids);
2444 	rcu_read_unlock();
2445 
2446 	/* calculate total, detect system default usage */
2447 	for_each_node_mask(node, nodes) {
2448 		if (!weights[node])
2449 			weights[node] = 1;
2450 		weight_total += weights[node];
2451 	}
2452 
2453 	/*
2454 	 * Calculate rounds/partial rounds to minimize __alloc_pages_bulk calls.
2455 	 * Track which node weighted interleave should resume from.
2456 	 *
2457 	 * if (rounds > 0) and (delta == 0), resume_node will always be
2458 	 * the node following prev_node and its weight.
2459 	 */
2460 	rounds = rem_pages / weight_total;
2461 	delta = rem_pages % weight_total;
2462 	resume_node = next_node_in(prev_node, nodes);
2463 	resume_weight = weights[resume_node];
2464 	for (i = 0; i < nnodes; i++) {
2465 		node = next_node_in(prev_node, nodes);
2466 		weight = weights[node];
2467 		node_pages = weight * rounds;
2468 		/* If a delta exists, add this node's portion of the delta */
2469 		if (delta > weight) {
2470 			node_pages += weight;
2471 			delta -= weight;
2472 		} else if (delta) {
2473 			/* when delta is depleted, resume from that node */
2474 			node_pages += delta;
2475 			resume_node = node;
2476 			resume_weight = weight - delta;
2477 			delta = 0;
2478 		}
2479 		/* node_pages can be 0 if an allocation fails and rounds == 0 */
2480 		if (!node_pages)
2481 			break;
2482 		nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
2483 						  NULL, page_array);
2484 		page_array += nr_allocated;
2485 		total_allocated += nr_allocated;
2486 		if (total_allocated == nr_pages)
2487 			break;
2488 		prev_node = node;
2489 	}
2490 	me->il_prev = resume_node;
2491 	me->il_weight = resume_weight;
2492 	kfree(weights);
2493 	return total_allocated;
2494 }
2495 
2496 static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2497 		struct mempolicy *pol, unsigned long nr_pages,
2498 		struct page **page_array)
2499 {
2500 	gfp_t preferred_gfp;
2501 	unsigned long nr_allocated = 0;
2502 
2503 	preferred_gfp = gfp | __GFP_NOWARN;
2504 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2505 
2506 	nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2507 					   nr_pages, NULL, page_array);
2508 
2509 	if (nr_allocated < nr_pages)
2510 		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2511 				nr_pages - nr_allocated, NULL,
2512 				page_array + nr_allocated);
2513 	return nr_allocated;
2514 }
2515 
2516 /* alloc pages bulk and mempolicy should be considered at the
2517  * same time in some situation such as vmalloc.
2518  *
2519  * It can accelerate memory allocation especially interleaving
2520  * allocate memory.
2521  */
2522 unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2523 		unsigned long nr_pages, struct page **page_array)
2524 {
2525 	struct mempolicy *pol = &default_policy;
2526 	nodemask_t *nodemask;
2527 	int nid;
2528 
2529 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2530 		pol = get_task_policy(current);
2531 
2532 	if (pol->mode == MPOL_INTERLEAVE)
2533 		return alloc_pages_bulk_array_interleave(gfp, pol,
2534 							 nr_pages, page_array);
2535 
2536 	if (pol->mode == MPOL_WEIGHTED_INTERLEAVE)
2537 		return alloc_pages_bulk_array_weighted_interleave(
2538 				  gfp, pol, nr_pages, page_array);
2539 
2540 	if (pol->mode == MPOL_PREFERRED_MANY)
2541 		return alloc_pages_bulk_array_preferred_many(gfp,
2542 				numa_node_id(), pol, nr_pages, page_array);
2543 
2544 	nid = numa_node_id();
2545 	nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
2546 	return __alloc_pages_bulk(gfp, nid, nodemask,
2547 				  nr_pages, NULL, page_array);
2548 }
2549 
2550 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2551 {
2552 	struct mempolicy *pol = mpol_dup(src->vm_policy);
2553 
2554 	if (IS_ERR(pol))
2555 		return PTR_ERR(pol);
2556 	dst->vm_policy = pol;
2557 	return 0;
2558 }
2559 
2560 /*
2561  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2562  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
2563  * with the mems_allowed returned by cpuset_mems_allowed().  This
2564  * keeps mempolicies cpuset relative after its cpuset moves.  See
2565  * further kernel/cpuset.c update_nodemask().
2566  *
2567  * current's mempolicy may be rebinded by the other task(the task that changes
2568  * cpuset's mems), so we needn't do rebind work for current task.
2569  */
2570 
2571 /* Slow path of a mempolicy duplicate */
2572 struct mempolicy *__mpol_dup(struct mempolicy *old)
2573 {
2574 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2575 
2576 	if (!new)
2577 		return ERR_PTR(-ENOMEM);
2578 
2579 	/* task's mempolicy is protected by alloc_lock */
2580 	if (old == current->mempolicy) {
2581 		task_lock(current);
2582 		*new = *old;
2583 		task_unlock(current);
2584 	} else
2585 		*new = *old;
2586 
2587 	if (current_cpuset_is_being_rebound()) {
2588 		nodemask_t mems = cpuset_mems_allowed(current);
2589 		mpol_rebind_policy(new, &mems);
2590 	}
2591 	atomic_set(&new->refcnt, 1);
2592 	return new;
2593 }
2594 
2595 /* Slow path of a mempolicy comparison */
2596 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2597 {
2598 	if (!a || !b)
2599 		return false;
2600 	if (a->mode != b->mode)
2601 		return false;
2602 	if (a->flags != b->flags)
2603 		return false;
2604 	if (a->home_node != b->home_node)
2605 		return false;
2606 	if (mpol_store_user_nodemask(a))
2607 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2608 			return false;
2609 
2610 	switch (a->mode) {
2611 	case MPOL_BIND:
2612 	case MPOL_INTERLEAVE:
2613 	case MPOL_PREFERRED:
2614 	case MPOL_PREFERRED_MANY:
2615 	case MPOL_WEIGHTED_INTERLEAVE:
2616 		return !!nodes_equal(a->nodes, b->nodes);
2617 	case MPOL_LOCAL:
2618 		return true;
2619 	default:
2620 		BUG();
2621 		return false;
2622 	}
2623 }
2624 
2625 /*
2626  * Shared memory backing store policy support.
2627  *
2628  * Remember policies even when nobody has shared memory mapped.
2629  * The policies are kept in Red-Black tree linked from the inode.
2630  * They are protected by the sp->lock rwlock, which should be held
2631  * for any accesses to the tree.
2632  */
2633 
2634 /*
2635  * lookup first element intersecting start-end.  Caller holds sp->lock for
2636  * reading or for writing
2637  */
2638 static struct sp_node *sp_lookup(struct shared_policy *sp,
2639 					pgoff_t start, pgoff_t end)
2640 {
2641 	struct rb_node *n = sp->root.rb_node;
2642 
2643 	while (n) {
2644 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2645 
2646 		if (start >= p->end)
2647 			n = n->rb_right;
2648 		else if (end <= p->start)
2649 			n = n->rb_left;
2650 		else
2651 			break;
2652 	}
2653 	if (!n)
2654 		return NULL;
2655 	for (;;) {
2656 		struct sp_node *w = NULL;
2657 		struct rb_node *prev = rb_prev(n);
2658 		if (!prev)
2659 			break;
2660 		w = rb_entry(prev, struct sp_node, nd);
2661 		if (w->end <= start)
2662 			break;
2663 		n = prev;
2664 	}
2665 	return rb_entry(n, struct sp_node, nd);
2666 }
2667 
2668 /*
2669  * Insert a new shared policy into the list.  Caller holds sp->lock for
2670  * writing.
2671  */
2672 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2673 {
2674 	struct rb_node **p = &sp->root.rb_node;
2675 	struct rb_node *parent = NULL;
2676 	struct sp_node *nd;
2677 
2678 	while (*p) {
2679 		parent = *p;
2680 		nd = rb_entry(parent, struct sp_node, nd);
2681 		if (new->start < nd->start)
2682 			p = &(*p)->rb_left;
2683 		else if (new->end > nd->end)
2684 			p = &(*p)->rb_right;
2685 		else
2686 			BUG();
2687 	}
2688 	rb_link_node(&new->nd, parent, p);
2689 	rb_insert_color(&new->nd, &sp->root);
2690 }
2691 
2692 /* Find shared policy intersecting idx */
2693 struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
2694 						pgoff_t idx)
2695 {
2696 	struct mempolicy *pol = NULL;
2697 	struct sp_node *sn;
2698 
2699 	if (!sp->root.rb_node)
2700 		return NULL;
2701 	read_lock(&sp->lock);
2702 	sn = sp_lookup(sp, idx, idx+1);
2703 	if (sn) {
2704 		mpol_get(sn->policy);
2705 		pol = sn->policy;
2706 	}
2707 	read_unlock(&sp->lock);
2708 	return pol;
2709 }
2710 
2711 static void sp_free(struct sp_node *n)
2712 {
2713 	mpol_put(n->policy);
2714 	kmem_cache_free(sn_cache, n);
2715 }
2716 
2717 /**
2718  * mpol_misplaced - check whether current folio node is valid in policy
2719  *
2720  * @folio: folio to be checked
2721  * @vma: vm area where folio mapped
2722  * @addr: virtual address in @vma for shared policy lookup and interleave policy
2723  *
2724  * Lookup current policy node id for vma,addr and "compare to" folio's
2725  * node id.  Policy determination "mimics" alloc_page_vma().
2726  * Called from fault path where we know the vma and faulting address.
2727  *
2728  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2729  * policy, or a suitable node ID to allocate a replacement folio from.
2730  */
2731 int mpol_misplaced(struct folio *folio, struct vm_area_struct *vma,
2732 		   unsigned long addr)
2733 {
2734 	struct mempolicy *pol;
2735 	pgoff_t ilx;
2736 	struct zoneref *z;
2737 	int curnid = folio_nid(folio);
2738 	int thiscpu = raw_smp_processor_id();
2739 	int thisnid = cpu_to_node(thiscpu);
2740 	int polnid = NUMA_NO_NODE;
2741 	int ret = NUMA_NO_NODE;
2742 
2743 	pol = get_vma_policy(vma, addr, folio_order(folio), &ilx);
2744 	if (!(pol->flags & MPOL_F_MOF))
2745 		goto out;
2746 
2747 	switch (pol->mode) {
2748 	case MPOL_INTERLEAVE:
2749 		polnid = interleave_nid(pol, ilx);
2750 		break;
2751 
2752 	case MPOL_WEIGHTED_INTERLEAVE:
2753 		polnid = weighted_interleave_nid(pol, ilx);
2754 		break;
2755 
2756 	case MPOL_PREFERRED:
2757 		if (node_isset(curnid, pol->nodes))
2758 			goto out;
2759 		polnid = first_node(pol->nodes);
2760 		break;
2761 
2762 	case MPOL_LOCAL:
2763 		polnid = numa_node_id();
2764 		break;
2765 
2766 	case MPOL_BIND:
2767 		/* Optimize placement among multiple nodes via NUMA balancing */
2768 		if (pol->flags & MPOL_F_MORON) {
2769 			if (node_isset(thisnid, pol->nodes))
2770 				break;
2771 			goto out;
2772 		}
2773 		fallthrough;
2774 
2775 	case MPOL_PREFERRED_MANY:
2776 		/*
2777 		 * use current page if in policy nodemask,
2778 		 * else select nearest allowed node, if any.
2779 		 * If no allowed nodes, use current [!misplaced].
2780 		 */
2781 		if (node_isset(curnid, pol->nodes))
2782 			goto out;
2783 		z = first_zones_zonelist(
2784 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2785 				gfp_zone(GFP_HIGHUSER),
2786 				&pol->nodes);
2787 		polnid = zone_to_nid(z->zone);
2788 		break;
2789 
2790 	default:
2791 		BUG();
2792 	}
2793 
2794 	/* Migrate the folio towards the node whose CPU is referencing it */
2795 	if (pol->flags & MPOL_F_MORON) {
2796 		polnid = thisnid;
2797 
2798 		if (!should_numa_migrate_memory(current, folio, curnid,
2799 						thiscpu))
2800 			goto out;
2801 	}
2802 
2803 	if (curnid != polnid)
2804 		ret = polnid;
2805 out:
2806 	mpol_cond_put(pol);
2807 
2808 	return ret;
2809 }
2810 
2811 /*
2812  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2813  * dropped after task->mempolicy is set to NULL so that any allocation done as
2814  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2815  * policy.
2816  */
2817 void mpol_put_task_policy(struct task_struct *task)
2818 {
2819 	struct mempolicy *pol;
2820 
2821 	task_lock(task);
2822 	pol = task->mempolicy;
2823 	task->mempolicy = NULL;
2824 	task_unlock(task);
2825 	mpol_put(pol);
2826 }
2827 
2828 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2829 {
2830 	rb_erase(&n->nd, &sp->root);
2831 	sp_free(n);
2832 }
2833 
2834 static void sp_node_init(struct sp_node *node, unsigned long start,
2835 			unsigned long end, struct mempolicy *pol)
2836 {
2837 	node->start = start;
2838 	node->end = end;
2839 	node->policy = pol;
2840 }
2841 
2842 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2843 				struct mempolicy *pol)
2844 {
2845 	struct sp_node *n;
2846 	struct mempolicy *newpol;
2847 
2848 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2849 	if (!n)
2850 		return NULL;
2851 
2852 	newpol = mpol_dup(pol);
2853 	if (IS_ERR(newpol)) {
2854 		kmem_cache_free(sn_cache, n);
2855 		return NULL;
2856 	}
2857 	newpol->flags |= MPOL_F_SHARED;
2858 	sp_node_init(n, start, end, newpol);
2859 
2860 	return n;
2861 }
2862 
2863 /* Replace a policy range. */
2864 static int shared_policy_replace(struct shared_policy *sp, pgoff_t start,
2865 				 pgoff_t end, struct sp_node *new)
2866 {
2867 	struct sp_node *n;
2868 	struct sp_node *n_new = NULL;
2869 	struct mempolicy *mpol_new = NULL;
2870 	int ret = 0;
2871 
2872 restart:
2873 	write_lock(&sp->lock);
2874 	n = sp_lookup(sp, start, end);
2875 	/* Take care of old policies in the same range. */
2876 	while (n && n->start < end) {
2877 		struct rb_node *next = rb_next(&n->nd);
2878 		if (n->start >= start) {
2879 			if (n->end <= end)
2880 				sp_delete(sp, n);
2881 			else
2882 				n->start = end;
2883 		} else {
2884 			/* Old policy spanning whole new range. */
2885 			if (n->end > end) {
2886 				if (!n_new)
2887 					goto alloc_new;
2888 
2889 				*mpol_new = *n->policy;
2890 				atomic_set(&mpol_new->refcnt, 1);
2891 				sp_node_init(n_new, end, n->end, mpol_new);
2892 				n->end = start;
2893 				sp_insert(sp, n_new);
2894 				n_new = NULL;
2895 				mpol_new = NULL;
2896 				break;
2897 			} else
2898 				n->end = start;
2899 		}
2900 		if (!next)
2901 			break;
2902 		n = rb_entry(next, struct sp_node, nd);
2903 	}
2904 	if (new)
2905 		sp_insert(sp, new);
2906 	write_unlock(&sp->lock);
2907 	ret = 0;
2908 
2909 err_out:
2910 	if (mpol_new)
2911 		mpol_put(mpol_new);
2912 	if (n_new)
2913 		kmem_cache_free(sn_cache, n_new);
2914 
2915 	return ret;
2916 
2917 alloc_new:
2918 	write_unlock(&sp->lock);
2919 	ret = -ENOMEM;
2920 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2921 	if (!n_new)
2922 		goto err_out;
2923 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2924 	if (!mpol_new)
2925 		goto err_out;
2926 	atomic_set(&mpol_new->refcnt, 1);
2927 	goto restart;
2928 }
2929 
2930 /**
2931  * mpol_shared_policy_init - initialize shared policy for inode
2932  * @sp: pointer to inode shared policy
2933  * @mpol:  struct mempolicy to install
2934  *
2935  * Install non-NULL @mpol in inode's shared policy rb-tree.
2936  * On entry, the current task has a reference on a non-NULL @mpol.
2937  * This must be released on exit.
2938  * This is called at get_inode() calls and we can use GFP_KERNEL.
2939  */
2940 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2941 {
2942 	int ret;
2943 
2944 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2945 	rwlock_init(&sp->lock);
2946 
2947 	if (mpol) {
2948 		struct sp_node *sn;
2949 		struct mempolicy *npol;
2950 		NODEMASK_SCRATCH(scratch);
2951 
2952 		if (!scratch)
2953 			goto put_mpol;
2954 
2955 		/* contextualize the tmpfs mount point mempolicy to this file */
2956 		npol = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2957 		if (IS_ERR(npol))
2958 			goto free_scratch; /* no valid nodemask intersection */
2959 
2960 		task_lock(current);
2961 		ret = mpol_set_nodemask(npol, &mpol->w.user_nodemask, scratch);
2962 		task_unlock(current);
2963 		if (ret)
2964 			goto put_npol;
2965 
2966 		/* alloc node covering entire file; adds ref to file's npol */
2967 		sn = sp_alloc(0, MAX_LFS_FILESIZE >> PAGE_SHIFT, npol);
2968 		if (sn)
2969 			sp_insert(sp, sn);
2970 put_npol:
2971 		mpol_put(npol);	/* drop initial ref on file's npol */
2972 free_scratch:
2973 		NODEMASK_SCRATCH_FREE(scratch);
2974 put_mpol:
2975 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2976 	}
2977 }
2978 
2979 int mpol_set_shared_policy(struct shared_policy *sp,
2980 			struct vm_area_struct *vma, struct mempolicy *pol)
2981 {
2982 	int err;
2983 	struct sp_node *new = NULL;
2984 	unsigned long sz = vma_pages(vma);
2985 
2986 	if (pol) {
2987 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, pol);
2988 		if (!new)
2989 			return -ENOMEM;
2990 	}
2991 	err = shared_policy_replace(sp, vma->vm_pgoff, vma->vm_pgoff + sz, new);
2992 	if (err && new)
2993 		sp_free(new);
2994 	return err;
2995 }
2996 
2997 /* Free a backing policy store on inode delete. */
2998 void mpol_free_shared_policy(struct shared_policy *sp)
2999 {
3000 	struct sp_node *n;
3001 	struct rb_node *next;
3002 
3003 	if (!sp->root.rb_node)
3004 		return;
3005 	write_lock(&sp->lock);
3006 	next = rb_first(&sp->root);
3007 	while (next) {
3008 		n = rb_entry(next, struct sp_node, nd);
3009 		next = rb_next(&n->nd);
3010 		sp_delete(sp, n);
3011 	}
3012 	write_unlock(&sp->lock);
3013 }
3014 
3015 #ifdef CONFIG_NUMA_BALANCING
3016 static int __initdata numabalancing_override;
3017 
3018 static void __init check_numabalancing_enable(void)
3019 {
3020 	bool numabalancing_default = false;
3021 
3022 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
3023 		numabalancing_default = true;
3024 
3025 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
3026 	if (numabalancing_override)
3027 		set_numabalancing_state(numabalancing_override == 1);
3028 
3029 	if (num_online_nodes() > 1 && !numabalancing_override) {
3030 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
3031 			numabalancing_default ? "Enabling" : "Disabling");
3032 		set_numabalancing_state(numabalancing_default);
3033 	}
3034 }
3035 
3036 static int __init setup_numabalancing(char *str)
3037 {
3038 	int ret = 0;
3039 	if (!str)
3040 		goto out;
3041 
3042 	if (!strcmp(str, "enable")) {
3043 		numabalancing_override = 1;
3044 		ret = 1;
3045 	} else if (!strcmp(str, "disable")) {
3046 		numabalancing_override = -1;
3047 		ret = 1;
3048 	}
3049 out:
3050 	if (!ret)
3051 		pr_warn("Unable to parse numa_balancing=\n");
3052 
3053 	return ret;
3054 }
3055 __setup("numa_balancing=", setup_numabalancing);
3056 #else
3057 static inline void __init check_numabalancing_enable(void)
3058 {
3059 }
3060 #endif /* CONFIG_NUMA_BALANCING */
3061 
3062 void __init numa_policy_init(void)
3063 {
3064 	nodemask_t interleave_nodes;
3065 	unsigned long largest = 0;
3066 	int nid, prefer = 0;
3067 
3068 	policy_cache = kmem_cache_create("numa_policy",
3069 					 sizeof(struct mempolicy),
3070 					 0, SLAB_PANIC, NULL);
3071 
3072 	sn_cache = kmem_cache_create("shared_policy_node",
3073 				     sizeof(struct sp_node),
3074 				     0, SLAB_PANIC, NULL);
3075 
3076 	for_each_node(nid) {
3077 		preferred_node_policy[nid] = (struct mempolicy) {
3078 			.refcnt = ATOMIC_INIT(1),
3079 			.mode = MPOL_PREFERRED,
3080 			.flags = MPOL_F_MOF | MPOL_F_MORON,
3081 			.nodes = nodemask_of_node(nid),
3082 		};
3083 	}
3084 
3085 	/*
3086 	 * Set interleaving policy for system init. Interleaving is only
3087 	 * enabled across suitably sized nodes (default is >= 16MB), or
3088 	 * fall back to the largest node if they're all smaller.
3089 	 */
3090 	nodes_clear(interleave_nodes);
3091 	for_each_node_state(nid, N_MEMORY) {
3092 		unsigned long total_pages = node_present_pages(nid);
3093 
3094 		/* Preserve the largest node */
3095 		if (largest < total_pages) {
3096 			largest = total_pages;
3097 			prefer = nid;
3098 		}
3099 
3100 		/* Interleave this node? */
3101 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
3102 			node_set(nid, interleave_nodes);
3103 	}
3104 
3105 	/* All too small, use the largest */
3106 	if (unlikely(nodes_empty(interleave_nodes)))
3107 		node_set(prefer, interleave_nodes);
3108 
3109 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
3110 		pr_err("%s: interleaving failed\n", __func__);
3111 
3112 	check_numabalancing_enable();
3113 }
3114 
3115 /* Reset policy of current process to default */
3116 void numa_default_policy(void)
3117 {
3118 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
3119 }
3120 
3121 /*
3122  * Parse and format mempolicy from/to strings
3123  */
3124 static const char * const policy_modes[] =
3125 {
3126 	[MPOL_DEFAULT]    = "default",
3127 	[MPOL_PREFERRED]  = "prefer",
3128 	[MPOL_BIND]       = "bind",
3129 	[MPOL_INTERLEAVE] = "interleave",
3130 	[MPOL_WEIGHTED_INTERLEAVE] = "weighted interleave",
3131 	[MPOL_LOCAL]      = "local",
3132 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
3133 };
3134 
3135 #ifdef CONFIG_TMPFS
3136 /**
3137  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
3138  * @str:  string containing mempolicy to parse
3139  * @mpol:  pointer to struct mempolicy pointer, returned on success.
3140  *
3141  * Format of input:
3142  *	<mode>[=<flags>][:<nodelist>]
3143  *
3144  * Return: %0 on success, else %1
3145  */
3146 int mpol_parse_str(char *str, struct mempolicy **mpol)
3147 {
3148 	struct mempolicy *new = NULL;
3149 	unsigned short mode_flags;
3150 	nodemask_t nodes;
3151 	char *nodelist = strchr(str, ':');
3152 	char *flags = strchr(str, '=');
3153 	int err = 1, mode;
3154 
3155 	if (flags)
3156 		*flags++ = '\0';	/* terminate mode string */
3157 
3158 	if (nodelist) {
3159 		/* NUL-terminate mode or flags string */
3160 		*nodelist++ = '\0';
3161 		if (nodelist_parse(nodelist, nodes))
3162 			goto out;
3163 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
3164 			goto out;
3165 	} else
3166 		nodes_clear(nodes);
3167 
3168 	mode = match_string(policy_modes, MPOL_MAX, str);
3169 	if (mode < 0)
3170 		goto out;
3171 
3172 	switch (mode) {
3173 	case MPOL_PREFERRED:
3174 		/*
3175 		 * Insist on a nodelist of one node only, although later
3176 		 * we use first_node(nodes) to grab a single node, so here
3177 		 * nodelist (or nodes) cannot be empty.
3178 		 */
3179 		if (nodelist) {
3180 			char *rest = nodelist;
3181 			while (isdigit(*rest))
3182 				rest++;
3183 			if (*rest)
3184 				goto out;
3185 			if (nodes_empty(nodes))
3186 				goto out;
3187 		}
3188 		break;
3189 	case MPOL_INTERLEAVE:
3190 	case MPOL_WEIGHTED_INTERLEAVE:
3191 		/*
3192 		 * Default to online nodes with memory if no nodelist
3193 		 */
3194 		if (!nodelist)
3195 			nodes = node_states[N_MEMORY];
3196 		break;
3197 	case MPOL_LOCAL:
3198 		/*
3199 		 * Don't allow a nodelist;  mpol_new() checks flags
3200 		 */
3201 		if (nodelist)
3202 			goto out;
3203 		break;
3204 	case MPOL_DEFAULT:
3205 		/*
3206 		 * Insist on a empty nodelist
3207 		 */
3208 		if (!nodelist)
3209 			err = 0;
3210 		goto out;
3211 	case MPOL_PREFERRED_MANY:
3212 	case MPOL_BIND:
3213 		/*
3214 		 * Insist on a nodelist
3215 		 */
3216 		if (!nodelist)
3217 			goto out;
3218 	}
3219 
3220 	mode_flags = 0;
3221 	if (flags) {
3222 		/*
3223 		 * Currently, we only support two mutually exclusive
3224 		 * mode flags.
3225 		 */
3226 		if (!strcmp(flags, "static"))
3227 			mode_flags |= MPOL_F_STATIC_NODES;
3228 		else if (!strcmp(flags, "relative"))
3229 			mode_flags |= MPOL_F_RELATIVE_NODES;
3230 		else
3231 			goto out;
3232 	}
3233 
3234 	new = mpol_new(mode, mode_flags, &nodes);
3235 	if (IS_ERR(new))
3236 		goto out;
3237 
3238 	/*
3239 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
3240 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3241 	 */
3242 	if (mode != MPOL_PREFERRED) {
3243 		new->nodes = nodes;
3244 	} else if (nodelist) {
3245 		nodes_clear(new->nodes);
3246 		node_set(first_node(nodes), new->nodes);
3247 	} else {
3248 		new->mode = MPOL_LOCAL;
3249 	}
3250 
3251 	/*
3252 	 * Save nodes for contextualization: this will be used to "clone"
3253 	 * the mempolicy in a specific context [cpuset] at a later time.
3254 	 */
3255 	new->w.user_nodemask = nodes;
3256 
3257 	err = 0;
3258 
3259 out:
3260 	/* Restore string for error message */
3261 	if (nodelist)
3262 		*--nodelist = ':';
3263 	if (flags)
3264 		*--flags = '=';
3265 	if (!err)
3266 		*mpol = new;
3267 	return err;
3268 }
3269 #endif /* CONFIG_TMPFS */
3270 
3271 /**
3272  * mpol_to_str - format a mempolicy structure for printing
3273  * @buffer:  to contain formatted mempolicy string
3274  * @maxlen:  length of @buffer
3275  * @pol:  pointer to mempolicy to be formatted
3276  *
3277  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3278  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3279  * longest flag, "relative", and to display at least a few node ids.
3280  */
3281 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3282 {
3283 	char *p = buffer;
3284 	nodemask_t nodes = NODE_MASK_NONE;
3285 	unsigned short mode = MPOL_DEFAULT;
3286 	unsigned short flags = 0;
3287 
3288 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3289 		mode = pol->mode;
3290 		flags = pol->flags;
3291 	}
3292 
3293 	switch (mode) {
3294 	case MPOL_DEFAULT:
3295 	case MPOL_LOCAL:
3296 		break;
3297 	case MPOL_PREFERRED:
3298 	case MPOL_PREFERRED_MANY:
3299 	case MPOL_BIND:
3300 	case MPOL_INTERLEAVE:
3301 	case MPOL_WEIGHTED_INTERLEAVE:
3302 		nodes = pol->nodes;
3303 		break;
3304 	default:
3305 		WARN_ON_ONCE(1);
3306 		snprintf(p, maxlen, "unknown");
3307 		return;
3308 	}
3309 
3310 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3311 
3312 	if (flags & MPOL_MODE_FLAGS) {
3313 		p += snprintf(p, buffer + maxlen - p, "=");
3314 
3315 		/*
3316 		 * Currently, the only defined flags are mutually exclusive
3317 		 */
3318 		if (flags & MPOL_F_STATIC_NODES)
3319 			p += snprintf(p, buffer + maxlen - p, "static");
3320 		else if (flags & MPOL_F_RELATIVE_NODES)
3321 			p += snprintf(p, buffer + maxlen - p, "relative");
3322 	}
3323 
3324 	if (!nodes_empty(nodes))
3325 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3326 			       nodemask_pr_args(&nodes));
3327 }
3328 
3329 #ifdef CONFIG_SYSFS
3330 struct iw_node_attr {
3331 	struct kobj_attribute kobj_attr;
3332 	int nid;
3333 };
3334 
3335 static ssize_t node_show(struct kobject *kobj, struct kobj_attribute *attr,
3336 			 char *buf)
3337 {
3338 	struct iw_node_attr *node_attr;
3339 	u8 weight;
3340 
3341 	node_attr = container_of(attr, struct iw_node_attr, kobj_attr);
3342 	weight = get_il_weight(node_attr->nid);
3343 	return sysfs_emit(buf, "%d\n", weight);
3344 }
3345 
3346 static ssize_t node_store(struct kobject *kobj, struct kobj_attribute *attr,
3347 			  const char *buf, size_t count)
3348 {
3349 	struct iw_node_attr *node_attr;
3350 	u8 *new;
3351 	u8 *old;
3352 	u8 weight = 0;
3353 
3354 	node_attr = container_of(attr, struct iw_node_attr, kobj_attr);
3355 	if (count == 0 || sysfs_streq(buf, ""))
3356 		weight = 0;
3357 	else if (kstrtou8(buf, 0, &weight))
3358 		return -EINVAL;
3359 
3360 	new = kzalloc(nr_node_ids, GFP_KERNEL);
3361 	if (!new)
3362 		return -ENOMEM;
3363 
3364 	mutex_lock(&iw_table_lock);
3365 	old = rcu_dereference_protected(iw_table,
3366 					lockdep_is_held(&iw_table_lock));
3367 	if (old)
3368 		memcpy(new, old, nr_node_ids);
3369 	new[node_attr->nid] = weight;
3370 	rcu_assign_pointer(iw_table, new);
3371 	mutex_unlock(&iw_table_lock);
3372 	synchronize_rcu();
3373 	kfree(old);
3374 	return count;
3375 }
3376 
3377 static struct iw_node_attr **node_attrs;
3378 
3379 static void sysfs_wi_node_release(struct iw_node_attr *node_attr,
3380 				  struct kobject *parent)
3381 {
3382 	if (!node_attr)
3383 		return;
3384 	sysfs_remove_file(parent, &node_attr->kobj_attr.attr);
3385 	kfree(node_attr->kobj_attr.attr.name);
3386 	kfree(node_attr);
3387 }
3388 
3389 static void sysfs_wi_release(struct kobject *wi_kobj)
3390 {
3391 	int i;
3392 
3393 	for (i = 0; i < nr_node_ids; i++)
3394 		sysfs_wi_node_release(node_attrs[i], wi_kobj);
3395 	kobject_put(wi_kobj);
3396 }
3397 
3398 static const struct kobj_type wi_ktype = {
3399 	.sysfs_ops = &kobj_sysfs_ops,
3400 	.release = sysfs_wi_release,
3401 };
3402 
3403 static int add_weight_node(int nid, struct kobject *wi_kobj)
3404 {
3405 	struct iw_node_attr *node_attr;
3406 	char *name;
3407 
3408 	node_attr = kzalloc(sizeof(*node_attr), GFP_KERNEL);
3409 	if (!node_attr)
3410 		return -ENOMEM;
3411 
3412 	name = kasprintf(GFP_KERNEL, "node%d", nid);
3413 	if (!name) {
3414 		kfree(node_attr);
3415 		return -ENOMEM;
3416 	}
3417 
3418 	sysfs_attr_init(&node_attr->kobj_attr.attr);
3419 	node_attr->kobj_attr.attr.name = name;
3420 	node_attr->kobj_attr.attr.mode = 0644;
3421 	node_attr->kobj_attr.show = node_show;
3422 	node_attr->kobj_attr.store = node_store;
3423 	node_attr->nid = nid;
3424 
3425 	if (sysfs_create_file(wi_kobj, &node_attr->kobj_attr.attr)) {
3426 		kfree(node_attr->kobj_attr.attr.name);
3427 		kfree(node_attr);
3428 		pr_err("failed to add attribute to weighted_interleave\n");
3429 		return -ENOMEM;
3430 	}
3431 
3432 	node_attrs[nid] = node_attr;
3433 	return 0;
3434 }
3435 
3436 static int add_weighted_interleave_group(struct kobject *root_kobj)
3437 {
3438 	struct kobject *wi_kobj;
3439 	int nid, err;
3440 
3441 	wi_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
3442 	if (!wi_kobj)
3443 		return -ENOMEM;
3444 
3445 	err = kobject_init_and_add(wi_kobj, &wi_ktype, root_kobj,
3446 				   "weighted_interleave");
3447 	if (err) {
3448 		kfree(wi_kobj);
3449 		return err;
3450 	}
3451 
3452 	for_each_node_state(nid, N_POSSIBLE) {
3453 		err = add_weight_node(nid, wi_kobj);
3454 		if (err) {
3455 			pr_err("failed to add sysfs [node%d]\n", nid);
3456 			break;
3457 		}
3458 	}
3459 	if (err)
3460 		kobject_put(wi_kobj);
3461 	return 0;
3462 }
3463 
3464 static void mempolicy_kobj_release(struct kobject *kobj)
3465 {
3466 	u8 *old;
3467 
3468 	mutex_lock(&iw_table_lock);
3469 	old = rcu_dereference_protected(iw_table,
3470 					lockdep_is_held(&iw_table_lock));
3471 	rcu_assign_pointer(iw_table, NULL);
3472 	mutex_unlock(&iw_table_lock);
3473 	synchronize_rcu();
3474 	kfree(old);
3475 	kfree(node_attrs);
3476 	kfree(kobj);
3477 }
3478 
3479 static const struct kobj_type mempolicy_ktype = {
3480 	.release = mempolicy_kobj_release
3481 };
3482 
3483 static int __init mempolicy_sysfs_init(void)
3484 {
3485 	int err;
3486 	static struct kobject *mempolicy_kobj;
3487 
3488 	mempolicy_kobj = kzalloc(sizeof(*mempolicy_kobj), GFP_KERNEL);
3489 	if (!mempolicy_kobj) {
3490 		err = -ENOMEM;
3491 		goto err_out;
3492 	}
3493 
3494 	node_attrs = kcalloc(nr_node_ids, sizeof(struct iw_node_attr *),
3495 			     GFP_KERNEL);
3496 	if (!node_attrs) {
3497 		err = -ENOMEM;
3498 		goto mempol_out;
3499 	}
3500 
3501 	err = kobject_init_and_add(mempolicy_kobj, &mempolicy_ktype, mm_kobj,
3502 				   "mempolicy");
3503 	if (err)
3504 		goto node_out;
3505 
3506 	err = add_weighted_interleave_group(mempolicy_kobj);
3507 	if (err) {
3508 		pr_err("mempolicy sysfs structure failed to initialize\n");
3509 		kobject_put(mempolicy_kobj);
3510 		return err;
3511 	}
3512 
3513 	return err;
3514 node_out:
3515 	kfree(node_attrs);
3516 mempol_out:
3517 	kfree(mempolicy_kobj);
3518 err_out:
3519 	pr_err("failed to add mempolicy kobject to the system\n");
3520 	return err;
3521 }
3522 
3523 late_initcall(mempolicy_sysfs_init);
3524 #endif /* CONFIG_SYSFS */
3525