xref: /linux/mm/oom_kill.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>

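/*
 * Tunables exposed to userspace via /proc/sys/vm/ as panic_on_oom,
 * oom_kill_allocating_task and oom_dump_tasks, respectively (see
 * Documentation/sysctl/vm.txt).
 */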
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);

/*
 * compare_swap_oom_score_adj() - compare and swap current's oom_score_adj
 * @old_val: old oom_score_adj for compare
 * @new_val: new oom_score_adj for swap
 *
 * Sets the oom_score_adj value for current to @new_val iff its present value is
 * @old_val.  Usually used to reinstate a previous value to prevent racing with
 * userspace tuning the value in the interim.
 */
void compare_swap_oom_score_adj(int old_val, int new_val)
{
	struct sighand_struct *sighand = current->sighand;

	spin_lock_irq(&sighand->siglock);
	if (current->signal->oom_score_adj == old_val)
		current->signal->oom_score_adj = new_val;
	spin_unlock_irq(&sighand->siglock);
}

/**
 * test_set_oom_score_adj() - set current's oom_score_adj and return old value
 * @new_val: new oom_score_adj value
 *
 * Sets the oom_score_adj value for current to @new_val with proper
 * synchronization and returns the old value.  Usually used to temporarily
 * set a value, save the old value in the caller, and then reinstate it later.
 */
int test_set_oom_score_adj(int new_val)
{
	struct sighand_struct *sighand = current->sighand;
	int old_val;

	spin_lock_irq(&sighand->siglock);
	old_val = current->signal->oom_score_adj;
	current->signal->oom_score_adj = new_val;
	spin_unlock_irq(&sighand->siglock);

	return old_val;
}
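
/*
 * Illustrative usage (a sketch, not a prescription): a caller doing a
 * memory-hungry operation can temporarily make itself the preferred oom
 * victim and later restore the old value without clobbering a concurrent
 * userspace write, as sys_swapoff() does with this pair of helpers:
 *
 *	int old_val = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
 *	...
 *	compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, old_val);
 */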

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @tsk: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct task_struct *start = tsk;

	do {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			if (mempolicy_nodemask_intersects(tsk, mask))
				return true;
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			if (cpuset_mems_allowed_intersects(current, tsk))
				return true;
		}
	} while_each_thread(start, tsk);

	return false;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t = p;

	do {
		task_lock(t);
		if (likely(t->mm))
			return t;
		task_unlock(t);
	} while_each_thread(p, t);

	return NULL;
}
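
/*
 * Illustrative call pattern (a sketch): the returned thread is locked, so
 * a successful lookup must be paired with task_unlock():
 *
 *	task = find_lock_task_mm(p);
 *	if (task) {
 *		... use task->mm safely ...
 *		task_unlock(task);
 *	}
 */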

/* return true if the task is not adequate as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/*
	 * When called on behalf of mem_cgroup_out_of_memory(), skip p if it
	 * is not a member of the memcg.
	 */
	if (mem && !task_in_mem_cgroup(p, mem))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @mem: current's memory controller, if constrained; NULL otherwise
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
		      const nodemask_t *nodemask, unsigned long totalpages)
{
	int points;

	if (oom_unkillable_task(p, mem, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * The memory controller may have a limit of 0 bytes, so avoid a divide
	 * by zero, if necessary.
	 */
	if (!totalpages)
		totalpages = 1;

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + p->mm->nr_ptes;
	points += get_mm_counter(p->mm, MM_SWAPENTS);

	points *= 1000;
	points /= totalpages;
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= 30;

	/*
	 * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
	 * either completely disable oom killing or always prefer a certain
	 * task.
	 */
	points += p->signal->oom_score_adj;

	/*
	 * Never return 0 for an eligible task that may be killed since it's
	 * possible that no single user task uses more than 0.1% of memory and
	 * no single admin task uses more than 3.0%.
	 */
	if (points <= 0)
		return 1;
	return (points < 1000) ? points : 1000;
}
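
/*
 * Worked example of the heuristic above (illustrative numbers): a task
 * whose rss, page tables and swap entries add up to 10% of @totalpages
 * starts at 100 points of 1000; if it runs with CAP_SYS_ADMIN the 3%
 * bonus drops it to 70; an oom_score_adj of +500 would then yield 570.
 */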

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask,
				unsigned long *totalpages)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	bool cpuset_limited = false;
	int nid;

	/* Default to all available memory */
	*totalpages = totalram_pages + total_swap_pages;

	if (!zonelist)
		return CONSTRAINT_NONE;
	/*
	 * Reach here with __GFP_THISNODE only when __GFP_NOFAIL is also used,
	 * so we should avoid killing current; a random task has to be killed
	 * in this case.  Ideally this would be CONSTRAINT_THISNODE, but there
	 * is no way to handle that for now.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, *nodemask)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure is caused by a cpuset's wall
	 * function */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			high_zoneidx, nodemask)
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask,
				unsigned long *totalpages)
{
	*totalpages = totalram_pages + total_swap_pages;
	return CONSTRAINT_NONE;
}
#endif
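
/*
 * Example of the accounting above (illustrative): for an oom constrained
 * by a mempolicy that allows only node 1, *totalpages becomes node 1's
 * spanned pages plus total_swap_pages and CONSTRAINT_MEMORY_POLICY is
 * returned, so badness is scored against the policy's memory, not all RAM.
 */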

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned int *ppoints,
		unsigned long totalpages, struct mem_cgroup *mem,
		const nodemask_t *nodemask)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	*ppoints = 0;

	do_each_thread(g, p) {
		unsigned int points;

		if (p->exit_state)
			continue;
		if (oom_unkillable_task(p, mem, nodemask))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
			if (unlikely(frozen(p)))
				thaw_process(p);
			return ERR_PTR(-1UL);
		}
		if (!p->mm)
			continue;

		if (p->flags & PF_EXITING) {
			/*
			 * If p is the current task and is in the process of
			 * releasing memory, we allow the "kill" to set
			 * TIF_MEMDIE, which will allow it to gain access to
			 * memory reserves.  Otherwise, it may stall forever.
			 *
			 * The loop isn't broken here, however, in case other
			 * threads are found to have already been oom killed.
			 */
			if (p == current) {
				chosen = p;
				*ppoints = 1000;
			} else {
				/*
				 * If this task is not being ptraced on exit,
				 * then wait for it to finish before killing
				 * some other task unnecessarily.
				 */
				if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
					return ERR_PTR(-1UL);
			}
		}

		points = oom_badness(p, mem, nodemask, totalpages);
		if (points > *ppoints) {
			chosen = p;
			*ppoints = points;
		}
	} while_each_thread(g, p);

	return chosen;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
 * value, oom_score_adj value, and name.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name\n");
	for_each_process(p) {
		if (oom_unkillable_task(p, mem, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %3u     %3d         %5d %s\n",
			task->pid, task_uid(task), task->tgid,
			task->mm->total_vm, get_mm_rss(task->mm),
			task_cpu(task), task->signal->oom_adj,
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
}

static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
			struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	task_lock(current);
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_adj=%d, oom_score_adj=%d\n",
		current->comm, gfp_mask, order, current->signal->oom_adj,
		current->signal->oom_score_adj);
	cpuset_print_task_mems_allowed(current);
	task_unlock(current);
	dump_stack();
	mem_cgroup_print_oom_info(mem, p);
	show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(mem, nodemask);
}

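/* Convert a count of pages to kilobytes; with 4KB pages, K(25) == 100. */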
#define K(x) ((x) << (PAGE_SHIFT-10))
static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
{
	struct task_struct *q;
	struct mm_struct *mm;

	p = find_lock_task_mm(p);
	if (!p)
		return 1;

	/* mm cannot be safely dereferenced after task_unlock(p) */
	mm = p->mm;

	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
		task_pid_nr(p), p->comm, K(p->mm->total_vm),
		K(get_mm_counter(p->mm, MM_ANONPAGES)),
		K(get_mm_counter(p->mm, MM_FILEPAGES)));
	task_unlock(p);

	/*
	 * Kill all user processes sharing p->mm in other thread groups, if any.
	 * They don't get access to memory reserves or a higher scheduler
	 * priority, though, to avoid depletion of all memory or task
	 * starvation.  This prevents mm->mmap_sem livelock when an oom killed
	 * task cannot exit because it requires the semaphore and it is
	 * contended by another thread trying to allocate memory itself.  That
	 * thread will now get access to memory reserves since it has a pending
	 * fatal signal.
	 */
	for_each_process(q)
		if (q->mm == mm && !same_thread_group(q, p) &&
		    !(q->flags & PF_KTHREAD)) {
			if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
				continue;

			task_lock(q);	/* Protect ->comm from prctl() */
			pr_err("Kill process %d (%s) sharing same memory\n",
				task_pid_nr(q), q->comm);
			task_unlock(q);
			force_sig(SIGKILL, q);
		}

	set_tsk_thread_flag(p, TIF_MEMDIE);
	force_sig(SIGKILL, p);

	return 0;
}
#undef K

static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned int points, unsigned long totalpages,
			    struct mem_cgroup *mem, nodemask_t *nodemask,
			    const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t = p;
	unsigned int victim_points = 0;

	if (printk_ratelimit())
		dump_header(p, gfp_mask, order, mem, nodemask);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		set_tsk_thread_flag(p, TIF_MEMDIE);
		return 0;
	}

	task_lock(p);
	pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);
	task_unlock(p);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	do {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (child->mm == p->mm)
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child, mem, nodemask,
								totalpages);
			if (child_points > victim_points) {
				victim = child;
				victim_points = child_points;
			}
		}
	} while_each_thread(p, t);

	return oom_kill_task(victim, mem);
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
				int order, const nodemask_t *nodemask)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	read_lock(&tasklist_lock);
	dump_header(NULL, gfp_mask, order, NULL, nodemask);
	read_unlock(&tasklist_lock);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
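
/*
 * For example (userspace side): "echo 2 > /proc/sys/vm/panic_on_oom"
 * makes every oom panic the machine, even for cpuset, mempolicy or memcg
 * constrained failures; a value of 1 panics only for unconstrained ooms.
 */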

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long limit;
	unsigned int points = 0;
	struct task_struct *p;

	/*
	 * If current has a pending SIGKILL, then automatically select it.  The
	 * goal is to allow it to allocate so that it may quickly exit and free
	 * its memory.
	 */
	if (fatal_signal_pending(current)) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
	limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, limit, mem, NULL);
	if (!p || PTR_ERR(p) == -1UL)
		goto out;

	if (oom_kill_process(p, gfp_mask, 0, points, limit, mem, NULL,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
}
#endif

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
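
/*
 * Illustrative sketch of a client (hypothetical names): out_of_memory()
 * invokes this chain with a pointer to a page count; each callback adds
 * however many pages it managed to free, and if the total is non-zero
 * the kill is skipped:
 *
 *	static int my_oom_notify(struct notifier_block *self,
 *				 unsigned long dummy, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += release_my_cache();	(hypothetical helper)
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call	= my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */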

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}
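
/*
 * Illustrative call pattern (a condensed sketch of what the page
 * allocator's __alloc_pages_may_oom() path does): take the zonelist oom
 * lock, invoke the killer, then release the lock so later failures can
 * retrigger it:
 *
 *	if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
 *		schedule_timeout_uninterruptible(1);
 *		return NULL;
 *	}
 *	out_of_memory(zonelist, gfp_mask, order, nodemask);
 *	clear_zonelist_oom(zonelist, gfp_mask);
 */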

/*
 * Try to acquire the oom killer lock for all system zones.  Returns zero if a
 * parallel oom killing is taking place, otherwise locks all zones and returns
 * non-zero.
 */
static int try_set_system_oom(void)
{
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	for_each_populated_zone(zone)
		zone_set_flag(zone, ZONE_OOM_LOCKED);
out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
 * attempts or page faults may now recall the oom killer, if necessary.
 */
static void clear_system_oom(void)
{
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	spin_unlock(&zone_scan_lock);
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * or trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask)
{
	const nodemask_t *mpol_mask;
	struct task_struct *p;
	unsigned long totalpages;
	unsigned long freed = 0;
	unsigned int points;
	enum oom_constraint constraint = CONSTRAINT_NONE;
	int killed = 0;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	/*
	 * If current has a pending SIGKILL, then automatically select it.  The
	 * goal is to allow it to allocate so that it may quickly exit and free
	 * its memory.
	 */
	if (fatal_signal_pending(current)) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
						&totalpages);
	mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
	check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);

	read_lock(&tasklist_lock);
	if (sysctl_oom_kill_allocating_task &&
	    !oom_unkillable_task(current, NULL, nodemask) &&
	    current->mm) {
		/*
		 * oom_kill_process() needs tasklist_lock held.  If it returns
		 * non-zero, current could not be killed so we must fall back
		 * to the tasklist scan.
		 */
		if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
				NULL, nodemask,
				"Out of memory (oom_kill_allocating_task)"))
			goto out;
	}

retry:
	p = select_bad_process(&points, totalpages, NULL, mpol_mask);
	if (PTR_ERR(p) == -1UL)
		goto out;

	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p) {
		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
		read_unlock(&tasklist_lock);
		panic("Out of memory and no killable processes...\n");
	}

	if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
				nodemask, "Out of memory"))
		goto retry;
	killed = 1;
out:
	read_unlock(&tasklist_lock);

	/*
	 * Give the selected victim a good chance of exiting before we retry
	 * the allocation, unless the victim is current itself.
	 */
	if (killed && !test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If a populated zone has ZONE_OOM_LOCKED set, a parallel
 * oom killing is already in progress so do nothing.  If a task is found with
 * TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
 */
void pagefault_out_of_memory(void)
{
	if (try_set_system_oom()) {
		out_of_memory(NULL, 0, 0, NULL);
		clear_system_oom();
	}
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}