// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach __dump_page_owner() and save_stack() to use off-stack
 * temporary storage instead of PAGE_OWNER_STACK_DEPTH-sized stack arrays.
 */
#define PAGE_OWNER_STACK_DEPTH (16)

struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
	depot_stack_handle_t free_handle;
	u64 ts_nsec;
	u64 free_ts_nsec;
	char comm[TASK_COMM_LEN];
	pid_t pid;
	pid_t tgid;
	pid_t free_pid;
	pid_t free_tgid;
};

struct stack {
	struct stack_record *stack_record;
	struct stack *next;
};
static struct stack dummy_stack;
static struct stack failure_stack;
static struct stack *stack_list;
static DEFINE_SPINLOCK(stack_list_lock);

static bool page_owner_enabled __initdata;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

static inline void set_current_in_page_owner(void)
{
	/*
	 * Avoid recursion.
	 *
	 * page_owner code may itself need to allocate memory, so flag the
	 * current task to signal that and avoid recursing.
	 */
	current->in_page_owner = 1;
}

static inline void unset_current_in_page_owner(void)
{
	current->in_page_owner = 0;
}

static int __init early_page_owner_param(char *buf)
{
	int ret = kstrtobool(buf, &page_owner_enabled);

	if (page_owner_enabled)
		stack_depot_request_early_init();

	return ret;
}
early_param("page_owner", early_page_owner_param);

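/*
 * Typical usage, per Documentation/mm/page_owner.rst: build with
 * CONFIG_PAGE_OWNER=y and boot with "page_owner=on" on the kernel
 * command line, then dump and post-process the records from debugfs:
 *
 *   cat /sys/kernel/debug/page_owner > page_owner_full.txt
 *   ./page_owner_sort page_owner_full.txt sorted_page_owner.txt
 *
 * (page_owner_sort is built from tools/mm/page_owner_sort.c.)
 */
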
static __init bool need_page_owner(void)
{
	return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

static __init void init_page_owner(void)
{
	if (!page_owner_enabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	init_early_allocated_pages();
	/* Initialize dummy and failure stacks and link them to stack_list */
	dummy_stack.stack_record = __stack_depot_get_stack_record(dummy_handle);
	failure_stack.stack_record = __stack_depot_get_stack_record(failure_handle);
	if (dummy_stack.stack_record)
		refcount_set(&dummy_stack.stack_record->count, 1);
	if (failure_stack.stack_record)
		refcount_set(&failure_stack.stack_record->count, 1);
	dummy_stack.next = &failure_stack;
	stack_list = &dummy_stack;
	static_branch_enable(&page_owner_inited);
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
	.need_shared_flags = true,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return page_ext_data(page_ext, &page_owner_ops);
}

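/*
 * struct page_owner is not embedded in struct page: page_ext reserves
 * page_owner_ops.size bytes of per-page storage for it, and
 * page_ext_data() returns the page_owner slice of that area.
 */
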
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	if (current->in_page_owner)
		return dummy_handle;

	set_current_in_page_owner();
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;
	unset_current_in_page_owner();

	return handle;
}

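/*
 * The skipnr of 2 passed to stack_trace_save() drops save_stack() and its
 * immediate caller from the recorded trace, so the entries start at the
 * allocation or free site. If stack depot itself needs to allocate pages
 * (current->in_page_owner is already set), dummy_handle is returned
 * instead of recursing.
 */
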
static void add_stack_record_to_list(struct stack_record *stack_record,
				     gfp_t gfp_mask)
{
	unsigned long flags;
	struct stack *stack;

	if (!gfpflags_allow_spinning(gfp_mask))
		return;

	set_current_in_page_owner();
	stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));
	if (!stack) {
		unset_current_in_page_owner();
		return;
	}
	unset_current_in_page_owner();

	stack->stack_record = stack_record;
	stack->next = NULL;

	spin_lock_irqsave(&stack_list_lock, flags);
	stack->next = stack_list;
	/*
	 * This pairs with smp_load_acquire() from function
	 * stack_start(). This guarantees that stack_start()
	 * will see an updated stack_list before starting to
	 * traverse the list.
	 */
	smp_store_release(&stack_list, stack);
	spin_unlock_irqrestore(&stack_list_lock, flags);
}

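/*
 * stack_list is a grow-only, singly linked list: writers serialize on
 * stack_list_lock and publish a new head with smp_store_release(), while
 * readers in stack_start() traverse it locklessly after a matching
 * smp_load_acquire(). Because nodes are never unlinked or freed, readers
 * need no further synchronization.
 */
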
static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask,
				   int nr_base_pages)
{
	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);

	if (!stack_record)
		return;

	/*
	 * New stack_records that were not saved with STACK_DEPOT_FLAG_GET
	 * start with a refcount of REFCOUNT_SATURATED to catch spurious
	 * increments. Since we do not use the STACK_DEPOT_FLAG_GET API,
	 * set a refcount of 1 ourselves.
	 */
	if (refcount_read(&stack_record->count) == REFCOUNT_SATURATED) {
		int old = REFCOUNT_SATURATED;

		if (atomic_try_cmpxchg_relaxed(&stack_record->count.refs, &old, 1))
			/* Add the new stack_record to our list */
			add_stack_record_to_list(stack_record, gfp_mask);
	}
	refcount_add(nr_base_pages, &stack_record->count);
}

static void dec_stack_record_count(depot_stack_handle_t handle,
				   int nr_base_pages)
{
	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);

	if (!stack_record)
		return;

	if (refcount_sub_and_test(nr_base_pages, &stack_record->count))
		pr_warn("%s: refcount went to 0 for handle %u\n", __func__,
			handle);
}

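/*
 * Accounting sketch: each stack_record's refcount holds a baseline of 1
 * plus the number of base pages currently allocated with that stack. An
 * order-2 allocation adds 4 (1 << 2) and freeing it subtracts 4 again;
 * stack_print() below reports refcount - 1 as nr_base_pages.
 */
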
static inline void __update_page_owner_handle(struct page *page,
					      depot_stack_handle_t handle,
					      unsigned short order,
					      gfp_t gfp_mask,
					      short last_migrate_reason, u64 ts_nsec,
					      pid_t pid, pid_t tgid, char *comm)
{
	struct page_ext_iter iter;
	struct page_ext *page_ext;
	struct page_owner *page_owner;

	rcu_read_lock();
	for_each_page_ext(page, 1 << order, page_ext, iter) {
		page_owner = get_page_owner(page_ext);
		page_owner->handle = handle;
		page_owner->order = order;
		page_owner->gfp_mask = gfp_mask;
		page_owner->last_migrate_reason = last_migrate_reason;
		page_owner->pid = pid;
		page_owner->tgid = tgid;
		page_owner->ts_nsec = ts_nsec;
		strscpy(page_owner->comm, comm,
			sizeof(page_owner->comm));
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
	}
	rcu_read_unlock();
}

static inline void __update_page_owner_free_handle(struct page *page,
						   depot_stack_handle_t handle,
						   unsigned short order,
						   pid_t pid, pid_t tgid,
						   u64 free_ts_nsec)
{
	struct page_ext_iter iter;
	struct page_ext *page_ext;
	struct page_owner *page_owner;

	rcu_read_lock();
	for_each_page_ext(page, 1 << order, page_ext, iter) {
		page_owner = get_page_owner(page_ext);
		/* Only __reset_page_owner() wants to clear the bit */
		if (handle) {
			__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
			page_owner->free_handle = handle;
		}
		page_owner->free_ts_nsec = free_ts_nsec;
		page_owner->free_pid = pid;
		page_owner->free_tgid = tgid;
	}
	rcu_read_unlock();
}

void __reset_page_owner(struct page *page, unsigned short order)
{
	struct page_ext *page_ext;
	depot_stack_handle_t handle;
	depot_stack_handle_t alloc_handle;
	struct page_owner *page_owner;
	u64 free_ts_nsec = local_clock();

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	alloc_handle = page_owner->handle;
	page_ext_put(page_ext);

	/*
	 * Do not specify GFP_NOWAIT, so that gfpflags_allow_spinning()
	 * returns false and stack_depot_save() avoids taking spinlocks.
	 * This mirrors the alloc_pages_nolock() gfp flags, but is only
	 * used to signal stack depot to avoid spinlocks.
	 */
	handle = save_stack(__GFP_NOWARN);
	__update_page_owner_free_handle(page, handle, order, current->pid,
					current->tgid, free_ts_nsec);

	if (alloc_handle != early_handle)
		/*
		 * early_handle is being set as a handle for all those
		 * early allocated pages. See init_pages_in_zone().
		 * Since their refcount is not being incremented because
		 * the machinery is not ready yet, we cannot decrement
		 * their refcount either.
		 */
		dec_stack_record_count(alloc_handle, 1 << order);
}

noinline void __set_page_owner(struct page *page, unsigned short order,
					gfp_t gfp_mask)
{
	u64 ts_nsec = local_clock();
	depot_stack_handle_t handle;

	handle = save_stack(gfp_mask);
	__update_page_owner_handle(page, handle, order, gfp_mask, -1,
				   ts_nsec, current->pid, current->tgid,
				   current->comm);
	inc_stack_record_count(handle, gfp_mask, 1 << order);
}

void __folio_set_owner_migrate_reason(struct folio *folio, int reason)
{
	struct page_ext *page_ext = page_ext_get(&folio->page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
	page_ext_put(page_ext);
}

void __split_page_owner(struct page *page, int old_order, int new_order)
{
	struct page_ext_iter iter;
	struct page_ext *page_ext;
	struct page_owner *page_owner;

	rcu_read_lock();
	for_each_page_ext(page, 1 << old_order, page_ext, iter) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = new_order;
	}
	rcu_read_unlock();
}

void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
	struct page_ext *page_ext;
	struct page_ext_iter iter;
	struct page_owner *old_page_owner;
	struct page_owner *new_page_owner;
	depot_stack_handle_t migrate_handle;

	page_ext = page_ext_get(&old->page);
	if (unlikely(!page_ext))
		return;

	old_page_owner = get_page_owner(page_ext);
	page_ext_put(page_ext);

	page_ext = page_ext_get(&newfolio->page);
	if (unlikely(!page_ext))
		return;

	new_page_owner = get_page_owner(page_ext);
	page_ext_put(page_ext);

	migrate_handle = new_page_owner->handle;
	__update_page_owner_handle(&newfolio->page, old_page_owner->handle,
				   old_page_owner->order, old_page_owner->gfp_mask,
				   old_page_owner->last_migrate_reason,
				   old_page_owner->ts_nsec, old_page_owner->pid,
				   old_page_owner->tgid, old_page_owner->comm);
	/*
	 * Do not proactively clear PAGE_EXT_OWNER{_ALLOCATED} bits as the folio
	 * will be freed after migration. Keep them until then as they may be
	 * useful.
	 */
	__update_page_owner_free_handle(&newfolio->page, 0, old_page_owner->order,
					old_page_owner->free_pid,
					old_page_owner->free_tgid,
					old_page_owner->free_ts_nsec);
	/*
	 * We linked the original stack to the new folio; now link the new
	 * one to the old folio as well, otherwise there will be an
	 * imbalance when subtracting those pages from the stack.
	 */
	rcu_read_lock();
	for_each_page_ext(&old->page, 1 << new_page_owner->order, page_ext, iter) {
		old_page_owner = get_page_owner(page_ext);
		old_page_owner->handle = migrate_handle;
	}
	rcu_read_unlock();
}

void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn, block_end_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		page = pfn_to_online_page(pfn);
		if (!page) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			/* The pageblock is online, no need to recheck. */
			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = buddy_order_unsafe(page);
				if (freepage_order <= MAX_PAGE_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
				goto ext_put_continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfp_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				page_ext_put(page_ext);
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
			page_ext_put(page_ext);
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}

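/*
 * Illustrative "mixed blocks" line as it appears in /proc/pagetypeinfo
 * (counts are made up; one column per migratetype, see the seq_printf()
 * calls above):
 *
 *   Node 0, zone   Normal            1            0            4 ...
 */
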
/*
 * Look up memcg information and print it out
 */
static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
					 struct page *page)
{
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
	struct mem_cgroup *memcg;
	bool online;
	char name[80];

	rcu_read_lock();
	memcg_data = READ_ONCE(page->memcg_data);
	if (!memcg_data || PageTail(page))
		goto out_unlock;

	if (memcg_data & MEMCG_DATA_OBJEXTS)
		ret += scnprintf(kbuf + ret, count - ret,
				"Slab cache page\n");

	memcg = page_memcg_check(page);
	if (!memcg)
		goto out_unlock;

	online = (memcg->css.flags & CSS_ONLINE);
	cgroup_name(memcg->css.cgroup, name, sizeof(name));
	ret += scnprintf(kbuf + ret, count - ret,
			"Charged %sto %smemcg %s\n",
			PageMemcgKmem(page) ? "(via objcg) " : "",
			online ? "" : "offline ",
			name);
out_unlock:
	rcu_read_unlock();
#endif /* CONFIG_MEMCG */

	return ret;
}

static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = scnprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask, page_owner->pid,
			page_owner->tgid, page_owner->comm,
			page_owner->ts_nsec);

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfp_migratetype(page_owner->gfp_mask);
	ret += scnprintf(kbuf + ret, count - ret,
			"PFN 0x%lx type %s Block %lu type %s Flags %pGp\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			&page->flags);

	ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += scnprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	}

	ret = print_page_owner_memcg(kbuf, count, ret, page);

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

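/*
 * Illustrative record as read from /sys/kernel/debug/page_owner (all
 * values are made up; the stack trace rendered by stack_depot_snprint()
 * follows the two header lines):
 *
 *   Page allocated via order 0, mask 0x1112cca(GFP_HIGHUSER_MOVABLE), pid 123, tgid 123 (bash), ts 48564442500 ns
 *   PFN 0x41024 type Movable Block 520 type Movable Flags 0x8000000000000068(uptodate|lru|active|swapbacked)
 *    __alloc_pages+0x39e/0x7e0
 *    ...
 */
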
void __dump_page_owner(const struct page *page)
{
	struct page_ext *page_ext = page_ext_get((void *)page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfp_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not present (never set?)\n");
		page_ext_put(page_ext);
		return;
	}

	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
		pr_alert("page_owner tracks the page as allocated\n");
	else
		pr_alert("page_owner tracks the page as freed\n");

	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
		 page_owner->pid, page_owner->tgid, page_owner->comm,
		 page_owner->ts_nsec, page_owner->free_ts_nsec);

	handle = READ_ONCE(page_owner->handle);
	if (!handle)
		pr_alert("page_owner allocation stack trace missing\n");
	else
		stack_depot_print(handle);

	handle = READ_ONCE(page_owner->free_handle);
	if (!handle) {
		pr_alert("page_owner free stack trace missing\n");
	} else {
		pr_alert("page last free pid %d tgid %d stack trace:\n",
			  page_owner->free_pid, page_owner->free_tgid);
		stack_depot_print(handle);
	}

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	page_ext_put(page_ext);
}

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	if (*ppos == 0)
		pfn = min_low_pfn;
	else
		pfn = *ppos;
	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * A temporary copy of page_owner is required so that we
		 * don't call copy_to_user() or make GFP_KERNEL allocations
		 * (both of which may sleep) while holding the RCU lock.
		 */
		struct page_owner page_owner_tmp;

		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			if (freepage_order <= MAX_PAGE_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = page_ext_get(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			goto ext_put_continue;

		/*
		 * Although we do have the info about past allocation of free
		 * pages, it's not relevant for current memory usage.
		 */
		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
			goto ext_put_continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Don't print "tail" pages of high-order allocations as that
		 * would inflate the stats.
		 */
		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
			goto ext_put_continue;

		/*
		 * Access to page_owner->handle isn't synchronized, so
		 * take care when reading it (hence the READ_ONCE() below).
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			goto ext_put_continue;

		/* Record the next PFN to read in the file offset */
		*ppos = pfn + 1;

		page_owner_tmp = *page_owner;
		page_ext_put(page_ext);
		return print_page_owner(buf, count, pfn, page,
				&page_owner_tmp, handle);
ext_put_continue:
		page_ext_put(page_ext);
	}

	return 0;
}

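/*
 * Note on the read protocol above: *ppos carries the next PFN to scan
 * rather than a byte offset, so each read() returns at most one record
 * and the scan resumes where it stopped. lseek_page_owner() below
 * accordingly lets userspace seek to a PFN directly. The page_owner_sort
 * tool (tools/mm/page_owner_sort.c) can sort and deduplicate the dump.
 */
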
static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
{
	switch (orig) {
	case SEEK_SET:
		file->f_pos = offset;
		break;
	case SEEK_CUR:
		file->f_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	return file->f_pos;
}

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page = pfn_to_page(pfn);
			struct page_ext *page_ext;

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = buddy_order_unsafe(page);

				if (order > 0 && order <= MAX_PAGE_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				goto ext_put_continue;

			/* Found early allocated page */
			__update_page_owner_handle(page, early_handle, 0, 0,
						   -1, local_clock(), current->pid,
						   current->tgid, current->comm);
			count++;
ext_put_continue:
			page_ext_put(page_ext);
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read		= read_page_owner,
	.llseek		= lseek_page_owner,
};

static void *stack_start(struct seq_file *m, loff_t *ppos)
{
	struct stack *stack;

	if (*ppos == -1UL)
		return NULL;

	if (!*ppos) {
		/*
		 * This pairs with smp_store_release() from function
		 * add_stack_record_to_list(), so we get a consistent
		 * value of stack_list.
		 */
		stack = smp_load_acquire(&stack_list);
		m->private = stack;
	} else {
		stack = m->private;
	}

	return stack;
}

static void *stack_next(struct seq_file *m, void *v, loff_t *ppos)
{
	struct stack *stack = v;

	stack = stack->next;
	*ppos = stack ? *ppos + 1 : -1UL;
	m->private = stack;

	return stack;
}

static unsigned long page_owner_pages_threshold;

static int stack_print(struct seq_file *m, void *v)
{
	int i, nr_base_pages;
	struct stack *stack = v;
	unsigned long *entries;
	unsigned long nr_entries;
	struct stack_record *stack_record = stack->stack_record;

	if (!stack_record)
		return 0;

	nr_entries = stack_record->size;
	entries = stack_record->entries;
	nr_base_pages = refcount_read(&stack_record->count) - 1;

	if (nr_base_pages < 1 || nr_base_pages < page_owner_pages_threshold)
		return 0;

	for (i = 0; i < nr_entries; i++)
		seq_printf(m, " %pS\n", (void *)entries[i]);
	seq_printf(m, "nr_base_pages: %d\n\n", nr_base_pages);

	return 0;
}

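/*
 * Illustrative /sys/kernel/debug/page_owner_stacks/show_stacks entry as
 * emitted by stack_print() above (frames and count are made up):
 *
 *    post_alloc_hook+0x177/0x1a0
 *    get_page_from_freelist+0xd01/0xd80
 *    __alloc_pages+0x39e/0x7e0
 *    ...
 *   nr_base_pages: 20824
 */
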
static void stack_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations page_owner_stack_op = {
	.start	= stack_start,
	.next	= stack_next,
	.stop	= stack_stop,
	.show	= stack_print
};

static int page_owner_stack_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &page_owner_stack_op, 0);
}

static const struct file_operations page_owner_stack_operations = {
	.open		= page_owner_stack_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int page_owner_threshold_get(void *data, u64 *val)
{
	*val = READ_ONCE(page_owner_pages_threshold);
	return 0;
}

static int page_owner_threshold_set(void *data, u64 val)
{
	WRITE_ONCE(page_owner_pages_threshold, val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(proc_page_owner_threshold, &page_owner_threshold_get,
			&page_owner_threshold_set, "%llu");

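/*
 * Stacks whose outstanding page count is below the threshold are skipped
 * by stack_print(), e.g. to show only stacks holding at least 1000 base
 * pages:
 *
 *   echo 1000 > /sys/kernel/debug/page_owner_stacks/count_threshold
 *   cat /sys/kernel/debug/page_owner_stacks/show_stacks
 */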

static int __init pageowner_init(void)
{
	struct dentry *dir;

	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);
	dir = debugfs_create_dir("page_owner_stacks", NULL);
	debugfs_create_file("show_stacks", 0400, dir, NULL,
			    &page_owner_stack_operations);
	debugfs_create_file("count_threshold", 0600, dir, NULL,
			    &proc_page_owner_threshold);

	return 0;
}
late_initcall(pageowner_init)