xref: /linux/mm/page_owner.c (revision eb386617be4bdfe02eb0972874f726e2bfc7a6e7)
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach the PAGE_OWNER_STACK_DEPTH users (__dump_page_owner() and
 * save_stack()) to use off-stack temporary storage.
 */
#define PAGE_OWNER_STACK_DEPTH (16)

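/*
 * Per-page metadata recorded for each tracked base page. It lives in the
 * page's page_ext area: the allocation fields are filled in by
 * __set_page_owner_handle() and the free_* fields by __reset_page_owner().
 */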
struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
	depot_stack_handle_t free_handle;
	u64 ts_nsec;
	u64 free_ts_nsec;
	char comm[TASK_COMM_LEN];
	pid_t pid;
	pid_t tgid;
	pid_t free_pid;
	pid_t free_tgid;
};

static bool page_owner_enabled __initdata;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

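/*
 * Parse the "page_owner" early parameter. When the feature is enabled on
 * the kernel command line, also request early initialization of stack
 * depot, which page_owner depends on for storing stack traces.
 */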
static int __init early_page_owner_param(char *buf)
{
	int ret = kstrtobool(buf, &page_owner_enabled);

	if (page_owner_enabled)
		stack_depot_request_early_init();

	return ret;
}
early_param("page_owner", early_page_owner_param);

static __init bool need_page_owner(void)
{
	return page_owner_enabled;
}

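/*
 * Pre-recorded fallback stack handles: "dummy" stands in for traces skipped
 * to break recursion, "failure" for traces that could not be saved, and
 * "early" for pages allocated before page_owner was initialized. All three
 * simply record the (irrelevant) stack of the registering function.
 */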
static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

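/*
 * page_ext init hook: register the fallback stacks, enable the static key
 * that gates the page_owner fast paths, and attribute pages that were
 * allocated before this point to the "early" stack.
 */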
static __init void init_page_owner(void)
{
	if (!page_owner_enabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
	.need_shared_flags = true,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return page_ext_data(page_ext, &page_owner_ops);
}

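/*
 * Save the current stack trace to stack depot and return a compact handle
 * for it. Recursion through the depot's own allocations is broken via the
 * per-task in_page_owner flag.
 */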
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	/*
	 * Avoid recursion.
	 *
	 * Tracking page metadata allocations can itself require memory to
	 * be allocated, e.g. when a new stack trace is saved to the stack
	 * depot.
	 */
	if (current->in_page_owner)
		return dummy_handle;
	current->in_page_owner = 1;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;

	current->in_page_owner = 0;
	return handle;
}

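/*
 * Record the freeing context (stack trace, timestamp, pid/tgid) for every
 * base page of the order-sized block and clear the "allocated" bit in the
 * corresponding page_ext entries.
 */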
void __reset_page_owner(struct page *page, unsigned short order)
{
	int i;
	struct page_ext *page_ext;
	depot_stack_handle_t handle;
	struct page_owner *page_owner;
	u64 free_ts_nsec = local_clock();

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;

	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
	for (i = 0; i < (1 << order); i++) {
		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
		page_owner = get_page_owner(page_ext);
		page_owner->free_handle = handle;
		page_owner->free_ts_nsec = free_ts_nsec;
		page_owner->free_pid = current->pid;
		page_owner->free_tgid = current->tgid;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

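/*
 * Fill in the allocation metadata for every base page of an order-sized
 * block and mark the page_ext entries as owned and currently allocated.
 */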
static inline void __set_page_owner_handle(struct page_ext *page_ext,
					depot_stack_handle_t handle,
					unsigned short order, gfp_t gfp_mask)
{
	struct page_owner *page_owner;
	int i;
	u64 ts_nsec = local_clock();

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->handle = handle;
		page_owner->order = order;
		page_owner->gfp_mask = gfp_mask;
		page_owner->last_migrate_reason = -1;
		page_owner->pid = current->pid;
		page_owner->tgid = current->tgid;
		page_owner->ts_nsec = ts_nsec;
		strscpy(page_owner->comm, current->comm,
			sizeof(page_owner->comm));
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

		page_ext = page_ext_next(page_ext);
	}
}

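/*
 * Allocation-side hook: save the allocation stack first (saving may itself
 * allocate memory) and then attach it, together with the caller's context,
 * to the page's page_ext data.
 */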
noinline void __set_page_owner(struct page *page, unsigned short order,
					gfp_t gfp_mask)
{
	struct page_ext *page_ext;
	depot_stack_handle_t handle;

	handle = save_stack(gfp_mask);

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;
	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
	page_ext_put(page_ext);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
	page_ext_put(page_ext);
}

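/*
 * When a high-order page is split, rewrite the recorded order of all @nr
 * constituent base pages to 0 so that each of them is accounted as an
 * independent order-0 allocation.
 */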
void __split_page_owner(struct page *page, unsigned int nr)
{
	int i;
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < nr; i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = 0;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

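/*
 * Copy the page_owner state from a folio that is being migrated to its
 * replacement, so that the new folio reports the original allocation
 * context.
 */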
void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
	struct page_ext *old_ext;
	struct page_ext *new_ext;
	struct page_owner *old_page_owner, *new_page_owner;

	old_ext = page_ext_get(&old->page);
	if (unlikely(!old_ext))
		return;

	new_ext = page_ext_get(&newfolio->page);
	if (unlikely(!new_ext)) {
		page_ext_put(old_ext);
		return;
	}

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	new_page_owner->order = old_page_owner->order;
	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
	new_page_owner->last_migrate_reason =
		old_page_owner->last_migrate_reason;
	new_page_owner->handle = old_page_owner->handle;
	new_page_owner->pid = old_page_owner->pid;
	new_page_owner->tgid = old_page_owner->tgid;
	new_page_owner->free_pid = old_page_owner->free_pid;
	new_page_owner->free_tgid = old_page_owner->free_tgid;
	new_page_owner->ts_nsec = old_page_owner->ts_nsec;
	new_page_owner->free_ts_nsec = old_page_owner->free_ts_nsec;
	strcpy(new_page_owner->comm, old_page_owner->comm);

	/*
	 * We don't clear the bit on the old folio as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the old folio to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
	page_ext_put(new_ext);
	page_ext_put(old_ext);
}

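/*
 * Used by /proc/pagetypeinfo: walk the zone pageblock by pageblock and
 * count the pageblocks that contain at least one page whose allocation
 * migratetype differs from the pageblock's migratetype.
 */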
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn, block_end_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		page = pfn_to_online_page(pfn);
		if (!page) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			/* The pageblock is online, no need to recheck. */
			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = buddy_order_unsafe(page);
				if (freepage_order <= MAX_PAGE_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
				goto ext_put_continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfp_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				page_ext_put(page_ext);
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
			page_ext_put(page_ext);
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}

/*
 * Look up the memcg information for @page and append it to the output
 * buffer.
 */
static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
					 struct page *page)
{
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
	struct mem_cgroup *memcg;
	bool online;
	char name[80];

	rcu_read_lock();
	memcg_data = READ_ONCE(page->memcg_data);
	if (!memcg_data)
		goto out_unlock;

	if (memcg_data & MEMCG_DATA_OBJCGS)
		ret += scnprintf(kbuf + ret, count - ret,
				"Slab cache page\n");

	memcg = page_memcg_check(page);
	if (!memcg)
		goto out_unlock;

	online = (memcg->css.flags & CSS_ONLINE);
	cgroup_name(memcg->css.cgroup, name, sizeof(name));
	ret += scnprintf(kbuf + ret, count - ret,
			"Charged %sto %smemcg %s\n",
			PageMemcgKmem(page) ? "(via objcg) " : "",
			online ? "" : "offline ",
			name);
out_unlock:
	rcu_read_unlock();
#endif /* CONFIG_MEMCG */

	return ret;
}

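/*
 * Format one page_owner record into a kernel buffer and copy it to
 * userspace. Returns the number of bytes written, -ENOMEM if the record
 * does not fit into the (at most page-sized) buffer, or -EFAULT if the
 * copy to userspace fails.
 */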
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = scnprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask, page_owner->pid,
			page_owner->tgid, page_owner->comm,
			page_owner->ts_nsec);

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfp_migratetype(page_owner->gfp_mask);
	ret += scnprintf(kbuf + ret, count - ret,
			"PFN 0x%lx type %s Block %lu type %s Flags %pGp\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			&page->flags);

	ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += scnprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	}

	ret = print_page_owner_memcg(kbuf, count, ret, page);

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

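/*
 * Dump the page_owner information for @page to the kernel log; used by
 * dump_page() when reporting on a problematic page.
 */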
void __dump_page_owner(const struct page *page)
{
	struct page_ext *page_ext = page_ext_get((void *)page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfp_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not present (never set?)\n");
		page_ext_put(page_ext);
		return;
	}

	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
		pr_alert("page_owner tracks the page as allocated\n");
	else
		pr_alert("page_owner tracks the page as freed\n");

	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
		 page_owner->pid, page_owner->tgid, page_owner->comm,
		 page_owner->ts_nsec, page_owner->free_ts_nsec);

	handle = READ_ONCE(page_owner->handle);
	if (!handle)
		pr_alert("page_owner allocation stack trace missing\n");
	else
		stack_depot_print(handle);

	handle = READ_ONCE(page_owner->free_handle);
	if (!handle) {
		pr_alert("page_owner free stack trace missing\n");
	} else {
		pr_alert("page last free pid %d tgid %d stack trace:\n",
			  page_owner->free_pid, page_owner->free_tgid);
		stack_depot_print(handle);
	}

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	page_ext_put(page_ext);
}

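/*
 * read() handler for the debugfs "page_owner" file: starting from the file
 * position (interpreted as a PFN), find the next page that is currently
 * tracked as allocated and emit one record for it. The next PFN to examine
 * is stored back in *ppos.
 */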
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	if (*ppos == 0)
		pfn = min_low_pfn;
	else
		pfn = *ppos;
	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * Take a temporary on-stack copy of the page_owner data so
		 * that the page_ext reference (and RCU read lock) can be
		 * dropped before we possibly sleep in copy_to_user() or in a
		 * GFP_KERNEL allocation.
		 */
		struct page_owner page_owner_tmp;

		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			if (freepage_order <= MAX_PAGE_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = page_ext_get(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			goto ext_put_continue;

		/*
		 * Although we do have the info about past allocation of free
		 * pages, it's not relevant for current memory usage.
		 */
		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
			goto ext_put_continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Don't print "tail" pages of high-order allocations as that
		 * would inflate the stats.
		 */
		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
			goto ext_put_continue;

		/*
		 * Access to page_owner->handle isn't synchronized, so read
		 * it once with READ_ONCE() and check it before use.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			goto ext_put_continue;

		/* Record the next PFN to read in the file offset */
		*ppos = pfn + 1;

		page_owner_tmp = *page_owner;
		page_ext_put(page_ext);
		return print_page_owner(buf, count, pfn, page,
				&page_owner_tmp, handle);
ext_put_continue:
		page_ext_put(page_ext);
	}

	return 0;
}

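/*
 * llseek() handler for the debugfs file. The file position is interpreted
 * as a PFN by read_page_owner(), so only SEEK_SET and SEEK_CUR make sense.
 */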
static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
{
	switch (orig) {
	case SEEK_SET:
		file->f_pos = offset;
		break;
	case SEEK_CUR:
		file->f_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	return file->f_pos;
}

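/*
 * Attribute every page in the zone that was allocated before page_owner was
 * initialized to the pre-recorded "early" stack handle, so that such pages
 * do not show up as untracked.
 */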
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a pageblock spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the early allocated page count will still be correct.
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page = pfn_to_page(pfn);
			struct page_ext *page_ext;

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = buddy_order_unsafe(page);

				if (order > 0 && order <= MAX_PAGE_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				goto ext_put_continue;

			/* Found early allocated page */
			__set_page_owner_handle(page_ext, early_handle,
						0, 0);
			count++;
ext_put_continue:
			page_ext_put(page_ext);
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read		= read_page_owner,
	.llseek		= lseek_page_owner,
};

static int __init pageowner_init(void)
{
	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);

	return 0;
}
late_initcall(pageowner_init)