xref: /linux/mm/cma_debug.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
228b24c1fSSasha Levin /*
328b24c1fSSasha Levin  * CMA DebugFS Interface
428b24c1fSSasha Levin  *
528b24c1fSSasha Levin  * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
628b24c1fSSasha Levin  */
728b24c1fSSasha Levin 
828b24c1fSSasha Levin 
928b24c1fSSasha Levin #include <linux/debugfs.h>
1028b24c1fSSasha Levin #include <linux/cma.h>
1126b02a1fSSasha Levin #include <linux/list.h>
1226b02a1fSSasha Levin #include <linux/kernel.h>
1326b02a1fSSasha Levin #include <linux/slab.h>
148325330bSSasha Levin #include <linux/mm_types.h>
1528b24c1fSSasha Levin 
1628b24c1fSSasha Levin #include "cma.h"
1728b24c1fSSasha Levin 
/*
 * Bookkeeping for one allocation made through the "alloc" debugfs file,
 * kept on the per-area mem_head list so it can later be released through
 * the "free" debugfs file.
 */
struct cma_mem {
	struct hlist_node node;	/* link in cma->mem_head */
	struct page *p;		/* first page of the allocated range */
	unsigned long n;	/* number of pages in the range */
};
2326b02a1fSSasha Levin 
cma_debugfs_get(void * data,u64 * val)2428b24c1fSSasha Levin static int cma_debugfs_get(void *data, u64 *val)
2528b24c1fSSasha Levin {
2628b24c1fSSasha Levin 	unsigned long *p = data;
2728b24c1fSSasha Levin 
2828b24c1fSSasha Levin 	*val = *p;
2928b24c1fSSasha Levin 
3028b24c1fSSasha Levin 	return 0;
3128b24c1fSSasha Levin }
32a9ea242aSzhong jiang DEFINE_DEBUGFS_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
3328b24c1fSSasha Levin 
/*
 * Report how many pages of the CMA area are currently allocated.
 * Counts the set bits of the allocation bitmap under cma->lock and
 * scales by order_per_bit (each bit covers 1 << order_per_bit pages).
 */
static int cma_used_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long used;

	spin_lock_irq(&cma->lock);
	/* pages counter is smaller than sizeof(int) */
	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
	spin_unlock_irq(&cma->lock);
	/* each bitmap bit represents 1 << order_per_bit pages */
	*val = (u64)used << cma->order_per_bit;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");
482e32b947SDmitry Safonov 
/*
 * Report the largest contiguous free chunk of the CMA area, in pages.
 * Walks the allocation bitmap under cma->lock, measuring each run of
 * zero bits, and scales the longest run by order_per_bit.
 */
static int cma_maxchunk_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long maxchunk = 0;
	unsigned long start, end = 0;
	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	for (;;) {
		/* next free run starts at the first clear bit at/after end */
		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
		if (start >= bitmap_maxno)
			break;
		/* run ends at the next set bit (or bitmap_maxno) */
		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
		maxchunk = max(end - start, maxchunk);
	}
	spin_unlock_irq(&cma->lock);
	*val = (u64)maxchunk << cma->order_per_bit;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");
702e32b947SDmitry Safonov 
/* Push a tracked allocation onto the area's mem_head list (LIFO). */
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
	spin_lock(&cma->mem_head_lock);
	hlist_add_head(&mem->node, &cma->mem_head);
	spin_unlock(&cma->mem_head_lock);
}
7726b02a1fSSasha Levin 
/*
 * Pop the most recently added tracked allocation off the area's
 * mem_head list, or return NULL if the list is empty.
 */
static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
{
	struct cma_mem *mem = NULL;

	spin_lock(&cma->mem_head_lock);
	if (!hlist_empty(&cma->mem_head)) {
		mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
		hlist_del_init(&mem->node);
	}
	spin_unlock(&cma->mem_head_lock);

	return mem;
}
918325330bSSasha Levin 
/*
 * Release up to @count pages previously allocated through the "alloc"
 * debugfs file.  Tracked entries are popped off the list one at a time;
 * an entry larger than the remaining count is split only when
 * order_per_bit == 0 (with larger bit granularity a partial block
 * cannot be released), otherwise the entry is pushed back untouched and
 * the loop stops.  Always returns 0.
 */
static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		if (mem->n <= count) {
			/* whole entry fits in the remaining budget */
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			/* split: free the first @count pages, keep the rest */
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;

}
1218325330bSSasha Levin 
cma_free_write(void * data,u64 val)1228325330bSSasha Levin static int cma_free_write(void *data, u64 val)
1238325330bSSasha Levin {
1248325330bSSasha Levin 	int pages = val;
1258325330bSSasha Levin 	struct cma *cma = data;
1268325330bSSasha Levin 
1278325330bSSasha Levin 	return cma_free_mem(cma, pages);
1288325330bSSasha Levin }
129a9ea242aSzhong jiang DEFINE_DEBUGFS_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");
1308325330bSSasha Levin 
/*
 * Allocate @count pages from @cma and record the range on the area's
 * mem_head list so it can later be freed through the "free" file.
 *
 * Returns 0 on success, -ENOMEM if either the bookkeeping allocation
 * or the CMA allocation fails.
 */
static int cma_alloc_mem(struct cma *cma, int count)
{
	struct cma_mem *mem;
	struct page *p;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/* align = 0, no_warn = false */
	p = cma_alloc(cma, count, 0, false);
	if (!p) {
		kfree(mem);
		return -ENOMEM;
	}

	mem->p = p;
	mem->n = count;

	cma_add_to_cma_mem_list(cma, mem);

	return 0;
}
15326b02a1fSSasha Levin 
cma_alloc_write(void * data,u64 val)15426b02a1fSSasha Levin static int cma_alloc_write(void *data, u64 val)
15526b02a1fSSasha Levin {
15626b02a1fSSasha Levin 	int pages = val;
15726b02a1fSSasha Levin 	struct cma *cma = data;
15826b02a1fSSasha Levin 
15926b02a1fSSasha Levin 	return cma_alloc_mem(cma, pages);
16026b02a1fSSasha Levin }
161a9ea242aSzhong jiang DEFINE_DEBUGFS_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
16226b02a1fSSasha Levin 
/*
 * Create <debugfs root>/cma/<area name> and populate it:
 * write-only "alloc"/"free" triggers, read-only base_pfn/count/
 * order_per_bit/used/maxchunk values, and the raw allocation bitmap
 * exported as an array of u32 words.
 */
static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
{
	struct dentry *tmp;

	tmp = debugfs_create_dir(cma->name, root_dentry);

	debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
	debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
	debugfs_create_file("base_pfn", 0444, tmp,
			    &cma->base_pfn, &cma_debugfs_fops);
	debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops);
	debugfs_create_file("order_per_bit", 0444, tmp,
			    &cma->order_per_bit, &cma_debugfs_fops);
	debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);
	debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);

	/* round the bit count up to whole u32 words for the array view */
	cma->dfs_bitmap.array = (u32 *)cma->bitmap;
	cma->dfs_bitmap.n_elements = DIV_ROUND_UP(cma_bitmap_maxno(cma),
						  BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("bitmap", 0444, tmp, &cma->dfs_bitmap);
}
18428b24c1fSSasha Levin 
cma_debugfs_init(void)18528b24c1fSSasha Levin static int __init cma_debugfs_init(void)
18628b24c1fSSasha Levin {
1875a7f1b2fSYue Hu 	struct dentry *cma_debugfs_root;
18828b24c1fSSasha Levin 	int i;
18928b24c1fSSasha Levin 
19028b24c1fSSasha Levin 	cma_debugfs_root = debugfs_create_dir("cma", NULL);
19128b24c1fSSasha Levin 
19228b24c1fSSasha Levin 	for (i = 0; i < cma_area_count; i++)
1935a7f1b2fSYue Hu 		cma_debugfs_add_one(&cma_areas[i], cma_debugfs_root);
19428b24c1fSSasha Levin 
19528b24c1fSSasha Levin 	return 0;
19628b24c1fSSasha Levin }
19728b24c1fSSasha Levin late_initcall(cma_debugfs_init);
198