xref: /linux/mm/cma_debug.c (revision 0825a6f98689d847ab8058c51b3a55f0abcc6563)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
228b24c1fSSasha Levin /*
328b24c1fSSasha Levin  * CMA DebugFS Interface
428b24c1fSSasha Levin  *
528b24c1fSSasha Levin  * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
628b24c1fSSasha Levin  */
728b24c1fSSasha Levin 
828b24c1fSSasha Levin 
928b24c1fSSasha Levin #include <linux/debugfs.h>
1028b24c1fSSasha Levin #include <linux/cma.h>
1126b02a1fSSasha Levin #include <linux/list.h>
1226b02a1fSSasha Levin #include <linux/kernel.h>
1326b02a1fSSasha Levin #include <linux/slab.h>
148325330bSSasha Levin #include <linux/mm_types.h>
1528b24c1fSSasha Levin 
1628b24c1fSSasha Levin #include "cma.h"
1728b24c1fSSasha Levin 
/*
 * One allocation made via the per-area "alloc" debugfs file.  Entries are
 * kept on cma->mem_head until released through the "free" file.
 */
struct cma_mem {
	struct hlist_node node;	/* link in cma->mem_head */
	struct page *p;		/* first page of the allocated range */
	unsigned long n;	/* number of pages still held by this entry */
};
2326b02a1fSSasha Levin 
2428b24c1fSSasha Levin static struct dentry *cma_debugfs_root;
2528b24c1fSSasha Levin 
2628b24c1fSSasha Levin static int cma_debugfs_get(void *data, u64 *val)
2728b24c1fSSasha Levin {
2828b24c1fSSasha Levin 	unsigned long *p = data;
2928b24c1fSSasha Levin 
3028b24c1fSSasha Levin 	*val = *p;
3128b24c1fSSasha Levin 
3228b24c1fSSasha Levin 	return 0;
3328b24c1fSSasha Levin }
3428b24c1fSSasha Levin DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
3528b24c1fSSasha Levin 
362e32b947SDmitry Safonov static int cma_used_get(void *data, u64 *val)
372e32b947SDmitry Safonov {
382e32b947SDmitry Safonov 	struct cma *cma = data;
392e32b947SDmitry Safonov 	unsigned long used;
402e32b947SDmitry Safonov 
412e32b947SDmitry Safonov 	mutex_lock(&cma->lock);
422e32b947SDmitry Safonov 	/* pages counter is smaller than sizeof(int) */
43d56e84b4SJoonsoo Kim 	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
442e32b947SDmitry Safonov 	mutex_unlock(&cma->lock);
452e32b947SDmitry Safonov 	*val = (u64)used << cma->order_per_bit;
462e32b947SDmitry Safonov 
472e32b947SDmitry Safonov 	return 0;
482e32b947SDmitry Safonov }
492e32b947SDmitry Safonov DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");
502e32b947SDmitry Safonov 
512e32b947SDmitry Safonov static int cma_maxchunk_get(void *data, u64 *val)
522e32b947SDmitry Safonov {
532e32b947SDmitry Safonov 	struct cma *cma = data;
542e32b947SDmitry Safonov 	unsigned long maxchunk = 0;
552e32b947SDmitry Safonov 	unsigned long start, end = 0;
56d56e84b4SJoonsoo Kim 	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
572e32b947SDmitry Safonov 
582e32b947SDmitry Safonov 	mutex_lock(&cma->lock);
592e32b947SDmitry Safonov 	for (;;) {
60d56e84b4SJoonsoo Kim 		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
612e32b947SDmitry Safonov 		if (start >= cma->count)
622e32b947SDmitry Safonov 			break;
63d56e84b4SJoonsoo Kim 		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
642e32b947SDmitry Safonov 		maxchunk = max(end - start, maxchunk);
652e32b947SDmitry Safonov 	}
662e32b947SDmitry Safonov 	mutex_unlock(&cma->lock);
672e32b947SDmitry Safonov 	*val = (u64)maxchunk << cma->order_per_bit;
682e32b947SDmitry Safonov 
692e32b947SDmitry Safonov 	return 0;
702e32b947SDmitry Safonov }
712e32b947SDmitry Safonov DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");
722e32b947SDmitry Safonov 
/* Push @mem onto @cma's list of debugfs-made allocations (LIFO order). */
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
	spin_lock(&cma->mem_head_lock);
	hlist_add_head(&mem->node, &cma->mem_head);
	spin_unlock(&cma->mem_head_lock);
}
7926b02a1fSSasha Levin 
808325330bSSasha Levin static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
818325330bSSasha Levin {
828325330bSSasha Levin 	struct cma_mem *mem = NULL;
838325330bSSasha Levin 
848325330bSSasha Levin 	spin_lock(&cma->mem_head_lock);
858325330bSSasha Levin 	if (!hlist_empty(&cma->mem_head)) {
868325330bSSasha Levin 		mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
878325330bSSasha Levin 		hlist_del_init(&mem->node);
888325330bSSasha Levin 	}
898325330bSSasha Levin 	spin_unlock(&cma->mem_head_lock);
908325330bSSasha Levin 
918325330bSSasha Levin 	return mem;
928325330bSSasha Levin }
938325330bSSasha Levin 
/*
 * Release up to @count pages back to @cma, consuming entries recorded by
 * the "alloc" debugfs file (most recent first).  Always returns 0, even
 * when fewer than @count pages could be released.
 *
 * NOTE(review): @count is int while mem->n is unsigned long, so the
 * "mem->n <= count" comparison promotes @count to unsigned long; a
 * negative @count (possible via u64 truncation in cma_free_write()) would
 * behave as a huge request — confirm callers never pass one.
 */
static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		if (mem->n <= count) {
			/* Whole entry fits in the remaining budget. */
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			/*
			 * Partial release is only possible with single-page
			 * granularity; shrink the entry and put it back.
			 */
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			/* Can't split a multi-page bit; re-queue and stop. */
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;

}
1238325330bSSasha Levin 
1248325330bSSasha Levin static int cma_free_write(void *data, u64 val)
1258325330bSSasha Levin {
1268325330bSSasha Levin 	int pages = val;
1278325330bSSasha Levin 	struct cma *cma = data;
1288325330bSSasha Levin 
1298325330bSSasha Levin 	return cma_free_mem(cma, pages);
1308325330bSSasha Levin }
1318325330bSSasha Levin DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");
1328325330bSSasha Levin 
13326b02a1fSSasha Levin static int cma_alloc_mem(struct cma *cma, int count)
13426b02a1fSSasha Levin {
13526b02a1fSSasha Levin 	struct cma_mem *mem;
13626b02a1fSSasha Levin 	struct page *p;
13726b02a1fSSasha Levin 
13826b02a1fSSasha Levin 	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
13926b02a1fSSasha Levin 	if (!mem)
14026b02a1fSSasha Levin 		return -ENOMEM;
14126b02a1fSSasha Levin 
142e2f466e3SLucas Stach 	p = cma_alloc(cma, count, 0, GFP_KERNEL);
14326b02a1fSSasha Levin 	if (!p) {
14426b02a1fSSasha Levin 		kfree(mem);
14526b02a1fSSasha Levin 		return -ENOMEM;
14626b02a1fSSasha Levin 	}
14726b02a1fSSasha Levin 
14826b02a1fSSasha Levin 	mem->p = p;
14926b02a1fSSasha Levin 	mem->n = count;
15026b02a1fSSasha Levin 
15126b02a1fSSasha Levin 	cma_add_to_cma_mem_list(cma, mem);
15226b02a1fSSasha Levin 
15326b02a1fSSasha Levin 	return 0;
15426b02a1fSSasha Levin }
15526b02a1fSSasha Levin 
15626b02a1fSSasha Levin static int cma_alloc_write(void *data, u64 val)
15726b02a1fSSasha Levin {
15826b02a1fSSasha Levin 	int pages = val;
15926b02a1fSSasha Levin 	struct cma *cma = data;
16026b02a1fSSasha Levin 
16126b02a1fSSasha Levin 	return cma_alloc_mem(cma, pages);
16226b02a1fSSasha Levin }
16326b02a1fSSasha Levin DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
16426b02a1fSSasha Levin 
16528b24c1fSSasha Levin static void cma_debugfs_add_one(struct cma *cma, int idx)
16628b24c1fSSasha Levin {
16728b24c1fSSasha Levin 	struct dentry *tmp;
16828b24c1fSSasha Levin 	char name[16];
16928b24c1fSSasha Levin 	int u32s;
17028b24c1fSSasha Levin 
171da094e42SPrakash Gupta 	scnprintf(name, sizeof(name), "cma-%s", cma->name);
17228b24c1fSSasha Levin 
17328b24c1fSSasha Levin 	tmp = debugfs_create_dir(name, cma_debugfs_root);
17428b24c1fSSasha Levin 
175*0825a6f9SJoe Perches 	debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
176*0825a6f9SJoe Perches 	debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
177*0825a6f9SJoe Perches 	debugfs_create_file("base_pfn", 0444, tmp,
17828b24c1fSSasha Levin 			    &cma->base_pfn, &cma_debugfs_fops);
179*0825a6f9SJoe Perches 	debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops);
180*0825a6f9SJoe Perches 	debugfs_create_file("order_per_bit", 0444, tmp,
18128b24c1fSSasha Levin 			    &cma->order_per_bit, &cma_debugfs_fops);
182*0825a6f9SJoe Perches 	debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);
183*0825a6f9SJoe Perches 	debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);
18428b24c1fSSasha Levin 
18528b24c1fSSasha Levin 	u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
186*0825a6f9SJoe Perches 	debugfs_create_u32_array("bitmap", 0444, tmp, (u32 *)cma->bitmap, u32s);
18728b24c1fSSasha Levin }
18828b24c1fSSasha Levin 
/*
 * Create the top-level "cma" debugfs directory and one subdirectory per
 * registered CMA area.
 *
 * NOTE(review): this NULL check matches older debugfs semantics; on
 * kernels where debugfs_create_dir() returns ERR_PTR() on failure (or
 * when debugfs is disabled) it never yields NULL — verify against the
 * debugfs version in this tree.
 */
static int __init cma_debugfs_init(void)
{
	int i;

	cma_debugfs_root = debugfs_create_dir("cma", NULL);
	if (!cma_debugfs_root)
		return -ENOMEM;

	for (i = 0; i < cma_area_count; i++)
		cma_debugfs_add_one(&cma_areas[i], i);

	return 0;
}
late_initcall(cma_debugfs_init);
203