xref: /linux/mm/cma_debug.c (revision 827634added7f38b7d724cab1dccdb2b004c13c3)
/*
 * CMA DebugFS Interface
 *
 * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
 */

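/*
 * Each registered CMA area gets a directory under <debugfs>/cma/cma-<N>
 * exposing read-only state (base_pfn, count, order_per_bit, used,
 * maxchunk, bitmap) plus write-only triggers (alloc, free) that exercise
 * cma_alloc()/cma_release() at runtime.
 *
 * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   echo 16 > /sys/kernel/debug/cma/cma-0/alloc   # allocate 16 pages
 *   cat /sys/kernel/debug/cma/cma-0/used          # pages currently used
 *   echo 16 > /sys/kernel/debug/cma/cma-0/free    # release them again
 */
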
#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "cma.h"

/*
 * One allocation made via the "alloc" file, remembered so that the
 * "free" file can hand it back later.
 */
struct cma_mem {
	struct hlist_node node;		/* entry in cma->mem_head */
	struct page *p;			/* first page of the allocation */
	unsigned long n;		/* allocation size, in pages */
};

static struct dentry *cma_debugfs_root;

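/* Generic getter: report an unsigned long field of struct cma as u64. */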
static int cma_debugfs_get(void *data, u64 *val)
{
	unsigned long *p = data;

	*val = *p;

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");

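/*
 * "used": number of pages currently allocated from this area. Each bitmap
 * bit covers 1 << order_per_bit pages, so the set-bit count is scaled up.
 */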
static int cma_used_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long used;

	mutex_lock(&cma->lock);
	/*
	 * The bitmap has cma_bitmap_maxno() bits, not cma->count; the
	 * size fits in an int, so the cast is safe.
	 */
	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
	mutex_unlock(&cma->lock);
	*val = (u64)used << cma->order_per_bit;

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");

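/*
 * "maxchunk": size, in pages, of the largest contiguous free run in the
 * area, found by scanning the bitmap for the longest stretch of zero bits.
 */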
static int cma_maxchunk_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long maxchunk = 0;
	unsigned long start, end = 0;
	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	for (;;) {
		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
		if (start >= bitmap_maxno)
			break;
		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
		maxchunk = max(end - start, maxchunk);
	}
	mutex_unlock(&cma->lock);
	*val = (u64)maxchunk << cma->order_per_bit;

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");

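/*
 * The "alloc" file pushes each allocation onto cma->mem_head; the "free"
 * file pops entries off again. The list is shared, hence the spinlock.
 */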
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
	spin_lock(&cma->mem_head_lock);
	hlist_add_head(&mem->node, &cma->mem_head);
	spin_unlock(&cma->mem_head_lock);
}

static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
{
	struct cma_mem *mem = NULL;

	spin_lock(&cma->mem_head_lock);
	if (!hlist_empty(&cma->mem_head)) {
		mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
		hlist_del_init(&mem->node);
	}
	spin_unlock(&cma->mem_head_lock);

	return mem;
}

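/*
 * Release up to 'count' pages back to the area. A recorded allocation may
 * be split when order_per_bit == 0; otherwise only whole entries can be
 * returned, and a partial request stops early.
 */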
static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		if (mem->n <= count) {
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;
}

static int cma_free_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_free_mem(cma, pages);
}
DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");

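/*
 * Allocate 'count' pages from the area and remember them so they can be
 * handed back through the "free" file.
 */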
static int cma_alloc_mem(struct cma *cma, int count)
{
	struct cma_mem *mem;
	struct page *p;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	p = cma_alloc(cma, count, 0);
	if (!p) {
		kfree(mem);
		return -ENOMEM;
	}

	mem->p = p;
	mem->n = count;

	cma_add_to_cma_mem_list(cma, mem);

	return 0;
}

static int cma_alloc_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_alloc_mem(cma, pages);
}
DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");

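/*
 * Populate <debugfs>/cma/cma-<idx> for one area. All files, including the
 * alloc/free triggers, live in the per-area directory.
 */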
static void cma_debugfs_add_one(struct cma *cma, int idx)
{
	struct dentry *tmp;
	char name[16];
	int u32s;

	scnprintf(name, sizeof(name), "cma-%d", idx);

	tmp = debugfs_create_dir(name, cma_debugfs_root);

	debugfs_create_file("alloc", S_IWUSR, tmp, cma,
				&cma_alloc_fops);

	debugfs_create_file("free", S_IWUSR, tmp, cma,
				&cma_free_fops);

	debugfs_create_file("base_pfn", S_IRUGO, tmp,
				&cma->base_pfn, &cma_debugfs_fops);
	debugfs_create_file("count", S_IRUGO, tmp,
				&cma->count, &cma_debugfs_fops);
	debugfs_create_file("order_per_bit", S_IRUGO, tmp,
				&cma->order_per_bit, &cma_debugfs_fops);
	debugfs_create_file("used", S_IRUGO, tmp, cma, &cma_used_fops);
	debugfs_create_file("maxchunk", S_IRUGO, tmp, cma, &cma_maxchunk_fops);

	u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32 *)cma->bitmap, u32s);
}

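/* Create the <debugfs>/cma root and one subdirectory per registered area. */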
static int __init cma_debugfs_init(void)
{
	int i;

	cma_debugfs_root = debugfs_create_dir("cma", NULL);
	if (!cma_debugfs_root)
		return -ENOMEM;

	for (i = 0; i < cma_area_count; i++)
		cma_debugfs_add_one(&cma_areas[i], i);

	return 0;
}
late_initcall(cma_debugfs_init);