xref: /linux/tools/testing/shared/linux.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

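/*
 * Globals shared with the kernel-API shims used by the radix tree and
 * maple tree test suites: nr_allocated tracks live objects across all
 * caches (useful for leak checks), preempt_count backs the preemption
 * helpers, and test_verbose enables extra test output.
 */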
int nr_allocated;
int preempt_count;
int test_verbose;

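/*
 * Minimal userspace stand-in for a kernel slab cache.  Objects freed to
 * the cache may be kept on a small singly linked free list (threaded
 * through the radix_tree_node parent pointer) so later allocations can
 * reuse them; everything else falls back to malloc()/free().
 */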
struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
	unsigned int non_kernel;
	unsigned long nr_allocated;
	unsigned long nr_tallocated;
	bool exec_callback;
	void (*callback)(void *);
	void *private;
};

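/*
 * Test-only knobs and counters.  non_kernel is a budget of objects that
 * may still be handed out for non-sleeping (no __GFP_DIRECT_RECLAIM)
 * allocations; once it is exhausted such allocations fail and arm
 * exec_callback, so the next allocation first invokes the registered
 * callback with the stored private pointer.
 */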
void kmem_cache_set_callback(struct kmem_cache *cachep, void (*callback)(void *))
{
	cachep->callback = callback;
}

void kmem_cache_set_private(struct kmem_cache *cachep, void *private)
{
	cachep->private = private;
}

void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
}

unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
{
	return cachep->size * cachep->nr_allocated;
}

unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
{
	return cachep->nr_allocated;
}

unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
{
	return cachep->nr_tallocated;
}

void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
{
	cachep->nr_tallocated = 0;
}

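/*
 * Emulates kmem_cache_alloc_lru().  Sleeping allocations either pop an
 * object off the cache's free list or fall back to malloc()/
 * posix_memalign(); non-sleeping allocations additionally consume the
 * non_kernel budget and fail once it runs out.  The lru argument is
 * accepted for API compatibility and ignored.
 */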
void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
		int gfp)
{
	void *p;

	if (cachep->exec_callback) {
		if (cachep->callback)
			cachep->callback(cachep->private);
		cachep->exec_callback = false;
	}

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel) {
			cachep->exec_callback = true;
			return NULL;
		}

		cachep->non_kernel--;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;
		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align) {
			/* posix_memalign() returns 0 on success or an errno value */
			if (posix_memalign(&p, cachep->align, cachep->size) != 0)
				return NULL;
		} else {
			p = malloc(cachep->size);
			if (!p)
				return NULL;
		}

		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&cachep->nr_allocated);
	uatomic_inc(&nr_allocated);
	uatomic_inc(&cachep->nr_tallocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}

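/*
 * Either poison and free the object or push it back on the cache's free
 * list for reuse.  Only unaligned caches keep objects around, and only
 * while the free list holds no more than ten of them; everything else is
 * released immediately.
 */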
void __kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}

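/*
 * Free with cachep->lock already held by the caller; updates the global
 * and per-cache allocation counters before handing the object to
 * __kmem_cache_free_locked().
 */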
void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	__kmem_cache_free_locked(cachep, objp);
}

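/* Userspace counterpart of kmem_cache_free(): lock, free, unlock. */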
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}

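/* Free an array of objects back to the cache under a single lock hold. */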
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	if (kmalloc_verbose)
		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);

	pthread_mutex_lock(&cachep->lock);
	for (int i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}

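/* No-op in userspace: there are no partial slabs to reclaim here. */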
void kmem_cache_shrink(struct kmem_cache *cachep)
{
}

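/*
 * Emulates kmem_cache_alloc_bulk().  The request is satisfied either
 * entirely from the free list or entirely from fresh allocations, and it
 * is all-or-nothing: on any failure (including an exhausted non_kernel
 * budget) everything allocated so far is returned to the cache and 0 is
 * reported to the caller.
 */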
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
			  void **p)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk alloc %lu\n", size);

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			if (cachep->align) {
				/* posix_memalign() returns 0 on success or an errno value */
				if (posix_memalign(&p[i], cachep->align,
					       cachep->size) != 0)
					break;
			} else {
				p[i] = malloc(cachep->size);
				if (!p[i])
					break;
			}
			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	if (i < size) {
		size = i;
		pthread_mutex_lock(&cachep->lock);
		for (i = 0; i < size; i++)
			__kmem_cache_free_locked(cachep, p[i]);
		pthread_mutex_unlock(&cachep->lock);
		return 0;
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		if (kmalloc_verbose)
			printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}

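/*
 * Create a cache.  name and flags are accepted for API compatibility but
 * are not used by this userspace implementation.
 *
 * Illustrative usage (a sketch; kmem_cache_alloc() is the wrapper used
 * elsewhere in this test suite, not the kernel symbol):
 *
 *	struct kmem_cache *cachep;
 *	void *obj;
 *
 *	cachep = kmem_cache_create("example", 256, 0, SLAB_PANIC, NULL);
 *	obj = kmem_cache_alloc(cachep, __GFP_DIRECT_RECLAIM);
 *	kmem_cache_free(cachep, obj);
 */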
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->nr_allocated = 0;
	ret->nr_tallocated = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	ret->non_kernel = 0;
	ret->exec_callback = false;
	ret->callback = NULL;
	ret->private = NULL;
	return ret;
}

/*
 * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Test the bulk allocators on an unaligned kmem_cache so that bulk
	 * alloc/free is forced to reuse objects from the cache's free list.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches will immediately free */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}