xref: /linux/drivers/gpu/drm/drm_buddy.c (revision 64b14a184e83eb62ea0615e31a409956049d40e7)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include <linux/kmemleak.h>
7 #include <linux/module.h>
8 #include <linux/sizes.h>
9 
10 #include <drm/drm_buddy.h>
11 
12 static struct kmem_cache *slab_blocks;
13 
14 static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
15 					       struct drm_buddy_block *parent,
16 					       unsigned int order,
17 					       u64 offset)
18 {
19 	struct drm_buddy_block *block;
20 
21 	BUG_ON(order > DRM_BUDDY_MAX_ORDER);
22 
23 	block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
24 	if (!block)
25 		return NULL;
26 
27 	block->header = offset;
28 	block->header |= order;
29 	block->parent = parent;
30 
31 	BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
32 	return block;
33 }
34 
/* Return a block descriptor to the dedicated slab cache. @mm is unused. */
static void drm_block_free(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
	kmem_cache_free(slab_blocks, block);
}
40 
41 static void mark_allocated(struct drm_buddy_block *block)
42 {
43 	block->header &= ~DRM_BUDDY_HEADER_STATE;
44 	block->header |= DRM_BUDDY_ALLOCATED;
45 
46 	list_del(&block->link);
47 }
48 
49 static void mark_free(struct drm_buddy *mm,
50 		      struct drm_buddy_block *block)
51 {
52 	block->header &= ~DRM_BUDDY_HEADER_STATE;
53 	block->header |= DRM_BUDDY_FREE;
54 
55 	list_add(&block->link,
56 		 &mm->free_list[drm_buddy_block_order(block)]);
57 }
58 
59 static void mark_split(struct drm_buddy_block *block)
60 {
61 	block->header &= ~DRM_BUDDY_HEADER_STATE;
62 	block->header |= DRM_BUDDY_SPLIT;
63 
64 	list_del(&block->link);
65 }
66 
/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	/* chunk_size is the allocation granularity: at least a page ... */
	if (chunk_size < PAGE_SIZE)
		return -EINVAL;

	/* ... and a power of two, so orders map cleanly onto sizes. */
	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	/* Trailing sub-chunk remainder cannot be managed; drop it. */
	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

	/* One free list per order, 0..max_order inclusive. */
	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	/*
	 * Each set bit in size corresponds to one power-of-two component,
	 * and each component becomes its own root block below.
	 */
	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct drm_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct drm_buddy_block *root;
		unsigned int order;
		u64 root_size;

		/* Peel off the largest power-of-two prefix of what's left. */
		root_size = rounddown_pow_of_two(size);
		order = ilog2(root_size) - ilog2(chunk_size);

		root = drm_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		BUG_ON(i > mm->max_order);
		BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	/* Unwind only the roots allocated so far (i is the count). */
	while (i--)
		drm_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
161 
162 /**
163  * drm_buddy_fini - tear down the memory manager
164  *
165  * @mm: DRM buddy manager to free
166  *
167  * Cleanup memory manager resources and the freelist
168  */
169 void drm_buddy_fini(struct drm_buddy *mm)
170 {
171 	int i;
172 
173 	for (i = 0; i < mm->n_roots; ++i) {
174 		WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
175 		drm_block_free(mm, mm->roots[i]);
176 	}
177 
178 	WARN_ON(mm->avail != mm->size);
179 
180 	kfree(mm->roots);
181 	kfree(mm->free_list);
182 }
183 EXPORT_SYMBOL(drm_buddy_fini);
184 
185 static int split_block(struct drm_buddy *mm,
186 		       struct drm_buddy_block *block)
187 {
188 	unsigned int block_order = drm_buddy_block_order(block) - 1;
189 	u64 offset = drm_buddy_block_offset(block);
190 
191 	BUG_ON(!drm_buddy_block_is_free(block));
192 	BUG_ON(!drm_buddy_block_order(block));
193 
194 	block->left = drm_block_alloc(mm, block, block_order, offset);
195 	if (!block->left)
196 		return -ENOMEM;
197 
198 	block->right = drm_block_alloc(mm, block, block_order,
199 				       offset + (mm->chunk_size << block_order));
200 	if (!block->right) {
201 		drm_block_free(mm, block->left);
202 		return -ENOMEM;
203 	}
204 
205 	mark_free(mm, block->left);
206 	mark_free(mm, block->right);
207 
208 	mark_split(block);
209 
210 	return 0;
211 }
212 
213 static struct drm_buddy_block *
214 get_buddy(struct drm_buddy_block *block)
215 {
216 	struct drm_buddy_block *parent;
217 
218 	parent = block->parent;
219 	if (!parent)
220 		return NULL;
221 
222 	if (parent->left == block)
223 		return parent->right;
224 
225 	return parent->left;
226 }
227 
/*
 * Release a block back into the tree, greedily merging it with its buddy
 * and walking upward whenever the buddy is also free, so larger blocks
 * are reconstituted.  The highest fully-merged ancestor ends up on the
 * free list for its order.
 *
 * NOTE(review): the merge path slab-frees @block without a list_del();
 * callers must pass a block that is not currently linked on a free list
 * (e.g. an allocated block) — verify the alloc-failure callers honour
 * this.
 */
static void __drm_buddy_free(struct drm_buddy *mm,
			     struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

	while ((parent = block->parent)) {
		struct drm_buddy_block *buddy;

		buddy = get_buddy(block);

		/* Buddy still in use (or split): no merge at this level. */
		if (!drm_buddy_block_is_free(buddy))
			break;

		/* Take the buddy off its free list before releasing both. */
		list_del(&buddy->link);

		drm_block_free(mm, block);
		drm_block_free(mm, buddy);

		block = parent;
	}

	mark_free(mm, block);
}
251 
/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed; must currently be in the allocated state
 */
void drm_buddy_free_block(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	BUG_ON(!drm_buddy_block_is_allocated(block));
	/* Return the block's capacity to the available-space accounting. */
	mm->avail += drm_buddy_block_size(mm, block);
	__drm_buddy_free(mm, block);
}
EXPORT_SYMBOL(drm_buddy_free_block);
266 
/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks; reinitialized (emptied) on
 * return
 */
void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
{
	struct drm_buddy_block *block, *on;

	/* _safe variant: freeing a block unlinks it from @objects. */
	list_for_each_entry_safe(block, on, objects, link) {
		drm_buddy_free_block(mm, block);
		/* Long lists may be freed here; don't hog the CPU. */
		cond_resched();
	}
	INIT_LIST_HEAD(objects);
}
EXPORT_SYMBOL(drm_buddy_free_list);
284 
/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @order: size of the allocation
 *
 * The order value here translates to:
 *
 * 0 = 2^0 * mm->chunk_size
 * 1 = 2^1 * mm->chunk_size
 * 2 = 2^2 * mm->chunk_size
 *
 * Returns:
 * allocated ptr to the &drm_buddy_block on success
 */
struct drm_buddy_block *
drm_buddy_alloc_blocks(struct drm_buddy *mm, unsigned int order)
{
	struct drm_buddy_block *block = NULL;
	unsigned int i;
	int err;

	/* Find the smallest free block of at least the requested order. */
	for (i = order; i <= mm->max_order; ++i) {
		block = list_first_entry_or_null(&mm->free_list[i],
						 struct drm_buddy_block,
						 link);
		if (block)
			break;
	}

	if (!block)
		return ERR_PTR(-ENOSPC);

	BUG_ON(!drm_buddy_block_is_free(block));

	/* Split the candidate down until it matches the requested order. */
	while (i != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto out_free;

		/* Go low */
		block = block->left;
		i--;
	}

	mark_allocated(block);
	mm->avail -= drm_buddy_block_size(mm, block);
	kmemleak_update_trace(block);
	return block;

out_free:
	/*
	 * The goto is only taken from inside the loop above, so i != order
	 * holds; merge the partially split chain back together.
	 *
	 * NOTE(review): at this point @block is still linked on a free list,
	 * yet __drm_buddy_free()'s merge path slab-frees it without a
	 * list_del() — confirm this ENOMEM path cannot leave a freed block
	 * on the free list.
	 */
	if (i != order)
		__drm_buddy_free(mm, block);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
341 
/* True when inclusive ranges [s1, e1] and [s2, e2] share any chunk. */
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return !(e1 < s2 || s1 > e2);
}
346 
/* True when inclusive range [s1, e1] fully covers [s2, e2]. */
static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s2 >= s1 && e2 <= e1;
}
351 
/**
 * drm_buddy_alloc_range - allocate range
 *
 * @mm: DRM buddy manager to allocate from
 * @blocks: output list head to add allocated blocks
 * @start: start of the allowed range for this block
 * @size: size of the allocation
 *
 * Intended for pre-allocating portions of the address space, for example to
 * reserve a block for the initial framebuffer or similar, hence the expectation
 * here is that drm_buddy_alloc_blocks() is still the main vehicle for
 * allocations, so if that's not the case then the drm_mm range allocator is
 * probably a much better fit, and so you should probably go use that instead.
 *
 * Note that it's safe to chain together multiple alloc_ranges
 * with the same blocks list
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_range(struct drm_buddy *mm,
			  struct list_head *blocks,
			  u64 start, u64 size)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(allocated);
	LIST_HEAD(dfs);
	u64 end;
	int err;
	int i;

	if (size < mm->chunk_size)
		return -EINVAL;

	/* Both bounds must sit on chunk boundaries. */
	if (!IS_ALIGNED(size | start, mm->chunk_size))
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Seed the DFS stack with every root, in address order. */
	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	/* Work with inclusive end offsets throughout. */
	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		/* Pop the next block; tmp_link keeps free-list links intact. */
		block = list_first_entry_or_null(&dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		/* Entirely outside the requested range: skip this subtree. */
		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		/* Fully inside the range: claim the whole block as-is. */
		if (contains(start, end, block_start, block_end)) {
			if (!drm_buddy_block_is_free(block)) {
				err = -ENOSPC;
				goto err_free;
			}

			mark_allocated(block);
			mm->avail -= drm_buddy_block_size(mm, block);
			list_add_tail(&block->link, &allocated);
			continue;
		}

		/* Partial overlap: descend, splitting on demand. */
		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		/* Push right then left so the left child is visited first. */
		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	list_splice_tail(&allocated, blocks);
	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block);

err_free:
	drm_buddy_free_list(mm, &allocated);
	return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_range);
463 
464 /**
465  * drm_buddy_block_print - print block information
466  *
467  * @mm: DRM buddy manager
468  * @block: DRM buddy block
469  * @p: DRM printer to use
470  */
471 void drm_buddy_block_print(struct drm_buddy *mm,
472 			   struct drm_buddy_block *block,
473 			   struct drm_printer *p)
474 {
475 	u64 start = drm_buddy_block_offset(block);
476 	u64 size = drm_buddy_block_size(mm, block);
477 
478 	drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
479 }
480 EXPORT_SYMBOL(drm_buddy_block_print);
481 
/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
	int order;

	drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
		   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);

	/* Walk the free lists from largest order down to order 0. */
	for (order = mm->max_order; order >= 0; order--) {
		struct drm_buddy_block *block;
		u64 count = 0, free;

		list_for_each_entry(block, &mm->free_list[order], link) {
			/* Only free blocks may live on a free list. */
			BUG_ON(!drm_buddy_block_is_free(block));
			count++;
		}

		drm_printf(p, "order-%d ", order);

		/* Total free bytes at this order: count * block size. */
		free = count * (mm->chunk_size << order);
		if (free < SZ_1M)
			drm_printf(p, "free: %lluKiB", free >> 10);
		else
			drm_printf(p, "free: %lluMiB", free >> 20);

		drm_printf(p, ", pages: %llu\n", count);
	}
}
EXPORT_SYMBOL(drm_buddy_print);
516 
/* Module unload: release the block-descriptor slab cache. */
static void drm_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}
521 
522 static int __init drm_buddy_module_init(void)
523 {
524 	slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
525 	if (!slab_blocks)
526 		return -ENOMEM;
527 
528 	return 0;
529 }
530 
531 module_init(drm_buddy_module_init);
532 module_exit(drm_buddy_module_exit);
533 
534 MODULE_DESCRIPTION("DRM Buddy Allocator");
535 MODULE_LICENSE("Dual MIT/GPL");
536