/*
 * A Remote Heap.  Remote means that we don't touch the memory that the
 * heap points to. Normal heap implementations use the memory they manage
 * to place their list. We cannot do that because the memory we manage may
 * have special properties, for example it is uncacheable or of different
 * endianness.
 *
 * Author: Pantelis Antoniou <panto@intracom.gr>
 *
 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/rheap.h>

/*
 * Fixup a list_head, needed when copying lists.  If the pointers fall
 * between s and e, apply the delta.  This assumes that
 * sizeof(struct list_head *) == sizeof(unsigned long *).
 */
static inline void fixup(unsigned long s, unsigned long e, int d,
			 struct list_head *l)
{
	unsigned long *pp;

	pp = (unsigned long *)&l->next;
	if (*pp >= s && *pp < e)
		*pp += d;

	pp = (unsigned long *)&l->prev;
	if (*pp >= s && *pp < e)
		*pp += d;
}

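/*
 * A worked example of the fixup above (addresses are illustrative):
 * if the old descriptor array occupied [0x1000, 0x1100) and the copy
 * now lives at 0x2400, then d = 0x2400 - 0x1000 = 0x1400.  A list
 * pointer holding 0x1040 falls inside [s, e) and is rewritten to
 * 0x2440, while a pointer to one of the list heads embedded in
 * rh_info_t itself (outside the old array) is left untouched.
 */
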
/* Grow the block descriptor array */
static int grow(rh_info_t * info, int max_blocks)
{
	rh_block_t *block, *blk;
	int i, new_blocks;
	int delta;
	unsigned long blks, blke;

	if (max_blocks <= info->max_blocks)
		return -EINVAL;

	new_blocks = max_blocks - info->max_blocks;

	block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL);
	if (block == NULL)
		return -ENOMEM;

	if (info->max_blocks > 0) {

		/* copy old block area */
		memcpy(block, info->block,
		       sizeof(rh_block_t) * info->max_blocks);

		delta = (char *)block - (char *)info->block;

		/* and fixup list pointers */
		blks = (unsigned long)info->block;
		blke = (unsigned long)(info->block + info->max_blocks);

		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
			fixup(blks, blke, delta, &blk->list);

		fixup(blks, blke, delta, &info->empty_list);
		fixup(blks, blke, delta, &info->free_list);
		fixup(blks, blke, delta, &info->taken_list);

		/* free the old allocated memory */
		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
			kfree(info->block);
	}

	info->block = block;
	info->empty_slots += new_blocks;
	info->max_blocks = max_blocks;
	info->flags &= ~RHIF_STATIC_BLOCK;

	/* add all new slots to the empty list */
	blk = block + info->max_blocks - new_blocks;
	for (i = 0; i < new_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);

	return 0;
}

/*
 * Assure at least the required number of empty slots.  If this function
 * causes the block area to grow then all pointers kept to the block
 * area become invalid!
 */
static int assure_empty(rh_info_t * info, int slots)
{
	int max_blocks;

	/* This function is not meant to be used to grow uncontrollably */
	if (slots >= 4)
		return -EINVAL;

	/* Enough space */
	if (info->empty_slots >= slots)
		return 0;

	/* Round up to the next multiple of 16 slots */
	max_blocks = ((info->max_blocks + slots) + 15) & ~15;

	return grow(info, max_blocks);
}

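/*
 * A worked example of the rounding above (values are illustrative):
 * with max_blocks = 17 and slots = 2, (17 + 2 + 15) & ~15 = 32, so the
 * descriptor array grows to the next multiple of 16 slots rather than
 * by just the two slots requested.
 */
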
static rh_block_t *get_slot(rh_info_t * info)
{
	rh_block_t *blk;

	/* No empty slots left and no way to extend here; the caller
	 * should have called assure_empty() beforehand */
	if (info->empty_slots == 0) {
		printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
		return NULL;
	}

	/* Get empty slot to use */
	blk = list_entry(info->empty_list.next, rh_block_t, list);
	list_del_init(&blk->list);
	info->empty_slots--;

	/* Initialize */
	blk->start = NULL;
	blk->size = 0;
	blk->owner = NULL;

	return blk;
}

static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
	list_add(&blk->list, &info->empty_list);
	info->empty_slots++;
}

static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	rh_block_t *before;
	rh_block_t *after;
	rh_block_t *next;
	int size;
	unsigned long s, e, bs, be;
	struct list_head *l;

	/* We assume that they are aligned properly */
	size = blkn->size;
	s = (unsigned long)blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
	 * (if any) */
	before = NULL;
	after = NULL;
	next = NULL;

	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

		bs = (unsigned long)blk->start;
		be = bs + blk->size;

		/* remember the first block starting at or below the new
		 * one; it is the insertion point if no merge happens */
		if (next == NULL && s >= bs)
			next = blk;

		if (be == s)
			before = blk;

		if (e == bs)
			after = blk;

		/* Both neighbours found; stop early */
		if (before != NULL && after != NULL)
			break;
	}

	/* Now check if they are really adjacent */
	if (before != NULL && s != (unsigned long)before->start + before->size)
		before = NULL;

	if (after != NULL && e != (unsigned long)after->start)
		after = NULL;

	/* No coalescing; list insert and return */
	if (before == NULL && after == NULL) {

		if (next != NULL)
			list_add(&blkn->list, &next->list);
		else
			list_add(&blkn->list, &info->free_list);

		return;
	}

	/* The new descriptor has been merged; we don't need it anymore */
	release_slot(info, blkn);

	/* Grow the before block */
	if (before != NULL && after == NULL) {
		before->size += size;
		return;
	}

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
		after->start = (int8_t *)after->start - size;
		after->size += size;
		return;
	}

	/* Grow the before block, and release the after block */
	before->size += size + after->size;
	list_del(&after->list);
	release_slot(info, after);
}

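/*
 * The three coalescing cases above, sketched (addresses grow to the
 * right; [====] is the block being freed):
 *
 *	before only:	[before][====]		-> [before........]
 *	after only:		[====][after]	-> [........after]
 *	both:		[before][====][after]	-> [before..............]
 *
 * In the first two cases the new descriptor is released and one
 * neighbour is grown; in the last, the new descriptor and the 'after'
 * descriptor are both released and 'before' absorbs everything.
 */
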
static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	struct list_head *l;

	/* Keep the taken list sorted by start address; insert before
	 * the first block that starts after the new one */
	list_for_each(l, &info->taken_list) {
		blk = list_entry(l, rh_block_t, list);
		if (blk->start > blkn->start) {
			list_add_tail(&blkn->list, &blk->list);
			return;
		}
	}

	list_add_tail(&blkn->list, &info->taken_list);
}

/*
 * Create a remote heap dynamically.  Note that no memory for the block
 * descriptors is allocated here; it is allocated upon first use.
 */
rh_info_t *rh_create(unsigned int alignment)
{
	rh_info_t *info;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return ERR_PTR(-EINVAL);

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		return ERR_PTR(-ENOMEM);

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = NULL;
	info->max_blocks = 0;
	info->empty_slots = 0;
	info->flags = 0;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	return info;
}

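/*
 * Typical dynamic use of this heap, as a sketch.  The names dpram_base,
 * dpram_size and "my-driver" are illustrative only, not taken from any
 * real caller:
 *
 *	rh_info_t *rh;
 *	void *buf;
 *
 *	rh = rh_create(4);
 *	if (IS_ERR(rh))
 *		return PTR_ERR(rh);
 *	rh_attach_region(rh, (void *)dpram_base, dpram_size);
 *	buf = rh_alloc(rh, 128, "my-driver");
 *	if (!IS_ERR(buf)) {
 *		... use the 128 bytes at buf ...
 *		rh_free(rh, buf);
 *	}
 *	rh_destroy(rh);
 *
 * Note that the allocators here return ERR_PTR() values on failure, so
 * callers must check with IS_ERR() rather than comparing against NULL.
 */
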
/*
 * Destroy a dynamically created remote heap.  Deallocate only if the
 * areas are not static
 */
void rh_destroy(rh_info_t * info)
{
	if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
		kfree(info->block);

	if ((info->flags & RHIF_STATIC_INFO) == 0)
		kfree(info);
}

/*
 * Initialize in place a remote heap info block.  This is needed to support
 * operation very early in the startup of the kernel, when it is not yet safe
 * to call kmalloc.
 */
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
	     rh_block_t * block)
{
	int i;
	rh_block_t *blk;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return;

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = block;
	info->max_blocks = max_blocks;
	info->empty_slots = max_blocks;
	info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	/* Add all block descriptors to the empty list */
	for (i = 0, blk = block; i < max_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);
}

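/*
 * Early-boot use of rh_init(), sketched with illustrative names
 * (early_rh, early_blocks, immr_base and immr_size are hypothetical).
 * Both the info structure and the descriptor array live in static
 * storage, so no kmalloc is needed:
 *
 *	static rh_block_t early_blocks[16];
 *	static rh_info_t early_rh;
 *
 *	rh_init(&early_rh, 8, ARRAY_SIZE(early_blocks), early_blocks);
 *	rh_attach_region(&early_rh, (void *)immr_base, immr_size);
 *
 * Since rh_init() sets RHIF_STATIC_INFO | RHIF_STATIC_BLOCK, a later
 * rh_destroy() will not kfree() either area; grow() may still replace
 * the static descriptor array with a kmalloc'ed one once it is safe.
 */
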
/* Attach a free memory region, coalescing adjacent regions */
int rh_attach_region(rh_info_t * info, void *start, int size)
{
	rh_block_t *blk;
	unsigned long s, e, m;
	int r;

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	/* Take final values */
	start = (void *)s;
	size = (int)(e - s);

	/* Grow the blocks, if needed */
	r = assure_empty(info, 1);
	if (r < 0)
		return r;

	blk = get_slot(info);
	blk->start = start;
	blk->size = size;
	blk->owner = NULL;

	attach_free_block(info, blk);

	return 0;
}

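/*
 * A worked example of the rounding in rh_attach_region() (numbers are
 * illustrative): with alignment 16 (m = 15), attaching start = 0x1004,
 * size = 0x100 yields s = (0x1004 + 15) & ~15 = 0x1010 and
 * e = 0x1104 & ~15 = 0x1100, so the attached region shrinks to
 * 0x1010-0x1100 (0xf0 bytes) and the unaligned head and tail are lost.
 */
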
/* Detach the given address range, splitting a free block if needed. */
void *rh_detach_region(rh_info_t * info, void *start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return ERR_PTR(-EINVAL);

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 1) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = (unsigned long)blk->start;
		be = (unsigned long)blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return (void *)s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start = (int8_t *)blk->start + size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk = get_slot(info);
		newblk->start = (void *)e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return (void *)s;
}

void *rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk;
	rh_block_t *newblk;
	void *start;

	/* Validate size; alignment must be a power of two */
	if (size <= 0 || (alignment & (alignment - 1)) != 0)
		return ERR_PTR(-EINVAL);

	/* An alignment larger than the default rheap alignment needs
	 * extra room for the pointer fixup below */
	if (alignment > info->alignment)
		size += alignment - 1;

	/* Align to configured alignment */
	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);

	if (assure_empty(info, 1) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		if (size <= blk->size)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Just fits */
	if (blk->size == size) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;
		start = blk->start;

		attach_taken_block(info, blk);

		return start;
	}

	newblk = get_slot(info);
	newblk->start = blk->start;
	newblk->size = size;
	newblk->owner = owner;

	/* blk still in free list, with updated start, size */
	blk->start = (int8_t *)blk->start + size;
	blk->size -= size;

	start = newblk->start;

	attach_taken_block(info, newblk);

	/* For a larger alignment return the fixed-up pointer.  This is
	 * no problem for the deallocator, since we scan for pointers
	 * that lie within the blocks. */
	if (alignment > info->alignment)
		start = (void *)(((unsigned long)start + alignment - 1) &
				~(alignment - 1));

	return start;
}

void *rh_alloc(rh_info_t * info, int size, const char *owner)
{
	return rh_alloc_align(info, size, info->alignment, owner);
}

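/*
 * How rh_alloc_align() handles an alignment larger than the heap's
 * default, as a worked sketch (numbers are illustrative): with
 * info->alignment = 4, a request for 64 bytes aligned to 32 is first
 * padded to 64 + 31 = 95 bytes and then rounded up to 96 for the
 * heap's own alignment.  If the chosen free block starts at 0x1014,
 * the returned pointer is (0x1014 + 31) & ~31 = 0x1020, which still
 * lies within the taken block [0x1014, 0x1074), so rh_free() can later
 * locate the block by scanning for the one containing the pointer.
 */
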
/* Allocate at precisely the given address */
void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk, *newblk1, *newblk2;
	unsigned long s, e, m, bs = 0, be = 0;

	/* Validate size */
	if (size <= 0)
		return ERR_PTR(-EINVAL);

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 2) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = (unsigned long)blk->start;
		be = (unsigned long)blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;

		start = blk->start;
		attach_taken_block(info, blk);

		return start;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start = (int8_t *)blk->start + size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk2 = get_slot(info);
		newblk2->start = (void *)e;
		newblk2->size = be - e;

		list_add(&newblk2->list, &blk->list);
	}

	newblk1 = get_slot(info);
	newblk1->start = (void *)s;
	newblk1->size = e - s;
	newblk1->owner = owner;

	start = newblk1->start;
	attach_taken_block(info, newblk1);

	return start;
}

int rh_free(rh_info_t * info, void *start)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for the block owning this address */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	/* Remove from taken list */
	list_del(&blk->list);

	/* Get size of freed block */
	size = blk->size;
	attach_free_block(info, blk);

	return size;
}

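/*
 * Note that rh_free() accepts any pointer that lies inside a taken
 * block, not just the block's start address; this is what makes the
 * aligned-up pointers returned by rh_alloc_align() freeable.  A sketch
 * (values are illustrative):
 *
 *	p = rh_alloc_align(info, 64, 32, "demo");
 *	n = rh_free(info, p);
 *
 * Here p may point past the block's true start, yet the whole block is
 * freed and n receives its size (at least 64), or -EINVAL on failure.
 */
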
int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
	rh_block_t *blk;
	struct list_head *l;
	struct list_head *h;
	int nr;

	switch (what) {

	case RHGS_FREE:
		h = &info->free_list;
		break;

	case RHGS_TAKEN:
		h = &info->taken_list;
		break;

	default:
		return -EINVAL;
	}

	/* Walk the list, filling in up to max_stats entries */
	nr = 0;
	list_for_each(l, h) {
		blk = list_entry(l, rh_block_t, list);
		if (stats != NULL && nr < max_stats) {
			stats->start = blk->start;
			stats->size = blk->size;
			stats->owner = blk->owner;
			stats++;
		}
		nr++;
	}

	return nr;
}

int rh_set_owner(rh_info_t * info, void *start, const char *owner)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for the block owning this address */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	blk->owner = owner;
	size = blk->size;

	return size;
}

void rh_dump(rh_info_t * info)
{
	static rh_stats_t st[32];	/* XXX maximum 32 blocks */
	int maxnr;
	int i, nr;

	maxnr = ARRAY_SIZE(st);

	printk(KERN_INFO
	       "info @0x%p (%d slots empty / %d max)\n",
	       info, info->empty_slots, info->max_blocks);

	printk(KERN_INFO "  Free:\n");
	nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%p-0x%p (%u)\n",
		       st[i].start, (int8_t *) st[i].start + st[i].size,
		       st[i].size);
	printk(KERN_INFO "\n");

	printk(KERN_INFO "  Taken:\n");
	nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%p-0x%p (%u) %s\n",
		       st[i].start, (int8_t *) st[i].start + st[i].size,
		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
	printk(KERN_INFO "\n");
}

void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
	printk(KERN_INFO
	       "blk @0x%p: 0x%p-0x%p (%u)\n",
	       blk, blk->start, (int8_t *) blk->start + blk->size, blk->size);
}