/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory,
 * etc.
 *
 * This code is based on the buddy allocator found in the sym53c8xx_2
 * driver Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>,
 * and adapted for general purpose use.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>

#include <asm/page.h>


struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
				 unsigned long (*fp)(struct gen_pool *),
				 unsigned long data)
{
	struct gen_pool *poolp;
	unsigned long tmp;
	int i;

	/*
	 * This is really an arbitrary limit, +10 is enough for
	 * IA64_GRANULE_SHIFT, aka 16MB. If anyone needs a larger limit
	 * this can be increased without problems.
	 */
	if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
	    ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
		return NULL;

	if (!max_chunk_shift)
		max_chunk_shift = PAGE_SHIFT;

	poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
	if (!poolp)
		return NULL;
	memset(poolp, 0, sizeof(struct gen_pool));
	poolp->h = kmalloc(sizeof(struct gen_pool_link) *
			   (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
			   GFP_KERNEL);
	if (!poolp->h) {
		printk(KERN_WARNING "gen_pool_create() failed to allocate\n");
		kfree(poolp);
		return NULL;
	}
	memset(poolp->h, 0, sizeof(struct gen_pool_link) *
	       (max_chunk_shift - ALLOC_MIN_SHIFT + 1));

	spin_lock_init(&poolp->lock);
	poolp->get_new_chunk = fp;
	poolp->max_chunk_shift = max_chunk_shift;
	poolp->private = data;

	/* Pre-fill the pool with nr_chunks chunks fetched via the callback. */
	for (i = 0; i < nr_chunks; i++) {
		tmp = poolp->get_new_chunk(poolp);
		printk(KERN_INFO "allocated %lx\n", tmp);
		if (!tmp)
			break;
		gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
	}

	return poolp;
}
EXPORT_SYMBOL(gen_pool_create);


/*
 * Simple power of two buddy-like generic allocator.
 * Provides naturally aligned memory chunks.
 */
unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
{
	int j, i, s, max_chunk_size;
	unsigned long a, flags;
	struct gen_pool_link *h = poolp->h;

	max_chunk_size = 1 << poolp->max_chunk_shift;

	if (size > max_chunk_size)
		return 0;

	size = max(size, 1 << ALLOC_MIN_SHIFT);
	i = fls(size - 1);
	s = 1 << i;
	j = i -= ALLOC_MIN_SHIFT;

	spin_lock_irqsave(&poolp->lock, flags);
	/* Walk up the free lists until one holding a large enough chunk is found. */
	while (!h[j].next) {
		if (s == max_chunk_size) {
			struct gen_pool_link *ptr;
			spin_unlock_irqrestore(&poolp->lock, flags);
			ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
			spin_lock_irqsave(&poolp->lock, flags);
			h[j].next = ptr;
			if (h[j].next)
				h[j].next->next = NULL;
			break;
		}
		j++;
		s <<= 1;
	}
	a = (unsigned long) h[j].next;
	if (a) {
		h[j].next = h[j].next->next;
		/*
		 * This should be split into a separate function doing
		 * the chunk split, in order to support custom handling
		 * of memory not physically accessible by the host.
		 */
		while (j > i) {
			j -= 1;
			s >>= 1;
			/* return the upper half of the split chunk to the free list */
			h[j].next = (struct gen_pool_link *) (a + s);
			h[j].next->next = NULL;
		}
	}
	spin_unlock_irqrestore(&poolp->lock, flags);
	return a;
}
EXPORT_SYMBOL(gen_pool_alloc);


/*
 * Counterpart of the generic allocator: return a chunk to the pool,
 * coalescing it with its buddy where possible.
 */
void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
{
	struct gen_pool_link *q;
	struct gen_pool_link *h = poolp->h;
	unsigned long a, b, flags;
	int i, s, max_chunk_size;

	max_chunk_size = 1 << poolp->max_chunk_shift;

	if (size > max_chunk_size)
		return;

	size = max(size, 1 << ALLOC_MIN_SHIFT);
	i = fls(size - 1);
	s = 1 << i;
	i -= ALLOC_MIN_SHIFT;

	a = ptr;

	spin_lock_irqsave(&poolp->lock, flags);
	while (1) {
		if (s == max_chunk_size) {
			((struct gen_pool_link *)a)->next = h[i].next;
			h[i].next = (struct gen_pool_link *)a;
			break;
		}
		b = a ^ s;	/* address of this chunk's buddy */
		q = &h[i];

		/* look for the buddy on the free list of this size */
		while (q->next && q->next != (struct gen_pool_link *)b)
			q = q->next;

		if (!q->next) {
			/* buddy is not free, just put the chunk on this free list */
			((struct gen_pool_link *)a)->next = h[i].next;
			h[i].next = (struct gen_pool_link *)a;
			break;
		}
		/* buddy is free: unlink it and retry with the merged, larger chunk */
		q->next = q->next->next;
		a = a & b;	/* the merged chunk starts at the lower buddy */
		s <<= 1;
		i++;
	}
	spin_unlock_irqrestore(&poolp->lock, flags);
}
EXPORT_SYMBOL(gen_pool_free);
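
/*
 * Illustrative usage sketch, not part of the original file: it shows how a
 * caller might wire a pool to a single pre-mapped block of special memory.
 * The names MY_CHUNK_SHIFT, my_chunk_base, my_get_new_chunk() and
 * my_pool_init() are assumptions made for this example only.  Note that a
 * chunk handed back by the callback must be naturally aligned to
 * 1 << max_chunk_shift and CPU-addressable, because the free lists are
 * kept inside the free blocks themselves.
 */
#if 0
#define MY_CHUNK_SHIFT	16		/* hand 64KB chunks to the pool */

/* naturally aligned, CPU-addressable block of special memory, set up elsewhere */
static unsigned long my_chunk_base;

/* Called by the pool whenever it needs another chunk of backing memory. */
static unsigned long my_get_new_chunk(struct gen_pool *poolp)
{
	unsigned long chunk = my_chunk_base;

	my_chunk_base = 0;		/* this example only ever provides one chunk */
	return chunk;			/* returning 0 tells the pool no memory is left */
}

static int my_pool_init(void)
{
	struct gen_pool *pool;
	unsigned long buf;

	/* Seed the pool with one chunk fetched through the callback. */
	pool = gen_pool_create(1, MY_CHUNK_SHIFT, my_get_new_chunk, 0);
	if (!pool)
		return -ENOMEM;

	buf = gen_pool_alloc(pool, 256);	/* rounded up to a power of two */
	if (buf)
		gen_pool_free(pool, buf, 256);
	return 0;
}
#endif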