/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/jffs2.h>
#include "nodelist.h"

/* These are initialised to NULL in the kernel startup code.
   If you're porting to other operating systems, beware */
static struct kmem_cache *full_dnode_slab;
static struct kmem_cache *raw_dirent_slab;
static struct kmem_cache *raw_inode_slab;
static struct kmem_cache *tmp_dnode_info_slab;
static struct kmem_cache *raw_node_ref_slab;
static struct kmem_cache *node_frag_slab;
static struct kmem_cache *inode_cache_slab;
#ifdef CONFIG_JFFS2_FS_XATTR
static struct kmem_cache *xattr_datum_cache;
static struct kmem_cache *xattr_ref_cache;
#endif

int __init jffs2_create_slab_caches(void)
{
	full_dnode_slab = KMEM_CACHE(jffs2_full_dnode, 0);
	if (!full_dnode_slab)
		goto err;

	raw_dirent_slab = KMEM_CACHE(jffs2_raw_dirent, SLAB_HWCACHE_ALIGN);
	if (!raw_dirent_slab)
		goto err;

	raw_inode_slab = KMEM_CACHE(jffs2_raw_inode, SLAB_HWCACHE_ALIGN);
	if (!raw_inode_slab)
		goto err;

	tmp_dnode_info_slab = KMEM_CACHE(jffs2_tmp_dnode_info, 0);
	if (!tmp_dnode_info_slab)
		goto err;

	raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
				sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
				0, 0, NULL);
	if (!raw_node_ref_slab)
		goto err;

	node_frag_slab = KMEM_CACHE(jffs2_node_frag, 0);
	if (!node_frag_slab)
		goto err;

	inode_cache_slab = KMEM_CACHE(jffs2_inode_cache, 0);
	if (!inode_cache_slab)
		goto err;

#ifdef CONFIG_JFFS2_FS_XATTR
	xattr_datum_cache = KMEM_CACHE(jffs2_xattr_datum, 0);
	if (!xattr_datum_cache)
		goto err;

	xattr_ref_cache = KMEM_CACHE(jffs2_xattr_ref, 0);
	if (!xattr_ref_cache)
		goto err;
#endif

	return 0;
 err:
	jffs2_destroy_slab_caches();
	return -ENOMEM;
}

void jffs2_destroy_slab_caches(void)
{
	kmem_cache_destroy(full_dnode_slab);
	kmem_cache_destroy(raw_dirent_slab);
	kmem_cache_destroy(raw_inode_slab);
	kmem_cache_destroy(tmp_dnode_info_slab);
	kmem_cache_destroy(raw_node_ref_slab);
	kmem_cache_destroy(node_frag_slab);
	kmem_cache_destroy(inode_cache_slab);
#ifdef CONFIG_JFFS2_FS_XATTR
	kmem_cache_destroy(xattr_datum_cache);
	kmem_cache_destroy(xattr_ref_cache);
#endif
}

struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
{
	struct jffs2_full_dirent *ret;
	ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_full_dirent(struct jffs2_full_dirent *x)
{
	dbg_memalloc("%p\n", x);
	kfree(x);
}

struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
{
	struct jffs2_full_dnode *ret;
	ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(full_dnode_slab, x);
}

struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
{
	struct jffs2_raw_dirent *ret;
	ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_dirent_slab, x);
}

struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
{
	struct jffs2_raw_inode *ret;
	ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_inode_slab, x);
}

struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
{
	struct jffs2_tmp_dnode_info *ret;
	ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(tmp_dnode_info_slab, x);
}

static struct jffs2_raw_node_ref *jffs2_alloc_refblock(void)
{
	struct jffs2_raw_node_ref *ret;

	ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
	if (ret) {
		int i = 0;
		for (i=0; i < REFS_PER_BLOCK; i++) {
			ret[i].flash_offset = REF_EMPTY_NODE;
			ret[i].next_in_ino = NULL;
		}
		ret[i].flash_offset = REF_LINK_NODE;
		ret[i].next_in_ino = NULL;
	}
	return ret;
}

int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb, int nr)
{
	struct jffs2_raw_node_ref **p, *ref;
	int i = nr;

	dbg_memalloc("%d\n", nr);

	p = &jeb->last_node;
	ref = *p;

	dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset);

	/* If jeb->last_node is really a valid node then skip over it */
	if (ref && ref->flash_offset != REF_EMPTY_NODE)
		ref++;

	while (i) {
		if (!ref) {
			dbg_memalloc("Allocating new refblock linked from %p\n", p);
			ref = *p = jffs2_alloc_refblock();
			if (!ref)
				return -ENOMEM;
		}
		if (ref->flash_offset == REF_LINK_NODE) {
			p = &ref->next_in_ino;
			ref = *p;
			continue;
		}
		i--;
		ref++;
	}
	jeb->allocated_refs = nr;

	dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n",
		     nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset,
		     jeb->last_node->next_in_ino);

	return 0;
}

void jffs2_free_refblock(struct jffs2_raw_node_ref *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_node_ref_slab, x);
}

struct jffs2_node_frag *jffs2_alloc_node_frag(void)
{
	struct jffs2_node_frag *ret;
	ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_node_frag(struct jffs2_node_frag *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(node_frag_slab, x);
}

struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
{
	struct jffs2_inode_cache *ret;
	ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(inode_cache_slab, x);
}

#ifdef CONFIG_JFFS2_FS_XATTR
struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
{
	struct jffs2_xattr_datum *xd;
	xd = kmem_cache_zalloc(xattr_datum_cache, GFP_KERNEL);
	dbg_memalloc("%p\n", xd);
	if (!xd)
		return NULL;

	xd->class = RAWNODE_CLASS_XATTR_DATUM;
	xd->node = (void *)xd;
	INIT_LIST_HEAD(&xd->xindex);
	return xd;
}

void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd)
{
	dbg_memalloc("%p\n", xd);
	kmem_cache_free(xattr_datum_cache, xd);
}

struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
{
	struct jffs2_xattr_ref *ref;
	ref = kmem_cache_zalloc(xattr_ref_cache, GFP_KERNEL);
	dbg_memalloc("%p\n", ref);
	if (!ref)
		return NULL;

	ref->class = RAWNODE_CLASS_XATTR_REF;
	ref->node = (void *)ref;
	return ref;
}

void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref)
{
	dbg_memalloc("%p\n", ref);
	kmem_cache_free(xattr_ref_cache, ref);
}
#endif