1 /* 2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. 3 * Copyright (C) 2007 The Regents of the University of California. 4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>. 6 * UCRL-CODE-235197 7 * 8 * This file is part of the SPL, Solaris Porting Layer. 9 * 10 * The SPL is free software; you can redistribute it and/or modify it 11 * under the terms of the GNU General Public License as published by the 12 * Free Software Foundation; either version 2 of the License, or (at your 13 * option) any later version. 14 * 15 * The SPL is distributed in the hope that it will be useful, but WITHOUT 16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 18 * for more details. 19 * 20 * You should have received a copy of the GNU General Public License along 21 * with the SPL. If not, see <http://www.gnu.org/licenses/>. 22 */ 23 24 #define SPL_KMEM_CACHE_IMPLEMENTING 25 26 #include <sys/kmem.h> 27 #include <sys/kmem_cache.h> 28 #include <sys/taskq.h> 29 #include <sys/timer.h> 30 #include <sys/vmem.h> 31 #include <sys/wait.h> 32 #include <sys/string.h> 33 #include <linux/slab.h> 34 #include <linux/swap.h> 35 #include <linux/prefetch.h> 36 37 /* 38 * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}() 39 * with smp_mb__{before,after}_atomic() because they were redundant. This is 40 * only used inside our SLAB allocator, so we implement an internal wrapper 41 * here to give us smp_mb__{before,after}_atomic() on older kernels. 42 */ 43 #ifndef smp_mb__before_atomic 44 #define smp_mb__before_atomic(x) smp_mb__before_clear_bit(x) 45 #endif 46 47 #ifndef smp_mb__after_atomic 48 #define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x) 49 #endif 50 51 /* 52 * Cache magazines are an optimization designed to minimize the cost of 53 * allocating memory. They do this by keeping a per-cpu cache of recently 54 * freed objects, which can then be reallocated without taking a lock. This 55 * can improve performance on highly contended caches. However, because 56 * objects in magazines will prevent otherwise empty slabs from being 57 * immediately released this may not be ideal for low memory machines. 58 * 59 * For this reason spl_kmem_cache_magazine_size can be used to set a maximum 60 * magazine size. When this value is set to 0 the magazine size will be 61 * automatically determined based on the object size. Otherwise magazines 62 * will be limited to 2-256 objects per magazine (i.e per cpu). Magazines 63 * may never be entirely disabled in this implementation. 64 */ 65 static unsigned int spl_kmem_cache_magazine_size = 0; 66 module_param(spl_kmem_cache_magazine_size, uint, 0444); 67 MODULE_PARM_DESC(spl_kmem_cache_magazine_size, 68 "Default magazine size (2-256), set automatically (0)"); 69 70 static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB; 71 module_param(spl_kmem_cache_obj_per_slab, uint, 0644); 72 MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab"); 73 74 static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE; 75 module_param(spl_kmem_cache_max_size, uint, 0644); 76 MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB"); 77 78 /* 79 * For small objects the Linux slab allocator should be used to make the most 80 * efficient use of the memory. However, large objects are not supported by 81 * the Linux slab and therefore the SPL implementation is preferred. A cutoff 82 * of 16K was determined to be optimal for architectures using 4K pages and 83 * to also work well on architecutres using larger 64K page sizes. 84 */ 85 static unsigned int spl_kmem_cache_slab_limit = 86 SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE; 87 module_param(spl_kmem_cache_slab_limit, uint, 0644); 88 MODULE_PARM_DESC(spl_kmem_cache_slab_limit, 89 "Objects less than N bytes use the Linux slab"); 90 91 /* 92 * The number of threads available to allocate new slabs for caches. This 93 * should not need to be tuned but it is available for performance analysis. 94 */ 95 static unsigned int spl_kmem_cache_kmem_threads = 4; 96 module_param(spl_kmem_cache_kmem_threads, uint, 0444); 97 MODULE_PARM_DESC(spl_kmem_cache_kmem_threads, 98 "Number of spl_kmem_cache threads"); 99 100 /* 101 * Slab allocation interfaces 102 * 103 * While the Linux slab implementation was inspired by the Solaris 104 * implementation I cannot use it to emulate the Solaris APIs. I 105 * require two features which are not provided by the Linux slab. 106 * 107 * 1) Constructors AND destructors. Recent versions of the Linux 108 * kernel have removed support for destructors. This is a deal 109 * breaker for the SPL which contains particularly expensive 110 * initializers for mutex's, condition variables, etc. We also 111 * require a minimal level of cleanup for these data types unlike 112 * many Linux data types which do need to be explicitly destroyed. 113 * 114 * 2) Virtual address space backed slab. Callers of the Solaris slab 115 * expect it to work well for both small are very large allocations. 116 * Because of memory fragmentation the Linux slab which is backed 117 * by kmalloc'ed memory performs very badly when confronted with 118 * large numbers of large allocations. Basing the slab on the 119 * virtual address space removes the need for contiguous pages 120 * and greatly improve performance for large allocations. 121 * 122 * For these reasons, the SPL has its own slab implementation with 123 * the needed features. It is not as highly optimized as either the 124 * Solaris or Linux slabs, but it should get me most of what is 125 * needed until it can be optimized or obsoleted by another approach. 126 * 127 * One serious concern I do have about this method is the relatively 128 * small virtual address space on 32bit arches. This will seriously 129 * constrain the size of the slab caches and their performance. 130 */ 131 132 struct list_head spl_kmem_cache_list; /* List of caches */ 133 struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */ 134 static taskq_t *spl_kmem_cache_taskq; /* Task queue for aging / reclaim */ 135 136 static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj); 137 138 static void * 139 kv_alloc(spl_kmem_cache_t *skc, int size, int flags) 140 { 141 gfp_t lflags = kmem_flags_convert(flags); 142 void *ptr; 143 144 if (skc->skc_flags & KMC_RECLAIMABLE) 145 lflags |= __GFP_RECLAIMABLE; 146 ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM); 147 148 /* Resulting allocated memory will be page aligned */ 149 ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE)); 150 151 return (ptr); 152 } 153 154 static void 155 kv_free(spl_kmem_cache_t *skc, void *ptr, int size) 156 { 157 ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE)); 158 159 /* 160 * The Linux direct reclaim path uses this out of band value to 161 * determine if forward progress is being made. Normally this is 162 * incremented by kmem_freepages() which is part of the various 163 * Linux slab implementations. However, since we are using none 164 * of that infrastructure we are responsible for incrementing it. 165 */ 166 if (current->reclaim_state) 167 #ifdef HAVE_RECLAIM_STATE_RECLAIMED 168 current->reclaim_state->reclaimed += size >> PAGE_SHIFT; 169 #else 170 current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT; 171 #endif 172 vfree(ptr); 173 } 174 175 /* 176 * Required space for each aligned sks. 177 */ 178 static inline uint32_t 179 spl_sks_size(spl_kmem_cache_t *skc) 180 { 181 return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t), 182 skc->skc_obj_align, uint32_t)); 183 } 184 185 /* 186 * Required space for each aligned object. 187 */ 188 static inline uint32_t 189 spl_obj_size(spl_kmem_cache_t *skc) 190 { 191 uint32_t align = skc->skc_obj_align; 192 193 return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) + 194 P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t)); 195 } 196 197 uint64_t 198 spl_kmem_cache_inuse(kmem_cache_t *cache) 199 { 200 return (cache->skc_obj_total); 201 } 202 EXPORT_SYMBOL(spl_kmem_cache_inuse); 203 204 uint64_t 205 spl_kmem_cache_entry_size(kmem_cache_t *cache) 206 { 207 return (cache->skc_obj_size); 208 } 209 EXPORT_SYMBOL(spl_kmem_cache_entry_size); 210 211 /* 212 * Lookup the spl_kmem_object_t for an object given that object. 213 */ 214 static inline spl_kmem_obj_t * 215 spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj) 216 { 217 return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size, 218 skc->skc_obj_align, uint32_t)); 219 } 220 221 /* 222 * It's important that we pack the spl_kmem_obj_t structure and the 223 * actual objects in to one large address space to minimize the number 224 * of calls to the allocator. It is far better to do a few large 225 * allocations and then subdivide it ourselves. Now which allocator 226 * we use requires balancing a few trade offs. 227 * 228 * For small objects we use kmem_alloc() because as long as you are 229 * only requesting a small number of pages (ideally just one) its cheap. 230 * However, when you start requesting multiple pages with kmem_alloc() 231 * it gets increasingly expensive since it requires contiguous pages. 232 * For this reason we shift to vmem_alloc() for slabs of large objects 233 * which removes the need for contiguous pages. We do not use 234 * vmem_alloc() in all cases because there is significant locking 235 * overhead in __get_vm_area_node(). This function takes a single 236 * global lock when acquiring an available virtual address range which 237 * serializes all vmem_alloc()'s for all slab caches. Using slightly 238 * different allocation functions for small and large objects should 239 * give us the best of both worlds. 240 * 241 * +------------------------+ 242 * | spl_kmem_slab_t --+-+ | 243 * | skc_obj_size <-+ | | 244 * | spl_kmem_obj_t | | 245 * | skc_obj_size <---+ | 246 * | spl_kmem_obj_t | | 247 * | ... v | 248 * +------------------------+ 249 */ 250 static spl_kmem_slab_t * 251 spl_slab_alloc(spl_kmem_cache_t *skc, int flags) 252 { 253 spl_kmem_slab_t *sks; 254 void *base; 255 uint32_t obj_size; 256 257 base = kv_alloc(skc, skc->skc_slab_size, flags); 258 if (base == NULL) 259 return (NULL); 260 261 sks = (spl_kmem_slab_t *)base; 262 sks->sks_magic = SKS_MAGIC; 263 sks->sks_objs = skc->skc_slab_objs; 264 sks->sks_age = jiffies; 265 sks->sks_cache = skc; 266 INIT_LIST_HEAD(&sks->sks_list); 267 INIT_LIST_HEAD(&sks->sks_free_list); 268 sks->sks_ref = 0; 269 obj_size = spl_obj_size(skc); 270 271 for (int i = 0; i < sks->sks_objs; i++) { 272 void *obj = base + spl_sks_size(skc) + (i * obj_size); 273 274 ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align)); 275 spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj); 276 sko->sko_addr = obj; 277 sko->sko_magic = SKO_MAGIC; 278 sko->sko_slab = sks; 279 INIT_LIST_HEAD(&sko->sko_list); 280 list_add_tail(&sko->sko_list, &sks->sks_free_list); 281 } 282 283 return (sks); 284 } 285 286 /* 287 * Remove a slab from complete or partial list, it must be called with 288 * the 'skc->skc_lock' held but the actual free must be performed 289 * outside the lock to prevent deadlocking on vmem addresses. 290 */ 291 static void 292 spl_slab_free(spl_kmem_slab_t *sks, 293 struct list_head *sks_list, struct list_head *sko_list) 294 { 295 spl_kmem_cache_t *skc; 296 297 ASSERT(sks->sks_magic == SKS_MAGIC); 298 ASSERT(sks->sks_ref == 0); 299 300 skc = sks->sks_cache; 301 ASSERT(skc->skc_magic == SKC_MAGIC); 302 303 /* 304 * Update slab/objects counters in the cache, then remove the 305 * slab from the skc->skc_partial_list. Finally add the slab 306 * and all its objects in to the private work lists where the 307 * destructors will be called and the memory freed to the system. 308 */ 309 skc->skc_obj_total -= sks->sks_objs; 310 skc->skc_slab_total--; 311 list_del(&sks->sks_list); 312 list_add(&sks->sks_list, sks_list); 313 list_splice_init(&sks->sks_free_list, sko_list); 314 } 315 316 /* 317 * Reclaim empty slabs at the end of the partial list. 318 */ 319 static void 320 spl_slab_reclaim(spl_kmem_cache_t *skc) 321 { 322 spl_kmem_slab_t *sks = NULL, *m = NULL; 323 spl_kmem_obj_t *sko = NULL, *n = NULL; 324 LIST_HEAD(sks_list); 325 LIST_HEAD(sko_list); 326 327 /* 328 * Empty slabs and objects must be moved to a private list so they 329 * can be safely freed outside the spin lock. All empty slabs are 330 * at the end of skc->skc_partial_list, therefore once a non-empty 331 * slab is found we can stop scanning. 332 */ 333 spin_lock(&skc->skc_lock); 334 list_for_each_entry_safe_reverse(sks, m, 335 &skc->skc_partial_list, sks_list) { 336 337 if (sks->sks_ref > 0) 338 break; 339 340 spl_slab_free(sks, &sks_list, &sko_list); 341 } 342 spin_unlock(&skc->skc_lock); 343 344 /* 345 * The following two loops ensure all the object destructors are run, 346 * and the slabs themselves are freed. This is all done outside the 347 * skc->skc_lock since this allows the destructor to sleep, and 348 * allows us to perform a conditional reschedule when a freeing a 349 * large number of objects and slabs back to the system. 350 */ 351 352 list_for_each_entry_safe(sko, n, &sko_list, sko_list) { 353 ASSERT(sko->sko_magic == SKO_MAGIC); 354 } 355 356 list_for_each_entry_safe(sks, m, &sks_list, sks_list) { 357 ASSERT(sks->sks_magic == SKS_MAGIC); 358 kv_free(skc, sks, skc->skc_slab_size); 359 } 360 } 361 362 static spl_kmem_emergency_t * 363 spl_emergency_search(struct rb_root *root, void *obj) 364 { 365 struct rb_node *node = root->rb_node; 366 spl_kmem_emergency_t *ske; 367 unsigned long address = (unsigned long)obj; 368 369 while (node) { 370 ske = container_of(node, spl_kmem_emergency_t, ske_node); 371 372 if (address < ske->ske_obj) 373 node = node->rb_left; 374 else if (address > ske->ske_obj) 375 node = node->rb_right; 376 else 377 return (ske); 378 } 379 380 return (NULL); 381 } 382 383 static int 384 spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske) 385 { 386 struct rb_node **new = &(root->rb_node), *parent = NULL; 387 spl_kmem_emergency_t *ske_tmp; 388 unsigned long address = ske->ske_obj; 389 390 while (*new) { 391 ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node); 392 393 parent = *new; 394 if (address < ske_tmp->ske_obj) 395 new = &((*new)->rb_left); 396 else if (address > ske_tmp->ske_obj) 397 new = &((*new)->rb_right); 398 else 399 return (0); 400 } 401 402 rb_link_node(&ske->ske_node, parent, new); 403 rb_insert_color(&ske->ske_node, root); 404 405 return (1); 406 } 407 408 /* 409 * Allocate a single emergency object and track it in a red black tree. 410 */ 411 static int 412 spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj) 413 { 414 gfp_t lflags = kmem_flags_convert(flags); 415 spl_kmem_emergency_t *ske; 416 int order = get_order(skc->skc_obj_size); 417 int empty; 418 419 /* Last chance use a partial slab if one now exists */ 420 spin_lock(&skc->skc_lock); 421 empty = list_empty(&skc->skc_partial_list); 422 spin_unlock(&skc->skc_lock); 423 if (!empty) 424 return (-EEXIST); 425 426 if (skc->skc_flags & KMC_RECLAIMABLE) 427 lflags |= __GFP_RECLAIMABLE; 428 ske = kmalloc(sizeof (*ske), lflags); 429 if (ske == NULL) 430 return (-ENOMEM); 431 432 ske->ske_obj = __get_free_pages(lflags, order); 433 if (ske->ske_obj == 0) { 434 kfree(ske); 435 return (-ENOMEM); 436 } 437 438 spin_lock(&skc->skc_lock); 439 empty = spl_emergency_insert(&skc->skc_emergency_tree, ske); 440 if (likely(empty)) { 441 skc->skc_obj_total++; 442 skc->skc_obj_emergency++; 443 if (skc->skc_obj_emergency > skc->skc_obj_emergency_max) 444 skc->skc_obj_emergency_max = skc->skc_obj_emergency; 445 } 446 spin_unlock(&skc->skc_lock); 447 448 if (unlikely(!empty)) { 449 free_pages(ske->ske_obj, order); 450 kfree(ske); 451 return (-EINVAL); 452 } 453 454 *obj = (void *)ske->ske_obj; 455 456 return (0); 457 } 458 459 /* 460 * Locate the passed object in the red black tree and free it. 461 */ 462 static int 463 spl_emergency_free(spl_kmem_cache_t *skc, void *obj) 464 { 465 spl_kmem_emergency_t *ske; 466 int order = get_order(skc->skc_obj_size); 467 468 spin_lock(&skc->skc_lock); 469 ske = spl_emergency_search(&skc->skc_emergency_tree, obj); 470 if (ske) { 471 rb_erase(&ske->ske_node, &skc->skc_emergency_tree); 472 skc->skc_obj_emergency--; 473 skc->skc_obj_total--; 474 } 475 spin_unlock(&skc->skc_lock); 476 477 if (ske == NULL) 478 return (-ENOENT); 479 480 free_pages(ske->ske_obj, order); 481 kfree(ske); 482 483 return (0); 484 } 485 486 /* 487 * Release objects from the per-cpu magazine back to their slab. The flush 488 * argument contains the max number of entries to remove from the magazine. 489 */ 490 static void 491 spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush) 492 { 493 spin_lock(&skc->skc_lock); 494 495 ASSERT(skc->skc_magic == SKC_MAGIC); 496 ASSERT(skm->skm_magic == SKM_MAGIC); 497 498 int count = MIN(flush, skm->skm_avail); 499 for (int i = 0; i < count; i++) 500 spl_cache_shrink(skc, skm->skm_objs[i]); 501 502 skm->skm_avail -= count; 503 memmove(skm->skm_objs, &(skm->skm_objs[count]), 504 sizeof (void *) * skm->skm_avail); 505 506 spin_unlock(&skc->skc_lock); 507 } 508 509 /* 510 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t. 511 * When on-slab we want to target spl_kmem_cache_obj_per_slab. However, 512 * for very small objects we may end up with more than this so as not 513 * to waste space in the minimal allocation of a single page. 514 */ 515 static int 516 spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size) 517 { 518 uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs; 519 520 sks_size = spl_sks_size(skc); 521 obj_size = spl_obj_size(skc); 522 max_size = (spl_kmem_cache_max_size * 1024 * 1024); 523 tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size); 524 525 if (tgt_size <= max_size) { 526 tgt_objs = (tgt_size - sks_size) / obj_size; 527 } else { 528 tgt_objs = (max_size - sks_size) / obj_size; 529 tgt_size = (tgt_objs * obj_size) + sks_size; 530 } 531 532 if (tgt_objs == 0) 533 return (-ENOSPC); 534 535 *objs = tgt_objs; 536 *size = tgt_size; 537 538 return (0); 539 } 540 541 /* 542 * Make a guess at reasonable per-cpu magazine size based on the size of 543 * each object and the cost of caching N of them in each magazine. Long 544 * term this should really adapt based on an observed usage heuristic. 545 */ 546 static int 547 spl_magazine_size(spl_kmem_cache_t *skc) 548 { 549 uint32_t obj_size = spl_obj_size(skc); 550 int size; 551 552 if (spl_kmem_cache_magazine_size > 0) 553 return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2)); 554 555 /* Per-magazine sizes below assume a 4Kib page size */ 556 if (obj_size > (PAGE_SIZE * 256)) 557 size = 4; /* Minimum 4Mib per-magazine */ 558 else if (obj_size > (PAGE_SIZE * 32)) 559 size = 16; /* Minimum 2Mib per-magazine */ 560 else if (obj_size > (PAGE_SIZE)) 561 size = 64; /* Minimum 256Kib per-magazine */ 562 else if (obj_size > (PAGE_SIZE / 4)) 563 size = 128; /* Minimum 128Kib per-magazine */ 564 else 565 size = 256; 566 567 return (size); 568 } 569 570 /* 571 * Allocate a per-cpu magazine to associate with a specific core. 572 */ 573 static spl_kmem_magazine_t * 574 spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu) 575 { 576 spl_kmem_magazine_t *skm; 577 int size = sizeof (spl_kmem_magazine_t) + 578 sizeof (void *) * skc->skc_mag_size; 579 580 skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); 581 if (skm) { 582 skm->skm_magic = SKM_MAGIC; 583 skm->skm_avail = 0; 584 skm->skm_size = skc->skc_mag_size; 585 skm->skm_refill = skc->skc_mag_refill; 586 skm->skm_cache = skc; 587 skm->skm_cpu = cpu; 588 } 589 590 return (skm); 591 } 592 593 /* 594 * Free a per-cpu magazine associated with a specific core. 595 */ 596 static void 597 spl_magazine_free(spl_kmem_magazine_t *skm) 598 { 599 ASSERT(skm->skm_magic == SKM_MAGIC); 600 ASSERT(skm->skm_avail == 0); 601 kfree(skm); 602 } 603 604 /* 605 * Create all pre-cpu magazines of reasonable sizes. 606 */ 607 static int 608 spl_magazine_create(spl_kmem_cache_t *skc) 609 { 610 int i = 0; 611 612 ASSERT((skc->skc_flags & KMC_SLAB) == 0); 613 614 skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) * 615 num_possible_cpus(), kmem_flags_convert(KM_SLEEP)); 616 skc->skc_mag_size = spl_magazine_size(skc); 617 skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2; 618 619 for_each_possible_cpu(i) { 620 skc->skc_mag[i] = spl_magazine_alloc(skc, i); 621 if (!skc->skc_mag[i]) { 622 for (i--; i >= 0; i--) 623 spl_magazine_free(skc->skc_mag[i]); 624 625 kfree(skc->skc_mag); 626 return (-ENOMEM); 627 } 628 } 629 630 return (0); 631 } 632 633 /* 634 * Destroy all pre-cpu magazines. 635 */ 636 static void 637 spl_magazine_destroy(spl_kmem_cache_t *skc) 638 { 639 spl_kmem_magazine_t *skm; 640 int i = 0; 641 642 ASSERT((skc->skc_flags & KMC_SLAB) == 0); 643 644 for_each_possible_cpu(i) { 645 skm = skc->skc_mag[i]; 646 spl_cache_flush(skc, skm, skm->skm_avail); 647 spl_magazine_free(skm); 648 } 649 650 kfree(skc->skc_mag); 651 } 652 653 /* 654 * Create a object cache based on the following arguments: 655 * name cache name 656 * size cache object size 657 * align cache object alignment 658 * ctor cache object constructor 659 * dtor cache object destructor 660 * reclaim cache object reclaim 661 * priv cache private data for ctor/dtor/reclaim 662 * vmp unused must be NULL 663 * flags 664 * KMC_KVMEM Force kvmem backed SPL cache 665 * KMC_SLAB Force Linux slab backed cache 666 * KMC_NODEBUG Disable debugging (unsupported) 667 * KMC_RECLAIMABLE Memory can be freed under pressure 668 */ 669 spl_kmem_cache_t * 670 spl_kmem_cache_create(const char *name, size_t size, size_t align, 671 spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim, 672 void *priv, void *vmp, int flags) 673 { 674 gfp_t lflags = kmem_flags_convert(KM_SLEEP); 675 spl_kmem_cache_t *skc; 676 int rc; 677 678 /* 679 * Unsupported flags 680 */ 681 ASSERT(vmp == NULL); 682 ASSERT(reclaim == NULL); 683 684 might_sleep(); 685 686 skc = kzalloc(sizeof (*skc), lflags); 687 if (skc == NULL) 688 return (NULL); 689 690 skc->skc_magic = SKC_MAGIC; 691 skc->skc_name_size = strlen(name) + 1; 692 skc->skc_name = kmalloc(skc->skc_name_size, lflags); 693 if (skc->skc_name == NULL) { 694 kfree(skc); 695 return (NULL); 696 } 697 strlcpy(skc->skc_name, name, skc->skc_name_size); 698 699 skc->skc_ctor = ctor; 700 skc->skc_dtor = dtor; 701 skc->skc_private = priv; 702 skc->skc_vmp = vmp; 703 skc->skc_linux_cache = NULL; 704 skc->skc_flags = flags; 705 skc->skc_obj_size = size; 706 skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN; 707 atomic_set(&skc->skc_ref, 0); 708 709 INIT_LIST_HEAD(&skc->skc_list); 710 INIT_LIST_HEAD(&skc->skc_complete_list); 711 INIT_LIST_HEAD(&skc->skc_partial_list); 712 skc->skc_emergency_tree = RB_ROOT; 713 spin_lock_init(&skc->skc_lock); 714 init_waitqueue_head(&skc->skc_waitq); 715 skc->skc_slab_fail = 0; 716 skc->skc_slab_create = 0; 717 skc->skc_slab_destroy = 0; 718 skc->skc_slab_total = 0; 719 skc->skc_slab_alloc = 0; 720 skc->skc_slab_max = 0; 721 skc->skc_obj_total = 0; 722 skc->skc_obj_alloc = 0; 723 skc->skc_obj_max = 0; 724 skc->skc_obj_deadlock = 0; 725 skc->skc_obj_emergency = 0; 726 skc->skc_obj_emergency_max = 0; 727 728 rc = percpu_counter_init(&skc->skc_linux_alloc, 0, GFP_KERNEL); 729 if (rc != 0) { 730 kfree(skc); 731 return (NULL); 732 } 733 734 /* 735 * Verify the requested alignment restriction is sane. 736 */ 737 if (align) { 738 VERIFY(ISP2(align)); 739 VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN); 740 VERIFY3U(align, <=, PAGE_SIZE); 741 skc->skc_obj_align = align; 742 } 743 744 /* 745 * When no specific type of slab is requested (kmem, vmem, or 746 * linuxslab) then select a cache type based on the object size 747 * and default tunables. 748 */ 749 if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) { 750 if (spl_kmem_cache_slab_limit && 751 size <= (size_t)spl_kmem_cache_slab_limit) { 752 /* 753 * Objects smaller than spl_kmem_cache_slab_limit can 754 * use the Linux slab for better space-efficiency. 755 */ 756 skc->skc_flags |= KMC_SLAB; 757 } else { 758 /* 759 * All other objects are considered large and are 760 * placed on kvmem backed slabs. 761 */ 762 skc->skc_flags |= KMC_KVMEM; 763 } 764 } 765 766 /* 767 * Given the type of slab allocate the required resources. 768 */ 769 if (skc->skc_flags & KMC_KVMEM) { 770 rc = spl_slab_size(skc, 771 &skc->skc_slab_objs, &skc->skc_slab_size); 772 if (rc) 773 goto out; 774 775 rc = spl_magazine_create(skc); 776 if (rc) 777 goto out; 778 } else { 779 unsigned long slabflags = 0; 780 781 if (size > spl_kmem_cache_slab_limit) 782 goto out; 783 784 if (skc->skc_flags & KMC_RECLAIMABLE) 785 slabflags |= SLAB_RECLAIM_ACCOUNT; 786 787 skc->skc_linux_cache = kmem_cache_create_usercopy( 788 skc->skc_name, size, align, slabflags, 0, size, NULL); 789 if (skc->skc_linux_cache == NULL) 790 goto out; 791 } 792 793 down_write(&spl_kmem_cache_sem); 794 list_add_tail(&skc->skc_list, &spl_kmem_cache_list); 795 up_write(&spl_kmem_cache_sem); 796 797 return (skc); 798 out: 799 kfree(skc->skc_name); 800 percpu_counter_destroy(&skc->skc_linux_alloc); 801 kfree(skc); 802 return (NULL); 803 } 804 EXPORT_SYMBOL(spl_kmem_cache_create); 805 806 /* 807 * Register a move callback for cache defragmentation. 808 * XXX: Unimplemented but harmless to stub out for now. 809 */ 810 void 811 spl_kmem_cache_set_move(spl_kmem_cache_t *skc, 812 kmem_cbrc_t (move)(void *, void *, size_t, void *)) 813 { 814 ASSERT(move != NULL); 815 } 816 EXPORT_SYMBOL(spl_kmem_cache_set_move); 817 818 /* 819 * Destroy a cache and all objects associated with the cache. 820 */ 821 void 822 spl_kmem_cache_destroy(spl_kmem_cache_t *skc) 823 { 824 DECLARE_WAIT_QUEUE_HEAD(wq); 825 taskqid_t id; 826 827 ASSERT(skc->skc_magic == SKC_MAGIC); 828 ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB)); 829 830 down_write(&spl_kmem_cache_sem); 831 list_del_init(&skc->skc_list); 832 up_write(&spl_kmem_cache_sem); 833 834 /* Cancel any and wait for any pending delayed tasks */ 835 VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags)); 836 837 spin_lock(&skc->skc_lock); 838 id = skc->skc_taskqid; 839 spin_unlock(&skc->skc_lock); 840 841 taskq_cancel_id(spl_kmem_cache_taskq, id); 842 843 /* 844 * Wait until all current callers complete, this is mainly 845 * to catch the case where a low memory situation triggers a 846 * cache reaping action which races with this destroy. 847 */ 848 wait_event(wq, atomic_read(&skc->skc_ref) == 0); 849 850 if (skc->skc_flags & KMC_KVMEM) { 851 spl_magazine_destroy(skc); 852 spl_slab_reclaim(skc); 853 } else { 854 ASSERT(skc->skc_flags & KMC_SLAB); 855 kmem_cache_destroy(skc->skc_linux_cache); 856 } 857 858 spin_lock(&skc->skc_lock); 859 860 /* 861 * Validate there are no objects in use and free all the 862 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. 863 */ 864 ASSERT3U(skc->skc_slab_alloc, ==, 0); 865 ASSERT3U(skc->skc_obj_alloc, ==, 0); 866 ASSERT3U(skc->skc_slab_total, ==, 0); 867 ASSERT3U(skc->skc_obj_total, ==, 0); 868 ASSERT3U(skc->skc_obj_emergency, ==, 0); 869 ASSERT(list_empty(&skc->skc_complete_list)); 870 871 ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0); 872 percpu_counter_destroy(&skc->skc_linux_alloc); 873 874 spin_unlock(&skc->skc_lock); 875 876 kfree(skc->skc_name); 877 kfree(skc); 878 } 879 EXPORT_SYMBOL(spl_kmem_cache_destroy); 880 881 /* 882 * Allocate an object from a slab attached to the cache. This is used to 883 * repopulate the per-cpu magazine caches in batches when they run low. 884 */ 885 static void * 886 spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks) 887 { 888 spl_kmem_obj_t *sko; 889 890 ASSERT(skc->skc_magic == SKC_MAGIC); 891 ASSERT(sks->sks_magic == SKS_MAGIC); 892 893 sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list); 894 ASSERT(sko->sko_magic == SKO_MAGIC); 895 ASSERT(sko->sko_addr != NULL); 896 897 /* Remove from sks_free_list */ 898 list_del_init(&sko->sko_list); 899 900 sks->sks_age = jiffies; 901 sks->sks_ref++; 902 skc->skc_obj_alloc++; 903 904 /* Track max obj usage statistics */ 905 if (skc->skc_obj_alloc > skc->skc_obj_max) 906 skc->skc_obj_max = skc->skc_obj_alloc; 907 908 /* Track max slab usage statistics */ 909 if (sks->sks_ref == 1) { 910 skc->skc_slab_alloc++; 911 912 if (skc->skc_slab_alloc > skc->skc_slab_max) 913 skc->skc_slab_max = skc->skc_slab_alloc; 914 } 915 916 return (sko->sko_addr); 917 } 918 919 /* 920 * Generic slab allocation function to run by the global work queues. 921 * It is responsible for allocating a new slab, linking it in to the list 922 * of partial slabs, and then waking any waiters. 923 */ 924 static int 925 __spl_cache_grow(spl_kmem_cache_t *skc, int flags) 926 { 927 spl_kmem_slab_t *sks; 928 929 fstrans_cookie_t cookie = spl_fstrans_mark(); 930 sks = spl_slab_alloc(skc, flags); 931 spl_fstrans_unmark(cookie); 932 933 spin_lock(&skc->skc_lock); 934 if (sks) { 935 skc->skc_slab_total++; 936 skc->skc_obj_total += sks->sks_objs; 937 list_add_tail(&sks->sks_list, &skc->skc_partial_list); 938 939 smp_mb__before_atomic(); 940 clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags); 941 smp_mb__after_atomic(); 942 } 943 spin_unlock(&skc->skc_lock); 944 945 return (sks == NULL ? -ENOMEM : 0); 946 } 947 948 static void 949 spl_cache_grow_work(void *data) 950 { 951 spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data; 952 spl_kmem_cache_t *skc = ska->ska_cache; 953 954 int error = __spl_cache_grow(skc, ska->ska_flags); 955 956 atomic_dec(&skc->skc_ref); 957 smp_mb__before_atomic(); 958 clear_bit(KMC_BIT_GROWING, &skc->skc_flags); 959 smp_mb__after_atomic(); 960 if (error == 0) 961 wake_up_all(&skc->skc_waitq); 962 963 kfree(ska); 964 } 965 966 /* 967 * Returns non-zero when a new slab should be available. 968 */ 969 static int 970 spl_cache_grow_wait(spl_kmem_cache_t *skc) 971 { 972 return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags)); 973 } 974 975 /* 976 * No available objects on any slabs, create a new slab. Note that this 977 * functionality is disabled for KMC_SLAB caches which are backed by the 978 * Linux slab. 979 */ 980 static int 981 spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj) 982 { 983 int remaining, rc = 0; 984 985 ASSERT0(flags & ~KM_PUBLIC_MASK); 986 ASSERT(skc->skc_magic == SKC_MAGIC); 987 ASSERT((skc->skc_flags & KMC_SLAB) == 0); 988 989 *obj = NULL; 990 991 /* 992 * Since we can't sleep attempt an emergency allocation to satisfy 993 * the request. The only alterative is to fail the allocation but 994 * it's preferable try. The use of KM_NOSLEEP is expected to be rare. 995 */ 996 if (flags & KM_NOSLEEP) 997 return (spl_emergency_alloc(skc, flags, obj)); 998 999 might_sleep(); 1000 1001 /* 1002 * Before allocating a new slab wait for any reaping to complete and 1003 * then return so the local magazine can be rechecked for new objects. 1004 */ 1005 if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) { 1006 rc = wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING, 1007 TASK_UNINTERRUPTIBLE); 1008 return (rc ? rc : -EAGAIN); 1009 } 1010 1011 /* 1012 * Note: It would be nice to reduce the overhead of context switch 1013 * and improve NUMA locality, by trying to allocate a new slab in the 1014 * current process context with KM_NOSLEEP flag. 1015 * 1016 * However, this can't be applied to vmem/kvmem due to a bug that 1017 * spl_vmalloc() doesn't honor gfp flags in page table allocation. 1018 */ 1019 1020 /* 1021 * This is handled by dispatching a work request to the global work 1022 * queue. This allows us to asynchronously allocate a new slab while 1023 * retaining the ability to safely fall back to a smaller synchronous 1024 * allocations to ensure forward progress is always maintained. 1025 */ 1026 if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) { 1027 spl_kmem_alloc_t *ska; 1028 1029 ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags)); 1030 if (ska == NULL) { 1031 clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags); 1032 smp_mb__after_atomic(); 1033 wake_up_all(&skc->skc_waitq); 1034 return (-ENOMEM); 1035 } 1036 1037 atomic_inc(&skc->skc_ref); 1038 ska->ska_cache = skc; 1039 ska->ska_flags = flags; 1040 taskq_init_ent(&ska->ska_tqe); 1041 taskq_dispatch_ent(spl_kmem_cache_taskq, 1042 spl_cache_grow_work, ska, 0, &ska->ska_tqe); 1043 } 1044 1045 /* 1046 * The goal here is to only detect the rare case where a virtual slab 1047 * allocation has deadlocked. We must be careful to minimize the use 1048 * of emergency objects which are more expensive to track. Therefore, 1049 * we set a very long timeout for the asynchronous allocation and if 1050 * the timeout is reached the cache is flagged as deadlocked. From 1051 * this point only new emergency objects will be allocated until the 1052 * asynchronous allocation completes and clears the deadlocked flag. 1053 */ 1054 if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) { 1055 rc = spl_emergency_alloc(skc, flags, obj); 1056 } else { 1057 remaining = wait_event_timeout(skc->skc_waitq, 1058 spl_cache_grow_wait(skc), HZ / 10); 1059 1060 if (!remaining) { 1061 spin_lock(&skc->skc_lock); 1062 if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) { 1063 set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags); 1064 skc->skc_obj_deadlock++; 1065 } 1066 spin_unlock(&skc->skc_lock); 1067 } 1068 1069 rc = -ENOMEM; 1070 } 1071 1072 return (rc); 1073 } 1074 1075 /* 1076 * Refill a per-cpu magazine with objects from the slabs for this cache. 1077 * Ideally the magazine can be repopulated using existing objects which have 1078 * been released, however if we are unable to locate enough free objects new 1079 * slabs of objects will be created. On success NULL is returned, otherwise 1080 * the address of a single emergency object is returned for use by the caller. 1081 */ 1082 static void * 1083 spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags) 1084 { 1085 spl_kmem_slab_t *sks; 1086 int count = 0, rc, refill; 1087 void *obj = NULL; 1088 1089 ASSERT(skc->skc_magic == SKC_MAGIC); 1090 ASSERT(skm->skm_magic == SKM_MAGIC); 1091 1092 refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail); 1093 spin_lock(&skc->skc_lock); 1094 1095 while (refill > 0) { 1096 /* No slabs available we may need to grow the cache */ 1097 if (list_empty(&skc->skc_partial_list)) { 1098 spin_unlock(&skc->skc_lock); 1099 1100 local_irq_enable(); 1101 rc = spl_cache_grow(skc, flags, &obj); 1102 local_irq_disable(); 1103 1104 /* Emergency object for immediate use by caller */ 1105 if (rc == 0 && obj != NULL) 1106 return (obj); 1107 1108 if (rc) 1109 goto out; 1110 1111 /* Rescheduled to different CPU skm is not local */ 1112 if (skm != skc->skc_mag[smp_processor_id()]) 1113 goto out; 1114 1115 /* 1116 * Potentially rescheduled to the same CPU but 1117 * allocations may have occurred from this CPU while 1118 * we were sleeping so recalculate max refill. 1119 */ 1120 refill = MIN(refill, skm->skm_size - skm->skm_avail); 1121 1122 spin_lock(&skc->skc_lock); 1123 continue; 1124 } 1125 1126 /* Grab the next available slab */ 1127 sks = list_entry((&skc->skc_partial_list)->next, 1128 spl_kmem_slab_t, sks_list); 1129 ASSERT(sks->sks_magic == SKS_MAGIC); 1130 ASSERT(sks->sks_ref < sks->sks_objs); 1131 ASSERT(!list_empty(&sks->sks_free_list)); 1132 1133 /* 1134 * Consume as many objects as needed to refill the requested 1135 * cache. We must also be careful not to overfill it. 1136 */ 1137 while (sks->sks_ref < sks->sks_objs && refill-- > 0 && 1138 ++count) { 1139 ASSERT(skm->skm_avail < skm->skm_size); 1140 ASSERT(count < skm->skm_size); 1141 skm->skm_objs[skm->skm_avail++] = 1142 spl_cache_obj(skc, sks); 1143 } 1144 1145 /* Move slab to skc_complete_list when full */ 1146 if (sks->sks_ref == sks->sks_objs) { 1147 list_del(&sks->sks_list); 1148 list_add(&sks->sks_list, &skc->skc_complete_list); 1149 } 1150 } 1151 1152 spin_unlock(&skc->skc_lock); 1153 out: 1154 return (NULL); 1155 } 1156 1157 /* 1158 * Release an object back to the slab from which it came. 1159 */ 1160 static void 1161 spl_cache_shrink(spl_kmem_cache_t *skc, void *obj) 1162 { 1163 spl_kmem_slab_t *sks = NULL; 1164 spl_kmem_obj_t *sko = NULL; 1165 1166 ASSERT(skc->skc_magic == SKC_MAGIC); 1167 1168 sko = spl_sko_from_obj(skc, obj); 1169 ASSERT(sko->sko_magic == SKO_MAGIC); 1170 sks = sko->sko_slab; 1171 ASSERT(sks->sks_magic == SKS_MAGIC); 1172 ASSERT(sks->sks_cache == skc); 1173 list_add(&sko->sko_list, &sks->sks_free_list); 1174 1175 sks->sks_age = jiffies; 1176 sks->sks_ref--; 1177 skc->skc_obj_alloc--; 1178 1179 /* 1180 * Move slab to skc_partial_list when no longer full. Slabs 1181 * are added to the head to keep the partial list is quasi-full 1182 * sorted order. Fuller at the head, emptier at the tail. 1183 */ 1184 if (sks->sks_ref == (sks->sks_objs - 1)) { 1185 list_del(&sks->sks_list); 1186 list_add(&sks->sks_list, &skc->skc_partial_list); 1187 } 1188 1189 /* 1190 * Move empty slabs to the end of the partial list so 1191 * they can be easily found and freed during reclamation. 1192 */ 1193 if (sks->sks_ref == 0) { 1194 list_del(&sks->sks_list); 1195 list_add_tail(&sks->sks_list, &skc->skc_partial_list); 1196 skc->skc_slab_alloc--; 1197 } 1198 } 1199 1200 /* 1201 * Allocate an object from the per-cpu magazine, or if the magazine 1202 * is empty directly allocate from a slab and repopulate the magazine. 1203 */ 1204 void * 1205 spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags) 1206 { 1207 spl_kmem_magazine_t *skm; 1208 void *obj = NULL; 1209 1210 ASSERT0(flags & ~KM_PUBLIC_MASK); 1211 ASSERT(skc->skc_magic == SKC_MAGIC); 1212 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); 1213 1214 /* 1215 * Allocate directly from a Linux slab. All optimizations are left 1216 * to the underlying cache we only need to guarantee that KM_SLEEP 1217 * callers will never fail. 1218 */ 1219 if (skc->skc_flags & KMC_SLAB) { 1220 struct kmem_cache *slc = skc->skc_linux_cache; 1221 do { 1222 obj = kmem_cache_alloc(slc, kmem_flags_convert(flags)); 1223 } while ((obj == NULL) && !(flags & KM_NOSLEEP)); 1224 1225 if (obj != NULL) { 1226 /* 1227 * Even though we leave everything up to the 1228 * underlying cache we still keep track of 1229 * how many objects we've allocated in it for 1230 * better debuggability. 1231 */ 1232 percpu_counter_inc(&skc->skc_linux_alloc); 1233 } 1234 goto ret; 1235 } 1236 1237 local_irq_disable(); 1238 1239 restart: 1240 /* 1241 * Safe to update per-cpu structure without lock, but 1242 * in the restart case we must be careful to reacquire 1243 * the local magazine since this may have changed 1244 * when we need to grow the cache. 1245 */ 1246 skm = skc->skc_mag[smp_processor_id()]; 1247 ASSERT(skm->skm_magic == SKM_MAGIC); 1248 1249 if (likely(skm->skm_avail)) { 1250 /* Object available in CPU cache, use it */ 1251 obj = skm->skm_objs[--skm->skm_avail]; 1252 } else { 1253 obj = spl_cache_refill(skc, skm, flags); 1254 if ((obj == NULL) && !(flags & KM_NOSLEEP)) 1255 goto restart; 1256 1257 local_irq_enable(); 1258 goto ret; 1259 } 1260 1261 local_irq_enable(); 1262 ASSERT(obj); 1263 ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align)); 1264 1265 ret: 1266 /* Pre-emptively migrate object to CPU L1 cache */ 1267 if (obj) { 1268 if (obj && skc->skc_ctor) 1269 skc->skc_ctor(obj, skc->skc_private, flags); 1270 else 1271 prefetchw(obj); 1272 } 1273 1274 return (obj); 1275 } 1276 EXPORT_SYMBOL(spl_kmem_cache_alloc); 1277 1278 /* 1279 * Free an object back to the local per-cpu magazine, there is no 1280 * guarantee that this is the same magazine the object was originally 1281 * allocated from. We may need to flush entire from the magazine 1282 * back to the slabs to make space. 1283 */ 1284 void 1285 spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj) 1286 { 1287 spl_kmem_magazine_t *skm; 1288 unsigned long flags; 1289 int do_reclaim = 0; 1290 int do_emergency = 0; 1291 1292 ASSERT(skc->skc_magic == SKC_MAGIC); 1293 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); 1294 1295 /* 1296 * Run the destructor 1297 */ 1298 if (skc->skc_dtor) 1299 skc->skc_dtor(obj, skc->skc_private); 1300 1301 /* 1302 * Free the object from the Linux underlying Linux slab. 1303 */ 1304 if (skc->skc_flags & KMC_SLAB) { 1305 kmem_cache_free(skc->skc_linux_cache, obj); 1306 percpu_counter_dec(&skc->skc_linux_alloc); 1307 return; 1308 } 1309 1310 /* 1311 * While a cache has outstanding emergency objects all freed objects 1312 * must be checked. However, since emergency objects will never use 1313 * a virtual address these objects can be safely excluded as an 1314 * optimization. 1315 */ 1316 if (!is_vmalloc_addr(obj)) { 1317 spin_lock(&skc->skc_lock); 1318 do_emergency = (skc->skc_obj_emergency > 0); 1319 spin_unlock(&skc->skc_lock); 1320 1321 if (do_emergency && (spl_emergency_free(skc, obj) == 0)) 1322 return; 1323 } 1324 1325 local_irq_save(flags); 1326 1327 /* 1328 * Safe to update per-cpu structure without lock, but 1329 * no remote memory allocation tracking is being performed 1330 * it is entirely possible to allocate an object from one 1331 * CPU cache and return it to another. 1332 */ 1333 skm = skc->skc_mag[smp_processor_id()]; 1334 ASSERT(skm->skm_magic == SKM_MAGIC); 1335 1336 /* 1337 * Per-CPU cache full, flush it to make space for this object, 1338 * this may result in an empty slab which can be reclaimed once 1339 * interrupts are re-enabled. 1340 */ 1341 if (unlikely(skm->skm_avail >= skm->skm_size)) { 1342 spl_cache_flush(skc, skm, skm->skm_refill); 1343 do_reclaim = 1; 1344 } 1345 1346 /* Available space in cache, use it */ 1347 skm->skm_objs[skm->skm_avail++] = obj; 1348 1349 local_irq_restore(flags); 1350 1351 if (do_reclaim) 1352 spl_slab_reclaim(skc); 1353 } 1354 EXPORT_SYMBOL(spl_kmem_cache_free); 1355 1356 /* 1357 * Depending on how many and which objects are released it may simply 1358 * repopulate the local magazine which will then need to age-out. Objects 1359 * which cannot fit in the magazine will be released back to their slabs 1360 * which will also need to age out before being released. This is all just 1361 * best effort and we do not want to thrash creating and destroying slabs. 1362 */ 1363 void 1364 spl_kmem_cache_reap_now(spl_kmem_cache_t *skc) 1365 { 1366 ASSERT(skc->skc_magic == SKC_MAGIC); 1367 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); 1368 1369 if (skc->skc_flags & KMC_SLAB) 1370 return; 1371 1372 atomic_inc(&skc->skc_ref); 1373 1374 /* 1375 * Prevent concurrent cache reaping when contended. 1376 */ 1377 if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) 1378 goto out; 1379 1380 /* Reclaim from the magazine and free all now empty slabs. */ 1381 unsigned long irq_flags; 1382 local_irq_save(irq_flags); 1383 spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()]; 1384 spl_cache_flush(skc, skm, skm->skm_avail); 1385 local_irq_restore(irq_flags); 1386 1387 spl_slab_reclaim(skc); 1388 clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags); 1389 smp_mb__after_atomic(); 1390 wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING); 1391 out: 1392 atomic_dec(&skc->skc_ref); 1393 } 1394 EXPORT_SYMBOL(spl_kmem_cache_reap_now); 1395 1396 /* 1397 * This is stubbed out for code consistency with other platforms. There 1398 * is existing logic to prevent concurrent reaping so while this is ugly 1399 * it should do no harm. 1400 */ 1401 int 1402 spl_kmem_cache_reap_active(void) 1403 { 1404 return (0); 1405 } 1406 EXPORT_SYMBOL(spl_kmem_cache_reap_active); 1407 1408 /* 1409 * Reap all free slabs from all registered caches. 1410 */ 1411 void 1412 spl_kmem_reap(void) 1413 { 1414 spl_kmem_cache_t *skc = NULL; 1415 1416 down_read(&spl_kmem_cache_sem); 1417 list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) { 1418 spl_kmem_cache_reap_now(skc); 1419 } 1420 up_read(&spl_kmem_cache_sem); 1421 } 1422 EXPORT_SYMBOL(spl_kmem_reap); 1423 1424 int 1425 spl_kmem_cache_init(void) 1426 { 1427 init_rwsem(&spl_kmem_cache_sem); 1428 INIT_LIST_HEAD(&spl_kmem_cache_list); 1429 spl_kmem_cache_taskq = taskq_create("spl_kmem_cache", 1430 spl_kmem_cache_kmem_threads, maxclsyspri, 1431 spl_kmem_cache_kmem_threads * 8, INT_MAX, 1432 TASKQ_PREPOPULATE | TASKQ_DYNAMIC); 1433 1434 if (spl_kmem_cache_taskq == NULL) 1435 return (-ENOMEM); 1436 1437 return (0); 1438 } 1439 1440 void 1441 spl_kmem_cache_fini(void) 1442 { 1443 taskq_destroy(spl_kmem_cache_taskq); 1444 } 1445