/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/percpu_compat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <sys/wait.h>
#include <sys/string.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/prefetch.h>

/*
 * Within the scope of the spl-kmem.c file the kmem_cache_* definitions
 * are removed to allow access to the real Linux slab allocator.
 */
#undef	kmem_cache_destroy
#undef	kmem_cache_create
#undef	kmem_cache_alloc
#undef	kmem_cache_free


/*
 * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
 * with smp_mb__{before,after}_atomic() because they were redundant. This is
 * only used inside our SLAB allocator, so we implement an internal wrapper
 * here to give us smp_mb__{before,after}_atomic() on older kernels.
 */
#ifndef smp_mb__before_atomic
#define	smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif

#ifndef smp_mb__after_atomic
#define	smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif

/* BEGIN CSTYLED */
/*
 * Cache magazines are an optimization designed to minimize the cost of
 * allocating memory.  They do this by keeping a per-cpu cache of recently
 * freed objects, which can then be reallocated without taking a lock.  This
 * can improve performance on highly contended caches.  However, because
 * objects in magazines will prevent otherwise empty slabs from being
 * immediately released, this may not be ideal for low memory machines.
 *
 * For this reason spl_kmem_cache_magazine_size can be used to set a maximum
 * magazine size.  When this value is set to 0 the magazine size will be
 * automatically determined based on the object size.  Otherwise magazines
 * will be limited to 2-256 objects per magazine (i.e. per cpu).  Magazines
 * may never be entirely disabled in this implementation.
 */
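/*
 * Worked example (illustrative only, not an additional tunable): loading
 * the module with a hypothetical spl_kmem_cache_magazine_size=64 bypasses
 * the per-object-size heuristic in spl_magazine_size() below, and every
 * cache then gets MAX(MIN(64, 256), 2) == 64 objects per per-cpu magazine.
 * A value of 1 would be clamped up to 2, and 1000 would be clamped down
 * to 256.
 */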
static unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
	"Default magazine size (2-256), set automatically (0)");

static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");

static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");

/*
 * For small objects the Linux slab allocator should be used to make the most
 * efficient use of the memory.  However, large objects are not supported by
 * the Linux slab and therefore the SPL implementation is preferred.  A cutoff
 * of 16K was determined to be optimal for architectures using 4K pages and
 * to also work well on architectures using larger 64K page sizes.
 */
static unsigned int spl_kmem_cache_slab_limit =
    SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE;
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
	"Objects less than N bytes use the Linux slab");

/*
 * The number of threads available to allocate new slabs for caches.  This
 * should not need to be tuned but it is available for performance analysis.
 */
static unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
	"Number of spl_kmem_cache threads");
/* END CSTYLED */

/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs.  I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutex's, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types, unlike
 *    many Linux data types which do not need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab, which is backed
 *    by kmalloc'ed memory, performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32-bit arches.  This will seriously
 * constrain the size of the slab caches and their performance.
 */

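/*
 * Illustrative usage sketch (not part of this file): a typical consumer of
 * these Solaris-style interfaces pairs a constructor and destructor with a
 * cache, exactly the feature set motivating requirement 1) above.  Outside
 * of this file the kmem_cache_* names are macro-mapped onto the
 * spl_kmem_cache_* functions defined here.  The names foo_t, foo_cache,
 * foo_ctor and foo_dtor below are hypothetical.
 *
 *	typedef struct foo {
 *		kmutex_t	foo_lock;
 *		int		foo_state;
 *	} foo_t;
 *
 *	static int
 *	foo_ctor(void *buf, void *priv, int kmflags)
 *	{
 *		foo_t *fp = buf;
 *		mutex_init(&fp->foo_lock, NULL, MUTEX_DEFAULT, NULL);
 *		fp->foo_state = 0;
 *		return (0);
 *	}
 *
 *	static void
 *	foo_dtor(void *buf, void *priv)
 *	{
 *		foo_t *fp = buf;
 *		mutex_destroy(&fp->foo_lock);
 *	}
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
 *	    foo_ctor, foo_dtor, NULL, NULL, NULL, 0);
 *	foo_t *fp = kmem_cache_alloc(foo_cache, KM_SLEEP);
 *	...
 *	kmem_cache_free(foo_cache, fp);
 *	kmem_cache_destroy(foo_cache);
 */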
struct list_head spl_kmem_cache_list;	/* List of caches */
struct rw_semaphore spl_kmem_cache_sem;	/* Cache list lock */
static taskq_t *spl_kmem_cache_taskq;	/* Task queue for aging / reclaim */

static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);

static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
	gfp_t lflags = kmem_flags_convert(flags);
	void *ptr;

	ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);

	/* Resulting allocated memory will be page aligned */
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	return (ptr);
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	/*
	 * The Linux direct reclaim path uses this out of band value to
	 * determine if forward progress is being made.  Normally this is
	 * incremented by kmem_freepages() which is part of the various
	 * Linux slab implementations.  However, since we are using none
	 * of that infrastructure we are responsible for incrementing it.
	 */
	if (current->reclaim_state)
#ifdef	HAVE_RECLAIM_STATE_RECLAIMED
		current->reclaim_state->reclaimed += size >> PAGE_SHIFT;
#else
		current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
#endif
	vfree(ptr);
}

/*
 * Required space for each aligned sks.
 */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
	return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
	    skc->skc_obj_align, uint32_t));
}

/*
 * Required space for each aligned object.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
	uint32_t align = skc->skc_obj_align;

	return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
	    P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}

uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
	return (cache->skc_obj_total);
}
EXPORT_SYMBOL(spl_kmem_cache_inuse);

uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
	return (cache->skc_obj_size);
}
EXPORT_SYMBOL(spl_kmem_cache_entry_size);

/*
 * Lookup the spl_kmem_obj_t for an object, given a pointer to that object.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
	return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
	    skc->skc_obj_align, uint32_t));
}

/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects into one large address space to minimize the number
 * of calls to the allocator.  It is far better to do a few large
 * allocations and then subdivide them ourselves.  Now which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages.  We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node().  This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches.  Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 * +------------------------+
 * | spl_kmem_slab_t --+-+  |
 * | skc_obj_size    <-+ |  |
 * | spl_kmem_obj_t      |  |
 * | skc_obj_size    <---+  |
 * | spl_kmem_obj_t      |  |
 * | ...                 v  |
 * +------------------------+
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	void *base;
	uint32_t obj_size;

	base = kv_alloc(skc, skc->skc_slab_size, flags);
	if (base == NULL)
		return (NULL);

	sks = (spl_kmem_slab_t *)base;
	sks->sks_magic = SKS_MAGIC;
	sks->sks_objs = skc->skc_slab_objs;
	sks->sks_age = jiffies;
	sks->sks_cache = skc;
	INIT_LIST_HEAD(&sks->sks_list);
	INIT_LIST_HEAD(&sks->sks_free_list);
	sks->sks_ref = 0;
	obj_size = spl_obj_size(skc);

	for (int i = 0; i < sks->sks_objs; i++) {
		void *obj = base + spl_sks_size(skc) + (i * obj_size);

		ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
		spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
		sko->sko_addr = obj;
		sko->sko_magic = SKO_MAGIC;
		sko->sko_slab = sks;
		INIT_LIST_HEAD(&sko->sko_list);
		list_add_tail(&sko->sko_list, &sks->sks_free_list);
	}

	return (sks);
}

/*
 * Remove a slab from the complete or partial list; it must be called with
 * 'skc->skc_lock' held, but the actual free must be performed outside the
 * lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
	spl_kmem_cache_t *skc;

	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref == 0);

	skc = sks->sks_cache;
	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Update slab/objects counters in the cache, then remove the
	 * slab from the skc->skc_partial_list.  Finally add the slab
	 * and all its objects into the private work lists where the
	 * destructors will be called and the memory freed to the system.
	 */
	skc->skc_obj_total -= sks->sks_objs;
	skc->skc_slab_total--;
	list_del(&sks->sks_list);
	list_add(&sks->sks_list, sks_list);
	list_splice_init(&sks->sks_free_list, sko_list);
}

/*
 * Reclaim empty slabs at the end of the partial list.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
	spl_kmem_slab_t *sks = NULL, *m = NULL;
	spl_kmem_obj_t *sko = NULL, *n = NULL;
	LIST_HEAD(sks_list);
	LIST_HEAD(sko_list);

	/*
	 * Empty slabs and objects must be moved to a private list so they
	 * can be safely freed outside the spin lock.  All empty slabs are
	 * at the end of skc->skc_partial_list, therefore once a non-empty
	 * slab is found we can stop scanning.
	 */
	spin_lock(&skc->skc_lock);
	list_for_each_entry_safe_reverse(sks, m,
	    &skc->skc_partial_list, sks_list) {

		if (sks->sks_ref > 0)
			break;

		spl_slab_free(sks, &sks_list, &sko_list);
	}
	spin_unlock(&skc->skc_lock);

	/*
	 * The following two loops ensure all the object destructors are run,
	 * and the slabs themselves are freed.  This is all done outside the
	 * skc->skc_lock since this allows the destructor to sleep, and
	 * allows us to perform a conditional reschedule when freeing a
	 * large number of objects and slabs back to the system.
	 */

	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);
	}

	list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
		ASSERT(sks->sks_magic == SKS_MAGIC);
		kv_free(skc, sks, skc->skc_slab_size);
	}
}

static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
	struct rb_node *node = root->rb_node;
	spl_kmem_emergency_t *ske;
	unsigned long address = (unsigned long)obj;

	while (node) {
		ske = container_of(node, spl_kmem_emergency_t, ske_node);

		if (address < ske->ske_obj)
			node = node->rb_left;
		else if (address > ske->ske_obj)
			node = node->rb_right;
		else
			return (ske);
	}

	return (NULL);
}

static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	spl_kmem_emergency_t *ske_tmp;
	unsigned long address = ske->ske_obj;

	while (*new) {
		ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);

		parent = *new;
		if (address < ske_tmp->ske_obj)
			new = &((*new)->rb_left);
		else if (address > ske_tmp->ske_obj)
			new = &((*new)->rb_right);
		else
			return (0);
	}

	rb_link_node(&ske->ske_node, parent, new);
	rb_insert_color(&ske->ske_node, root);

	return (1);
}

/*
 * Allocate a single emergency object and track it in a red black tree.
 */
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
	gfp_t lflags = kmem_flags_convert(flags);
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);
	int empty;

	/* Last chance: use a partial slab if one now exists */
	spin_lock(&skc->skc_lock);
	empty = list_empty(&skc->skc_partial_list);
	spin_unlock(&skc->skc_lock);
	if (!empty)
		return (-EEXIST);

	ske = kmalloc(sizeof (*ske), lflags);
	if (ske == NULL)
		return (-ENOMEM);

	ske->ske_obj = __get_free_pages(lflags, order);
	if (ske->ske_obj == 0) {
		kfree(ske);
		return (-ENOMEM);
	}

	spin_lock(&skc->skc_lock);
	empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
	if (likely(empty)) {
		skc->skc_obj_total++;
		skc->skc_obj_emergency++;
		if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
			skc->skc_obj_emergency_max = skc->skc_obj_emergency;
	}
	spin_unlock(&skc->skc_lock);

	if (unlikely(!empty)) {
		free_pages(ske->ske_obj, order);
		kfree(ske);
		return (-EINVAL);
	}

	*obj = (void *)ske->ske_obj;

	return (0);
}

/*
 * Locate the passed object in the red black tree and free it.
 */
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);

	spin_lock(&skc->skc_lock);
	ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
	if (ske) {
		rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
		skc->skc_obj_emergency--;
		skc->skc_obj_total--;
	}
	spin_unlock(&skc->skc_lock);

	if (ske == NULL)
		return (-ENOENT);

	free_pages(ske->ske_obj, order);
	kfree(ske);

	return (0);
}

/*
 * Release objects from the per-cpu magazine back to their slab.  The flush
 * argument contains the max number of entries to remove from the magazine.
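 *
 * Worked example (illustrative values): with skm->skm_avail == 10 and
 * flush == 4, count becomes MIN(4, 10) == 4, the four oldest entries
 * skm->skm_objs[0..3] are handed back to their slabs via spl_cache_shrink(),
 * and the remaining six pointers are shifted to the front of the magazine
 * with memmove(), leaving skm->skm_avail == 6.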
 */
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	spin_lock(&skc->skc_lock);

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	int count = MIN(flush, skm->skm_avail);
	for (int i = 0; i < count; i++)
		spl_cache_shrink(skc, skm->skm_objs[i]);

	skm->skm_avail -= count;
	memmove(skm->skm_objs, &(skm->skm_objs[count]),
	    sizeof (void *) * skm->skm_avail);

	spin_unlock(&skc->skc_lock);
}

/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target spl_kmem_cache_obj_per_slab.  However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
	uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;

	sks_size = spl_sks_size(skc);
	obj_size = spl_obj_size(skc);
	max_size = (spl_kmem_cache_max_size * 1024 * 1024);
	tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);

	if (tgt_size <= max_size) {
		tgt_objs = (tgt_size - sks_size) / obj_size;
	} else {
		tgt_objs = (max_size - sks_size) / obj_size;
		tgt_size = (tgt_objs * obj_size) + sks_size;
	}

	if (tgt_objs == 0)
		return (-ENOSPC);

	*objs = tgt_objs;
	*size = tgt_size;

	return (0);
}

/*
 * Make a guess at a reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine.  Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	uint32_t obj_size = spl_obj_size(skc);
	int size;

	if (spl_kmem_cache_magazine_size > 0)
		return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));

	/* Per-magazine sizes below assume a 4KiB page size */
	if (obj_size > (PAGE_SIZE * 256))
		size = 4;	/* Minimum 4MiB per-magazine */
	else if (obj_size > (PAGE_SIZE * 32))
		size = 16;	/* Minimum 2MiB per-magazine */
	else if (obj_size > (PAGE_SIZE))
		size = 64;	/* Minimum 256KiB per-magazine */
	else if (obj_size > (PAGE_SIZE / 4))
		size = 128;	/* Minimum 128KiB per-magazine */
	else
		size = 256;

	return (size);
}

/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
	spl_kmem_magazine_t *skm;
	int size = sizeof (spl_kmem_magazine_t) +
	    sizeof (void *) * skc->skc_mag_size;

	skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
	if (skm) {
		skm->skm_magic = SKM_MAGIC;
		skm->skm_avail = 0;
		skm->skm_size = skc->skc_mag_size;
		skm->skm_refill = skc->skc_mag_refill;
		skm->skm_cache = skc;
		skm->skm_cpu = cpu;
	}

	return (skm);
}

/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_avail == 0);
	kfree(skm);
}

/*
 * Create all per-cpu magazines of reasonable sizes.
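 *
 * Worked example (illustrative, assuming 4KiB pages and the defaults
 * above): for a hypothetical cache of 8KiB objects, spl_obj_size() is
 * roughly 8KiB plus the aligned spl_kmem_obj_t footer, which lands in the
 * "obj_size > PAGE_SIZE" bucket of spl_magazine_size().  Each per-cpu
 * magazine therefore holds 64 objects and skc_mag_refill becomes
 * (64 + 1) / 2 == 32.  Setting spl_kmem_cache_magazine_size overrides
 * this heuristic entirely.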
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
	    num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

	for_each_possible_cpu(i) {
		skc->skc_mag[i] = spl_magazine_alloc(skc, i);
		if (!skc->skc_mag[i]) {
			for (i--; i >= 0; i--)
				spl_magazine_free(skc->skc_mag[i]);

			kfree(skc->skc_mag);
			return (-ENOMEM);
		}
	}

	return (0);
}

/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	for_each_possible_cpu(i) {
		skm = skc->skc_mag[i];
		spl_cache_flush(skc, skm, skm->skm_avail);
		spl_magazine_free(skm);
	}

	kfree(skc->skc_mag);
}

/*
 * Create an object cache based on the following arguments:
 * name		cache name
 * size		cache object size
 * align	cache object alignment
 * ctor		cache object constructor
 * dtor		cache object destructor
 * reclaim	cache object reclaim
 * priv		cache private data for ctor/dtor/reclaim
 * vmp		unused, must be NULL
 * flags
 *	KMC_KVMEM	Force kvmem backed SPL cache
 *	KMC_SLAB	Force Linux slab backed cache
 *	KMC_NODEBUG	Disable debugging (unsupported)
 */
spl_kmem_cache_t *
spl_kmem_cache_create(const char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
    void *priv, void *vmp, int flags)
{
	gfp_t lflags = kmem_flags_convert(KM_SLEEP);
	spl_kmem_cache_t *skc;
	int rc;

	/*
	 * Unsupported flags
	 */
	ASSERT(vmp == NULL);
	ASSERT(reclaim == NULL);

	might_sleep();

	skc = kzalloc(sizeof (*skc), lflags);
	if (skc == NULL)
		return (NULL);

	skc->skc_magic = SKC_MAGIC;
	skc->skc_name_size = strlen(name) + 1;
	skc->skc_name = kmalloc(skc->skc_name_size, lflags);
	if (skc->skc_name == NULL) {
		kfree(skc);
		return (NULL);
	}
	strlcpy(skc->skc_name, name, skc->skc_name_size);

	skc->skc_ctor = ctor;
	skc->skc_dtor = dtor;
	skc->skc_private = priv;
	skc->skc_vmp = vmp;
	skc->skc_linux_cache = NULL;
	skc->skc_flags = flags;
	skc->skc_obj_size = size;
	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
	atomic_set(&skc->skc_ref, 0);

	INIT_LIST_HEAD(&skc->skc_list);
	INIT_LIST_HEAD(&skc->skc_complete_list);
	INIT_LIST_HEAD(&skc->skc_partial_list);
	skc->skc_emergency_tree = RB_ROOT;
	spin_lock_init(&skc->skc_lock);
	init_waitqueue_head(&skc->skc_waitq);
	skc->skc_slab_fail = 0;
	skc->skc_slab_create = 0;
	skc->skc_slab_destroy = 0;
	skc->skc_slab_total = 0;
	skc->skc_slab_alloc = 0;
	skc->skc_slab_max = 0;
	skc->skc_obj_total = 0;
	skc->skc_obj_alloc = 0;
	skc->skc_obj_max = 0;
	skc->skc_obj_deadlock = 0;
	skc->skc_obj_emergency = 0;
	skc->skc_obj_emergency_max = 0;

	rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0,
	    GFP_KERNEL);
	if (rc != 0) {
		kfree(skc);
		return (NULL);
	}

	/*
	 * Verify the requested alignment restriction is sane.
	 */
	if (align) {
		VERIFY(ISP2(align));
		VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
		VERIFY3U(align, <=, PAGE_SIZE);
		skc->skc_obj_align = align;
	}

	/*
	 * When no specific type of slab is requested (kmem, vmem, or
	 * linuxslab) then select a cache type based on the object size
	 * and default tunables.
	 */
	if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
		if (spl_kmem_cache_slab_limit &&
		    size <= (size_t)spl_kmem_cache_slab_limit) {
			/*
			 * Objects smaller than spl_kmem_cache_slab_limit can
			 * use the Linux slab for better space-efficiency.
			 */
			skc->skc_flags |= KMC_SLAB;
		} else {
			/*
			 * All other objects are considered large and are
			 * placed on kvmem backed slabs.
			 */
			skc->skc_flags |= KMC_KVMEM;
		}
	}

	/*
	 * Given the type of slab allocate the required resources.
	 */
	if (skc->skc_flags & KMC_KVMEM) {
		rc = spl_slab_size(skc,
		    &skc->skc_slab_objs, &skc->skc_slab_size);
		if (rc)
			goto out;

		rc = spl_magazine_create(skc);
		if (rc)
			goto out;
	} else {
		unsigned long slabflags = 0;

		if (size > spl_kmem_cache_slab_limit)
			goto out;

#if defined(SLAB_USERCOPY)
		/*
		 * Required for PAX-enabled kernels if the slab is to be
		 * used for copying between user and kernel space.
		 */
		slabflags |= SLAB_USERCOPY;
#endif

#if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
		/*
		 * Newer grsec patchset uses kmem_cache_create_usercopy()
		 * instead of the SLAB_USERCOPY flag.
		 */
		skc->skc_linux_cache = kmem_cache_create_usercopy(
		    skc->skc_name, size, align, slabflags, 0, size, NULL);
#else
		skc->skc_linux_cache = kmem_cache_create(
		    skc->skc_name, size, align, slabflags, NULL);
#endif
		if (skc->skc_linux_cache == NULL)
			goto out;
	}

	down_write(&spl_kmem_cache_sem);
	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
	up_write(&spl_kmem_cache_sem);

	return (skc);
out:
	kfree(skc->skc_name);
	percpu_counter_destroy(&skc->skc_linux_alloc);
	kfree(skc);
	return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);

/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
	ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);

/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
	DECLARE_WAIT_QUEUE_HEAD(wq);
	taskqid_t id;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));

	down_write(&spl_kmem_cache_sem);
	list_del_init(&skc->skc_list);
	up_write(&spl_kmem_cache_sem);

	/* Cancel and wait for any pending delayed tasks */
	VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	spin_lock(&skc->skc_lock);
	id = skc->skc_taskqid;
	spin_unlock(&skc->skc_lock);

	taskq_cancel_id(spl_kmem_cache_taskq, id);

	/*
	 * Wait until all current callers complete, this is mainly
	 * to catch the case where a low memory situation triggers a
	 * cache reaping action which races with this destroy.
	 */
	wait_event(wq, atomic_read(&skc->skc_ref) == 0);

	if (skc->skc_flags & KMC_KVMEM) {
		spl_magazine_destroy(skc);
		spl_slab_reclaim(skc);
	} else {
		ASSERT(skc->skc_flags & KMC_SLAB);
		kmem_cache_destroy(skc->skc_linux_cache);
	}

	spin_lock(&skc->skc_lock);

	/*
	 * Validate there are no objects in use and free all the
	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
	 */
	ASSERT3U(skc->skc_slab_alloc, ==, 0);
	ASSERT3U(skc->skc_obj_alloc, ==, 0);
	ASSERT3U(skc->skc_slab_total, ==, 0);
	ASSERT3U(skc->skc_obj_total, ==, 0);
	ASSERT3U(skc->skc_obj_emergency, ==, 0);
	ASSERT(list_empty(&skc->skc_complete_list));

	ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
	percpu_counter_destroy(&skc->skc_linux_alloc);

	spin_unlock(&skc->skc_lock);

	kfree(skc->skc_name);
	kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);

/*
 * Allocate an object from a slab attached to the cache.  This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
	spl_kmem_obj_t *sko;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);

	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	ASSERT(sko->sko_addr != NULL);

	/* Remove from sks_free_list */
	list_del_init(&sko->sko_list);

	sks->sks_age = jiffies;
	sks->sks_ref++;
	skc->skc_obj_alloc++;

	/* Track max obj usage statistics */
	if (skc->skc_obj_alloc > skc->skc_obj_max)
		skc->skc_obj_max = skc->skc_obj_alloc;

	/* Track max slab usage statistics */
	if (sks->sks_ref == 1) {
		skc->skc_slab_alloc++;

		if (skc->skc_slab_alloc > skc->skc_slab_max)
			skc->skc_slab_max = skc->skc_slab_alloc;
	}

	return (sko->sko_addr);
}

/*
 * Generic slab allocation function to be run by the global work queues.
 * It is responsible for allocating a new slab, linking it in to the list
 * of partial slabs, and then waking any waiters.
 */
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;

	fstrans_cookie_t cookie = spl_fstrans_mark();
	sks = spl_slab_alloc(skc, flags);
	spl_fstrans_unmark(cookie);

	spin_lock(&skc->skc_lock);
	if (sks) {
		skc->skc_slab_total++;
		skc->skc_obj_total += sks->sks_objs;
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);

		smp_mb__before_atomic();
		clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
		smp_mb__after_atomic();
	}
	spin_unlock(&skc->skc_lock);

	return (sks == NULL ? -ENOMEM : 0);
}

static void
spl_cache_grow_work(void *data)
{
	spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
	spl_kmem_cache_t *skc = ska->ska_cache;

	int error = __spl_cache_grow(skc, ska->ska_flags);

	atomic_dec(&skc->skc_ref);
	smp_mb__before_atomic();
	clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
	smp_mb__after_atomic();
	if (error == 0)
		wake_up_all(&skc->skc_waitq);

	kfree(ska);
}

/*
 * Returns non-zero when a new slab should be available.
 */
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
	return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}

/*
 * No available objects on any slabs, create a new slab.  Note that this
 * functionality is disabled for KMC_SLAB caches which are backed by the
 * Linux slab.
 */
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
	int remaining, rc = 0;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	*obj = NULL;

	/*
	 * Since we can't sleep, attempt an emergency allocation to satisfy
	 * the request.  The only alternative is to fail the allocation, but
	 * it's preferable to try.  The use of KM_NOSLEEP is expected to
	 * be rare.
	 */
	if (flags & KM_NOSLEEP)
		return (spl_emergency_alloc(skc, flags, obj));

	might_sleep();

	/*
	 * Before allocating a new slab wait for any reaping to complete and
	 * then return so the local magazine can be rechecked for new objects.
	 */
	if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
		    TASK_UNINTERRUPTIBLE);
		return (rc ? rc : -EAGAIN);
	}

	/*
	 * Note: It would be nice to reduce the overhead of context switch
	 * and improve NUMA locality, by trying to allocate a new slab in the
	 * current process context with KM_NOSLEEP flag.
	 *
	 * However, this can't be applied to vmem/kvmem due to a bug that
	 * spl_vmalloc() doesn't honor gfp flags in page table allocation.
	 */

	/*
	 * This is handled by dispatching a work request to the global work
	 * queue.  This allows us to asynchronously allocate a new slab while
	 * retaining the ability to safely fall back to smaller synchronous
	 * allocations to ensure forward progress is always maintained.
	 */
	if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
		spl_kmem_alloc_t *ska;

		ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
		if (ska == NULL) {
			clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
			smp_mb__after_atomic();
			wake_up_all(&skc->skc_waitq);
			return (-ENOMEM);
		}

		atomic_inc(&skc->skc_ref);
		ska->ska_cache = skc;
		ska->ska_flags = flags;
		taskq_init_ent(&ska->ska_tqe);
		taskq_dispatch_ent(spl_kmem_cache_taskq,
		    spl_cache_grow_work, ska, 0, &ska->ska_tqe);
	}

	/*
	 * The goal here is to only detect the rare case where a virtual slab
	 * allocation has deadlocked.  We must be careful to minimize the use
	 * of emergency objects which are more expensive to track.  Therefore,
	 * we set a very long timeout for the asynchronous allocation and if
	 * the timeout is reached the cache is flagged as deadlocked.  From
	 * this point only new emergency objects will be allocated until the
	 * asynchronous allocation completes and clears the deadlocked flag.
	 */
	if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
		rc = spl_emergency_alloc(skc, flags, obj);
	} else {
		remaining = wait_event_timeout(skc->skc_waitq,
		    spl_cache_grow_wait(skc), HZ / 10);

		if (!remaining) {
			spin_lock(&skc->skc_lock);
			if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
				set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
				skc->skc_obj_deadlock++;
			}
			spin_unlock(&skc->skc_lock);
		}

		rc = -ENOMEM;
	}

	return (rc);
}

/*
 * Refill a per-cpu magazine with objects from the slabs for this cache.
 * Ideally the magazine can be repopulated using existing objects which have
 * been released; however, if we are unable to locate enough free objects new
 * slabs of objects will be created.  On success NULL is returned, otherwise
 * the address of a single emergency object is returned for use by the caller.
 */
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
	spl_kmem_slab_t *sks;
	int count = 0, rc, refill;
	void *obj = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
	spin_lock(&skc->skc_lock);

	while (refill > 0) {
		/* No slabs available, we may need to grow the cache */
		if (list_empty(&skc->skc_partial_list)) {
			spin_unlock(&skc->skc_lock);

			local_irq_enable();
			rc = spl_cache_grow(skc, flags, &obj);
			local_irq_disable();

			/* Emergency object for immediate use by caller */
			if (rc == 0 && obj != NULL)
				return (obj);

			if (rc)
				goto out;

			/* Rescheduled to a different CPU, skm is not local */
			if (skm != skc->skc_mag[smp_processor_id()])
				goto out;

			/*
			 * Potentially rescheduled to the same CPU but
			 * allocations may have occurred from this CPU while
			 * we were sleeping so recalculate max refill.
			 */
			refill = MIN(refill, skm->skm_size - skm->skm_avail);

			spin_lock(&skc->skc_lock);
			continue;
		}

		/* Grab the next available slab */
		sks = list_entry((&skc->skc_partial_list)->next,
		    spl_kmem_slab_t, sks_list);
		ASSERT(sks->sks_magic == SKS_MAGIC);
		ASSERT(sks->sks_ref < sks->sks_objs);
		ASSERT(!list_empty(&sks->sks_free_list));

		/*
		 * Consume as many objects as needed to refill the requested
		 * cache.  We must also be careful not to overfill it.
		 */
		while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
		    ++count) {
			ASSERT(skm->skm_avail < skm->skm_size);
			ASSERT(count < skm->skm_size);
			skm->skm_objs[skm->skm_avail++] =
			    spl_cache_obj(skc, sks);
		}

		/* Move slab to skc_complete_list when full */
		if (sks->sks_ref == sks->sks_objs) {
			list_del(&sks->sks_list);
			list_add(&sks->sks_list, &skc->skc_complete_list);
		}
	}

	spin_unlock(&skc->skc_lock);
out:
	return (NULL);
}

/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_slab_t *sks = NULL;
	spl_kmem_obj_t *sko = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	sko = spl_sko_from_obj(skc, obj);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	sks = sko->sko_slab;
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_cache == skc);
	list_add(&sko->sko_list, &sks->sks_free_list);

	sks->sks_age = jiffies;
	sks->sks_ref--;
	skc->skc_obj_alloc--;

	/*
	 * Move slab to skc_partial_list when no longer full.  Slabs
	 * are added to the head to keep the partial list in quasi-full
	 * sorted order.  Fuller at the head, emptier at the tail.
	 */
	if (sks->sks_ref == (sks->sks_objs - 1)) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_partial_list);
	}

	/*
	 * Move empty slabs to the end of the partial list so
	 * they can be easily found and freed during reclamation.
	 */
	if (sks->sks_ref == 0) {
		list_del(&sks->sks_list);
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
		skc->skc_slab_alloc--;
	}
}

/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_magazine_t *skm;
	void *obj = NULL;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Allocate directly from a Linux slab.  All optimizations are left
	 * to the underlying cache; we only need to guarantee that KM_SLEEP
	 * callers will never fail.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		struct kmem_cache *slc = skc->skc_linux_cache;
		do {
			obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
		} while ((obj == NULL) && !(flags & KM_NOSLEEP));

		if (obj != NULL) {
			/*
			 * Even though we leave everything up to the
			 * underlying cache we still keep track of
			 * how many objects we've allocated in it for
			 * better debuggability.
			 */
			percpu_counter_inc(&skc->skc_linux_alloc);
		}
		goto ret;
	}

	local_irq_disable();

restart:
	/*
	 * Safe to update per-cpu structure without lock, but
	 * in the restart case we must be careful to reacquire
	 * the local magazine since this may have changed
	 * when we need to grow the cache.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	if (likely(skm->skm_avail)) {
		/* Object available in CPU cache, use it */
		obj = skm->skm_objs[--skm->skm_avail];
	} else {
		obj = spl_cache_refill(skc, skm, flags);
		if ((obj == NULL) && !(flags & KM_NOSLEEP))
			goto restart;

		local_irq_enable();
		goto ret;
	}

	local_irq_enable();
	ASSERT(obj);
	ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

ret:
	/* Pre-emptively migrate object to CPU L1 cache */
	if (obj) {
		if (obj && skc->skc_ctor)
			skc->skc_ctor(obj, skc->skc_private, flags);
		else
			prefetchw(obj);
	}

	return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);

/*
 * Free an object back to the local per-cpu magazine; there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from.  We may need to flush entries from the magazine
 * back to the slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_magazine_t *skm;
	unsigned long flags;
	int do_reclaim = 0;
	int do_emergency = 0;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Run the destructor
	 */
	if (skc->skc_dtor)
		skc->skc_dtor(obj, skc->skc_private);

	/*
	 * Free the object back to the underlying Linux slab.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		kmem_cache_free(skc->skc_linux_cache, obj);
		percpu_counter_dec(&skc->skc_linux_alloc);
		return;
	}

	/*
	 * While a cache has outstanding emergency objects all freed objects
	 * must be checked.  However, since emergency objects will never use
	 * a virtual address these objects can be safely excluded as an
	 * optimization.
	 */
	if (!is_vmalloc_addr(obj)) {
		spin_lock(&skc->skc_lock);
		do_emergency = (skc->skc_obj_emergency > 0);
		spin_unlock(&skc->skc_lock);

		if (do_emergency && (spl_emergency_free(skc, obj) == 0))
			return;
	}

	local_irq_save(flags);

	/*
	 * Safe to update the per-cpu structure without a lock, but because
	 * no remote memory allocation tracking is performed it is entirely
	 * possible to allocate an object from one CPU cache and return it
	 * to another.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/*
	 * Per-CPU cache full, flush it to make space for this object,
	 * this may result in an empty slab which can be reclaimed once
	 * interrupts are re-enabled.
	 */
	if (unlikely(skm->skm_avail >= skm->skm_size)) {
		spl_cache_flush(skc, skm, skm->skm_refill);
		do_reclaim = 1;
	}

	/* Available space in cache, use it */
	skm->skm_objs[skm->skm_avail++] = obj;

	local_irq_restore(flags);

	if (do_reclaim)
		spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);

/*
 * Depending on how many and which objects are released it may simply
 * repopulate the local magazine which will then need to age-out.  Objects
 * which cannot fit in the magazine will be released back to their slabs
 * which will also need to age out before being released.  This is all just
 * best effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	if (skc->skc_flags & KMC_SLAB)
		return;

	atomic_inc(&skc->skc_ref);

	/*
	 * Prevent concurrent cache reaping when contended.
	 */
	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
		goto out;

	/* Reclaim from the magazine and free all now empty slabs. */
	unsigned long irq_flags;
	local_irq_save(irq_flags);
	spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
	spl_cache_flush(skc, skm, skm->skm_avail);
	local_irq_restore(irq_flags);

	spl_slab_reclaim(skc);
	clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
	smp_mb__after_atomic();
	wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
	atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);

/*
 * This is stubbed out for code consistency with other platforms.  There
 * is existing logic to prevent concurrent reaping so while this is ugly
 * it should do no harm.
 */
int
spl_kmem_cache_reap_active(void)
{
	return (0);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_active);

/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
	spl_kmem_cache_t *skc = NULL;

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		spl_kmem_cache_reap_now(skc);
	}
	up_read(&spl_kmem_cache_sem);
}
EXPORT_SYMBOL(spl_kmem_reap);

int
spl_kmem_cache_init(void)
{
	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);
	spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
	    spl_kmem_cache_kmem_threads, maxclsyspri,
	    spl_kmem_cache_kmem_threads * 8, INT_MAX,
	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	if (spl_kmem_cache_taskq == NULL)
		return (-ENOMEM);

	return (0);
}

void
spl_kmem_cache_fini(void)
{
	taskq_destroy(spl_kmem_cache_taskq);
}
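/*
 * Illustrative call pattern (not part of this file): spl_kmem_cache_init()
 * is expected to run once during SPL module initialization and
 * spl_kmem_cache_fini() during teardown, bracketing all cache users.  A
 * hypothetical low-memory handler that wants to give empty slabs back to
 * the system could be as simple as:
 *
 *	static void
 *	my_lowmem_handler(void)
 *	{
 *		spl_kmem_reap();	// every registered cache
 *	}
 *
 * or spl_kmem_cache_reap_now(skc) for a single known cache.  Both are best
 * effort; the calling CPU's magazine is flushed and only slabs whose
 * objects are all free are returned to the system by spl_slab_reclaim().
 */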