/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/percpu_compat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <sys/wait.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/prefetch.h>

/*
 * Within the scope of spl-kmem.c file the kmem_cache_* definitions
 * are removed to allow access to the real Linux slab allocator.
 */
#undef	kmem_cache_destroy
#undef	kmem_cache_create
#undef	kmem_cache_alloc
#undef	kmem_cache_free


/*
 * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
 * with smp_mb__{before,after}_atomic() because they were redundant. This is
 * only used inside our SLAB allocator, so we implement an internal wrapper
 * here to give us smp_mb__{before,after}_atomic() on older kernels.
 */
#ifndef smp_mb__before_atomic
#define	smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif

#ifndef smp_mb__after_atomic
#define	smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif

/* BEGIN CSTYLED */

/*
 * Cache magazines are an optimization designed to minimize the cost of
 * allocating memory.  They do this by keeping a per-cpu cache of recently
 * freed objects, which can then be reallocated without taking a lock. This
 * can improve performance on highly contended caches.  However, because
 * objects in magazines will prevent otherwise empty slabs from being
 * immediately released this may not be ideal for low memory machines.
 *
 * For this reason spl_kmem_cache_magazine_size can be used to set a maximum
 * magazine size.  When this value is set to 0 the magazine size will be
 * automatically determined based on the object size.  Otherwise magazines
 * will be limited to 2-256 objects per magazine (i.e per cpu).  Magazines
 * may never be entirely disabled in this implementation.
 */
unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
	"Default magazine size (2-256), set automatically (0)");
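
/*
 * Illustrative note (not part of the original source): because the
 * parameter above is registered read-only (0444), a magazine size cap
 * has to be supplied at module load time, e.g. assuming the SPL is
 * built as its own spl.ko module:
 *
 *	modprobe spl spl_kmem_cache_magazine_size=8
 *
 * Values outside the 2-256 range are clamped by spl_magazine_size().
 */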

/*
 * The default behavior is to report the number of objects remaining in the
 * cache.  This allows the Linux VM to repeatedly reclaim objects from the
 * cache when memory is low to satisfy other memory allocations.  Alternately,
 * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
 * is reclaimed.  This may increase the likelihood of out of memory events.
 */
unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
module_param(spl_kmem_cache_reclaim, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");

unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");

unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");

/*
 * For small objects the Linux slab allocator should be used to make the most
 * efficient use of the memory.  However, large objects are not supported by
 * the Linux slab and therefore the SPL implementation is preferred.  A cutoff
 * of 16K was determined to be optimal for architectures using 4K pages and
 * to also work well on architectures using larger 64K page sizes.
 */
unsigned int spl_kmem_cache_slab_limit = 16384;
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
	"Objects less than N bytes use the Linux slab");

/*
 * The number of threads available to allocate new slabs for caches.  This
 * should not need to be tuned but it is available for performance analysis.
 */
unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
	"Number of spl_kmem_cache threads");
/* END CSTYLED */

/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs.  I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutexes, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types, unlike
 *    many Linux data types which do not need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab which is backed
 *    by kmalloc'ed memory performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32bit arches.  This will seriously
 * constrain the size of the slab caches and their performance.
 */
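
/*
 * A minimal sketch (illustrative only, not part of this file) of why
 * constructors and destructors (point 1 above) matter to SPL consumers:
 * a cache of objects that embed a kmutex_t wants the expensive
 * init/destroy work done once per object, not on every allocation.
 * The names below are hypothetical.
 *
 *	typedef struct my_node {
 *		kmutex_t	mn_lock;
 *		list_node_t	mn_link;
 *	} my_node_t;
 *
 *	static int
 *	my_node_ctor(void *buf, void *priv, int kmflags)
 *	{
 *		my_node_t *mn = buf;
 *		mutex_init(&mn->mn_lock, NULL, MUTEX_DEFAULT, NULL);
 *		return (0);
 *	}
 *
 *	static void
 *	my_node_dtor(void *buf, void *priv)
 *	{
 *		my_node_t *mn = buf;
 *		mutex_destroy(&mn->mn_lock);
 *	}
 */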

struct list_head spl_kmem_cache_list;	/* List of caches */
struct rw_semaphore spl_kmem_cache_sem;	/* Cache list lock */
taskq_t *spl_kmem_cache_taskq;		/* Task queue for aging / reclaim */

static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);

static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
	gfp_t lflags = kmem_flags_convert(flags);
	void *ptr;

	ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);

	/* Resulting allocated memory will be page aligned */
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	return (ptr);
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	/*
	 * The Linux direct reclaim path uses this out of band value to
	 * determine if forward progress is being made.  Normally this is
	 * incremented by kmem_freepages() which is part of the various
	 * Linux slab implementations.  However, since we are using none
	 * of that infrastructure we are responsible for incrementing it.
	 */
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;

	vfree(ptr);
}

/*
 * Required space for each aligned sks.
 */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
	return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
	    skc->skc_obj_align, uint32_t));
}

/*
 * Required space for each aligned object.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
	uint32_t align = skc->skc_obj_align;

	return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
	    P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}

uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
	return (cache->skc_obj_total);
}
EXPORT_SYMBOL(spl_kmem_cache_inuse);

uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
	return (cache->skc_obj_size);
}
EXPORT_SYMBOL(spl_kmem_cache_entry_size);

/*
 * Lookup the spl_kmem_obj_t for an object given that object.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
	return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
	    skc->skc_obj_align, uint32_t));
}
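
/*
 * Worked example (illustrative numbers, not derived from a real cache):
 * with skc_obj_size = 520 and skc_obj_align = 32, spl_obj_size() rounds
 * the object itself up to 544 bytes and then adds sizeof (spl_kmem_obj_t)
 * rounded up to the same 32 byte alignment, so every object slot also
 * carries room for its tracking header.  spl_sko_from_obj() then finds
 * that header at obj + 544, i.e. at the aligned end of the object.
 */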

/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects in to one large address space to minimize the number
 * of calls to the allocator.  It is far better to do a few large
 * allocations and then subdivide them ourselves.  Now which allocator
 * we use requires balancing a few trade offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) its cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages.  We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node().  This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches.  Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 *   +------------------------+
 *   | spl_kmem_slab_t --+-+  |
 *   | skc_obj_size    <-+ |  |
 *   | spl_kmem_obj_t      |  |
 *   | skc_obj_size    <---+  |
 *   | spl_kmem_obj_t      |  |
 *   | ...                 v  |
 *   +------------------------+
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	void *base;
	uint32_t obj_size;

	base = kv_alloc(skc, skc->skc_slab_size, flags);
	if (base == NULL)
		return (NULL);

	sks = (spl_kmem_slab_t *)base;
	sks->sks_magic = SKS_MAGIC;
	sks->sks_objs = skc->skc_slab_objs;
	sks->sks_age = jiffies;
	sks->sks_cache = skc;
	INIT_LIST_HEAD(&sks->sks_list);
	INIT_LIST_HEAD(&sks->sks_free_list);
	sks->sks_ref = 0;
	obj_size = spl_obj_size(skc);

	for (int i = 0; i < sks->sks_objs; i++) {
		void *obj = base + spl_sks_size(skc) + (i * obj_size);

		ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
		spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
		sko->sko_addr = obj;
		sko->sko_magic = SKO_MAGIC;
		sko->sko_slab = sks;
		INIT_LIST_HEAD(&sko->sko_list);
		list_add_tail(&sko->sko_list, &sks->sks_free_list);
	}

	return (sks);
}

/*
 * Remove a slab from the complete or partial list; it must be called
 * with 'skc->skc_lock' held but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
	spl_kmem_cache_t *skc;

	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref == 0);

	skc = sks->sks_cache;
	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Update slab/objects counters in the cache, then remove the
	 * slab from the skc->skc_partial_list.  Finally add the slab
	 * and all its objects in to the private work lists where the
	 * destructors will be called and the memory freed to the system.
	 */
	skc->skc_obj_total -= sks->sks_objs;
	skc->skc_slab_total--;
	list_del(&sks->sks_list);
	list_add(&sks->sks_list, sks_list);
	list_splice_init(&sks->sks_free_list, sko_list);
}

/*
 * Reclaim empty slabs at the end of the partial list.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
	spl_kmem_slab_t *sks = NULL, *m = NULL;
	spl_kmem_obj_t *sko = NULL, *n = NULL;
	LIST_HEAD(sks_list);
	LIST_HEAD(sko_list);

	/*
	 * Empty slabs and objects must be moved to a private list so they
	 * can be safely freed outside the spin lock.  All empty slabs are
	 * at the end of skc->skc_partial_list, therefore once a non-empty
	 * slab is found we can stop scanning.
	 */
	spin_lock(&skc->skc_lock);
	list_for_each_entry_safe_reverse(sks, m,
	    &skc->skc_partial_list, sks_list) {

		if (sks->sks_ref > 0)
			break;

		spl_slab_free(sks, &sks_list, &sko_list);
	}
	spin_unlock(&skc->skc_lock);

	/*
	 * The following two loops ensure all the object destructors are run,
	 * and the slabs themselves are freed.  This is all done outside the
	 * skc->skc_lock since this allows the destructor to sleep, and
	 * allows us to perform a conditional reschedule when freeing a
	 * large number of objects and slabs back to the system.
	 */

	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);
	}

	list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
		ASSERT(sks->sks_magic == SKS_MAGIC);
		kv_free(skc, sks, skc->skc_slab_size);
	}
}

static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
	struct rb_node *node = root->rb_node;
	spl_kmem_emergency_t *ske;
	unsigned long address = (unsigned long)obj;

	while (node) {
		ske = container_of(node, spl_kmem_emergency_t, ske_node);

		if (address < ske->ske_obj)
			node = node->rb_left;
		else if (address > ske->ske_obj)
			node = node->rb_right;
		else
			return (ske);
	}

	return (NULL);
}

static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	spl_kmem_emergency_t *ske_tmp;
	unsigned long address = ske->ske_obj;

	while (*new) {
		ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);

		parent = *new;
		if (address < ske_tmp->ske_obj)
			new = &((*new)->rb_left);
		else if (address > ske_tmp->ske_obj)
			new = &((*new)->rb_right);
		else
			return (0);
	}

	rb_link_node(&ske->ske_node, parent, new);
	rb_insert_color(&ske->ske_node, root);

	return (1);
}

/*
 * Allocate a single emergency object and track it in a red black tree.
 */
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
	gfp_t lflags = kmem_flags_convert(flags);
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);
	int empty;

	/* Last chance, use a partial slab if one now exists */
	spin_lock(&skc->skc_lock);
	empty = list_empty(&skc->skc_partial_list);
	spin_unlock(&skc->skc_lock);
	if (!empty)
		return (-EEXIST);

	ske = kmalloc(sizeof (*ske), lflags);
	if (ske == NULL)
		return (-ENOMEM);

	ske->ske_obj = __get_free_pages(lflags, order);
	if (ske->ske_obj == 0) {
		kfree(ske);
		return (-ENOMEM);
	}

	spin_lock(&skc->skc_lock);
	empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
	if (likely(empty)) {
		skc->skc_obj_total++;
		skc->skc_obj_emergency++;
		if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
			skc->skc_obj_emergency_max = skc->skc_obj_emergency;
	}
	spin_unlock(&skc->skc_lock);

	if (unlikely(!empty)) {
		free_pages(ske->ske_obj, order);
		kfree(ske);
		return (-EINVAL);
	}

	*obj = (void *)ske->ske_obj;

	return (0);
}

/*
 * Locate the passed object in the red black tree and free it.
 */
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);

	spin_lock(&skc->skc_lock);
	ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
	if (ske) {
		rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
		skc->skc_obj_emergency--;
		skc->skc_obj_total--;
	}
	spin_unlock(&skc->skc_lock);

	if (ske == NULL)
		return (-ENOENT);

	free_pages(ske->ske_obj, order);
	kfree(ske);

	return (0);
}
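
/*
 * Sizing note (illustrative, not from the original source): emergency
 * objects are backed by whole pages, so with 4 KiB pages a cache of
 * 3072 byte objects still consumes a full page per emergency object
 * (get_order(3072) == 0), while a 5000 byte object needs an order-1,
 * 8 KiB allocation.  This is one reason emergency allocations are a
 * last resort and are tracked individually in the red black tree.
 */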

/*
 * Release objects from the per-cpu magazine back to their slab.  The flush
 * argument contains the max number of entries to remove from the magazine.
 */
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	spin_lock(&skc->skc_lock);

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	int count = MIN(flush, skm->skm_avail);
	for (int i = 0; i < count; i++)
		spl_cache_shrink(skc, skm->skm_objs[i]);

	skm->skm_avail -= count;
	memmove(skm->skm_objs, &(skm->skm_objs[count]),
	    sizeof (void *) * skm->skm_avail);

	spin_unlock(&skc->skc_lock);
}

/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target spl_kmem_cache_obj_per_slab.  However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
	uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;

	sks_size = spl_sks_size(skc);
	obj_size = spl_obj_size(skc);
	max_size = (spl_kmem_cache_max_size * 1024 * 1024);
	tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);

	if (tgt_size <= max_size) {
		tgt_objs = (tgt_size - sks_size) / obj_size;
	} else {
		tgt_objs = (max_size - sks_size) / obj_size;
		tgt_size = (tgt_objs * obj_size) + sks_size;
	}

	if (tgt_objs == 0)
		return (-ENOSPC);

	*objs = tgt_objs;
	*size = tgt_size;

	return (0);
}

/*
 * Make a guess at reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine.  Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	uint32_t obj_size = spl_obj_size(skc);
	int size;

	if (spl_kmem_cache_magazine_size > 0)
		return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));

	/* Per-magazine sizes below assume a 4Kib page size */
	if (obj_size > (PAGE_SIZE * 256))
		size = 4;	/* Minimum 4Mib per-magazine */
	else if (obj_size > (PAGE_SIZE * 32))
		size = 16;	/* Minimum 2Mib per-magazine */
	else if (obj_size > (PAGE_SIZE))
		size = 64;	/* Minimum 256Kib per-magazine */
	else if (obj_size > (PAGE_SIZE / 4))
		size = 128;	/* Minimum 128Kib per-magazine */
	else
		size = 256;

	return (size);
}
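
/*
 * Worked example (illustrative, assuming 4 KiB pages): a cache whose
 * aligned object size is 512 bytes fails every obj_size test above
 * (512 is not greater than PAGE_SIZE / 4 == 1024), so the automatic
 * magazine size is 256 objects per CPU.  A 16 KiB object instead
 * matches the obj_size > PAGE_SIZE case and gets a 64 object magazine.
 */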

/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
	spl_kmem_magazine_t *skm;
	int size = sizeof (spl_kmem_magazine_t) +
	    sizeof (void *) * skc->skc_mag_size;

	skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
	if (skm) {
		skm->skm_magic = SKM_MAGIC;
		skm->skm_avail = 0;
		skm->skm_size = skc->skc_mag_size;
		skm->skm_refill = skc->skc_mag_refill;
		skm->skm_cache = skc;
		skm->skm_cpu = cpu;
	}

	return (skm);
}

/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_avail == 0);
	kfree(skm);
}

/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
	    num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

	for_each_possible_cpu(i) {
		skc->skc_mag[i] = spl_magazine_alloc(skc, i);
		if (!skc->skc_mag[i]) {
			for (i--; i >= 0; i--)
				spl_magazine_free(skc->skc_mag[i]);

			kfree(skc->skc_mag);
			return (-ENOMEM);
		}
	}

	return (0);
}

/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	for_each_possible_cpu(i) {
		skm = skc->skc_mag[i];
		spl_cache_flush(skc, skm, skm->skm_avail);
		spl_magazine_free(skm);
	}

	kfree(skc->skc_mag);
}

/*
 * Create an object cache based on the following arguments:
 * name		cache name
 * size		cache object size
 * align	cache object alignment
 * ctor		cache object constructor
 * dtor		cache object destructor
 * reclaim	cache object reclaim
 * priv		cache private data for ctor/dtor/reclaim
 * vmp		unused, must be NULL
 * flags
 *	KMC_KVMEM	Force kvmem backed SPL cache
 *	KMC_SLAB	Force Linux slab backed cache
 *	KMC_NODEBUG	Disable debugging (unsupported)
 */
spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
    void *priv, void *vmp, int flags)
{
	gfp_t lflags = kmem_flags_convert(KM_SLEEP);
	spl_kmem_cache_t *skc;
	int rc;

	/*
	 * Unsupported flags
	 */
	ASSERT(vmp == NULL);
	ASSERT(reclaim == NULL);

	might_sleep();

	skc = kzalloc(sizeof (*skc), lflags);
	if (skc == NULL)
		return (NULL);

	skc->skc_magic = SKC_MAGIC;
	skc->skc_name_size = strlen(name) + 1;
	skc->skc_name = (char *)kmalloc(skc->skc_name_size, lflags);
	if (skc->skc_name == NULL) {
		kfree(skc);
		return (NULL);
	}
	strncpy(skc->skc_name, name, skc->skc_name_size);

	skc->skc_ctor = ctor;
	skc->skc_dtor = dtor;
	skc->skc_private = priv;
	skc->skc_vmp = vmp;
	skc->skc_linux_cache = NULL;
	skc->skc_flags = flags;
	skc->skc_obj_size = size;
	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
	atomic_set(&skc->skc_ref, 0);

	INIT_LIST_HEAD(&skc->skc_list);
	INIT_LIST_HEAD(&skc->skc_complete_list);
	INIT_LIST_HEAD(&skc->skc_partial_list);
	skc->skc_emergency_tree = RB_ROOT;
	spin_lock_init(&skc->skc_lock);
	init_waitqueue_head(&skc->skc_waitq);
	skc->skc_slab_fail = 0;
	skc->skc_slab_create = 0;
	skc->skc_slab_destroy = 0;
	skc->skc_slab_total = 0;
	skc->skc_slab_alloc = 0;
	skc->skc_slab_max = 0;
	skc->skc_obj_total = 0;
	skc->skc_obj_alloc = 0;
	skc->skc_obj_max = 0;
	skc->skc_obj_deadlock = 0;
	skc->skc_obj_emergency = 0;
	skc->skc_obj_emergency_max = 0;

	rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0,
	    GFP_KERNEL);
	if (rc != 0) {
		kfree(skc);
		return (NULL);
	}

	/*
	 * Verify the requested alignment restriction is sane.
	 */
	if (align) {
		VERIFY(ISP2(align));
		VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
		VERIFY3U(align, <=, PAGE_SIZE);
		skc->skc_obj_align = align;
	}

	/*
	 * When no specific type of slab is requested (kmem, vmem, or
	 * linuxslab) then select a cache type based on the object size
	 * and default tunables.
	 */
	if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
		if (spl_kmem_cache_slab_limit &&
		    size <= (size_t)spl_kmem_cache_slab_limit) {
			/*
			 * Objects smaller than spl_kmem_cache_slab_limit can
			 * use the Linux slab for better space-efficiency.
			 */
			skc->skc_flags |= KMC_SLAB;
		} else {
			/*
			 * All other objects are considered large and are
			 * placed on kvmem backed slabs.
			 */
			skc->skc_flags |= KMC_KVMEM;
		}
	}

	/*
	 * Given the type of slab allocate the required resources.
	 */
	if (skc->skc_flags & KMC_KVMEM) {
		rc = spl_slab_size(skc,
		    &skc->skc_slab_objs, &skc->skc_slab_size);
		if (rc)
			goto out;

		rc = spl_magazine_create(skc);
		if (rc)
			goto out;
	} else {
		unsigned long slabflags = 0;

		if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE)) {
			rc = EINVAL;
			goto out;
		}

#if defined(SLAB_USERCOPY)
		/*
		 * Required for PAX-enabled kernels if the slab is to be
		 * used for copying between user and kernel space.
		 */
		slabflags |= SLAB_USERCOPY;
#endif

#if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
		/*
		 * Newer grsec patchset uses kmem_cache_create_usercopy()
		 * instead of SLAB_USERCOPY flag
		 */
		skc->skc_linux_cache = kmem_cache_create_usercopy(
		    skc->skc_name, size, align, slabflags, 0, size, NULL);
#else
		skc->skc_linux_cache = kmem_cache_create(
		    skc->skc_name, size, align, slabflags, NULL);
#endif
		if (skc->skc_linux_cache == NULL) {
			rc = ENOMEM;
			goto out;
		}
	}

	down_write(&spl_kmem_cache_sem);
	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
	up_write(&spl_kmem_cache_sem);

	return (skc);
out:
	kfree(skc->skc_name);
	percpu_counter_destroy(&skc->skc_linux_alloc);
	kfree(skc);
	return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
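
/*
 * Illustrative usage sketch (hypothetical consumer, not part of this
 * file): a typical caller creates a cache once, allocates and frees
 * objects from it, and destroys it after the last object has been
 * returned.  Passing 0 for flags lets the object size select the
 * backing store.  my_node_t and its ctor/dtor are the hypothetical
 * names from the earlier sketch.
 *
 *	spl_kmem_cache_t *cache;
 *
 *	cache = spl_kmem_cache_create("my_node_cache",
 *	    sizeof (my_node_t), 0, my_node_ctor, my_node_dtor,
 *	    NULL, NULL, NULL, 0);
 *
 *	my_node_t *mn = spl_kmem_cache_alloc(cache, KM_SLEEP);
 *	...
 *	spl_kmem_cache_free(cache, mn);
 *	spl_kmem_cache_destroy(cache);
 */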

/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
	ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);

/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
	DECLARE_WAIT_QUEUE_HEAD(wq);
	taskqid_t id;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));

	down_write(&spl_kmem_cache_sem);
	list_del_init(&skc->skc_list);
	up_write(&spl_kmem_cache_sem);

	/* Cancel and wait for any pending delayed tasks */
	VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	spin_lock(&skc->skc_lock);
	id = skc->skc_taskqid;
	spin_unlock(&skc->skc_lock);

	taskq_cancel_id(spl_kmem_cache_taskq, id);

	/*
	 * Wait until all current callers complete; this is mainly
	 * to catch the case where a low memory situation triggers a
	 * cache reaping action which races with this destroy.
	 */
	wait_event(wq, atomic_read(&skc->skc_ref) == 0);

	if (skc->skc_flags & KMC_KVMEM) {
		spl_magazine_destroy(skc);
		spl_slab_reclaim(skc);
	} else {
		ASSERT(skc->skc_flags & KMC_SLAB);
		kmem_cache_destroy(skc->skc_linux_cache);
	}

	spin_lock(&skc->skc_lock);

	/*
	 * Validate there are no objects in use and free all the
	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
	 */
	ASSERT3U(skc->skc_slab_alloc, ==, 0);
	ASSERT3U(skc->skc_obj_alloc, ==, 0);
	ASSERT3U(skc->skc_slab_total, ==, 0);
	ASSERT3U(skc->skc_obj_total, ==, 0);
	ASSERT3U(skc->skc_obj_emergency, ==, 0);
	ASSERT(list_empty(&skc->skc_complete_list));

	ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
	percpu_counter_destroy(&skc->skc_linux_alloc);

	spin_unlock(&skc->skc_lock);

	kfree(skc->skc_name);
	kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);

/*
 * Allocate an object from a slab attached to the cache.  This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
	spl_kmem_obj_t *sko;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);

	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	ASSERT(sko->sko_addr != NULL);

	/* Remove from sks_free_list */
	list_del_init(&sko->sko_list);

	sks->sks_age = jiffies;
	sks->sks_ref++;
	skc->skc_obj_alloc++;

	/* Track max obj usage statistics */
	if (skc->skc_obj_alloc > skc->skc_obj_max)
		skc->skc_obj_max = skc->skc_obj_alloc;

	/* Track max slab usage statistics */
	if (sks->sks_ref == 1) {
		skc->skc_slab_alloc++;

		if (skc->skc_slab_alloc > skc->skc_slab_max)
			skc->skc_slab_max = skc->skc_slab_alloc;
	}

	return (sko->sko_addr);
}

/*
 * Generic slab allocation function to be run by the global work queues.
 * It is responsible for allocating a new slab, linking it in to the list
 * of partial slabs, and then waking any waiters.
 */
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;

	fstrans_cookie_t cookie = spl_fstrans_mark();
	sks = spl_slab_alloc(skc, flags);
	spl_fstrans_unmark(cookie);

	spin_lock(&skc->skc_lock);
	if (sks) {
		skc->skc_slab_total++;
		skc->skc_obj_total += sks->sks_objs;
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);

		smp_mb__before_atomic();
		clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
		smp_mb__after_atomic();
	}
	spin_unlock(&skc->skc_lock);

	return (sks == NULL ? -ENOMEM : 0);
}

static void
spl_cache_grow_work(void *data)
{
	spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
	spl_kmem_cache_t *skc = ska->ska_cache;

	int error = __spl_cache_grow(skc, ska->ska_flags);

	atomic_dec(&skc->skc_ref);
	smp_mb__before_atomic();
	clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
	smp_mb__after_atomic();
	if (error == 0)
		wake_up_all(&skc->skc_waitq);

	kfree(ska);
}

/*
 * Returns non-zero when a new slab should be available.
 */
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
	return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}

/*
 * No available objects on any slabs, create a new slab.  Note that this
 * functionality is disabled for KMC_SLAB caches which are backed by the
 * Linux slab.
 */
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
	int remaining, rc = 0;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT((skc->skc_flags & KMC_SLAB) == 0);
	might_sleep();
	*obj = NULL;

	/*
	 * Before allocating a new slab wait for any reaping to complete and
	 * then return so the local magazine can be rechecked for new objects.
	 */
	if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
		    TASK_UNINTERRUPTIBLE);
		return (rc ? rc : -EAGAIN);
	}

	/*
	 * Note: It would be nice to reduce the overhead of a context switch
	 * and improve NUMA locality by trying to allocate a new slab in the
	 * current process context with the KM_NOSLEEP flag.
	 *
	 * However, this can't be applied to vmem/kvmem due to a bug that
	 * spl_vmalloc() doesn't honor gfp flags in page table allocation.
	 */

	/*
	 * This is handled by dispatching a work request to the global work
	 * queue.  This allows us to asynchronously allocate a new slab while
	 * retaining the ability to safely fall back to a smaller synchronous
	 * allocation to ensure forward progress is always maintained.
	 */
	if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
		spl_kmem_alloc_t *ska;

		ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
		if (ska == NULL) {
			clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
			smp_mb__after_atomic();
			wake_up_all(&skc->skc_waitq);
			return (-ENOMEM);
		}

		atomic_inc(&skc->skc_ref);
		ska->ska_cache = skc;
		ska->ska_flags = flags;
		taskq_init_ent(&ska->ska_tqe);
		taskq_dispatch_ent(spl_kmem_cache_taskq,
		    spl_cache_grow_work, ska, 0, &ska->ska_tqe);
	}

	/*
	 * The goal here is to only detect the rare case where a virtual slab
	 * allocation has deadlocked.  We must be careful to minimize the use
	 * of emergency objects which are more expensive to track.  Therefore,
	 * we set a very long timeout for the asynchronous allocation and if
	 * the timeout is reached the cache is flagged as deadlocked.  From
	 * this point only new emergency objects will be allocated until the
	 * asynchronous allocation completes and clears the deadlocked flag.
	 */
	if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
		rc = spl_emergency_alloc(skc, flags, obj);
	} else {
		remaining = wait_event_timeout(skc->skc_waitq,
		    spl_cache_grow_wait(skc), HZ / 10);

		if (!remaining) {
			spin_lock(&skc->skc_lock);
			if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
				set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
				skc->skc_obj_deadlock++;
			}
			spin_unlock(&skc->skc_lock);
		}

		rc = -ENOMEM;
	}

	return (rc);
}

/*
 * Refill a per-cpu magazine with objects from the slabs for this cache.
 * Ideally the magazine can be repopulated using existing objects which have
 * been released; however, if we are unable to locate enough free objects new
 * slabs of objects will be created.  On success NULL is returned, otherwise
 * the address of a single emergency object is returned for use by the caller.
 */
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
	spl_kmem_slab_t *sks;
	int count = 0, rc, refill;
	void *obj = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
	spin_lock(&skc->skc_lock);

	while (refill > 0) {
		/* No slabs available, we may need to grow the cache */
		if (list_empty(&skc->skc_partial_list)) {
			spin_unlock(&skc->skc_lock);

			local_irq_enable();
			rc = spl_cache_grow(skc, flags, &obj);
			local_irq_disable();

			/* Emergency object for immediate use by caller */
			if (rc == 0 && obj != NULL)
				return (obj);

			if (rc)
				goto out;

			/* Rescheduled to a different CPU, skm is not local */
			if (skm != skc->skc_mag[smp_processor_id()])
				goto out;

			/*
			 * Potentially rescheduled to the same CPU but
			 * allocations may have occurred from this CPU while
			 * we were sleeping so recalculate max refill.
			 */
			refill = MIN(refill, skm->skm_size - skm->skm_avail);

			spin_lock(&skc->skc_lock);
			continue;
		}

		/* Grab the next available slab */
		sks = list_entry((&skc->skc_partial_list)->next,
		    spl_kmem_slab_t, sks_list);
		ASSERT(sks->sks_magic == SKS_MAGIC);
		ASSERT(sks->sks_ref < sks->sks_objs);
		ASSERT(!list_empty(&sks->sks_free_list));

		/*
		 * Consume as many objects as needed to refill the requested
		 * cache.  We must also be careful not to overfill it.
		 */
		while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
		    ++count) {
			ASSERT(skm->skm_avail < skm->skm_size);
			ASSERT(count < skm->skm_size);
			skm->skm_objs[skm->skm_avail++] =
			    spl_cache_obj(skc, sks);
		}

		/* Move slab to skc_complete_list when full */
		if (sks->sks_ref == sks->sks_objs) {
			list_del(&sks->sks_list);
			list_add(&sks->sks_list, &skc->skc_complete_list);
		}
	}

	spin_unlock(&skc->skc_lock);
out:
	return (NULL);
}

/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_slab_t *sks = NULL;
	spl_kmem_obj_t *sko = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	sko = spl_sko_from_obj(skc, obj);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	sks = sko->sko_slab;
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_cache == skc);
	list_add(&sko->sko_list, &sks->sks_free_list);

	sks->sks_age = jiffies;
	sks->sks_ref--;
	skc->skc_obj_alloc--;

	/*
	 * Move slab to skc_partial_list when no longer full.  Slabs
	 * are added to the head to keep the partial list in quasi-full
	 * sorted order.  Fuller at the head, emptier at the tail.
	 */
	if (sks->sks_ref == (sks->sks_objs - 1)) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_partial_list);
	}

	/*
	 * Move empty slabs to the end of the partial list so
	 * they can be easily found and freed during reclamation.
	 */
	if (sks->sks_ref == 0) {
		list_del(&sks->sks_list);
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
		skc->skc_slab_alloc--;
	}
}

/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_magazine_t *skm;
	void *obj = NULL;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Allocate directly from a Linux slab.  All optimizations are left
	 * to the underlying cache; we only need to guarantee that KM_SLEEP
	 * callers will never fail.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		struct kmem_cache *slc = skc->skc_linux_cache;
		do {
			obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
		} while ((obj == NULL) && !(flags & KM_NOSLEEP));

		if (obj != NULL) {
			/*
			 * Even though we leave everything up to the
			 * underlying cache we still keep track of
			 * how many objects we've allocated in it for
			 * better debuggability.
			 */
			percpu_counter_inc(&skc->skc_linux_alloc);
		}
		goto ret;
	}

	local_irq_disable();

restart:
	/*
	 * Safe to update per-cpu structure without lock, but
	 * in the restart case we must be careful to reacquire
	 * the local magazine since this may have changed
	 * when we need to grow the cache.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	if (likely(skm->skm_avail)) {
		/* Object available in CPU cache, use it */
		obj = skm->skm_objs[--skm->skm_avail];
	} else {
		obj = spl_cache_refill(skc, skm, flags);
		if ((obj == NULL) && !(flags & KM_NOSLEEP))
			goto restart;

		local_irq_enable();
		goto ret;
	}

	local_irq_enable();
	ASSERT(obj);
	ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

ret:
	/* Pre-emptively migrate object to CPU L1 cache */
	if (obj) {
		if (obj && skc->skc_ctor)
			skc->skc_ctor(obj, skc->skc_private, flags);
		else
			prefetchw(obj);
	}

	return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
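
/*
 * Caller-side sketch (illustrative, not part of this file): KM_SLEEP
 * allocations may block but will not return NULL, so they need no
 * error handling, while KM_NOSLEEP callers must be prepared for
 * failure.  The names reuse the hypothetical cache from the sketch
 * above.
 *
 *	my_node_t *mn = spl_kmem_cache_alloc(cache, KM_NOSLEEP);
 *	if (mn == NULL)
 *		return (ENOMEM);
 */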

/*
 * Free an object back to the local per-cpu magazine; there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from.  We may need to flush entries from the magazine
 * back to the slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_magazine_t *skm;
	unsigned long flags;
	int do_reclaim = 0;
	int do_emergency = 0;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Run the destructor
	 */
	if (skc->skc_dtor)
		skc->skc_dtor(obj, skc->skc_private);

	/*
	 * Free the object back to the underlying Linux slab.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		kmem_cache_free(skc->skc_linux_cache, obj);
		percpu_counter_dec(&skc->skc_linux_alloc);
		return;
	}

	/*
	 * While a cache has outstanding emergency objects all freed objects
	 * must be checked.  However, since emergency objects will never use
	 * a virtual address these objects can be safely excluded as an
	 * optimization.
	 */
	if (!is_vmalloc_addr(obj)) {
		spin_lock(&skc->skc_lock);
		do_emergency = (skc->skc_obj_emergency > 0);
		spin_unlock(&skc->skc_lock);

		if (do_emergency && (spl_emergency_free(skc, obj) == 0))
			return;
	}

	local_irq_save(flags);

	/*
	 * Safe to update per-cpu structure without lock, but since no
	 * remote memory allocation tracking is being performed it is
	 * entirely possible to allocate an object from one CPU cache
	 * and return it to another.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/*
	 * Per-CPU cache full, flush it to make space for this object,
	 * this may result in an empty slab which can be reclaimed once
	 * interrupts are re-enabled.
	 */
	if (unlikely(skm->skm_avail >= skm->skm_size)) {
		spl_cache_flush(skc, skm, skm->skm_refill);
		do_reclaim = 1;
	}

	/* Available space in cache, use it */
	skm->skm_objs[skm->skm_avail++] = obj;

	local_irq_restore(flags);

	if (do_reclaim)
		spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);

/*
 * Depending on how many and which objects are released it may simply
 * repopulate the local magazine which will then need to age-out.  Objects
 * which cannot fit in the magazine will be released back to their slabs
 * which will also need to age out before being released.  This is all just
 * best effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	if (skc->skc_flags & KMC_SLAB)
		return;

	atomic_inc(&skc->skc_ref);

	/*
	 * Prevent concurrent cache reaping when contended.
	 */
	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
		goto out;

	/* Reclaim from the magazine and free all now empty slabs. */
	unsigned long irq_flags;
	local_irq_save(irq_flags);
	spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
	spl_cache_flush(skc, skm, skm->skm_avail);
	local_irq_restore(irq_flags);

	spl_slab_reclaim(skc);
	clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
	smp_mb__after_atomic();
	wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
	atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);

/*
 * This is stubbed out for code consistency with other platforms.  There
 * is existing logic to prevent concurrent reaping so while this is ugly
 * it should do no harm.
 */
int
spl_kmem_cache_reap_active(void)
{
	return (0);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_active);

/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
	spl_kmem_cache_t *skc = NULL;

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		spl_kmem_cache_reap_now(skc);
	}
	up_read(&spl_kmem_cache_sem);
}
EXPORT_SYMBOL(spl_kmem_reap);

int
spl_kmem_cache_init(void)
{
	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);
	spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
	    spl_kmem_cache_kmem_threads, maxclsyspri,
	    spl_kmem_cache_kmem_threads * 8, INT_MAX,
	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	return (0);
}

void
spl_kmem_cache_fini(void)
{
	taskq_destroy(spl_kmem_cache_taskq);
}