Lines matching refs:skc (SPL kmem cache implementation)

137 static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
140 kv_alloc(spl_kmem_cache_t *skc, int size, int flags) in kv_alloc() argument
145 if (skc->skc_flags & KMC_RECLAIMABLE) in kv_alloc()
156 kv_free(spl_kmem_cache_t *skc, void *ptr, int size) in kv_free() argument
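kv_alloc() and kv_free() are the backing allocator for KMC_KVMEM slabs. A plausible reconstruction of their bodies from the fragments above (the spl_vmalloc()/vfree() pairing and the exact GFP flag handling are assumptions, not verbatim source):

static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
	/* Tag reclaimable caches so the VM accounts for them correctly. */
	if (skc->skc_flags & KMC_RECLAIMABLE)
		flags |= __GFP_RECLAIMABLE;

	return (spl_vmalloc(size, flags | __GFP_HIGHMEM));
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
	vfree(ptr);
}
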
180 spl_sks_size(spl_kmem_cache_t *skc) in spl_sks_size() argument
183 skc->skc_obj_align, uint32_t)); in spl_sks_size()
190 spl_obj_size(spl_kmem_cache_t *skc) in spl_obj_size() argument
192 uint32_t align = skc->skc_obj_align; in spl_obj_size()
194 return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) + in spl_obj_size()
216 spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj) in spl_sko_from_obj() argument
218 return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size, in spl_sko_from_obj()
219 skc->skc_obj_align, uint32_t)); in spl_sko_from_obj()
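spl_sks_size(), spl_obj_size(), and spl_sko_from_obj() together define the per-object layout: the object is padded up to skc_obj_align, and a spl_kmem_obj_t bookkeeping header is placed immediately after the padded object. A minimal user-space sketch of the arithmetic, assuming an illustrative 100-byte object, 8-byte alignment, and 40-byte header:

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of a power-of-two align (as in sysmacros.h). */
#define P2ROUNDUP_TYPED(x, align, type)	(-(-(type)(x) & -(type)(align)))

int
main(void)
{
	uint32_t obj_size = 100;  /* stand-in for skc->skc_obj_size */
	uint32_t align = 8;       /* stand-in for skc->skc_obj_align */
	uint32_t sko_size = 40;   /* stand-in for sizeof (spl_kmem_obj_t) */

	/* spl_obj_size(): padded object plus its trailing header. */
	uint32_t footprint = P2ROUNDUP_TYPED(obj_size, align, uint32_t) +
	    P2ROUNDUP_TYPED(sko_size, align, uint32_t);

	/* spl_sko_from_obj(): the header sits right after the padding. */
	uint32_t sko_offset = P2ROUNDUP_TYPED(obj_size, align, uint32_t);

	printf("footprint=%u header_offset=%u\n", footprint, sko_offset);
	return (0);
}
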
252 spl_slab_alloc(spl_kmem_cache_t *skc, int flags) in spl_slab_alloc() argument
258 base = kv_alloc(skc, skc->skc_slab_size, flags); in spl_slab_alloc()
264 sks->sks_objs = skc->skc_slab_objs; in spl_slab_alloc()
266 sks->sks_cache = skc; in spl_slab_alloc()
270 obj_size = spl_obj_size(skc); in spl_slab_alloc()
273 void *obj = base + spl_sks_size(skc) + (i * obj_size); in spl_slab_alloc()
275 ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align)); in spl_slab_alloc()
276 spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj); in spl_slab_alloc()
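spl_slab_alloc() puts the spl_kmem_slab_t header at the base of the allocation and carves the rest into sks_objs fixed-size slots, each followed by its spl_kmem_obj_t. A sketch of the carving loop (the sko_* initialization and the free-list name are assumptions beyond what is matched above):

/*
 * base                                              base + skc_slab_size
 * | spl_kmem_slab_t | obj 0 | sko 0 | obj 1 | sko 1 | ...
 * '- spl_sks_size -' '-- spl_obj_size() --'
 */
for (uint32_t i = 0; i < sks->sks_objs; i++) {
	void *obj = base + spl_sks_size(skc) + (i * obj_size);
	spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);

	sko->sko_addr = obj;       /* back-pointer used on free */
	sko->sko_magic = SKO_MAGIC;
	list_add_tail(&sko->sko_list, &sks->sks_free_list);
}
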
296 spl_kmem_cache_t *skc; in spl_slab_free() local
301 skc = sks->sks_cache; in spl_slab_free()
302 ASSERT(skc->skc_magic == SKC_MAGIC); in spl_slab_free()
310 skc->skc_obj_total -= sks->sks_objs; in spl_slab_free()
311 skc->skc_slab_total--; in spl_slab_free()
321 spl_slab_reclaim(spl_kmem_cache_t *skc) in spl_slab_reclaim() argument
334 spin_lock(&skc->skc_lock); in spl_slab_reclaim()
336 &skc->skc_partial_list, sks_list) { in spl_slab_reclaim()
343 spin_unlock(&skc->skc_lock); in spl_slab_reclaim()
359 kv_free(skc, sks, skc->skc_slab_size); in spl_slab_reclaim()
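spl_slab_reclaim() shows the usual lock discipline: empty slabs are moved off skc_partial_list onto a private list while skc_lock is held, and only handed to kv_free() after the lock is dropped, because releasing a large virtual allocation is too slow to do under a spinlock. Roughly (a sketch; the empty-slab test is assumed):

spl_kmem_slab_t *sks, *m;
LIST_HEAD(sks_list);

spin_lock(&skc->skc_lock);
list_for_each_entry_safe(sks, m, &skc->skc_partial_list, sks_list) {
	if (sks->sks_ref == 0)    /* no objects outstanding */
		list_move(&sks->sks_list, &sks_list);
}
spin_unlock(&skc->skc_lock);

/* The expensive frees happen outside the spinlock. */
list_for_each_entry_safe(sks, m, &sks_list, sks_list)
	kv_free(skc, sks, skc->skc_slab_size);
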
413 spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj) in spl_emergency_alloc() argument
417 int order = get_order(skc->skc_obj_size); in spl_emergency_alloc()
421 spin_lock(&skc->skc_lock); in spl_emergency_alloc()
422 empty = list_empty(&skc->skc_partial_list); in spl_emergency_alloc()
423 spin_unlock(&skc->skc_lock); in spl_emergency_alloc()
427 if (skc->skc_flags & KMC_RECLAIMABLE) in spl_emergency_alloc()
439 spin_lock(&skc->skc_lock); in spl_emergency_alloc()
440 empty = spl_emergency_insert(&skc->skc_emergency_tree, ske); in spl_emergency_alloc()
442 skc->skc_obj_total++; in spl_emergency_alloc()
443 skc->skc_obj_emergency++; in spl_emergency_alloc()
444 if (skc->skc_obj_emergency > skc->skc_obj_emergency_max) in spl_emergency_alloc()
445 skc->skc_obj_emergency_max = skc->skc_obj_emergency; in spl_emergency_alloc()
447 spin_unlock(&skc->skc_lock); in spl_emergency_alloc()
464 spl_emergency_free(spl_kmem_cache_t *skc, void *obj) in spl_emergency_free() argument
467 int order = get_order(skc->skc_obj_size); in spl_emergency_free()
469 spin_lock(&skc->skc_lock); in spl_emergency_free()
470 ske = spl_emergency_search(&skc->skc_emergency_tree, obj); in spl_emergency_free()
472 rb_erase(&ske->ske_node, &skc->skc_emergency_tree); in spl_emergency_free()
473 skc->skc_obj_emergency--; in spl_emergency_free()
474 skc->skc_obj_total--; in spl_emergency_free()
476 spin_unlock(&skc->skc_lock); in spl_emergency_free()
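Emergency objects are standalone allocations handed out when a slab cannot be grown in time; each is tracked in the skc_emergency_tree red-black tree, keyed by object address, so spl_kmem_cache_free() can later tell them apart from slab objects. The lookup half of that pairing might look like this (a sketch; only ske_node appears in the matches, the other field names are assumptions):

static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
	struct rb_node *node = root->rb_node;

	while (node != NULL) {
		spl_kmem_emergency_t *ske =
		    rb_entry(node, spl_kmem_emergency_t, ske_node);

		if (obj < ske->ske_obj)
			node = node->rb_left;
		else if (obj > ske->ske_obj)
			node = node->rb_right;
		else
			return (ske);
	}

	return (NULL);
}
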
492 spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush) in spl_cache_flush() argument
494 spin_lock(&skc->skc_lock); in spl_cache_flush()
496 ASSERT(skc->skc_magic == SKC_MAGIC); in spl_cache_flush()
501 spl_cache_shrink(skc, skm->skm_objs[i]); in spl_cache_flush()
507 spin_unlock(&skc->skc_lock); in spl_cache_flush()
517 spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size) in spl_slab_size() argument
521 sks_size = spl_sks_size(skc); in spl_slab_size()
522 obj_size = spl_obj_size(skc); in spl_slab_size()
548 spl_magazine_size(spl_kmem_cache_t *skc) in spl_magazine_size() argument
550 uint32_t obj_size = spl_obj_size(skc); in spl_magazine_size()
575 spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu) in spl_magazine_alloc() argument
579 sizeof (void *) * skc->skc_mag_size; in spl_magazine_alloc()
585 skm->skm_size = skc->skc_mag_size; in spl_magazine_alloc()
586 skm->skm_refill = skc->skc_mag_refill; in spl_magazine_alloc()
587 skm->skm_cache = skc; in spl_magazine_alloc()
609 spl_magazine_create(spl_kmem_cache_t *skc) in spl_magazine_create() argument
613 ASSERT((skc->skc_flags & KMC_SLAB) == 0); in spl_magazine_create()
615 skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) * in spl_magazine_create()
617 skc->skc_mag_size = spl_magazine_size(skc); in spl_magazine_create()
618 skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2; in spl_magazine_create()
621 skc->skc_mag[i] = spl_magazine_alloc(skc, i); in spl_magazine_create()
622 if (!skc->skc_mag[i]) { in spl_magazine_create()
624 spl_magazine_free(skc->skc_mag[i]); in spl_magazine_create()
626 kfree(skc->skc_mag); in spl_magazine_create()
638 spl_magazine_destroy(spl_kmem_cache_t *skc) in spl_magazine_destroy() argument
643 ASSERT((skc->skc_flags & KMC_SLAB) == 0); in spl_magazine_destroy()
646 skm = skc->skc_mag[i]; in spl_magazine_destroy()
647 spl_cache_flush(skc, skm, skm->skm_avail); in spl_magazine_destroy()
651 kfree(skc->skc_mag); in spl_magazine_destroy()
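Magazines are small per-CPU arrays of recently freed objects. spl_magazine_size() scales skc_mag_size to the object size, and skc_mag_refill = (skc_mag_size + 1) / 2 rounds up, so a 32-slot magazine moves 16 objects per trip under skc_lock and a 1-slot magazine still refills 1. The per-CPU allocation in spl_magazine_alloc() sizes the struct plus its trailing pointer array in one shot (a sketch; the NUMA-local placement is an assumption):

spl_kmem_magazine_t *skm;
int size = sizeof (spl_kmem_magazine_t) +
    sizeof (void *) * skc->skc_mag_size;    /* struct + skm_objs[] slots */

skm = kmalloc_node(size, lflags, cpu_to_node(cpu));
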
676 spl_kmem_cache_t *skc; in spl_kmem_cache_create() local
687 skc = kzalloc(sizeof (*skc), lflags); in spl_kmem_cache_create()
688 if (skc == NULL) in spl_kmem_cache_create()
691 skc->skc_magic = SKC_MAGIC; in spl_kmem_cache_create()
692 skc->skc_name_size = strlen(name) + 1; in spl_kmem_cache_create()
693 skc->skc_name = kmalloc(skc->skc_name_size, lflags); in spl_kmem_cache_create()
694 if (skc->skc_name == NULL) { in spl_kmem_cache_create()
695 kfree(skc); in spl_kmem_cache_create()
698 strlcpy(skc->skc_name, name, skc->skc_name_size); in spl_kmem_cache_create()
700 skc->skc_ctor = ctor; in spl_kmem_cache_create()
701 skc->skc_dtor = dtor; in spl_kmem_cache_create()
702 skc->skc_private = priv; in spl_kmem_cache_create()
703 skc->skc_vmp = vmp; in spl_kmem_cache_create()
704 skc->skc_linux_cache = NULL; in spl_kmem_cache_create()
705 skc->skc_flags = flags; in spl_kmem_cache_create()
706 skc->skc_obj_size = size; in spl_kmem_cache_create()
707 skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN; in spl_kmem_cache_create()
708 atomic_set(&skc->skc_ref, 0); in spl_kmem_cache_create()
710 INIT_LIST_HEAD(&skc->skc_list); in spl_kmem_cache_create()
711 INIT_LIST_HEAD(&skc->skc_complete_list); in spl_kmem_cache_create()
712 INIT_LIST_HEAD(&skc->skc_partial_list); in spl_kmem_cache_create()
713 skc->skc_emergency_tree = RB_ROOT; in spl_kmem_cache_create()
714 spin_lock_init(&skc->skc_lock); in spl_kmem_cache_create()
715 init_waitqueue_head(&skc->skc_waitq); in spl_kmem_cache_create()
716 skc->skc_slab_fail = 0; in spl_kmem_cache_create()
717 skc->skc_slab_create = 0; in spl_kmem_cache_create()
718 skc->skc_slab_destroy = 0; in spl_kmem_cache_create()
719 skc->skc_slab_total = 0; in spl_kmem_cache_create()
720 skc->skc_slab_alloc = 0; in spl_kmem_cache_create()
721 skc->skc_slab_max = 0; in spl_kmem_cache_create()
722 skc->skc_obj_total = 0; in spl_kmem_cache_create()
723 skc->skc_obj_alloc = 0; in spl_kmem_cache_create()
724 skc->skc_obj_max = 0; in spl_kmem_cache_create()
725 skc->skc_obj_deadlock = 0; in spl_kmem_cache_create()
726 skc->skc_obj_emergency = 0; in spl_kmem_cache_create()
727 skc->skc_obj_emergency_max = 0; in spl_kmem_cache_create()
729 rc = percpu_counter_init(&skc->skc_linux_alloc, 0, GFP_KERNEL); in spl_kmem_cache_create()
731 kfree(skc->skc_name); in spl_kmem_cache_create()
732 kfree(skc); in spl_kmem_cache_create()
743 skc->skc_obj_align = align; in spl_kmem_cache_create()
751 if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) { in spl_kmem_cache_create()
758 skc->skc_flags |= KMC_SLAB; in spl_kmem_cache_create()
764 skc->skc_flags |= KMC_KVMEM; in spl_kmem_cache_create()
771 if (skc->skc_flags & KMC_KVMEM) { in spl_kmem_cache_create()
772 rc = spl_slab_size(skc, in spl_kmem_cache_create()
773 &skc->skc_slab_objs, &skc->skc_slab_size); in spl_kmem_cache_create()
777 rc = spl_magazine_create(skc); in spl_kmem_cache_create()
786 if (skc->skc_flags & KMC_RECLAIMABLE) in spl_kmem_cache_create()
789 skc->skc_linux_cache = kmem_cache_create_usercopy( in spl_kmem_cache_create()
790 skc->skc_name, size, align, slabflags, 0, size, NULL); in spl_kmem_cache_create()
791 if (skc->skc_linux_cache == NULL) in spl_kmem_cache_create()
796 list_add_tail(&skc->skc_list, &spl_kmem_cache_list); in spl_kmem_cache_create()
799 return (skc); in spl_kmem_cache_create()
801 kfree(skc->skc_name); in spl_kmem_cache_create()
802 percpu_counter_destroy(&skc->skc_linux_alloc); in spl_kmem_cache_create()
803 kfree(skc); in spl_kmem_cache_create()
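End to end, the create path above gives the usual consumer lifecycle. A hedged usage sketch (the object type and constructor are illustrative; the nine-argument signature with NULL reclaim, private, and vmem parameters follows the SPL API as matched above):

#include <sys/kmem.h>
#include <sys/kmem_cache.h>

typedef struct my_node {
	uint64_t mn_id;
} my_node_t;

static int
my_node_ctor(void *buf, void *priv, int kmflags)
{
	memset(buf, 0, sizeof (my_node_t));
	return (0);
}

static void
my_node_demo(void)
{
	spl_kmem_cache_t *cache;
	my_node_t *mn;

	/* align=0 keeps the SPL_KMEM_CACHE_ALIGN default set at line 707. */
	cache = spl_kmem_cache_create("my_node_cache", sizeof (my_node_t),
	    0, my_node_ctor, NULL, NULL, NULL, NULL, KMC_KVMEM);

	mn = spl_kmem_cache_alloc(cache, KM_SLEEP);
	mn->mn_id = 1;
	spl_kmem_cache_free(cache, mn);

	spl_kmem_cache_destroy(cache);
}
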
813 spl_kmem_cache_set_move(spl_kmem_cache_t *skc, in spl_kmem_cache_set_move() argument
824 spl_kmem_cache_destroy(spl_kmem_cache_t *skc) in spl_kmem_cache_destroy() argument
829 ASSERT(skc->skc_magic == SKC_MAGIC); in spl_kmem_cache_destroy()
830 ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB)); in spl_kmem_cache_destroy()
833 list_del_init(&skc->skc_list); in spl_kmem_cache_destroy()
837 VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags)); in spl_kmem_cache_destroy()
839 spin_lock(&skc->skc_lock); in spl_kmem_cache_destroy()
840 id = skc->skc_taskqid; in spl_kmem_cache_destroy()
841 spin_unlock(&skc->skc_lock); in spl_kmem_cache_destroy()
850 wait_event(wq, atomic_read(&skc->skc_ref) == 0); in spl_kmem_cache_destroy()
852 if (skc->skc_flags & KMC_KVMEM) { in spl_kmem_cache_destroy()
853 spl_magazine_destroy(skc); in spl_kmem_cache_destroy()
854 spl_slab_reclaim(skc); in spl_kmem_cache_destroy()
856 ASSERT(skc->skc_flags & KMC_SLAB); in spl_kmem_cache_destroy()
857 kmem_cache_destroy(skc->skc_linux_cache); in spl_kmem_cache_destroy()
860 spin_lock(&skc->skc_lock); in spl_kmem_cache_destroy()
866 ASSERT3U(skc->skc_slab_alloc, ==, 0); in spl_kmem_cache_destroy()
867 ASSERT3U(skc->skc_obj_alloc, ==, 0); in spl_kmem_cache_destroy()
868 ASSERT3U(skc->skc_slab_total, ==, 0); in spl_kmem_cache_destroy()
869 ASSERT3U(skc->skc_obj_total, ==, 0); in spl_kmem_cache_destroy()
870 ASSERT3U(skc->skc_obj_emergency, ==, 0); in spl_kmem_cache_destroy()
871 ASSERT(list_empty(&skc->skc_complete_list)); in spl_kmem_cache_destroy()
873 ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0); in spl_kmem_cache_destroy()
874 percpu_counter_destroy(&skc->skc_linux_alloc); in spl_kmem_cache_destroy()
876 spin_unlock(&skc->skc_lock); in spl_kmem_cache_destroy()
878 kfree(skc->skc_name); in spl_kmem_cache_destroy()
879 kfree(skc); in spl_kmem_cache_destroy()
888 spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks) in spl_cache_obj() argument
892 ASSERT(skc->skc_magic == SKC_MAGIC); in spl_cache_obj()
904 skc->skc_obj_alloc++; in spl_cache_obj()
907 if (skc->skc_obj_alloc > skc->skc_obj_max) in spl_cache_obj()
908 skc->skc_obj_max = skc->skc_obj_alloc; in spl_cache_obj()
912 skc->skc_slab_alloc++; in spl_cache_obj()
914 if (skc->skc_slab_alloc > skc->skc_slab_max) in spl_cache_obj()
915 skc->skc_slab_max = skc->skc_slab_alloc; in spl_cache_obj()
927 __spl_cache_grow(spl_kmem_cache_t *skc, int flags) in __spl_cache_grow() argument
932 sks = spl_slab_alloc(skc, flags); in __spl_cache_grow()
935 spin_lock(&skc->skc_lock); in __spl_cache_grow()
937 skc->skc_slab_total++; in __spl_cache_grow()
938 skc->skc_obj_total += sks->sks_objs; in __spl_cache_grow()
939 list_add_tail(&sks->sks_list, &skc->skc_partial_list); in __spl_cache_grow()
942 clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags); in __spl_cache_grow()
945 spin_unlock(&skc->skc_lock); in __spl_cache_grow()
954 spl_kmem_cache_t *skc = ska->ska_cache; in spl_cache_grow_work() local
956 int error = __spl_cache_grow(skc, ska->ska_flags); in spl_cache_grow_work()
958 atomic_dec(&skc->skc_ref); in spl_cache_grow_work()
960 clear_bit(KMC_BIT_GROWING, &skc->skc_flags); in spl_cache_grow_work()
963 wake_up_all(&skc->skc_waitq); in spl_cache_grow_work()
972 spl_cache_grow_wait(spl_kmem_cache_t *skc) in spl_cache_grow_wait() argument
974 return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags)); in spl_cache_grow_wait()
983 spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj) in spl_cache_grow() argument
988 ASSERT(skc->skc_magic == SKC_MAGIC); in spl_cache_grow()
989 ASSERT((skc->skc_flags & KMC_SLAB) == 0); in spl_cache_grow()
999 return (spl_emergency_alloc(skc, flags, obj)); in spl_cache_grow()
1007 if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) { in spl_cache_grow()
1008 rc = wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING, in spl_cache_grow()
1028 if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) { in spl_cache_grow()
1033 clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags); in spl_cache_grow()
1035 wake_up_all(&skc->skc_waitq); in spl_cache_grow()
1039 atomic_inc(&skc->skc_ref); in spl_cache_grow()
1040 ska->ska_cache = skc; in spl_cache_grow()
1056 if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) { in spl_cache_grow()
1057 rc = spl_emergency_alloc(skc, flags, obj); in spl_cache_grow()
1059 remaining = wait_event_timeout(skc->skc_waitq, in spl_cache_grow()
1060 spl_cache_grow_wait(skc), HZ / 10); in spl_cache_grow()
1063 spin_lock(&skc->skc_lock); in spl_cache_grow()
1064 if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) { in spl_cache_grow()
1065 set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags); in spl_cache_grow()
1066 skc->skc_obj_deadlock++; in spl_cache_grow()
1068 spin_unlock(&skc->skc_lock); in spl_cache_grow()
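The grow path is asynchronous: the slab allocation runs from a taskq worker, and spl_cache_grow() waits on skc_waitq in HZ/10 slices. If a wait times out while KMC_BIT_GROWING is still set, the cache is flagged KMC_BIT_DEADLOCKED and skc_obj_deadlock is bumped, after which callers fall back to spl_emergency_alloc() until the worker completes. Reassembling the matched lines 1056-1068 into their control flow (a sketch):

long remaining;
int rc;

if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
	/* Presumed deadlocked: satisfy the caller with an emergency object. */
	rc = spl_emergency_alloc(skc, flags, obj);
} else {
	remaining = wait_event_timeout(skc->skc_waitq,
	    spl_cache_grow_wait(skc), HZ / 10);

	if (!remaining) {
		spin_lock(&skc->skc_lock);
		if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
			set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
			skc->skc_obj_deadlock++;
		}
		spin_unlock(&skc->skc_lock);
	}
}
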
1085 spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags) in spl_cache_refill() argument
1091 ASSERT(skc->skc_magic == SKC_MAGIC); in spl_cache_refill()
1095 spin_lock(&skc->skc_lock); in spl_cache_refill()
1099 if (list_empty(&skc->skc_partial_list)) { in spl_cache_refill()
1100 spin_unlock(&skc->skc_lock); in spl_cache_refill()
1103 rc = spl_cache_grow(skc, flags, &obj); in spl_cache_refill()
1114 if (skm != skc->skc_mag[smp_processor_id()]) in spl_cache_refill()
1124 spin_lock(&skc->skc_lock); in spl_cache_refill()
1129 sks = list_entry((&skc->skc_partial_list)->next, in spl_cache_refill()
1144 spl_cache_obj(skc, sks); in spl_cache_refill()
1150 list_add(&sks->sks_list, &skc->skc_complete_list); in spl_cache_refill()
1154 spin_unlock(&skc->skc_lock); in spl_cache_refill()
1163 spl_cache_shrink(spl_kmem_cache_t *skc, void *obj) in spl_cache_shrink() argument
1168 ASSERT(skc->skc_magic == SKC_MAGIC); in spl_cache_shrink()
1170 sko = spl_sko_from_obj(skc, obj); in spl_cache_shrink()
1174 ASSERT(sks->sks_cache == skc); in spl_cache_shrink()
1179 skc->skc_obj_alloc--; in spl_cache_shrink()
1188 list_add(&sks->sks_list, &skc->skc_partial_list); in spl_cache_shrink()
1197 list_add_tail(&sks->sks_list, &skc->skc_partial_list); in spl_cache_shrink()
1198 skc->skc_slab_alloc--; in spl_cache_shrink()
1207 spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags) in spl_kmem_cache_alloc() argument
1213 ASSERT(skc->skc_magic == SKC_MAGIC); in spl_kmem_cache_alloc()
1214 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); in spl_kmem_cache_alloc()
1221 if (skc->skc_flags & KMC_SLAB) { in spl_kmem_cache_alloc()
1222 struct kmem_cache *slc = skc->skc_linux_cache; in spl_kmem_cache_alloc()
1234 percpu_counter_inc(&skc->skc_linux_alloc); in spl_kmem_cache_alloc()
1248 skm = skc->skc_mag[smp_processor_id()]; in spl_kmem_cache_alloc()
1255 obj = spl_cache_refill(skc, skm, flags); in spl_kmem_cache_alloc()
1265 ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align)); in spl_kmem_cache_alloc()
1270 if (obj && skc->skc_ctor) in spl_kmem_cache_alloc()
1271 skc->skc_ctor(obj, skc->skc_private, flags); in spl_kmem_cache_alloc()
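For KMC_KVMEM caches the allocation is two-tier: pop from the current CPU's magazine when it has objects, otherwise drop into spl_cache_refill() to pull objects off the partial slabs under skc_lock, then retry. In outline (a sketch; the interrupt fencing around the magazine is assumed, and the real code also honors KM_NOSLEEP on retry):

spl_kmem_magazine_t *skm;
void *obj = NULL;

local_irq_disable();    /* keep us on this CPU's magazine */
restart:
skm = skc->skc_mag[smp_processor_id()];
if (likely(skm->skm_avail)) {
	obj = skm->skm_objs[--skm->skm_avail];    /* fast path */
} else {
	obj = spl_cache_refill(skc, skm, flags);
	if (obj == NULL)
		goto restart;    /* refill landed in the magazine */
}
local_irq_enable();

/* Constructor runs outside the hot path, as at line 1271. */
if (obj != NULL && skc->skc_ctor)
	skc->skc_ctor(obj, skc->skc_private, flags);
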
1287 spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj) in spl_kmem_cache_free() argument
1294 ASSERT(skc->skc_magic == SKC_MAGIC); in spl_kmem_cache_free()
1295 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); in spl_kmem_cache_free()
1300 if (skc->skc_dtor) in spl_kmem_cache_free()
1301 skc->skc_dtor(obj, skc->skc_private); in spl_kmem_cache_free()
1306 if (skc->skc_flags & KMC_SLAB) { in spl_kmem_cache_free()
1307 kmem_cache_free(skc->skc_linux_cache, obj); in spl_kmem_cache_free()
1308 percpu_counter_dec(&skc->skc_linux_alloc); in spl_kmem_cache_free()
1319 spin_lock(&skc->skc_lock); in spl_kmem_cache_free()
1320 do_emergency = (skc->skc_obj_emergency > 0); in spl_kmem_cache_free()
1321 spin_unlock(&skc->skc_lock); in spl_kmem_cache_free()
1323 if (do_emergency && (spl_emergency_free(skc, obj) == 0)) in spl_kmem_cache_free()
1335 skm = skc->skc_mag[smp_processor_id()]; in spl_kmem_cache_free()
1344 spl_cache_flush(skc, skm, skm->skm_refill); in spl_kmem_cache_free()
1354 spl_slab_reclaim(skc); in spl_kmem_cache_free()
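The free path mirrors it: a cheap skc_obj_emergency > 0 check under skc_lock decides whether the red-black tree lookup is worth attempting at all, and only then does the object go back into the per-CPU magazine, with spl_cache_flush() draining skm_refill objects down to the slab layer when the magazine is full. The tail of that sequence (a sketch; the magazine push is assumed):

/* Only consult the RB tree if emergency objects actually exist. */
spin_lock(&skc->skc_lock);
do_emergency = (skc->skc_obj_emergency > 0);
spin_unlock(&skc->skc_lock);

if (do_emergency && (spl_emergency_free(skc, obj) == 0))
	return;    /* it was an emergency object; done */

skm = skc->skc_mag[smp_processor_id()];
if (unlikely(skm->skm_avail >= skm->skm_size))
	spl_cache_flush(skc, skm, skm->skm_refill);    /* make room */
skm->skm_objs[skm->skm_avail++] = obj;
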
1366 spl_kmem_cache_reap_now(spl_kmem_cache_t *skc) in spl_kmem_cache_reap_now() argument
1368 ASSERT(skc->skc_magic == SKC_MAGIC); in spl_kmem_cache_reap_now()
1369 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); in spl_kmem_cache_reap_now()
1371 if (skc->skc_flags & KMC_SLAB) in spl_kmem_cache_reap_now()
1374 atomic_inc(&skc->skc_ref); in spl_kmem_cache_reap_now()
1379 if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) in spl_kmem_cache_reap_now()
1385 spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()]; in spl_kmem_cache_reap_now()
1386 spl_cache_flush(skc, skm, skm->skm_avail); in spl_kmem_cache_reap_now()
1389 spl_slab_reclaim(skc); in spl_kmem_cache_reap_now()
1390 clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags); in spl_kmem_cache_reap_now()
1392 wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING); in spl_kmem_cache_reap_now()
1394 atomic_dec(&skc->skc_ref); in spl_kmem_cache_reap_now()
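Reaping serializes on KMC_BIT_REAPING, drains the current CPU's magazine, then frees whatever slabs are now empty; spl_kmem_reap() below simply applies this to every cache on spl_kmem_cache_list. The core of it (a sketch; the IRQ save/restore bracketing is assumed):

unsigned long irq_flags;

local_irq_save(irq_flags);
spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
spl_cache_flush(skc, skm, skm->skm_avail);    /* drain this CPU's magazine */
local_irq_restore(irq_flags);

spl_slab_reclaim(skc);    /* free now-empty slabs */
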
1416 spl_kmem_cache_t *skc = NULL; in spl_kmem_reap() local
1419 list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) { in spl_kmem_reap()
1420 spl_kmem_cache_reap_now(skc); in spl_kmem_reap()