/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_KMEM_IMPL_H
#define	_SYS_KMEM_IMPL_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/thread.h>
#include <sys/t_lock.h>
#include <sys/time.h>
#include <sys/kstat.h>
#include <sys/cpuvar.h>
#include <sys/systm.h>
#include <vm/page.h>
#include <sys/avl.h>
#include <sys/list.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * kernel memory allocator: implementation-private data structures
 *
 * Lock order:
 * 1. cache_lock
 * 2. cc_lock in order by CPU ID
 * 3. cache_depot_lock
 *
 * Do not call kmem_cache_alloc() or taskq_dispatch() while holding any of the
 * above locks.
 */

#define	KMF_AUDIT	0x00000001	/* transaction auditing */
#define	KMF_DEADBEEF	0x00000002	/* deadbeef checking */
#define	KMF_REDZONE	0x00000004	/* redzone checking */
#define	KMF_CONTENTS	0x00000008	/* freed-buffer content logging */
#define	KMF_STICKY	0x00000010	/* if set, override /etc/system */
#define	KMF_NOMAGAZINE	0x00000020	/* disable per-cpu magazines */
#define	KMF_FIREWALL	0x00000040	/* put all bufs before unmapped pages */
#define	KMF_LITE	0x00000100	/* lightweight debugging */

#define	KMF_HASH	0x00000200	/* cache has hash table */
#define	KMF_RANDOMIZE	0x00000400	/* randomize other kmem_flags */

#define	KMF_BUFTAG	(KMF_DEADBEEF | KMF_REDZONE)
#define	KMF_TOUCH	(KMF_BUFTAG | KMF_LITE | KMF_CONTENTS)
#define	KMF_RANDOM	(KMF_TOUCH | KMF_AUDIT | KMF_NOMAGAZINE)
#define	KMF_DEBUG	(KMF_RANDOM | KMF_FIREWALL)

#define	KMEM_STACK_DEPTH	15

#define	KMEM_FREE_PATTERN		0xdeadbeefdeadbeefULL
#define	KMEM_UNINITIALIZED_PATTERN	0xbaddcafebaddcafeULL
#define	KMEM_REDZONE_PATTERN		0xfeedfacefeedfaceULL
#define	KMEM_REDZONE_BYTE		0xbb

/*
 * Redzone size encodings for kmem_alloc() / kmem_free().  We encode the
 * allocation size, rather than storing it directly, so that kmem_free()
 * can distinguish frees of the wrong size from redzone violations.
 *
 * A size of zero is never valid.
 */
#define	KMEM_SIZE_ENCODE(x)	(251 * (x) + 1)
#define	KMEM_SIZE_DECODE(x)	((x) / 251)
#define	KMEM_SIZE_VALID(x)	((x) % 251 == 1 && (x) != 1)
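
/*
 * Worked example of the encoding above: a 64-byte allocation stores
 * KMEM_SIZE_ENCODE(64) = 251 * 64 + 1 = 16065.  At free time,
 * KMEM_SIZE_VALID(16065) holds because 16065 % 251 == 1, and
 * KMEM_SIZE_DECODE(16065) = 64 is compared against the caller's size to
 * catch a wrong-size kmem_free().  A stray write over the encoded word is
 * overwhelmingly likely to break the (x) % 251 == 1 invariant, so it is
 * reported as a redzone violation rather than as a wrong-size free.
 */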
#define	KMEM_ALIGN		8	/* min guaranteed alignment */
#define	KMEM_ALIGN_SHIFT	3	/* log2(KMEM_ALIGN) */
#define	KMEM_VOID_FRACTION	8	/* never waste more than 1/8 of slab */

#define	KMEM_SLAB_IS_PARTIAL(sp)	\
	((sp)->slab_refcnt > 0 && (sp)->slab_refcnt < (sp)->slab_chunks)
#define	KMEM_SLAB_IS_ALL_USED(sp)	\
	((sp)->slab_refcnt == (sp)->slab_chunks)

/*
 * The bufctl (buffer control) structure keeps some minimal information
 * about each buffer: its address, its slab, and its current linkage,
 * which is either on the slab's freelist (if the buffer is free), or
 * on the cache's buf-to-bufctl hash table (if the buffer is allocated).
 * In the case of non-hashed, or "raw", caches (the common case), only
 * the freelist linkage is necessary: the buffer address is at a fixed
 * offset from the bufctl address, and the slab is at the end of the page.
 *
 * NOTE: bc_next must be the first field; raw buffers have linkage only.
 */
typedef struct kmem_bufctl {
	struct kmem_bufctl	*bc_next;	/* next bufctl struct */
	void			*bc_addr;	/* address of buffer */
	struct kmem_slab	*bc_slab;	/* controlling slab */
} kmem_bufctl_t;

/*
 * The KMF_AUDIT version of the bufctl structure.  The beginning of this
 * structure must be identical to the normal bufctl structure so that
 * pointers are interchangeable.
 */
typedef struct kmem_bufctl_audit {
	struct kmem_bufctl	*bc_next;	/* next bufctl struct */
	void			*bc_addr;	/* address of buffer */
	struct kmem_slab	*bc_slab;	/* controlling slab */
	kmem_cache_t		*bc_cache;	/* controlling cache */
	hrtime_t		bc_timestamp;	/* transaction time */
	kthread_t		*bc_thread;	/* thread doing transaction */
	struct kmem_bufctl	*bc_lastlog;	/* last log entry */
	void			*bc_contents;	/* contents at last free */
	int			bc_depth;	/* stack depth */
	pc_t			bc_stack[KMEM_STACK_DEPTH];	/* pc stack */
} kmem_bufctl_audit_t;

/*
 * A kmem_buftag structure is appended to each buffer whenever either of the
 * KMF_BUFTAG flags (KMF_DEADBEEF, KMF_REDZONE) is set.
 */
typedef struct kmem_buftag {
	uint64_t	bt_redzone;	/* 64-bit redzone pattern */
	kmem_bufctl_t	*bt_bufctl;	/* bufctl */
	intptr_t	bt_bxstat;	/* bufctl ^ (alloc/free) */
} kmem_buftag_t;
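
/*
 * Illustrative note on bt_bxstat (a sketch of how the debug paths use the
 * field, not a declaration): the word is kept equal to the bufctl pointer
 * XORed with a state constant (KMEM_BUFTAG_ALLOC while the buffer is
 * allocated, KMEM_BUFTAG_FREE while it is free; both are defined below).
 * A check of the form
 *
 *	btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ KMEM_BUFTAG_ALLOC)
 *
 * verifies the alloc/free state and detects corruption of either field at
 * once; on free, a mismatch indicates a double-free or a stray write over
 * the buftag.
 */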
/*
 * A variant of the kmem_buftag structure used for KMF_LITE caches.
 * Previous callers are stored in reverse chronological order (i.e. most
 * recent first).
 */
typedef struct kmem_buftag_lite {
	kmem_buftag_t	bt_buftag;	/* a normal buftag */
	pc_t		bt_history[1];	/* zero or more callers */
} kmem_buftag_lite_t;

#define	KMEM_BUFTAG_LITE_SIZE(f)	\
	(offsetof(kmem_buftag_lite_t, bt_history[f]))

#define	KMEM_BUFTAG(cp, buf)		\
	((kmem_buftag_t *)((char *)(buf) + (cp)->cache_buftag))

#define	KMEM_BUFCTL(cp, buf)		\
	((kmem_bufctl_t *)((char *)(buf) + (cp)->cache_bufctl))

#define	KMEM_BUF(cp, bcp)		\
	((void *)((char *)(bcp) - (cp)->cache_bufctl))

#define	KMEM_SLAB(cp, buf)		\
	((kmem_slab_t *)P2END((uintptr_t)(buf), (cp)->cache_slabsize) - 1)

#define	KMEM_CPU_CACHE(cp)		\
	(kmem_cpu_cache_t *)((char *)cp + CPU->cpu_cache_offset)

#define	KMEM_MAGAZINE_VALID(cp, mp)	\
	(((kmem_slab_t *)P2END((uintptr_t)(mp), PAGESIZE) - 1)->slab_cache == \
	(cp)->cache_magtype->mt_cache)

#define	KMEM_SLAB_OFFSET(sp, buf)	\
	((size_t)((uintptr_t)(buf) - (uintptr_t)((sp)->slab_base)))

#define	KMEM_SLAB_MEMBER(sp, buf)	\
	(KMEM_SLAB_OFFSET(sp, buf) < (sp)->slab_cache->cache_slabsize)

#define	KMEM_BUFTAG_ALLOC	0xa110c8edUL
#define	KMEM_BUFTAG_FREE	0xf4eef4eeUL

/* slab_later_count thresholds */
#define	KMEM_DISBELIEF		3

/* slab_flags */
#define	KMEM_SLAB_NOMOVE	0x1
#define	KMEM_SLAB_MOVE_PENDING	0x2

typedef struct kmem_slab {
	struct kmem_cache	*slab_cache;	/* controlling cache */
	void			*slab_base;	/* base of allocated memory */
	avl_node_t		slab_link;	/* slab linkage */
	struct kmem_bufctl	*slab_head;	/* first free buffer */
	long			slab_refcnt;	/* outstanding allocations */
	long			slab_chunks;	/* chunks (bufs) in this slab */
	uint32_t		slab_stuck_offset; /* unmoved buffer offset */
	uint16_t		slab_later_count; /* cf KMEM_CBRC_LATER */
	uint16_t		slab_flags;	/* bits to mark the slab */
} kmem_slab_t;

#define	KMEM_HASH_INITIAL	64

#define	KMEM_HASH(cp, buf)	\
	((cp)->cache_hash_table +	\
	(((uintptr_t)(buf) >> (cp)->cache_hash_shift) & (cp)->cache_hash_mask))

typedef struct kmem_magazine {
	void	*mag_next;
	void	*mag_round[1];		/* one or more rounds */
} kmem_magazine_t;

/*
 * The magazine types for fast per-cpu allocation
 */
typedef struct kmem_magtype {
	int		mt_magsize;	/* magazine size (number of rounds) */
	int		mt_align;	/* magazine alignment */
	size_t		mt_minbuf;	/* all smaller buffers qualify */
	size_t		mt_maxbuf;	/* no larger buffers qualify */
	kmem_cache_t	*mt_cache;	/* magazine cache */
} kmem_magtype_t;

#define	KMEM_CPU_CACHE_SIZE	64	/* must be power of 2 */
#define	KMEM_CPU_PAD		(KMEM_CPU_CACHE_SIZE - sizeof (kmutex_t) - \
	2 * sizeof (uint64_t) - 2 * sizeof (void *) - 4 * sizeof (int))
#define	KMEM_CACHE_SIZE(ncpus)	\
	((size_t)(&((kmem_cache_t *)0)->cache_cpu[ncpus]))

typedef struct kmem_cpu_cache {
	kmutex_t	cc_lock;	/* protects this cpu's local cache */
	uint64_t	cc_alloc;	/* allocations from this cpu */
	uint64_t	cc_free;	/* frees to this cpu */
	kmem_magazine_t	*cc_loaded;	/* the currently loaded magazine */
	kmem_magazine_t	*cc_ploaded;	/* the previously loaded magazine */
	int		cc_rounds;	/* number of objects in loaded mag */
	int		cc_prounds;	/* number of objects in previous mag */
	int		cc_magsize;	/* number of rounds in a full mag */
	int		cc_flags;	/* CPU-local copy of cache_flags */
	char		cc_pad[KMEM_CPU_PAD]; /* for nice alignment */
} kmem_cpu_cache_t;
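
/*
 * Minimal sketch of the magazine-layer fast path, simplified from the
 * allocator (illustration only).  An allocation first tries the executing
 * CPU's loaded magazine:
 *
 *	kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
 *
 *	mutex_enter(&ccp->cc_lock);
 *	if (ccp->cc_rounds > 0) {
 *		void *buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
 *		ccp->cc_alloc++;
 *		mutex_exit(&ccp->cc_lock);
 *		return (buf);
 *	}
 *
 * If the loaded magazine is empty, the allocator exchanges cc_loaded with
 * cc_ploaded when the previous magazine still has rounds, and only then
 * falls back to the depot and slab layers.  Sizing each kmem_cpu_cache_t
 * to KMEM_CPU_CACHE_SIZE (64) bytes keeps each CPU's counters on its own
 * cache line.
 */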
/*
 * The magazine lists used in the depot.
 */
typedef struct kmem_maglist {
	kmem_magazine_t	*ml_list;	/* magazine list */
	long		ml_total;	/* number of magazines */
	long		ml_min;		/* min since last update */
	long		ml_reaplimit;	/* max reapable magazines */
	uint64_t	ml_alloc;	/* allocations from this list */
} kmem_maglist_t;

typedef struct kmem_defrag {
	/*
	 * Statistics
	 */
	uint64_t	kmd_callbacks;		/* move callbacks */
	uint64_t	kmd_yes;		/* KMEM_CBRC_YES responses */
	uint64_t	kmd_no;			/* NO responses */
	uint64_t	kmd_later;		/* LATER responses */
	uint64_t	kmd_dont_need;		/* DONT_NEED responses */
	uint64_t	kmd_dont_know;		/* DONT_KNOW responses */
	uint64_t	kmd_hunt_found;		/* DONT_KNOW: # found in mag */

	/*
	 * Consolidator fields
	 */
	avl_tree_t	kmd_moves_pending;	/* buffer moves pending */
	list_t		kmd_deadlist;		/* deferred slab frees */
	size_t		kmd_deadcount;		/* # of slabs in kmd_deadlist */
	uint8_t		kmd_reclaim_numer;	/* slab usage threshold */
	uint8_t		kmd_pad1;		/* compiler padding */
	size_t		kmd_slabs_sought;	/* reclaimable slabs sought */
	size_t		kmd_slabs_found;	/* reclaimable slabs found */
	size_t		kmd_scans;		/* nth scan interval counter */

	/*
	 * Fields used to ASSERT that the client does not kmem_cache_free()
	 * objects passed to the move callback.
	 */
	void		*kmd_from_buf;		/* object to move */
	void		*kmd_to_buf;		/* move destination */
	kthread_t	*kmd_thread;		/* thread calling move */
} kmem_defrag_t;
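
/*
 * Sketch of a client move callback (hypothetical example; the signature
 * matches the cache_move field below, and the KMEM_CBRC_* return codes
 * come from <sys/kmem.h>).  The consolidator asks the client to relocate
 * 'old' into the preallocated buffer 'new':
 *
 *	static kmem_cbrc_t
 *	foo_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		if (!foo_is_valid(old))		(hypothetical check)
 *			return (KMEM_CBRC_DONT_KNOW);
 *		if (foo_is_busy(old))		(hypothetical check)
 *			return (KMEM_CBRC_LATER);
 *		bcopy(old, new, size);
 *		return (KMEM_CBRC_YES);
 *	}
 *
 * kmd_from_buf and kmd_to_buf above let the allocator ASSERT that the
 * client does not kmem_cache_free() either buffer from inside the
 * callback.
 */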
#define	KMEM_CACHE_NAMELEN	31

struct kmem_cache {
	/*
	 * Statistics
	 */
	uint64_t	cache_slab_create;	/* slab creates */
	uint64_t	cache_slab_destroy;	/* slab destroys */
	uint64_t	cache_slab_alloc;	/* slab layer allocations */
	uint64_t	cache_slab_free;	/* slab layer frees */
	uint64_t	cache_alloc_fail;	/* total failed allocations */
	uint64_t	cache_buftotal;		/* total buffers */
	uint64_t	cache_bufmax;		/* max buffers ever */
	uint64_t	cache_bufslab;		/* buffers free in slab layer */
	uint64_t	cache_rescale;		/* # of hash table rescales */
	uint64_t	cache_lookup_depth;	/* hash lookup depth */
	uint64_t	cache_depot_contention;	/* mutex contention count */
	uint64_t	cache_depot_contention_prev; /* previous snapshot */

	/*
	 * Cache properties
	 */
	char		cache_name[KMEM_CACHE_NAMELEN + 1];
	size_t		cache_bufsize;		/* object size */
	size_t		cache_align;		/* object alignment */
	int		(*cache_constructor)(void *, void *, int);
	void		(*cache_destructor)(void *, void *);
	void		(*cache_reclaim)(void *);
	kmem_cbrc_t	(*cache_move)(void *, void *, size_t, void *);
	void		*cache_private;		/* opaque arg to callbacks */
	vmem_t		*cache_arena;		/* vmem source for slabs */
	int		cache_cflags;		/* cache creation flags */
	int		cache_flags;		/* various cache state info */
	uint32_t	cache_mtbf;		/* induced alloc failure rate */
	uint32_t	cache_pad1;		/* compiler padding */
	kstat_t		*cache_kstat;		/* exported statistics */
	list_node_t	cache_link;		/* cache linkage */

	/*
	 * Slab layer
	 */
	kmutex_t	cache_lock;		/* protects slab layer */
	size_t		cache_chunksize;	/* buf + alignment [+ debug] */
	size_t		cache_slabsize;		/* size of a slab */
	size_t		cache_maxchunks;	/* max buffers per slab */
	size_t		cache_bufctl;		/* buf-to-bufctl distance */
	size_t		cache_buftag;		/* buf-to-buftag distance */
	size_t		cache_verify;		/* bytes to verify */
	size_t		cache_contents;		/* bytes of saved content */
	size_t		cache_color;		/* next slab color */
	size_t		cache_mincolor;		/* minimum slab color */
	size_t		cache_maxcolor;		/* maximum slab color */
	size_t		cache_hash_shift;	/* get to interesting bits */
	size_t		cache_hash_mask;	/* hash table mask */
	list_t		cache_complete_slabs;	/* completely allocated slabs */
	size_t		cache_complete_slab_count;
	avl_tree_t	cache_partial_slabs;	/* partial slab freelist */
	size_t		cache_partial_binshift;	/* for AVL sort bins */
	kmem_cache_t	*cache_bufctl_cache;	/* source of bufctls */
	kmem_bufctl_t	**cache_hash_table;	/* hash table base */
	kmem_defrag_t	*cache_defrag;		/* slab consolidator fields */

	/*
	 * Depot layer
	 */
	kmutex_t	cache_depot_lock;	/* protects depot */
	kmem_magtype_t	*cache_magtype;		/* magazine type */
	kmem_maglist_t	cache_full;		/* full magazines */
	kmem_maglist_t	cache_empty;		/* empty magazines */

	/*
	 * Per-CPU layer
	 */
	kmem_cpu_cache_t cache_cpu[1];		/* max_ncpus actual elements */
};

typedef struct kmem_cpu_log_header {
	kmutex_t	clh_lock;
	char		*clh_current;
	size_t		clh_avail;
	int		clh_chunk;
	int		clh_hits;
	char		clh_pad[64 - sizeof (kmutex_t) - sizeof (char *) -
				sizeof (size_t) - 2 * sizeof (int)];
} kmem_cpu_log_header_t;

typedef struct kmem_log_header {
	kmutex_t	lh_lock;
	char		*lh_base;
	int		*lh_free;
	size_t		lh_chunksize;
	int		lh_nchunks;
	int		lh_head;
	int		lh_tail;
	int		lh_hits;
	kmem_cpu_log_header_t lh_cpu[1];	/* ncpus actually allocated */
} kmem_log_header_t;

/* kmem_move kmm_flags */
#define	KMM_DESPERATE		0x1
#define	KMM_NOTIFY		0x2

typedef struct kmem_move {
	kmem_slab_t	*kmm_from_slab;
	void		*kmm_from_buf;
	void		*kmm_to_buf;
	avl_node_t	kmm_entry;
	int		kmm_flags;
} kmem_move_t;

/*
 * In order to consolidate partial slabs, it must be possible for the cache to
 * have partial slabs.
 */
#define	KMEM_IS_MOVABLE(cp)	\
	(((cp)->cache_chunksize * 2) <= (cp)->cache_slabsize)

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_KMEM_IMPL_H */