/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
 */

/*
 * The 512-byte leaf is broken into 32 16-byte chunks.
 * chunk number n means l_chunk[n], even though the header precedes it.
 * the names are stored null-terminated.
 */

#include <sys/zio.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zap.h>
#include <sys/zap_impl.h>
#include <sys/zap_leaf.h>
#include <sys/arc.h>

static uint16_t *zap_leaf_rehash_entry(zap_leaf_t *l, uint16_t entry);

#define	CHAIN_END 0xffff /* end of the chunk chain */

/* half the (current) minimum block size */
#define	MAX_ARRAY_BYTES (8<<10)

#define	LEAF_HASH(l, h) \
	((ZAP_LEAF_HASH_NUMENTRIES(l)-1) & \
	((h) >> \
	(64 - ZAP_LEAF_HASH_SHIFT(l) - zap_leaf_phys(l)->l_hdr.lh_prefix_len)))

#define	LEAF_HASH_ENTPTR(l, h) (&zap_leaf_phys(l)->l_hash[LEAF_HASH(l, h)])

extern inline zap_leaf_phys_t *zap_leaf_phys(zap_leaf_t *l);

static void
zap_memset(void *a, int c, size_t n)
{
	char *cp = a;
	char *cpend = cp + n;

	while (cp < cpend)
		*cp++ = c;
}

static void
stv(int len, void *addr, uint64_t value)
{
	switch (len) {
	case 1:
		*(uint8_t *)addr = value;
		return;
	case 2:
		*(uint16_t *)addr = value;
		return;
	case 4:
		*(uint32_t *)addr = value;
		return;
	case 8:
		*(uint64_t *)addr = value;
		return;
	}
	ASSERT(!"bad int len");
}

static uint64_t
ldv(int len, const void *addr)
{
	switch (len) {
	case 1:
		return (*(uint8_t *)addr);
	case 2:
		return (*(uint16_t *)addr);
	case 4:
		return (*(uint32_t *)addr);
	case 8:
		return (*(uint64_t *)addr);
	}
	ASSERT(!"bad int len");
	return (0xFEEDFACEDEADBEEFULL);
}
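
/*
 * Byteswap an entire leaf block from its on-disk byte order.  The
 * temporary zap_leaf_t exists only so the ZAP_LEAF_* macros can
 * derive the chunk and hash-table counts from the block size.
 */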
void
zap_leaf_byteswap(zap_leaf_phys_t *buf, int size)
{
	int i;
	zap_leaf_t l;
	dmu_buf_t l_dbuf;

	l_dbuf.db_data = buf;
	l.l_bs = highbit64(size) - 1;
	l.l_dbuf = &l_dbuf;

	buf->l_hdr.lh_block_type = BSWAP_64(buf->l_hdr.lh_block_type);
	buf->l_hdr.lh_prefix = BSWAP_64(buf->l_hdr.lh_prefix);
	buf->l_hdr.lh_magic = BSWAP_32(buf->l_hdr.lh_magic);
	buf->l_hdr.lh_nfree = BSWAP_16(buf->l_hdr.lh_nfree);
	buf->l_hdr.lh_nentries = BSWAP_16(buf->l_hdr.lh_nentries);
	buf->l_hdr.lh_prefix_len = BSWAP_16(buf->l_hdr.lh_prefix_len);
	buf->l_hdr.lh_freelist = BSWAP_16(buf->l_hdr.lh_freelist);

	for (i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(&l); i++)
		buf->l_hash[i] = BSWAP_16(buf->l_hash[i]);

	for (i = 0; i < ZAP_LEAF_NUMCHUNKS(&l); i++) {
		zap_leaf_chunk_t *lc = &ZAP_LEAF_CHUNK(&l, i);
		struct zap_leaf_entry *le;

		switch (lc->l_free.lf_type) {
		case ZAP_CHUNK_ENTRY:
			le = &lc->l_entry;

			le->le_type = BSWAP_8(le->le_type);
			le->le_value_intlen = BSWAP_8(le->le_value_intlen);
			le->le_next = BSWAP_16(le->le_next);
			le->le_name_chunk = BSWAP_16(le->le_name_chunk);
			le->le_name_numints = BSWAP_16(le->le_name_numints);
			le->le_value_chunk = BSWAP_16(le->le_value_chunk);
			le->le_value_numints = BSWAP_16(le->le_value_numints);
			le->le_cd = BSWAP_32(le->le_cd);
			le->le_hash = BSWAP_64(le->le_hash);
			break;
		case ZAP_CHUNK_FREE:
			lc->l_free.lf_type = BSWAP_8(lc->l_free.lf_type);
			lc->l_free.lf_next = BSWAP_16(lc->l_free.lf_next);
			break;
		case ZAP_CHUNK_ARRAY:
			lc->l_array.la_type = BSWAP_8(lc->l_array.la_type);
			lc->l_array.la_next = BSWAP_16(lc->l_array.la_next);
			/* la_array doesn't need swapping */
			break;
		default:
			ASSERT(!"bad leaf type");
		}
	}
}

void
zap_leaf_init(zap_leaf_t *l, boolean_t sort)
{
	int i;

	l->l_bs = highbit64(l->l_dbuf->db_size) - 1;
	zap_memset(&zap_leaf_phys(l)->l_hdr, 0,
	    sizeof (struct zap_leaf_header));
	zap_memset(zap_leaf_phys(l)->l_hash, CHAIN_END,
	    2*ZAP_LEAF_HASH_NUMENTRIES(l));
	for (i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
		ZAP_LEAF_CHUNK(l, i).l_free.lf_type = ZAP_CHUNK_FREE;
		ZAP_LEAF_CHUNK(l, i).l_free.lf_next = i+1;
	}
	ZAP_LEAF_CHUNK(l, ZAP_LEAF_NUMCHUNKS(l)-1).l_free.lf_next = CHAIN_END;
	zap_leaf_phys(l)->l_hdr.lh_block_type = ZBT_LEAF;
	zap_leaf_phys(l)->l_hdr.lh_magic = ZAP_LEAF_MAGIC;
	zap_leaf_phys(l)->l_hdr.lh_nfree = ZAP_LEAF_NUMCHUNKS(l);
	if (sort)
		zap_leaf_phys(l)->l_hdr.lh_flags |= ZLF_ENTRIES_CDSORTED;
}

/*
 * Routines which manipulate leaf chunks (l_chunk[]).
 */

static uint16_t
zap_leaf_chunk_alloc(zap_leaf_t *l)
{
	int chunk;

	ASSERT(zap_leaf_phys(l)->l_hdr.lh_nfree > 0);

	chunk = zap_leaf_phys(l)->l_hdr.lh_freelist;
	ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
	ASSERT3U(ZAP_LEAF_CHUNK(l, chunk).l_free.lf_type, ==, ZAP_CHUNK_FREE);

	zap_leaf_phys(l)->l_hdr.lh_freelist =
	    ZAP_LEAF_CHUNK(l, chunk).l_free.lf_next;

	zap_leaf_phys(l)->l_hdr.lh_nfree--;

	return (chunk);
}

static void
zap_leaf_chunk_free(zap_leaf_t *l, uint16_t chunk)
{
	struct zap_leaf_free *zlf = &ZAP_LEAF_CHUNK(l, chunk).l_free;
	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nfree, <, ZAP_LEAF_NUMCHUNKS(l));
	ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
	ASSERT(zlf->lf_type != ZAP_CHUNK_FREE);

	zlf->lf_type = ZAP_CHUNK_FREE;
	zlf->lf_next = zap_leaf_phys(l)->l_hdr.lh_freelist;
	bzero(zlf->lf_pad, sizeof (zlf->lf_pad)); /* help it to compress */
	zap_leaf_phys(l)->l_hdr.lh_freelist = chunk;

	zap_leaf_phys(l)->l_hdr.lh_nfree++;
}

/*
 * Routines which manipulate leaf arrays (zap_leaf_array type chunks).
 */
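
/*
 * Pack num_integers integers of integer_size bytes each from buf into
 * a chain of newly allocated array chunks, most significant byte
 * first.  Returns the index of the first chunk in the chain; the
 * caller must have verified that enough free chunks are available.
 */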
static uint16_t
zap_leaf_array_create(zap_leaf_t *l, const char *buf,
    int integer_size, int num_integers)
{
	uint16_t chunk_head;
	uint16_t *chunkp = &chunk_head;
	int byten = 0;
	uint64_t value = 0;
	int shift = (integer_size-1)*8;
	int len = num_integers;

	ASSERT3U(num_integers * integer_size, <, MAX_ARRAY_BYTES);

	while (len > 0) {
		uint16_t chunk = zap_leaf_chunk_alloc(l);
		struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
		int i;

		la->la_type = ZAP_CHUNK_ARRAY;
		for (i = 0; i < ZAP_LEAF_ARRAY_BYTES; i++) {
			if (byten == 0)
				value = ldv(integer_size, buf);
			la->la_array[i] = value >> shift;
			value <<= 8;
			if (++byten == integer_size) {
				byten = 0;
				buf += integer_size;
				if (--len == 0)
					break;
			}
		}

		*chunkp = chunk;
		chunkp = &la->la_next;
	}
	*chunkp = CHAIN_END;

	return (chunk_head);
}

static void
zap_leaf_array_free(zap_leaf_t *l, uint16_t *chunkp)
{
	uint16_t chunk = *chunkp;

	*chunkp = CHAIN_END;

	while (chunk != CHAIN_END) {
		int nextchunk = ZAP_LEAF_CHUNK(l, chunk).l_array.la_next;
		ASSERT3U(ZAP_LEAF_CHUNK(l, chunk).l_array.la_type, ==,
		    ZAP_CHUNK_ARRAY);
		zap_leaf_chunk_free(l, chunk);
		chunk = nextchunk;
	}
}
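
/*
 * Unpack the big-endian integers stored in the chunk chain starting
 * at 'chunk' into buf, widening each value from array_int_len to
 * buf_int_len bytes as it is copied.
 */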
/* array_len and buf_len are in integers, not bytes */
static void
zap_leaf_array_read(zap_leaf_t *l, uint16_t chunk,
    int array_int_len, int array_len, int buf_int_len, uint64_t buf_len,
    void *buf)
{
	int len = MIN(array_len, buf_len);
	int byten = 0;
	uint64_t value = 0;
	char *p = buf;

	ASSERT3U(array_int_len, <=, buf_int_len);

	/* Fast path for one 8-byte integer */
	if (array_int_len == 8 && buf_int_len == 8 && len == 1) {
		struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
		uint8_t *ip = la->la_array;
		uint64_t *buf64 = buf;

		*buf64 = (uint64_t)ip[0] << 56 | (uint64_t)ip[1] << 48 |
		    (uint64_t)ip[2] << 40 | (uint64_t)ip[3] << 32 |
		    (uint64_t)ip[4] << 24 | (uint64_t)ip[5] << 16 |
		    (uint64_t)ip[6] << 8 | (uint64_t)ip[7];
		return;
	}

	/* Fast path for an array of 1-byte integers (eg. the entry name) */
	if (array_int_len == 1 && buf_int_len == 1 &&
	    buf_len > array_len + ZAP_LEAF_ARRAY_BYTES) {
		while (chunk != CHAIN_END) {
			struct zap_leaf_array *la =
			    &ZAP_LEAF_CHUNK(l, chunk).l_array;
			bcopy(la->la_array, p, ZAP_LEAF_ARRAY_BYTES);
			p += ZAP_LEAF_ARRAY_BYTES;
			chunk = la->la_next;
		}
		return;
	}

	while (len > 0) {
		struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
		int i;

		ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
		for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) {
			value = (value << 8) | la->la_array[i];
			byten++;
			if (byten == array_int_len) {
				stv(buf_int_len, p, value);
				byten = 0;
				len--;
				if (len == 0)
					return;
				p += buf_int_len;
			}
		}
		chunk = la->la_next;
	}
}

static boolean_t
zap_leaf_array_match(zap_leaf_t *l, zap_name_t *zn,
    int chunk, int array_numints)
{
	int bseen = 0;

	if (zap_getflags(zn->zn_zap) & ZAP_FLAG_UINT64_KEY) {
		uint64_t *thiskey;
		boolean_t match;

		ASSERT(zn->zn_key_intlen == sizeof (*thiskey));
		thiskey = kmem_alloc(array_numints * sizeof (*thiskey),
		    KM_SLEEP);

		zap_leaf_array_read(l, chunk, sizeof (*thiskey), array_numints,
		    sizeof (*thiskey), array_numints, thiskey);
		match = bcmp(thiskey, zn->zn_key_orig,
		    array_numints * sizeof (*thiskey)) == 0;
		kmem_free(thiskey, array_numints * sizeof (*thiskey));
		return (match);
	}

	ASSERT(zn->zn_key_intlen == 1);
	if (zn->zn_matchtype == MT_FIRST) {
		char *thisname = kmem_alloc(array_numints, KM_SLEEP);
		boolean_t match;

		zap_leaf_array_read(l, chunk, sizeof (char), array_numints,
		    sizeof (char), array_numints, thisname);
		match = zap_match(zn, thisname);
		kmem_free(thisname, array_numints);
		return (match);
	}

	/*
	 * Fast path for exact matching.
	 * First check that the lengths match, so that we don't read
	 * past the end of the zn_key_orig array.
	 */
	if (array_numints != zn->zn_key_orig_numints)
		return (B_FALSE);
	while (bseen < array_numints) {
		struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
		int toread = MIN(array_numints - bseen, ZAP_LEAF_ARRAY_BYTES);
		ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
		if (bcmp(la->la_array, (char *)zn->zn_key_orig + bseen, toread))
			break;
		chunk = la->la_next;
		bseen += toread;
	}
	return (bseen == array_numints);
}

/*
 * Routines which manipulate leaf entries.
 */
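
/*
 * Look up an entry by name in this leaf.  Candidates in the hash
 * bucket's chain are filtered by le_hash before the more expensive
 * name comparison; on success, zeh is filled in so the caller can
 * read, update, or remove the entry.
 */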
int
zap_leaf_lookup(zap_leaf_t *l, zap_name_t *zn, zap_entry_handle_t *zeh)
{
	uint16_t *chunkp;
	struct zap_leaf_entry *le;

	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);

again:
	for (chunkp = LEAF_HASH_ENTPTR(l, zn->zn_hash);
	    *chunkp != CHAIN_END; chunkp = &le->le_next) {
		uint16_t chunk = *chunkp;
		le = ZAP_LEAF_ENTRY(l, chunk);

		ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
		ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);

		if (le->le_hash != zn->zn_hash)
			continue;

		/*
		 * NB: the entry chain is always sorted by cd on
		 * normalized zap objects, so this will find the
		 * lowest-cd match for MT_FIRST.
		 */
		ASSERT(zn->zn_matchtype == MT_EXACT ||
		    (zap_leaf_phys(l)->l_hdr.lh_flags & ZLF_ENTRIES_CDSORTED));
		if (zap_leaf_array_match(l, zn, le->le_name_chunk,
		    le->le_name_numints)) {
			zeh->zeh_num_integers = le->le_value_numints;
			zeh->zeh_integer_size = le->le_value_intlen;
			zeh->zeh_cd = le->le_cd;
			zeh->zeh_hash = le->le_hash;
			zeh->zeh_chunkp = chunkp;
			zeh->zeh_leaf = l;
			return (0);
		}
	}

	/*
	 * NB: we could of course do this in one pass, but that would be
	 * a pain.  We'll see if MT_BEST is even used much.
	 */
	if (zn->zn_matchtype == MT_BEST) {
		zn->zn_matchtype = MT_FIRST;
		goto again;
	}

	return (SET_ERROR(ENOENT));
}

/* Return (h1,cd1 >= h2,cd2) */
#define	HCD_GTEQ(h1, cd1, h2, cd2) \
	((h1 > h2) ? TRUE : ((h1 == h2 && cd1 >= cd2) ? TRUE : FALSE))

int
zap_leaf_lookup_closest(zap_leaf_t *l,
    uint64_t h, uint32_t cd, zap_entry_handle_t *zeh)
{
	uint16_t chunk;
	uint64_t besth = -1ULL;
	uint32_t bestcd = -1U;
	uint16_t bestlh = ZAP_LEAF_HASH_NUMENTRIES(l)-1;
	uint16_t lh;
	struct zap_leaf_entry *le;

	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);

	for (lh = LEAF_HASH(l, h); lh <= bestlh; lh++) {
		for (chunk = zap_leaf_phys(l)->l_hash[lh];
		    chunk != CHAIN_END; chunk = le->le_next) {
			le = ZAP_LEAF_ENTRY(l, chunk);

			ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
			ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);

			if (HCD_GTEQ(le->le_hash, le->le_cd, h, cd) &&
			    HCD_GTEQ(besth, bestcd, le->le_hash, le->le_cd)) {
				ASSERT3U(bestlh, >=, lh);
				bestlh = lh;
				besth = le->le_hash;
				bestcd = le->le_cd;

				zeh->zeh_num_integers = le->le_value_numints;
				zeh->zeh_integer_size = le->le_value_intlen;
				zeh->zeh_cd = le->le_cd;
				zeh->zeh_hash = le->le_hash;
				zeh->zeh_fakechunk = chunk;
				zeh->zeh_chunkp = &zeh->zeh_fakechunk;
				zeh->zeh_leaf = l;
			}
		}
	}

	return (bestcd == -1U ? SET_ERROR(ENOENT) : 0);
}
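
/*
 * Read the value array of an entry into buf.  Returns EINVAL if the
 * caller's integer size is smaller than the stored one, or EOVERFLOW
 * (after copying what fits) if buf holds fewer integers than the
 * entry does.
 */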
int
zap_entry_read(const zap_entry_handle_t *zeh,
    uint8_t integer_size, uint64_t num_integers, void *buf)
{
	struct zap_leaf_entry *le =
	    ZAP_LEAF_ENTRY(zeh->zeh_leaf, *zeh->zeh_chunkp);
	ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);

	if (le->le_value_intlen > integer_size)
		return (SET_ERROR(EINVAL));

	zap_leaf_array_read(zeh->zeh_leaf, le->le_value_chunk,
	    le->le_value_intlen, le->le_value_numints,
	    integer_size, num_integers, buf);

	if (zeh->zeh_num_integers > num_integers)
		return (SET_ERROR(EOVERFLOW));
	return (0);
}

int
zap_entry_read_name(zap_t *zap, const zap_entry_handle_t *zeh, uint16_t buflen,
    char *buf)
{
	struct zap_leaf_entry *le =
	    ZAP_LEAF_ENTRY(zeh->zeh_leaf, *zeh->zeh_chunkp);
	ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);

	if (zap_getflags(zap) & ZAP_FLAG_UINT64_KEY) {
		zap_leaf_array_read(zeh->zeh_leaf, le->le_name_chunk, 8,
		    le->le_name_numints, 8, buflen / 8, buf);
	} else {
		zap_leaf_array_read(zeh->zeh_leaf, le->le_name_chunk, 1,
		    le->le_name_numints, 1, buflen, buf);
	}
	if (le->le_name_numints > buflen)
		return (SET_ERROR(EOVERFLOW));
	return (0);
}

int
zap_entry_update(zap_entry_handle_t *zeh,
    uint8_t integer_size, uint64_t num_integers, const void *buf)
{
	int delta_chunks;
	zap_leaf_t *l = zeh->zeh_leaf;
	struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, *zeh->zeh_chunkp);

	delta_chunks = ZAP_LEAF_ARRAY_NCHUNKS(num_integers * integer_size) -
	    ZAP_LEAF_ARRAY_NCHUNKS(le->le_value_numints * le->le_value_intlen);

	if ((int)zap_leaf_phys(l)->l_hdr.lh_nfree < delta_chunks)
		return (SET_ERROR(EAGAIN));

	zap_leaf_array_free(l, &le->le_value_chunk);
	le->le_value_chunk =
	    zap_leaf_array_create(l, buf, integer_size, num_integers);
	le->le_value_numints = num_integers;
	le->le_value_intlen = integer_size;
	return (0);
}

void
zap_entry_remove(zap_entry_handle_t *zeh)
{
	uint16_t entry_chunk;
	struct zap_leaf_entry *le;
	zap_leaf_t *l = zeh->zeh_leaf;

	ASSERT3P(zeh->zeh_chunkp, !=, &zeh->zeh_fakechunk);

	entry_chunk = *zeh->zeh_chunkp;
	le = ZAP_LEAF_ENTRY(l, entry_chunk);
	ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);

	zap_leaf_array_free(l, &le->le_name_chunk);
	zap_leaf_array_free(l, &le->le_value_chunk);

	*zeh->zeh_chunkp = le->le_next;
	zap_leaf_chunk_free(l, entry_chunk);

	zap_leaf_phys(l)->l_hdr.lh_nentries--;
}
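
/*
 * Create a new entry in this leaf.  If cd is ZAP_NEED_CD, the lowest
 * collision differentiator not already in use for this hash is
 * chosen.  Returns EAGAIN if the leaf lacks enough free chunks, so
 * that the caller can split the leaf and retry.
 */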
int
zap_entry_create(zap_leaf_t *l, zap_name_t *zn, uint32_t cd,
    uint8_t integer_size, uint64_t num_integers, const void *buf,
    zap_entry_handle_t *zeh)
{
	uint16_t chunk;
	uint16_t *chunkp;
	struct zap_leaf_entry *le;
	uint64_t valuelen;
	int numchunks;
	uint64_t h = zn->zn_hash;

	valuelen = integer_size * num_integers;

	numchunks = 1 + ZAP_LEAF_ARRAY_NCHUNKS(zn->zn_key_orig_numints *
	    zn->zn_key_intlen) + ZAP_LEAF_ARRAY_NCHUNKS(valuelen);
	if (numchunks > ZAP_LEAF_NUMCHUNKS(l))
		return (SET_ERROR(E2BIG));

	if (cd == ZAP_NEED_CD) {
		/* find the lowest unused cd */
		if (zap_leaf_phys(l)->l_hdr.lh_flags & ZLF_ENTRIES_CDSORTED) {
			cd = 0;

			for (chunk = *LEAF_HASH_ENTPTR(l, h);
			    chunk != CHAIN_END; chunk = le->le_next) {
				le = ZAP_LEAF_ENTRY(l, chunk);
				if (le->le_cd > cd)
					break;
				if (le->le_hash == h) {
					ASSERT3U(cd, ==, le->le_cd);
					cd++;
				}
			}
		} else {
			/* old unsorted format; do it the O(n^2) way */
			for (cd = 0; ; cd++) {
				for (chunk = *LEAF_HASH_ENTPTR(l, h);
				    chunk != CHAIN_END; chunk = le->le_next) {
					le = ZAP_LEAF_ENTRY(l, chunk);
					if (le->le_hash == h &&
					    le->le_cd == cd) {
						break;
					}
				}
				/* If this cd is not in use, we are good. */
				if (chunk == CHAIN_END)
					break;
			}
		}
		/*
		 * We would run out of space in a block before we could
		 * store enough entries to run out of CD values.
		 */
		ASSERT3U(cd, <, zap_maxcd(zn->zn_zap));
	}

	if (zap_leaf_phys(l)->l_hdr.lh_nfree < numchunks)
		return (SET_ERROR(EAGAIN));

	/* make the entry */
	chunk = zap_leaf_chunk_alloc(l);
	le = ZAP_LEAF_ENTRY(l, chunk);
	le->le_type = ZAP_CHUNK_ENTRY;
	le->le_name_chunk = zap_leaf_array_create(l, zn->zn_key_orig,
	    zn->zn_key_intlen, zn->zn_key_orig_numints);
	le->le_name_numints = zn->zn_key_orig_numints;
	le->le_value_chunk =
	    zap_leaf_array_create(l, buf, integer_size, num_integers);
	le->le_value_numints = num_integers;
	le->le_value_intlen = integer_size;
	le->le_hash = h;
	le->le_cd = cd;

	/* link it into the hash chain */
	/* XXX if we did the search above, we could just use that */
	chunkp = zap_leaf_rehash_entry(l, chunk);

	zap_leaf_phys(l)->l_hdr.lh_nentries++;

	zeh->zeh_leaf = l;
	zeh->zeh_num_integers = num_integers;
	zeh->zeh_integer_size = le->le_value_intlen;
	zeh->zeh_cd = le->le_cd;
	zeh->zeh_hash = le->le_hash;
	zeh->zeh_chunkp = chunkp;

	return (0);
}

/*
 * Determine if there is another entry with the same normalized form.
 * For performance purposes, either zn or name must be provided (the
 * other can be NULL).  Note, there usually won't be any hash
 * conflicts, in which case we don't need the concatenated/normalized
 * form of the name.  But all callers have one of these on hand anyway,
 * so might as well take advantage.  A cleaner but slower interface
 * would accept neither argument, and compute the normalized name as
 * needed (using zap_name_alloc(zap_entry_read_name(zeh))).
 */
boolean_t
zap_entry_normalization_conflict(zap_entry_handle_t *zeh, zap_name_t *zn,
    const char *name, zap_t *zap)
{
	uint64_t chunk;
	struct zap_leaf_entry *le;
	boolean_t allocdzn = B_FALSE;

	if (zap->zap_normflags == 0)
		return (B_FALSE);

	for (chunk = *LEAF_HASH_ENTPTR(zeh->zeh_leaf, zeh->zeh_hash);
	    chunk != CHAIN_END; chunk = le->le_next) {
		le = ZAP_LEAF_ENTRY(zeh->zeh_leaf, chunk);
		if (le->le_hash != zeh->zeh_hash)
			continue;
		if (le->le_cd == zeh->zeh_cd)
			continue;

		if (zn == NULL) {
			zn = zap_name_alloc(zap, name, MT_FIRST);
			allocdzn = B_TRUE;
		}
		if (zap_leaf_array_match(zeh->zeh_leaf, zn,
		    le->le_name_chunk, le->le_name_numints)) {
			if (allocdzn)
				zap_name_free(zn);
			return (B_TRUE);
		}
	}
	if (allocdzn)
		zap_name_free(zn);
	return (B_FALSE);
}

/*
 * Routines for transferring entries between leafs.
 */
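
/*
 * Link the given entry chunk into the hash chain for its hash value,
 * and return a pointer to the chain link that now refers to it.
 */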
static uint16_t *
zap_leaf_rehash_entry(zap_leaf_t *l, uint16_t entry)
{
	struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, entry);
	struct zap_leaf_entry *le2;
	uint16_t *chunkp;

	/*
	 * keep the entry chain sorted by cd
	 * NB: this will not cause problems for unsorted leafs, though
	 * it is unnecessary there.
	 */
	for (chunkp = LEAF_HASH_ENTPTR(l, le->le_hash);
	    *chunkp != CHAIN_END; chunkp = &le2->le_next) {
		le2 = ZAP_LEAF_ENTRY(l, *chunkp);
		if (le2->le_cd > le->le_cd)
			break;
	}

	le->le_next = *chunkp;
	*chunkp = entry;
	return (chunkp);
}

static uint16_t
zap_leaf_transfer_array(zap_leaf_t *l, uint16_t chunk, zap_leaf_t *nl)
{
	uint16_t new_chunk;
	uint16_t *nchunkp = &new_chunk;

	while (chunk != CHAIN_END) {
		uint16_t nchunk = zap_leaf_chunk_alloc(nl);
		struct zap_leaf_array *nla =
		    &ZAP_LEAF_CHUNK(nl, nchunk).l_array;
		struct zap_leaf_array *la =
		    &ZAP_LEAF_CHUNK(l, chunk).l_array;
		int nextchunk = la->la_next;

		ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
		ASSERT3U(nchunk, <, ZAP_LEAF_NUMCHUNKS(l));

		*nla = *la; /* structure assignment */

		zap_leaf_chunk_free(l, chunk);
		chunk = nextchunk;
		*nchunkp = nchunk;
		nchunkp = &nla->la_next;
	}
	*nchunkp = CHAIN_END;
	return (new_chunk);
}

static void
zap_leaf_transfer_entry(zap_leaf_t *l, int entry, zap_leaf_t *nl)
{
	struct zap_leaf_entry *le, *nle;
	uint16_t chunk;

	le = ZAP_LEAF_ENTRY(l, entry);
	ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);

	chunk = zap_leaf_chunk_alloc(nl);
	nle = ZAP_LEAF_ENTRY(nl, chunk);
	*nle = *le; /* structure assignment */

	(void) zap_leaf_rehash_entry(nl, chunk);

	nle->le_name_chunk = zap_leaf_transfer_array(l, le->le_name_chunk, nl);
	nle->le_value_chunk =
	    zap_leaf_transfer_array(l, le->le_value_chunk, nl);

	zap_leaf_chunk_free(l, entry);

	zap_leaf_phys(l)->l_hdr.lh_nentries--;
	zap_leaf_phys(nl)->l_hdr.lh_nentries++;
}

/*
 * Transfer the entries whose hash prefix ends in 1 to the new leaf.
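 *
 * For example, a leaf with lh_prefix 0b10 and lh_prefix_len 2 covers
 * all hashes whose top two bits are 10.  After the split, this leaf
 * keeps prefix 0b100 and the new leaf gets prefix 0b101, each with
 * lh_prefix_len 3; entries are divided on the third-highest bit of
 * le_hash.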
 */
void
zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_t sort)
{
	int i;
	int bit = 64 - 1 - zap_leaf_phys(l)->l_hdr.lh_prefix_len;

	/* set new prefix and prefix_len */
	zap_leaf_phys(l)->l_hdr.lh_prefix <<= 1;
	zap_leaf_phys(l)->l_hdr.lh_prefix_len++;
	zap_leaf_phys(nl)->l_hdr.lh_prefix =
	    zap_leaf_phys(l)->l_hdr.lh_prefix | 1;
	zap_leaf_phys(nl)->l_hdr.lh_prefix_len =
	    zap_leaf_phys(l)->l_hdr.lh_prefix_len;

	/* break existing hash chains */
	zap_memset(zap_leaf_phys(l)->l_hash, CHAIN_END,
	    2*ZAP_LEAF_HASH_NUMENTRIES(l));

	if (sort)
		zap_leaf_phys(l)->l_hdr.lh_flags |= ZLF_ENTRIES_CDSORTED;

	/*
	 * Transfer entries whose hash bit 'bit' is set to nl; rehash
	 * the remaining entries
	 *
	 * NB: We could find entries via the hashtable instead. That
	 * would be O(hashents+numents) rather than O(numblks+numents),
	 * but this accesses memory more sequentially, and when we're
	 * called, the block is usually pretty full.
	 */
	for (i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
		struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, i);
		if (le->le_type != ZAP_CHUNK_ENTRY)
			continue;

		if (le->le_hash & (1ULL << bit))
			zap_leaf_transfer_entry(l, i, nl);
		else
			(void) zap_leaf_rehash_entry(l, i);
	}
}

void
zap_leaf_stats(zap_t *zap, zap_leaf_t *l, zap_stats_t *zs)
{
	int i, n;

	n = zap_f_phys(zap)->zap_ptrtbl.zt_shift -
	    zap_leaf_phys(l)->l_hdr.lh_prefix_len;
	n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
	zs->zs_leafs_with_2n_pointers[n]++;

	n = zap_leaf_phys(l)->l_hdr.lh_nentries/5;
	n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
	zs->zs_blocks_with_n5_entries[n]++;

	n = ((1<<FZAP_BLOCK_SHIFT(zap)) -
	    zap_leaf_phys(l)->l_hdr.lh_nfree * (ZAP_LEAF_ARRAY_BYTES+1))*10 /
	    (1<<FZAP_BLOCK_SHIFT(zap));
	n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
	zs->zs_blocks_n_tenths_full[n]++;

	for (i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(l); i++) {
		int nentries = 0;
		int chunk = zap_leaf_phys(l)->l_hash[i];

		while (chunk != CHAIN_END) {
			struct zap_leaf_entry *le =
			    ZAP_LEAF_ENTRY(l, chunk);

			n = 1 + ZAP_LEAF_ARRAY_NCHUNKS(le->le_name_numints) +
			    ZAP_LEAF_ARRAY_NCHUNKS(le->le_value_numints *
			    le->le_value_intlen);
			n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
			zs->zs_entries_using_n_chunks[n]++;

			chunk = le->le_next;
			nentries++;
		}

		n = nentries;
		n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
		zs->zs_buckets_with_n_entries[n]++;
	}
}