/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 */

#include <assert.h>
#include <fcntl.h>
#include <libgen.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <libzutil.h>
#include <sys/crypto/icp.h>
#include <sys/processor.h>
#include <sys/rrwlock.h>
#include <sys/spa.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/time.h>
#include <sys/utsname.h>
#include <sys/zfs_context.h>
#include <sys/zfs_onexit.h>
#include <sys/zfs_vfsops.h>
#include <sys/zstd/zstd.h>
#include <sys/zvol.h>
#include <zfs_fletcher.h>
#include <zlib.h>

/*
 * Emulation of kernel services in userland.
 */

uint64_t physmem;
uint32_t hostid;
struct utsname hw_utsname;

/* If set, all blocks read will be copied to the specified directory. */
char *vn_dumpdir = NULL;

/* this only exists to have its address taken */
struct proc p0;

/*
 * =========================================================================
 * threads
 * =========================================================================
 *
 * TS_STACK_MIN is dictated by the minimum allowed pthread stack size. While
 * TS_STACK_MAX is somewhat arbitrary, it was selected to be large enough for
 * the expected stack depth while small enough to avoid exhausting address
 * space with high thread counts.
 */
#define	TS_STACK_MIN	MAX(PTHREAD_STACK_MIN, 32768)
#define	TS_STACK_MAX	(256 * 1024)

struct zk_thread_wrapper {
	void (*func)(void *);
	void *arg;
};

static void *
zk_thread_wrapper(void *arg)
{
	struct zk_thread_wrapper ztw;
	memcpy(&ztw, arg, sizeof (ztw));
	free(arg);
	ztw.func(ztw.arg);
	return (NULL);
}
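
/*
 * Illustrative sketch (assumption, not part of this file's API): how a
 * userland consumer such as ztest might drive zk_thread_create() directly.
 * Kernel-style callers normally go through the thread_create() macro from
 * sys/zfs_context.h instead. The worker function below is hypothetical; the
 * pthread_join() cast simply reverses the cast used in the return statement
 * of zk_thread_create().
 *
 *	static void
 *	my_worker(void *arg)
 *	{
 *		...
 *	}
 *
 *	kthread_t *t = zk_thread_create(my_worker, arg,
 *	    0, TS_JOINABLE);		(0 means "use the default stack size")
 *	...
 *	VERIFY0(pthread_join((pthread_t)(uintptr_t)t, NULL));
 */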

kthread_t *
zk_thread_create(void (*func)(void *), void *arg, size_t stksize, int state)
{
	pthread_attr_t attr;
	pthread_t tid;
	char *stkstr;
	struct zk_thread_wrapper *ztw;
	int detachstate = PTHREAD_CREATE_DETACHED;

	VERIFY0(pthread_attr_init(&attr));

	if (state & TS_JOINABLE)
		detachstate = PTHREAD_CREATE_JOINABLE;

	VERIFY0(pthread_attr_setdetachstate(&attr, detachstate));

	/*
	 * We allow the default stack size in user space to be specified by
	 * setting the ZFS_STACK_SIZE environment variable. This allows us
	 * the convenience of observing and debugging stack overruns in
	 * user space. Explicitly specified stack sizes will be honored.
	 * The usage of ZFS_STACK_SIZE is discussed further in the
	 * ENVIRONMENT VARIABLES section of the ztest(1) man page.
	 */
	if (stksize == 0) {
		stkstr = getenv("ZFS_STACK_SIZE");

		if (stkstr == NULL)
			stksize = TS_STACK_MAX;
		else
			stksize = MAX(atoi(stkstr), TS_STACK_MIN);
	}

	VERIFY3S(stksize, >, 0);
	stksize = P2ROUNDUP(MAX(stksize, TS_STACK_MIN), PAGESIZE);

	/*
	 * If this ever fails, it may be because the stack size is not a
	 * multiple of system page size.
	 */
	VERIFY0(pthread_attr_setstacksize(&attr, stksize));
	VERIFY0(pthread_attr_setguardsize(&attr, PAGESIZE));

	VERIFY(ztw = malloc(sizeof (*ztw)));
	ztw->func = func;
	ztw->arg = arg;
	VERIFY0(pthread_create(&tid, &attr, zk_thread_wrapper, ztw));
	VERIFY0(pthread_attr_destroy(&attr));

	return ((void *)(uintptr_t)tid);
}

/*
 * =========================================================================
 * kstats
 * =========================================================================
 */
kstat_t *
kstat_create(const char *module, int instance, const char *name,
    const char *class, uchar_t type, ulong_t ndata, uchar_t ks_flag)
{
	(void) module, (void) instance, (void) name, (void) class, (void) type,
	    (void) ndata, (void) ks_flag;
	return (NULL);
}

void
kstat_install(kstat_t *ksp)
{
	(void) ksp;
}

void
kstat_delete(kstat_t *ksp)
{
	(void) ksp;
}

void
kstat_set_raw_ops(kstat_t *ksp,
    int (*headers)(char *buf, size_t size),
    int (*data)(char *buf, size_t size, void *data),
    void *(*addr)(kstat_t *ksp, loff_t index))
{
	(void) ksp, (void) headers, (void) data, (void) addr;
}

/*
 * =========================================================================
 * mutexes
 * =========================================================================
 */

void
mutex_init(kmutex_t *mp, char *name, int type, void *cookie)
{
	(void) name, (void) type, (void) cookie;
	VERIFY0(pthread_mutex_init(&mp->m_lock, NULL));
	memset(&mp->m_owner, 0, sizeof (pthread_t));
}

void
mutex_destroy(kmutex_t *mp)
{
	VERIFY0(pthread_mutex_destroy(&mp->m_lock));
}

void
mutex_enter(kmutex_t *mp)
{
	VERIFY0(pthread_mutex_lock(&mp->m_lock));
	mp->m_owner = pthread_self();
}

int
mutex_tryenter(kmutex_t *mp)
{
	int error = pthread_mutex_trylock(&mp->m_lock);
	if (error == 0) {
		mp->m_owner = pthread_self();
		return (1);
	} else {
		VERIFY3S(error, ==, EBUSY);
		return (0);
	}
}

void
mutex_exit(kmutex_t *mp)
{
	memset(&mp->m_owner, 0, sizeof (pthread_t));
	VERIFY0(pthread_mutex_unlock(&mp->m_lock));
}
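
/*
 * Illustrative sketch (assumption): the usual calling pattern for the mutex
 * emulation above, mirroring kernel code. "lock" and "count" are
 * hypothetical; MUTEX_HELD() works because mutex_enter() records
 * pthread_self() in m_owner.
 *
 *	kmutex_t lock;
 *	mutex_init(&lock, NULL, MUTEX_DEFAULT, NULL);
 *
 *	mutex_enter(&lock);
 *	ASSERT(MUTEX_HELD(&lock));
 *	count++;
 *	mutex_exit(&lock);
 *
 *	mutex_destroy(&lock);
 */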

/*
 * =========================================================================
 * rwlocks
 * =========================================================================
 */

void
rw_init(krwlock_t *rwlp, char *name, int type, void *arg)
{
	(void) name, (void) type, (void) arg;
	VERIFY0(pthread_rwlock_init(&rwlp->rw_lock, NULL));
	rwlp->rw_readers = 0;
	rwlp->rw_owner = 0;
}

void
rw_destroy(krwlock_t *rwlp)
{
	VERIFY0(pthread_rwlock_destroy(&rwlp->rw_lock));
}

void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
	if (rw == RW_READER) {
		VERIFY0(pthread_rwlock_rdlock(&rwlp->rw_lock));
		atomic_inc_uint(&rwlp->rw_readers);
	} else {
		VERIFY0(pthread_rwlock_wrlock(&rwlp->rw_lock));
		rwlp->rw_owner = pthread_self();
	}
}

void
rw_exit(krwlock_t *rwlp)
{
	if (RW_READ_HELD(rwlp))
		atomic_dec_uint(&rwlp->rw_readers);
	else
		rwlp->rw_owner = 0;

	VERIFY0(pthread_rwlock_unlock(&rwlp->rw_lock));
}

int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
	int error;

	if (rw == RW_READER)
		error = pthread_rwlock_tryrdlock(&rwlp->rw_lock);
	else
		error = pthread_rwlock_trywrlock(&rwlp->rw_lock);

	if (error == 0) {
		if (rw == RW_READER)
			atomic_inc_uint(&rwlp->rw_readers);
		else
			rwlp->rw_owner = pthread_self();

		return (1);
	}

	VERIFY3S(error, ==, EBUSY);

	return (0);
}

uint32_t
zone_get_hostid(void *zonep)
{
	/*
	 * We're emulating the system's hostid in userland.
	 */
	(void) zonep;
	return (hostid);
}

int
rw_tryupgrade(krwlock_t *rwlp)
{
	(void) rwlp;
	return (0);
}
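
/*
 * Illustrative sketch (assumption): reader/writer usage of the emulation
 * above. Note that rw_tryupgrade() always fails in userland, so callers
 * must be prepared to drop the lock and re-acquire it as a writer.
 *
 *	krwlock_t rwl;
 *	rw_init(&rwl, NULL, RW_DEFAULT, NULL);
 *
 *	rw_enter(&rwl, RW_READER);
 *	if (rw_tryupgrade(&rwl) == 0) {
 *		rw_exit(&rwl);
 *		rw_enter(&rwl, RW_WRITER);
 *	}
 *	rw_exit(&rwl);
 *
 *	rw_destroy(&rwl);
 */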

/*
 * =========================================================================
 * condition variables
 * =========================================================================
 */

void
cv_init(kcondvar_t *cv, char *name, int type, void *arg)
{
	(void) name, (void) type, (void) arg;
	VERIFY0(pthread_cond_init(cv, NULL));
}

void
cv_destroy(kcondvar_t *cv)
{
	VERIFY0(pthread_cond_destroy(cv));
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mp)
{
	memset(&mp->m_owner, 0, sizeof (pthread_t));
	VERIFY0(pthread_cond_wait(cv, &mp->m_lock));
	mp->m_owner = pthread_self();
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mp)
{
	cv_wait(cv, mp);
	return (1);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime)
{
	int error;
	struct timeval tv;
	struct timespec ts;
	clock_t delta;

	delta = abstime - ddi_get_lbolt();
	if (delta <= 0)
		return (-1);

	VERIFY(gettimeofday(&tv, NULL) == 0);

	ts.tv_sec = tv.tv_sec + delta / hz;
	ts.tv_nsec = tv.tv_usec * NSEC_PER_USEC + (delta % hz) * (NANOSEC / hz);
	if (ts.tv_nsec >= NANOSEC) {
		ts.tv_sec++;
		ts.tv_nsec -= NANOSEC;
	}

	memset(&mp->m_owner, 0, sizeof (pthread_t));
	error = pthread_cond_timedwait(cv, &mp->m_lock, &ts);
	mp->m_owner = pthread_self();

	if (error == ETIMEDOUT)
		return (-1);

	VERIFY0(error);

	return (1);
}

int
cv_timedwait_hires(kcondvar_t *cv, kmutex_t *mp, hrtime_t tim, hrtime_t res,
    int flag)
{
	(void) res;
	int error;
	struct timeval tv;
	struct timespec ts;
	hrtime_t delta;

	ASSERT(flag == 0 || flag == CALLOUT_FLAG_ABSOLUTE);

	delta = tim;
	if (flag & CALLOUT_FLAG_ABSOLUTE)
		delta -= gethrtime();

	if (delta <= 0)
		return (-1);

	VERIFY0(gettimeofday(&tv, NULL));

	ts.tv_sec = tv.tv_sec + delta / NANOSEC;
	ts.tv_nsec = tv.tv_usec * NSEC_PER_USEC + (delta % NANOSEC);
	if (ts.tv_nsec >= NANOSEC) {
		ts.tv_sec++;
		ts.tv_nsec -= NANOSEC;
	}

	memset(&mp->m_owner, 0, sizeof (pthread_t));
	error = pthread_cond_timedwait(cv, &mp->m_lock, &ts);
	mp->m_owner = pthread_self();

	if (error == ETIMEDOUT)
		return (-1);

	VERIFY0(error);

	return (1);
}

void
cv_signal(kcondvar_t *cv)
{
	VERIFY0(pthread_cond_signal(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{
	VERIFY0(pthread_cond_broadcast(cv));
}
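
/*
 * Illustrative sketch (assumption): the standard predicate loop these
 * primitives are meant to support. "ready", "lock" and "cv" are
 * hypothetical. Note that cv_timedwait() takes an absolute time expressed
 * in lbolt ticks and returns -1 on timeout.
 *
 *	mutex_enter(&lock);
 *	while (!ready) {
 *		if (cv_timedwait(&cv, &lock,
 *		    ddi_get_lbolt() + 5 * hz) == -1)
 *			break;			(timed out)
 *	}
 *	mutex_exit(&lock);
 *
 *	Producer side:
 *
 *	mutex_enter(&lock);
 *	ready = B_TRUE;
 *	cv_broadcast(&cv);
 *	mutex_exit(&lock);
 */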

/*
 * =========================================================================
 * procfs list
 * =========================================================================
 */

void
seq_printf(struct seq_file *m, const char *fmt, ...)
{
	(void) m, (void) fmt;
}

void
procfs_list_install(const char *module,
    const char *submodule,
    const char *name,
    mode_t mode,
    procfs_list_t *procfs_list,
    int (*show)(struct seq_file *f, void *p),
    int (*show_header)(struct seq_file *f),
    int (*clear)(procfs_list_t *procfs_list),
    size_t procfs_list_node_off)
{
	(void) module, (void) submodule, (void) name, (void) mode, (void) show,
	    (void) show_header, (void) clear;
	mutex_init(&procfs_list->pl_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&procfs_list->pl_list,
	    procfs_list_node_off + sizeof (procfs_list_node_t),
	    procfs_list_node_off + offsetof(procfs_list_node_t, pln_link));
	procfs_list->pl_next_id = 1;
	procfs_list->pl_node_offset = procfs_list_node_off;
}

void
procfs_list_uninstall(procfs_list_t *procfs_list)
{
	(void) procfs_list;
}

void
procfs_list_destroy(procfs_list_t *procfs_list)
{
	ASSERT(list_is_empty(&procfs_list->pl_list));
	list_destroy(&procfs_list->pl_list);
	mutex_destroy(&procfs_list->pl_lock);
}

#define	NODE_ID(procfs_list, obj) \
		(((procfs_list_node_t *)(((char *)obj) + \
		(procfs_list)->pl_node_offset))->pln_id)

void
procfs_list_add(procfs_list_t *procfs_list, void *p)
{
	ASSERT(MUTEX_HELD(&procfs_list->pl_lock));
	NODE_ID(procfs_list, p) = procfs_list->pl_next_id++;
	list_insert_tail(&procfs_list->pl_list, p);
}
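
/*
 * Illustrative sketch (assumption): how a consumer embeds a
 * procfs_list_node_t in its record and registers the list. All names
 * below (my_entry_t, my_show, my_show_header, my_clear) are hypothetical.
 *
 *	typedef struct my_entry {
 *		procfs_list_node_t	me_node;
 *		uint64_t		me_value;
 *	} my_entry_t;
 *
 *	procfs_list_t pl;
 *	procfs_list_install("zfs", NULL, "my_list", 0644, &pl,
 *	    my_show, my_show_header, my_clear,
 *	    offsetof(my_entry_t, me_node));
 *
 *	mutex_enter(&pl.pl_lock);
 *	procfs_list_add(&pl, entry);		(assigns a unique pln_id)
 *	mutex_exit(&pl.pl_lock);
 */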

/*
 * =========================================================================
 * vnode operations
 * =========================================================================
 */

/*
 * =========================================================================
 * Figure out which debugging statements to print
 * =========================================================================
 */

static char *dprintf_string;
static int dprintf_print_all;

int
dprintf_find_string(const char *string)
{
	char *tmp_str = dprintf_string;
	int len = strlen(string);

	/*
	 * Find out if this is a string we want to print.
	 * String format: file1.c,function_name1,file2.c,file3.c
	 */

	while (tmp_str != NULL) {
		if (strncmp(tmp_str, string, len) == 0 &&
		    (tmp_str[len] == ',' || tmp_str[len] == '\0'))
			return (1);
		tmp_str = strchr(tmp_str, ',');
		if (tmp_str != NULL)
			tmp_str++; /* Get rid of , */
	}
	return (0);
}

void
dprintf_setup(int *argc, char **argv)
{
	int i, j;

	/*
	 * Debugging can be specified two ways: by setting the
	 * environment variable ZFS_DEBUG, or by including a
	 * "debug=..." argument on the command line. The command
	 * line setting overrides the environment variable.
	 */

	for (i = 1; i < *argc; i++) {
		int len = strlen("debug=");
		/* First look for a command line argument */
		if (strncmp("debug=", argv[i], len) == 0) {
			dprintf_string = argv[i] + len;
			/* Remove from args */
			for (j = i; j < *argc; j++)
				argv[j] = argv[j+1];
			argv[j] = NULL;
			(*argc)--;
		}
	}

	if (dprintf_string == NULL) {
		/* Look for ZFS_DEBUG environment variable */
		dprintf_string = getenv("ZFS_DEBUG");
	}

	/*
	 * Are we just turning on all debugging?
	 */
	if (dprintf_find_string("on"))
		dprintf_print_all = 1;

	if (dprintf_string != NULL)
		zfs_flags |= ZFS_DEBUG_DPRINTF;
}
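
/*
 * Illustrative examples (assumption) of the filter strings parsed above,
 * supplied either via ZFS_DEBUG or a "debug=..." command line argument.
 * The keywords "on", "pid", "tid", "cpu", "time" and "long" are recognized
 * by __dprintf() below; anything else is matched against file and function
 * names.
 *
 *	ZFS_DEBUG=on			print every dprintf
 *	ZFS_DEBUG=dbuf.c,spa_sync	only these files/functions
 *	ZFS_DEBUG=on,long,pid,tid	add file:line, pid and tid prefixes
 *
 *	ztest debug=arc.c,metaslab.c	equivalent command line form
 */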

/*
 * =========================================================================
 * debug printfs
 * =========================================================================
 */
void
__dprintf(boolean_t dprint, const char *file, const char *func,
    int line, const char *fmt, ...)
{
	/* Get rid of annoying "../common/" prefix to filename. */
	const char *newfile = zfs_basename(file);

	va_list adx;
	if (dprint) {
		/* dprintf messages are printed immediately */

		if (!dprintf_print_all &&
		    !dprintf_find_string(newfile) &&
		    !dprintf_find_string(func))
			return;

		/* Print out just the function name if requested */
		flockfile(stdout);
		if (dprintf_find_string("pid"))
			(void) printf("%d ", getpid());
		if (dprintf_find_string("tid"))
			(void) printf("%ju ",
			    (uintmax_t)(uintptr_t)pthread_self());
		if (dprintf_find_string("cpu"))
			(void) printf("%u ", getcpuid());
		if (dprintf_find_string("time"))
			(void) printf("%llu ", gethrtime());
		if (dprintf_find_string("long"))
			(void) printf("%s, line %d: ", newfile, line);
		(void) printf("dprintf: %s: ", func);
		va_start(adx, fmt);
		(void) vprintf(fmt, adx);
		va_end(adx);
		funlockfile(stdout);
	} else {
		/* zfs_dbgmsg is logged for dumping later */
		size_t size;
		char *buf;
		int i;

		size = 1024;
		buf = umem_alloc(size, UMEM_NOFAIL);
		i = snprintf(buf, size, "%s:%d:%s(): ", newfile, line, func);

		if (i < size) {
			va_start(adx, fmt);
			(void) vsnprintf(buf + i, size - i, fmt, adx);
			va_end(adx);
		}

		__zfs_dbgmsg(buf);

		umem_free(buf, size);
	}
}

/*
 * =========================================================================
 * cmn_err() and panic()
 * =========================================================================
 */
static char ce_prefix[CE_IGNORE][10] = { "", "NOTICE: ", "WARNING: ", "" };
static char ce_suffix[CE_IGNORE][2] = { "", "\n", "\n", "" };

__attribute__((noreturn)) void
vpanic(const char *fmt, va_list adx)
{
	(void) fprintf(stderr, "error: ");
	(void) vfprintf(stderr, fmt, adx);
	(void) fprintf(stderr, "\n");

	abort();	/* think of it as a "user-level crash dump" */
}

__attribute__((noreturn)) void
panic(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vpanic(fmt, adx);
	va_end(adx);
}

void
vcmn_err(int ce, const char *fmt, va_list adx)
{
	if (ce == CE_PANIC)
		vpanic(fmt, adx);
	if (ce != CE_NOTE) {	/* suppress noise in userland stress testing */
		(void) fprintf(stderr, "%s", ce_prefix[ce]);
		(void) vfprintf(stderr, fmt, adx);
		(void) fprintf(stderr, "%s", ce_suffix[ce]);
	}
}

void
cmn_err(int ce, const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(ce, fmt, adx);
	va_end(adx);
}

/*
 * =========================================================================
 * misc routines
 * =========================================================================
 */

void
delay(clock_t ticks)
{
	(void) poll(0, 0, ticks * (1000 / hz));
}

/*
 * Find highest one bit set.
 * Returns bit number + 1 of highest bit that is set, otherwise returns 0.
 * The __builtin_clzll() function is supported by both GCC and Clang.
 */
int
highbit64(uint64_t i)
{
	if (i == 0)
		return (0);

	return (NBBY * sizeof (uint64_t) - __builtin_clzll(i));
}

/*
 * Find lowest one bit set.
 * Returns bit number + 1 of lowest bit that is set, otherwise returns 0.
 * The __builtin_ffsll() function is supported by both GCC and Clang.
 */
int
lowbit64(uint64_t i)
{
	if (i == 0)
		return (0);

	return (__builtin_ffsll(i));
}

const char *random_path = "/dev/random";
const char *urandom_path = "/dev/urandom";
static int random_fd = -1, urandom_fd = -1;

void
random_init(void)
{
	VERIFY((random_fd = open(random_path, O_RDONLY | O_CLOEXEC)) != -1);
	VERIFY((urandom_fd = open(urandom_path, O_RDONLY | O_CLOEXEC)) != -1);
}

void
random_fini(void)
{
	close(random_fd);
	close(urandom_fd);

	random_fd = -1;
	urandom_fd = -1;
}

static int
random_get_bytes_common(uint8_t *ptr, size_t len, int fd)
{
	size_t resid = len;
	ssize_t bytes;

	ASSERT(fd != -1);

	while (resid != 0) {
		bytes = read(fd, ptr, resid);
		ASSERT3S(bytes, >=, 0);
		ptr += bytes;
		resid -= bytes;
	}

	return (0);
}

int
random_get_bytes(uint8_t *ptr, size_t len)
{
	return (random_get_bytes_common(ptr, len, random_fd));
}

int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	return (random_get_bytes_common(ptr, len, urandom_fd));
}

int
ddi_strtoull(const char *str, char **nptr, int base, u_longlong_t *result)
{
	(void) nptr;
	char *end;

	*result = strtoull(str, &end, base);
	if (*result == 0)
		return (errno);
	return (0);
}

utsname_t *
utsname(void)
{
	return (&hw_utsname);
}

/*
 * =========================================================================
 * kernel emulation setup & teardown
 * =========================================================================
 */
static int
umem_out_of_memory(void)
{
	char errmsg[] = "out of memory -- generating core dump\n";

	(void) fprintf(stderr, "%s", errmsg);
	abort();
	return (0);
}

void
kernel_init(int mode)
{
	extern uint_t rrw_tsd_key;

	umem_nofail_callback(umem_out_of_memory);

	physmem = sysconf(_SC_PHYS_PAGES);

	dprintf("physmem = %llu pages (%.2f GB)\n", (u_longlong_t)physmem,
	    (double)physmem * sysconf(_SC_PAGE_SIZE) / (1ULL << 30));

	hostid = (mode & SPA_MODE_WRITE) ? get_system_hostid() : 0;

	random_init();

	VERIFY0(uname(&hw_utsname));

	system_taskq_init();
	icp_init();

	zstd_init();

	spa_init((spa_mode_t)mode);

	fletcher_4_init();

	tsd_create(&rrw_tsd_key, rrw_tsd_destroy);
}

void
kernel_fini(void)
{
	fletcher_4_fini();
	spa_fini();

	zstd_fini();

	icp_fini();
	system_taskq_fini();

	random_fini();
}
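
/*
 * Illustrative sketch (assumption): the minimal lifecycle a libzpool
 * consumer (zdb, ztest, etc.) follows around the setup/teardown above.
 * The body between init and fini is elided.
 *
 *	int
 *	main(int argc, char **argv)
 *	{
 *		dprintf_setup(&argc, argv);
 *		kernel_init(SPA_MODE_READ);	(or SPA_MODE_READ|SPA_MODE_WRITE)
 *
 *		... use the spa_*() / dmu_*() interfaces ...
 *
 *		kernel_fini();
 *		return (0);
 *	}
 */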

uid_t
crgetuid(cred_t *cr)
{
	(void) cr;
	return (0);
}

uid_t
crgetruid(cred_t *cr)
{
	(void) cr;
	return (0);
}

gid_t
crgetgid(cred_t *cr)
{
	(void) cr;
	return (0);
}

int
crgetngroups(cred_t *cr)
{
	(void) cr;
	return (0);
}

gid_t *
crgetgroups(cred_t *cr)
{
	(void) cr;
	return (NULL);
}

int
zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr)
{
	(void) name, (void) cr;
	return (0);
}

int
zfs_secpolicy_rename_perms(const char *from, const char *to, cred_t *cr)
{
	(void) from, (void) to, (void) cr;
	return (0);
}

int
zfs_secpolicy_destroy_perms(const char *name, cred_t *cr)
{
	(void) name, (void) cr;
	return (0);
}

int
secpolicy_zfs(const cred_t *cr)
{
	(void) cr;
	return (0);
}

int
secpolicy_zfs_proc(const cred_t *cr, proc_t *proc)
{
	(void) cr, (void) proc;
	return (0);
}

ksiddomain_t *
ksid_lookupdomain(const char *dom)
{
	ksiddomain_t *kd;

	kd = umem_zalloc(sizeof (ksiddomain_t), UMEM_NOFAIL);
	kd->kd_name = spa_strdup(dom);
	return (kd);
}

void
ksiddomain_rele(ksiddomain_t *ksid)
{
	spa_strfree(ksid->kd_name);
	umem_free(ksid, sizeof (ksiddomain_t));
}

char *
kmem_vasprintf(const char *fmt, va_list adx)
{
	char *buf = NULL;
	va_list adx_copy;

	va_copy(adx_copy, adx);
	VERIFY(vasprintf(&buf, fmt, adx_copy) != -1);
	va_end(adx_copy);

	return (buf);
}

char *
kmem_asprintf(const char *fmt, ...)
{
	char *buf = NULL;
	va_list adx;

	va_start(adx, fmt);
	VERIFY(vasprintf(&buf, fmt, adx) != -1);
	va_end(adx);

	return (buf);
}

/*
 * snprintf() will return the number of characters that it would have
 * printed whenever it is limited by the value of the size variable, rather
 * than the number of characters that it did print. This can cause misbehavior
 * on subsequent uses of the return value, so we define a safe version that
 * will return the number of characters actually printed, minus the
 * terminating NUL character. Subsequent use of this by the safe string
 * functions is safe whether it is snprintf(), strlcat() or strlcpy().
 */
int
kmem_scnprintf(char *restrict str, size_t size, const char *restrict fmt, ...)
{
	int n;
	va_list ap;

	/* Make the 0 case a no-op so that we do not return -1 */
	if (size == 0)
		return (0);

	va_start(ap, fmt);
	n = vsnprintf(str, size, fmt, ap);
	va_end(ap);

	if (n >= size)
		n = size - 1;

	return (n);
}
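
/*
 * Illustrative sketch (assumption): why the clamped return value matters
 * when building a string incrementally, which is the pattern the comment
 * above is guarding. "name" and "txg" are hypothetical.
 *
 *	char buf[64];
 *	size_t off = 0;
 *
 *	off += kmem_scnprintf(buf + off, sizeof (buf) - off,
 *	    "pool=%s ", name);
 *	off += kmem_scnprintf(buf + off, sizeof (buf) - off,
 *	    "txg=%llu", (u_longlong_t)txg);
 *
 * Unlike a raw snprintf() return value, "off" can never walk past the end
 * of the buffer here.
 */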

zfs_file_t *
zfs_onexit_fd_hold(int fd, minor_t *minorp)
{
	(void) fd;
	*minorp = 0;
	return (NULL);
}

void
zfs_onexit_fd_rele(zfs_file_t *fp)
{
	(void) fp;
}

int
zfs_onexit_add_cb(minor_t minor, void (*func)(void *), void *data,
    uintptr_t *action_handle)
{
	(void) minor, (void) func, (void) data, (void) action_handle;
	return (0);
}

fstrans_cookie_t
spl_fstrans_mark(void)
{
	return ((fstrans_cookie_t)0);
}

void
spl_fstrans_unmark(fstrans_cookie_t cookie)
{
	(void) cookie;
}

int
__spl_pf_fstrans_check(void)
{
	return (0);
}

int
kmem_cache_reap_active(void)
{
	return (0);
}

void
zvol_create_minor(const char *name)
{
	(void) name;
}

void
zvol_create_minors_recursive(const char *name)
{
	(void) name;
}

void
zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
{
	(void) spa, (void) name, (void) async;
}

void
zvol_rename_minors(spa_t *spa, const char *oldname, const char *newname,
    boolean_t async)
{
	(void) spa, (void) oldname, (void) newname, (void) async;
}

/*
 * Open file
 *
 * path - fully qualified path to file
 * flags - open(2) flags, e.g. O_RDONLY / O_WRONLY / O_EXCL
 * fpp - pointer to return file pointer
 *
 * Returns 0 on success, underlying error on failure.
 */
int
zfs_file_open(const char *path, int flags, int mode, zfs_file_t **fpp)
{
	int fd = -1;
	int dump_fd = -1;
	int err;
	int old_umask = 0;
	zfs_file_t *fp;
	struct stat64 st;

	if (!(flags & O_CREAT) && stat64(path, &st) == -1)
		return (errno);

	if (!(flags & O_CREAT) && S_ISBLK(st.st_mode))
		flags |= O_DIRECT;

	if (flags & O_CREAT)
		old_umask = umask(0);

	fd = open64(path, flags, mode);
	if (fd == -1)
		return (errno);

	if (flags & O_CREAT)
		(void) umask(old_umask);

	if (vn_dumpdir != NULL) {
		char *dumppath = umem_zalloc(MAXPATHLEN, UMEM_NOFAIL);
		const char *inpath = zfs_basename(path);

		(void) snprintf(dumppath, MAXPATHLEN,
		    "%s/%s", vn_dumpdir, inpath);
		dump_fd = open64(dumppath, O_CREAT | O_WRONLY, 0666);
		umem_free(dumppath, MAXPATHLEN);
		if (dump_fd == -1) {
			err = errno;
			close(fd);
			return (err);
		}
	} else {
		dump_fd = -1;
	}

	(void) fcntl(fd, F_SETFD, FD_CLOEXEC);

	fp = umem_zalloc(sizeof (zfs_file_t), UMEM_NOFAIL);
	fp->f_fd = fd;
	fp->f_dump_fd = dump_fd;
	*fpp = fp;

	return (0);
}

void
zfs_file_close(zfs_file_t *fp)
{
	close(fp->f_fd);
	if (fp->f_dump_fd != -1)
		close(fp->f_dump_fd);

	umem_free(fp, sizeof (zfs_file_t));
}
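
/*
 * Illustrative sketch (assumption): a simple round trip through the
 * zfs_file_*() wrappers defined here, as kernel-style callers use them.
 * The path is a placeholder; zfs_file_pread() is one of the read/write
 * helpers that follow below.
 *
 *	zfs_file_t *fp;
 *	char buf[SPA_MINBLOCKSIZE];
 *	ssize_t resid;
 *	int err;
 *
 *	err = zfs_file_open("/path/to/device", O_RDONLY, 0, &fp);
 *	if (err == 0) {
 *		err = zfs_file_pread(fp, buf, sizeof (buf), 0, &resid);
 *		zfs_file_close(fp);
 *	}
 */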

/*
 * Stateful write - use os internal file pointer to determine where to
 * write and update on successful completion.
 *
 * fp - pointer to file (pipe, socket, etc) to write to
 * buf - buffer to write
 * count - # of bytes to write
 * resid - pointer to count of unwritten bytes (if short write)
 *
 * Returns 0 on success, errno on failure.
 */
int
zfs_file_write(zfs_file_t *fp, const void *buf, size_t count, ssize_t *resid)
{
	ssize_t rc;

	rc = write(fp->f_fd, buf, count);
	if (rc < 0)
		return (errno);

	if (resid) {
		*resid = count - rc;
	} else if (rc != count) {
		return (EIO);
	}

	return (0);
}

/*
 * Stateless write - os internal file pointer is not updated.
 *
 * fp - pointer to file (pipe, socket, etc) to write to
 * buf - buffer to write
 * count - # of bytes to write
 * off - file offset to write to (only valid for seekable types)
 * resid - pointer to count of unwritten bytes
 *
 * Returns 0 on success, errno on failure.
 */
int
zfs_file_pwrite(zfs_file_t *fp, const void *buf,
    size_t count, loff_t pos, ssize_t *resid)
{
	ssize_t rc, split, done;
	int sectors;

	/*
	 * To simulate partial disk writes, we split writes into two
	 * system calls so that the process can be killed in between.
	 * This is used by ztest to simulate realistic failure modes.
	 */
	sectors = count >> SPA_MINBLOCKSHIFT;
	split = (sectors > 0 ? rand() % sectors : 0) << SPA_MINBLOCKSHIFT;
	rc = pwrite64(fp->f_fd, buf, split, pos);
	if (rc != -1) {
		done = rc;
		rc = pwrite64(fp->f_fd, (char *)buf + split,
		    count - split, pos + split);
	}
#ifdef __linux__
	if (rc == -1 && errno == EINVAL) {
		/*
		 * Under Linux, this most likely means an alignment issue
		 * (memory or disk) due to O_DIRECT, so we abort() in order
		 * to catch the offender.
		 */
		abort();
	}
#endif

	if (rc < 0)
		return (errno);

	done += rc;

	if (resid) {
		*resid = count - done;
	} else if (done != count) {
		return (EIO);
	}

	return (0);
}

/*
 * Stateful read - use os internal file pointer to determine where to
 * read and update on successful completion.
 *
 * fp - pointer to file (pipe, socket, etc) to read from
 * buf - buffer to read into
 * count - # of bytes to read
 * resid - pointer to count of unread bytes (if short read)
 *
 * Returns 0 on success, errno on failure.
 */
int
zfs_file_read(zfs_file_t *fp, void *buf, size_t count, ssize_t *resid)
{
	int rc;

	rc = read(fp->f_fd, buf, count);
	if (rc < 0)
		return (errno);

	if (resid) {
		*resid = count - rc;
	} else if (rc != count) {
		return (EIO);
	}

	return (0);
}

/*
 * Stateless read - os internal file pointer is not updated.
 *
 * fp - pointer to file (pipe, socket, etc) to read from
 * buf - buffer to read into
 * count - # of bytes to read
 * off - file offset to read from (only valid for seekable types)
 * resid - pointer to count of unread bytes (if short read)
 *
 * Returns 0 on success, errno on failure.
 */
int
zfs_file_pread(zfs_file_t *fp, void *buf, size_t count, loff_t off,
    ssize_t *resid)
{
	ssize_t rc;

	rc = pread64(fp->f_fd, buf, count, off);
	if (rc < 0) {
#ifdef __linux__
		/*
		 * Under Linux, this most likely means an alignment issue
		 * (memory or disk) due to O_DIRECT, so we abort() in order to
		 * catch the offender.
		 */
		if (errno == EINVAL)
			abort();
#endif
		return (errno);
	}

	if (fp->f_dump_fd != -1) {
		int status;

		status = pwrite64(fp->f_dump_fd, buf, rc, off);
		ASSERT(status != -1);
	}

	if (resid) {
		*resid = count - rc;
	} else if (rc != count) {
		return (EIO);
	}

	return (0);
}
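
/*
 * Illustrative sketch (assumption): handling short transfers via the resid
 * out-parameter of the wrappers above. Passing NULL for resid instead turns
 * any short transfer into EIO.
 *
 *	ssize_t resid = 0;
 *	int err = zfs_file_pread(fp, buf, len, off, &resid);
 *	if (err == 0 && resid != 0) {
 *		only (len - resid) bytes were read, e.g. at end of file
 *	}
 */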

/*
 * lseek - set / get file pointer
 *
 * fp - pointer to file (pipe, socket, etc) to read from
 * offp - value to seek to, returns current value plus passed offset
 * whence - see man pages for standard lseek whence values
 *
 * Returns 0 on success, errno on failure (ESPIPE for non-seekable types)
 */
int
zfs_file_seek(zfs_file_t *fp, loff_t *offp, int whence)
{
	loff_t rc;

	rc = lseek(fp->f_fd, *offp, whence);
	if (rc < 0)
		return (errno);

	*offp = rc;

	return (0);
}

/*
 * Get file attributes
 *
 * filp - file pointer
 * zfattr - pointer to file attr structure
 *
 * Currently only used for fetching size and file mode
 *
 * Returns 0 on success or error code of underlying getattr call on failure.
 */
int
zfs_file_getattr(zfs_file_t *fp, zfs_file_attr_t *zfattr)
{
	struct stat64 st;

	if (fstat64_blk(fp->f_fd, &st) == -1)
		return (errno);

	zfattr->zfa_size = st.st_size;
	zfattr->zfa_mode = st.st_mode;

	return (0);
}

/*
 * Sync file to disk
 *
 * filp - file pointer
 * flags - O_SYNC and or O_DSYNC
 *
 * Returns 0 on success or error code of underlying sync call on failure.
 */
int
zfs_file_fsync(zfs_file_t *fp, int flags)
{
	(void) flags;

	if (fsync(fp->f_fd) < 0)
		return (errno);

	return (0);
}

/*
 * fallocate - allocate or free space on disk
 *
 * fp - file pointer
 * mode (non-standard options for hole punching etc)
 * offset - offset to start allocating or freeing from
 * len - length to free / allocate
 *
 * OPTIONAL
 */
int
zfs_file_fallocate(zfs_file_t *fp, int mode, loff_t offset, loff_t len)
{
#ifdef __linux__
	return (fallocate(fp->f_fd, mode, offset, len));
#else
	(void) fp, (void) mode, (void) offset, (void) len;
	return (EOPNOTSUPP);
#endif
}

/*
 * Request current file pointer offset
 *
 * fp - pointer to file
 *
 * Returns current file offset.
 */
loff_t
zfs_file_off(zfs_file_t *fp)
{
	/* Note: offset and whence must be passed in this order. */
	return (lseek(fp->f_fd, 0, SEEK_CUR));
}

/*
 * unlink file
 *
 * path - fully qualified file path
 *
 * Returns 0 on success.
 *
 * OPTIONAL
 */
int
zfs_file_unlink(const char *path)
{
	return (remove(path));
}

/*
 * Get reference to file pointer
 *
 * fd - input file descriptor
 *
 * Returns pointer to file struct or NULL.
 * Unsupported in user space.
 */
zfs_file_t *
zfs_file_get(int fd)
{
	(void) fd;
	abort();
	return (NULL);
}

/*
 * Drop reference to file pointer
 *
 * fp - pointer to file struct
 *
 * Unsupported in user space.
 */
void
zfs_file_put(zfs_file_t *fp)
{
	abort();
	(void) fp;
}

void
zfsvfs_update_fromname(const char *oldname, const char *newname)
{
	(void) oldname, (void) newname;
}

void
spa_import_os(spa_t *spa)
{
	(void) spa;
}

void
spa_export_os(spa_t *spa)
{
	(void) spa;
}

void
spa_activate_os(spa_t *spa)
{
	(void) spa;
}

void
spa_deactivate_os(spa_t *spa)
{
	(void) spa;
}