1 /* 2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. 3 * Copyright (C) 2007 The Regents of the University of California. 4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>. 6 * UCRL-CODE-235197 7 * 8 * This file is part of the SPL, Solaris Porting Layer. 9 * 10 * The SPL is free software; you can redistribute it and/or modify it 11 * under the terms of the GNU General Public License as published by the 12 * Free Software Foundation; either version 2 of the License, or (at your 13 * option) any later version. 14 * 15 * The SPL is distributed in the hope that it will be useful, but WITHOUT 16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 18 * for more details. 19 * 20 * You should have received a copy of the GNU General Public License along 21 * with the SPL. If not, see <http://www.gnu.org/licenses/>. 22 * 23 * Solaris Porting Layer (SPL) Generic Implementation. 24 */ 25 26 #include <sys/isa_defs.h> 27 #include <sys/sysmacros.h> 28 #include <sys/systeminfo.h> 29 #include <sys/vmsystm.h> 30 #include <sys/kmem.h> 31 #include <sys/kmem_cache.h> 32 #include <sys/vmem.h> 33 #include <sys/mutex.h> 34 #include <sys/rwlock.h> 35 #include <sys/taskq.h> 36 #include <sys/tsd.h> 37 #include <sys/zmod.h> 38 #include <sys/debug.h> 39 #include <sys/proc.h> 40 #include <sys/kstat.h> 41 #include <sys/file.h> 42 #include <sys/sunddi.h> 43 #include <linux/ctype.h> 44 #include <sys/disp.h> 45 #include <sys/random.h> 46 #include <sys/string.h> 47 #include <linux/kmod.h> 48 #include <linux/mod_compat.h> 49 #include <sys/cred.h> 50 #include <sys/vnode.h> 51 #include <sys/misc.h> 52 #include <linux/mod_compat.h> 53 54 unsigned long spl_hostid = 0; 55 EXPORT_SYMBOL(spl_hostid); 56 57 module_param(spl_hostid, ulong, 0644); 58 MODULE_PARM_DESC(spl_hostid, "The system hostid."); 59 60 proc_t p0; 61 EXPORT_SYMBOL(p0); 62 63 /* 64 * xoshiro256++ 1.0 PRNG by David Blackman and Sebastiano Vigna 65 * 66 * "Scrambled Linear Pseudorandom Number Generators∗" 67 * https://vigna.di.unimi.it/ftp/papers/ScrambledLinear.pdf 68 * 69 * random_get_pseudo_bytes() is an API function on Illumos whose sole purpose 70 * is to provide bytes containing random numbers. It is mapped to /dev/urandom 71 * on Illumos, which uses a "FIPS 186-2 algorithm". No user of the SPL's 72 * random_get_pseudo_bytes() needs bytes that are of cryptographic quality, so 73 * we can implement it using a fast PRNG that we seed using Linux' actual 74 * equivalent to random_get_pseudo_bytes(). We do this by providing each CPU 75 * with an independent seed so that all calls to random_get_pseudo_bytes() are 76 * free of atomic instructions. 77 * 78 * A consequence of using a fast PRNG is that using random_get_pseudo_bytes() 79 * to generate words larger than 256 bits will paradoxically be limited to 80 * `2^256 - 1` possibilities. This is because we have a sequence of `2^256 - 1` 81 * 256-bit words and selecting the first will implicitly select the second. If 82 * a caller finds this behavior undesirable, random_get_bytes() should be used 83 * instead. 84 * 85 * XXX: Linux interrupt handlers that trigger within the critical section 86 * formed by `s[3] = xp[3];` and `xp[0] = s[0];` and call this function will 87 * see the same numbers. Nothing in the code currently calls this in an 88 * interrupt handler, so this is considered to be okay. If that becomes a 89 * problem, we could create a set of per-cpu variables for interrupt handlers 90 * and use them when in_interrupt() from linux/preempt_mask.h evaluates to 91 * true. 92 */ 93 static void __percpu *spl_pseudo_entropy; 94 95 /* 96 * rotl()/spl_rand_next()/spl_rand_jump() are copied from the following CC-0 97 * licensed file: 98 * 99 * https://prng.di.unimi.it/xoshiro256plusplus.c 100 */ 101 102 static inline uint64_t rotl(const uint64_t x, int k) 103 { 104 return ((x << k) | (x >> (64 - k))); 105 } 106 107 static inline uint64_t 108 spl_rand_next(uint64_t *s) 109 { 110 const uint64_t result = rotl(s[0] + s[3], 23) + s[0]; 111 112 const uint64_t t = s[1] << 17; 113 114 s[2] ^= s[0]; 115 s[3] ^= s[1]; 116 s[1] ^= s[2]; 117 s[0] ^= s[3]; 118 119 s[2] ^= t; 120 121 s[3] = rotl(s[3], 45); 122 123 return (result); 124 } 125 126 static inline void 127 spl_rand_jump(uint64_t *s) 128 { 129 static const uint64_t JUMP[] = { 0x180ec6d33cfd0aba, 130 0xd5a61266f0c9392c, 0xa9582618e03fc9aa, 0x39abdc4529b1661c }; 131 132 uint64_t s0 = 0; 133 uint64_t s1 = 0; 134 uint64_t s2 = 0; 135 uint64_t s3 = 0; 136 int i, b; 137 for (i = 0; i < sizeof (JUMP) / sizeof (*JUMP); i++) 138 for (b = 0; b < 64; b++) { 139 if (JUMP[i] & 1ULL << b) { 140 s0 ^= s[0]; 141 s1 ^= s[1]; 142 s2 ^= s[2]; 143 s3 ^= s[3]; 144 } 145 (void) spl_rand_next(s); 146 } 147 148 s[0] = s0; 149 s[1] = s1; 150 s[2] = s2; 151 s[3] = s3; 152 } 153 154 int 155 random_get_pseudo_bytes(uint8_t *ptr, size_t len) 156 { 157 uint64_t *xp, s[4]; 158 159 ASSERT(ptr); 160 161 xp = get_cpu_ptr(spl_pseudo_entropy); 162 163 s[0] = xp[0]; 164 s[1] = xp[1]; 165 s[2] = xp[2]; 166 s[3] = xp[3]; 167 168 while (len) { 169 union { 170 uint64_t ui64; 171 uint8_t byte[sizeof (uint64_t)]; 172 }entropy; 173 int i = MIN(len, sizeof (uint64_t)); 174 175 len -= i; 176 entropy.ui64 = spl_rand_next(s); 177 178 /* 179 * xoshiro256++ has low entropy lower bytes, so we copy the 180 * higher order bytes first. 181 */ 182 while (i--) 183 #ifdef _ZFS_BIG_ENDIAN 184 *ptr++ = entropy.byte[i]; 185 #else 186 *ptr++ = entropy.byte[7 - i]; 187 #endif 188 } 189 190 xp[0] = s[0]; 191 xp[1] = s[1]; 192 xp[2] = s[2]; 193 xp[3] = s[3]; 194 195 put_cpu_ptr(spl_pseudo_entropy); 196 197 return (0); 198 } 199 200 201 EXPORT_SYMBOL(random_get_pseudo_bytes); 202 203 #if BITS_PER_LONG == 32 204 205 /* 206 * Support 64/64 => 64 division on a 32-bit platform. While the kernel 207 * provides a div64_u64() function for this we do not use it because the 208 * implementation is flawed. There are cases which return incorrect 209 * results as late as linux-2.6.35. Until this is fixed upstream the 210 * spl must provide its own implementation. 211 * 212 * This implementation is a slightly modified version of the algorithm 213 * proposed by the book 'Hacker's Delight'. The original source can be 214 * found here and is available for use without restriction. 215 * 216 * http://www.hackersdelight.org/HDcode/newCode/divDouble.c 217 */ 218 219 /* 220 * Calculate number of leading of zeros for a 64-bit value. 221 */ 222 static int 223 nlz64(uint64_t x) 224 { 225 register int n = 0; 226 227 if (x == 0) 228 return (64); 229 230 if (x <= 0x00000000FFFFFFFFULL) { n = n + 32; x = x << 32; } 231 if (x <= 0x0000FFFFFFFFFFFFULL) { n = n + 16; x = x << 16; } 232 if (x <= 0x00FFFFFFFFFFFFFFULL) { n = n + 8; x = x << 8; } 233 if (x <= 0x0FFFFFFFFFFFFFFFULL) { n = n + 4; x = x << 4; } 234 if (x <= 0x3FFFFFFFFFFFFFFFULL) { n = n + 2; x = x << 2; } 235 if (x <= 0x7FFFFFFFFFFFFFFFULL) { n = n + 1; } 236 237 return (n); 238 } 239 240 /* 241 * Newer kernels have a div_u64() function but we define our own 242 * to simplify portability between kernel versions. 243 */ 244 static inline uint64_t 245 __div_u64(uint64_t u, uint32_t v) 246 { 247 (void) do_div(u, v); 248 return (u); 249 } 250 251 /* 252 * Turn off missing prototypes warning for these functions. They are 253 * replacements for libgcc-provided functions and will never be called 254 * directly. 255 */ 256 #if defined(__GNUC__) && !defined(__clang__) 257 #pragma GCC diagnostic push 258 #pragma GCC diagnostic ignored "-Wmissing-prototypes" 259 #endif 260 261 /* 262 * Implementation of 64-bit unsigned division for 32-bit machines. 263 * 264 * First the procedure takes care of the case in which the divisor is a 265 * 32-bit quantity. There are two subcases: (1) If the left half of the 266 * dividend is less than the divisor, one execution of do_div() is all that 267 * is required (overflow is not possible). (2) Otherwise it does two 268 * divisions, using the grade school method. 269 */ 270 uint64_t 271 __udivdi3(uint64_t u, uint64_t v) 272 { 273 uint64_t u0, u1, v1, q0, q1, k; 274 int n; 275 276 if (v >> 32 == 0) { // If v < 2**32: 277 if (u >> 32 < v) { // If u/v cannot overflow, 278 return (__div_u64(u, v)); // just do one division. 279 } else { // If u/v would overflow: 280 u1 = u >> 32; // Break u into two halves. 281 u0 = u & 0xFFFFFFFF; 282 q1 = __div_u64(u1, v); // First quotient digit. 283 k = u1 - q1 * v; // First remainder, < v. 284 u0 += (k << 32); 285 q0 = __div_u64(u0, v); // Seconds quotient digit. 286 return ((q1 << 32) + q0); 287 } 288 } else { // If v >= 2**32: 289 n = nlz64(v); // 0 <= n <= 31. 290 v1 = (v << n) >> 32; // Normalize divisor, MSB is 1. 291 u1 = u >> 1; // To ensure no overflow. 292 q1 = __div_u64(u1, v1); // Get quotient from 293 q0 = (q1 << n) >> 31; // Undo normalization and 294 // division of u by 2. 295 if (q0 != 0) // Make q0 correct or 296 q0 = q0 - 1; // too small by 1. 297 if ((u - q0 * v) >= v) 298 q0 = q0 + 1; // Now q0 is correct. 299 300 return (q0); 301 } 302 } 303 EXPORT_SYMBOL(__udivdi3); 304 305 #ifndef abs64 306 /* CSTYLED */ 307 #define abs64(x) ({ uint64_t t = (x) >> 63; ((x) ^ t) - t; }) 308 #endif 309 310 /* 311 * Implementation of 64-bit signed division for 32-bit machines. 312 */ 313 int64_t 314 __divdi3(int64_t u, int64_t v) 315 { 316 int64_t q, t; 317 q = __udivdi3(abs64(u), abs64(v)); 318 t = (u ^ v) >> 63; // If u, v have different 319 return ((q ^ t) - t); // signs, negate q. 320 } 321 EXPORT_SYMBOL(__divdi3); 322 323 /* 324 * Implementation of 64-bit unsigned modulo for 32-bit machines. 325 */ 326 uint64_t 327 __umoddi3(uint64_t dividend, uint64_t divisor) 328 { 329 return (dividend - (divisor * __udivdi3(dividend, divisor))); 330 } 331 EXPORT_SYMBOL(__umoddi3); 332 333 /* 64-bit signed modulo for 32-bit machines. */ 334 int64_t 335 __moddi3(int64_t n, int64_t d) 336 { 337 int64_t q; 338 boolean_t nn = B_FALSE; 339 340 if (n < 0) { 341 nn = B_TRUE; 342 n = -n; 343 } 344 if (d < 0) 345 d = -d; 346 347 q = __umoddi3(n, d); 348 349 return (nn ? -q : q); 350 } 351 EXPORT_SYMBOL(__moddi3); 352 353 /* 354 * Implementation of 64-bit unsigned division/modulo for 32-bit machines. 355 */ 356 uint64_t 357 __udivmoddi4(uint64_t n, uint64_t d, uint64_t *r) 358 { 359 uint64_t q = __udivdi3(n, d); 360 if (r) 361 *r = n - d * q; 362 return (q); 363 } 364 EXPORT_SYMBOL(__udivmoddi4); 365 366 /* 367 * Implementation of 64-bit signed division/modulo for 32-bit machines. 368 */ 369 int64_t 370 __divmoddi4(int64_t n, int64_t d, int64_t *r) 371 { 372 int64_t q, rr; 373 boolean_t nn = B_FALSE; 374 boolean_t nd = B_FALSE; 375 if (n < 0) { 376 nn = B_TRUE; 377 n = -n; 378 } 379 if (d < 0) { 380 nd = B_TRUE; 381 d = -d; 382 } 383 384 q = __udivmoddi4(n, d, (uint64_t *)&rr); 385 386 if (nn != nd) 387 q = -q; 388 if (nn) 389 rr = -rr; 390 if (r) 391 *r = rr; 392 return (q); 393 } 394 EXPORT_SYMBOL(__divmoddi4); 395 396 #if defined(__arm) || defined(__arm__) 397 /* 398 * Implementation of 64-bit (un)signed division for 32-bit arm machines. 399 * 400 * Run-time ABI for the ARM Architecture (page 20). A pair of (unsigned) 401 * long longs is returned in {{r0, r1}, {r2,r3}}, the quotient in {r0, r1}, 402 * and the remainder in {r2, r3}. The return type is specifically left 403 * set to 'void' to ensure the compiler does not overwrite these registers 404 * during the return. All results are in registers as per ABI 405 */ 406 void 407 __aeabi_uldivmod(uint64_t u, uint64_t v) 408 { 409 uint64_t res; 410 uint64_t mod; 411 412 res = __udivdi3(u, v); 413 mod = __umoddi3(u, v); 414 { 415 register uint32_t r0 asm("r0") = (res & 0xFFFFFFFF); 416 register uint32_t r1 asm("r1") = (res >> 32); 417 register uint32_t r2 asm("r2") = (mod & 0xFFFFFFFF); 418 register uint32_t r3 asm("r3") = (mod >> 32); 419 420 asm volatile("" 421 : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3) /* output */ 422 : "r"(r0), "r"(r1), "r"(r2), "r"(r3)); /* input */ 423 424 return; /* r0; */ 425 } 426 } 427 EXPORT_SYMBOL(__aeabi_uldivmod); 428 429 void 430 __aeabi_ldivmod(int64_t u, int64_t v) 431 { 432 int64_t res; 433 uint64_t mod; 434 435 res = __divdi3(u, v); 436 mod = __umoddi3(u, v); 437 { 438 register uint32_t r0 asm("r0") = (res & 0xFFFFFFFF); 439 register uint32_t r1 asm("r1") = (res >> 32); 440 register uint32_t r2 asm("r2") = (mod & 0xFFFFFFFF); 441 register uint32_t r3 asm("r3") = (mod >> 32); 442 443 asm volatile("" 444 : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3) /* output */ 445 : "r"(r0), "r"(r1), "r"(r2), "r"(r3)); /* input */ 446 447 return; /* r0; */ 448 } 449 } 450 EXPORT_SYMBOL(__aeabi_ldivmod); 451 #endif /* __arm || __arm__ */ 452 453 #if defined(__GNUC__) && !defined(__clang__) 454 #pragma GCC diagnostic pop 455 #endif 456 457 #endif /* BITS_PER_LONG */ 458 459 /* 460 * NOTE: The strtoxx behavior is solely based on my reading of the Solaris 461 * ddi_strtol(9F) man page. I have not verified the behavior of these 462 * functions against their Solaris counterparts. It is possible that I 463 * may have misinterpreted the man page or the man page is incorrect. 464 */ 465 int ddi_strtol(const char *, char **, int, long *); 466 int ddi_strtoull(const char *, char **, int, unsigned long long *); 467 int ddi_strtoll(const char *, char **, int, long long *); 468 469 #define define_ddi_strtox(type, valtype) \ 470 int ddi_strto##type(const char *str, char **endptr, \ 471 int base, valtype *result) \ 472 { \ 473 valtype last_value, value = 0; \ 474 char *ptr = (char *)str; \ 475 int digit, minus = 0; \ 476 \ 477 while (strchr(" \t\n\r\f", *ptr)) \ 478 ++ptr; \ 479 \ 480 if (strlen(ptr) == 0) \ 481 return (EINVAL); \ 482 \ 483 switch (*ptr) { \ 484 case '-': \ 485 minus = 1; \ 486 zfs_fallthrough; \ 487 case '+': \ 488 ++ptr; \ 489 break; \ 490 } \ 491 \ 492 /* Auto-detect base based on prefix */ \ 493 if (!base) { \ 494 if (str[0] == '0') { \ 495 if (tolower(str[1]) == 'x' && isxdigit(str[2])) { \ 496 base = 16; /* hex */ \ 497 ptr += 2; \ 498 } else if (str[1] >= '0' && str[1] < '8') { \ 499 base = 8; /* octal */ \ 500 ptr += 1; \ 501 } else { \ 502 return (EINVAL); \ 503 } \ 504 } else { \ 505 base = 10; /* decimal */ \ 506 } \ 507 } \ 508 \ 509 while (1) { \ 510 if (isdigit(*ptr)) \ 511 digit = *ptr - '0'; \ 512 else if (isalpha(*ptr)) \ 513 digit = tolower(*ptr) - 'a' + 10; \ 514 else \ 515 break; \ 516 \ 517 if (digit >= base) \ 518 break; \ 519 \ 520 last_value = value; \ 521 value = value * base + digit; \ 522 if (last_value > value) /* Overflow */ \ 523 return (ERANGE); \ 524 \ 525 ptr++; \ 526 } \ 527 \ 528 *result = minus ? -value : value; \ 529 \ 530 if (endptr) \ 531 *endptr = ptr; \ 532 \ 533 return (0); \ 534 } \ 535 536 define_ddi_strtox(l, long) 537 define_ddi_strtox(ull, unsigned long long) 538 define_ddi_strtox(ll, long long) 539 540 EXPORT_SYMBOL(ddi_strtol); 541 EXPORT_SYMBOL(ddi_strtoll); 542 EXPORT_SYMBOL(ddi_strtoull); 543 544 int 545 ddi_copyin(const void *from, void *to, size_t len, int flags) 546 { 547 /* Fake ioctl() issued by kernel, 'from' is a kernel address */ 548 if (flags & FKIOCTL) { 549 memcpy(to, from, len); 550 return (0); 551 } 552 553 return (copyin(from, to, len)); 554 } 555 EXPORT_SYMBOL(ddi_copyin); 556 557 #define define_spl_param(type, fmt) \ 558 int \ 559 spl_param_get_##type(char *buf, zfs_kernel_param_t *kp) \ 560 { \ 561 return (scnprintf(buf, PAGE_SIZE, fmt "\n", \ 562 *(type *)kp->arg)); \ 563 } \ 564 int \ 565 spl_param_set_##type(const char *buf, zfs_kernel_param_t *kp) \ 566 { \ 567 return (kstrto##type(buf, 0, (type *)kp->arg)); \ 568 } \ 569 const struct kernel_param_ops spl_param_ops_##type = { \ 570 .set = spl_param_set_##type, \ 571 .get = spl_param_get_##type, \ 572 }; \ 573 EXPORT_SYMBOL(spl_param_get_##type); \ 574 EXPORT_SYMBOL(spl_param_set_##type); \ 575 EXPORT_SYMBOL(spl_param_ops_##type); 576 577 define_spl_param(s64, "%lld") 578 define_spl_param(u64, "%llu") 579 580 /* 581 * Post a uevent to userspace whenever a new vdev adds to the pool. It is 582 * necessary to sync blkid information with udev, which zed daemon uses 583 * during device hotplug to identify the vdev. 584 */ 585 void 586 spl_signal_kobj_evt(struct block_device *bdev) 587 { 588 #if defined(HAVE_BDEV_KOBJ) || defined(HAVE_PART_TO_DEV) 589 #ifdef HAVE_BDEV_KOBJ 590 struct kobject *disk_kobj = bdev_kobj(bdev); 591 #else 592 struct kobject *disk_kobj = &part_to_dev(bdev->bd_part)->kobj; 593 #endif 594 if (disk_kobj) { 595 int ret = kobject_uevent(disk_kobj, KOBJ_CHANGE); 596 if (ret) { 597 pr_warn("ZFS: Sending event '%d' to kobject: '%s'" 598 " (%p): failed(ret:%d)\n", KOBJ_CHANGE, 599 kobject_name(disk_kobj), disk_kobj, ret); 600 } 601 } 602 #else 603 /* 604 * This is encountered if neither bdev_kobj() nor part_to_dev() is available 605 * in the kernel - likely due to an API change that needs to be chased down. 606 */ 607 #error "Unsupported kernel: unable to get struct kobj from bdev" 608 #endif 609 } 610 EXPORT_SYMBOL(spl_signal_kobj_evt); 611 612 int 613 ddi_copyout(const void *from, void *to, size_t len, int flags) 614 { 615 /* Fake ioctl() issued by kernel, 'from' is a kernel address */ 616 if (flags & FKIOCTL) { 617 memcpy(to, from, len); 618 return (0); 619 } 620 621 return (copyout(from, to, len)); 622 } 623 EXPORT_SYMBOL(ddi_copyout); 624 625 static int 626 spl_getattr(struct file *filp, struct kstat *stat) 627 { 628 int rc; 629 630 ASSERT(filp); 631 ASSERT(stat); 632 633 rc = vfs_getattr(&filp->f_path, stat, STATX_BASIC_STATS, 634 AT_STATX_SYNC_AS_STAT); 635 if (rc) 636 return (-rc); 637 638 return (0); 639 } 640 641 /* 642 * Read the unique system identifier from the /etc/hostid file. 643 * 644 * The behavior of /usr/bin/hostid on Linux systems with the 645 * regular eglibc and coreutils is: 646 * 647 * 1. Generate the value if the /etc/hostid file does not exist 648 * or if the /etc/hostid file is less than four bytes in size. 649 * 650 * 2. If the /etc/hostid file is at least 4 bytes, then return 651 * the first four bytes [0..3] in native endian order. 652 * 653 * 3. Always ignore bytes [4..] if they exist in the file. 654 * 655 * Only the first four bytes are significant, even on systems that 656 * have a 64-bit word size. 657 * 658 * See: 659 * 660 * eglibc: sysdeps/unix/sysv/linux/gethostid.c 661 * coreutils: src/hostid.c 662 * 663 * Notes: 664 * 665 * The /etc/hostid file on Solaris is a text file that often reads: 666 * 667 * # DO NOT EDIT 668 * "0123456789" 669 * 670 * Directly copying this file to Linux results in a constant 671 * hostid of 4f442023 because the default comment constitutes 672 * the first four bytes of the file. 673 * 674 */ 675 676 static char *spl_hostid_path = HW_HOSTID_PATH; 677 module_param(spl_hostid_path, charp, 0444); 678 MODULE_PARM_DESC(spl_hostid_path, "The system hostid file (/etc/hostid)"); 679 680 static int 681 hostid_read(uint32_t *hostid) 682 { 683 uint64_t size; 684 uint32_t value = 0; 685 int error; 686 loff_t off; 687 struct file *filp; 688 struct kstat stat; 689 690 filp = filp_open(spl_hostid_path, 0, 0); 691 692 if (IS_ERR(filp)) 693 return (ENOENT); 694 695 error = spl_getattr(filp, &stat); 696 if (error) { 697 filp_close(filp, 0); 698 return (error); 699 } 700 size = stat.size; 701 // cppcheck-suppress sizeofwithnumericparameter 702 if (size < sizeof (HW_HOSTID_MASK)) { 703 filp_close(filp, 0); 704 return (EINVAL); 705 } 706 707 off = 0; 708 /* 709 * Read directly into the variable like eglibc does. 710 * Short reads are okay; native behavior is preserved. 711 */ 712 error = kernel_read(filp, &value, sizeof (value), &off); 713 if (error < 0) { 714 filp_close(filp, 0); 715 return (EIO); 716 } 717 718 /* Mask down to 32 bits like coreutils does. */ 719 *hostid = (value & HW_HOSTID_MASK); 720 filp_close(filp, 0); 721 722 return (0); 723 } 724 725 /* 726 * Return the system hostid. Preferentially use the spl_hostid module option 727 * when set, otherwise use the value in the /etc/hostid file. 728 */ 729 uint32_t 730 zone_get_hostid(void *zone) 731 { 732 uint32_t hostid; 733 734 ASSERT3P(zone, ==, NULL); 735 736 if (spl_hostid != 0) 737 return ((uint32_t)(spl_hostid & HW_HOSTID_MASK)); 738 739 if (hostid_read(&hostid) == 0) 740 return (hostid); 741 742 return (0); 743 } 744 EXPORT_SYMBOL(zone_get_hostid); 745 746 static int 747 spl_kvmem_init(void) 748 { 749 int rc = 0; 750 751 rc = spl_kmem_init(); 752 if (rc) 753 return (rc); 754 755 rc = spl_vmem_init(); 756 if (rc) { 757 spl_kmem_fini(); 758 return (rc); 759 } 760 761 return (rc); 762 } 763 764 /* 765 * We initialize the random number generator with 128 bits of entropy from the 766 * system random number generator. In the improbable case that we have a zero 767 * seed, we fallback to the system jiffies, unless it is also zero, in which 768 * situation we use a preprogrammed seed. We step forward by 2^64 iterations to 769 * initialize each of the per-cpu seeds so that the sequences generated on each 770 * CPU are guaranteed to never overlap in practice. 771 */ 772 static int __init 773 spl_random_init(void) 774 { 775 uint64_t s[4]; 776 int i = 0; 777 778 spl_pseudo_entropy = __alloc_percpu(4 * sizeof (uint64_t), 779 sizeof (uint64_t)); 780 781 if (!spl_pseudo_entropy) 782 return (-ENOMEM); 783 784 get_random_bytes(s, sizeof (s)); 785 786 if (s[0] == 0 && s[1] == 0 && s[2] == 0 && s[3] == 0) { 787 if (jiffies != 0) { 788 s[0] = jiffies; 789 s[1] = ~0 - jiffies; 790 s[2] = ~jiffies; 791 s[3] = jiffies - ~0; 792 } else { 793 (void) memcpy(s, "improbable seed", 16); 794 } 795 printk("SPL: get_random_bytes() returned 0 " 796 "when generating random seed. Setting initial seed to " 797 "0x%016llx%016llx%016llx%016llx.\n", cpu_to_be64(s[0]), 798 cpu_to_be64(s[1]), cpu_to_be64(s[2]), cpu_to_be64(s[3])); 799 } 800 801 for_each_possible_cpu(i) { 802 uint64_t *wordp = per_cpu_ptr(spl_pseudo_entropy, i); 803 804 spl_rand_jump(s); 805 806 wordp[0] = s[0]; 807 wordp[1] = s[1]; 808 wordp[2] = s[2]; 809 wordp[3] = s[3]; 810 } 811 812 return (0); 813 } 814 815 static void 816 spl_random_fini(void) 817 { 818 free_percpu(spl_pseudo_entropy); 819 } 820 821 static void 822 spl_kvmem_fini(void) 823 { 824 spl_vmem_fini(); 825 spl_kmem_fini(); 826 } 827 828 static int __init 829 spl_init(void) 830 { 831 int rc = 0; 832 833 if ((rc = spl_random_init())) 834 goto out0; 835 836 if ((rc = spl_kvmem_init())) 837 goto out1; 838 839 if ((rc = spl_tsd_init())) 840 goto out2; 841 842 if ((rc = spl_proc_init())) 843 goto out3; 844 845 if ((rc = spl_kstat_init())) 846 goto out4; 847 848 if ((rc = spl_taskq_init())) 849 goto out5; 850 851 if ((rc = spl_kmem_cache_init())) 852 goto out6; 853 854 if ((rc = spl_zlib_init())) 855 goto out7; 856 857 if ((rc = spl_zone_init())) 858 goto out8; 859 860 return (rc); 861 862 out8: 863 spl_zlib_fini(); 864 out7: 865 spl_kmem_cache_fini(); 866 out6: 867 spl_taskq_fini(); 868 out5: 869 spl_kstat_fini(); 870 out4: 871 spl_proc_fini(); 872 out3: 873 spl_tsd_fini(); 874 out2: 875 spl_kvmem_fini(); 876 out1: 877 spl_random_fini(); 878 out0: 879 return (rc); 880 } 881 882 static void __exit 883 spl_fini(void) 884 { 885 spl_zone_fini(); 886 spl_zlib_fini(); 887 spl_kmem_cache_fini(); 888 spl_taskq_fini(); 889 spl_kstat_fini(); 890 spl_proc_fini(); 891 spl_tsd_fini(); 892 spl_kvmem_fini(); 893 spl_random_fini(); 894 } 895 896 module_init(spl_init); 897 module_exit(spl_fini); 898 899 MODULE_DESCRIPTION("Solaris Porting Layer"); 900 MODULE_AUTHOR(ZFS_META_AUTHOR); 901 MODULE_LICENSE("GPL"); 902 MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE); 903