/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
 * project, to make these variables more userfriendly.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/fail.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>
#include <sys/sbuf.h>
#include <sys/sx.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <net/vnet.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

static MALLOC_DEFINE(M_SYSCTL, "sysctl", "sysctl internal magic");
static MALLOC_DEFINE(M_SYSCTLOID, "sysctloid", "sysctl dynamic oids");
static MALLOC_DEFINE(M_SYSCTLTMP, "sysctltmp", "sysctl temp output buffer");

/*
 * The sysctllock protects the MIB tree.  It also protects sysctl
 * contexts used with dynamic sysctls.  The sysctl_register_oid() and
 * sysctl_unregister_oid() routines require the sysctllock to already
 * be held, so the sysctl_wlock() and sysctl_wunlock() routines are
 * provided for the few places in the kernel which need to use that
 * API rather than using the dynamic API.  Use of the dynamic API is
 * strongly encouraged for most code.
 *
 * The sysctlmemlock is used to limit the amount of user memory wired for
 * sysctl requests.  This is implemented by serializing any userland
 * sysctl requests larger than a single page via an exclusive lock.
 */
static struct rmlock sysctllock;
static struct sx __exclusive_cache_line sysctlmemlock;

#define	SYSCTL_WLOCK()		rm_wlock(&sysctllock)
#define	SYSCTL_WUNLOCK()	rm_wunlock(&sysctllock)
#define	SYSCTL_RLOCK(tracker)	rm_rlock(&sysctllock, (tracker))
#define	SYSCTL_RUNLOCK(tracker)	rm_runlock(&sysctllock, (tracker))
#define	SYSCTL_WLOCKED()	rm_wowned(&sysctllock)
#define	SYSCTL_ASSERT_LOCKED()	rm_assert(&sysctllock, RA_LOCKED)
#define	SYSCTL_ASSERT_WLOCKED()	rm_assert(&sysctllock, RA_WLOCKED)
#define	SYSCTL_ASSERT_RLOCKED()	rm_assert(&sysctllock, RA_RLOCKED)
#define	SYSCTL_INIT()		rm_init_flags(&sysctllock, "sysctl lock", \
				    RM_SLEEPABLE)
#define	SYSCTL_SLEEP(ch, wmesg, timo) \
				rm_sleep(ch, &sysctllock, 0, wmesg, timo)

static int sysctl_root(SYSCTL_HANDLER_ARGS);

/* Root list */
struct sysctl_oid_list sysctl__children = SLIST_HEAD_INITIALIZER(&sysctl__children);

static int	sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del,
		    int recurse);
static int	sysctl_old_kernel(struct sysctl_req *, const void *, size_t);
static int	sysctl_new_kernel(struct sysctl_req *, void *, size_t);

static struct sysctl_oid *
sysctl_find_oidname(const char *name, struct sysctl_oid_list *list)
{
	struct sysctl_oid *oidp;

	SYSCTL_ASSERT_LOCKED();
	SLIST_FOREACH(oidp, list, oid_link) {
		if (strcmp(oidp->oid_name, name) == 0) {
			return (oidp);
		}
	}
	return (NULL);
}

/*
 * Initialization of the MIB tree.
 *
 * Order by number in each list.
 */
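
/*
 * Example (illustrative sketch, not part of the original file): the few
 * callers that register a preallocated struct sysctl_oid directly must
 * bracket the call with the lock functions below; the oid "example_oid"
 * is hypothetical:
 *
 *	extern struct sysctl_oid example_oid;
 *
 *	sysctl_wlock();
 *	sysctl_register_oid(&example_oid);
 *	sysctl_wunlock();
 *
 * As the comment above the lock definitions says, most code should instead
 * use the dynamic API (sysctl_ctx_init() and the SYSCTL_ADD_*() macros),
 * which takes the lock internally.
 */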

void
sysctl_wlock(void)
{

	SYSCTL_WLOCK();
}

void
sysctl_wunlock(void)
{

	SYSCTL_WUNLOCK();
}

static int
sysctl_root_handler_locked(struct sysctl_oid *oid, void *arg1, intmax_t arg2,
    struct sysctl_req *req, struct rm_priotracker *tracker)
{
	int error;

	if (oid->oid_kind & CTLFLAG_DYN)
		atomic_add_int(&oid->oid_running, 1);

	if (tracker != NULL)
		SYSCTL_RUNLOCK(tracker);
	else
		SYSCTL_WUNLOCK();

	if (!(oid->oid_kind & CTLFLAG_MPSAFE))
		mtx_lock(&Giant);
	error = oid->oid_handler(oid, arg1, arg2, req);
	if (!(oid->oid_kind & CTLFLAG_MPSAFE))
		mtx_unlock(&Giant);

	KFAIL_POINT_ERROR(_debug_fail_point, sysctl_running, error);

	if (tracker != NULL)
		SYSCTL_RLOCK(tracker);
	else
		SYSCTL_WLOCK();

	if (oid->oid_kind & CTLFLAG_DYN) {
		if (atomic_fetchadd_int(&oid->oid_running, -1) == 1 &&
		    (oid->oid_kind & CTLFLAG_DYING) != 0)
			wakeup(&oid->oid_running);
	}

	return (error);
}

static void
sysctl_load_tunable_by_oid_locked(struct sysctl_oid *oidp)
{
	struct sysctl_req req;
	struct sysctl_oid *curr;
	char *penv = NULL;
	char path[64];
	ssize_t rem = sizeof(path);
	ssize_t len;
	uint8_t val_8;
	uint16_t val_16;
	uint32_t val_32;
	int val_int;
	long val_long;
	int64_t val_64;
	quad_t val_quad;
	int error;

	path[--rem] = 0;

	for (curr = oidp; curr != NULL; curr = SYSCTL_PARENT(curr)) {
		len = strlen(curr->oid_name);
		rem -= len;
		if (curr != oidp)
			rem -= 1;
		if (rem < 0) {
			printf("OID path exceeds %d bytes\n", (int)sizeof(path));
			return;
		}
		memcpy(path + rem, curr->oid_name, len);
		if (curr != oidp)
			path[rem + len] = '.';
	}

	memset(&req, 0, sizeof(req));

	req.td = curthread;
	req.oldfunc = sysctl_old_kernel;
	req.newfunc = sysctl_new_kernel;
	req.lock = REQ_UNWIRED;

	switch (oidp->oid_kind & CTLTYPE) {
	case CTLTYPE_INT:
		if (getenv_int(path + rem, &val_int) == 0)
			return;
		req.newlen = sizeof(val_int);
		req.newptr = &val_int;
		break;
	case CTLTYPE_UINT:
		if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
			return;
		req.newlen = sizeof(val_int);
		req.newptr = &val_int;
		break;
	case CTLTYPE_LONG:
		if (getenv_long(path + rem, &val_long) == 0)
			return;
		req.newlen = sizeof(val_long);
		req.newptr = &val_long;
		break;
	case CTLTYPE_ULONG:
		if (getenv_ulong(path + rem, (unsigned long *)&val_long) == 0)
			return;
		req.newlen = sizeof(val_long);
		req.newptr = &val_long;
		break;
	case CTLTYPE_S8:
		if (getenv_int(path + rem, &val_int) == 0)
			return;
		val_8 = val_int;
		req.newlen = sizeof(val_8);
		req.newptr = &val_8;
		break;
	case CTLTYPE_S16:
		if (getenv_int(path + rem, &val_int) == 0)
			return;
		val_16 = val_int;
		req.newlen = sizeof(val_16);
		req.newptr = &val_16;
		break;
	case CTLTYPE_S32:
		if (getenv_long(path + rem, &val_long) == 0)
			return;
		val_32 = val_long;
		req.newlen = sizeof(val_32);
		req.newptr = &val_32;
		break;
	case CTLTYPE_S64:
		if (getenv_quad(path + rem, &val_quad) == 0)
			return;
		val_64 = val_quad;
		req.newlen = sizeof(val_64);
		req.newptr = &val_64;
		break;
	case CTLTYPE_U8:
		if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
			return;
		val_8 = val_int;
		req.newlen = sizeof(val_8);
		req.newptr = &val_8;
		break;
	case CTLTYPE_U16:
		if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
			return;
		val_16 = val_int;
		req.newlen = sizeof(val_16);
		req.newptr = &val_16;
		break;
	case CTLTYPE_U32:
		if (getenv_ulong(path + rem, (unsigned long *)&val_long) == 0)
			return;
		val_32 = val_long;
		req.newlen = sizeof(val_32);
		req.newptr = &val_32;
		break;
	case CTLTYPE_U64:
		/* XXX there is no getenv_uquad() */
		if (getenv_quad(path + rem, &val_quad) == 0)
			return;
		val_64 = val_quad;
		req.newlen = sizeof(val_64);
		req.newptr = &val_64;
		break;
	case CTLTYPE_STRING:
		penv = kern_getenv(path + rem);
		if (penv == NULL)
			return;
		req.newlen = strlen(penv);
		req.newptr = penv;
		break;
	default:
		return;
	}
	error = sysctl_root_handler_locked(oidp, oidp->oid_arg1,
	    oidp->oid_arg2, &req, NULL);
	if (error != 0)
		printf("Setting sysctl %s failed: %d\n", path + rem, error);
	if (penv != NULL)
		freeenv(penv);
}

static int
sbuf_printf_drain(void *arg __unused, const char *data, int len)
{

	return (printf("%.*s", len, data));
}

/*
 * Locate the path to a given oid.  Returns the length of the resulting path,
 * or -1 if the oid was not found.  nodes must have room for CTL_MAXNAME
 * elements and be NULL initialized.
 */
static int
sysctl_search_oid(struct sysctl_oid **nodes, struct sysctl_oid *needle)
{
	int indx;

	SYSCTL_ASSERT_LOCKED();
	indx = 0;
	while (indx < CTL_MAXNAME && indx >= 0) {
		if (nodes[indx] == NULL && indx == 0)
			nodes[indx] = SLIST_FIRST(&sysctl__children);
		else if (nodes[indx] == NULL)
			nodes[indx] = SLIST_FIRST(&nodes[indx - 1]->oid_children);
		else
			nodes[indx] = SLIST_NEXT(nodes[indx], oid_link);

		if (nodes[indx] == needle)
			return (indx + 1);

		if (nodes[indx] == NULL) {
			indx--;
			continue;
		}

		if ((nodes[indx]->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			indx++;
			continue;
		}
	}
	return (-1);
}

static void
sysctl_warn_reuse(const char *func, struct sysctl_oid *leaf)
{
	struct sysctl_oid *nodes[CTL_MAXNAME];
	char buf[128];
	struct sbuf sb;
	int rc, i;

	(void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);

	sbuf_printf(&sb, "%s: can't re-use a leaf (", __func__);

	memset(nodes, 0, sizeof(nodes));
	rc = sysctl_search_oid(nodes, leaf);
	if (rc > 0) {
		for (i = 0; i < rc; i++)
			sbuf_printf(&sb, "%s%.*s", nodes[i]->oid_name,
			    i != (rc - 1), ".");
	} else {
		sbuf_printf(&sb, "%s", leaf->oid_name);
	}
	sbuf_printf(&sb, ")!\n");

	(void)sbuf_finish(&sb);
}

#ifdef SYSCTL_DEBUG
static int
sysctl_reuse_test(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;

	SYSCTL_RLOCK(&tracker);
	sysctl_warn_reuse(__func__, oidp);
	SYSCTL_RUNLOCK(&tracker);
	return (0);
}
SYSCTL_PROC(_sysctl, 0, reuse_test, CTLTYPE_STRING|CTLFLAG_RD|CTLFLAG_MPSAFE,
	0, 0, sysctl_reuse_test, "-", "");
#endif

void
sysctl_register_oid(struct sysctl_oid *oidp)
{
	struct sysctl_oid_list *parent = oidp->oid_parent;
	struct sysctl_oid *p;
	struct sysctl_oid *q;
	int oid_number;
	int timeout = 2;

	/*
	 * First check if another oid with the same name already
	 * exists in the parent's list.
	 */
	SYSCTL_ASSERT_WLOCKED();
	p = sysctl_find_oidname(oidp->oid_name, parent);
	if (p != NULL) {
		if ((p->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			p->oid_refcnt++;
			return;
		} else {
			sysctl_warn_reuse(__func__, p);
			return;
		}
	}
	/* get current OID number */
	oid_number = oidp->oid_number;

#if (OID_AUTO >= 0)
#error "OID_AUTO is expected to be a negative value"
#endif
	/*
	 * Any negative OID number qualifies as OID_AUTO. Valid OID
	 * numbers should always be positive.
	 *
	 * NOTE: DO NOT change the starting value here, change it in
	 * <sys/sysctl.h>, and make sure it is at least 256 to
	 * accommodate e.g. net.inet.raw as a static sysctl node.
	 */
	if (oid_number < 0) {
		static int newoid;

		/*
		 * By decrementing the next OID number we spend less
		 * time inserting the OIDs into a sorted list.
		 */
		if (--newoid < CTL_AUTO_START)
			newoid = 0x7fffffff;

		oid_number = newoid;
	}

	/*
	 * Insert the OID into the parent's list sorted by OID number.
	 */
retry:
	q = NULL;
	SLIST_FOREACH(p, parent, oid_link) {
		/* check if the current OID number is in use */
		if (oid_number == p->oid_number) {
			/* get the next valid OID number */
			if (oid_number < CTL_AUTO_START ||
			    oid_number == 0x7fffffff) {
				/* wraparound - restart */
				oid_number = CTL_AUTO_START;
				/* don't loop forever */
				if (!timeout--)
					panic("sysctl: Out of OID numbers\n");
				goto retry;
			} else {
				oid_number++;
			}
		} else if (oid_number < p->oid_number)
			break;
		q = p;
	}
	/* check for non-auto OID number collision */
	if (oidp->oid_number >= 0 && oidp->oid_number < CTL_AUTO_START &&
	    oid_number >= CTL_AUTO_START) {
		printf("sysctl: OID number(%d) is already in use for '%s'\n",
		    oidp->oid_number, oidp->oid_name);
	}
	/* update the OID number, if any */
	oidp->oid_number = oid_number;
	if (q != NULL)
		SLIST_INSERT_AFTER(q, oidp, oid_link);
	else
		SLIST_INSERT_HEAD(parent, oidp, oid_link);

	if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE &&
#ifdef VIMAGE
	    (oidp->oid_kind & CTLFLAG_VNET) == 0 &&
#endif
	    (oidp->oid_kind & CTLFLAG_TUN) != 0 &&
	    (oidp->oid_kind & CTLFLAG_NOFETCH) == 0) {
		/* only fetch value once */
		oidp->oid_kind |= CTLFLAG_NOFETCH;
		/* try to fetch value from kernel environment */
		sysctl_load_tunable_by_oid_locked(oidp);
	}
}
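
/*
 * Example (illustrative sketch, not part of the original file): a read-only
 * tunable declared with CTLFLAG_RDTUN is initialized from the kernel
 * environment by sysctl_load_tunable_by_oid_locked() when the oid is
 * registered.  The name "debug.example_limit" is hypothetical:
 *
 *	static int example_limit = 16;
 *	SYSCTL_INT(_debug, OID_AUTO, example_limit, CTLFLAG_RDTUN,
 *	    &example_limit, 0, "Example limit (settable from loader.conf)");
 *
 * With debug.example_limit="32" in loader.conf(5), the getenv_int() lookup
 * above finds the value and feeds it through the oid's handler much like a
 * sysctl(8) write would.
 */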

void
sysctl_register_disabled_oid(struct sysctl_oid *oidp)
{

	/*
	 * Mark the leaf as dormant if it's not to be immediately enabled.
	 * We do not disable nodes as they can be shared between modules
	 * and it is always safe to access a node.
	 */
	KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) == 0,
	    ("internal flag is set in oid_kind"));
	if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
		oidp->oid_kind |= CTLFLAG_DORMANT;
	sysctl_register_oid(oidp);
}

void
sysctl_enable_oid(struct sysctl_oid *oidp)
{

	SYSCTL_ASSERT_WLOCKED();
	if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) == 0,
		    ("sysctl node is marked as dormant"));
		return;
	}
	KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) != 0,
	    ("enabling already enabled sysctl oid"));
	oidp->oid_kind &= ~CTLFLAG_DORMANT;
}

void
sysctl_unregister_oid(struct sysctl_oid *oidp)
{
	struct sysctl_oid *p;
	int error;

	SYSCTL_ASSERT_WLOCKED();
	error = ENOENT;
	if (oidp->oid_number == OID_AUTO) {
		error = EINVAL;
	} else {
		SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
			if (p == oidp) {
				SLIST_REMOVE(oidp->oid_parent, oidp,
				    sysctl_oid, oid_link);
				error = 0;
				break;
			}
		}
	}

	/*
	 * This can happen when a module fails to register and is
	 * being unloaded afterwards.  It should not be a panic()
	 * for normal use.
	 */
	if (error)
		printf("%s: failed to unregister sysctl\n", __func__);
}

/* Initialize a new context to keep track of dynamically added sysctls. */
int
sysctl_ctx_init(struct sysctl_ctx_list *c)
{

	if (c == NULL) {
		return (EINVAL);
	}

	/*
	 * No locking here, the caller is responsible for not adding
	 * new nodes to a context until after this function has
	 * returned.
	 */
	TAILQ_INIT(c);
	return (0);
}
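
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * consumer keeps one context, adds oids through it at attach time and tears
 * everything down with a single call at detach time.  The names below are
 * hypothetical:
 *
 *	static struct sysctl_ctx_list example_ctx;
 *	static int example_value;
 *
 *	// attach:
 *	sysctl_ctx_init(&example_ctx);
 *	SYSCTL_ADD_INT(&example_ctx, SYSCTL_STATIC_CHILDREN(_debug),
 *	    OID_AUTO, "example_value", CTLFLAG_RW, &example_value, 0,
 *	    "Example knob");
 *
 *	// detach:
 *	if (sysctl_ctx_free(&example_ctx) != 0)
 *		printf("example: sysctl_ctx_free failed\n");
 *
 * sysctl_ctx_free(), below, performs a dry-run removal first and returns
 * EBUSY if any oid in the context cannot be removed.
 */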

/* Free the context, and destroy all dynamic oids registered in this context */
int
sysctl_ctx_free(struct sysctl_ctx_list *clist)
{
	struct sysctl_ctx_entry *e, *e1;
	int error;

	error = 0;
	/*
	 * First perform a "dry run" to check if it's ok to remove oids.
	 * XXX FIXME
	 * XXX This algorithm is a hack. But I don't know any
	 * XXX better solution for now...
	 */
	SYSCTL_WLOCK();
	TAILQ_FOREACH(e, clist, link) {
		error = sysctl_remove_oid_locked(e->entry, 0, 0);
		if (error)
			break;
	}
	/*
	 * Restore deregistered entries, either from the end,
	 * or from the place where error occurred.
	 * e contains the entry that was not unregistered
	 */
	if (error)
		e1 = TAILQ_PREV(e, sysctl_ctx_list, link);
	else
		e1 = TAILQ_LAST(clist, sysctl_ctx_list);
	while (e1 != NULL) {
		sysctl_register_oid(e1->entry);
		e1 = TAILQ_PREV(e1, sysctl_ctx_list, link);
	}
	if (error) {
		SYSCTL_WUNLOCK();
		return(EBUSY);
	}
	/* Now really delete the entries */
	e = TAILQ_FIRST(clist);
	while (e != NULL) {
		e1 = TAILQ_NEXT(e, link);
		error = sysctl_remove_oid_locked(e->entry, 1, 0);
		if (error)
			panic("sysctl_remove_oid: corrupt tree, entry: %s",
			    e->entry->oid_name);
		free(e, M_SYSCTLOID);
		e = e1;
	}
	SYSCTL_WUNLOCK();
	return (error);
}

/* Add an entry to the context */
struct sysctl_ctx_entry *
sysctl_ctx_entry_add(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
{
	struct sysctl_ctx_entry *e;

	SYSCTL_ASSERT_WLOCKED();
	if (clist == NULL || oidp == NULL)
		return(NULL);
	e = malloc(sizeof(struct sysctl_ctx_entry), M_SYSCTLOID, M_WAITOK);
	e->entry = oidp;
	TAILQ_INSERT_HEAD(clist, e, link);
	return (e);
}

/* Find an entry in the context */
struct sysctl_ctx_entry *
sysctl_ctx_entry_find(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
{
	struct sysctl_ctx_entry *e;

	SYSCTL_ASSERT_WLOCKED();
	if (clist == NULL || oidp == NULL)
		return(NULL);
	TAILQ_FOREACH(e, clist, link) {
		if(e->entry == oidp)
			return(e);
	}
	return (e);
}

/*
 * Delete an entry from the context.
 * NOTE: this function doesn't free oidp! You have to remove it
 * with sysctl_remove_oid().
 */
int
sysctl_ctx_entry_del(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
{
	struct sysctl_ctx_entry *e;

	if (clist == NULL || oidp == NULL)
		return (EINVAL);
	SYSCTL_WLOCK();
	e = sysctl_ctx_entry_find(clist, oidp);
	if (e != NULL) {
		TAILQ_REMOVE(clist, e, link);
		SYSCTL_WUNLOCK();
		free(e, M_SYSCTLOID);
		return (0);
	} else {
		SYSCTL_WUNLOCK();
		return (ENOENT);
	}
}

/*
 * Remove dynamically created sysctl trees.
 * oidp - top of the tree to be removed
 * del - if 0 - just deregister, otherwise free up entries as well
 * recurse - if != 0 traverse the subtree to be deleted
 */
int
sysctl_remove_oid(struct sysctl_oid *oidp, int del, int recurse)
{
	int error;

	SYSCTL_WLOCK();
	error = sysctl_remove_oid_locked(oidp, del, recurse);
	SYSCTL_WUNLOCK();
	return (error);
}

int
sysctl_remove_name(struct sysctl_oid *parent, const char *name,
    int del, int recurse)
{
	struct sysctl_oid *p, *tmp;
	int error;

	error = ENOENT;
	SYSCTL_WLOCK();
	SLIST_FOREACH_SAFE(p, SYSCTL_CHILDREN(parent), oid_link, tmp) {
		if (strcmp(p->oid_name, name) == 0) {
			error = sysctl_remove_oid_locked(p, del, recurse);
			break;
		}
	}
	SYSCTL_WUNLOCK();

	return (error);
}


static int
sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del, int recurse)
{
	struct sysctl_oid *p, *tmp;
	int error;

	SYSCTL_ASSERT_WLOCKED();
	if (oidp == NULL)
		return(EINVAL);
	if ((oidp->oid_kind & CTLFLAG_DYN) == 0) {
		printf("Warning: can't remove non-dynamic nodes (%s)!\n",
		    oidp->oid_name);
		return (EINVAL);
	}
	/*
	 * WARNING: normal method to do this should be through
	 * sysctl_ctx_free(). Use recursing as the last resort
	 * method to purge your sysctl tree of leftovers...
	 * However, if some other code still references these nodes,
	 * it will panic.
	 */
	if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		if (oidp->oid_refcnt == 1) {
			SLIST_FOREACH_SAFE(p,
			    SYSCTL_CHILDREN(oidp), oid_link, tmp) {
				if (!recurse) {
					printf("Warning: failed attempt to "
					    "remove oid %s with child %s\n",
					    oidp->oid_name, p->oid_name);
					return (ENOTEMPTY);
				}
				error = sysctl_remove_oid_locked(p, del,
				    recurse);
				if (error)
					return (error);
			}
		}
	}
	if (oidp->oid_refcnt > 1 ) {
		oidp->oid_refcnt--;
	} else {
		if (oidp->oid_refcnt == 0) {
			printf("Warning: bad oid_refcnt=%u (%s)!\n",
			    oidp->oid_refcnt, oidp->oid_name);
			return (EINVAL);
		}
		sysctl_unregister_oid(oidp);
		if (del) {
			/*
			 * Wait for all threads running the handler to drain.
			 * This preserves the previous behavior when the
			 * sysctl lock was held across a handler invocation,
			 * and is necessary for module unload correctness.
			 */
			while (oidp->oid_running > 0) {
				oidp->oid_kind |= CTLFLAG_DYING;
				SYSCTL_SLEEP(&oidp->oid_running, "oidrm", 0);
			}
			if (oidp->oid_descr)
				free(__DECONST(char *, oidp->oid_descr),
				    M_SYSCTLOID);
			if (oidp->oid_label)
				free(__DECONST(char *, oidp->oid_label),
				    M_SYSCTLOID);
			free(__DECONST(char *, oidp->oid_name), M_SYSCTLOID);
			free(oidp, M_SYSCTLOID);
		}
	}
	return (0);
}
/*
 * Create new sysctls at run time.
 * clist may point to a valid context initialized with sysctl_ctx_init().
 */
struct sysctl_oid *
sysctl_add_oid(struct sysctl_ctx_list *clist, struct sysctl_oid_list *parent,
    int number, const char *name, int kind, void *arg1, intmax_t arg2,
    int (*handler)(SYSCTL_HANDLER_ARGS), const char *fmt, const char *descr,
    const char *label)
{
	struct sysctl_oid *oidp;

	/* You have to hook up somewhere.. */
	if (parent == NULL)
		return(NULL);
	/* Check if the node already exists, otherwise create it */
	SYSCTL_WLOCK();
	oidp = sysctl_find_oidname(name, parent);
	if (oidp != NULL) {
		if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			oidp->oid_refcnt++;
			/* Update the context */
			if (clist != NULL)
				sysctl_ctx_entry_add(clist, oidp);
			SYSCTL_WUNLOCK();
			return (oidp);
		} else {
			sysctl_warn_reuse(__func__, oidp);
			SYSCTL_WUNLOCK();
			return (NULL);
		}
	}
	oidp = malloc(sizeof(struct sysctl_oid), M_SYSCTLOID, M_WAITOK|M_ZERO);
	oidp->oid_parent = parent;
	SLIST_INIT(&oidp->oid_children);
	oidp->oid_number = number;
	oidp->oid_refcnt = 1;
	oidp->oid_name = strdup(name, M_SYSCTLOID);
	oidp->oid_handler = handler;
	oidp->oid_kind = CTLFLAG_DYN | kind;
	oidp->oid_arg1 = arg1;
	oidp->oid_arg2 = arg2;
	oidp->oid_fmt = fmt;
	if (descr != NULL)
		oidp->oid_descr = strdup(descr, M_SYSCTLOID);
	if (label != NULL)
		oidp->oid_label = strdup(label, M_SYSCTLOID);
	/* Update the context, if used */
	if (clist != NULL)
		sysctl_ctx_entry_add(clist, oidp);
	/* Register this oid */
	sysctl_register_oid(oidp);
	SYSCTL_WUNLOCK();
	return (oidp);
}

/*
 * Rename an existing oid.
 */
void
sysctl_rename_oid(struct sysctl_oid *oidp, const char *name)
{
	char *newname;
	char *oldname;

	newname = strdup(name, M_SYSCTLOID);
	SYSCTL_WLOCK();
	oldname = __DECONST(char *, oidp->oid_name);
	oidp->oid_name = newname;
	SYSCTL_WUNLOCK();
	free(oldname, M_SYSCTLOID);
}

/*
 * Reparent an existing oid.
 */
int
sysctl_move_oid(struct sysctl_oid *oid, struct sysctl_oid_list *parent)
{
	struct sysctl_oid *oidp;

	SYSCTL_WLOCK();
	if (oid->oid_parent == parent) {
		SYSCTL_WUNLOCK();
		return (0);
	}
	oidp = sysctl_find_oidname(oid->oid_name, parent);
	if (oidp != NULL) {
		SYSCTL_WUNLOCK();
		return (EEXIST);
	}
	sysctl_unregister_oid(oid);
	oid->oid_parent = parent;
	oid->oid_number = OID_AUTO;
	sysctl_register_oid(oid);
	SYSCTL_WUNLOCK();
	return (0);
}

/*
 * Register the kernel's oids on startup.
 */
SET_DECLARE(sysctl_set, struct sysctl_oid);

static void
sysctl_register_all(void *arg)
{
	struct sysctl_oid **oidp;

	sx_init(&sysctlmemlock, "sysctl mem");
	SYSCTL_INIT();
	SYSCTL_WLOCK();
	SET_FOREACH(oidp, sysctl_set)
		sysctl_register_oid(*oidp);
	SYSCTL_WUNLOCK();
}
SYSINIT(sysctl, SI_SUB_KMEM, SI_ORDER_FIRST, sysctl_register_all, 0);

/*
 * "Staff-functions"
 *
 * These functions implement a presently undocumented interface
 * used by the sysctl program to walk the tree, and get the type
 * so it can print the value.
 * This interface is under work and consideration, and should probably
 * be killed with a big axe by the first person who can find the time.
 * (Be aware, though, that the proper interface isn't as obvious as it
 * may seem; there are various conflicting requirements.)
 *
 * {0,0}	printf the entire MIB-tree.
 * {0,1,...}	return the name of the "..." OID.
 * {0,2,...}	return the next OID.
 * {0,3}	return the OID of the name in "new"
 * {0,4,...}	return the kind & format info for the "..." OID.
 * {0,5,...}	return the description of the "..." OID.
 * {0,6,...}	return the aggregation label of the "..." OID.
 */
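
/*
 * Example (illustrative sketch, not part of the original file): userland
 * resolves a name to a numeric OID with {0,3} and can then query the OID
 * itself, which is essentially what sysctlbyname(3) does internally:
 *
 *	int qoid[2] = { 0, 3 };		// sysctl.name2oid
 *	int oid[CTL_MAXNAME];
 *	size_t oidlen = sizeof(oid);
 *
 *	if (sysctl(qoid, 2, oid, &oidlen, "kern.ostype",
 *	    strlen("kern.ostype")) == 0) {
 *		// oid[] now holds the numeric MIB for kern.ostype and can
 *		// be passed back to sysctl(2) to read the value.
 *	}
 */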

#ifdef SYSCTL_DEBUG
static void
sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i)
{
	int k;
	struct sysctl_oid *oidp;

	SYSCTL_ASSERT_LOCKED();
	SLIST_FOREACH(oidp, l, oid_link) {

		for (k=0; k<i; k++)
			printf(" ");

		printf("%d %s ", oidp->oid_number, oidp->oid_name);

		printf("%c%c",
		    oidp->oid_kind & CTLFLAG_RD ? 'R':' ',
		    oidp->oid_kind & CTLFLAG_WR ? 'W':' ');

		if (oidp->oid_handler)
			printf(" *Handler");

		switch (oidp->oid_kind & CTLTYPE) {
		case CTLTYPE_NODE:
			printf(" Node\n");
			if (!oidp->oid_handler) {
				sysctl_sysctl_debug_dump_node(
				    SYSCTL_CHILDREN(oidp), i + 2);
			}
			break;
		case CTLTYPE_INT:    printf(" Int\n"); break;
		case CTLTYPE_UINT:   printf(" u_int\n"); break;
		case CTLTYPE_LONG:   printf(" Long\n"); break;
		case CTLTYPE_ULONG:  printf(" u_long\n"); break;
		case CTLTYPE_STRING: printf(" String\n"); break;
		case CTLTYPE_S8:     printf(" int8_t\n"); break;
		case CTLTYPE_S16:    printf(" int16_t\n"); break;
		case CTLTYPE_S32:    printf(" int32_t\n"); break;
		case CTLTYPE_S64:    printf(" int64_t\n"); break;
		case CTLTYPE_U8:     printf(" uint8_t\n"); break;
		case CTLTYPE_U16:    printf(" uint16_t\n"); break;
		case CTLTYPE_U32:    printf(" uint32_t\n"); break;
		case CTLTYPE_U64:    printf(" uint64_t\n"); break;
		case CTLTYPE_OPAQUE: printf(" Opaque/struct\n"); break;
		default:	     printf("\n");
		}

	}
}

static int
sysctl_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	int error;

	error = priv_check(req->td, PRIV_SYSCTL_DEBUG);
	if (error)
		return (error);
	SYSCTL_RLOCK(&tracker);
	sysctl_sysctl_debug_dump_node(&sysctl__children, 0);
	SYSCTL_RUNLOCK(&tracker);
	return (ENOENT);
}

SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING|CTLFLAG_RD|CTLFLAG_MPSAFE,
	0, 0, sysctl_sysctl_debug, "-", "");
#endif

static int
sysctl_sysctl_name(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *) arg1;
	u_int namelen = arg2;
	int error = 0;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *lsp = &sysctl__children, *lsp2;
	struct rm_priotracker tracker;
	char buf[10];

	SYSCTL_RLOCK(&tracker);
	while (namelen) {
		if (!lsp) {
			snprintf(buf,sizeof(buf),"%d",*name);
			if (req->oldidx)
				error = SYSCTL_OUT(req, ".", 1);
			if (!error)
				error = SYSCTL_OUT(req, buf, strlen(buf));
			if (error)
				goto out;
			namelen--;
			name++;
			continue;
		}
		lsp2 = NULL;
		SLIST_FOREACH(oid, lsp, oid_link) {
			if (oid->oid_number != *name)
				continue;

			if (req->oldidx)
				error = SYSCTL_OUT(req, ".", 1);
			if (!error)
				error = SYSCTL_OUT(req, oid->oid_name,
				    strlen(oid->oid_name));
			if (error)
				goto out;

			namelen--;
			name++;

			if ((oid->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				break;

			if (oid->oid_handler)
				break;

			lsp2 = SYSCTL_CHILDREN(oid);
			break;
		}
		lsp = lsp2;
	}
	error = SYSCTL_OUT(req, "", 1);
 out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}

/*
 * XXXRW/JA: Shouldn't return name data for nodes that we don't permit in
 * capability mode.
 */
static SYSCTL_NODE(_sysctl, 1, name, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
    sysctl_sysctl_name, "");

static int
sysctl_sysctl_next_ls(struct sysctl_oid_list *lsp, int *name, u_int namelen,
    int *next, int *len, int level, struct sysctl_oid **oidpp)
{
	struct sysctl_oid *oidp;

	SYSCTL_ASSERT_LOCKED();
	*len = level;
	SLIST_FOREACH(oidp, lsp, oid_link) {
		*next = oidp->oid_number;
		*oidpp = oidp;

		if ((oidp->oid_kind & (CTLFLAG_SKIP | CTLFLAG_DORMANT)) != 0)
			continue;

		if (!namelen) {
			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				return (0);
			if (oidp->oid_handler)
				/* We really should call the handler here...*/
				return (0);
			lsp = SYSCTL_CHILDREN(oidp);
			if (!sysctl_sysctl_next_ls(lsp, 0, 0, next+1,
			    len, level+1, oidpp))
				return (0);
			goto emptynode;
		}

		if (oidp->oid_number < *name)
			continue;

		if (oidp->oid_number > *name) {
			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				return (0);
			if (oidp->oid_handler)
				return (0);
			lsp = SYSCTL_CHILDREN(oidp);
			if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1,
			    next+1, len, level+1, oidpp))
				return (0);
			goto next;
		}
		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
			continue;

		if (oidp->oid_handler)
			continue;

		lsp = SYSCTL_CHILDREN(oidp);
		if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1, next+1,
		    len, level+1, oidpp))
			return (0);
	next:
		namelen = 1;
	emptynode:
		*len = level;
	}
	return (1);
}

static int
sysctl_sysctl_next(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *) arg1;
	u_int namelen = arg2;
	int i, j, error;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *lsp = &sysctl__children;
	struct rm_priotracker tracker;
	int newoid[CTL_MAXNAME];

	SYSCTL_RLOCK(&tracker);
	i = sysctl_sysctl_next_ls(lsp, name, namelen, newoid, &j, 1, &oid);
	SYSCTL_RUNLOCK(&tracker);
	if (i)
		return (ENOENT);
	error = SYSCTL_OUT(req, newoid, j * sizeof (int));
	return (error);
}

/*
 * XXXRW/JA: Shouldn't return next data for nodes that we don't permit in
 * capability mode.
 */
static SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
    sysctl_sysctl_next, "");

static int
name2oid(char *name, int *oid, int *len, struct sysctl_oid **oidpp)
{
	struct sysctl_oid *oidp;
	struct sysctl_oid_list *lsp = &sysctl__children;
	char *p;

	SYSCTL_ASSERT_LOCKED();

	for (*len = 0; *len < CTL_MAXNAME;) {
		p = strsep(&name, ".");

		oidp = SLIST_FIRST(lsp);
		for (;; oidp = SLIST_NEXT(oidp, oid_link)) {
			if (oidp == NULL)
				return (ENOENT);
			if (strcmp(p, oidp->oid_name) == 0)
				break;
		}
		*oid++ = oidp->oid_number;
		(*len)++;

		if (name == NULL || *name == '\0') {
			if (oidpp)
				*oidpp = oidp;
			return (0);
		}

		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
			break;

		if (oidp->oid_handler)
			break;

		lsp = SYSCTL_CHILDREN(oidp);
	}
	return (ENOENT);
}

static int
sysctl_sysctl_name2oid(SYSCTL_HANDLER_ARGS)
{
	char *p;
	int error, oid[CTL_MAXNAME], len = 0;
	struct sysctl_oid *op = NULL;
	struct rm_priotracker tracker;
	char buf[32];

	if (!req->newlen)
		return (ENOENT);
	if (req->newlen >= MAXPATHLEN)	/* XXX arbitrary, undocumented */
		return (ENAMETOOLONG);

	p = buf;
	if (req->newlen >= sizeof(buf))
		p = malloc(req->newlen+1, M_SYSCTL, M_WAITOK);

	error = SYSCTL_IN(req, p, req->newlen);
	if (error) {
		if (p != buf)
			free(p, M_SYSCTL);
		return (error);
	}

	p [req->newlen] = '\0';

	SYSCTL_RLOCK(&tracker);
	error = name2oid(p, oid, &len, &op);
	SYSCTL_RUNLOCK(&tracker);

	if (p != buf)
		free(p, M_SYSCTL);

	if (error)
		return (error);

	error = SYSCTL_OUT(req, oid, len * sizeof *oid);
	return (error);
}

/*
 * XXXRW/JA: Shouldn't return name2oid data for nodes that we don't permit in
 * capability mode.
 */
SYSCTL_PROC(_sysctl, 3, name2oid,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE
    | CTLFLAG_CAPRW, 0, 0, sysctl_sysctl_name2oid, "I", "");

static int
sysctl_sysctl_oidfmt(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *oid;
	struct rm_priotracker tracker;
	int error;

	SYSCTL_RLOCK(&tracker);
	error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
	if (error)
		goto out;

	if (oid->oid_fmt == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, &oid->oid_kind, sizeof(oid->oid_kind));
	if (error)
		goto out;
	error = SYSCTL_OUT(req, oid->oid_fmt, strlen(oid->oid_fmt) + 1);
 out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}


static SYSCTL_NODE(_sysctl, 4, oidfmt, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
    sysctl_sysctl_oidfmt, "");

static int
sysctl_sysctl_oiddescr(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *oid;
	struct rm_priotracker tracker;
	int error;

	SYSCTL_RLOCK(&tracker);
	error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
	if (error)
		goto out;

	if (oid->oid_descr == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, oid->oid_descr, strlen(oid->oid_descr) + 1);
 out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}

static SYSCTL_NODE(_sysctl, 5, oiddescr, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
    sysctl_sysctl_oiddescr, "");

static int
sysctl_sysctl_oidlabel(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *oid;
	struct rm_priotracker tracker;
	int error;

	SYSCTL_RLOCK(&tracker);
	error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
	if (error)
		goto out;

	if (oid->oid_label == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, oid->oid_label, strlen(oid->oid_label) + 1);
 out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}

static SYSCTL_NODE(_sysctl, 6, oidlabel,
    CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD, sysctl_sysctl_oidlabel, "");

/*
 * Default "handler" functions.
 */

/*
 * Handle a bool.
 * Two cases:
 *	a variable:  point arg1 at it.
 *	a constant:  pass it in arg2.
 */

int
sysctl_handle_bool(SYSCTL_HANDLER_ARGS)
{
	uint8_t temp;
	int error;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		temp = *(bool *)arg1 ? 1 : 0;
	else
		temp = arg2 ? 1 : 0;

	error = SYSCTL_OUT(req, &temp, sizeof(temp));
	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
	else {
		error = SYSCTL_IN(req, &temp, sizeof(temp));
		if (!error)
			*(bool *)arg1 = temp ? 1 : 0;
	}
	return (error);
}

/*
 * Handle an int8_t, signed or unsigned.
 * Two cases:
 *	a variable:  point arg1 at it.
 *	a constant:  pass it in arg2.
 */

int
sysctl_handle_8(SYSCTL_HANDLER_ARGS)
{
	int8_t tmpout;
	int error = 0;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(int8_t *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
	else
		error = SYSCTL_IN(req, arg1, sizeof(tmpout));
	return (error);
}

/*
 * Handle an int16_t, signed or unsigned.
 * Two cases:
 *	a variable:  point arg1 at it.
 *	a constant:  pass it in arg2.
 */

int
sysctl_handle_16(SYSCTL_HANDLER_ARGS)
{
	int16_t tmpout;
	int error = 0;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(int16_t *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
	else
		error = SYSCTL_IN(req, arg1, sizeof(tmpout));
	return (error);
}

/*
 * Handle an int32_t, signed or unsigned.
 * Two cases:
 *	a variable:  point arg1 at it.
 *	a constant:  pass it in arg2.
 */

int
sysctl_handle_32(SYSCTL_HANDLER_ARGS)
{
	int32_t tmpout;
	int error = 0;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(int32_t *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
	else
		error = SYSCTL_IN(req, arg1, sizeof(tmpout));
	return (error);
}

/*
 * Handle an int, signed or unsigned.
 * Two cases:
 *	a variable:  point arg1 at it.
 *	a constant:  pass it in arg2.
 */

int
sysctl_handle_int(SYSCTL_HANDLER_ARGS)
{
	int tmpout, error = 0;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(int *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(int));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
	else
		error = SYSCTL_IN(req, arg1, sizeof(int));
	return (error);
}

/*
 * Based on sysctl_handle_int(), convert milliseconds into ticks.
 * Note: this is used by TCP.
 */

int
sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
{
	int error, s, tt;

	tt = *(int *)arg1;
	s = (int)((int64_t)tt * 1000 / hz);

	error = sysctl_handle_int(oidp, &s, 0, req);
	if (error || !req->newptr)
		return (error);

	tt = (int)((int64_t)s * hz / 1000);
	if (tt < 1)
		return (EINVAL);

	*(int *)arg1 = tt;
	return (0);
}
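
/*
 * Example (illustrative sketch, not part of the original file): a custom
 * SYSCTL_PROC handler usually snapshots the value, lets sysctl_handle_int()
 * do the user copy in both directions, and only then validates and applies
 * the new value.  The names below are hypothetical:
 *
 *	static int example_timeout = 30;
 *
 *	static int
 *	example_timeout_sysctl(SYSCTL_HANDLER_ARGS)
 *	{
 *		int error, val;
 *
 *		val = example_timeout;
 *		error = sysctl_handle_int(oidp, &val, 0, req);
 *		if (error != 0 || req->newptr == NULL)
 *			return (error);
 *		if (val < 1 || val > 3600)
 *			return (EINVAL);
 *		example_timeout = val;
 *		return (0);
 *	}
 *	SYSCTL_PROC(_debug, OID_AUTO, example_timeout,
 *	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
 *	    example_timeout_sysctl, "I", "Example timeout (seconds)");
 */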

/*
 * Handle a long, signed or unsigned.
 * Two cases:
 *	a variable:  point arg1 at it.
 *	a constant:  pass it in arg2.
 */

int
sysctl_handle_long(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	long tmplong;
#ifdef SCTL_MASK32
	int tmpint;
#endif

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmplong = *(long *)arg1;
	else
		tmplong = arg2;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		tmpint = tmplong;
		error = SYSCTL_OUT(req, &tmpint, sizeof(int));
	} else
#endif
		error = SYSCTL_OUT(req, &tmplong, sizeof(long));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
#ifdef SCTL_MASK32
	else if (req->flags & SCTL_MASK32) {
		error = SYSCTL_IN(req, &tmpint, sizeof(int));
		*(long *)arg1 = (long)tmpint;
	}
#endif
	else
		error = SYSCTL_IN(req, arg1, sizeof(long));
	return (error);
}

/*
 * Handle a 64 bit int, signed or unsigned.
 * Two cases:
 *	a variable:  point arg1 at it.
 *	a constant:  pass it in arg2.
 */
int
sysctl_handle_64(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	uint64_t tmpout;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(uint64_t *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(uint64_t));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
	else
		error = SYSCTL_IN(req, arg1, sizeof(uint64_t));
	return (error);
}

/*
 * Handle our generic '\0' terminated 'C' string.
 * Two cases:
 *	a variable string:  point arg1 at it, arg2 is max length.
 *	a constant string:  point arg1 at it, arg2 is zero.
 */

int
sysctl_handle_string(SYSCTL_HANDLER_ARGS)
{
	size_t outlen;
	int error = 0, ro_string = 0;

	/*
	 * A zero-length buffer indicates a fixed size read-only
	 * string:
	 */
	if (arg2 == 0) {
		arg2 = strlen((char *)arg1) + 1;
		ro_string = 1;
	}

	if (req->oldptr != NULL) {
		char *tmparg;

		if (ro_string) {
			tmparg = arg1;
		} else {
			/* try to make a coherent snapshot of the string */
			tmparg = malloc(arg2, M_SYSCTLTMP, M_WAITOK);
			memcpy(tmparg, arg1, arg2);
		}

		outlen = strnlen(tmparg, arg2 - 1) + 1;
		error = SYSCTL_OUT(req, tmparg, outlen);

		if (!ro_string)
			free(tmparg, M_SYSCTLTMP);
	} else {
		outlen = strnlen((char *)arg1, arg2 - 1) + 1;
		error = SYSCTL_OUT(req, NULL, outlen);
	}
	if (error || !req->newptr)
		return (error);

	if ((req->newlen - req->newidx) >= arg2) {
		error = EINVAL;
	} else {
		arg2 = (req->newlen - req->newidx);
		error = SYSCTL_IN(req, arg1, arg2);
		((char *)arg1)[arg2] = '\0';
	}
	return (error);
}
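
/*
 * Example (illustrative sketch, not part of the original file): both forms
 * of string sysctl end up in sysctl_handle_string() above.  The names are
 * hypothetical:
 *
 *	static char example_name[32] = "default";
 *	static char example_version[] = "1.0";
 *
 *	// writable fixed-size buffer: arg2 is the buffer size
 *	SYSCTL_STRING(_debug, OID_AUTO, example_name, CTLFLAG_RW,
 *	    example_name, sizeof(example_name), "Example name");
 *
 *	// arg2 of zero: length is taken from strlen(), read-only by flags
 *	SYSCTL_STRING(_debug, OID_AUTO, example_version, CTLFLAG_RD,
 *	    example_version, 0, "Example version string");
 */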

/*
 * Handle any kind of opaque data.
 * arg1 points to it, arg2 is the size.
 */

int
sysctl_handle_opaque(SYSCTL_HANDLER_ARGS)
{
	int error, tries;
	u_int generation;
	struct sysctl_req req2;

	/*
	 * Attempt to get a coherent snapshot, by using the thread
	 * pre-emption counter updated from within mi_switch() to
	 * determine if we were pre-empted during a bcopy() or
	 * copyout(). Make 3 attempts at doing this before giving up.
	 * If we encounter an error, stop immediately.
	 */
	tries = 0;
	req2 = *req;
retry:
	generation = curthread->td_generation;
	error = SYSCTL_OUT(req, arg1, arg2);
	if (error)
		return (error);
	tries++;
	if (generation != curthread->td_generation && tries < 3) {
		*req = req2;
		goto retry;
	}

	error = SYSCTL_IN(req, arg1, arg2);

	return (error);
}

/*
 * Transfer functions to/from kernel space.
 * XXX: rather untested at this point
 */
static int
sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l)
{
	size_t i = 0;

	if (req->oldptr) {
		i = l;
		if (req->oldlen <= req->oldidx)
			i = 0;
		else
			if (i > req->oldlen - req->oldidx)
				i = req->oldlen - req->oldidx;
		if (i > 0)
			bcopy(p, (char *)req->oldptr + req->oldidx, i);
	}
	req->oldidx += l;
	if (req->oldptr && i != l)
		return (ENOMEM);
	return (0);
}

static int
sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l)
{
	if (!req->newptr)
		return (0);
	if (req->newlen - req->newidx < l)
		return (EINVAL);
	bcopy((char *)req->newptr + req->newidx, p, l);
	req->newidx += l;
	return (0);
}

int
kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
    size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags)
{
	int error = 0;
	struct sysctl_req req;

	bzero(&req, sizeof req);

	req.td = td;
	req.flags = flags;

	if (oldlenp) {
		req.oldlen = *oldlenp;
	}
	req.validlen = req.oldlen;

	if (old) {
		req.oldptr= old;
	}

	if (new != NULL) {
		req.newlen = newlen;
		req.newptr = new;
	}

	req.oldfunc = sysctl_old_kernel;
	req.newfunc = sysctl_new_kernel;
	req.lock = REQ_UNWIRED;

	error = sysctl_root(0, name, namelen, &req);

	if (req.lock == REQ_WIRED && req.validlen > 0)
		vsunlock(req.oldptr, req.validlen);

	if (error && error != ENOMEM)
		return (error);

	if (retval) {
		if (req.oldptr && req.oldidx > req.validlen)
			*retval = req.validlen;
		else
			*retval = req.oldidx;
	}
	return (error);
}

int
kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp,
    void *new, size_t newlen, size_t *retval, int flags)
{
	int oid[CTL_MAXNAME];
	size_t oidlen, plen;
	int error;

	oid[0] = 0;		/* sysctl internal magic */
	oid[1] = 3;		/* name2oid */
	oidlen = sizeof(oid);

	error = kernel_sysctl(td, oid, 2, oid, &oidlen,
	    (void *)name, strlen(name), &plen, flags);
	if (error)
		return (error);

	error = kernel_sysctl(td, oid, plen / sizeof(int), old, oldlenp,
	    new, newlen, retval, flags);
	return (error);
}
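
/*
 * Example (illustrative sketch, not part of the original file): kernel code
 * can read another subsystem's sysctl by name without copyin/copyout:
 *
 *	char name[] = "kern.ostype";
 *	char buf[32];
 *	size_t len = sizeof(buf);
 *	int error;
 *
 *	error = kernel_sysctlbyname(curthread, name, buf, &len,
 *	    NULL, 0, NULL, 0);
 *	if (error == 0) {
 *		// buf holds the NUL-terminated value, len its length
 *	}
 */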

/*
 * Transfer function to/from user space.
 */
static int
sysctl_old_user(struct sysctl_req *req, const void *p, size_t l)
{
	size_t i, len, origidx;
	int error;

	origidx = req->oldidx;
	req->oldidx += l;
	if (req->oldptr == NULL)
		return (0);
	/*
	 * If we have not wired the user supplied buffer and we are currently
	 * holding locks, drop a witness warning, as it's possible that
	 * write operations to the user page can sleep.
	 */
	if (req->lock != REQ_WIRED)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "sysctl_old_user()");
	i = l;
	len = req->validlen;
	if (len <= origidx)
		i = 0;
	else {
		if (i > len - origidx)
			i = len - origidx;
		if (req->lock == REQ_WIRED) {
			error = copyout_nofault(p, (char *)req->oldptr +
			    origidx, i);
		} else
			error = copyout(p, (char *)req->oldptr + origidx, i);
		if (error != 0)
			return (error);
	}
	if (i < l)
		return (ENOMEM);
	return (0);
}

static int
sysctl_new_user(struct sysctl_req *req, void *p, size_t l)
{
	int error;

	if (!req->newptr)
		return (0);
	if (req->newlen - req->newidx < l)
		return (EINVAL);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "sysctl_new_user()");
	error = copyin((char *)req->newptr + req->newidx, p, l);
	req->newidx += l;
	return (error);
}

/*
 * Wire the user space destination buffer.  If set to a value greater than
 * zero, the len parameter limits the maximum amount of wired memory.
 */
int
sysctl_wire_old_buffer(struct sysctl_req *req, size_t len)
{
	int ret;
	size_t wiredlen;

	wiredlen = (len > 0 && len < req->oldlen) ? len : req->oldlen;
	ret = 0;
	if (req->lock != REQ_WIRED && req->oldptr &&
	    req->oldfunc == sysctl_old_user) {
		if (wiredlen != 0) {
			ret = vslock(req->oldptr, wiredlen);
			if (ret != 0) {
				if (ret != ENOMEM)
					return (ret);
				wiredlen = 0;
			}
		}
		req->lock = REQ_WIRED;
		req->validlen = wiredlen;
	}
	return (0);
}
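
/*
 * Example (illustrative sketch, not part of the original file): a handler
 * that must not fault on the user buffer while holding a non-sleepable lock
 * wires the destination first and only then copies out under the lock.  The
 * lock and data structure are hypothetical:
 *
 *	static int
 *	example_stats_sysctl(SYSCTL_HANDLER_ARGS)
 *	{
 *		struct example_stats snap;
 *		int error;
 *
 *		error = sysctl_wire_old_buffer(req, sizeof(snap));
 *		if (error != 0)
 *			return (error);
 *		mtx_lock(&example_mtx);
 *		snap = example_stats;
 *		error = SYSCTL_OUT(req, &snap, sizeof(snap));
 *		mtx_unlock(&example_mtx);
 *		return (error);
 *	}
 *
 * Once wired, sysctl_old_user() above uses copyout_nofault(), so the
 * SYSCTL_OUT() call cannot sleep on a page fault.
 */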

int
sysctl_find_oid(int *name, u_int namelen, struct sysctl_oid **noid,
    int *nindx, struct sysctl_req *req)
{
	struct sysctl_oid_list *lsp;
	struct sysctl_oid *oid;
	int indx;

	SYSCTL_ASSERT_LOCKED();
	lsp = &sysctl__children;
	indx = 0;
	while (indx < CTL_MAXNAME) {
		SLIST_FOREACH(oid, lsp, oid_link) {
			if (oid->oid_number == name[indx])
				break;
		}
		if (oid == NULL)
			return (ENOENT);

		indx++;
		if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			if (oid->oid_handler != NULL || indx == namelen) {
				*noid = oid;
				if (nindx != NULL)
					*nindx = indx;
				KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
				    ("%s found DYING node %p", __func__, oid));
				return (0);
			}
			lsp = SYSCTL_CHILDREN(oid);
		} else if (indx == namelen) {
			if ((oid->oid_kind & CTLFLAG_DORMANT) != 0)
				return (ENOENT);
			*noid = oid;
			if (nindx != NULL)
				*nindx = indx;
			KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
			    ("%s found DYING node %p", __func__, oid));
			return (0);
		} else {
			return (ENOTDIR);
		}
	}
	return (ENOENT);
}

/*
 * Traverse our tree, and find the right node, execute whatever it points
 * to, and return the resulting error code.
 */

static int
sysctl_root(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *oid;
	struct rm_priotracker tracker;
	int error, indx, lvl;

	SYSCTL_RLOCK(&tracker);

	error = sysctl_find_oid(arg1, arg2, &oid, &indx, req);
	if (error)
		goto out;

	if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		/*
		 * You can't call a sysctl when it's a node, but has
		 * no handler.  Inform the user that it's a node.
		 * The indx may or may not be the same as namelen.
		 */
		if (oid->oid_handler == NULL) {
			error = EISDIR;
			goto out;
		}
	}

	/* Is this sysctl writable? */
	if (req->newptr && !(oid->oid_kind & CTLFLAG_WR)) {
		error = EPERM;
		goto out;
	}

	KASSERT(req->td != NULL, ("sysctl_root(): req->td == NULL"));

#ifdef CAPABILITY_MODE
	/*
	 * If the process is in capability mode, then don't permit reading or
	 * writing unless specifically granted for the node.
	 */
	if (IN_CAPABILITY_MODE(req->td)) {
		if ((req->oldptr && !(oid->oid_kind & CTLFLAG_CAPRD)) ||
		    (req->newptr && !(oid->oid_kind & CTLFLAG_CAPWR))) {
			error = EPERM;
			goto out;
		}
	}
#endif

	/* Is this sysctl sensitive to securelevels? */
	if (req->newptr && (oid->oid_kind & CTLFLAG_SECURE)) {
		lvl = (oid->oid_kind & CTLMASK_SECURE) >> CTLSHIFT_SECURE;
		error = securelevel_gt(req->td->td_ucred, lvl);
		if (error)
			goto out;
	}

	/* Is this sysctl writable by only privileged users? */
	if (req->newptr && !(oid->oid_kind & CTLFLAG_ANYBODY)) {
		int priv;

		if (oid->oid_kind & CTLFLAG_PRISON)
			priv = PRIV_SYSCTL_WRITEJAIL;
#ifdef VIMAGE
		else if ((oid->oid_kind & CTLFLAG_VNET) &&
		    prison_owns_vnet(req->td->td_ucred))
			priv = PRIV_SYSCTL_WRITEJAIL;
#endif
		else
			priv = PRIV_SYSCTL_WRITE;
		error = priv_check(req->td, priv);
		if (error)
			goto out;
	}

	if (!oid->oid_handler) {
		error = EINVAL;
		goto out;
	}

	if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		arg1 = (int *)arg1 + indx;
		arg2 -= indx;
	} else {
		arg1 = oid->oid_arg1;
		arg2 = oid->oid_arg2;
	}
#ifdef MAC
	error = mac_system_check_sysctl(req->td->td_ucred, oid, arg1, arg2,
	    req);
	if (error != 0)
		goto out;
#endif
#ifdef VIMAGE
	if ((oid->oid_kind & CTLFLAG_VNET) && arg1 != NULL)
		arg1 = (void *)(curvnet->vnet_data_base + (uintptr_t)arg1);
#endif
	error = sysctl_root_handler_locked(oid, arg1, arg2, req, &tracker);

out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct sysctl_args {
	int	*name;
	u_int	namelen;
	void	*old;
	size_t	*oldlenp;
	void	*new;
	size_t	newlen;
};
#endif
int
sys___sysctl(struct thread *td, struct sysctl_args *uap)
{
	int error, i, name[CTL_MAXNAME];
	size_t j;

	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);

	error = copyin(uap->name, &name, uap->namelen * sizeof(int));
	if (error)
		return (error);

	error = userland_sysctl(td, name, uap->namelen,
	    uap->old, uap->oldlenp, 0,
	    uap->new, uap->newlen, &j, 0);
	if (error && error != ENOMEM)
		return (error);
	if (uap->oldlenp) {
		i = copyout(&j, uap->oldlenp, sizeof(j));
		if (i)
			return (i);
	}
	return (error);
}

/*
 * This is used from various compatibility syscalls too.  That's why name
 * must be in kernel space.
 */
int
userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
    size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval,
    int flags)
{
	int error = 0, memlocked;
	struct sysctl_req req;

	bzero(&req, sizeof req);

	req.td = td;
	req.flags = flags;

	if (oldlenp) {
		if (inkernel) {
			req.oldlen = *oldlenp;
		} else {
			error = copyin(oldlenp, &req.oldlen, sizeof(*oldlenp));
			if (error)
				return (error);
		}
	}
	req.validlen = req.oldlen;
	req.oldptr = old;

	if (new != NULL) {
		req.newlen = newlen;
		req.newptr = new;
	}

	req.oldfunc = sysctl_old_user;
	req.newfunc = sysctl_new_user;
	req.lock = REQ_UNWIRED;

#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_SYSCTL))
		ktrsysctl(name, namelen);
#endif
	memlocked = 0;
	if (req.oldptr && req.oldlen > 4 * PAGE_SIZE) {
		memlocked = 1;
		sx_xlock(&sysctlmemlock);
	}
	CURVNET_SET(TD_TO_VNET(td));

	for (;;) {
		req.oldidx = 0;
		req.newidx = 0;
		error = sysctl_root(0, name, namelen, &req);
		if (error != EAGAIN)
			break;
		kern_yield(PRI_USER);
	}

	CURVNET_RESTORE();

	if (req.lock == REQ_WIRED && req.validlen > 0)
		vsunlock(req.oldptr, req.validlen);
	if (memlocked)
		sx_xunlock(&sysctlmemlock);

	if (error && error != ENOMEM)
		return (error);

	if (retval) {
		if (req.oldptr && req.oldidx > req.validlen)
			*retval = req.validlen;
		else
			*retval = req.oldidx;
	}
	return (error);
}

/*
 * Drain into a sysctl struct.  The user buffer should be wired if a page
 * fault would cause issue.
 */
static int
sbuf_sysctl_drain(void *arg, const char *data, int len)
{
	struct sysctl_req *req = arg;
	int error;

	error = SYSCTL_OUT(req, data, len);
	KASSERT(error >= 0, ("Got unexpected negative value %d", error));
	return (error == 0 ? len : -error);
}

struct sbuf *
sbuf_new_for_sysctl(struct sbuf *s, char *buf, int length,
    struct sysctl_req *req)
{

	/* Supply a default buffer size if none given. */
	if (buf == NULL && length == 0)
		length = 64;
	s = sbuf_new(s, buf, length, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
	sbuf_set_drain(s, sbuf_sysctl_drain, req);
	return (s);
}
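
/*
 * Example (illustrative sketch, not part of the original file): handlers
 * that build variable-length text typically drain an sbuf straight into the
 * request instead of preallocating a buffer.  The handler name is
 * hypothetical:
 *
 *	static int
 *	example_list_sysctl(SYSCTL_HANDLER_ARGS)
 *	{
 *		struct sbuf sb;
 *		int error;
 *
 *		sbuf_new_for_sysctl(&sb, NULL, 128, req);
 *		sbuf_printf(&sb, "hello from %s\n", __func__);
 *		error = sbuf_finish(&sb);
 *		sbuf_delete(&sb);
 *		return (error);
 *	}
 *
 * Output is pushed through sbuf_sysctl_drain() above (i.e. SYSCTL_OUT())
 * whenever the 128-byte chunk fills and again at sbuf_finish().
 */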