/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Karels at Berkeley Software Design, Inc.
 *
 * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
 * project, to make these variables more userfriendly.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/fail.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>
#include <sys/sbuf.h>
#include <sys/sx.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <net/vnet.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

static MALLOC_DEFINE(M_SYSCTL, "sysctl", "sysctl internal magic");
static MALLOC_DEFINE(M_SYSCTLOID, "sysctloid", "sysctl dynamic oids");
static MALLOC_DEFINE(M_SYSCTLTMP, "sysctltmp", "sysctl temp output buffer");

/*
 * The sysctllock protects the MIB tree.  It also protects sysctl
 * contexts used with dynamic sysctls.  The sysctl_register_oid() and
 * sysctl_unregister_oid() routines require the sysctllock to already
 * be held, so the sysctl_wlock() and sysctl_wunlock() routines are
 * provided for the few places in the kernel which need to use that
 * API rather than using the dynamic API.  Use of the dynamic API is
 * strongly encouraged for most code.
 *
 * The sysctlmemlock is used to limit the amount of user memory wired for
 * sysctl requests.  This is implemented by serializing any userland
 * sysctl requests larger than a single page via an exclusive lock.
 */
static struct rmlock sysctllock;
static struct sx __exclusive_cache_line sysctlmemlock;

#define	SYSCTL_WLOCK()		rm_wlock(&sysctllock)
#define	SYSCTL_WUNLOCK()	rm_wunlock(&sysctllock)
#define	SYSCTL_RLOCK(tracker)	rm_rlock(&sysctllock, (tracker))
#define	SYSCTL_RUNLOCK(tracker)	rm_runlock(&sysctllock, (tracker))
#define	SYSCTL_WLOCKED()	rm_wowned(&sysctllock)
#define	SYSCTL_ASSERT_LOCKED()	rm_assert(&sysctllock, RA_LOCKED)
#define	SYSCTL_ASSERT_WLOCKED()	rm_assert(&sysctllock, RA_WLOCKED)
#define	SYSCTL_ASSERT_RLOCKED()	rm_assert(&sysctllock, RA_RLOCKED)
#define	SYSCTL_INIT()		rm_init_flags(&sysctllock, "sysctl lock", \
				    RM_SLEEPABLE)
#define	SYSCTL_SLEEP(ch, wmesg, timo)					\
				rm_sleep(ch, &sysctllock, 0, wmesg, timo)
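
/*
 * Illustrative sketch (not part of this file): typical use of the dynamic
 * API from a module, as encouraged above.  The context records every oid
 * added through it so that sysctl_ctx_free() can tear them all down again
 * on unload.  The names and variables below are hypothetical examples.
 */
#if 0
static int example_value;
static struct sysctl_ctx_list example_ctx;

static void
example_sysctl_init(void)
{

	sysctl_ctx_init(&example_ctx);
	SYSCTL_ADD_INT(&example_ctx, SYSCTL_STATIC_CHILDREN(_debug),
	    OID_AUTO, "example_value", CTLFLAG_RW, &example_value, 0,
	    "Example dynamically created integer sysctl");
}

static void
example_sysctl_fini(void)
{

	/* Removes and frees every oid registered through the context. */
	(void)sysctl_ctx_free(&example_ctx);
}
#endif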

static int sysctl_root(SYSCTL_HANDLER_ARGS);

/* Root list */
struct sysctl_oid_list sysctl__children = SLIST_HEAD_INITIALIZER(&sysctl__children);

static int	sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del,
		    int recurse);
static int	sysctl_old_kernel(struct sysctl_req *, const void *, size_t);
static int	sysctl_new_kernel(struct sysctl_req *, void *, size_t);

static struct sysctl_oid *
sysctl_find_oidname(const char *name, struct sysctl_oid_list *list)
{
	struct sysctl_oid *oidp;

	SYSCTL_ASSERT_LOCKED();
	SLIST_FOREACH(oidp, list, oid_link) {
		if (strcmp(oidp->oid_name, name) == 0) {
			return (oidp);
		}
	}
	return (NULL);
}

/*
 * Initialization of the MIB tree.
 *
 * Order by number in each list.
 */
void
sysctl_wlock(void)
{

	SYSCTL_WLOCK();
}

void
sysctl_wunlock(void)
{

	SYSCTL_WUNLOCK();
}

static int
sysctl_root_handler_locked(struct sysctl_oid *oid, void *arg1, intmax_t arg2,
    struct sysctl_req *req, struct rm_priotracker *tracker)
{
	int error;

	if (oid->oid_kind & CTLFLAG_DYN)
		atomic_add_int(&oid->oid_running, 1);

	if (tracker != NULL)
		SYSCTL_RUNLOCK(tracker);
	else
		SYSCTL_WUNLOCK();

	if (!(oid->oid_kind & CTLFLAG_MPSAFE))
		mtx_lock(&Giant);
	error = oid->oid_handler(oid, arg1, arg2, req);
	if (!(oid->oid_kind & CTLFLAG_MPSAFE))
		mtx_unlock(&Giant);

	KFAIL_POINT_ERROR(_debug_fail_point, sysctl_running, error);

	if (tracker != NULL)
		SYSCTL_RLOCK(tracker);
	else
		SYSCTL_WLOCK();

	if (oid->oid_kind & CTLFLAG_DYN) {
		if (atomic_fetchadd_int(&oid->oid_running, -1) == 1 &&
		    (oid->oid_kind & CTLFLAG_DYING) != 0)
			wakeup(&oid->oid_running);
	}

	return (error);
}

static void
sysctl_load_tunable_by_oid_locked(struct sysctl_oid *oidp)
{
	struct sysctl_req req;
	struct sysctl_oid *curr;
	char *penv = NULL;
	char path[64];
	ssize_t rem = sizeof(path);
	ssize_t len;
	uint8_t val_8;
	uint16_t val_16;
	uint32_t val_32;
	int val_int;
	long val_long;
	int64_t val_64;
	quad_t val_quad;
	int error;

	path[--rem] = 0;

	for (curr = oidp; curr != NULL; curr = SYSCTL_PARENT(curr)) {
		len = strlen(curr->oid_name);
		rem -= len;
		if (curr != oidp)
			rem -= 1;
		if (rem < 0) {
			printf("OID path exceeds %d bytes\n", (int)sizeof(path));
			return;
		}
		memcpy(path + rem, curr->oid_name, len);
		if (curr != oidp)
			path[rem + len] = '.';
	}

	memset(&req, 0, sizeof(req));

	req.td = curthread;
	req.oldfunc = sysctl_old_kernel;
	req.newfunc = sysctl_new_kernel;
	req.lock = REQ_UNWIRED;

	switch (oidp->oid_kind & CTLTYPE) {
	case CTLTYPE_INT:
		if (getenv_int(path + rem, &val_int) == 0)
			return;
		req.newlen = sizeof(val_int);
		req.newptr = &val_int;
		break;
	case CTLTYPE_UINT:
		if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
			return;
		req.newlen = sizeof(val_int);
		req.newptr = &val_int;
		break;
	case CTLTYPE_LONG:
		if (getenv_long(path + rem, &val_long) == 0)
			return;
		req.newlen = sizeof(val_long);
		req.newptr = &val_long;
		break;
	case CTLTYPE_ULONG:
		if (getenv_ulong(path + rem, (unsigned long *)&val_long) == 0)
			return;
		req.newlen = sizeof(val_long);
		req.newptr = &val_long;
		break;
	case CTLTYPE_S8:
		if (getenv_int(path + rem, &val_int) == 0)
			return;
		val_8 = val_int;
		req.newlen = sizeof(val_8);
		req.newptr = &val_8;
		break;
	case CTLTYPE_S16:
		if (getenv_int(path + rem, &val_int) == 0)
			return;
		val_16 = val_int;
		req.newlen = sizeof(val_16);
		req.newptr = &val_16;
		break;
	case CTLTYPE_S32:
		if (getenv_long(path + rem, &val_long) == 0)
			return;
		val_32 = val_long;
		req.newlen = sizeof(val_32);
		req.newptr = &val_32;
		break;
	case CTLTYPE_S64:
		if (getenv_quad(path + rem, &val_quad) == 0)
			return;
		val_64 = val_quad;
		req.newlen = sizeof(val_64);
		req.newptr = &val_64;
		break;
	case CTLTYPE_U8:
		if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
			return;
		val_8 = val_int;
		req.newlen = sizeof(val_8);
		req.newptr = &val_8;
		break;
	case CTLTYPE_U16:
		if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
			return;
		val_16 = val_int;
		req.newlen = sizeof(val_16);
		req.newptr = &val_16;
		break;
	case CTLTYPE_U32:
		if (getenv_ulong(path + rem, (unsigned long *)&val_long) == 0)
			return;
		val_32 = val_long;
		req.newlen = sizeof(val_32);
		req.newptr = &val_32;
		break;
	case CTLTYPE_U64:
		/* XXX there is no getenv_uquad() */
		if (getenv_quad(path + rem, &val_quad) == 0)
			return;
		val_64 = val_quad;
		req.newlen = sizeof(val_64);
		req.newptr = &val_64;
		break;
	case CTLTYPE_STRING:
		penv = kern_getenv(path + rem);
		if (penv == NULL)
			return;
		req.newlen = strlen(penv);
		req.newptr = penv;
		break;
	default:
		return;
	}
	error = sysctl_root_handler_locked(oidp, oidp->oid_arg1,
	    oidp->oid_arg2, &req, NULL);
	if (error != 0)
		printf("Setting sysctl %s failed: %d\n", path + rem, error);
	if (penv != NULL)
		freeenv(penv);
}
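
/*
 * Illustrative sketch (not part of this file): a CTLFLAG_TUN oid whose
 * initial value the function above fetches from the kernel environment
 * (e.g. set in loader.conf) when the oid is registered.  The name and
 * variable are hypothetical examples.
 */
#if 0
static int example_limit = 16;
SYSCTL_INT(_debug, OID_AUTO, example_limit, CTLFLAG_RDTUN, &example_limit, 0,
    "Example tunable, settable as debug.example_limit in loader.conf");
#endif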

static int
sbuf_printf_drain(void *arg __unused, const char *data, int len)
{

	return (printf("%.*s", len, data));
}

/*
 * Locate the path to a given oid.  Returns the length of the resulting path,
 * or -1 if the oid was not found.  nodes must have room for CTL_MAXNAME
 * elements and be NULL initialized.
 */
static int
sysctl_search_oid(struct sysctl_oid **nodes, struct sysctl_oid *needle)
{
	int indx;

	SYSCTL_ASSERT_LOCKED();
	indx = 0;
	while (indx < CTL_MAXNAME && indx >= 0) {
		if (nodes[indx] == NULL && indx == 0)
			nodes[indx] = SLIST_FIRST(&sysctl__children);
		else if (nodes[indx] == NULL)
			nodes[indx] = SLIST_FIRST(&nodes[indx - 1]->oid_children);
		else
			nodes[indx] = SLIST_NEXT(nodes[indx], oid_link);

		if (nodes[indx] == needle)
			return (indx + 1);

		if (nodes[indx] == NULL) {
			indx--;
			continue;
		}

		if ((nodes[indx]->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			indx++;
			continue;
		}
	}
	return (-1);
}

static void
sysctl_warn_reuse(const char *func, struct sysctl_oid *leaf)
{
	struct sysctl_oid *nodes[CTL_MAXNAME];
	char buf[128];
	struct sbuf sb;
	int rc, i;

	(void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);

	sbuf_printf(&sb, "%s: can't re-use a leaf (", __func__);

	memset(nodes, 0, sizeof(nodes));
	rc = sysctl_search_oid(nodes, leaf);
	if (rc > 0) {
		for (i = 0; i < rc; i++)
			sbuf_printf(&sb, "%s%.*s", nodes[i]->oid_name,
			    i != (rc - 1), ".");
	} else {
		sbuf_printf(&sb, "%s", leaf->oid_name);
	}
	sbuf_printf(&sb, ")!\n");

	(void)sbuf_finish(&sb);
}

#ifdef SYSCTL_DEBUG
static int
sysctl_reuse_test(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;

	SYSCTL_RLOCK(&tracker);
	sysctl_warn_reuse(__func__, oidp);
	SYSCTL_RUNLOCK(&tracker);
	return (0);
}
SYSCTL_PROC(_sysctl, 0, reuse_test, CTLTYPE_STRING|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_reuse_test, "-", "");
#endif

void
sysctl_register_oid(struct sysctl_oid *oidp)
{
	struct sysctl_oid_list *parent = oidp->oid_parent;
	struct sysctl_oid *p;
	struct sysctl_oid *q;
	int oid_number;
	int timeout = 2;

	/*
	 * First check if another oid with the same name already
	 * exists in the parent's list.
	 */
	SYSCTL_ASSERT_WLOCKED();
	p = sysctl_find_oidname(oidp->oid_name, parent);
	if (p != NULL) {
		if ((p->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			p->oid_refcnt++;
			return;
		} else {
			sysctl_warn_reuse(__func__, p);
			return;
		}
	}
	/* get current OID number */
	oid_number = oidp->oid_number;

#if (OID_AUTO >= 0)
#error "OID_AUTO is expected to be a negative value"
#endif
	/*
	 * Any negative OID number qualifies as OID_AUTO.  Valid OID
	 * numbers should always be positive.
	 *
	 * NOTE: DO NOT change the starting value here, change it in
	 * <sys/sysctl.h>, and make sure it is at least 256 to
	 * accommodate e.g. net.inet.raw as a static sysctl node.
	 */
	if (oid_number < 0) {
		static int newoid;

		/*
		 * By decrementing the next OID number we spend less
		 * time inserting the OIDs into a sorted list.
		 */
		if (--newoid < CTL_AUTO_START)
			newoid = 0x7fffffff;

		oid_number = newoid;
	}

	/*
	 * Insert the OID into the parent's list sorted by OID number.
	 */
464 */ 465 retry: 466 q = NULL; 467 SLIST_FOREACH(p, parent, oid_link) { 468 /* check if the current OID number is in use */ 469 if (oid_number == p->oid_number) { 470 /* get the next valid OID number */ 471 if (oid_number < CTL_AUTO_START || 472 oid_number == 0x7fffffff) { 473 /* wraparound - restart */ 474 oid_number = CTL_AUTO_START; 475 /* don't loop forever */ 476 if (!timeout--) 477 panic("sysctl: Out of OID numbers\n"); 478 goto retry; 479 } else { 480 oid_number++; 481 } 482 } else if (oid_number < p->oid_number) 483 break; 484 q = p; 485 } 486 /* check for non-auto OID number collision */ 487 if (oidp->oid_number >= 0 && oidp->oid_number < CTL_AUTO_START && 488 oid_number >= CTL_AUTO_START) { 489 printf("sysctl: OID number(%d) is already in use for '%s'\n", 490 oidp->oid_number, oidp->oid_name); 491 } 492 /* update the OID number, if any */ 493 oidp->oid_number = oid_number; 494 if (q != NULL) 495 SLIST_INSERT_AFTER(q, oidp, oid_link); 496 else 497 SLIST_INSERT_HEAD(parent, oidp, oid_link); 498 499 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE && 500 #ifdef VIMAGE 501 (oidp->oid_kind & CTLFLAG_VNET) == 0 && 502 #endif 503 (oidp->oid_kind & CTLFLAG_TUN) != 0 && 504 (oidp->oid_kind & CTLFLAG_NOFETCH) == 0) { 505 /* only fetch value once */ 506 oidp->oid_kind |= CTLFLAG_NOFETCH; 507 /* try to fetch value from kernel environment */ 508 sysctl_load_tunable_by_oid_locked(oidp); 509 } 510 } 511 512 void 513 sysctl_register_disabled_oid(struct sysctl_oid *oidp) 514 { 515 516 /* 517 * Mark the leaf as dormant if it's not to be immediately enabled. 518 * We do not disable nodes as they can be shared between modules 519 * and it is always safe to access a node. 520 */ 521 KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) == 0, 522 ("internal flag is set in oid_kind")); 523 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE) 524 oidp->oid_kind |= CTLFLAG_DORMANT; 525 sysctl_register_oid(oidp); 526 } 527 528 void 529 sysctl_enable_oid(struct sysctl_oid *oidp) 530 { 531 532 SYSCTL_ASSERT_WLOCKED(); 533 if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) { 534 KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) == 0, 535 ("sysctl node is marked as dormant")); 536 return; 537 } 538 KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) != 0, 539 ("enabling already enabled sysctl oid")); 540 oidp->oid_kind &= ~CTLFLAG_DORMANT; 541 } 542 543 void 544 sysctl_unregister_oid(struct sysctl_oid *oidp) 545 { 546 struct sysctl_oid *p; 547 int error; 548 549 SYSCTL_ASSERT_WLOCKED(); 550 error = ENOENT; 551 if (oidp->oid_number == OID_AUTO) { 552 error = EINVAL; 553 } else { 554 SLIST_FOREACH(p, oidp->oid_parent, oid_link) { 555 if (p == oidp) { 556 SLIST_REMOVE(oidp->oid_parent, oidp, 557 sysctl_oid, oid_link); 558 error = 0; 559 break; 560 } 561 } 562 } 563 564 /* 565 * This can happen when a module fails to register and is 566 * being unloaded afterwards. It should not be a panic() 567 * for normal use. 568 */ 569 if (error) 570 printf("%s: failed to unregister sysctl\n", __func__); 571 } 572 573 /* Initialize a new context to keep track of dynamically added sysctls. */ 574 int 575 sysctl_ctx_init(struct sysctl_ctx_list *c) 576 { 577 578 if (c == NULL) { 579 return (EINVAL); 580 } 581 582 /* 583 * No locking here, the caller is responsible for not adding 584 * new nodes to a context until after this function has 585 * returned. 
586 */ 587 TAILQ_INIT(c); 588 return (0); 589 } 590 591 /* Free the context, and destroy all dynamic oids registered in this context */ 592 int 593 sysctl_ctx_free(struct sysctl_ctx_list *clist) 594 { 595 struct sysctl_ctx_entry *e, *e1; 596 int error; 597 598 error = 0; 599 /* 600 * First perform a "dry run" to check if it's ok to remove oids. 601 * XXX FIXME 602 * XXX This algorithm is a hack. But I don't know any 603 * XXX better solution for now... 604 */ 605 SYSCTL_WLOCK(); 606 TAILQ_FOREACH(e, clist, link) { 607 error = sysctl_remove_oid_locked(e->entry, 0, 0); 608 if (error) 609 break; 610 } 611 /* 612 * Restore deregistered entries, either from the end, 613 * or from the place where error occurred. 614 * e contains the entry that was not unregistered 615 */ 616 if (error) 617 e1 = TAILQ_PREV(e, sysctl_ctx_list, link); 618 else 619 e1 = TAILQ_LAST(clist, sysctl_ctx_list); 620 while (e1 != NULL) { 621 sysctl_register_oid(e1->entry); 622 e1 = TAILQ_PREV(e1, sysctl_ctx_list, link); 623 } 624 if (error) { 625 SYSCTL_WUNLOCK(); 626 return(EBUSY); 627 } 628 /* Now really delete the entries */ 629 e = TAILQ_FIRST(clist); 630 while (e != NULL) { 631 e1 = TAILQ_NEXT(e, link); 632 error = sysctl_remove_oid_locked(e->entry, 1, 0); 633 if (error) 634 panic("sysctl_remove_oid: corrupt tree, entry: %s", 635 e->entry->oid_name); 636 free(e, M_SYSCTLOID); 637 e = e1; 638 } 639 SYSCTL_WUNLOCK(); 640 return (error); 641 } 642 643 /* Add an entry to the context */ 644 struct sysctl_ctx_entry * 645 sysctl_ctx_entry_add(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp) 646 { 647 struct sysctl_ctx_entry *e; 648 649 SYSCTL_ASSERT_WLOCKED(); 650 if (clist == NULL || oidp == NULL) 651 return(NULL); 652 e = malloc(sizeof(struct sysctl_ctx_entry), M_SYSCTLOID, M_WAITOK); 653 e->entry = oidp; 654 TAILQ_INSERT_HEAD(clist, e, link); 655 return (e); 656 } 657 658 /* Find an entry in the context */ 659 struct sysctl_ctx_entry * 660 sysctl_ctx_entry_find(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp) 661 { 662 struct sysctl_ctx_entry *e; 663 664 SYSCTL_ASSERT_WLOCKED(); 665 if (clist == NULL || oidp == NULL) 666 return(NULL); 667 TAILQ_FOREACH(e, clist, link) { 668 if(e->entry == oidp) 669 return(e); 670 } 671 return (e); 672 } 673 674 /* 675 * Delete an entry from the context. 676 * NOTE: this function doesn't free oidp! You have to remove it 677 * with sysctl_remove_oid(). 678 */ 679 int 680 sysctl_ctx_entry_del(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp) 681 { 682 struct sysctl_ctx_entry *e; 683 684 if (clist == NULL || oidp == NULL) 685 return (EINVAL); 686 SYSCTL_WLOCK(); 687 e = sysctl_ctx_entry_find(clist, oidp); 688 if (e != NULL) { 689 TAILQ_REMOVE(clist, e, link); 690 SYSCTL_WUNLOCK(); 691 free(e, M_SYSCTLOID); 692 return (0); 693 } else { 694 SYSCTL_WUNLOCK(); 695 return (ENOENT); 696 } 697 } 698 699 /* 700 * Remove dynamically created sysctl trees. 
 * oidp - top of the tree to be removed
 * del - if 0 - just deregister, otherwise free up entries as well
 * recurse - if != 0 traverse the subtree to be deleted
 */
int
sysctl_remove_oid(struct sysctl_oid *oidp, int del, int recurse)
{
	int error;

	SYSCTL_WLOCK();
	error = sysctl_remove_oid_locked(oidp, del, recurse);
	SYSCTL_WUNLOCK();
	return (error);
}

int
sysctl_remove_name(struct sysctl_oid *parent, const char *name,
    int del, int recurse)
{
	struct sysctl_oid *p, *tmp;
	int error;

	error = ENOENT;
	SYSCTL_WLOCK();
	SLIST_FOREACH_SAFE(p, SYSCTL_CHILDREN(parent), oid_link, tmp) {
		if (strcmp(p->oid_name, name) == 0) {
			error = sysctl_remove_oid_locked(p, del, recurse);
			break;
		}
	}
	SYSCTL_WUNLOCK();

	return (error);
}


static int
sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del, int recurse)
{
	struct sysctl_oid *p, *tmp;
	int error;

	SYSCTL_ASSERT_WLOCKED();
	if (oidp == NULL)
		return(EINVAL);
	if ((oidp->oid_kind & CTLFLAG_DYN) == 0) {
		printf("Warning: can't remove non-dynamic nodes (%s)!\n",
		    oidp->oid_name);
		return (EINVAL);
	}
	/*
	 * WARNING: normal method to do this should be through
	 * sysctl_ctx_free(). Use recursing as the last resort
	 * method to purge your sysctl tree of leftovers...
	 * However, if some other code still references these nodes,
	 * it will panic.
	 */
	if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		if (oidp->oid_refcnt == 1) {
			SLIST_FOREACH_SAFE(p,
			    SYSCTL_CHILDREN(oidp), oid_link, tmp) {
				if (!recurse) {
					printf("Warning: failed attempt to "
					    "remove oid %s with child %s\n",
					    oidp->oid_name, p->oid_name);
					return (ENOTEMPTY);
				}
				error = sysctl_remove_oid_locked(p, del,
				    recurse);
				if (error)
					return (error);
			}
		}
	}
	if (oidp->oid_refcnt > 1 ) {
		oidp->oid_refcnt--;
	} else {
		if (oidp->oid_refcnt == 0) {
			printf("Warning: bad oid_refcnt=%u (%s)!\n",
			    oidp->oid_refcnt, oidp->oid_name);
			return (EINVAL);
		}
		sysctl_unregister_oid(oidp);
		if (del) {
			/*
			 * Wait for all threads running the handler to drain.
			 * This preserves the previous behavior when the
			 * sysctl lock was held across a handler invocation,
			 * and is necessary for module unload correctness.
			 */
			while (oidp->oid_running > 0) {
				oidp->oid_kind |= CTLFLAG_DYING;
				SYSCTL_SLEEP(&oidp->oid_running, "oidrm", 0);
			}
			if (oidp->oid_descr)
				free(__DECONST(char *, oidp->oid_descr),
				    M_SYSCTLOID);
			if (oidp->oid_label)
				free(__DECONST(char *, oidp->oid_label),
				    M_SYSCTLOID);
			free(__DECONST(char *, oidp->oid_name), M_SYSCTLOID);
			free(oidp, M_SYSCTLOID);
		}
	}
	return (0);
}
/*
 * Create new sysctls at run time.
 * clist may point to a valid context initialized with sysctl_ctx_init().
 */
struct sysctl_oid *
sysctl_add_oid(struct sysctl_ctx_list *clist, struct sysctl_oid_list *parent,
    int number, const char *name, int kind, void *arg1, intmax_t arg2,
    int (*handler)(SYSCTL_HANDLER_ARGS), const char *fmt, const char *descr,
    const char *label)
{
	struct sysctl_oid *oidp;

	/* You have to hook up somewhere.. */
	if (parent == NULL)
		return(NULL);
	/* Check if the node already exists, otherwise create it */
	SYSCTL_WLOCK();
	oidp = sysctl_find_oidname(name, parent);
	if (oidp != NULL) {
		if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			oidp->oid_refcnt++;
			/* Update the context */
			if (clist != NULL)
				sysctl_ctx_entry_add(clist, oidp);
			SYSCTL_WUNLOCK();
			return (oidp);
		} else {
			sysctl_warn_reuse(__func__, oidp);
			SYSCTL_WUNLOCK();
			return (NULL);
		}
	}
	oidp = malloc(sizeof(struct sysctl_oid), M_SYSCTLOID, M_WAITOK|M_ZERO);
	oidp->oid_parent = parent;
	SLIST_INIT(&oidp->oid_children);
	oidp->oid_number = number;
	oidp->oid_refcnt = 1;
	oidp->oid_name = strdup(name, M_SYSCTLOID);
	oidp->oid_handler = handler;
	oidp->oid_kind = CTLFLAG_DYN | kind;
	oidp->oid_arg1 = arg1;
	oidp->oid_arg2 = arg2;
	oidp->oid_fmt = fmt;
	if (descr != NULL)
		oidp->oid_descr = strdup(descr, M_SYSCTLOID);
	if (label != NULL)
		oidp->oid_label = strdup(label, M_SYSCTLOID);
	/* Update the context, if used */
	if (clist != NULL)
		sysctl_ctx_entry_add(clist, oidp);
	/* Register this oid */
	sysctl_register_oid(oidp);
	SYSCTL_WUNLOCK();
	return (oidp);
}

/*
 * Rename an existing oid.
 */
void
sysctl_rename_oid(struct sysctl_oid *oidp, const char *name)
{
	char *newname;
	char *oldname;

	newname = strdup(name, M_SYSCTLOID);
	SYSCTL_WLOCK();
	oldname = __DECONST(char *, oidp->oid_name);
	oidp->oid_name = newname;
	SYSCTL_WUNLOCK();
	free(oldname, M_SYSCTLOID);
}

/*
 * Reparent an existing oid.
 */
int
sysctl_move_oid(struct sysctl_oid *oid, struct sysctl_oid_list *parent)
{
	struct sysctl_oid *oidp;

	SYSCTL_WLOCK();
	if (oid->oid_parent == parent) {
		SYSCTL_WUNLOCK();
		return (0);
	}
	oidp = sysctl_find_oidname(oid->oid_name, parent);
	if (oidp != NULL) {
		SYSCTL_WUNLOCK();
		return (EEXIST);
	}
	sysctl_unregister_oid(oid);
	oid->oid_parent = parent;
	oid->oid_number = OID_AUTO;
	sysctl_register_oid(oid);
	SYSCTL_WUNLOCK();
	return (0);
}

/*
 * Register the kernel's oids on startup.
 */
SET_DECLARE(sysctl_set, struct sysctl_oid);

static void
sysctl_register_all(void *arg)
{
	struct sysctl_oid **oidp;

	sx_init(&sysctlmemlock, "sysctl mem");
	SYSCTL_INIT();
	SYSCTL_WLOCK();
	SET_FOREACH(oidp, sysctl_set)
		sysctl_register_oid(*oidp);
	SYSCTL_WUNLOCK();
}
SYSINIT(sysctl, SI_SUB_KMEM, SI_ORDER_FIRST, sysctl_register_all, 0);

/*
 * "Staff-functions"
 *
 * These functions implement a presently undocumented interface
 * used by the sysctl program to walk the tree, and get the type
 * so it can print the value.
 * This interface is under work and consideration, and should probably
 * be killed with a big axe by the first person who can find the time.
 * (Be aware though, that the proper interface isn't as obvious as it
 * may seem; there are various conflicting requirements.)
 *
 * {0,0}	printf the entire MIB-tree.
 * {0,1,...}	return the name of the "..." OID.
 * {0,2,...}	return the next OID.
 * {0,3}	return the OID of the name in "new"
 * {0,4,...}	return the kind & format info for the "..." OID.
 * {0,5,...}	return the description of the "..." OID.
 * {0,6,...}	return the aggregation label of the "..." OID.
 */
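
/*
 * Illustrative sketch (not part of this file): how a userland consumer such
 * as sysctl(8) uses the {0,4,...} "oidfmt" node described above to fetch the
 * kind and format string of an already resolved oid.  "oid" and "oidlen" are
 * hypothetical variables holding the numeric OID; error handling is omitted.
 */
#if 0
	int qoid[CTL_MAXNAME + 2];
	u_char buf[1024];
	size_t buflen = sizeof(buf);

	qoid[0] = 0;			/* sysctl internal magic */
	qoid[1] = 4;			/* oidfmt */
	memcpy(qoid + 2, oid, oidlen * sizeof(int));
	if (sysctl(qoid, oidlen + 2, buf, &buflen, NULL, 0) == 0) {
		/* buf begins with oid_kind, followed by the format string. */
	}
#endif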

#ifdef SYSCTL_DEBUG
static void
sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i)
{
	int k;
	struct sysctl_oid *oidp;

	SYSCTL_ASSERT_LOCKED();
	SLIST_FOREACH(oidp, l, oid_link) {

		for (k=0; k<i; k++)
			printf(" ");

		printf("%d %s ", oidp->oid_number, oidp->oid_name);

		printf("%c%c",
		    oidp->oid_kind & CTLFLAG_RD ? 'R':' ',
		    oidp->oid_kind & CTLFLAG_WR ? 'W':' ');

		if (oidp->oid_handler)
			printf(" *Handler");

		switch (oidp->oid_kind & CTLTYPE) {
		case CTLTYPE_NODE:
			printf(" Node\n");
			if (!oidp->oid_handler) {
				sysctl_sysctl_debug_dump_node(
				    SYSCTL_CHILDREN(oidp), i + 2);
			}
			break;
		case CTLTYPE_INT:    printf(" Int\n"); break;
		case CTLTYPE_UINT:   printf(" u_int\n"); break;
		case CTLTYPE_LONG:   printf(" Long\n"); break;
		case CTLTYPE_ULONG:  printf(" u_long\n"); break;
		case CTLTYPE_STRING: printf(" String\n"); break;
		case CTLTYPE_S8:     printf(" int8_t\n"); break;
		case CTLTYPE_S16:    printf(" int16_t\n"); break;
		case CTLTYPE_S32:    printf(" int32_t\n"); break;
		case CTLTYPE_S64:    printf(" int64_t\n"); break;
		case CTLTYPE_U8:     printf(" uint8_t\n"); break;
		case CTLTYPE_U16:    printf(" uint16_t\n"); break;
		case CTLTYPE_U32:    printf(" uint32_t\n"); break;
		case CTLTYPE_U64:    printf(" uint64_t\n"); break;
		case CTLTYPE_OPAQUE: printf(" Opaque/struct\n"); break;
		default:	     printf("\n");
		}

	}
}

static int
sysctl_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	int error;

	error = priv_check(req->td, PRIV_SYSCTL_DEBUG);
	if (error)
		return (error);
	SYSCTL_RLOCK(&tracker);
	sysctl_sysctl_debug_dump_node(&sysctl__children, 0);
	SYSCTL_RUNLOCK(&tracker);
	return (ENOENT);
}

SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_sysctl_debug, "-", "");
#endif

static int
sysctl_sysctl_name(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *) arg1;
	u_int namelen = arg2;
	int error = 0;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *lsp = &sysctl__children, *lsp2;
	struct rm_priotracker tracker;
	char buf[10];

	SYSCTL_RLOCK(&tracker);
	while (namelen) {
		if (!lsp) {
			snprintf(buf,sizeof(buf),"%d",*name);
			if (req->oldidx)
				error = SYSCTL_OUT(req, ".", 1);
			if (!error)
				error = SYSCTL_OUT(req, buf, strlen(buf));
			if (error)
				goto out;
			namelen--;
			name++;
			continue;
		}
		lsp2 = NULL;
		SLIST_FOREACH(oid, lsp, oid_link) {
			if (oid->oid_number != *name)
				continue;

			if (req->oldidx)
				error = SYSCTL_OUT(req, ".", 1);
			if (!error)
				error = SYSCTL_OUT(req, oid->oid_name,
				    strlen(oid->oid_name));
			if (error)
				goto out;

			namelen--;
			name++;

			if ((oid->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				break;

			if (oid->oid_handler)
				break;

			lsp2 = SYSCTL_CHILDREN(oid);
			break;
		}
		lsp = lsp2;
	}
	error = SYSCTL_OUT(req, "", 1);
out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}

/*
 * XXXRW/JA: Shouldn't return name data for nodes that we don't permit in
 * capability mode.
 */
static SYSCTL_NODE(_sysctl, 1, name, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
    sysctl_sysctl_name, "");

static int
sysctl_sysctl_next_ls(struct sysctl_oid_list *lsp, int *name, u_int namelen,
    int *next, int *len, int level, struct sysctl_oid **oidpp)
{
	struct sysctl_oid *oidp;

	SYSCTL_ASSERT_LOCKED();
	*len = level;
	SLIST_FOREACH(oidp, lsp, oid_link) {
		*next = oidp->oid_number;
		*oidpp = oidp;

		if ((oidp->oid_kind & (CTLFLAG_SKIP | CTLFLAG_DORMANT)) != 0)
			continue;

		if (!namelen) {
			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				return (0);
			if (oidp->oid_handler)
				/* We really should call the handler here...*/
				return (0);
			lsp = SYSCTL_CHILDREN(oidp);
			if (!sysctl_sysctl_next_ls(lsp, 0, 0, next+1,
			    len, level+1, oidpp))
				return (0);
			goto emptynode;
		}

		if (oidp->oid_number < *name)
			continue;

		if (oidp->oid_number > *name) {
			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				return (0);
			if (oidp->oid_handler)
				return (0);
			lsp = SYSCTL_CHILDREN(oidp);
			if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1,
			    next+1, len, level+1, oidpp))
				return (0);
			goto next;
		}
		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
			continue;

		if (oidp->oid_handler)
			continue;

		lsp = SYSCTL_CHILDREN(oidp);
		if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1, next+1,
		    len, level+1, oidpp))
			return (0);
	next:
		namelen = 1;
	emptynode:
		*len = level;
	}
	return (1);
}

static int
sysctl_sysctl_next(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *) arg1;
	u_int namelen = arg2;
	int i, j, error;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *lsp = &sysctl__children;
	struct rm_priotracker tracker;
	int newoid[CTL_MAXNAME];

	SYSCTL_RLOCK(&tracker);
	i = sysctl_sysctl_next_ls(lsp, name, namelen, newoid, &j, 1, &oid);
	SYSCTL_RUNLOCK(&tracker);
	if (i)
		return (ENOENT);
	error = SYSCTL_OUT(req, newoid, j * sizeof (int));
	return (error);
}

/*
 * XXXRW/JA: Shouldn't return next data for nodes that we don't permit in
 * capability mode.
 */
static SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
    sysctl_sysctl_next, "");

static int
name2oid(char *name, int *oid, int *len, struct sysctl_oid **oidpp)
{
	struct sysctl_oid *oidp;
	struct sysctl_oid_list *lsp = &sysctl__children;
	char *p;

	SYSCTL_ASSERT_LOCKED();

	for (*len = 0; *len < CTL_MAXNAME;) {
		p = strsep(&name, ".");

		oidp = SLIST_FIRST(lsp);
		for (;; oidp = SLIST_NEXT(oidp, oid_link)) {
			if (oidp == NULL)
				return (ENOENT);
			if (strcmp(p, oidp->oid_name) == 0)
				break;
		}
		*oid++ = oidp->oid_number;
		(*len)++;

		if (name == NULL || *name == '\0') {
			if (oidpp)
				*oidpp = oidp;
			return (0);
		}

		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
			break;

		if (oidp->oid_handler)
			break;

		lsp = SYSCTL_CHILDREN(oidp);
	}
	return (ENOENT);
}

static int
sysctl_sysctl_name2oid(SYSCTL_HANDLER_ARGS)
{
	char *p;
	int error, oid[CTL_MAXNAME], len = 0;
	struct sysctl_oid *op = NULL;
	struct rm_priotracker tracker;

	if (!req->newlen)
		return (ENOENT);
	if (req->newlen >= MAXPATHLEN)	/* XXX arbitrary, undocumented */
		return (ENAMETOOLONG);

	p = malloc(req->newlen+1, M_SYSCTL, M_WAITOK);

	error = SYSCTL_IN(req, p, req->newlen);
	if (error) {
		free(p, M_SYSCTL);
		return (error);
	}

	p [req->newlen] = '\0';

	SYSCTL_RLOCK(&tracker);
	error = name2oid(p, oid, &len, &op);
	SYSCTL_RUNLOCK(&tracker);

	free(p, M_SYSCTL);

	if (error)
		return (error);

	error = SYSCTL_OUT(req, oid, len * sizeof *oid);
	return (error);
}

/*
 * XXXRW/JA: Shouldn't return name2oid data for nodes that we don't permit in
 * capability mode.
 */
SYSCTL_PROC(_sysctl, 3, name2oid,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE
    | CTLFLAG_CAPRW, 0, 0, sysctl_sysctl_name2oid, "I", "");

static int
sysctl_sysctl_oidfmt(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *oid;
	struct rm_priotracker tracker;
	int error;

	SYSCTL_RLOCK(&tracker);
	error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
	if (error)
		goto out;

	if (oid->oid_fmt == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, &oid->oid_kind, sizeof(oid->oid_kind));
	if (error)
		goto out;
	error = SYSCTL_OUT(req, oid->oid_fmt, strlen(oid->oid_fmt) + 1);
out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}


static SYSCTL_NODE(_sysctl, 4, oidfmt, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
    sysctl_sysctl_oidfmt, "");

static int
sysctl_sysctl_oiddescr(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *oid;
	struct rm_priotracker tracker;
	int error;

	SYSCTL_RLOCK(&tracker);
	error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
	if (error)
		goto out;

	if (oid->oid_descr == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, oid->oid_descr, strlen(oid->oid_descr) + 1);
out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}

static SYSCTL_NODE(_sysctl, 5, oiddescr, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
    sysctl_sysctl_oiddescr, "");

static int
sysctl_sysctl_oidlabel(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *oid;
	struct rm_priotracker tracker;
	int error;

	SYSCTL_RLOCK(&tracker);
	error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
	if (error)
		goto out;

	if (oid->oid_label == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, oid->oid_label, strlen(oid->oid_label) + 1);
out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}

static SYSCTL_NODE(_sysctl, 6, oidlabel,
    CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD, sysctl_sysctl_oidlabel, "");

/*
 * Default "handler" functions.
 */

/*
 * Handle a bool.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */

int
sysctl_handle_bool(SYSCTL_HANDLER_ARGS)
{
	uint8_t temp;
	int error;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		temp = *(bool *)arg1 ? 1 : 0;
	else
		temp = arg2 ? 1 : 0;

	error = SYSCTL_OUT(req, &temp, sizeof(temp));
	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
	else {
		error = SYSCTL_IN(req, &temp, sizeof(temp));
		if (!error)
			*(bool *)arg1 = temp ? 1 : 0;
	}
	return (error);
}

/*
 * Handle an int8_t, signed or unsigned.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */

int
sysctl_handle_8(SYSCTL_HANDLER_ARGS)
{
	int8_t tmpout;
	int error = 0;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(int8_t *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
	else
		error = SYSCTL_IN(req, arg1, sizeof(tmpout));
	return (error);
}

/*
 * Handle an int16_t, signed or unsigned.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */

int
sysctl_handle_16(SYSCTL_HANDLER_ARGS)
{
	int16_t tmpout;
	int error = 0;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(int16_t *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
	else
		error = SYSCTL_IN(req, arg1, sizeof(tmpout));
	return (error);
}

/*
 * Handle an int32_t, signed or unsigned.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */

int
sysctl_handle_32(SYSCTL_HANDLER_ARGS)
{
	int32_t tmpout;
	int error = 0;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(int32_t *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
	else
		error = SYSCTL_IN(req, arg1, sizeof(tmpout));
	return (error);
}

/*
 * Handle an int, signed or unsigned.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */

int
sysctl_handle_int(SYSCTL_HANDLER_ARGS)
{
	int tmpout, error = 0;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(int *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(int));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
	else
		error = SYSCTL_IN(req, arg1, sizeof(int));
	return (error);
}

/*
 * Based on sysctl_handle_int(), convert milliseconds into ticks.
 * Note: this is used by TCP.
 */

int
sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
{
	int error, s, tt;

	tt = *(int *)arg1;
	s = (int)((int64_t)tt * 1000 / hz);

	error = sysctl_handle_int(oidp, &s, 0, req);
	if (error || !req->newptr)
		return (error);

	tt = (int)((int64_t)s * hz / 1000);
	if (tt < 1)
		return (EINVAL);

	*(int *)arg1 = tt;
	return (0);
}

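/*
 * Illustrative sketch (not part of this file): the common pattern for a
 * CTLTYPE_INT | CTLFLAG_RW SYSCTL_PROC handler that reuses
 * sysctl_handle_int() for the copy in/out and only adds validation of the
 * new value.  The variable name and bounds are hypothetical examples.
 */
#if 0
static int example_knob = 4;

static int
sysctl_example_knob(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = example_knob;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val < 1 || val > 64)
		return (EINVAL);
	example_knob = val;
	return (0);
}
#endif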

/*
 * Handle a long, signed or unsigned.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */

int
sysctl_handle_long(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	long tmplong;
#ifdef SCTL_MASK32
	int tmpint;
#endif

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmplong = *(long *)arg1;
	else
		tmplong = arg2;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		tmpint = tmplong;
		error = SYSCTL_OUT(req, &tmpint, sizeof(int));
	} else
#endif
		error = SYSCTL_OUT(req, &tmplong, sizeof(long));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
#ifdef SCTL_MASK32
	else if (req->flags & SCTL_MASK32) {
		error = SYSCTL_IN(req, &tmpint, sizeof(int));
		*(long *)arg1 = (long)tmpint;
	}
#endif
	else
		error = SYSCTL_IN(req, arg1, sizeof(long));
	return (error);
}

/*
 * Handle a 64 bit int, signed or unsigned.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */
int
sysctl_handle_64(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	uint64_t tmpout;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(uint64_t *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(uint64_t));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
	else
		error = SYSCTL_IN(req, arg1, sizeof(uint64_t));
	return (error);
}

/*
 * Handle our generic '\0' terminated 'C' string.
 * Two cases:
 *	a variable string:  point arg1 at it, arg2 is max length.
 *	a constant string:  point arg1 at it, arg2 is zero.
 */

int
sysctl_handle_string(SYSCTL_HANDLER_ARGS)
{
	size_t outlen;
	int error = 0, ro_string = 0;

	/*
	 * A zero-length buffer indicates a fixed size read-only
	 * string:
	 */
	if (arg2 == 0) {
		arg2 = strlen((char *)arg1) + 1;
		ro_string = 1;
	}

	if (req->oldptr != NULL) {
		char *tmparg;

		if (ro_string) {
			tmparg = arg1;
		} else {
			/* try to make a coherent snapshot of the string */
			tmparg = malloc(arg2, M_SYSCTLTMP, M_WAITOK);
			memcpy(tmparg, arg1, arg2);
		}

		outlen = strnlen(tmparg, arg2 - 1) + 1;
		error = SYSCTL_OUT(req, tmparg, outlen);

		if (!ro_string)
			free(tmparg, M_SYSCTLTMP);
	} else {
		outlen = strnlen((char *)arg1, arg2 - 1) + 1;
		error = SYSCTL_OUT(req, NULL, outlen);
	}
	if (error || !req->newptr)
		return (error);

	if ((req->newlen - req->newidx) >= arg2) {
		error = EINVAL;
	} else {
		arg2 = (req->newlen - req->newidx);
		error = SYSCTL_IN(req, arg1, arg2);
		((char *)arg1)[arg2] = '\0';
	}
	return (error);
}

/*
 * Handle any kind of opaque data.
 * arg1 points to it, arg2 is the size.
 */

int
sysctl_handle_opaque(SYSCTL_HANDLER_ARGS)
{
	int error, tries;
	u_int generation;
	struct sysctl_req req2;

	/*
	 * Attempt to get a coherent snapshot, by using the thread
	 * pre-emption counter updated from within mi_switch() to
	 * determine if we were pre-empted during a bcopy() or
	 * copyout(). Make 3 attempts at doing this before giving up.
	 * If we encounter an error, stop immediately.
	 */
	tries = 0;
	req2 = *req;
retry:
	generation = curthread->td_generation;
	error = SYSCTL_OUT(req, arg1, arg2);
	if (error)
		return (error);
	tries++;
	if (generation != curthread->td_generation && tries < 3) {
		*req = req2;
		goto retry;
	}

	error = SYSCTL_IN(req, arg1, arg2);

	return (error);
}

/*
 * Transfer functions to/from kernel space.
 * XXX: rather untested at this point
 */
static int
sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l)
{
	size_t i = 0;

	if (req->oldptr) {
		i = l;
		if (req->oldlen <= req->oldidx)
			i = 0;
		else
			if (i > req->oldlen - req->oldidx)
				i = req->oldlen - req->oldidx;
		if (i > 0)
			bcopy(p, (char *)req->oldptr + req->oldidx, i);
	}
	req->oldidx += l;
	if (req->oldptr && i != l)
		return (ENOMEM);
	return (0);
}

static int
sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l)
{
	if (!req->newptr)
		return (0);
	if (req->newlen - req->newidx < l)
		return (EINVAL);
	bcopy((char *)req->newptr + req->newidx, p, l);
	req->newidx += l;
	return (0);
}

int
kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
    size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags)
{
	int error = 0;
	struct sysctl_req req;

	bzero(&req, sizeof req);

	req.td = td;
	req.flags = flags;

	if (oldlenp) {
		req.oldlen = *oldlenp;
	}
	req.validlen = req.oldlen;

	if (old) {
		req.oldptr= old;
	}

	if (new != NULL) {
		req.newlen = newlen;
		req.newptr = new;
	}

	req.oldfunc = sysctl_old_kernel;
	req.newfunc = sysctl_new_kernel;
	req.lock = REQ_UNWIRED;

	error = sysctl_root(0, name, namelen, &req);

	if (req.lock == REQ_WIRED && req.validlen > 0)
		vsunlock(req.oldptr, req.validlen);

	if (error && error != ENOMEM)
		return (error);

	if (retval) {
		if (req.oldptr && req.oldidx > req.validlen)
			*retval = req.validlen;
		else
			*retval = req.oldidx;
	}
	return (error);
}

int
kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp,
    void *new, size_t newlen, size_t *retval, int flags)
{
	int oid[CTL_MAXNAME];
	size_t oidlen, plen;
	int error;

	oid[0] = 0;		/* sysctl internal magic */
	oid[1] = 3;		/* name2oid */
	oidlen = sizeof(oid);

	error = kernel_sysctl(td, oid, 2, oid, &oidlen,
	    (void *)name, strlen(name), &plen, flags);
	if (error)
		return (error);

	error = kernel_sysctl(td, oid, plen / sizeof(int), old, oldlenp,
	    new, newlen, retval, flags);
	return (error);
}
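
/*
 * Illustrative sketch (not part of this file): querying a sysctl by name
 * from within the kernel using the helper above.  The chosen oid is just an
 * example.
 */
#if 0
	int val;
	size_t len = sizeof(val);

	if (kernel_sysctlbyname(curthread, __DECONST(char *, "kern.maxproc"),
	    &val, &len, NULL, 0, NULL, 0) == 0)
		printf("kern.maxproc is %d\n", val);
#endif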

/*
 * Transfer function to/from user space.
 */
static int
sysctl_old_user(struct sysctl_req *req, const void *p, size_t l)
{
	size_t i, len, origidx;
	int error;

	origidx = req->oldidx;
	req->oldidx += l;
	if (req->oldptr == NULL)
		return (0);
	/*
	 * If we have not wired the user supplied buffer and we are currently
	 * holding locks, drop a witness warning, as it's possible that
	 * write operations to the user page can sleep.
	 */
	if (req->lock != REQ_WIRED)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "sysctl_old_user()");
	i = l;
	len = req->validlen;
	if (len <= origidx)
		i = 0;
	else {
		if (i > len - origidx)
			i = len - origidx;
		if (req->lock == REQ_WIRED) {
			error = copyout_nofault(p, (char *)req->oldptr +
			    origidx, i);
		} else
			error = copyout(p, (char *)req->oldptr + origidx, i);
		if (error != 0)
			return (error);
	}
	if (i < l)
		return (ENOMEM);
	return (0);
}

static int
sysctl_new_user(struct sysctl_req *req, void *p, size_t l)
{
	int error;

	if (!req->newptr)
		return (0);
	if (req->newlen - req->newidx < l)
		return (EINVAL);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "sysctl_new_user()");
	error = copyin((char *)req->newptr + req->newidx, p, l);
	req->newidx += l;
	return (error);
}

/*
 * Wire the user space destination buffer.  If set to a value greater than
 * zero, the len parameter limits the maximum amount of wired memory.
 */
int
sysctl_wire_old_buffer(struct sysctl_req *req, size_t len)
{
	int ret;
	size_t wiredlen;

	wiredlen = (len > 0 && len < req->oldlen) ? len : req->oldlen;
	ret = 0;
	if (req->lock != REQ_WIRED && req->oldptr &&
	    req->oldfunc == sysctl_old_user) {
		if (wiredlen != 0) {
			ret = vslock(req->oldptr, wiredlen);
			if (ret != 0) {
				if (ret != ENOMEM)
					return (ret);
				wiredlen = 0;
			}
		}
		req->lock = REQ_WIRED;
		req->validlen = wiredlen;
	}
	return (0);
}

int
sysctl_find_oid(int *name, u_int namelen, struct sysctl_oid **noid,
    int *nindx, struct sysctl_req *req)
{
	struct sysctl_oid_list *lsp;
	struct sysctl_oid *oid;
	int indx;

	SYSCTL_ASSERT_LOCKED();
	lsp = &sysctl__children;
	indx = 0;
	while (indx < CTL_MAXNAME) {
		SLIST_FOREACH(oid, lsp, oid_link) {
			if (oid->oid_number == name[indx])
				break;
		}
		if (oid == NULL)
			return (ENOENT);

		indx++;
		if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			if (oid->oid_handler != NULL || indx == namelen) {
				*noid = oid;
				if (nindx != NULL)
					*nindx = indx;
				KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
				    ("%s found DYING node %p", __func__, oid));
				return (0);
			}
			lsp = SYSCTL_CHILDREN(oid);
		} else if (indx == namelen) {
			if ((oid->oid_kind & CTLFLAG_DORMANT) != 0)
				return (ENOENT);
			*noid = oid;
			if (nindx != NULL)
				*nindx = indx;
			KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
			    ("%s found DYING node %p", __func__, oid));
			return (0);
		} else {
			return (ENOTDIR);
		}
	}
	return (ENOENT);
}

/*
 * Traverse our tree, and find the right node, execute whatever it points
 * to, and return the resulting error code.
 */

static int
sysctl_root(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *oid;
	struct rm_priotracker tracker;
	int error, indx, lvl;

	SYSCTL_RLOCK(&tracker);

	error = sysctl_find_oid(arg1, arg2, &oid, &indx, req);
	if (error)
		goto out;

	if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		/*
		 * You can't call a sysctl when it's a node, but has
		 * no handler.  Inform the user that it's a node.
		 * The indx may or may not be the same as namelen.
		 */
		if (oid->oid_handler == NULL) {
			error = EISDIR;
			goto out;
		}
	}

	/* Is this sysctl writable? */
	if (req->newptr && !(oid->oid_kind & CTLFLAG_WR)) {
		error = EPERM;
		goto out;
	}

	KASSERT(req->td != NULL, ("sysctl_root(): req->td == NULL"));

#ifdef CAPABILITY_MODE
	/*
	 * If the process is in capability mode, then don't permit reading or
	 * writing unless specifically granted for the node.
	 */
	if (IN_CAPABILITY_MODE(req->td)) {
		if ((req->oldptr && !(oid->oid_kind & CTLFLAG_CAPRD)) ||
		    (req->newptr && !(oid->oid_kind & CTLFLAG_CAPWR))) {
			error = EPERM;
			goto out;
		}
	}
#endif

	/* Is this sysctl sensitive to securelevels? */
	if (req->newptr && (oid->oid_kind & CTLFLAG_SECURE)) {
		lvl = (oid->oid_kind & CTLMASK_SECURE) >> CTLSHIFT_SECURE;
		error = securelevel_gt(req->td->td_ucred, lvl);
		if (error)
			goto out;
	}

	/* Is this sysctl writable by only privileged users? */
	if (req->newptr && !(oid->oid_kind & CTLFLAG_ANYBODY)) {
		int priv;

		if (oid->oid_kind & CTLFLAG_PRISON)
			priv = PRIV_SYSCTL_WRITEJAIL;
#ifdef VIMAGE
		else if ((oid->oid_kind & CTLFLAG_VNET) &&
		    prison_owns_vnet(req->td->td_ucred))
			priv = PRIV_SYSCTL_WRITEJAIL;
#endif
		else
			priv = PRIV_SYSCTL_WRITE;
		error = priv_check(req->td, priv);
		if (error)
			goto out;
	}

	if (!oid->oid_handler) {
		error = EINVAL;
		goto out;
	}

	if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		arg1 = (int *)arg1 + indx;
		arg2 -= indx;
	} else {
		arg1 = oid->oid_arg1;
		arg2 = oid->oid_arg2;
	}
#ifdef MAC
	error = mac_system_check_sysctl(req->td->td_ucred, oid, arg1, arg2,
	    req);
	if (error != 0)
		goto out;
#endif
#ifdef VIMAGE
	if ((oid->oid_kind & CTLFLAG_VNET) && arg1 != NULL)
		arg1 = (void *)(curvnet->vnet_data_base + (uintptr_t)arg1);
#endif
	error = sysctl_root_handler_locked(oid, arg1, arg2, req, &tracker);

out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct sysctl_args {
	int	*name;
	u_int	namelen;
	void	*old;
	size_t	*oldlenp;
	void	*new;
	size_t	newlen;
};
#endif
int
sys___sysctl(struct thread *td, struct sysctl_args *uap)
{
	int error, i, name[CTL_MAXNAME];
	size_t j;

	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);

	error = copyin(uap->name, &name, uap->namelen * sizeof(int));
	if (error)
		return (error);

	error = userland_sysctl(td, name, uap->namelen,
	    uap->old, uap->oldlenp, 0,
	    uap->new, uap->newlen, &j, 0);
	if (error && error != ENOMEM)
		return (error);
	if (uap->oldlenp) {
		i = copyout(&j, uap->oldlenp, sizeof(j));
		if (i)
			return (i);
	}
	return (error);
}

/*
 * This is used from various compatibility syscalls too.  That's why name
 * must be in kernel space.
 */
int
userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
    size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval,
    int flags)
{
	int error = 0, memlocked;
	struct sysctl_req req;

	bzero(&req, sizeof req);

	req.td = td;
	req.flags = flags;

	if (oldlenp) {
		if (inkernel) {
			req.oldlen = *oldlenp;
		} else {
			error = copyin(oldlenp, &req.oldlen, sizeof(*oldlenp));
			if (error)
				return (error);
		}
	}
	req.validlen = req.oldlen;
	req.oldptr = old;

	if (new != NULL) {
		req.newlen = newlen;
		req.newptr = new;
	}

	req.oldfunc = sysctl_old_user;
	req.newfunc = sysctl_new_user;
	req.lock = REQ_UNWIRED;

#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_SYSCTL))
		ktrsysctl(name, namelen);
#endif

	if (req.oldptr && req.oldlen > PAGE_SIZE) {
		memlocked = 1;
		sx_xlock(&sysctlmemlock);
	} else
		memlocked = 0;
	CURVNET_SET(TD_TO_VNET(td));

	for (;;) {
		req.oldidx = 0;
		req.newidx = 0;
		error = sysctl_root(0, name, namelen, &req);
		if (error != EAGAIN)
			break;
		kern_yield(PRI_USER);
	}

	CURVNET_RESTORE();

	if (req.lock == REQ_WIRED && req.validlen > 0)
		vsunlock(req.oldptr, req.validlen);
	if (memlocked)
		sx_xunlock(&sysctlmemlock);

	if (error && error != ENOMEM)
		return (error);

	if (retval) {
		if (req.oldptr && req.oldidx > req.validlen)
			*retval = req.validlen;
		else
			*retval = req.oldidx;
	}
	return (error);
}

/*
 * Drain into a sysctl struct.  The user buffer should be wired if a page
 * fault would cause issue.
 */
static int
sbuf_sysctl_drain(void *arg, const char *data, int len)
{
	struct sysctl_req *req = arg;
	int error;

	error = SYSCTL_OUT(req, data, len);
	KASSERT(error >= 0, ("Got unexpected negative value %d", error));
	return (error == 0 ? len : -error);
}

struct sbuf *
sbuf_new_for_sysctl(struct sbuf *s, char *buf, int length,
    struct sysctl_req *req)
{

	/* Supply a default buffer size if none given. */
	if (buf == NULL && length == 0)
		length = 64;
	s = sbuf_new(s, buf, length, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
	sbuf_set_drain(s, sbuf_sysctl_drain, req);
	return (s);
}
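
/*
 * Illustrative sketch (not part of this file): the usual way a handler
 * consumes sbuf_new_for_sysctl().  Wiring the old buffer first keeps the
 * drain from faulting while locks are held; the sbuf then streams its output
 * directly into the request.  The handler name and output are hypothetical.
 */
#if 0
static int
sysctl_example_report(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sb;
	int error;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sb, NULL, 128, req);
	sbuf_printf(&sb, "example report\n");
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}
#endif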