xref: /freebsd/sys/kern/kern_sysctl.c (revision 40427cca7a9ae77b095936fb1954417c290cfb17)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Mike Karels at Berkeley Software Design, Inc.
7  *
8  * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
9  * project, to make these variables more user-friendly.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
36  */
37 
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40 
41 #include "opt_capsicum.h"
42 #include "opt_compat.h"
43 #include "opt_ktrace.h"
44 
45 #include <sys/param.h>
46 #include <sys/fail.h>
47 #include <sys/systm.h>
48 #include <sys/capsicum.h>
49 #include <sys/kernel.h>
50 #include <sys/sysctl.h>
51 #include <sys/malloc.h>
52 #include <sys/priv.h>
53 #include <sys/proc.h>
54 #include <sys/jail.h>
55 #include <sys/lock.h>
56 #include <sys/mutex.h>
57 #include <sys/rmlock.h>
58 #include <sys/sbuf.h>
59 #include <sys/sx.h>
60 #include <sys/sysproto.h>
61 #include <sys/uio.h>
62 #ifdef KTRACE
63 #include <sys/ktrace.h>
64 #endif
65 
66 #include <net/vnet.h>
67 
68 #include <security/mac/mac_framework.h>
69 
70 #include <vm/vm.h>
71 #include <vm/vm_extern.h>
72 
73 static MALLOC_DEFINE(M_SYSCTL, "sysctl", "sysctl internal magic");
74 static MALLOC_DEFINE(M_SYSCTLOID, "sysctloid", "sysctl dynamic oids");
75 static MALLOC_DEFINE(M_SYSCTLTMP, "sysctltmp", "sysctl temp output buffer");
76 
77 /*
78  * The sysctllock protects the MIB tree.  It also protects sysctl
79  * contexts used with dynamic sysctls.  The sysctl_register_oid() and
80  * sysctl_unregister_oid() routines require the sysctllock to already
81  * be held, so the sysctl_wlock() and sysctl_wunlock() routines are
82  * provided for the few places in the kernel which need to use that
83  * API rather than using the dynamic API.  Use of the dynamic API is
84  * strongly encouraged for most code.
85  *
86  * The sysctlmemlock is used to limit the amount of user memory wired for
87  * sysctl requests.  This is implemented by serializing any userland
88  * sysctl requests larger than a single page via an exclusive lock.
89  */
90 static struct rmlock sysctllock;
91 static struct sx sysctlmemlock;
92 
93 #define	SYSCTL_WLOCK()		rm_wlock(&sysctllock)
94 #define	SYSCTL_WUNLOCK()	rm_wunlock(&sysctllock)
95 #define	SYSCTL_RLOCK(tracker)	rm_rlock(&sysctllock, (tracker))
96 #define	SYSCTL_RUNLOCK(tracker)	rm_runlock(&sysctllock, (tracker))
97 #define	SYSCTL_WLOCKED()	rm_wowned(&sysctllock)
98 #define	SYSCTL_ASSERT_LOCKED()	rm_assert(&sysctllock, RA_LOCKED)
99 #define	SYSCTL_ASSERT_WLOCKED()	rm_assert(&sysctllock, RA_WLOCKED)
100 #define	SYSCTL_ASSERT_RLOCKED()	rm_assert(&sysctllock, RA_RLOCKED)
101 #define	SYSCTL_INIT()		rm_init_flags(&sysctllock, "sysctl lock", \
102 				    RM_SLEEPABLE)
103 #define	SYSCTL_SLEEP(ch, wmesg, timo)					\
104 				rm_sleep(ch, &sysctllock, 0, wmesg, timo)
105 
106 static int sysctl_root(SYSCTL_HANDLER_ARGS);
107 
108 /* Root list */
109 struct sysctl_oid_list sysctl__children = SLIST_HEAD_INITIALIZER(&sysctl__children);
110 
111 static int	sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del,
112 		    int recurse);
113 static int	sysctl_old_kernel(struct sysctl_req *, const void *, size_t);
114 static int	sysctl_new_kernel(struct sysctl_req *, void *, size_t);
115 
116 static struct sysctl_oid *
117 sysctl_find_oidname(const char *name, struct sysctl_oid_list *list)
118 {
119 	struct sysctl_oid *oidp;
120 
121 	SYSCTL_ASSERT_LOCKED();
122 	SLIST_FOREACH(oidp, list, oid_link) {
123 		if (strcmp(oidp->oid_name, name) == 0) {
124 			return (oidp);
125 		}
126 	}
127 	return (NULL);
128 }
129 
130 /*
131  * Initialization of the MIB tree.
132  *
133  * Order by number in each list.
134  */
135 void
136 sysctl_wlock(void)
137 {
138 
139 	SYSCTL_WLOCK();
140 }
141 
142 void
143 sysctl_wunlock(void)
144 {
145 
146 	SYSCTL_WUNLOCK();
147 }
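/*
 * Illustrative sketch (not part of the original file): the few consumers of
 * the static registration API bracket it as described in the locking comment
 * above; "example_oid" is an assumed, statically initialized struct sysctl_oid.
 */
#if 0
static void
example_register_static(struct sysctl_oid *example_oid)
{

	sysctl_wlock();
	sysctl_register_oid(example_oid);
	sysctl_wunlock();
}
#endif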
148 
149 static int
150 sysctl_root_handler_locked(struct sysctl_oid *oid, void *arg1, intmax_t arg2,
151     struct sysctl_req *req, struct rm_priotracker *tracker)
152 {
153 	int error;
154 
155 	if (oid->oid_kind & CTLFLAG_DYN)
156 		atomic_add_int(&oid->oid_running, 1);
157 
158 	if (tracker != NULL)
159 		SYSCTL_RUNLOCK(tracker);
160 	else
161 		SYSCTL_WUNLOCK();
162 
163 	if (!(oid->oid_kind & CTLFLAG_MPSAFE))
164 		mtx_lock(&Giant);
165 	error = oid->oid_handler(oid, arg1, arg2, req);
166 	if (!(oid->oid_kind & CTLFLAG_MPSAFE))
167 		mtx_unlock(&Giant);
168 
169 	KFAIL_POINT_ERROR(_debug_fail_point, sysctl_running, error);
170 
171 	if (tracker != NULL)
172 		SYSCTL_RLOCK(tracker);
173 	else
174 		SYSCTL_WLOCK();
175 
176 	if (oid->oid_kind & CTLFLAG_DYN) {
177 		if (atomic_fetchadd_int(&oid->oid_running, -1) == 1 &&
178 		    (oid->oid_kind & CTLFLAG_DYING) != 0)
179 			wakeup(&oid->oid_running);
180 	}
181 
182 	return (error);
183 }
184 
185 static void
186 sysctl_load_tunable_by_oid_locked(struct sysctl_oid *oidp)
187 {
188 	struct sysctl_req req;
189 	struct sysctl_oid *curr;
190 	char *penv = NULL;
191 	char path[64];
192 	ssize_t rem = sizeof(path);
193 	ssize_t len;
194 	uint8_t val_8;
195 	uint16_t val_16;
196 	uint32_t val_32;
197 	int val_int;
198 	long val_long;
199 	int64_t val_64;
200 	quad_t val_quad;
201 	int error;
202 
203 	path[--rem] = 0;
204 
205 	for (curr = oidp; curr != NULL; curr = SYSCTL_PARENT(curr)) {
206 		len = strlen(curr->oid_name);
207 		rem -= len;
208 		if (curr != oidp)
209 			rem -= 1;
210 		if (rem < 0) {
211 			printf("OID path exceeds %d bytes\n", (int)sizeof(path));
212 			return;
213 		}
214 		memcpy(path + rem, curr->oid_name, len);
215 		if (curr != oidp)
216 			path[rem + len] = '.';
217 	}
218 
219 	memset(&req, 0, sizeof(req));
220 
221 	req.td = curthread;
222 	req.oldfunc = sysctl_old_kernel;
223 	req.newfunc = sysctl_new_kernel;
224 	req.lock = REQ_UNWIRED;
225 
226 	switch (oidp->oid_kind & CTLTYPE) {
227 	case CTLTYPE_INT:
228 		if (getenv_int(path + rem, &val_int) == 0)
229 			return;
230 		req.newlen = sizeof(val_int);
231 		req.newptr = &val_int;
232 		break;
233 	case CTLTYPE_UINT:
234 		if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
235 			return;
236 		req.newlen = sizeof(val_int);
237 		req.newptr = &val_int;
238 		break;
239 	case CTLTYPE_LONG:
240 		if (getenv_long(path + rem, &val_long) == 0)
241 			return;
242 		req.newlen = sizeof(val_long);
243 		req.newptr = &val_long;
244 		break;
245 	case CTLTYPE_ULONG:
246 		if (getenv_ulong(path + rem, (unsigned long *)&val_long) == 0)
247 			return;
248 		req.newlen = sizeof(val_long);
249 		req.newptr = &val_long;
250 		break;
251 	case CTLTYPE_S8:
252 		if (getenv_int(path + rem, &val_int) == 0)
253 			return;
254 		val_8 = val_int;
255 		req.newlen = sizeof(val_8);
256 		req.newptr = &val_8;
257 		break;
258 	case CTLTYPE_S16:
259 		if (getenv_int(path + rem, &val_int) == 0)
260 			return;
261 		val_16 = val_int;
262 		req.newlen = sizeof(val_16);
263 		req.newptr = &val_16;
264 		break;
265 	case CTLTYPE_S32:
266 		if (getenv_long(path + rem, &val_long) == 0)
267 			return;
268 		val_32 = val_long;
269 		req.newlen = sizeof(val_32);
270 		req.newptr = &val_32;
271 		break;
272 	case CTLTYPE_S64:
273 		if (getenv_quad(path + rem, &val_quad) == 0)
274 			return;
275 		val_64 = val_quad;
276 		req.newlen = sizeof(val_64);
277 		req.newptr = &val_64;
278 		break;
279 	case CTLTYPE_U8:
280 		if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
281 			return;
282 		val_8 = val_int;
283 		req.newlen = sizeof(val_8);
284 		req.newptr = &val_8;
285 		break;
286 	case CTLTYPE_U16:
287 		if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
288 			return;
289 		val_16 = val_int;
290 		req.newlen = sizeof(val_16);
291 		req.newptr = &val_16;
292 		break;
293 	case CTLTYPE_U32:
294 		if (getenv_ulong(path + rem, (unsigned long *)&val_long) == 0)
295 			return;
296 		val_32 = val_long;
297 		req.newlen = sizeof(val_32);
298 		req.newptr = &val_32;
299 		break;
300 	case CTLTYPE_U64:
301 		/* XXX there is no getenv_uquad() */
302 		if (getenv_quad(path + rem, &val_quad) == 0)
303 			return;
304 		val_64 = val_quad;
305 		req.newlen = sizeof(val_64);
306 		req.newptr = &val_64;
307 		break;
308 	case CTLTYPE_STRING:
309 		penv = kern_getenv(path + rem);
310 		if (penv == NULL)
311 			return;
312 		req.newlen = strlen(penv);
313 		req.newptr = penv;
314 		break;
315 	default:
316 		return;
317 	}
318 	error = sysctl_root_handler_locked(oidp, oidp->oid_arg1,
319 	    oidp->oid_arg2, &req, NULL);
320 	if (error != 0)
321 		printf("Setting sysctl %s failed: %d\n", path + rem, error);
322 	if (penv != NULL)
323 		freeenv(penv);
324 }
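/*
 * Illustrative sketch (not part of the original file): a CTLFLAG_TUN-marked
 * oid such as the one below is initialized from the loader environment
 * (e.g. a loader.conf line "debug.example_limit=128") by the function above
 * when the oid is registered; "example_limit" is an assumed name.
 */
#if 0
static int example_limit = 64;
SYSCTL_INT(_debug, OID_AUTO, example_limit, CTLFLAG_RWTUN, &example_limit, 0,
    "Example limit settable as a loader tunable");
#endif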
325 
326 static int
327 sbuf_printf_drain(void *arg __unused, const char *data, int len)
328 {
329 
330 	return (printf("%.*s", len, data));
331 }
332 
333 /*
334  * Locate the path to a given oid.  Returns the length of the resulting path,
335  * or -1 if the oid was not found.  nodes must have room for CTL_MAXNAME
336  * elements and be NULL initialized.
337  */
338 static int
339 sysctl_search_oid(struct sysctl_oid **nodes, struct sysctl_oid *needle)
340 {
341 	int indx;
342 
343 	SYSCTL_ASSERT_LOCKED();
344 	indx = 0;
345 	while (indx < CTL_MAXNAME && indx >= 0) {
346 		if (nodes[indx] == NULL && indx == 0)
347 			nodes[indx] = SLIST_FIRST(&sysctl__children);
348 		else if (nodes[indx] == NULL)
349 			nodes[indx] = SLIST_FIRST(&nodes[indx - 1]->oid_children);
350 		else
351 			nodes[indx] = SLIST_NEXT(nodes[indx], oid_link);
352 
353 		if (nodes[indx] == needle)
354 			return (indx + 1);
355 
356 		if (nodes[indx] == NULL) {
357 			indx--;
358 			continue;
359 		}
360 
361 		if ((nodes[indx]->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
362 			indx++;
363 			continue;
364 		}
365 	}
366 	return (-1);
367 }
368 
369 static void
370 sysctl_warn_reuse(const char *func, struct sysctl_oid *leaf)
371 {
372 	struct sysctl_oid *nodes[CTL_MAXNAME];
373 	char buf[128];
374 	struct sbuf sb;
375 	int rc, i;
376 
377 	(void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
378 	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
379 
380 	sbuf_printf(&sb, "%s: can't re-use a leaf (", __func__);
381 
382 	memset(nodes, 0, sizeof(nodes));
383 	rc = sysctl_search_oid(nodes, leaf);
384 	if (rc > 0) {
385 		for (i = 0; i < rc; i++)
386 			sbuf_printf(&sb, "%s%.*s", nodes[i]->oid_name,
387 			    i != (rc - 1), ".");
388 	} else {
389 		sbuf_printf(&sb, "%s", leaf->oid_name);
390 	}
391 	sbuf_printf(&sb, ")!\n");
392 
393 	(void)sbuf_finish(&sb);
394 }
395 
396 #ifdef SYSCTL_DEBUG
397 static int
398 sysctl_reuse_test(SYSCTL_HANDLER_ARGS)
399 {
400 	struct rm_priotracker tracker;
401 
402 	SYSCTL_RLOCK(&tracker);
403 	sysctl_warn_reuse(__func__, oidp);
404 	SYSCTL_RUNLOCK(&tracker);
405 	return (0);
406 }
407 SYSCTL_PROC(_sysctl, 0, reuse_test, CTLTYPE_STRING|CTLFLAG_RD|CTLFLAG_MPSAFE,
408 	0, 0, sysctl_reuse_test, "-", "");
409 #endif
410 
411 void
412 sysctl_register_oid(struct sysctl_oid *oidp)
413 {
414 	struct sysctl_oid_list *parent = oidp->oid_parent;
415 	struct sysctl_oid *p;
416 	struct sysctl_oid *q;
417 	int oid_number;
418 	int timeout = 2;
419 
420 	/*
421 	 * First check if another oid with the same name already
422 	 * exists in the parent's list.
423 	 */
424 	SYSCTL_ASSERT_WLOCKED();
425 	p = sysctl_find_oidname(oidp->oid_name, parent);
426 	if (p != NULL) {
427 		if ((p->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
428 			p->oid_refcnt++;
429 			return;
430 		} else {
431 			sysctl_warn_reuse(__func__, p);
432 			return;
433 		}
434 	}
435 	/* get current OID number */
436 	oid_number = oidp->oid_number;
437 
438 #if (OID_AUTO >= 0)
439 #error "OID_AUTO is expected to be a negative value"
440 #endif
441 	/*
442 	 * Any negative OID number qualifies as OID_AUTO. Valid OID
443 	 * numbers should always be positive.
444 	 *
445 	 * NOTE: DO NOT change the starting value here, change it in
446 	 * <sys/sysctl.h>, and make sure it is at least 256 to
447 	 * accommodate e.g. net.inet.raw as a static sysctl node.
448 	 */
449 	if (oid_number < 0) {
450 		static int newoid;
451 
452 		/*
453 		 * By decrementing the next OID number we spend less
454 		 * time inserting the OIDs into a sorted list.
455 		 */
456 		if (--newoid < CTL_AUTO_START)
457 			newoid = 0x7fffffff;
458 
459 		oid_number = newoid;
460 	}
461 
462 	/*
463 	 * Insert the OID into the parent's list sorted by OID number.
464 	 */
465 retry:
466 	q = NULL;
467 	SLIST_FOREACH(p, parent, oid_link) {
468 		/* check if the current OID number is in use */
469 		if (oid_number == p->oid_number) {
470 			/* get the next valid OID number */
471 			if (oid_number < CTL_AUTO_START ||
472 			    oid_number == 0x7fffffff) {
473 				/* wraparound - restart */
474 				oid_number = CTL_AUTO_START;
475 				/* don't loop forever */
476 				if (!timeout--)
477 					panic("sysctl: Out of OID numbers\n");
478 				goto retry;
479 			} else {
480 				oid_number++;
481 			}
482 		} else if (oid_number < p->oid_number)
483 			break;
484 		q = p;
485 	}
486 	/* check for non-auto OID number collision */
487 	if (oidp->oid_number >= 0 && oidp->oid_number < CTL_AUTO_START &&
488 	    oid_number >= CTL_AUTO_START) {
489 		printf("sysctl: OID number(%d) is already in use for '%s'\n",
490 		    oidp->oid_number, oidp->oid_name);
491 	}
492 	/* update the OID number, if any */
493 	oidp->oid_number = oid_number;
494 	if (q != NULL)
495 		SLIST_INSERT_AFTER(q, oidp, oid_link);
496 	else
497 		SLIST_INSERT_HEAD(parent, oidp, oid_link);
498 
499 	if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE &&
500 #ifdef VIMAGE
501 	    (oidp->oid_kind & CTLFLAG_VNET) == 0 &&
502 #endif
503 	    (oidp->oid_kind & CTLFLAG_TUN) != 0 &&
504 	    (oidp->oid_kind & CTLFLAG_NOFETCH) == 0) {
505 		/* only fetch value once */
506 		oidp->oid_kind |= CTLFLAG_NOFETCH;
507 		/* try to fetch value from kernel environment */
508 		sysctl_load_tunable_by_oid_locked(oidp);
509 	}
510 }
511 
512 void
513 sysctl_unregister_oid(struct sysctl_oid *oidp)
514 {
515 	struct sysctl_oid *p;
516 	int error;
517 
518 	SYSCTL_ASSERT_WLOCKED();
519 	error = ENOENT;
520 	if (oidp->oid_number == OID_AUTO) {
521 		error = EINVAL;
522 	} else {
523 		SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
524 			if (p == oidp) {
525 				SLIST_REMOVE(oidp->oid_parent, oidp,
526 				    sysctl_oid, oid_link);
527 				error = 0;
528 				break;
529 			}
530 		}
531 	}
532 
533 	/*
534 	 * This can happen when a module fails to register and is
535 	 * being unloaded afterwards.  It should not be a panic()
536 	 * for normal use.
537 	 */
538 	if (error)
539 		printf("%s: failed to unregister sysctl\n", __func__);
540 }
541 
542 /* Initialize a new context to keep track of dynamically added sysctls. */
543 int
544 sysctl_ctx_init(struct sysctl_ctx_list *c)
545 {
546 
547 	if (c == NULL) {
548 		return (EINVAL);
549 	}
550 
551 	/*
552 	 * No locking here, the caller is responsible for not adding
553 	 * new nodes to a context until after this function has
554 	 * returned.
555 	 */
556 	TAILQ_INIT(c);
557 	return (0);
558 }
559 
560 /* Free the context, and destroy all dynamic oids registered in this context */
561 int
562 sysctl_ctx_free(struct sysctl_ctx_list *clist)
563 {
564 	struct sysctl_ctx_entry *e, *e1;
565 	int error;
566 
567 	error = 0;
568 	/*
569 	 * First perform a "dry run" to check if it's ok to remove oids.
570 	 * XXX FIXME
571 	 * XXX This algorithm is a hack. But I don't know any
572 	 * XXX better solution for now...
573 	 */
574 	SYSCTL_WLOCK();
575 	TAILQ_FOREACH(e, clist, link) {
576 		error = sysctl_remove_oid_locked(e->entry, 0, 0);
577 		if (error)
578 			break;
579 	}
580 	/*
581 	 * Restore deregistered entries, either from the end,
582 	 * or from the place where error occurred.
583 	 * e contains the entry that was not unregistered
584 	 */
585 	if (error)
586 		e1 = TAILQ_PREV(e, sysctl_ctx_list, link);
587 	else
588 		e1 = TAILQ_LAST(clist, sysctl_ctx_list);
589 	while (e1 != NULL) {
590 		sysctl_register_oid(e1->entry);
591 		e1 = TAILQ_PREV(e1, sysctl_ctx_list, link);
592 	}
593 	if (error) {
594 		SYSCTL_WUNLOCK();
595 		return(EBUSY);
596 	}
597 	/* Now really delete the entries */
598 	e = TAILQ_FIRST(clist);
599 	while (e != NULL) {
600 		e1 = TAILQ_NEXT(e, link);
601 		error = sysctl_remove_oid_locked(e->entry, 1, 0);
602 		if (error)
603 			panic("sysctl_remove_oid: corrupt tree, entry: %s",
604 			    e->entry->oid_name);
605 		free(e, M_SYSCTLOID);
606 		e = e1;
607 	}
608 	SYSCTL_WUNLOCK();
609 	return (error);
610 }
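/*
 * Illustrative sketch (not part of the original file): typical lifecycle of a
 * sysctl context in a driver, using the functions above; the "example" names
 * and the _debug parent are assumptions.
 */
#if 0
static struct sysctl_ctx_list example_ctx;
static int example_value;

static void
example_attach(void)
{

	sysctl_ctx_init(&example_ctx);
	/* Oids added through the context are recorded for later cleanup. */
	SYSCTL_ADD_INT(&example_ctx, SYSCTL_STATIC_CHILDREN(_debug), OID_AUTO,
	    "example_value", CTLFLAG_RW, &example_value, 0,
	    "Example integer managed by a sysctl context");
}

static void
example_detach(void)
{

	/* Removes and frees every oid registered via example_ctx. */
	(void)sysctl_ctx_free(&example_ctx);
}
#endif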
611 
612 /* Add an entry to the context */
613 struct sysctl_ctx_entry *
614 sysctl_ctx_entry_add(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
615 {
616 	struct sysctl_ctx_entry *e;
617 
618 	SYSCTL_ASSERT_WLOCKED();
619 	if (clist == NULL || oidp == NULL)
620 		return(NULL);
621 	e = malloc(sizeof(struct sysctl_ctx_entry), M_SYSCTLOID, M_WAITOK);
622 	e->entry = oidp;
623 	TAILQ_INSERT_HEAD(clist, e, link);
624 	return (e);
625 }
626 
627 /* Find an entry in the context */
628 struct sysctl_ctx_entry *
629 sysctl_ctx_entry_find(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
630 {
631 	struct sysctl_ctx_entry *e;
632 
633 	SYSCTL_ASSERT_WLOCKED();
634 	if (clist == NULL || oidp == NULL)
635 		return(NULL);
636 	TAILQ_FOREACH(e, clist, link) {
637 		if(e->entry == oidp)
638 			return(e);
639 	}
640 	return (e);
641 }
642 
643 /*
644  * Delete an entry from the context.
645  * NOTE: this function doesn't free oidp! You have to remove it
646  * with sysctl_remove_oid().
647  */
648 int
649 sysctl_ctx_entry_del(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
650 {
651 	struct sysctl_ctx_entry *e;
652 
653 	if (clist == NULL || oidp == NULL)
654 		return (EINVAL);
655 	SYSCTL_WLOCK();
656 	e = sysctl_ctx_entry_find(clist, oidp);
657 	if (e != NULL) {
658 		TAILQ_REMOVE(clist, e, link);
659 		SYSCTL_WUNLOCK();
660 		free(e, M_SYSCTLOID);
661 		return (0);
662 	} else {
663 		SYSCTL_WUNLOCK();
664 		return (ENOENT);
665 	}
666 }
667 
668 /*
669  * Remove dynamically created sysctl trees.
670  * oidp - top of the tree to be removed
671  * del - if 0 - just deregister, otherwise free up entries as well
672  * recurse - if != 0 traverse the subtree to be deleted
673  */
674 int
675 sysctl_remove_oid(struct sysctl_oid *oidp, int del, int recurse)
676 {
677 	int error;
678 
679 	SYSCTL_WLOCK();
680 	error = sysctl_remove_oid_locked(oidp, del, recurse);
681 	SYSCTL_WUNLOCK();
682 	return (error);
683 }
684 
685 int
686 sysctl_remove_name(struct sysctl_oid *parent, const char *name,
687     int del, int recurse)
688 {
689 	struct sysctl_oid *p, *tmp;
690 	int error;
691 
692 	error = ENOENT;
693 	SYSCTL_WLOCK();
694 	SLIST_FOREACH_SAFE(p, SYSCTL_CHILDREN(parent), oid_link, tmp) {
695 		if (strcmp(p->oid_name, name) == 0) {
696 			error = sysctl_remove_oid_locked(p, del, recurse);
697 			break;
698 		}
699 	}
700 	SYSCTL_WUNLOCK();
701 
702 	return (error);
703 }
704 
705 
706 static int
707 sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del, int recurse)
708 {
709 	struct sysctl_oid *p, *tmp;
710 	int error;
711 
712 	SYSCTL_ASSERT_WLOCKED();
713 	if (oidp == NULL)
714 		return(EINVAL);
715 	if ((oidp->oid_kind & CTLFLAG_DYN) == 0) {
716 		printf("Warning: can't remove non-dynamic nodes (%s)!\n",
717 		    oidp->oid_name);
718 		return (EINVAL);
719 	}
720 	/*
721 	 * WARNING: normal method to do this should be through
722 	 * sysctl_ctx_free(). Use recursing as the last resort
723 	 * method to purge your sysctl tree of leftovers...
724 	 * However, if some other code still references these nodes,
725 	 * it will panic.
726 	 */
727 	if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
728 		if (oidp->oid_refcnt == 1) {
729 			SLIST_FOREACH_SAFE(p,
730 			    SYSCTL_CHILDREN(oidp), oid_link, tmp) {
731 				if (!recurse) {
732 					printf("Warning: failed attempt to "
733 					    "remove oid %s with child %s\n",
734 					    oidp->oid_name, p->oid_name);
735 					return (ENOTEMPTY);
736 				}
737 				error = sysctl_remove_oid_locked(p, del,
738 				    recurse);
739 				if (error)
740 					return (error);
741 			}
742 		}
743 	}
744 	if (oidp->oid_refcnt > 1 ) {
745 		oidp->oid_refcnt--;
746 	} else {
747 		if (oidp->oid_refcnt == 0) {
748 			printf("Warning: bad oid_refcnt=%u (%s)!\n",
749 				oidp->oid_refcnt, oidp->oid_name);
750 			return (EINVAL);
751 		}
752 		sysctl_unregister_oid(oidp);
753 		if (del) {
754 			/*
755 			 * Wait for all threads running the handler to drain.
756 			 * This preserves the previous behavior when the
757 			 * sysctl lock was held across a handler invocation,
758 			 * and is necessary for module unload correctness.
759 			 */
760 			while (oidp->oid_running > 0) {
761 				oidp->oid_kind |= CTLFLAG_DYING;
762 				SYSCTL_SLEEP(&oidp->oid_running, "oidrm", 0);
763 			}
764 			if (oidp->oid_descr)
765 				free(__DECONST(char *, oidp->oid_descr),
766 				    M_SYSCTLOID);
767 			if (oidp->oid_label)
768 				free(__DECONST(char *, oidp->oid_label),
769 				    M_SYSCTLOID);
770 			free(__DECONST(char *, oidp->oid_name), M_SYSCTLOID);
771 			free(oidp, M_SYSCTLOID);
772 		}
773 	}
774 	return (0);
775 }
776 /*
777  * Create new sysctls at run time.
778  * clist may point to a valid context initialized with sysctl_ctx_init().
779  */
780 struct sysctl_oid *
781 sysctl_add_oid(struct sysctl_ctx_list *clist, struct sysctl_oid_list *parent,
782 	int number, const char *name, int kind, void *arg1, intmax_t arg2,
783 	int (*handler)(SYSCTL_HANDLER_ARGS), const char *fmt, const char *descr,
784 	const char *label)
785 {
786 	struct sysctl_oid *oidp;
787 
788 	/* You have to hook up somewhere.. */
789 	if (parent == NULL)
790 		return(NULL);
791 	/* Check if the node already exists, otherwise create it */
792 	SYSCTL_WLOCK();
793 	oidp = sysctl_find_oidname(name, parent);
794 	if (oidp != NULL) {
795 		if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
796 			oidp->oid_refcnt++;
797 			/* Update the context */
798 			if (clist != NULL)
799 				sysctl_ctx_entry_add(clist, oidp);
800 			SYSCTL_WUNLOCK();
801 			return (oidp);
802 		} else {
803 			sysctl_warn_reuse(__func__, oidp);
804 			SYSCTL_WUNLOCK();
805 			return (NULL);
806 		}
807 	}
808 	oidp = malloc(sizeof(struct sysctl_oid), M_SYSCTLOID, M_WAITOK|M_ZERO);
809 	oidp->oid_parent = parent;
810 	SLIST_INIT(&oidp->oid_children);
811 	oidp->oid_number = number;
812 	oidp->oid_refcnt = 1;
813 	oidp->oid_name = strdup(name, M_SYSCTLOID);
814 	oidp->oid_handler = handler;
815 	oidp->oid_kind = CTLFLAG_DYN | kind;
816 	oidp->oid_arg1 = arg1;
817 	oidp->oid_arg2 = arg2;
818 	oidp->oid_fmt = fmt;
819 	if (descr != NULL)
820 		oidp->oid_descr = strdup(descr, M_SYSCTLOID);
821 	if (label != NULL)
822 		oidp->oid_label = strdup(label, M_SYSCTLOID);
823 	/* Update the context, if used */
824 	if (clist != NULL)
825 		sysctl_ctx_entry_add(clist, oidp);
826 	/* Register this oid */
827 	sysctl_register_oid(oidp);
828 	SYSCTL_WUNLOCK();
829 	return (oidp);
830 }
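/*
 * Illustrative sketch (not part of the original file): the SYSCTL_ADD_*()
 * macros in <sys/sysctl.h> expand to calls like the one below; "ctx", "valp"
 * and the _debug parent are assumptions.
 */
#if 0
static void
example_add_oid(struct sysctl_ctx_list *ctx, int *valp)
{

	(void)sysctl_add_oid(ctx, SYSCTL_STATIC_CHILDREN(_debug), OID_AUTO,
	    "example_var", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    valp, 0, sysctl_handle_int, "I",
	    "Example integer added through sysctl_add_oid()", NULL);
}
#endif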
831 
832 /*
833  * Rename an existing oid.
834  */
835 void
836 sysctl_rename_oid(struct sysctl_oid *oidp, const char *name)
837 {
838 	char *newname;
839 	char *oldname;
840 
841 	newname = strdup(name, M_SYSCTLOID);
842 	SYSCTL_WLOCK();
843 	oldname = __DECONST(char *, oidp->oid_name);
844 	oidp->oid_name = newname;
845 	SYSCTL_WUNLOCK();
846 	free(oldname, M_SYSCTLOID);
847 }
848 
849 /*
850  * Reparent an existing oid.
851  */
852 int
853 sysctl_move_oid(struct sysctl_oid *oid, struct sysctl_oid_list *parent)
854 {
855 	struct sysctl_oid *oidp;
856 
857 	SYSCTL_WLOCK();
858 	if (oid->oid_parent == parent) {
859 		SYSCTL_WUNLOCK();
860 		return (0);
861 	}
862 	oidp = sysctl_find_oidname(oid->oid_name, parent);
863 	if (oidp != NULL) {
864 		SYSCTL_WUNLOCK();
865 		return (EEXIST);
866 	}
867 	sysctl_unregister_oid(oid);
868 	oid->oid_parent = parent;
869 	oid->oid_number = OID_AUTO;
870 	sysctl_register_oid(oid);
871 	SYSCTL_WUNLOCK();
872 	return (0);
873 }
874 
875 /*
876  * Register the kernel's oids on startup.
877  */
878 SET_DECLARE(sysctl_set, struct sysctl_oid);
879 
880 static void
881 sysctl_register_all(void *arg)
882 {
883 	struct sysctl_oid **oidp;
884 
885 	sx_init(&sysctlmemlock, "sysctl mem");
886 	SYSCTL_INIT();
887 	SYSCTL_WLOCK();
888 	SET_FOREACH(oidp, sysctl_set)
889 		sysctl_register_oid(*oidp);
890 	SYSCTL_WUNLOCK();
891 }
892 SYSINIT(sysctl, SI_SUB_KMEM, SI_ORDER_FIRST, sysctl_register_all, 0);
893 
894 /*
895  * "Staff-functions"
896  *
897  * These functions implement a presently undocumented interface
898  * used by the sysctl program to walk the tree, and get the type
899  * so it can print the value.
900  * This interface is under work and consideration, and should probably
901  * be killed with a big axe by the first person who can find the time.
 902  * (Be aware, though, that the proper interface isn't as obvious as it
 903  * may seem; there are various conflicting requirements.)
904  *
905  * {0,0}	printf the entire MIB-tree.
906  * {0,1,...}	return the name of the "..." OID.
907  * {0,2,...}	return the next OID.
908  * {0,3}	return the OID of the name in "new"
909  * {0,4,...}	return the kind & format info for the "..." OID.
910  * {0,5,...}	return the description of the "..." OID.
911  * {0,6,...}	return the aggregation label of the "..." OID.
912  */
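/*
 * Illustrative sketch (not part of the original file; userland code): the
 * {0,3} node above is what sysctlnametomib(3) uses to translate a name into
 * a numeric OID before calling sysctl(2).
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>

static void
example_name2oid(void)
{
	int mib[CTL_MAXNAME];
	size_t miblen = CTL_MAXNAME;

	if (sysctlnametomib("kern.ostype", mib, &miblen) == -1)
		err(1, "sysctlnametomib");
}
#endif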
913 
914 #ifdef SYSCTL_DEBUG
915 static void
916 sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i)
917 {
918 	int k;
919 	struct sysctl_oid *oidp;
920 
921 	SYSCTL_ASSERT_LOCKED();
922 	SLIST_FOREACH(oidp, l, oid_link) {
923 
924 		for (k=0; k<i; k++)
925 			printf(" ");
926 
927 		printf("%d %s ", oidp->oid_number, oidp->oid_name);
928 
929 		printf("%c%c",
930 			oidp->oid_kind & CTLFLAG_RD ? 'R':' ',
931 			oidp->oid_kind & CTLFLAG_WR ? 'W':' ');
932 
933 		if (oidp->oid_handler)
934 			printf(" *Handler");
935 
936 		switch (oidp->oid_kind & CTLTYPE) {
937 			case CTLTYPE_NODE:
938 				printf(" Node\n");
939 				if (!oidp->oid_handler) {
940 					sysctl_sysctl_debug_dump_node(
941 					    SYSCTL_CHILDREN(oidp), i + 2);
942 				}
943 				break;
944 			case CTLTYPE_INT:    printf(" Int\n"); break;
945 			case CTLTYPE_UINT:   printf(" u_int\n"); break;
946 			case CTLTYPE_LONG:   printf(" Long\n"); break;
947 			case CTLTYPE_ULONG:  printf(" u_long\n"); break;
948 			case CTLTYPE_STRING: printf(" String\n"); break;
949 			case CTLTYPE_S8:     printf(" int8_t\n"); break;
950 			case CTLTYPE_S16:    printf(" int16_t\n"); break;
951 			case CTLTYPE_S32:    printf(" int32_t\n"); break;
952 			case CTLTYPE_S64:    printf(" int64_t\n"); break;
953 			case CTLTYPE_U8:     printf(" uint8_t\n"); break;
954 			case CTLTYPE_U16:    printf(" uint16_t\n"); break;
955 			case CTLTYPE_U32:    printf(" uint32_t\n"); break;
956 			case CTLTYPE_U64:    printf(" uint64_t\n"); break;
957 			case CTLTYPE_OPAQUE: printf(" Opaque/struct\n"); break;
958 			default:	     printf("\n");
959 		}
960 
961 	}
962 }
963 
964 static int
965 sysctl_sysctl_debug(SYSCTL_HANDLER_ARGS)
966 {
967 	struct rm_priotracker tracker;
968 	int error;
969 
970 	error = priv_check(req->td, PRIV_SYSCTL_DEBUG);
971 	if (error)
972 		return (error);
973 	SYSCTL_RLOCK(&tracker);
974 	sysctl_sysctl_debug_dump_node(&sysctl__children, 0);
975 	SYSCTL_RUNLOCK(&tracker);
976 	return (ENOENT);
977 }
978 
979 SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING|CTLFLAG_RD|CTLFLAG_MPSAFE,
980 	0, 0, sysctl_sysctl_debug, "-", "");
981 #endif
982 
983 static int
984 sysctl_sysctl_name(SYSCTL_HANDLER_ARGS)
985 {
986 	int *name = (int *) arg1;
987 	u_int namelen = arg2;
988 	int error = 0;
989 	struct sysctl_oid *oid;
990 	struct sysctl_oid_list *lsp = &sysctl__children, *lsp2;
991 	struct rm_priotracker tracker;
992 	char buf[10];
993 
994 	SYSCTL_RLOCK(&tracker);
995 	while (namelen) {
996 		if (!lsp) {
997 			snprintf(buf,sizeof(buf),"%d",*name);
998 			if (req->oldidx)
999 				error = SYSCTL_OUT(req, ".", 1);
1000 			if (!error)
1001 				error = SYSCTL_OUT(req, buf, strlen(buf));
1002 			if (error)
1003 				goto out;
1004 			namelen--;
1005 			name++;
1006 			continue;
1007 		}
1008 		lsp2 = NULL;
1009 		SLIST_FOREACH(oid, lsp, oid_link) {
1010 			if (oid->oid_number != *name)
1011 				continue;
1012 
1013 			if (req->oldidx)
1014 				error = SYSCTL_OUT(req, ".", 1);
1015 			if (!error)
1016 				error = SYSCTL_OUT(req, oid->oid_name,
1017 					strlen(oid->oid_name));
1018 			if (error)
1019 				goto out;
1020 
1021 			namelen--;
1022 			name++;
1023 
1024 			if ((oid->oid_kind & CTLTYPE) != CTLTYPE_NODE)
1025 				break;
1026 
1027 			if (oid->oid_handler)
1028 				break;
1029 
1030 			lsp2 = SYSCTL_CHILDREN(oid);
1031 			break;
1032 		}
1033 		lsp = lsp2;
1034 	}
1035 	error = SYSCTL_OUT(req, "", 1);
1036  out:
1037 	SYSCTL_RUNLOCK(&tracker);
1038 	return (error);
1039 }
1040 
1041 /*
1042  * XXXRW/JA: Shouldn't return name data for nodes that we don't permit in
1043  * capability mode.
1044  */
1045 static SYSCTL_NODE(_sysctl, 1, name, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
1046     sysctl_sysctl_name, "");
1047 
1048 static int
1049 sysctl_sysctl_next_ls(struct sysctl_oid_list *lsp, int *name, u_int namelen,
1050 	int *next, int *len, int level, struct sysctl_oid **oidpp)
1051 {
1052 	struct sysctl_oid *oidp;
1053 
1054 	SYSCTL_ASSERT_LOCKED();
1055 	*len = level;
1056 	SLIST_FOREACH(oidp, lsp, oid_link) {
1057 		*next = oidp->oid_number;
1058 		*oidpp = oidp;
1059 
1060 		if (oidp->oid_kind & CTLFLAG_SKIP)
1061 			continue;
1062 
1063 		if (!namelen) {
1064 			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
1065 				return (0);
1066 			if (oidp->oid_handler)
1067 				/* We really should call the handler here...*/
1068 				return (0);
1069 			lsp = SYSCTL_CHILDREN(oidp);
1070 			if (!sysctl_sysctl_next_ls(lsp, 0, 0, next+1,
1071 				len, level+1, oidpp))
1072 				return (0);
1073 			goto emptynode;
1074 		}
1075 
1076 		if (oidp->oid_number < *name)
1077 			continue;
1078 
1079 		if (oidp->oid_number > *name) {
1080 			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
1081 				return (0);
1082 			if (oidp->oid_handler)
1083 				return (0);
1084 			lsp = SYSCTL_CHILDREN(oidp);
1085 			if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1,
1086 				next+1, len, level+1, oidpp))
1087 				return (0);
1088 			goto next;
1089 		}
1090 		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
1091 			continue;
1092 
1093 		if (oidp->oid_handler)
1094 			continue;
1095 
1096 		lsp = SYSCTL_CHILDREN(oidp);
1097 		if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1, next+1,
1098 			len, level+1, oidpp))
1099 			return (0);
1100 	next:
1101 		namelen = 1;
1102 	emptynode:
1103 		*len = level;
1104 	}
1105 	return (1);
1106 }
1107 
1108 static int
1109 sysctl_sysctl_next(SYSCTL_HANDLER_ARGS)
1110 {
1111 	int *name = (int *) arg1;
1112 	u_int namelen = arg2;
1113 	int i, j, error;
1114 	struct sysctl_oid *oid;
1115 	struct sysctl_oid_list *lsp = &sysctl__children;
1116 	struct rm_priotracker tracker;
1117 	int newoid[CTL_MAXNAME];
1118 
1119 	SYSCTL_RLOCK(&tracker);
1120 	i = sysctl_sysctl_next_ls(lsp, name, namelen, newoid, &j, 1, &oid);
1121 	SYSCTL_RUNLOCK(&tracker);
1122 	if (i)
1123 		return (ENOENT);
1124 	error = SYSCTL_OUT(req, newoid, j * sizeof (int));
1125 	return (error);
1126 }
1127 
1128 /*
1129  * XXXRW/JA: Shouldn't return next data for nodes that we don't permit in
1130  * capability mode.
1131  */
1132 static SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
1133     sysctl_sysctl_next, "");
1134 
1135 static int
1136 name2oid(char *name, int *oid, int *len, struct sysctl_oid **oidpp)
1137 {
1138 	struct sysctl_oid *oidp;
1139 	struct sysctl_oid_list *lsp = &sysctl__children;
1140 	char *p;
1141 
1142 	SYSCTL_ASSERT_LOCKED();
1143 
1144 	for (*len = 0; *len < CTL_MAXNAME;) {
1145 		p = strsep(&name, ".");
1146 
1147 		oidp = SLIST_FIRST(lsp);
1148 		for (;; oidp = SLIST_NEXT(oidp, oid_link)) {
1149 			if (oidp == NULL)
1150 				return (ENOENT);
1151 			if (strcmp(p, oidp->oid_name) == 0)
1152 				break;
1153 		}
1154 		*oid++ = oidp->oid_number;
1155 		(*len)++;
1156 
1157 		if (name == NULL || *name == '\0') {
1158 			if (oidpp)
1159 				*oidpp = oidp;
1160 			return (0);
1161 		}
1162 
1163 		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
1164 			break;
1165 
1166 		if (oidp->oid_handler)
1167 			break;
1168 
1169 		lsp = SYSCTL_CHILDREN(oidp);
1170 	}
1171 	return (ENOENT);
1172 }
1173 
1174 static int
1175 sysctl_sysctl_name2oid(SYSCTL_HANDLER_ARGS)
1176 {
1177 	char *p;
1178 	int error, oid[CTL_MAXNAME], len = 0;
1179 	struct sysctl_oid *op = NULL;
1180 	struct rm_priotracker tracker;
1181 
1182 	if (!req->newlen)
1183 		return (ENOENT);
1184 	if (req->newlen >= MAXPATHLEN)	/* XXX arbitrary, undocumented */
1185 		return (ENAMETOOLONG);
1186 
1187 	p = malloc(req->newlen+1, M_SYSCTL, M_WAITOK);
1188 
1189 	error = SYSCTL_IN(req, p, req->newlen);
1190 	if (error) {
1191 		free(p, M_SYSCTL);
1192 		return (error);
1193 	}
1194 
1195 	p [req->newlen] = '\0';
1196 
1197 	SYSCTL_RLOCK(&tracker);
1198 	error = name2oid(p, oid, &len, &op);
1199 	SYSCTL_RUNLOCK(&tracker);
1200 
1201 	free(p, M_SYSCTL);
1202 
1203 	if (error)
1204 		return (error);
1205 
1206 	error = SYSCTL_OUT(req, oid, len * sizeof *oid);
1207 	return (error);
1208 }
1209 
1210 /*
1211  * XXXRW/JA: Shouldn't return name2oid data for nodes that we don't permit in
1212  * capability mode.
1213  */
1214 SYSCTL_PROC(_sysctl, 3, name2oid,
1215     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE
1216     | CTLFLAG_CAPRW, 0, 0, sysctl_sysctl_name2oid, "I", "");
1217 
1218 static int
1219 sysctl_sysctl_oidfmt(SYSCTL_HANDLER_ARGS)
1220 {
1221 	struct sysctl_oid *oid;
1222 	struct rm_priotracker tracker;
1223 	int error;
1224 
1225 	SYSCTL_RLOCK(&tracker);
1226 	error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
1227 	if (error)
1228 		goto out;
1229 
1230 	if (oid->oid_fmt == NULL) {
1231 		error = ENOENT;
1232 		goto out;
1233 	}
1234 	error = SYSCTL_OUT(req, &oid->oid_kind, sizeof(oid->oid_kind));
1235 	if (error)
1236 		goto out;
1237 	error = SYSCTL_OUT(req, oid->oid_fmt, strlen(oid->oid_fmt) + 1);
1238  out:
1239 	SYSCTL_RUNLOCK(&tracker);
1240 	return (error);
1241 }
1242 
1243 
1244 static SYSCTL_NODE(_sysctl, 4, oidfmt, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
1245     sysctl_sysctl_oidfmt, "");
1246 
1247 static int
1248 sysctl_sysctl_oiddescr(SYSCTL_HANDLER_ARGS)
1249 {
1250 	struct sysctl_oid *oid;
1251 	struct rm_priotracker tracker;
1252 	int error;
1253 
1254 	SYSCTL_RLOCK(&tracker);
1255 	error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
1256 	if (error)
1257 		goto out;
1258 
1259 	if (oid->oid_descr == NULL) {
1260 		error = ENOENT;
1261 		goto out;
1262 	}
1263 	error = SYSCTL_OUT(req, oid->oid_descr, strlen(oid->oid_descr) + 1);
1264  out:
1265 	SYSCTL_RUNLOCK(&tracker);
1266 	return (error);
1267 }
1268 
1269 static SYSCTL_NODE(_sysctl, 5, oiddescr, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
1270     sysctl_sysctl_oiddescr, "");
1271 
1272 static int
1273 sysctl_sysctl_oidlabel(SYSCTL_HANDLER_ARGS)
1274 {
1275 	struct sysctl_oid *oid;
1276 	struct rm_priotracker tracker;
1277 	int error;
1278 
1279 	SYSCTL_RLOCK(&tracker);
1280 	error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
1281 	if (error)
1282 		goto out;
1283 
1284 	if (oid->oid_label == NULL) {
1285 		error = ENOENT;
1286 		goto out;
1287 	}
1288 	error = SYSCTL_OUT(req, oid->oid_label, strlen(oid->oid_label) + 1);
1289  out:
1290 	SYSCTL_RUNLOCK(&tracker);
1291 	return (error);
1292 }
1293 
1294 static SYSCTL_NODE(_sysctl, 6, oidlabel,
1295     CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD, sysctl_sysctl_oidlabel, "");
1296 
1297 /*
1298  * Default "handler" functions.
1299  */
1300 
1301 /*
1302  * Handle a bool.
1303  * Two cases:
1304  *     a variable:  point arg1 at it.
1305  *     a constant:  pass it in arg2.
1306  */
1307 
1308 int
1309 sysctl_handle_bool(SYSCTL_HANDLER_ARGS)
1310 {
1311 	uint8_t temp;
1312 	int error;
1313 
1314 	/*
1315 	 * Attempt to get a coherent snapshot by making a copy of the data.
1316 	 */
1317 	if (arg1)
1318 		temp = *(bool *)arg1 ? 1 : 0;
1319 	else
1320 		temp = arg2 ? 1 : 0;
1321 
1322 	error = SYSCTL_OUT(req, &temp, sizeof(temp));
1323 	if (error || !req->newptr)
1324 		return (error);
1325 
1326 	if (!arg1)
1327 		error = EPERM;
1328 	else {
1329 		error = SYSCTL_IN(req, &temp, sizeof(temp));
1330 		if (!error)
1331 			*(bool *)arg1 = temp ? 1 : 0;
1332 	}
1333 	return (error);
1334 }
1335 
1336 /*
1337  * Handle an int8_t, signed or unsigned.
1338  * Two cases:
1339  *     a variable:  point arg1 at it.
1340  *     a constant:  pass it in arg2.
1341  */
1342 
1343 int
1344 sysctl_handle_8(SYSCTL_HANDLER_ARGS)
1345 {
1346 	int8_t tmpout;
1347 	int error = 0;
1348 
1349 	/*
1350 	 * Attempt to get a coherent snapshot by making a copy of the data.
1351 	 */
1352 	if (arg1)
1353 		tmpout = *(int8_t *)arg1;
1354 	else
1355 		tmpout = arg2;
1356 	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));
1357 
1358 	if (error || !req->newptr)
1359 		return (error);
1360 
1361 	if (!arg1)
1362 		error = EPERM;
1363 	else
1364 		error = SYSCTL_IN(req, arg1, sizeof(tmpout));
1365 	return (error);
1366 }
1367 
1368 /*
1369  * Handle an int16_t, signed or unsigned.
1370  * Two cases:
1371  *     a variable:  point arg1 at it.
1372  *     a constant:  pass it in arg2.
1373  */
1374 
1375 int
1376 sysctl_handle_16(SYSCTL_HANDLER_ARGS)
1377 {
1378 	int16_t tmpout;
1379 	int error = 0;
1380 
1381 	/*
1382 	 * Attempt to get a coherent snapshot by making a copy of the data.
1383 	 */
1384 	if (arg1)
1385 		tmpout = *(int16_t *)arg1;
1386 	else
1387 		tmpout = arg2;
1388 	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));
1389 
1390 	if (error || !req->newptr)
1391 		return (error);
1392 
1393 	if (!arg1)
1394 		error = EPERM;
1395 	else
1396 		error = SYSCTL_IN(req, arg1, sizeof(tmpout));
1397 	return (error);
1398 }
1399 
1400 /*
1401  * Handle an int32_t, signed or unsigned.
1402  * Two cases:
1403  *     a variable:  point arg1 at it.
1404  *     a constant:  pass it in arg2.
1405  */
1406 
1407 int
1408 sysctl_handle_32(SYSCTL_HANDLER_ARGS)
1409 {
1410 	int32_t tmpout;
1411 	int error = 0;
1412 
1413 	/*
1414 	 * Attempt to get a coherent snapshot by making a copy of the data.
1415 	 */
1416 	if (arg1)
1417 		tmpout = *(int32_t *)arg1;
1418 	else
1419 		tmpout = arg2;
1420 	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));
1421 
1422 	if (error || !req->newptr)
1423 		return (error);
1424 
1425 	if (!arg1)
1426 		error = EPERM;
1427 	else
1428 		error = SYSCTL_IN(req, arg1, sizeof(tmpout));
1429 	return (error);
1430 }
1431 
1432 /*
1433  * Handle an int, signed or unsigned.
1434  * Two cases:
1435  *     a variable:  point arg1 at it.
1436  *     a constant:  pass it in arg2.
1437  */
1438 
1439 int
1440 sysctl_handle_int(SYSCTL_HANDLER_ARGS)
1441 {
1442 	int tmpout, error = 0;
1443 
1444 	/*
1445 	 * Attempt to get a coherent snapshot by making a copy of the data.
1446 	 */
1447 	if (arg1)
1448 		tmpout = *(int *)arg1;
1449 	else
1450 		tmpout = arg2;
1451 	error = SYSCTL_OUT(req, &tmpout, sizeof(int));
1452 
1453 	if (error || !req->newptr)
1454 		return (error);
1455 
1456 	if (!arg1)
1457 		error = EPERM;
1458 	else
1459 		error = SYSCTL_IN(req, arg1, sizeof(int));
1460 	return (error);
1461 }
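/*
 * Illustrative sketch (not part of the original file): a custom handler that
 * reuses sysctl_handle_int() for the copy-in/copy-out and adds validation;
 * "example_knob" and the _debug parent are assumptions.
 */
#if 0
static int example_knob = 10;

static int
sysctl_example_knob(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = example_knob;
	/* Copy the current value out and, on a write, the new value in. */
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val < 1 || val > 100)
		return (EINVAL);
	example_knob = val;
	return (0);
}
SYSCTL_PROC(_debug, OID_AUTO, example_knob,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_example_knob, "I", "Example validated integer knob");
#endif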
1462 
1463 /*
 1464  * Based on sysctl_handle_int(); converts milliseconds into ticks.
1465  * Note: this is used by TCP.
1466  */
1467 
1468 int
1469 sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
1470 {
1471 	int error, s, tt;
1472 
1473 	tt = *(int *)arg1;
1474 	s = (int)((int64_t)tt * 1000 / hz);
1475 
1476 	error = sysctl_handle_int(oidp, &s, 0, req);
1477 	if (error || !req->newptr)
1478 		return (error);
1479 
1480 	tt = (int)((int64_t)s * hz / 1000);
1481 	if (tt < 1)
1482 		return (EINVAL);
1483 
1484 	*(int *)arg1 = tt;
1485 	return (0);
1486 }
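/*
 * Illustrative sketch (not part of the original file): how a tick-valued
 * variable can be exposed in milliseconds, in the style TCP uses;
 * "example_timeout_ticks" and the _debug parent are assumptions.
 */
#if 0
static int example_timeout_ticks;
SYSCTL_PROC(_debug, OID_AUTO, example_timeout_msec,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &example_timeout_ticks, 0,
    sysctl_msec_to_ticks, "I", "Example timeout (milliseconds)");
#endif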
1487 
1488 
1489 /*
1490  * Handle a long, signed or unsigned.
1491  * Two cases:
1492  *     a variable:  point arg1 at it.
1493  *     a constant:  pass it in arg2.
1494  */
1495 
1496 int
1497 sysctl_handle_long(SYSCTL_HANDLER_ARGS)
1498 {
1499 	int error = 0;
1500 	long tmplong;
1501 #ifdef SCTL_MASK32
1502 	int tmpint;
1503 #endif
1504 
1505 	/*
1506 	 * Attempt to get a coherent snapshot by making a copy of the data.
1507 	 */
1508 	if (arg1)
1509 		tmplong = *(long *)arg1;
1510 	else
1511 		tmplong = arg2;
1512 #ifdef SCTL_MASK32
1513 	if (req->flags & SCTL_MASK32) {
1514 		tmpint = tmplong;
1515 		error = SYSCTL_OUT(req, &tmpint, sizeof(int));
1516 	} else
1517 #endif
1518 		error = SYSCTL_OUT(req, &tmplong, sizeof(long));
1519 
1520 	if (error || !req->newptr)
1521 		return (error);
1522 
1523 	if (!arg1)
1524 		error = EPERM;
1525 #ifdef SCTL_MASK32
1526 	else if (req->flags & SCTL_MASK32) {
1527 		error = SYSCTL_IN(req, &tmpint, sizeof(int));
1528 		*(long *)arg1 = (long)tmpint;
1529 	}
1530 #endif
1531 	else
1532 		error = SYSCTL_IN(req, arg1, sizeof(long));
1533 	return (error);
1534 }
1535 
1536 /*
1537  * Handle a 64 bit int, signed or unsigned.
1538  * Two cases:
1539  *     a variable:  point arg1 at it.
1540  *     a constant:  pass it in arg2.
1541  */
1542 int
1543 sysctl_handle_64(SYSCTL_HANDLER_ARGS)
1544 {
1545 	int error = 0;
1546 	uint64_t tmpout;
1547 
1548 	/*
1549 	 * Attempt to get a coherent snapshot by making a copy of the data.
1550 	 */
1551 	if (arg1)
1552 		tmpout = *(uint64_t *)arg1;
1553 	else
1554 		tmpout = arg2;
1555 	error = SYSCTL_OUT(req, &tmpout, sizeof(uint64_t));
1556 
1557 	if (error || !req->newptr)
1558 		return (error);
1559 
1560 	if (!arg1)
1561 		error = EPERM;
1562 	else
1563 		error = SYSCTL_IN(req, arg1, sizeof(uint64_t));
1564 	return (error);
1565 }
1566 
1567 /*
1568  * Handle our generic '\0' terminated 'C' string.
1569  * Two cases:
1570  * 	a variable string:  point arg1 at it, arg2 is max length.
1571  * 	a constant string:  point arg1 at it, arg2 is zero.
1572  */
1573 
1574 int
1575 sysctl_handle_string(SYSCTL_HANDLER_ARGS)
1576 {
1577 	size_t outlen;
1578 	int error = 0, ro_string = 0;
1579 
1580 	/*
1581 	 * A zero-length buffer indicates a fixed size read-only
1582 	 * string:
1583 	 */
1584 	if (arg2 == 0) {
1585 		arg2 = strlen((char *)arg1) + 1;
1586 		ro_string = 1;
1587 	}
1588 
1589 	if (req->oldptr != NULL) {
1590 		char *tmparg;
1591 
1592 		if (ro_string) {
1593 			tmparg = arg1;
1594 		} else {
1595 			/* try to make a coherent snapshot of the string */
1596 			tmparg = malloc(arg2, M_SYSCTLTMP, M_WAITOK);
1597 			memcpy(tmparg, arg1, arg2);
1598 		}
1599 
1600 		outlen = strnlen(tmparg, arg2 - 1) + 1;
1601 		error = SYSCTL_OUT(req, tmparg, outlen);
1602 
1603 		if (!ro_string)
1604 			free(tmparg, M_SYSCTLTMP);
1605 	} else {
1606 		outlen = strnlen((char *)arg1, arg2 - 1) + 1;
1607 		error = SYSCTL_OUT(req, NULL, outlen);
1608 	}
1609 	if (error || !req->newptr)
1610 		return (error);
1611 
1612 	if ((req->newlen - req->newidx) >= arg2) {
1613 		error = EINVAL;
1614 	} else {
1615 		arg2 = (req->newlen - req->newidx);
1616 		error = SYSCTL_IN(req, arg1, arg2);
1617 		((char *)arg1)[arg2] = '\0';
1618 	}
1619 	return (error);
1620 }
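/*
 * Illustrative sketch (not part of the original file): the two string cases
 * described above, a writable fixed-size buffer (arg2 = buffer size) and a
 * read-only constant string (arg2 = 0); the "example" names are assumptions.
 */
#if 0
static char example_name[32] = "default";
SYSCTL_STRING(_debug, OID_AUTO, example_name, CTLFLAG_RW,
    example_name, sizeof(example_name), "Writable example string");

static char example_version[] = "1.0-EXAMPLE";
SYSCTL_STRING(_debug, OID_AUTO, example_version, CTLFLAG_RD,
    example_version, 0, "Read-only example string");
#endif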
1621 
1622 /*
1623  * Handle any kind of opaque data.
1624  * arg1 points to it, arg2 is the size.
1625  */
1626 
1627 int
1628 sysctl_handle_opaque(SYSCTL_HANDLER_ARGS)
1629 {
1630 	int error, tries;
1631 	u_int generation;
1632 	struct sysctl_req req2;
1633 
1634 	/*
1635 	 * Attempt to get a coherent snapshot, by using the thread
1636 	 * pre-emption counter updated from within mi_switch() to
1637 	 * determine if we were pre-empted during a bcopy() or
1638 	 * copyout(). Make 3 attempts at doing this before giving up.
1639 	 * If we encounter an error, stop immediately.
1640 	 */
1641 	tries = 0;
1642 	req2 = *req;
1643 retry:
1644 	generation = curthread->td_generation;
1645 	error = SYSCTL_OUT(req, arg1, arg2);
1646 	if (error)
1647 		return (error);
1648 	tries++;
1649 	if (generation != curthread->td_generation && tries < 3) {
1650 		*req = req2;
1651 		goto retry;
1652 	}
1653 
1654 	error = SYSCTL_IN(req, arg1, arg2);
1655 
1656 	return (error);
1657 }
1658 
1659 /*
1660  * Transfer functions to/from kernel space.
1661  * XXX: rather untested at this point
1662  */
1663 static int
1664 sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l)
1665 {
1666 	size_t i = 0;
1667 
1668 	if (req->oldptr) {
1669 		i = l;
1670 		if (req->oldlen <= req->oldidx)
1671 			i = 0;
1672 		else
1673 			if (i > req->oldlen - req->oldidx)
1674 				i = req->oldlen - req->oldidx;
1675 		if (i > 0)
1676 			bcopy(p, (char *)req->oldptr + req->oldidx, i);
1677 	}
1678 	req->oldidx += l;
1679 	if (req->oldptr && i != l)
1680 		return (ENOMEM);
1681 	return (0);
1682 }
1683 
1684 static int
1685 sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l)
1686 {
1687 	if (!req->newptr)
1688 		return (0);
1689 	if (req->newlen - req->newidx < l)
1690 		return (EINVAL);
1691 	bcopy((char *)req->newptr + req->newidx, p, l);
1692 	req->newidx += l;
1693 	return (0);
1694 }
1695 
1696 int
1697 kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
1698     size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags)
1699 {
1700 	int error = 0;
1701 	struct sysctl_req req;
1702 
1703 	bzero(&req, sizeof req);
1704 
1705 	req.td = td;
1706 	req.flags = flags;
1707 
1708 	if (oldlenp) {
1709 		req.oldlen = *oldlenp;
1710 	}
1711 	req.validlen = req.oldlen;
1712 
1713 	if (old) {
1714 		req.oldptr= old;
1715 	}
1716 
1717 	if (new != NULL) {
1718 		req.newlen = newlen;
1719 		req.newptr = new;
1720 	}
1721 
1722 	req.oldfunc = sysctl_old_kernel;
1723 	req.newfunc = sysctl_new_kernel;
1724 	req.lock = REQ_UNWIRED;
1725 
1726 	error = sysctl_root(0, name, namelen, &req);
1727 
1728 	if (req.lock == REQ_WIRED && req.validlen > 0)
1729 		vsunlock(req.oldptr, req.validlen);
1730 
1731 	if (error && error != ENOMEM)
1732 		return (error);
1733 
1734 	if (retval) {
1735 		if (req.oldptr && req.oldidx > req.validlen)
1736 			*retval = req.validlen;
1737 		else
1738 			*retval = req.oldidx;
1739 	}
1740 	return (error);
1741 }
1742 
1743 int
1744 kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp,
1745     void *new, size_t newlen, size_t *retval, int flags)
1746 {
1747         int oid[CTL_MAXNAME];
1748         size_t oidlen, plen;
1749 	int error;
1750 
1751 	oid[0] = 0;		/* sysctl internal magic */
1752 	oid[1] = 3;		/* name2oid */
1753 	oidlen = sizeof(oid);
1754 
1755 	error = kernel_sysctl(td, oid, 2, oid, &oidlen,
1756 	    (void *)name, strlen(name), &plen, flags);
1757 	if (error)
1758 		return (error);
1759 
1760 	error = kernel_sysctl(td, oid, plen / sizeof(int), old, oldlenp,
1761 	    new, newlen, retval, flags);
1762 	return (error);
1763 }
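/*
 * Illustrative sketch (not part of the original file): reading an existing
 * sysctl by name from elsewhere in the kernel; the function name is an
 * assumption.
 */
#if 0
static int
example_read_hostname(struct thread *td, char *buf, size_t buflen)
{
	size_t len = buflen;

	return (kernel_sysctlbyname(td, "kern.hostname", buf, &len,
	    NULL, 0, NULL, 0));
}
#endif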
1764 
1765 /*
1766  * Transfer function to/from user space.
1767  */
1768 static int
1769 sysctl_old_user(struct sysctl_req *req, const void *p, size_t l)
1770 {
1771 	size_t i, len, origidx;
1772 	int error;
1773 
1774 	origidx = req->oldidx;
1775 	req->oldidx += l;
1776 	if (req->oldptr == NULL)
1777 		return (0);
1778 	/*
1779 	 * If we have not wired the user supplied buffer and we are currently
1780 	 * holding locks, drop a witness warning, as it's possible that
1781 	 * write operations to the user page can sleep.
1782 	 */
1783 	if (req->lock != REQ_WIRED)
1784 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1785 		    "sysctl_old_user()");
1786 	i = l;
1787 	len = req->validlen;
1788 	if (len <= origidx)
1789 		i = 0;
1790 	else {
1791 		if (i > len - origidx)
1792 			i = len - origidx;
1793 		if (req->lock == REQ_WIRED) {
1794 			error = copyout_nofault(p, (char *)req->oldptr +
1795 			    origidx, i);
1796 		} else
1797 			error = copyout(p, (char *)req->oldptr + origidx, i);
1798 		if (error != 0)
1799 			return (error);
1800 	}
1801 	if (i < l)
1802 		return (ENOMEM);
1803 	return (0);
1804 }
1805 
1806 static int
1807 sysctl_new_user(struct sysctl_req *req, void *p, size_t l)
1808 {
1809 	int error;
1810 
1811 	if (!req->newptr)
1812 		return (0);
1813 	if (req->newlen - req->newidx < l)
1814 		return (EINVAL);
1815 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1816 	    "sysctl_new_user()");
1817 	error = copyin((char *)req->newptr + req->newidx, p, l);
1818 	req->newidx += l;
1819 	return (error);
1820 }
1821 
1822 /*
1823  * Wire the user space destination buffer.  If set to a value greater than
1824  * zero, the len parameter limits the maximum amount of wired memory.
1825  */
1826 int
1827 sysctl_wire_old_buffer(struct sysctl_req *req, size_t len)
1828 {
1829 	int ret;
1830 	size_t wiredlen;
1831 
1832 	wiredlen = (len > 0 && len < req->oldlen) ? len : req->oldlen;
1833 	ret = 0;
1834 	if (req->lock != REQ_WIRED && req->oldptr &&
1835 	    req->oldfunc == sysctl_old_user) {
1836 		if (wiredlen != 0) {
1837 			ret = vslock(req->oldptr, wiredlen);
1838 			if (ret != 0) {
1839 				if (ret != ENOMEM)
1840 					return (ret);
1841 				wiredlen = 0;
1842 			}
1843 		}
1844 		req->lock = REQ_WIRED;
1845 		req->validlen = wiredlen;
1846 	}
1847 	return (0);
1848 }
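/*
 * Illustrative sketch (not part of the original file): a handler that wires
 * the destination buffer before taking a non-sleepable lock, so SYSCTL_OUT()
 * cannot fault while the mutex is held; "example_mtx" and "example_stats"
 * are assumptions.
 */
#if 0
static int
sysctl_example_stats(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_wire_old_buffer(req, sizeof(example_stats));
	if (error != 0)
		return (error);
	mtx_lock(&example_mtx);
	error = SYSCTL_OUT(req, &example_stats, sizeof(example_stats));
	mtx_unlock(&example_mtx);
	return (error);
}
#endif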
1849 
1850 int
1851 sysctl_find_oid(int *name, u_int namelen, struct sysctl_oid **noid,
1852     int *nindx, struct sysctl_req *req)
1853 {
1854 	struct sysctl_oid_list *lsp;
1855 	struct sysctl_oid *oid;
1856 	int indx;
1857 
1858 	SYSCTL_ASSERT_LOCKED();
1859 	lsp = &sysctl__children;
1860 	indx = 0;
1861 	while (indx < CTL_MAXNAME) {
1862 		SLIST_FOREACH(oid, lsp, oid_link) {
1863 			if (oid->oid_number == name[indx])
1864 				break;
1865 		}
1866 		if (oid == NULL)
1867 			return (ENOENT);
1868 
1869 		indx++;
1870 		if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
1871 			if (oid->oid_handler != NULL || indx == namelen) {
1872 				*noid = oid;
1873 				if (nindx != NULL)
1874 					*nindx = indx;
1875 				KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
1876 				    ("%s found DYING node %p", __func__, oid));
1877 				return (0);
1878 			}
1879 			lsp = SYSCTL_CHILDREN(oid);
1880 		} else if (indx == namelen) {
1881 			*noid = oid;
1882 			if (nindx != NULL)
1883 				*nindx = indx;
1884 			KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
1885 			    ("%s found DYING node %p", __func__, oid));
1886 			return (0);
1887 		} else {
1888 			return (ENOTDIR);
1889 		}
1890 	}
1891 	return (ENOENT);
1892 }
1893 
1894 /*
1895  * Traverse our tree, and find the right node, execute whatever it points
1896  * to, and return the resulting error code.
1897  */
1898 
1899 static int
1900 sysctl_root(SYSCTL_HANDLER_ARGS)
1901 {
1902 	struct sysctl_oid *oid;
1903 	struct rm_priotracker tracker;
1904 	int error, indx, lvl;
1905 
1906 	SYSCTL_RLOCK(&tracker);
1907 
1908 	error = sysctl_find_oid(arg1, arg2, &oid, &indx, req);
1909 	if (error)
1910 		goto out;
1911 
1912 	if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
1913 		/*
1914 		 * You can't call a sysctl when it's a node, but has
1915 		 * no handler.  Inform the user that it's a node.
1916 		 * The indx may or may not be the same as namelen.
1917 		 */
1918 		if (oid->oid_handler == NULL) {
1919 			error = EISDIR;
1920 			goto out;
1921 		}
1922 	}
1923 
1924 	/* Is this sysctl writable? */
1925 	if (req->newptr && !(oid->oid_kind & CTLFLAG_WR)) {
1926 		error = EPERM;
1927 		goto out;
1928 	}
1929 
1930 	KASSERT(req->td != NULL, ("sysctl_root(): req->td == NULL"));
1931 
1932 #ifdef CAPABILITY_MODE
1933 	/*
1934 	 * If the process is in capability mode, then don't permit reading or
1935 	 * writing unless specifically granted for the node.
1936 	 */
1937 	if (IN_CAPABILITY_MODE(req->td)) {
1938 		if ((req->oldptr && !(oid->oid_kind & CTLFLAG_CAPRD)) ||
1939 		    (req->newptr && !(oid->oid_kind & CTLFLAG_CAPWR))) {
1940 			error = EPERM;
1941 			goto out;
1942 		}
1943 	}
1944 #endif
1945 
1946 	/* Is this sysctl sensitive to securelevels? */
1947 	if (req->newptr && (oid->oid_kind & CTLFLAG_SECURE)) {
1948 		lvl = (oid->oid_kind & CTLMASK_SECURE) >> CTLSHIFT_SECURE;
1949 		error = securelevel_gt(req->td->td_ucred, lvl);
1950 		if (error)
1951 			goto out;
1952 	}
1953 
1954 	/* Is this sysctl writable by only privileged users? */
1955 	if (req->newptr && !(oid->oid_kind & CTLFLAG_ANYBODY)) {
1956 		int priv;
1957 
1958 		if (oid->oid_kind & CTLFLAG_PRISON)
1959 			priv = PRIV_SYSCTL_WRITEJAIL;
1960 #ifdef VIMAGE
1961 		else if ((oid->oid_kind & CTLFLAG_VNET) &&
1962 		     prison_owns_vnet(req->td->td_ucred))
1963 			priv = PRIV_SYSCTL_WRITEJAIL;
1964 #endif
1965 		else
1966 			priv = PRIV_SYSCTL_WRITE;
1967 		error = priv_check(req->td, priv);
1968 		if (error)
1969 			goto out;
1970 	}
1971 
1972 	if (!oid->oid_handler) {
1973 		error = EINVAL;
1974 		goto out;
1975 	}
1976 
1977 	if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
1978 		arg1 = (int *)arg1 + indx;
1979 		arg2 -= indx;
1980 	} else {
1981 		arg1 = oid->oid_arg1;
1982 		arg2 = oid->oid_arg2;
1983 	}
1984 #ifdef MAC
1985 	error = mac_system_check_sysctl(req->td->td_ucred, oid, arg1, arg2,
1986 	    req);
1987 	if (error != 0)
1988 		goto out;
1989 #endif
1990 #ifdef VIMAGE
1991 	if ((oid->oid_kind & CTLFLAG_VNET) && arg1 != NULL)
1992 		arg1 = (void *)(curvnet->vnet_data_base + (uintptr_t)arg1);
1993 #endif
1994 	error = sysctl_root_handler_locked(oid, arg1, arg2, req, &tracker);
1995 
1996 out:
1997 	SYSCTL_RUNLOCK(&tracker);
1998 	return (error);
1999 }
2000 
2001 #ifndef _SYS_SYSPROTO_H_
2002 struct sysctl_args {
2003 	int	*name;
2004 	u_int	namelen;
2005 	void	*old;
2006 	size_t	*oldlenp;
2007 	void	*new;
2008 	size_t	newlen;
2009 };
2010 #endif
2011 int
2012 sys___sysctl(struct thread *td, struct sysctl_args *uap)
2013 {
2014 	int error, i, name[CTL_MAXNAME];
2015 	size_t j;
2016 
2017 	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
2018 		return (EINVAL);
2019 
2020  	error = copyin(uap->name, &name, uap->namelen * sizeof(int));
2021  	if (error)
2022 		return (error);
2023 
2024 	error = userland_sysctl(td, name, uap->namelen,
2025 		uap->old, uap->oldlenp, 0,
2026 		uap->new, uap->newlen, &j, 0);
2027 	if (error && error != ENOMEM)
2028 		return (error);
2029 	if (uap->oldlenp) {
2030 		i = copyout(&j, uap->oldlenp, sizeof(j));
2031 		if (i)
2032 			return (i);
2033 	}
2034 	return (error);
2035 }
2036 
2037 /*
2038  * This is used from various compatibility syscalls too.  That's why name
2039  * must be in kernel space.
2040  */
2041 int
2042 userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
2043     size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval,
2044     int flags)
2045 {
2046 	int error = 0, memlocked;
2047 	struct sysctl_req req;
2048 
2049 	bzero(&req, sizeof req);
2050 
2051 	req.td = td;
2052 	req.flags = flags;
2053 
2054 	if (oldlenp) {
2055 		if (inkernel) {
2056 			req.oldlen = *oldlenp;
2057 		} else {
2058 			error = copyin(oldlenp, &req.oldlen, sizeof(*oldlenp));
2059 			if (error)
2060 				return (error);
2061 		}
2062 	}
2063 	req.validlen = req.oldlen;
2064 
2065 	if (old) {
2066 		if (!useracc(old, req.oldlen, VM_PROT_WRITE))
2067 			return (EFAULT);
2068 		req.oldptr= old;
2069 	}
2070 
2071 	if (new != NULL) {
2072 		if (!useracc(new, newlen, VM_PROT_READ))
2073 			return (EFAULT);
2074 		req.newlen = newlen;
2075 		req.newptr = new;
2076 	}
2077 
2078 	req.oldfunc = sysctl_old_user;
2079 	req.newfunc = sysctl_new_user;
2080 	req.lock = REQ_UNWIRED;
2081 
2082 #ifdef KTRACE
2083 	if (KTRPOINT(curthread, KTR_SYSCTL))
2084 		ktrsysctl(name, namelen);
2085 #endif
2086 
2087 	if (req.oldptr && req.oldlen > PAGE_SIZE) {
2088 		memlocked = 1;
2089 		sx_xlock(&sysctlmemlock);
2090 	} else
2091 		memlocked = 0;
2092 	CURVNET_SET(TD_TO_VNET(td));
2093 
2094 	for (;;) {
2095 		req.oldidx = 0;
2096 		req.newidx = 0;
2097 		error = sysctl_root(0, name, namelen, &req);
2098 		if (error != EAGAIN)
2099 			break;
2100 		kern_yield(PRI_USER);
2101 	}
2102 
2103 	CURVNET_RESTORE();
2104 
2105 	if (req.lock == REQ_WIRED && req.validlen > 0)
2106 		vsunlock(req.oldptr, req.validlen);
2107 	if (memlocked)
2108 		sx_xunlock(&sysctlmemlock);
2109 
2110 	if (error && error != ENOMEM)
2111 		return (error);
2112 
2113 	if (retval) {
2114 		if (req.oldptr && req.oldidx > req.validlen)
2115 			*retval = req.validlen;
2116 		else
2117 			*retval = req.oldidx;
2118 	}
2119 	return (error);
2120 }
2121 
2122 /*
2123  * Drain into a sysctl struct.  The user buffer should be wired if a page
 2124  * fault would be a problem.
2125  */
2126 static int
2127 sbuf_sysctl_drain(void *arg, const char *data, int len)
2128 {
2129 	struct sysctl_req *req = arg;
2130 	int error;
2131 
2132 	error = SYSCTL_OUT(req, data, len);
2133 	KASSERT(error >= 0, ("Got unexpected negative value %d", error));
2134 	return (error == 0 ? len : -error);
2135 }
2136 
2137 struct sbuf *
2138 sbuf_new_for_sysctl(struct sbuf *s, char *buf, int length,
2139     struct sysctl_req *req)
2140 {
2141 
2142 	/* Supply a default buffer size if none given. */
2143 	if (buf == NULL && length == 0)
2144 		length = 64;
2145 	s = sbuf_new(s, buf, length, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
2146 	sbuf_set_drain(s, sbuf_sysctl_drain, req);
2147 	return (s);
2148 }
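/*
 * Illustrative sketch (not part of the original file): a handler that formats
 * its output through an sbuf drained directly into the request.
 */
#if 0
static int
sysctl_example_sbuf(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sb;
	int error;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sb, NULL, 128, req);
	sbuf_printf(&sb, "example: %d", 42);
	/* Flushes any remaining bytes to the request via the drain. */
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}
#endif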
2149