/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  Solaris Porting Layer (SPL) Proc Implementation.
 */

#include <sys/systeminfo.h>
#include <sys/kstat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/taskq.h>
#include <sys/proc.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/version.h>

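/*
 * When the grsecurity/PaX "constify" gcc plugin is in use it marks
 * structures such as struct ctl_table read-only; the __no_const
 * annotation opts this typedef out so the dummy tables built by the
 * handlers below remain writable at runtime.
 */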
#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif

static unsigned long table_min = 0;
static unsigned long table_max = ~0;

static struct ctl_table_header *spl_header = NULL;
static struct proc_dir_entry *proc_spl = NULL;
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
static struct proc_dir_entry *proc_spl_taskq_all = NULL;
static struct proc_dir_entry *proc_spl_taskq = NULL;
struct proc_dir_entry *proc_spl_kstat = NULL;

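/*
 * Copy a string from user space into 'kbuffer', strip any trailing
 * whitespace, and NUL terminate the result.  Returns 0 on success or a
 * negative errno (-EOVERFLOW, -EFAULT, -EINVAL) on failure.
 */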
static int
proc_copyin_string(char *kbuffer, int kbuffer_size, const char *ubuffer,
    int ubuffer_size)
{
	int size;

	if (ubuffer_size > kbuffer_size)
		return (-EOVERFLOW);

	if (copy_from_user((void *)kbuffer, (void *)ubuffer, ubuffer_size))
		return (-EFAULT);

	/* strip trailing whitespace */
	size = strnlen(kbuffer, ubuffer_size);
	while (--size >= 0)
		if (!isspace(kbuffer[size]))
			break;

	/* empty string */
	if (size < 0)
		return (-EINVAL);

	/* no space to terminate */
	if (size == kbuffer_size - 1)
		return (-EOVERFLOW);

	kbuffer[size + 1] = 0;
	return (0);
}

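/*
 * Copy 'kbuffer' out to the user buffer, optionally appending a single
 * character.  Unlike proc_copyin_string() this returns the number of
 * bytes copied on success rather than 0.
 */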
static int
proc_copyout_string(char *ubuffer, int ubuffer_size, const char *kbuffer,
    char *append)
{
	/*
	 * NB if 'append' != NULL, it's a single character to append to the
	 * copied out string - usually "\n" for /proc entries and ""
	 * (i.e. a terminating zero byte) for sysctl entries.
	 */
	int size = MIN(strlen(kbuffer), ubuffer_size);

	if (copy_to_user(ubuffer, kbuffer, size))
		return (-EFAULT);

	if (append != NULL && size < ubuffer_size) {
		if (copy_to_user(ubuffer + size, append, 1))
			return (-EFAULT);

		size++;
	}

	return (size);
}

#ifdef DEBUG_KMEM
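/*
 * Read-only handler for the kmem_used sysctl.  The atomic counter in
 * table->data is sampled into a local unsigned long and reported via
 * proc_doulongvec_minmax(); writes simply advance the file position.
 */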
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val;
	spl_ctl_table dummy = *table;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
#ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
#else
		val = atomic_read((atomic_t *)table->data);
#endif /* HAVE_ATOMIC64_T */
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */

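/*
 * Handler for the slab_kvmem_* sysctls.  table->data encodes a flag
 * mask (the kmem/vmem selector plus one of KMC_TOTAL, KMC_ALLOC or
 * KMC_MAX); the matching statistic is summed over every registered SPL
 * kmem cache while holding spl_kmem_cache_sem.
 */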
static int
proc_doslab(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val = 0, mask;
	spl_ctl_table dummy = *table;
	spl_kmem_cache_t *skc = NULL;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
		down_read(&spl_kmem_cache_sem);
		mask = (unsigned long)table->data;

		list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {

			/* Only use slabs of the correct kmem/vmem type */
			if (!(skc->skc_flags & mask))
				continue;

			/* Sum the specified field for selected slabs */
			switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
			case KMC_TOTAL:
				val += skc->skc_slab_size * skc->skc_slab_total;
				break;
			case KMC_ALLOC:
				val += skc->skc_obj_size * skc->skc_obj_alloc;
				break;
			case KMC_MAX:
				val += skc->skc_obj_size * skc->skc_obj_max;
				break;
			}
		}

		up_read(&spl_kmem_cache_sem);
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}

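/*
 * Read/write handler for the SPL hostid.  Writes accept a hex string
 * (with no leading "0x") and store the parsed value in spl_hostid;
 * reads report zone_get_hostid(NULL) formatted as hex.
 */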
static int
proc_dohostid(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len, rc = 0;
	char *end, str[32];

	if (write) {
		/*
		 * We can't use proc_doulongvec_minmax() in the write
		 * case here because the hostid, while a hex value, has
		 * no leading 0x, which confuses the helper function.
		 */
		rc = proc_copyin_string(str, sizeof (str), buffer, *lenp);
		if (rc < 0)
			return (rc);

		spl_hostid = simple_strtoul(str, &end, 16);
		if (str == end)
			return (-EINVAL);

	} else {
		len = snprintf(str, sizeof (str), "%lx",
		    (unsigned long) zone_get_hostid(NULL));
		if (*ppos >= len)
			rc = 0;
		else
			rc = proc_copyout_string(buffer,
			    *lenp, str + *ppos, "\n");

		if (rc >= 0) {
			*lenp = rc;
			*ppos += rc;
		}
	}

	return (rc);
}

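/*
 * Column headers for /proc/spl/taskq and /proc/spl/taskq-all: taskq
 * name/instance, active threads, total threads, threads being spawned,
 * maximum threads, priority, min/max/current allocated entries and the
 * taskq flags.
 */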
static void
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
}

/* indices into the lheads array below */
#define	LHEAD_PEND	0
#define	LHEAD_PRIO	1
#define	LHEAD_DELAY	2
#define	LHEAD_WAIT	3
#define	LHEAD_ACTIVE	4
#define	LHEAD_SIZE	5

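/*
 * spl_max_show_tasks bounds how many pending, priority, delay and
 * waitq entries are printed per taskq before the output is marked
 * "(truncated)"; a value of 0 disables the limit.  As a 0644 module
 * parameter it can normally be changed at runtime, e.g. through
 * /sys/module/spl/parameters/spl_max_show_tasks.
 */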
/* BEGIN CSTYLED */
static unsigned int spl_max_show_tasks = 512;
module_param(spl_max_show_tasks, uint, 0644);
MODULE_PARM_DESC(spl_max_show_tasks, "Max number of tasks shown in taskq proc");
/* END CSTYLED */

static int
taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
{
	taskq_t *tq = p;
	taskq_thread_t *tqt = NULL;
	spl_wait_queue_entry_t *wq;
	struct task_struct *tsk;
	taskq_ent_t *tqe;
	char name[100];
	struct list_head *lheads[LHEAD_SIZE], *lh;
	static char *list_names[LHEAD_SIZE] =
	    { "pend", "prio", "delay", "wait", "active" };
	int i, j, have_lheads = 0;
	unsigned long wflags, flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

	/* get the various lists and check whether they're empty */
	lheads[LHEAD_PEND] = &tq->tq_pend_list;
	lheads[LHEAD_PRIO] = &tq->tq_prio_list;
	lheads[LHEAD_DELAY] = &tq->tq_delay_list;
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.head;
#else
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
#endif
	lheads[LHEAD_ACTIVE] = &tq->tq_active_list;

	for (i = 0; i < LHEAD_SIZE; ++i) {
		if (list_empty(lheads[i]))
			lheads[i] = NULL;
		else
			++have_lheads;
	}

	/* early return in non-"all" mode if lists are all empty */
	if (!allflag && !have_lheads) {
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (0);
	}

	/* unlock the waitq quickly */
	if (!lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);

	/* show the base taskq contents */
	snprintf(name, sizeof (name), "%s/%d", tq->tq_name, tq->tq_instance);
	seq_printf(f, "%-25s ", name);
	seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
	    tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
	    tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
	    tq->tq_nalloc, tq->tq_flags);

	/* show the active list */
	if (lheads[LHEAD_ACTIVE]) {
		j = 0;
		list_for_each_entry(tqt, &tq->tq_active_list, tqt_active_list) {
			if (j == 0)
				seq_printf(f, "\t%s:",
				    list_names[LHEAD_ACTIVE]);
			else if (j == 2) {
				seq_printf(f, "\n\t       ");
				j = 0;
			}
			seq_printf(f, " [%d]%pf(%ps)",
			    tqt->tqt_thread->pid,
			    tqt->tqt_task->tqent_func,
			    tqt->tqt_task->tqent_arg);
			++j;
		}
		seq_printf(f, "\n");
	}

	for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
		if (lheads[i]) {
			j = 0;
			list_for_each(lh, lheads[i]) {
				if (spl_max_show_tasks != 0 &&
				    j >= spl_max_show_tasks) {
					seq_printf(f, "\n\t(truncated)");
					break;
				}
				/* show the wait waitq list */
				if (i == LHEAD_WAIT) {
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, entry);
#else
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, task_list);
#endif
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 8 == 0)
						seq_printf(f, "\n\t     ");

					tsk = wq->private;
					seq_printf(f, " %d", tsk->pid);
				/* pend, prio and delay lists */
				} else {
					tqe = list_entry(lh, taskq_ent_t,
					    tqent_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 2 == 0)
						seq_printf(f, "\n\t     ");

					seq_printf(f, " %pf(%ps)",
					    tqe->tqent_func,
					    tqe->tqent_arg);
				}
				++j;
			}
			seq_printf(f, "\n");
		}
	if (lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}

static int
taskq_all_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_TRUE));
}

static int
taskq_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_FALSE));
}

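/*
 * seq_file iterator callbacks for the taskq files.  The start callback
 * takes tq_list_sem for reading and it is held until taskq_seq_stop()
 * runs, so the taskq list cannot change while it is being printed.
 */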
static void *
taskq_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&tq_list_sem);
	if (!n)
		taskq_seq_show_headers(f);

	p = tq_list.next;
	while (n--) {
		p = p->next;
		if (p == &tq_list)
			return (NULL);
	}

	return (list_entry(p, taskq_t, tq_taskqs));
}

static void *
taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	taskq_t *tq = p;

	++*pos;
	return ((tq->tq_taskqs.next == &tq_list) ?
	    NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
}

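/*
 * Column headers for /proc/spl/kmem/slab: per-cache flags, total and
 * allocated bytes, slab and object sizes, then slab and object counts
 * (total/allocated/maximum) and the emergency object statistics
 * (deadlocks/allocated/maximum).
 */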
static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "---------------------------------------------  "
	    "----- slab ------  "
	    "---- object -----  "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name                                  "
	    "  flags      size     alloc slabsize  objsize  "
	    "total alloc   max  "
	    "total alloc   max  "
	    "dlock alloc   max\n");
}

static int
slab_seq_show(struct seq_file *f, void *p)
{
	spl_kmem_cache_t *skc = p;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	if (skc->skc_flags & KMC_SLAB) {
		/*
		 * This cache is backed by a generic Linux kmem cache which
		 * has its own accounting. For these caches we only track
		 * the number of active allocated objects that exist within
		 * the underlying Linux slabs. For the overall statistics of
		 * the underlying Linux cache please refer to /proc/slabinfo.
		 */
		spin_lock(&skc->skc_lock);
		uint64_t objs_allocated =
		    percpu_counter_sum(&skc->skc_linux_alloc);
		seq_printf(f, "%-36s  ", skc->skc_name);
		seq_printf(f, "0x%05lx %9s %9lu %8s %8u  "
		    "%5s %5s %5s  %5s %5lu %5s  %5s %5s %5s\n",
		    (long unsigned)skc->skc_flags,
		    "-",
		    (long unsigned)(skc->skc_obj_size * objs_allocated),
		    "-",
		    (unsigned)skc->skc_obj_size,
		    "-", "-", "-", "-",
		    (long unsigned)objs_allocated,
		    "-", "-", "-", "-");
		spin_unlock(&skc->skc_lock);
		return (0);
	}

	spin_lock(&skc->skc_lock);
	seq_printf(f, "%-36s  ", skc->skc_name);
	seq_printf(f, "0x%05lx %9lu %9lu %8u %8u  "
	    "%5lu %5lu %5lu  %5lu %5lu %5lu  %5lu %5lu %5lu\n",
	    (long unsigned)skc->skc_flags,
	    (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
	    (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
	    (unsigned)skc->skc_slab_size,
	    (unsigned)skc->skc_obj_size,
	    (long unsigned)skc->skc_slab_total,
	    (long unsigned)skc->skc_slab_alloc,
	    (long unsigned)skc->skc_slab_max,
	    (long unsigned)skc->skc_obj_total,
	    (long unsigned)skc->skc_obj_alloc,
	    (long unsigned)skc->skc_obj_max,
	    (long unsigned)skc->skc_obj_deadlock,
	    (long unsigned)skc->skc_obj_emergency,
	    (long unsigned)skc->skc_obj_emergency_max);
	spin_unlock(&skc->skc_lock);
	return (0);
}

static void *
slab_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&spl_kmem_cache_sem);
	if (!n)
		slab_seq_show_headers(f);

	p = spl_kmem_cache_list.next;
	while (n--) {
		p = p->next;
		if (p == &spl_kmem_cache_list)
			return (NULL);
	}

	return (list_entry(p, spl_kmem_cache_t, skc_list));
}

static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	spl_kmem_cache_t *skc = p;

	++*pos;
	return ((skc->skc_list.next == &spl_kmem_cache_list) ?
	    NULL : list_entry(skc->skc_list.next, spl_kmem_cache_t, skc_list));
}

static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}

static struct seq_operations slab_seq_ops = {
	.show  = slab_seq_show,
	.start = slab_seq_start,
	.next  = slab_seq_next,
	.stop  = slab_seq_stop,
};

static int
proc_slab_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &slab_seq_ops));
}

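/*
 * Since Linux 5.6 /proc file callbacks are registered through struct
 * proc_ops rather than struct file_operations; HAVE_PROC_OPS_STRUCT
 * selects the matching member names for kstat_proc_op_t at build time.
 */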
static const kstat_proc_op_t proc_slab_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open	= proc_slab_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
#else
	.open		= proc_slab_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
#endif
};

static void
taskq_seq_stop(struct seq_file *f, void *v)
{
	up_read(&tq_list_sem);
}

static struct seq_operations taskq_all_seq_ops = {
	.show	= taskq_all_seq_show,
	.start	= taskq_seq_start,
	.next	= taskq_seq_next,
	.stop	= taskq_seq_stop,
};

static struct seq_operations taskq_seq_ops = {
	.show	= taskq_seq_show,
	.start	= taskq_seq_start,
	.next	= taskq_seq_next,
	.stop	= taskq_seq_stop,
};

static int
proc_taskq_all_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_all_seq_ops));
}

static int
proc_taskq_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_seq_ops));
}

static const kstat_proc_op_t proc_taskq_all_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open	= proc_taskq_all_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
#else
	.open		= proc_taskq_all_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
#endif
};

static const kstat_proc_op_t proc_taskq_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open	= proc_taskq_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
#else
	.open		= proc_taskq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
#endif
};

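/*
 * sysctl entries published under kernel/spl/kmem.  The slab_kvmem_*
 * entries stash their KMC_* selector mask in .data, which
 * proc_doslab() decodes to pick the statistic to sum.
 */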
static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
	{
		.procname	= "kmem_used",
		.data		= &kmem_alloc_used,
#ifdef HAVE_ATOMIC64_T
		.maxlen		= sizeof (atomic64_t),
#else
		.maxlen		= sizeof (atomic_t),
#endif /* HAVE_ATOMIC64_T */
		.mode		= 0444,
		.proc_handler	= &proc_domemused,
	},
	{
		.procname	= "kmem_max",
		.data		= &kmem_alloc_max,
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doulongvec_minmax,
	},
#endif /* DEBUG_KMEM */
	{
		.procname	= "slab_kvmem_total",
		.data		= (void *)(KMC_KVMEM | KMC_TOTAL),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{
		.procname	= "slab_kvmem_alloc",
		.data		= (void *)(KMC_KVMEM | KMC_ALLOC),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{
		.procname	= "slab_kvmem_max",
		.data		= (void *)(KMC_KVMEM | KMC_MAX),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{},
};

static struct ctl_table spl_kstat_table[] = {
	{},
};

static struct ctl_table spl_table[] = {
	/*
	 * NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		.procname	= "gitrev",
		.data		= spl_gitrev,
		.maxlen		= sizeof (spl_gitrev),
		.mode		= 0444,
		.proc_handler	= &proc_dostring,
	},
	{
		.procname	= "hostid",
		.data		= &spl_hostid,
		.maxlen		= sizeof (unsigned long),
		.mode		= 0644,
		.proc_handler	= &proc_dohostid,
	},
	{
		.procname	= "kmem",
		.mode		= 0555,
		.child		= spl_kmem_table,
	},
	{
		.procname	= "kstat",
		.mode		= 0555,
		.child		= spl_kstat_table,
	},
	{},
};

static struct ctl_table spl_dir[] = {
	{
		.procname	= "spl",
		.mode		= 0555,
		.child		= spl_table,
	},
	{}
};

static struct ctl_table spl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= spl_dir,
	},
	{}
};

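/*
 * Register the kernel.spl sysctl tree and create the /proc/spl
 * hierarchy: taskq-all, taskq, kmem/slab and the kstat directory used
 * by the SPL kstat code.  Any failure unwinds whatever was already
 * created and returns -EUNATCH.
 */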
int
spl_proc_init(void)
{
	int rc = 0;

	spl_header = register_sysctl_table(spl_root);
	if (spl_header == NULL)
		return (-EUNATCH);

	proc_spl = proc_mkdir("spl", NULL);
	if (proc_spl == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq_all = proc_create_data("taskq-all", 0444, proc_spl,
	    &proc_taskq_all_operations, NULL);
	if (proc_spl_taskq_all == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq = proc_create_data("taskq", 0444, proc_spl,
	    &proc_taskq_operations, NULL);
	if (proc_spl_taskq == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem = proc_mkdir("kmem", proc_spl);
	if (proc_spl_kmem == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem_slab = proc_create_data("slab", 0444, proc_spl_kmem,
	    &proc_slab_operations, NULL);
	if (proc_spl_kmem_slab == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kstat = proc_mkdir("kstat", proc_spl);
	if (proc_spl_kstat == NULL) {
		rc = -EUNATCH;
		goto out;
	}
out:
	if (rc) {
		remove_proc_entry("kstat", proc_spl);
		remove_proc_entry("slab", proc_spl_kmem);
		remove_proc_entry("kmem", proc_spl);
		remove_proc_entry("taskq-all", proc_spl);
		remove_proc_entry("taskq", proc_spl);
		remove_proc_entry("spl", NULL);
		unregister_sysctl_table(spl_header);
	}

	return (rc);
}

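/*
 * Tear down everything created by spl_proc_init(): remove the
 * /proc/spl entries and unregister the sysctl table.
 */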
void
spl_proc_fini(void)
{
	remove_proc_entry("kstat", proc_spl);
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
	remove_proc_entry("taskq-all", proc_spl);
	remove_proc_entry("taskq", proc_spl);
	remove_proc_entry("spl", NULL);

	ASSERT(spl_header != NULL);
	unregister_sysctl_table(spl_header);
}