/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  Solaris Porting Layer (SPL) Proc Implementation.
 */

#include <sys/systeminfo.h>
#include <sys/kstat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/taskq.h>
#include <sys/proc.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include "zfs_gitrev.h"

#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif

static unsigned long table_min = 0;
static unsigned long table_max = ~0;

static struct ctl_table_header *spl_header = NULL;
static struct proc_dir_entry *proc_spl = NULL;
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
static struct proc_dir_entry *proc_spl_taskq_all = NULL;
static struct proc_dir_entry *proc_spl_taskq = NULL;
struct proc_dir_entry *proc_spl_kstat = NULL;

#ifdef DEBUG_KMEM
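/*
 * Expose the DEBUG_KMEM allocation counter (an atomic) as a read-only
 * unsigned long.  Writes are consumed without effect; reads snapshot the
 * atomic into a stack variable and hand a throwaway ctl_table to
 * proc_doulongvec_minmax() for formatting.
 */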
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long val;
	spl_ctl_table dummy = *table;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &table_min;
	dummy.extra2 = &table_max;

	if (write) {
		*ppos += *lenp;
	} else {
#ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
#else
		val = atomic_read((atomic_t *)table->data);
#endif /* HAVE_ATOMIC64_T */
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */

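/*
 * Sum a slab statistic over every SPL kmem cache whose flags match the
 * mask encoded in table->data.  The mask selects both the cache type
 * (e.g. KMC_KVMEM) and the statistic: total slab bytes (KMC_TOTAL),
 * allocated object bytes (KMC_ALLOC), or the high-water mark (KMC_MAX).
 */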
static int
proc_doslab(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long val = 0, mask;
	spl_ctl_table dummy = *table;
	spl_kmem_cache_t *skc = NULL;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &table_min;
	dummy.extra2 = &table_max;

	if (write) {
		*ppos += *lenp;
	} else {
		down_read(&spl_kmem_cache_sem);
		mask = (unsigned long)table->data;

		list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {

			/* Only use slabs of the correct kmem/vmem type */
			if (!(skc->skc_flags & mask))
				continue;

			/* Sum the specified field for selected slabs */
			switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
			case KMC_TOTAL:
				val += skc->skc_slab_size * skc->skc_slab_total;
				break;
			case KMC_ALLOC:
				val += skc->skc_obj_size * skc->skc_obj_alloc;
				break;
			case KMC_MAX:
				val += skc->skc_obj_size * skc->skc_obj_max;
				break;
			}
		}

		up_read(&spl_kmem_cache_sem);
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}

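/*
 * Read or set the SPL hostid.  The value is presented as bare hex with
 * no leading 0x, so reads format spl_hostid with "%lx" and writes must
 * parse the string with simple_strtoul() rather than relying on
 * proc_doulongvec_minmax().
 */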
static int
proc_dohostid(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char *end, str[32];
	unsigned long hid;
	spl_ctl_table dummy = *table;

	dummy.data = str;
	dummy.maxlen = sizeof (str) - 1;

	if (!write)
		snprintf(str, sizeof (str), "%lx",
		    (unsigned long) zone_get_hostid(NULL));

	/* always returns 0 */
	proc_dostring(&dummy, write, buffer, lenp, ppos);

	if (write) {
		/*
		 * We can't use proc_doulongvec_minmax() in the write
		 * case here because hostid, while a hex value, has no
		 * leading 0x, which confuses the helper function.
		 */

		hid = simple_strtoul(str, &end, 16);
		if (str == end)
			return (-EINVAL);
		spl_hostid = hid;
	}

	return (0);
}

static void
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
}

/* indices into the lheads array below */
#define	LHEAD_PEND	0
#define	LHEAD_PRIO	1
#define	LHEAD_DELAY	2
#define	LHEAD_WAIT	3
#define	LHEAD_ACTIVE	4
#define	LHEAD_SIZE	5

/* BEGIN CSTYLED */
static unsigned int spl_max_show_tasks = 512;
module_param(spl_max_show_tasks, uint, 0644);
MODULE_PARM_DESC(spl_max_show_tasks, "Max number of tasks shown in taskq proc");
/* END CSTYLED */

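/*
 * Emit one taskq as a summary row followed, for each non-empty list, by
 * an indented sub-list of its pending, priority, delay, wait, and active
 * entries.  With allflag set a row is printed even when every list is
 * empty (/proc/spl/taskq-all); otherwise idle taskqs are skipped
 * (/proc/spl/taskq).  The taskq lock and the wait queue lock are held so
 * the lists cannot change while they are walked.
 */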
static int
taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
{
	taskq_t *tq = p;
	taskq_thread_t *tqt = NULL;
	spl_wait_queue_entry_t *wq;
	struct task_struct *tsk;
	taskq_ent_t *tqe;
	char name[100];
	struct list_head *lheads[LHEAD_SIZE], *lh;
	static char *list_names[LHEAD_SIZE] =
	    {"pend", "prio", "delay", "wait", "active" };
	int i, j, have_lheads = 0;
	unsigned long wflags, flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

	/* get the various lists and check whether they're empty */
	lheads[LHEAD_PEND] = &tq->tq_pend_list;
	lheads[LHEAD_PRIO] = &tq->tq_prio_list;
	lheads[LHEAD_DELAY] = &tq->tq_delay_list;
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.head;
#else
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
#endif
	lheads[LHEAD_ACTIVE] = &tq->tq_active_list;

	for (i = 0; i < LHEAD_SIZE; ++i) {
		if (list_empty(lheads[i]))
			lheads[i] = NULL;
		else
			++have_lheads;
	}

	/* early return in non-"all" mode if lists are all empty */
	if (!allflag && !have_lheads) {
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (0);
	}

	/* unlock the waitq quickly */
	if (!lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);

	/* show the base taskq contents */
	snprintf(name, sizeof (name), "%s/%d", tq->tq_name, tq->tq_instance);
	seq_printf(f, "%-25s ", name);
	seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
	    tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
	    tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
	    tq->tq_nalloc, tq->tq_flags);

	/* show the active list */
	if (lheads[LHEAD_ACTIVE]) {
		j = 0;
		list_for_each_entry(tqt, &tq->tq_active_list, tqt_active_list) {
			if (j == 0)
				seq_printf(f, "\t%s:",
				    list_names[LHEAD_ACTIVE]);
			else if (j == 2) {
				seq_printf(f, "\n\t       ");
				j = 0;
			}
			seq_printf(f, " [%d]%pf(%ps)",
			    tqt->tqt_thread->pid,
			    tqt->tqt_task->tqent_func,
			    tqt->tqt_task->tqent_arg);
			++j;
		}
		seq_printf(f, "\n");
	}

	for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
		if (lheads[i]) {
			j = 0;
			list_for_each(lh, lheads[i]) {
				if (spl_max_show_tasks != 0 &&
				    j >= spl_max_show_tasks) {
					seq_printf(f, "\n\t(truncated)");
					break;
				}
				/* show the wait waitq list */
				if (i == LHEAD_WAIT) {
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, entry);
#else
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, task_list);
#endif
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 8 == 0)
						seq_printf(f, "\n\t     ");

					tsk = wq->private;
					seq_printf(f, " %d", tsk->pid);
				/* pend, prio and delay lists */
				} else {
					tqe = list_entry(lh, taskq_ent_t,
					    tqent_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 2 == 0)
						seq_printf(f, "\n\t     ");

					seq_printf(f, " %pf(%ps)",
					    tqe->tqent_func,
					    tqe->tqent_arg);
				}
				++j;
			}
			seq_printf(f, "\n");
		}
	if (lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}

static int
taskq_all_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_TRUE));
}

static int
taskq_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_FALSE));
}

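/*
 * seq_file iterator over tq_list.  ->start takes tq_list_sem for reading
 * and ->stop releases it, so the taskq list is stable for the duration
 * of a read; the header row is only emitted when starting at offset 0.
 */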
static void *
taskq_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&tq_list_sem);
	if (!n)
		taskq_seq_show_headers(f);

	p = tq_list.next;
	while (n--) {
		p = p->next;
		if (p == &tq_list)
			return (NULL);
	}

	return (list_entry(p, taskq_t, tq_taskqs));
}

static void *
taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	taskq_t *tq = p;

	++*pos;
	return ((tq->tq_taskqs.next == &tq_list) ?
	    NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
}

static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "---------------------------------------------  "
	    "----- slab ------  "
	    "---- object -----  "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name                                  "
	    "  flags      size     alloc slabsize  objsize  "
	    "total alloc   max  "
	    "total alloc   max  "
	    "dlock alloc   max\n");
}

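/*
 * Print one row of /proc/spl/kmem/slab.  Caches backed by the native
 * Linux slab (KMC_SLAB) only track allocated object counts, so most
 * columns are printed as "-"; fully SPL-managed caches report complete
 * slab, object, and emergency-object statistics.
 */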
static int
slab_seq_show(struct seq_file *f, void *p)
{
	spl_kmem_cache_t *skc = p;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	if (skc->skc_flags & KMC_SLAB) {
		/*
		 * This cache is backed by a generic Linux kmem cache which
		 * has its own accounting. For these caches we only track
		 * the number of active allocated objects that exist within
		 * the underlying Linux slabs. For the overall statistics of
		 * the underlying Linux cache please refer to /proc/slabinfo.
		 */
		spin_lock(&skc->skc_lock);
		uint64_t objs_allocated =
		    percpu_counter_sum(&skc->skc_linux_alloc);
		seq_printf(f, "%-36s  ", skc->skc_name);
		seq_printf(f, "0x%05lx %9s %9lu %8s %8u  "
		    "%5s %5s %5s  %5s %5lu %5s  %5s %5s %5s\n",
		    (long unsigned)skc->skc_flags,
		    "-",
		    (long unsigned)(skc->skc_obj_size * objs_allocated),
		    "-",
		    (unsigned)skc->skc_obj_size,
		    "-", "-", "-", "-",
		    (long unsigned)objs_allocated,
		    "-", "-", "-", "-");
		spin_unlock(&skc->skc_lock);
		return (0);
	}

	spin_lock(&skc->skc_lock);
	seq_printf(f, "%-36s  ", skc->skc_name);
	seq_printf(f, "0x%05lx %9lu %9lu %8u %8u  "
	    "%5lu %5lu %5lu  %5lu %5lu %5lu  %5lu %5lu %5lu\n",
	    (long unsigned)skc->skc_flags,
	    (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
	    (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
	    (unsigned)skc->skc_slab_size,
	    (unsigned)skc->skc_obj_size,
	    (long unsigned)skc->skc_slab_total,
	    (long unsigned)skc->skc_slab_alloc,
	    (long unsigned)skc->skc_slab_max,
	    (long unsigned)skc->skc_obj_total,
	    (long unsigned)skc->skc_obj_alloc,
	    (long unsigned)skc->skc_obj_max,
	    (long unsigned)skc->skc_obj_deadlock,
	    (long unsigned)skc->skc_obj_emergency,
	    (long unsigned)skc->skc_obj_emergency_max);
	spin_unlock(&skc->skc_lock);
	return (0);
}

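/*
 * seq_file iterator over spl_kmem_cache_list, holding spl_kmem_cache_sem
 * across ->start/->stop just as the taskq iterator holds tq_list_sem.
 */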
static void *
slab_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&spl_kmem_cache_sem);
	if (!n)
		slab_seq_show_headers(f);

	p = spl_kmem_cache_list.next;
	while (n--) {
		p = p->next;
		if (p == &spl_kmem_cache_list)
			return (NULL);
	}

	return (list_entry(p, spl_kmem_cache_t, skc_list));
}

static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	spl_kmem_cache_t *skc = p;

	++*pos;
	return ((skc->skc_list.next == &spl_kmem_cache_list) ?
	    NULL : list_entry(skc->skc_list.next, spl_kmem_cache_t, skc_list));
}

static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}

static const struct seq_operations slab_seq_ops = {
	.show  = slab_seq_show,
	.start = slab_seq_start,
	.next  = slab_seq_next,
	.stop  = slab_seq_stop,
};

static int
proc_slab_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &slab_seq_ops));
}

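/*
 * Kernels 5.6 and newer register /proc files through struct proc_ops
 * while older kernels use struct file_operations; HAVE_PROC_OPS_STRUCT
 * selects the matching member names in the kstat_proc_op_t initializers
 * below.
 */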
static const kstat_proc_op_t proc_slab_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open	= proc_slab_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
#else
	.open		= proc_slab_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
#endif
};

static void
taskq_seq_stop(struct seq_file *f, void *v)
{
	up_read(&tq_list_sem);
}

static const struct seq_operations taskq_all_seq_ops = {
	.show	= taskq_all_seq_show,
	.start	= taskq_seq_start,
	.next	= taskq_seq_next,
	.stop	= taskq_seq_stop,
};

static const struct seq_operations taskq_seq_ops = {
	.show	= taskq_seq_show,
	.start	= taskq_seq_start,
	.next	= taskq_seq_next,
	.stop	= taskq_seq_stop,
};

static int
proc_taskq_all_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_all_seq_ops));
}

static int
proc_taskq_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_seq_ops));
}

static const kstat_proc_op_t proc_taskq_all_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open	= proc_taskq_all_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
#else
	.open		= proc_taskq_all_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
#endif
};

static const kstat_proc_op_t proc_taskq_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open	= proc_taskq_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
#else
	.open		= proc_taskq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
#endif
};

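/*
 * Entries under /proc/sys/kernel/spl/kmem.  The slab_kvmem_* entries
 * overload .data to carry the flag mask interpreted by proc_doslab()
 * rather than a pointer to backing storage.
 */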
static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
	{
		.procname	= "kmem_used",
		.data		= &kmem_alloc_used,
#ifdef HAVE_ATOMIC64_T
		.maxlen		= sizeof (atomic64_t),
#else
		.maxlen		= sizeof (atomic_t),
#endif /* HAVE_ATOMIC64_T */
		.mode		= 0444,
		.proc_handler	= &proc_domemused,
	},
	{
		.procname	= "kmem_max",
		.data		= &kmem_alloc_max,
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doulongvec_minmax,
	},
#endif /* DEBUG_KMEM */
	{
		.procname	= "slab_kvmem_total",
		.data		= (void *)(KMC_KVMEM | KMC_TOTAL),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{
		.procname	= "slab_kvmem_alloc",
		.data		= (void *)(KMC_KVMEM | KMC_ALLOC),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{
		.procname	= "slab_kvmem_max",
		.data		= (void *)(KMC_KVMEM | KMC_MAX),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{},
};

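/*
 * Empty placeholder table; SPL kstats are published under /proc/spl/kstat
 * (see proc_spl_kstat above) rather than through sysctl.
 */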
static struct ctl_table spl_kstat_table[] = {
	{},
};

static struct ctl_table spl_table[] = {
	/*
	 * NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		.procname	= "gitrev",
		.data		= (char *)ZFS_META_GITREV,
		.maxlen		= sizeof (ZFS_META_GITREV),
		.mode		= 0444,
		.proc_handler	= &proc_dostring,
	},
	{
		.procname	= "hostid",
		.data		= &spl_hostid,
		.maxlen		= sizeof (unsigned long),
		.mode		= 0644,
		.proc_handler	= &proc_dohostid,
	},
	{
		.procname	= "kmem",
		.mode		= 0555,
		.child		= spl_kmem_table,
	},
	{
		.procname	= "kstat",
		.mode		= 0555,
		.child		= spl_kstat_table,
	},
	{},
};

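/*
 * Nested tables that place everything above in the kernel.spl sysctl
 * namespace, i.e. /proc/sys/kernel/spl.
 */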
static struct ctl_table spl_dir[] = {
	{
		.procname	= "spl",
		.mode		= 0555,
		.child		= spl_table,
	},
	{}
};

static struct ctl_table spl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= spl_dir,
	},
	{}
};

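/*
 * Register the kernel.spl sysctl tree and create the /proc/spl hierarchy
 * (taskq-all, taskq, kmem/slab, and the kstat directory).  If any step
 * fails, the error path tears down everything created so far and init
 * fails with -EUNATCH.
 */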
int
spl_proc_init(void)
{
	int rc = 0;

	spl_header = register_sysctl_table(spl_root);
	if (spl_header == NULL)
		return (-EUNATCH);

	proc_spl = proc_mkdir("spl", NULL);
	if (proc_spl == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq_all = proc_create_data("taskq-all", 0444, proc_spl,
	    &proc_taskq_all_operations, NULL);
	if (proc_spl_taskq_all == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq = proc_create_data("taskq", 0444, proc_spl,
	    &proc_taskq_operations, NULL);
	if (proc_spl_taskq == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem = proc_mkdir("kmem", proc_spl);
	if (proc_spl_kmem == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem_slab = proc_create_data("slab", 0444, proc_spl_kmem,
	    &proc_slab_operations, NULL);
	if (proc_spl_kmem_slab == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kstat = proc_mkdir("kstat", proc_spl);
	if (proc_spl_kstat == NULL) {
		rc = -EUNATCH;
		goto out;
	}
out:
	if (rc) {
		remove_proc_entry("kstat", proc_spl);
		remove_proc_entry("slab", proc_spl_kmem);
		remove_proc_entry("kmem", proc_spl);
		remove_proc_entry("taskq-all", proc_spl);
		remove_proc_entry("taskq", proc_spl);
		remove_proc_entry("spl", NULL);
		unregister_sysctl_table(spl_header);
	}

	return (rc);
}

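/*
 * Tear down the /proc/spl hierarchy and unregister the kernel.spl sysctl
 * tree created by spl_proc_init().
 */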
void
spl_proc_fini(void)
{
	remove_proc_entry("kstat", proc_spl);
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
	remove_proc_entry("taskq-all", proc_spl);
	remove_proc_entry("taskq", proc_spl);
	remove_proc_entry("spl", NULL);

	ASSERT(spl_header != NULL);
	unregister_sysctl_table(spl_header);
}