/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  Solaris Porting Layer (SPL) Proc Implementation.
 */

#include <sys/systeminfo.h>
#include <sys/kstat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/taskq.h>
#include <sys/proc.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/version.h>

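/*
 * With grsecurity's "constify" GCC plugin, structures made up of function
 * pointers (struct ctl_table among them) are forced read-only.  The
 * __no_const variant keeps spl_ctl_table writable so the handlers below
 * can build modified stack copies of a table before delegating to the
 * generic proc helpers.
 */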
#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif

static unsigned long table_min = 0;
static unsigned long table_max = ~0;

static struct ctl_table_header *spl_header = NULL;
static struct proc_dir_entry *proc_spl = NULL;
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
static struct proc_dir_entry *proc_spl_taskq_all = NULL;
static struct proc_dir_entry *proc_spl_taskq = NULL;
struct proc_dir_entry *proc_spl_kstat = NULL;

#ifdef DEBUG_KMEM
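/*
 * Read-only handler for the kmem_used entry below.  Writes are consumed
 * without effect (*ppos is simply advanced past the input); reads snapshot
 * the atomic byte counter into an unsigned long and hand a stack copy of
 * the table to proc_doulongvec_minmax() for formatting.
 */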
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long val;
	spl_ctl_table dummy = *table;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &table_min;
	dummy.extra2 = &table_max;

	if (write) {
		*ppos += *lenp;
	} else {
#ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
#else
		val = atomic_read((atomic_t *)table->data);
#endif /* HAVE_ATOMIC64_T */
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */

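/*
 * Aggregate one statistic across every SPL slab cache.  The KMC_* mask is
 * packed into table->data: the type bit (e.g. KMC_KVMEM) selects which
 * caches to visit and the field bit (KMC_TOTAL/ALLOC/MAX) selects what to
 * sum.  The list walk holds spl_kmem_cache_sem for reading.
 */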
static int
proc_doslab(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long val = 0, mask;
	spl_ctl_table dummy = *table;
	spl_kmem_cache_t *skc = NULL;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &table_min;
	dummy.extra2 = &table_max;

	if (write) {
		*ppos += *lenp;
	} else {
		down_read(&spl_kmem_cache_sem);
		mask = (unsigned long)table->data;

		list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {

			/* Only use slabs of the correct kmem/vmem type */
			if (!(skc->skc_flags & mask))
				continue;

			/* Sum the specified field for selected slabs */
			switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
			case KMC_TOTAL:
				val += skc->skc_slab_size * skc->skc_slab_total;
				break;
			case KMC_ALLOC:
				val += skc->skc_obj_size * skc->skc_obj_alloc;
				break;
			case KMC_MAX:
				val += skc->skc_obj_size * skc->skc_obj_max;
				break;
			}
		}

		up_read(&spl_kmem_cache_sem);
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}

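/*
 * Expose spl_hostid as a hex string.  Reads format the current hostid;
 * writes parse a hex value that carries no leading "0x" (which is why
 * proc_doulongvec_minmax() cannot be used on the write side).
 *
 * Example usage, assuming the sysctl tree registered at the bottom of
 * this file (kernel -> spl -> hostid):
 *
 *	# cat /proc/sys/kernel/spl/hostid
 *	# echo 1a2b3c4d > /proc/sys/kernel/spl/hostid
 */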
static int
proc_dohostid(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char *end, str[32];
	unsigned long hid;
	spl_ctl_table dummy = *table;

	dummy.data = str;
	dummy.maxlen = sizeof (str) - 1;

	if (!write)
		snprintf(str, sizeof (str), "%lx",
		    (unsigned long) zone_get_hostid(NULL));

	/* always returns 0 */
	proc_dostring(&dummy, write, buffer, lenp, ppos);

	if (write) {
		/*
		 * We can't use proc_doulongvec_minmax() in the write
		 * case here because hostid, while a hex value, has no
		 * leading 0x, which confuses the helper function.
		 */

		hid = simple_strtoul(str, &end, 16);
		if (str == end)
			return (-EINVAL);
		spl_hostid = hid;
	}

	return (0);
}

static void
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
}

/* indices into the lheads array below */
#define	LHEAD_PEND	0
#define	LHEAD_PRIO	1
#define	LHEAD_DELAY	2
#define	LHEAD_WAIT	3
#define	LHEAD_ACTIVE	4
#define	LHEAD_SIZE	5

/* BEGIN CSTYLED */
static unsigned int spl_max_show_tasks = 512;
module_param(spl_max_show_tasks, uint, 0644);
MODULE_PARM_DESC(spl_max_show_tasks, "Max number of tasks shown in taskq proc");
/* END CSTYLED */

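/*
 * Emit one taskq's state: a summary row (matching taskq_seq_show_headers()
 * above) followed by the contents of the active, pend, prio, delay and
 * wait lists.  Both tq_lock and the wait queue lock are held while the
 * lists are walked; the pend, prio, delay and wait lists are truncated
 * after spl_max_show_tasks entries (a value of 0 disables truncation).
 * In non-"all" mode, taskqs whose lists are all empty are skipped.
 */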
static int
taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
{
	taskq_t *tq = p;
	taskq_thread_t *tqt = NULL;
	spl_wait_queue_entry_t *wq;
	struct task_struct *tsk;
	taskq_ent_t *tqe;
	char name[100];
	struct list_head *lheads[LHEAD_SIZE], *lh;
	static char *list_names[LHEAD_SIZE] =
	    {"pend", "prio", "delay", "wait", "active"};
	int i, j, have_lheads = 0;
	unsigned long wflags, flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

	/* get the various lists and check whether they're empty */
	lheads[LHEAD_PEND] = &tq->tq_pend_list;
	lheads[LHEAD_PRIO] = &tq->tq_prio_list;
	lheads[LHEAD_DELAY] = &tq->tq_delay_list;
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.head;
#else
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
#endif
	lheads[LHEAD_ACTIVE] = &tq->tq_active_list;

	for (i = 0; i < LHEAD_SIZE; ++i) {
		if (list_empty(lheads[i]))
			lheads[i] = NULL;
		else
			++have_lheads;
	}

	/* early return in non-"all" mode if lists are all empty */
	if (!allflag && !have_lheads) {
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (0);
	}

	/* unlock the waitq quickly */
	if (!lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);

	/* show the base taskq contents */
	snprintf(name, sizeof (name), "%s/%d", tq->tq_name, tq->tq_instance);
	seq_printf(f, "%-25s ", name);
	seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
	    tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
	    tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
	    tq->tq_nalloc, tq->tq_flags);

	/* show the active list */
	if (lheads[LHEAD_ACTIVE]) {
		j = 0;
		list_for_each_entry(tqt, &tq->tq_active_list, tqt_active_list) {
			if (j == 0)
				seq_printf(f, "\t%s:",
				    list_names[LHEAD_ACTIVE]);
			else if (j == 2) {
				seq_printf(f, "\n\t       ");
				j = 0;
			}
			seq_printf(f, " [%d]%pf(%ps)",
			    tqt->tqt_thread->pid,
			    tqt->tqt_task->tqent_func,
			    tqt->tqt_task->tqent_arg);
			++j;
		}
		seq_printf(f, "\n");
	}

	for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
		if (lheads[i]) {
			j = 0;
			list_for_each(lh, lheads[i]) {
				if (spl_max_show_tasks != 0 &&
				    j >= spl_max_show_tasks) {
					seq_printf(f, "\n\t(truncated)");
					break;
				}
				/* show the wait waitq list */
				if (i == LHEAD_WAIT) {
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, entry);
#else
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, task_list);
#endif
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 8 == 0)
						seq_printf(f, "\n\t     ");

					tsk = wq->private;
					seq_printf(f, " %d", tsk->pid);
				/* pend, prio and delay lists */
				} else {
					tqe = list_entry(lh, taskq_ent_t,
					    tqent_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 2 == 0)
						seq_printf(f, "\n\t     ");

					seq_printf(f, " %pf(%ps)",
					    tqe->tqent_func,
					    tqe->tqent_arg);
				}
				++j;
			}
			seq_printf(f, "\n");
		}
	if (lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}

static int
taskq_all_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_TRUE));
}

static int
taskq_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_FALSE));
}

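/*
 * seq_file iterator callbacks for /proc/spl/taskq{,-all}.  start() takes
 * tq_list_sem and advances to the taskq at *pos, next() steps through
 * tq_list, and taskq_seq_stop() (below) drops the semaphore.  Since
 * seq_read() always pairs start() with stop(), the semaphore is released
 * even when start() returns NULL.
 */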
static void *
taskq_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&tq_list_sem);
	if (!n)
		taskq_seq_show_headers(f);

	p = tq_list.next;
	while (n--) {
		p = p->next;
		if (p == &tq_list)
			return (NULL);
	}

	return (list_entry(p, taskq_t, tq_taskqs));
}

static void *
taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	taskq_t *tq = p;

	++*pos;
	return ((tq->tq_taskqs.next == &tq_list) ?
	    NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
}

static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "---------------------------------------------  "
	    "----- slab ------  "
	    "---- object -----  "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name                                  "
	    "  flags      size     alloc slabsize  objsize  "
	    "total alloc   max  "
	    "total alloc   max  "
	    "dlock alloc   max\n");
}

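/*
 * Print one row per cache under skc_lock.  Caches backed by the native
 * Linux slab (KMC_SLAB) only report the object size and the count of live
 * objects; the remaining columns are dashed out because the kernel keeps
 * those statistics itself (see /proc/slabinfo).
 */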
static int
slab_seq_show(struct seq_file *f, void *p)
{
	spl_kmem_cache_t *skc = p;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	if (skc->skc_flags & KMC_SLAB) {
		/*
		 * This cache is backed by a generic Linux kmem cache which
		 * has its own accounting. For these caches we only track
		 * the number of active allocated objects that exist within
		 * the underlying Linux slabs. For the overall statistics of
		 * the underlying Linux cache please refer to /proc/slabinfo.
		 */
		spin_lock(&skc->skc_lock);
		uint64_t objs_allocated =
		    percpu_counter_sum(&skc->skc_linux_alloc);
		seq_printf(f, "%-36s  ", skc->skc_name);
		seq_printf(f, "0x%05lx %9s %9lu %8s %8u  "
		    "%5s %5s %5s  %5s %5lu %5s  %5s %5s %5s\n",
		    (long unsigned)skc->skc_flags,
		    "-",
		    (long unsigned)(skc->skc_obj_size * objs_allocated),
		    "-",
		    (unsigned)skc->skc_obj_size,
		    "-", "-", "-", "-",
		    (long unsigned)objs_allocated,
		    "-", "-", "-", "-");
		spin_unlock(&skc->skc_lock);
		return (0);
	}

	spin_lock(&skc->skc_lock);
	seq_printf(f, "%-36s  ", skc->skc_name);
	seq_printf(f, "0x%05lx %9lu %9lu %8u %8u  "
	    "%5lu %5lu %5lu  %5lu %5lu %5lu  %5lu %5lu %5lu\n",
	    (long unsigned)skc->skc_flags,
	    (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
	    (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
	    (unsigned)skc->skc_slab_size,
	    (unsigned)skc->skc_obj_size,
	    (long unsigned)skc->skc_slab_total,
	    (long unsigned)skc->skc_slab_alloc,
	    (long unsigned)skc->skc_slab_max,
	    (long unsigned)skc->skc_obj_total,
	    (long unsigned)skc->skc_obj_alloc,
	    (long unsigned)skc->skc_obj_max,
	    (long unsigned)skc->skc_obj_deadlock,
	    (long unsigned)skc->skc_obj_emergency,
	    (long unsigned)skc->skc_obj_emergency_max);
	spin_unlock(&skc->skc_lock);
	return (0);
}

static void *
slab_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&spl_kmem_cache_sem);
	if (!n)
		slab_seq_show_headers(f);

	p = spl_kmem_cache_list.next;
	while (n--) {
		p = p->next;
		if (p == &spl_kmem_cache_list)
			return (NULL);
	}

	return (list_entry(p, spl_kmem_cache_t, skc_list));
}

static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	spl_kmem_cache_t *skc = p;

	++*pos;
	return ((skc->skc_list.next == &spl_kmem_cache_list) ?
	    NULL : list_entry(skc->skc_list.next, spl_kmem_cache_t, skc_list));
}

static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}

static struct seq_operations slab_seq_ops = {
	.show  = slab_seq_show,
	.start = slab_seq_start,
	.next  = slab_seq_next,
	.stop  = slab_seq_stop,
};

static int
proc_slab_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &slab_seq_ops));
}

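/*
 * Linux 5.6 replaced the file_operations used for procfs entries with the
 * slimmer struct proc_ops; kstat_proc_op_t resolves to whichever type the
 * running kernel expects, so each table below is filled in both ways under
 * HAVE_PROC_OPS_STRUCT.
 */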
static const kstat_proc_op_t proc_slab_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open	= proc_slab_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
#else
	.open		= proc_slab_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
#endif
};

static void
taskq_seq_stop(struct seq_file *f, void *v)
{
	up_read(&tq_list_sem);
}

static struct seq_operations taskq_all_seq_ops = {
	.show	= taskq_all_seq_show,
	.start	= taskq_seq_start,
	.next	= taskq_seq_next,
	.stop	= taskq_seq_stop,
};

static struct seq_operations taskq_seq_ops = {
	.show	= taskq_seq_show,
	.start	= taskq_seq_start,
	.next	= taskq_seq_next,
	.stop	= taskq_seq_stop,
};

static int
proc_taskq_all_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_all_seq_ops));
}

static int
proc_taskq_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_seq_ops));
}

static const kstat_proc_op_t proc_taskq_all_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open	= proc_taskq_all_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
#else
	.open		= proc_taskq_all_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
#endif
};

static const kstat_proc_op_t proc_taskq_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open	= proc_taskq_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
#else
	.open		= proc_taskq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
#endif
};

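/*
 * sysctl entries under kernel/spl/kmem.  The slab_kvmem_* entries carry a
 * KMC_* selector mask in .data rather than a pointer; proc_doslab()
 * decodes it to pick the caches and the field to sum.  All entries are
 * read-only.
 */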
static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
	{
		.procname	= "kmem_used",
		.data		= &kmem_alloc_used,
#ifdef HAVE_ATOMIC64_T
		.maxlen		= sizeof (atomic64_t),
#else
		.maxlen		= sizeof (atomic_t),
#endif /* HAVE_ATOMIC64_T */
		.mode		= 0444,
		.proc_handler	= &proc_domemused,
	},
	{
		.procname	= "kmem_max",
		.data		= &kmem_alloc_max,
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doulongvec_minmax,
	},
#endif /* DEBUG_KMEM */
	{
		.procname	= "slab_kvmem_total",
		.data		= (void *)(KMC_KVMEM | KMC_TOTAL),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{
		.procname	= "slab_kvmem_alloc",
		.data		= (void *)(KMC_KVMEM | KMC_ALLOC),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{
		.procname	= "slab_kvmem_max",
		.data		= (void *)(KMC_KVMEM | KMC_MAX),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{},
};

static struct ctl_table spl_kstat_table[] = {
	{},
};

static struct ctl_table spl_table[] = {
	/*
	 * NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		.procname	= "gitrev",
		.data		= spl_gitrev,
		.maxlen		= sizeof (spl_gitrev),
		.mode		= 0444,
		.proc_handler	= &proc_dostring,
	},
	{
		.procname	= "hostid",
		.data		= &spl_hostid,
		.maxlen		= sizeof (unsigned long),
		.mode		= 0644,
		.proc_handler	= &proc_dohostid,
	},
	{
		.procname	= "kmem",
		.mode		= 0555,
		.child		= spl_kmem_table,
	},
	{
		.procname	= "kstat",
		.mode		= 0555,
		.child		= spl_kstat_table,
	},
	{},
};

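/*
 * Anchor the tables above under an existing top-level sysctl directory:
 * kernel -> spl -> { gitrev, hostid, kmem, kstat }, i.e. the entries
 * surface as /proc/sys/kernel/spl/*.
 */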
static struct ctl_table spl_dir[] = {
	{
		.procname	= "spl",
		.mode		= 0555,
		.child		= spl_table,
	},
	{},
};

static struct ctl_table spl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= spl_dir,
	},
	{},
};

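/*
 * Register the sysctl tree and build the /proc/spl hierarchy (taskq,
 * taskq-all, kmem/slab, kstat).  Any failure unwinds everything created
 * so far and returns -EUNATCH.
 */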
int
spl_proc_init(void)
{
	int rc = 0;

	spl_header = register_sysctl_table(spl_root);
	if (spl_header == NULL)
		return (-EUNATCH);

	proc_spl = proc_mkdir("spl", NULL);
	if (proc_spl == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq_all = proc_create_data("taskq-all", 0444, proc_spl,
	    &proc_taskq_all_operations, NULL);
	if (proc_spl_taskq_all == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq = proc_create_data("taskq", 0444, proc_spl,
	    &proc_taskq_operations, NULL);
	if (proc_spl_taskq == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem = proc_mkdir("kmem", proc_spl);
	if (proc_spl_kmem == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem_slab = proc_create_data("slab", 0444, proc_spl_kmem,
	    &proc_slab_operations, NULL);
	if (proc_spl_kmem_slab == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kstat = proc_mkdir("kstat", proc_spl);
	if (proc_spl_kstat == NULL) {
		rc = -EUNATCH;
		goto out;
	}
out:
	if (rc) {
		remove_proc_entry("kstat", proc_spl);
		remove_proc_entry("slab", proc_spl_kmem);
		remove_proc_entry("kmem", proc_spl);
		remove_proc_entry("taskq-all", proc_spl);
		remove_proc_entry("taskq", proc_spl);
		remove_proc_entry("spl", NULL);
		unregister_sysctl_table(spl_header);
	}

	return (rc);
}

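/* Tear down everything spl_proc_init() created, sysctl header last. */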
void
spl_proc_fini(void)
{
	remove_proc_entry("kstat", proc_spl);
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
	remove_proc_entry("taskq-all", proc_spl);
	remove_proc_entry("taskq", proc_spl);
	remove_proc_entry("spl", NULL);

	ASSERT(spl_header != NULL);
	unregister_sysctl_table(spl_header);
}