/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  Solaris Porting Layer (SPL) Proc Implementation.
 */

#include <sys/systeminfo.h>
#include <sys/kstat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/taskq.h>
#include <sys/proc.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif
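/*
 * Note (assumption): CONSTIFY_PLUGIN is defined when building under the
 * PaX/grsecurity "constify" GCC plugin, which makes structures like
 * ctl_table read-only; __no_const keeps spl_ctl_table writable so the
 * handlers below can modify a stack copy of a table entry.
 */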

static unsigned long table_min = 0;
static unsigned long table_max = ~0;

static struct ctl_table_header *spl_header = NULL;
static struct proc_dir_entry *proc_spl = NULL;
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
static struct proc_dir_entry *proc_spl_taskq_all = NULL;
static struct proc_dir_entry *proc_spl_taskq = NULL;
struct proc_dir_entry *proc_spl_kstat = NULL;

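/*
 * Copy a string from user space into kbuffer, strip trailing whitespace,
 * and NUL-terminate the result.  Returns 0 on success, -EOVERFLOW if the
 * input does not fit in kbuffer, -EFAULT if the copy from user space
 * fails, and -EINVAL if the string is empty or all whitespace.
 */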
static int
proc_copyin_string(char *kbuffer, int kbuffer_size, const char *ubuffer,
    int ubuffer_size)
{
	int size;

	if (ubuffer_size > kbuffer_size)
		return (-EOVERFLOW);

	if (copy_from_user((void *)kbuffer, (void *)ubuffer, ubuffer_size))
		return (-EFAULT);

	/* strip trailing whitespace */
	size = strnlen(kbuffer, ubuffer_size);
	while (size-- >= 0)
		if (!isspace(kbuffer[size]))
			break;

	/* empty string */
	if (size < 0)
		return (-EINVAL);

	/* no space to terminate */
	if (size == kbuffer_size)
		return (-EOVERFLOW);

	kbuffer[size + 1] = 0;
	return (0);
}

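/*
 * Copy kbuffer out to the user-space ubuffer, optionally appending a
 * single character (see the NB comment below).  Returns the number of
 * bytes copied, or -EFAULT if the copy to user space fails.
 */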
static int
proc_copyout_string(char *ubuffer, int ubuffer_size, const char *kbuffer,
    char *append)
{
	/*
	 * NB if 'append' != NULL, it's a single character to append to the
	 * copied out string - usually "\n" for /proc entries and ""
	 * (i.e. a terminating zero byte) for sysctl entries.
	 */
	int size = MIN(strlen(kbuffer), ubuffer_size);

	if (copy_to_user(ubuffer, kbuffer, size))
		return (-EFAULT);

	if (append != NULL && size < ubuffer_size) {
		if (copy_to_user(ubuffer + size, append, 1))
			return (-EFAULT);

		size++;
	}

	return (size);
}

#ifdef DEBUG_KMEM
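/*
 * Sysctl handler for the DEBUG_KMEM allocation counters.  Reads take a
 * snapshot of the atomic counter referenced by table->data and report it
 * through a stack copy of the table; writes only advance the file offset,
 * so the counters cannot be reset from user space.
 */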
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val;
	spl_ctl_table dummy = *table;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
#ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
#else
		val = atomic_read((atomic_t *)table->data);
#endif /* HAVE_ATOMIC64_T */
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */

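/*
 * Sysctl handler backing the slab_kvmem_* entries below.  The ctl_table
 * 'data' field encodes a flag mask (KMC_KVMEM plus one of KMC_TOTAL,
 * KMC_ALLOC, or KMC_MAX) rather than a pointer; reads walk the global
 * spl_kmem_cache_list and sum the requested statistic over all matching
 * caches.  As above, writes only advance the file offset.
 */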
static int
proc_doslab(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long min = 0, max = ~0, val = 0, mask;
	spl_ctl_table dummy = *table;
	spl_kmem_cache_t *skc = NULL;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &min;
	dummy.extra2 = &max;

	if (write) {
		*ppos += *lenp;
	} else {
		down_read(&spl_kmem_cache_sem);
		mask = (unsigned long)table->data;

		list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {

			/* Only use slabs of the correct kmem/vmem type */
			if (!(skc->skc_flags & mask))
				continue;

			/* Sum the specified field for selected slabs */
			switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
			case KMC_TOTAL:
				val += skc->skc_slab_size * skc->skc_slab_total;
				break;
			case KMC_ALLOC:
				val += skc->skc_obj_size * skc->skc_obj_alloc;
				break;
			case KMC_MAX:
				val += skc->skc_obj_size * skc->skc_obj_max;
				break;
			}
		}

		up_read(&spl_kmem_cache_sem);
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}

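/*
 * Sysctl handler for /proc/sys/kernel/spl/hostid.  The value is exchanged
 * as a bare hex string with no "0x" prefix, for example (illustrative
 * only):
 *
 *	# echo 1a2b3c4d > /proc/sys/kernel/spl/hostid
 *	# cat /proc/sys/kernel/spl/hostid
 *	1a2b3c4d
 *
 * Writes parse the string with simple_strtoul(); reads format the current
 * zone_get_hostid(NULL) value.
 */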
static int
proc_dohostid(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len, rc = 0;
	char *end, str[32];

	if (write) {
		/*
		 * We can't use proc_doulongvec_minmax() in the write
		 * case here because the hostid, while a hex value, has
		 * no leading 0x, which confuses the helper function.
		 */
		rc = proc_copyin_string(str, sizeof (str), buffer, *lenp);
		if (rc < 0)
			return (rc);

		spl_hostid = simple_strtoul(str, &end, 16);
		if (str == end)
			return (-EINVAL);

	} else {
		len = snprintf(str, sizeof (str), "%lx",
		    (unsigned long) zone_get_hostid(NULL));
		if (*ppos >= len)
			rc = 0;
		else
			rc = proc_copyout_string(buffer,
			    *lenp, str + *ppos, "\n");

		if (rc >= 0) {
			*lenp = rc;
			*ppos += rc;
		}
	}

	return (rc);
}

static void
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
}

/* indices into the lheads array below */
#define	LHEAD_PEND	0
#define	LHEAD_PRIO	1
#define	LHEAD_DELAY	2
#define	LHEAD_WAIT	3
#define	LHEAD_ACTIVE	4
#define	LHEAD_SIZE	5

/* BEGIN CSTYLED */
static unsigned int spl_max_show_tasks = 512;
module_param(spl_max_show_tasks, uint, 0644);
MODULE_PARM_DESC(spl_max_show_tasks, "Max number of tasks shown in taskq proc");
/* END CSTYLED */

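/*
 * Emit one row (plus optional task lists) for a single taskq.  In "all"
 * mode every taskq is shown; otherwise only taskqs with at least one
 * non-empty pend/prio/delay/wait/active list are printed.  Output for the
 * pend, prio, delay, and wait lists is capped at spl_max_show_tasks
 * entries per list and marked "(truncated)" beyond that.
 */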
static int
taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
{
	taskq_t *tq = p;
	taskq_thread_t *tqt = NULL;
	spl_wait_queue_entry_t *wq;
	struct task_struct *tsk;
	taskq_ent_t *tqe;
	char name[100];
	struct list_head *lheads[LHEAD_SIZE], *lh;
	static char *list_names[LHEAD_SIZE] =
	    {"pend", "prio", "delay", "wait", "active" };
	int i, j, have_lheads = 0;
	unsigned long wflags, flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

	/* get the various lists and check whether they're empty */
	lheads[LHEAD_PEND] = &tq->tq_pend_list;
	lheads[LHEAD_PRIO] = &tq->tq_prio_list;
	lheads[LHEAD_DELAY] = &tq->tq_delay_list;
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.head;
#else
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
#endif
	lheads[LHEAD_ACTIVE] = &tq->tq_active_list;

	for (i = 0; i < LHEAD_SIZE; ++i) {
		if (list_empty(lheads[i]))
			lheads[i] = NULL;
		else
			++have_lheads;
	}

	/* early return in non-"all" mode if lists are all empty */
	if (!allflag && !have_lheads) {
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (0);
	}

	/* unlock the waitq quickly */
	if (!lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);

	/* show the base taskq contents */
	snprintf(name, sizeof (name), "%s/%d", tq->tq_name, tq->tq_instance);
	seq_printf(f, "%-25s ", name);
	seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
	    tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
	    tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
	    tq->tq_nalloc, tq->tq_flags);

	/* show the active list */
	if (lheads[LHEAD_ACTIVE]) {
		j = 0;
		list_for_each_entry(tqt, &tq->tq_active_list, tqt_active_list) {
			if (j == 0)
				seq_printf(f, "\t%s:",
				    list_names[LHEAD_ACTIVE]);
			else if (j == 2) {
				seq_printf(f, "\n\t       ");
				j = 0;
			}
			seq_printf(f, " [%d]%pf(%ps)",
			    tqt->tqt_thread->pid,
			    tqt->tqt_task->tqent_func,
			    tqt->tqt_task->tqent_arg);
			++j;
		}
		seq_printf(f, "\n");
	}

	for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
		if (lheads[i]) {
			j = 0;
			list_for_each(lh, lheads[i]) {
				if (spl_max_show_tasks != 0 &&
				    j >= spl_max_show_tasks) {
					seq_printf(f, "\n\t(truncated)");
					break;
				}
				/* show the wait waitq list */
				if (i == LHEAD_WAIT) {
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, entry);
#else
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, task_list);
#endif
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 8 == 0)
						seq_printf(f, "\n\t     ");

					tsk = wq->private;
					seq_printf(f, " %d", tsk->pid);
				/* pend, prio and delay lists */
				} else {
					tqe = list_entry(lh, taskq_ent_t,
					    tqent_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 2 == 0)
						seq_printf(f, "\n\t     ");

					seq_printf(f, " %pf(%ps)",
					    tqe->tqent_func,
					    tqe->tqent_arg);
				}
				++j;
			}
			seq_printf(f, "\n");
		}
	if (lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}

static int
taskq_all_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_TRUE));
}

static int
taskq_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_FALSE));
}

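/*
 * seq_file iterator for /proc/spl/taskq[-all]: ->start() takes
 * tq_list_sem and advances to the *pos'th taskq (printing the header on
 * the first call), ->next() steps to the following list entry, and
 * ->stop() drops the semaphore.  Returning NULL from start/next
 * terminates the walk.
 */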
static void *
taskq_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&tq_list_sem);
	if (!n)
		taskq_seq_show_headers(f);

	p = tq_list.next;
	while (n--) {
		p = p->next;
		if (p == &tq_list)
			return (NULL);
	}

	return (list_entry(p, taskq_t, tq_taskqs));
}

static void *
taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	taskq_t *tq = p;

	++*pos;
	return ((tq->tq_taskqs.next == &tq_list) ?
	    NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
}

static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "---------------------------------------------  "
	    "----- slab ------  "
	    "---- object -----  "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name                                  "
	    "  flags      size     alloc slabsize  objsize  "
	    "total alloc   max  "
	    "total alloc   max  "
	    "dlock alloc   max\n");
}

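/*
 * Print one row of /proc/spl/kmem/slab for the given SPL kmem cache.
 * Caches backed by a native Linux slab (KMC_SLAB) only report the object
 * size and the number of objects currently allocated; fields tracked by
 * the kernel's own slab accounting are shown as "-".
 */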
static int
slab_seq_show(struct seq_file *f, void *p)
{
	spl_kmem_cache_t *skc = p;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	if (skc->skc_flags & KMC_SLAB) {
		/*
		 * This cache is backed by a generic Linux kmem cache which
		 * has its own accounting. For these caches we only track
		 * the number of active allocated objects that exist within
		 * the underlying Linux slabs. For the overall statistics of
		 * the underlying Linux cache please refer to /proc/slabinfo.
		 */
		spin_lock(&skc->skc_lock);
		uint64_t objs_allocated =
		    percpu_counter_sum(&skc->skc_linux_alloc);
		seq_printf(f, "%-36s  ", skc->skc_name);
		seq_printf(f, "0x%05lx %9s %9lu %8s %8u  "
		    "%5s %5s %5s  %5s %5lu %5s  %5s %5s %5s\n",
		    (long unsigned)skc->skc_flags,
		    "-",
		    (long unsigned)(skc->skc_obj_size * objs_allocated),
		    "-",
		    (unsigned)skc->skc_obj_size,
		    "-", "-", "-", "-",
		    (long unsigned)objs_allocated,
		    "-", "-", "-", "-");
		spin_unlock(&skc->skc_lock);
		return (0);
	}

	spin_lock(&skc->skc_lock);
	seq_printf(f, "%-36s  ", skc->skc_name);
	seq_printf(f, "0x%05lx %9lu %9lu %8u %8u  "
	    "%5lu %5lu %5lu  %5lu %5lu %5lu  %5lu %5lu %5lu\n",
	    (long unsigned)skc->skc_flags,
	    (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
	    (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
	    (unsigned)skc->skc_slab_size,
	    (unsigned)skc->skc_obj_size,
	    (long unsigned)skc->skc_slab_total,
	    (long unsigned)skc->skc_slab_alloc,
	    (long unsigned)skc->skc_slab_max,
	    (long unsigned)skc->skc_obj_total,
	    (long unsigned)skc->skc_obj_alloc,
	    (long unsigned)skc->skc_obj_max,
	    (long unsigned)skc->skc_obj_deadlock,
	    (long unsigned)skc->skc_obj_emergency,
	    (long unsigned)skc->skc_obj_emergency_max);
	spin_unlock(&skc->skc_lock);
	return (0);
}

static void *
slab_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&spl_kmem_cache_sem);
	if (!n)
		slab_seq_show_headers(f);

	p = spl_kmem_cache_list.next;
	while (n--) {
		p = p->next;
		if (p == &spl_kmem_cache_list)
			return (NULL);
	}

	return (list_entry(p, spl_kmem_cache_t, skc_list));
}

static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	spl_kmem_cache_t *skc = p;

	++*pos;
	return ((skc->skc_list.next == &spl_kmem_cache_list) ?
	    NULL : list_entry(skc->skc_list.next, spl_kmem_cache_t, skc_list));
}

static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}

static struct seq_operations slab_seq_ops = {
	.show  = slab_seq_show,
	.start = slab_seq_start,
	.next  = slab_seq_next,
	.stop  = slab_seq_stop,
};

static int
proc_slab_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &slab_seq_ops));
}

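/*
 * kstat_proc_op_t resolves to struct proc_ops on kernels that provide it
 * (the HAVE_PROC_OPS_STRUCT configure check, Linux 5.6 and newer) and to
 * the older struct file_operations otherwise, hence the duplicated
 * initializers in the operation tables below.
 */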
static const kstat_proc_op_t proc_slab_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open	= proc_slab_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
#else
	.open		= proc_slab_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
#endif
};

static void
taskq_seq_stop(struct seq_file *f, void *v)
{
	up_read(&tq_list_sem);
}

static struct seq_operations taskq_all_seq_ops = {
	.show	= taskq_all_seq_show,
	.start	= taskq_seq_start,
	.next	= taskq_seq_next,
	.stop	= taskq_seq_stop,
};

static struct seq_operations taskq_seq_ops = {
	.show	= taskq_seq_show,
	.start	= taskq_seq_start,
	.next	= taskq_seq_next,
	.stop	= taskq_seq_stop,
};

static int
proc_taskq_all_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_all_seq_ops));
}

static int
proc_taskq_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_seq_ops));
}

static const kstat_proc_op_t proc_taskq_all_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open	= proc_taskq_all_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
#else
	.open		= proc_taskq_all_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
#endif
};

static const kstat_proc_op_t proc_taskq_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open	= proc_taskq_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
#else
	.open		= proc_taskq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
#endif
};

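/*
 * Nested sysctl tables rooted at spl_root below register the hierarchy
 * kernel.spl.{kmem,kstat}, i.e. these entries surface under
 * /proc/sys/kernel/spl/ alongside gitrev and hostid.
 */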
static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
	{
		.procname	= "kmem_used",
		.data		= &kmem_alloc_used,
#ifdef HAVE_ATOMIC64_T
		.maxlen		= sizeof (atomic64_t),
#else
		.maxlen		= sizeof (atomic_t),
#endif /* HAVE_ATOMIC64_T */
		.mode		= 0444,
		.proc_handler	= &proc_domemused,
	},
	{
		.procname	= "kmem_max",
		.data		= &kmem_alloc_max,
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doulongvec_minmax,
	},
#endif /* DEBUG_KMEM */
	{
		.procname	= "slab_kvmem_total",
		.data		= (void *)(KMC_KVMEM | KMC_TOTAL),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{
		.procname	= "slab_kvmem_alloc",
		.data		= (void *)(KMC_KVMEM | KMC_ALLOC),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{
		.procname	= "slab_kvmem_max",
		.data		= (void *)(KMC_KVMEM | KMC_MAX),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{},
};

static struct ctl_table spl_kstat_table[] = {
	{},
};

static struct ctl_table spl_table[] = {
	/*
	 * NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		.procname	= "gitrev",
		.data		= spl_gitrev,
		.maxlen		= sizeof (spl_gitrev),
		.mode		= 0444,
		.proc_handler	= &proc_dostring,
	},
	{
		.procname	= "hostid",
		.data		= &spl_hostid,
		.maxlen		= sizeof (unsigned long),
		.mode		= 0644,
		.proc_handler	= &proc_dohostid,
	},
	{
		.procname	= "kmem",
		.mode		= 0555,
		.child		= spl_kmem_table,
	},
	{
		.procname	= "kstat",
		.mode		= 0555,
		.child		= spl_kstat_table,
	},
	{},
};

static struct ctl_table spl_dir[] = {
	{
		.procname	= "spl",
		.mode		= 0555,
		.child		= spl_table,
	},
	{}
};

static struct ctl_table spl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= spl_dir,
	},
	{}
};

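/*
 * Register the sysctl tree and create the /proc/spl hierarchy:
 *
 *	/proc/sys/kernel/spl/{gitrev,hostid,kmem/*,kstat/*}
 *	/proc/spl/taskq-all
 *	/proc/spl/taskq
 *	/proc/spl/kmem/slab
 *	/proc/spl/kstat/	(used by the kstat implementation)
 *
 * Any failure unwinds everything created so far and returns -EUNATCH.
 */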
int
spl_proc_init(void)
{
	int rc = 0;

	spl_header = register_sysctl_table(spl_root);
	if (spl_header == NULL)
		return (-EUNATCH);

	proc_spl = proc_mkdir("spl", NULL);
	if (proc_spl == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq_all = proc_create_data("taskq-all", 0444, proc_spl,
	    &proc_taskq_all_operations, NULL);
	if (proc_spl_taskq_all == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq = proc_create_data("taskq", 0444, proc_spl,
	    &proc_taskq_operations, NULL);
	if (proc_spl_taskq == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem = proc_mkdir("kmem", proc_spl);
	if (proc_spl_kmem == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem_slab = proc_create_data("slab", 0444, proc_spl_kmem,
	    &proc_slab_operations, NULL);
	if (proc_spl_kmem_slab == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kstat = proc_mkdir("kstat", proc_spl);
	if (proc_spl_kstat == NULL) {
		rc = -EUNATCH;
		goto out;
	}
out:
	if (rc) {
		remove_proc_entry("kstat", proc_spl);
		remove_proc_entry("slab", proc_spl_kmem);
		remove_proc_entry("kmem", proc_spl);
		remove_proc_entry("taskq-all", proc_spl);
		remove_proc_entry("taskq", proc_spl);
		remove_proc_entry("spl", NULL);
		unregister_sysctl_table(spl_header);
	}

	return (rc);
}

void
spl_proc_fini(void)
{
	remove_proc_entry("kstat", proc_spl);
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
	remove_proc_entry("taskq-all", proc_spl);
	remove_proc_entry("taskq", proc_spl);
	remove_proc_entry("spl", NULL);

	ASSERT(spl_header != NULL);
	unregister_sysctl_table(spl_header);
}