// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
 * Copyright (c) 2025, Klara, Inc.
 */

#include <libintl.h>
#include <libuutil.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread_pool.h>

#include <libzfs.h>
#include <libzutil.h>
#include <sys/zfs_context.h>
#include <sys/wait.h>

#include "zpool_util.h"

/*
 * Private interface for iterating over pools specified on the command line.
 * Most consumers will call for_each_pool, but in order to support iostat, we
 * allow fine-grained control through the zpool_list_t interface.
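 *
 * A consumer needing that control might do something like the following
 * (hypothetical sketch; the callback and interval handling are omitted):
 *
 *	zpool_list_t *list = pool_list_get(argc, argv, NULL,
 *	    ZFS_TYPE_POOL, B_FALSE, &ret);
 *	for (;;) {
 *		(void) pool_list_refresh(list);
 *		(void) pool_list_iter(list, B_FALSE, callback, data);
 *		... sleep for the reporting interval ...
 *	}
 *	pool_list_free(list);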
 */

typedef struct zpool_node {
	zpool_handle_t	*zn_handle;
	uu_avl_node_t	zn_avlnode;
	hrtime_t	zn_last_refresh;
} zpool_node_t;

struct zpool_list {
	boolean_t	zl_findall;
	boolean_t	zl_literal;
	uu_avl_t	*zl_avl;
	uu_avl_pool_t	*zl_pool;
	zprop_list_t	**zl_proplist;
	zfs_type_t	zl_type;
	hrtime_t	zl_last_refresh;
};

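/* AVL comparison callback: keep the pool list sorted by pool name. */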
static int
zpool_compare(const void *larg, const void *rarg, void *unused)
{
	(void) unused;
	zpool_handle_t *l = ((zpool_node_t *)larg)->zn_handle;
	zpool_handle_t *r = ((zpool_node_t *)rarg)->zn_handle;
	const char *lname = zpool_get_name(l);
	const char *rname = zpool_get_name(r);

	return (strcmp(lname, rname));
}

/*
 * Callback function for pool_list_get(). Adds the given pool to the AVL tree
 * of known pools.
 */
static int
add_pool(zpool_handle_t *zhp, zpool_list_t *zlp)
{
	zpool_node_t *node, *new = safe_malloc(sizeof (zpool_node_t));
	uu_avl_index_t idx;

	new->zn_handle = zhp;
	uu_avl_node_init(new, &new->zn_avlnode, zlp->zl_pool);

	node = uu_avl_find(zlp->zl_avl, new, NULL, &idx);
	if (node == NULL) {
		if (zlp->zl_proplist &&
		    zpool_expand_proplist(zhp, zlp->zl_proplist,
		    zlp->zl_type, zlp->zl_literal) != 0) {
			zpool_close(zhp);
			free(new);
			return (-1);
		}
		new->zn_last_refresh = zlp->zl_last_refresh;
		uu_avl_insert(zlp->zl_avl, new, idx);
	} else {
		zpool_refresh_stats_from_handle(node->zn_handle, zhp);
		node->zn_last_refresh = zlp->zl_last_refresh;
		zpool_close(zhp);
		free(new);
		return (-1);
	}

	return (0);
}

/*
 * add_pool(), but always returns 0. This allows zpool_iter() to continue
 * even if a pool exists in the tree, or we fail to get the properties for
 * a new one.
 */
static int
add_pool_cb(zpool_handle_t *zhp, void *data)
{
	(void) add_pool(zhp, data);
	return (0);
}

/*
 * Create a list of pools based on the given arguments. If we're given no
 * arguments, then iterate over all pools in the system and add them to the AVL
 * tree. Otherwise, add only those pools explicitly specified on the command
 * line.
 */
zpool_list_t *
pool_list_get(int argc, char **argv, zprop_list_t **proplist, zfs_type_t type,
    boolean_t literal, int *err)
{
	zpool_list_t *zlp;

	zlp = safe_malloc(sizeof (zpool_list_t));

	zlp->zl_pool = uu_avl_pool_create("zfs_pool", sizeof (zpool_node_t),
	    offsetof(zpool_node_t, zn_avlnode), zpool_compare, UU_DEFAULT);

	if (zlp->zl_pool == NULL)
		zpool_no_memory();

	if ((zlp->zl_avl = uu_avl_create(zlp->zl_pool, NULL,
	    UU_DEFAULT)) == NULL)
		zpool_no_memory();

	zlp->zl_proplist = proplist;
	zlp->zl_type = type;

	zlp->zl_literal = literal;
	zlp->zl_last_refresh = gethrtime();

	if (argc == 0) {
		(void) zpool_iter(g_zfs, add_pool_cb, zlp);
		zlp->zl_findall = B_TRUE;
	} else {
		int i;

		for (i = 0; i < argc; i++) {
			zpool_handle_t *zhp;

			if ((zhp = zpool_open_canfail(g_zfs, argv[i])) !=
			    NULL) {
				if (add_pool(zhp, zlp) != 0)
					*err = B_TRUE;
			} else {
				*err = B_TRUE;
			}
		}
	}

	return (zlp);
}

/*
 * Refresh the state of all pools on the list. Additionally, if no pools were
 * given on the command line, add any new pools and remove any that are no
 * longer available.
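 *
 * Returns the number of pools on the list that are available after the
 * refresh.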
 */
int
pool_list_refresh(zpool_list_t *zlp)
{
	zlp->zl_last_refresh = gethrtime();

	if (!zlp->zl_findall) {
		/*
		 * This list is a fixed list of pools, so we must not add
		 * or remove any. Just walk over them and refresh their
		 * state.
		 */
		int navail = 0;
		for (zpool_node_t *node = uu_avl_first(zlp->zl_avl);
		    node != NULL; node = uu_avl_next(zlp->zl_avl, node)) {
			boolean_t missing;
			zpool_refresh_stats(node->zn_handle, &missing);
			navail += !missing;
			node->zn_last_refresh = zlp->zl_last_refresh;
		}
		return (navail);
	}

	/* Search for any new pools and add them to the list. */
	(void) zpool_iter(g_zfs, add_pool_cb, zlp);

	/* Walk the list of existing pools, and update or remove them. */
	zpool_node_t *node, *next;
	for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next) {
		next = uu_avl_next(zlp->zl_avl, node);

		/*
		 * Skip any that were refreshed and are online; they were added
		 * by zpool_iter() and are already up to date.
		 */
		if (node->zn_last_refresh == zlp->zl_last_refresh &&
		    zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL)
			continue;

		/* Refresh and remove if necessary. */
		boolean_t missing;
		zpool_refresh_stats(node->zn_handle, &missing);
		if (missing) {
			uu_avl_remove(zlp->zl_avl, node);
			zpool_close(node->zn_handle);
			free(node);
		} else {
			node->zn_last_refresh = zlp->zl_last_refresh;
		}
	}

	return (uu_avl_numnodes(zlp->zl_avl));
}

/*
 * Iterate over all pools in the list, executing the callback for each
 */
int
pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
    void *data)
{
	zpool_node_t *node, *next_node;
	int ret = 0;

	for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next_node) {
		next_node = uu_avl_next(zlp->zl_avl, node);
		if (zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL ||
		    unavail)
			ret |= func(node->zn_handle, data);
	}

	return (ret);
}

/*
 * Free all the handles associated with this list.
 */
void
pool_list_free(zpool_list_t *zlp)
{
	uu_avl_walk_t *walk;
	zpool_node_t *node;

	if ((walk = uu_avl_walk_start(zlp->zl_avl, UU_WALK_ROBUST)) == NULL) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory"));
		exit(1);
	}

	while ((node = uu_avl_walk_next(walk)) != NULL) {
		uu_avl_remove(zlp->zl_avl, node);
		zpool_close(node->zn_handle);
		free(node);
	}

	uu_avl_walk_end(walk);
	uu_avl_destroy(zlp->zl_avl);
	uu_avl_pool_destroy(zlp->zl_pool);

	free(zlp);
}

/*
 * Returns the number of elements in the pool list.
 */
int
pool_list_count(zpool_list_t *zlp)
{
	return (uu_avl_numnodes(zlp->zl_avl));
}

/*
 * High level function which iterates over all pools given on the command line,
 * using the pool_list_* interfaces.
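 *
 * For example, a caller with a hypothetical per-pool callback might do:
 *
 *	static int show_pool(zpool_handle_t *zhp, void *data);
 *	...
 *	ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
 *	    B_FALSE, show_pool, NULL);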
 */
int
for_each_pool(int argc, char **argv, boolean_t unavail,
    zprop_list_t **proplist, zfs_type_t type, boolean_t literal,
    zpool_iter_f func, void *data)
{
	zpool_list_t *list;
	int ret = 0;

	if ((list = pool_list_get(argc, argv, proplist, type, literal,
	    &ret)) == NULL)
		return (1);

	if (pool_list_iter(list, unavail, func, data) != 0)
		ret = 1;

	pool_list_free(list);

	return (ret);
}

/*
 * This is the equivalent of for_each_pool() for vdevs. It iterates through
 * all vdevs in the pool, ignoring root vdevs and holes, calling func() on
 * each one.
 *
 * @zhp: Zpool handle
 * @func: Function to call on each vdev
 * @data: Custom data to pass to the function
 */
int
for_each_vdev(zpool_handle_t *zhp, pool_vdev_iter_f func, void *data)
{
	nvlist_t *config, *nvroot = NULL;

	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
	}
	return (for_each_vdev_cb((void *) zhp, nvroot, func, data));
}

/*
 * Process the vcdl->vdev_cmd_data[] array to figure out all the unique column
 * names and their widths. When this function is done, vcdl->uniq_cols,
 * vcdl->uniq_cols_cnt, and vcdl->uniq_cols_width will be filled in.
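 *
 * For example (hypothetical data), if one vdev reported "temp=35" and
 * another reported "temp=40" and "health=PASSED", the unique columns would
 * be "temp" and "health", with widths 4 and 6 (the larger of the column
 * name and any of its values).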
 */
static void
process_unique_cmd_columns(vdev_cmd_data_list_t *vcdl)
{
	char **uniq_cols = NULL, **tmp = NULL;
	int *uniq_cols_width;
	vdev_cmd_data_t *data;
	int cnt = 0;
	int k;

	/* For each vdev */
	for (int i = 0; i < vcdl->count; i++) {
		data = &vcdl->data[i];
		/* For each column the vdev reported */
		for (int j = 0; j < data->cols_cnt; j++) {
			/* Is this column in our list of unique column names? */
			for (k = 0; k < cnt; k++) {
				if (strcmp(data->cols[j], uniq_cols[k]) == 0)
					break; /* yes it is */
			}
			if (k == cnt) {
				/* No entry for column, add to list */
				tmp = realloc(uniq_cols, sizeof (*uniq_cols) *
				    (cnt + 1));
				if (tmp == NULL)
					break; /* Nothing we can do... */
				uniq_cols = tmp;
				uniq_cols[cnt] = data->cols[j];
				cnt++;
			}
		}
	}

	/*
	 * We now have a list of all the unique column names. Figure out the
	 * max width of each column by looking at the column name and all its
	 * values.
	 */
	uniq_cols_width = safe_malloc(sizeof (*uniq_cols_width) * cnt);
	for (int i = 0; i < cnt; i++) {
		/* Start off with the column title's width */
		uniq_cols_width[i] = strlen(uniq_cols[i]);
		/* For each vdev */
		for (int j = 0; j < vcdl->count; j++) {
			/* For each of the vdev's values in a column */
			data = &vcdl->data[j];
			for (k = 0; k < data->cols_cnt; k++) {
				/* Does this vdev have a value for this col? */
				if (strcmp(data->cols[k], uniq_cols[i]) == 0) {
					/* Is the value width larger? */
					uniq_cols_width[i] =
					    MAX(uniq_cols_width[i],
					    strlen(data->lines[k]));
				}
			}
		}
	}

	vcdl->uniq_cols = uniq_cols;
	vcdl->uniq_cols_cnt = cnt;
	vcdl->uniq_cols_width = uniq_cols_width;
}


/*
 * Process a line of command output
 *
 * When running 'zpool iostat|status -c' the lines of output can either be
 * in the form of:
 *
 *	column_name=value
 *
 * Or just:
 *
 *	value
 *
 * Process the column_name (if any) and value.
 *
 * Returns 0 if the line was processed and more lines can still be
 * processed.
 *
 * Returns 1 if this was the last line to process, or on error.
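 *
 * For example (hypothetical script output), the lines
 *
 *	health=PASSED
 *	temp=35
 *
 * add the columns "health" and "temp" with the values "PASSED" and "35",
 * while a bare line such as
 *
 *	PASSED
 *
 * is stored as a value with no column name and ends processing.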
 */
static int
vdev_process_cmd_output(vdev_cmd_data_t *data, char *line)
{
	char *col;
	char *val;
	char *equals;
	char **tmp;

	if (line == NULL)
		return (1);

	equals = strchr(line, '=');
	if (equals != NULL) {
		/*
		 * We have a 'column=value' type line. Split it into the
		 * column and value strings by turning the '=' into a '\0'.
		 */
		*equals = '\0';
		col = line;
		val = equals + 1;
	} else {
		col = NULL;
		val = line;
	}

	/* Do we already have a column by this name? If so, skip it. */
	if (col != NULL) {
		for (int i = 0; i < data->cols_cnt; i++) {
			if (strcmp(col, data->cols[i]) == 0)
				return (0); /* Duplicate, skip */
		}
	}

	if (val != NULL) {
		tmp = realloc(data->lines,
		    (data->lines_cnt + 1) * sizeof (*data->lines));
		if (tmp == NULL)
			return (1);

		data->lines = tmp;
		data->lines[data->lines_cnt] = strdup(val);
		data->lines_cnt++;
	}

	if (col != NULL) {
		tmp = realloc(data->cols,
		    (data->cols_cnt + 1) * sizeof (*data->cols));
		if (tmp == NULL)
			return (1);

		data->cols = tmp;
		data->cols[data->cols_cnt] = strdup(col);
		data->cols_cnt++;
	}

	if (val != NULL && col == NULL)
		return (1);

	return (0);
}

/*
 * Run the cmd and store results in *data.
 */
static void
vdev_run_cmd(vdev_cmd_data_t *data, char *cmd)
{
	int rc;
	char *argv[2] = {cmd};
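	/* argv[1] is implicitly NULL, terminating the argument vector */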
	char **env;
	char **lines = NULL;
	int lines_cnt = 0;
	int i;

	env = zpool_vdev_script_alloc_env(data->pool, data->path, data->upath,
	    data->vdev_enc_sysfs_path, NULL, NULL);
	if (env == NULL)
		goto out;

	/* Run the command */
	rc = libzfs_run_process_get_stdout_nopath(cmd, argv, env, &lines,
	    &lines_cnt);

	zpool_vdev_script_free_env(env);

	if (rc != 0)
		goto out;

	/* Process the output we got */
	for (i = 0; i < lines_cnt; i++)
		if (vdev_process_cmd_output(data, lines[i]) != 0)
			break;

out:
	if (lines != NULL)
		libzfs_free_str_array(lines, lines_cnt);
}

/*
 * Generate the search path for zpool iostat/status -c scripts.
 * The string returned must be freed.
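 *
 * Search order: the ZPOOL_SCRIPTS_PATH environment variable if set;
 * otherwise $HOME/.zpool.d followed by ZPOOL_SCRIPTS_DIR; falling back
 * to ZPOOL_SCRIPTS_DIR alone if $HOME is not set.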
 */
char *
zpool_get_cmd_search_path(void)
{
	const char *env;
	char *sp = NULL;

	env = getenv("ZPOOL_SCRIPTS_PATH");
	if (env != NULL)
		return (strdup(env));

	env = getenv("HOME");
	if (env != NULL) {
		if (asprintf(&sp, "%s/.zpool.d:%s",
		    env, ZPOOL_SCRIPTS_DIR) != -1) {
			return (sp);
		}
	}

	if (asprintf(&sp, "%s", ZPOOL_SCRIPTS_DIR) != -1)
		return (sp);

	return (NULL);
}

/* Thread function run for each vdev */
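/*
 * data->cmd may name several scripts separated by commas; each one is
 * looked up along the -c search path and, if found and executable, run.
 */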
static void
vdev_run_cmd_thread(void *cb_cmd_data)
{
	vdev_cmd_data_t *data = cb_cmd_data;
	char *cmd = NULL, *cmddup, *cmdrest;

	cmddup = strdup(data->cmd);
	if (cmddup == NULL)
		return;

	cmdrest = cmddup;
	while ((cmd = strtok_r(cmdrest, ",", &cmdrest))) {
		char *dir = NULL, *sp, *sprest;
		char fullpath[MAXPATHLEN];

		if (strchr(cmd, '/') != NULL)
			continue;

		sp = zpool_get_cmd_search_path();
		if (sp == NULL)
			continue;

		sprest = sp;
		while ((dir = strtok_r(sprest, ":", &sprest))) {
			if (snprintf(fullpath, sizeof (fullpath),
			    "%s/%s", dir, cmd) == -1)
				continue;

			if (access(fullpath, X_OK) == 0) {
				vdev_run_cmd(data, fullpath);
				break;
			}
		}
		free(sp);
	}
	free(cmddup);
}

/* For each vdev in the pool run a command */
static int
for_each_vdev_run_cb(void *zhp_data, nvlist_t *nv, void *cb_vcdl)
{
	vdev_cmd_data_list_t *vcdl = cb_vcdl;
	vdev_cmd_data_t *data;
	const char *path = NULL;
	char *vname = NULL;
	const char *vdev_enc_sysfs_path = NULL;
	int i, match = 0;
	zpool_handle_t *zhp = zhp_data;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		return (1);

	/* Make sure we're getting the updated enclosure sysfs path */
	update_vdev_config_dev_sysfs_path(nv, path,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);

	nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &vdev_enc_sysfs_path);

	/* Spares can show up more than once if they're in use, so skip dups */
	for (i = 0; i < vcdl->count; i++) {
		if ((strcmp(vcdl->data[i].path, path) == 0) &&
		    (strcmp(vcdl->data[i].pool, zpool_get_name(zhp)) == 0)) {
			/* vdev already exists, skip it */
			return (0);
		}
	}

	/* Check for selected vdevs here, if any */
	for (i = 0; i < vcdl->vdev_names_count; i++) {
		vname = zpool_vdev_name(g_zfs, zhp, nv, vcdl->cb_name_flags);
		if (strcmp(vcdl->vdev_names[i], vname) == 0) {
			free(vname);
			match = 1;
			break; /* match */
		}
		free(vname);
	}

	/* If we selected vdevs, and this isn't one of them, then bail out */
	if (!match && vcdl->vdev_names_count)
		return (0);

	/*
	 * Resize our array and add in the new element.
	 */
	if (!(vcdl->data = realloc(vcdl->data,
	    sizeof (*vcdl->data) * (vcdl->count + 1))))
		return (ENOMEM); /* couldn't realloc */

	data = &vcdl->data[vcdl->count];

	data->pool = strdup(zpool_get_name(zhp));
	data->path = strdup(path);
	data->upath = zfs_get_underlying_path(path);
	data->cmd = vcdl->cmd;
	data->lines = data->cols = NULL;
	data->lines_cnt = data->cols_cnt = 0;
	if (vdev_enc_sysfs_path)
		data->vdev_enc_sysfs_path = strdup(vdev_enc_sysfs_path);
	else
		data->vdev_enc_sysfs_path = NULL;

	vcdl->count++;

	return (0);
}

/* Get the names and count of the vdevs */
static int
all_pools_for_each_vdev_gather_cb(zpool_handle_t *zhp, void *cb_vcdl)
{
	return (for_each_vdev(zhp, for_each_vdev_run_cb, cb_vcdl));
}

/*
 * Now that vcdl is populated with our complete list of vdevs, spawn
 * off the commands.
 */
static void
all_pools_for_each_vdev_run_vcdl(vdev_cmd_data_list_t *vcdl)
{
	tpool_t *t;

	t = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL);
	if (t == NULL)
		return;

	/* Spawn off the command for each vdev */
	for (int i = 0; i < vcdl->count; i++) {
		(void) tpool_dispatch(t, vdev_run_cmd_thread,
		    (void *) &vcdl->data[i]);
	}

	/* Wait for threads to finish */
	tpool_wait(t);
	tpool_destroy(t);
}

/*
 * Run command 'cmd' on all vdevs in all pools in argv. Saves the output
 * lines from the command in vcdl->data[].lines for all vdevs. If you want
 * to run the command on only certain vdevs, fill in g_zfs, vdev_names,
 * vdev_names_count, and cb_name_flags. Otherwise leave them as zero.
 *
 * Returns a vdev_cmd_data_list_t that must be freed with
 * free_vdev_cmd_data_list().
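 *
 * For example (hypothetical):
 *
 *	vdev_cmd_data_list_t *vcdl;
 *	vcdl = all_pools_for_each_vdev_run(argc, argv, "somescript",
 *	    g_zfs, NULL, 0, 0);
 *	... print vcdl->uniq_cols[] headers and vcdl->data[] values ...
 *	free_vdev_cmd_data_list(vcdl);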
 */
vdev_cmd_data_list_t *
all_pools_for_each_vdev_run(int argc, char **argv, char *cmd,
    libzfs_handle_t *g_zfs, char **vdev_names, int vdev_names_count,
    int cb_name_flags)
{
	vdev_cmd_data_list_t *vcdl;
	vcdl = safe_malloc(sizeof (vdev_cmd_data_list_t));
	vcdl->cmd = cmd;

	vcdl->vdev_names = vdev_names;
	vcdl->vdev_names_count = vdev_names_count;
	vcdl->cb_name_flags = cb_name_flags;
	vcdl->g_zfs = g_zfs;

	/* Gather our list of all vdevs in all pools */
	for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
	    B_FALSE, all_pools_for_each_vdev_gather_cb, vcdl);

	/* Run command on all vdevs in all pools */
	all_pools_for_each_vdev_run_vcdl(vcdl);

	/*
	 * vcdl->data[] now contains all the column names and values for each
	 * vdev. We need to process that into a master list of unique column
	 * names, and figure out the width of each column.
	 */
	process_unique_cmd_columns(vcdl);

	return (vcdl);
}

/*
 * Free the vdev_cmd_data_list_t created by all_pools_for_each_vdev_run()
 */
void
free_vdev_cmd_data_list(vdev_cmd_data_list_t *vcdl)
{
	free(vcdl->uniq_cols);
	free(vcdl->uniq_cols_width);

	for (int i = 0; i < vcdl->count; i++) {
		free(vcdl->data[i].path);
		free(vcdl->data[i].pool);
		free(vcdl->data[i].upath);

		for (int j = 0; j < vcdl->data[i].lines_cnt; j++)
			free(vcdl->data[i].lines[j]);

		free(vcdl->data[i].lines);

		for (int j = 0; j < vcdl->data[i].cols_cnt; j++)
			free(vcdl->data[i].cols[j]);

		free(vcdl->data[i].cols);
		free(vcdl->data[i].vdev_enc_sysfs_path);
	}
	free(vcdl->data);
	free(vcdl);
}