1.\"
2.\" CDDL HEADER START
3.\"
4.\" The contents of this file are subject to the terms of the
5.\" Common Development and Distribution License (the "License").
6.\" You may not use this file except in compliance with the License.
7.\"
8.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9.\" or https://opensource.org/licenses/CDDL-1.0.
10.\" See the License for the specific language governing permissions
11.\" and limitations under the License.
12.\"
13.\" When distributing Covered Code, include this CDDL HEADER in each
14.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15.\" If applicable, add the following below this CDDL HEADER, with the
16.\" fields enclosed by brackets "[]" replaced with your own identifying
17.\" information: Portions Copyright [yyyy] [name of copyright owner]
18.\"
19.\" CDDL HEADER END
20.\"
21.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
22.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
23.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
24.\" Copyright (c) 2017 Datto Inc.
25.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
26.\" Copyright 2017 Nexenta Systems, Inc.
27.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
28.\"
.Dd February 14, 2024
.Dt ZPOOL-STATUS 8
.Os
.
.Sh NAME
.Nm zpool-status
.Nd show detailed health status for ZFS storage pools
.Sh SYNOPSIS
.Nm zpool
.Cm status
.Op Fl DegiLpPstvx
.Op Fl T Sy u Ns | Ns Sy d
.Op Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns …
.Oo Ar pool Oc Ns …
.Op Ar interval Op Ar count
.Op Fl j Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid
.
.Sh DESCRIPTION
Displays the detailed health status for the given pools.
If no
.Ar pool
is specified, then the status of each pool in the system is displayed.
For more information on pool and device health, see the
.Sx Device Failure and Recovery
section of
.Xr zpoolconcepts 7 .
.Pp
If a scrub or resilver is in progress, this command reports the percentage done
and the estimated time to completion.
Both of these are only approximate, because the amount of data in the pool and
the other workloads on the system can change.
.Bl -tag -width Ds
.It Fl -power
Display vdev enclosure slot power status (on or off).
.It Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns …
Run a script (or scripts) on each vdev and include the output as a new column
in the
.Nm zpool Cm status
output.
See the
.Fl c
option of
.Nm zpool Cm iostat
for complete details.
.It Fl j Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid
Display the status for ZFS pools in JSON format.
Specify
.Sy --json-int
to display numbers in integer format instead of strings.
Specify
.Sy --json-flat-vdevs
to display vdevs in a flat hierarchy instead of as nested vdev objects.
Specify
.Sy --json-pool-key-guid
to use the pool GUID as the key for pool objects instead of pool names.
.It Fl D
Display a histogram of deduplication statistics, showing the allocated
.Pq physically present on disk
and referenced
.Pq logically referenced in the pool
block counts and sizes by reference count.
If repeated (-DD), also shows statistics on how much of the DDT is resident
in the ARC.
.It Fl e
Only show unhealthy vdevs (not-ONLINE or with errors).
.It Fl g
Display vdev GUIDs instead of the normal device names.
These GUIDs can be used in place of device names for the zpool
detach/offline/remove/replace commands.
.It Fl i
Display vdev initialization status.
.It Fl L
Display real paths for vdevs resolving all symbolic links.
This can be used to look up the current block device name regardless of the
.Pa /dev/disk/
path used to open it.
.It Fl p
Display numbers in parsable (exact) values.
.It Fl P
Display full paths for vdevs instead of only the last component of
the path.
This can be used in conjunction with the
.Fl L
flag.
.It Fl s
Display the number of leaf vdev slow I/O operations.
This is the number of I/O operations that didn't complete in
.Sy zio_slow_io_ms
milliseconds
.Pq Sy 30000 No by default .
This does not necessarily mean the I/O operations failed to complete, just
that they took an unreasonably long time.
This may indicate a problem with the underlying storage.
.It Fl t
Display vdev TRIM status.
.It Fl T Sy u Ns | Ns Sy d
Display a time stamp.
Specify
.Sy u
for a printed representation of the internal representation of time.
See
.Xr time 1 .
Specify
.Sy d
for standard date format.
See
.Xr date 1 .
.It Fl v
Displays verbose data error information, printing out a complete list of all
data errors since the last complete pool scrub.
If the head_errlog feature is enabled and files containing errors have been
removed, then the respective filenames will not be reported in subsequent runs
of this command.
.It Fl x
Only display status for pools that are exhibiting errors or are otherwise
unavailable.
Warnings about pools not using the latest on-disk format will not be included.
.El
.
.Sh EXAMPLES
.\" These are, respectively, examples 16 from zpool.8
.\" Make sure to update them bidirectionally
.Ss Example 1 : No Adding output columns
Additional columns can be added to the
.Nm zpool Cm status No and Nm zpool Cm iostat No output with Fl c .
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl c Pa vendor , Ns Pa model , Ns Pa size
   NAME     STATE  READ WRITE CKSUM vendor  model        size
   tank     ONLINE 0    0     0
   mirror-0 ONLINE 0    0     0
   U1       ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U10      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U11      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U12      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U13      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U14      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T

.No # Nm zpool Cm iostat Fl vc Pa size
              capacity     operations     bandwidth
pool        alloc   free   read  write   read  write  size
----------  -----  -----  -----  -----  -----  -----  ----
rpool       14.6G  54.9G      4     55   250K  2.69M
  sda1      14.6G  54.9G      4     55   250K  2.69M   70G
----------  -----  -----  -----  -----  -----  -----  ----
.Ed
.
.Ss Example 2 : No Display the status output in JSON format
.Nm zpool Cm status No can output in JSON format if
.Fl j
is specified.
.Fl c
can be used to run a script on each vdev.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl j Fl c Pa vendor , Ns Pa model , Ns Pa size | Nm jq
{
  "output_version": {
    "command": "zpool status",
    "vers_major": 0,
    "vers_minor": 1
  },
  "pools": {
    "tank": {
      "name": "tank",
      "state": "ONLINE",
      "guid": "3920273586464696295",
      "txg": "16597",
      "spa_version": "5000",
      "zpl_version": "5",
      "status": "OK",
      "vdevs": {
        "tank": {
          "name": "tank",
          "alloc_space": "62.6G",
          "total_space": "15.0T",
          "def_space": "11.3T",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vdevs": {
            "raidz1-0": {
              "name": "raidz1-0",
              "vdev_type": "raidz",
              "guid": "763132626387621737",
              "state": "HEALTHY",
              "alloc_space": "62.5G",
              "total_space": "10.9T",
              "def_space": "7.26T",
              "rep_dev_size": "10.9T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vdevs": {
                "ca1eb824-c371-491d-ac13-37637e35c683": {
                  "name": "ca1eb824-c371-491d-ac13-37637e35c683",
                  "vdev_type": "disk",
                  "guid": "12841765308123764671",
                  "path": "/dev/disk/by-partuuid/ca1eb824-c371-491d-ac13-37637e35c683",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                },
                "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7": {
                  "name": "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
                  "vdev_type": "disk",
                  "guid": "1527839927278881561",
                  "path": "/dev/disk/by-partuuid/97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                },
                "e9ddba5f-f948-4734-a472-cb8aa5f0ff65": {
                  "name": "e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
                  "vdev_type": "disk",
                  "guid": "6982750226085199860",
                  "path": "/dev/disk/by-partuuid/e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                }
              }
            }
          }
        }
      },
      "dedup": {
        "mirror-2": {
          "name": "mirror-2",
          "vdev_type": "mirror",
          "guid": "2227766268377771003",
          "state": "HEALTHY",
          "alloc_space": "89.1M",
          "total_space": "3.62T",
          "def_space": "3.62T",
          "rep_dev_size": "3.62T",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vdevs": {
            "db017360-d8e9-4163-961b-144ca75293a3": {
              "name": "db017360-d8e9-4163-961b-144ca75293a3",
              "vdev_type": "disk",
              "guid": "17880913061695450307",
              "path": "/dev/disk/by-partuuid/db017360-d8e9-4163-961b-144ca75293a3",
              "state": "HEALTHY",
              "rep_dev_size": "3.63T",
              "phys_space": "3.64T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vendor": "ATA",
              "model": "WDC WD40EFZX-68AWUN0",
              "size": "3.6T"
            },
            "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f": {
              "name": "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
              "vdev_type": "disk",
              "guid": "10276374011610020557",
              "path": "/dev/disk/by-partuuid/952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
              "state": "HEALTHY",
              "rep_dev_size": "3.63T",
              "phys_space": "3.64T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vendor": "ATA",
              "model": "WDC WD40EFZX-68AWUN0",
              "size": "3.6T"
            }
          }
        }
      },
      "special": {
        "25d418f8-92bd-4327-b59f-7ef5d5f50d81": {
          "name": "25d418f8-92bd-4327-b59f-7ef5d5f50d81",
          "vdev_type": "disk",
          "guid": "3935742873387713123",
          "path": "/dev/disk/by-partuuid/25d418f8-92bd-4327-b59f-7ef5d5f50d81",
          "state": "HEALTHY",
          "alloc_space": "37.4M",
          "total_space": "444G",
          "def_space": "444G",
          "rep_dev_size": "444G",
          "phys_space": "447G",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vendor": "ATA",
          "model": "Micron_5300_MTFDDAK480TDS",
          "size": "447.1G"
        }
      },
      "error_count": "0"
    }
  }
}
.Ed
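.
.Ss Example 3 : No Displaying a repeating, timestamped report
An
.Ar interval
and
.Ar count
may be appended to reprint the report periodically, and
.Fl T Sy d
prefixes each report with the current date.
The pool name and timings below are purely illustrative; this prints the
status of
.Ar tank
every 5 seconds, 3 times:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl T Sy d Ar tank 5 3
.Ed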
.
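.Ss Example 4 : No Reporting only unhealthy pools
With
.Fl x
the report is restricted to pools that are exhibiting errors or are otherwise
unavailable, which makes it convenient for quick scripted health checks.
When every pool is healthy, the command typically prints only a one-line
summary, as in this illustrative run:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl x
all pools are healthy
.Ed
.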
.Sh SEE ALSO
.Xr zpool-events 8 ,
.Xr zpool-history 8 ,
.Xr zpool-iostat 8 ,
.Xr zpool-list 8 ,
.Xr zpool-resilver 8 ,
.Xr zpool-scrub 8 ,
.Xr zpool-wait 8