1.\" SPDX-License-Identifier: CDDL-1.0
2.\"
3.\" CDDL HEADER START
4.\"
5.\" The contents of this file are subject to the terms of the
6.\" Common Development and Distribution License (the "License").
7.\" You may not use this file except in compliance with the License.
8.\"
9.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10.\" or https://opensource.org/licenses/CDDL-1.0.
11.\" See the License for the specific language governing permissions
12.\" and limitations under the License.
13.\"
14.\" When distributing Covered Code, include this CDDL HEADER in each
15.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16.\" If applicable, add the following below this CDDL HEADER, with the
17.\" fields enclosed by brackets "[]" replaced with your own identifying
18.\" information: Portions Copyright [yyyy] [name of copyright owner]
19.\"
20.\" CDDL HEADER END
21.\"
22.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
23.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
24.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
25.\" Copyright (c) 2017 Datto Inc.
26.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
27.\" Copyright 2017 Nexenta Systems, Inc.
28.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
29.\"
30.Dd February 14, 2024
31.Dt ZPOOL-STATUS 8
32.Os
33.
34.Sh NAME
35.Nm zpool-status
36.Nd show detailed health status for ZFS storage pools
37.Sh SYNOPSIS
38.Nm zpool
39.Cm status
40.Op Fl DdegiLPpstvx
41.Op Fl c Ar script1 Ns Oo , Ns Ar script2 Ns ,… Oc
42.Oo Fl j|--json
43.Oo Ns Fl -json-flat-vdevs Oc
44.Oo Ns Fl -json-int Oc
45.Oo Ns Fl -json-pool-key-guid Oc
46.Oc
47.Op Fl T Ar d|u
48.Op Fl -power
49.Op Ar pool
50.Op Ar interval Op Ar count
51.
.Sh DESCRIPTION
Displays the detailed health status for the given pools.
If no
.Ar pool
is specified, then the status of each pool in the system is displayed.
For more information on pool and device health, see the
.Sx Device Failure and Recovery
section of
.Xr zpoolconcepts 7 .
.Pp
If a scrub or resilver is in progress, this command reports the percentage done
and the estimated time to completion.
Both of these are only approximate, because the amount of data in the pool and
the other workloads on the system can change.
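.Pp
For example, a pool with a scrub underway might report a scan line like the
following (illustrative output; the exact wording varies between OpenZFS
versions):
.Bd -literal -compact -offset Ds
  scan: scrub in progress since Sat Feb 10 10:34:03 2024
        1.62T / 7.21T scanned at 1.20G/s, 1.40T issued at 1.06G/s
        0B repaired, 19.44% done, 01:34:12 to go
.Ed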
.Bl -tag -width Ds
.It Fl c Ar script1 Ns Oo , Ns Ar script2 Ns ,… Oc
Run a script (or scripts) on each vdev and include the output as a new column
in the
.Nm zpool Cm status
output.
See the
.Fl c
option of
.Nm zpool Cm iostat
for complete details.
.It Fl D
Display a histogram of deduplication statistics, showing the allocated
.Pq physically present on disk
and referenced
.Pq logically referenced in the pool
block counts and sizes by reference count.
If repeated (-DD), also shows statistics on how much of the DDT is resident
in the ARC.
.It Fl d
Display the number of Direct I/O read/write checksum verify errors that have
occurred on a top-level VDEV.
See
.Sx zfs_vdev_direct_write_verify
in
.Xr zfs 4
for details about the conditions that can cause Direct I/O write checksum
verify failures to occur.
Direct I/O read checksum verify errors can also occur if the contents of the
buffer are being manipulated after the I/O has been issued and is in flight.
In the case of Direct I/O read checksum verify errors, the I/O will be reissued
through the ARC.
.It Fl e
Only show unhealthy vdevs (not-ONLINE or with errors).
.It Fl g
Display vdev GUIDs instead of the normal device names.
These GUIDs can be used in place of device names for the zpool
detach/offline/remove/replace commands.
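For example, a device can be taken offline by the GUID reported with
.Fl g
(the GUID below is illustrative):
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl g Ar tank
.No # Nm zpool Cm offline Ar tank Sy 12841765308123764671
.Ed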
.It Fl i
Display vdev initialization status.
.It Fl j , -json Oo Ns Fl -json-flat-vdevs Oc Oo Ns Fl -json-int Oc \
Oo Ns Fl -json-pool-key-guid Oc
Display the status for ZFS pools in JSON format.
Specify
.Sy --json-flat-vdevs
to display vdevs in a flat hierarchy instead of as nested vdev objects.
Specify
.Sy --json-int
to display numbers in integer format instead of strings.
Specify
.Sy --json-pool-key-guid
to use the pool GUID as the key for pool objects instead of the pool name.
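For example, a single field can be extracted with
.Xr jq 1
(illustrative; the field names follow the JSON layout shown in Example 2
below):
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl j Fl -json-int Ar tank | Nm jq Pa '.pools.tank.error_count'
0
.Ed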
.It Fl L
Display real paths for vdevs, resolving all symbolic links.
This can be used to look up the current block device name regardless of the
.Pa /dev/disk/
path used to open it.
.It Fl P
Display full paths for vdevs instead of only the last component of
the path.
This can be used in conjunction with the
.Fl L
flag.
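For example, combining both flags prints each leaf vdev as a fully resolved
block device path (illustrative output):
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl LP Ar tank
        NAME         STATE     READ WRITE CKSUM
        tank         ONLINE       0     0     0
          /dev/sda1  ONLINE       0     0     0
.Ed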
.It Fl p
Display numbers in parsable (exact) values.
.It Fl -power
Display vdev enclosure slot power status (on or off).
.It Fl s
Display the number of leaf vdev slow I/O operations.
This is the number of I/O operations that didn't complete in
.Sy zio_slow_io_ms
milliseconds
.Pq Sy 30000 No by default .
This does not necessarily mean the I/O operations failed to complete, just
that they took an unreasonably long time.
This may indicate a problem with the underlying storage.
.It Fl T Sy d Ns | Ns Sy u
Display a time stamp.
Specify
.Sy d
for standard date format.
See
.Xr date 1 .
Specify
.Sy u
for a printed representation of the internal representation of time.
See
.Xr time 1 .
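For example, combined with the
.Ar interval
and
.Ar count
operands, the following prints a date-stamped status report for
.Ar tank
every 5 seconds, 3 times:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl T Sy d Ar tank Sy 5 Sy 3
.Ed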
.It Fl t
Display vdev TRIM status.
.It Fl v
Display verbose data error information, printing out a complete list of all
data errors since the last complete pool scrub.
If the head_errlog feature is enabled and files containing errors have been
removed, then the respective filenames will not be reported in subsequent runs
of this command.
.It Fl x
Only display status for pools that are exhibiting errors or are otherwise
unavailable.
Warnings about pools not using the latest on-disk format will not be included.
.El
.
.Sh EXAMPLES
.\" These are, respectively, examples 16 from zpool.8
.\" Make sure to update them bidirectionally
.Ss Example 1 : No Adding output columns
Additional columns can be added to the
.Nm zpool Cm status No and Nm zpool Cm iostat No output with Fl c .
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl c Pa vendor , Ns Pa model , Ns Pa size
   NAME     STATE  READ WRITE CKSUM vendor  model        size
   tank     ONLINE 0    0     0
   mirror-0 ONLINE 0    0     0
   U1       ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U10      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U11      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U12      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U13      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U14      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T

.No # Nm zpool Cm iostat Fl vc Pa size
              capacity     operations     bandwidth
pool        alloc   free   read  write   read  write  size
----------  -----  -----  -----  -----  -----  -----  ----
rpool       14.6G  54.9G      4     55   250K  2.69M
  sda1      14.6G  54.9G      4     55   250K  2.69M   70G
----------  -----  -----  -----  -----  -----  -----  ----
.Ed
.
.Ss Example 2 : No Display the status output in JSON format
.Nm zpool Cm status No can output in JSON format if
.Fl j
is specified.
.Fl c
can be used to run a script on each VDEV.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl j Fl c Pa vendor , Ns Pa model , Ns Pa size | Nm jq
{
  "output_version": {
    "command": "zpool status",
    "vers_major": 0,
    "vers_minor": 1
  },
  "pools": {
    "tank": {
      "name": "tank",
      "state": "ONLINE",
      "guid": "3920273586464696295",
      "txg": "16597",
      "spa_version": "5000",
      "zpl_version": "5",
      "status": "OK",
      "vdevs": {
        "tank": {
          "name": "tank",
          "alloc_space": "62.6G",
          "total_space": "15.0T",
          "def_space": "11.3T",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vdevs": {
            "raidz1-0": {
              "name": "raidz1-0",
              "vdev_type": "raidz",
              "guid": "763132626387621737",
              "state": "HEALTHY",
              "alloc_space": "62.5G",
              "total_space": "10.9T",
              "def_space": "7.26T",
              "rep_dev_size": "10.9T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vdevs": {
                "ca1eb824-c371-491d-ac13-37637e35c683": {
                  "name": "ca1eb824-c371-491d-ac13-37637e35c683",
                  "vdev_type": "disk",
                  "guid": "12841765308123764671",
                  "path": "/dev/disk/by-partuuid/ca1eb824-c371-491d-ac13-37637e35c683",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                },
                "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7": {
                  "name": "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
                  "vdev_type": "disk",
                  "guid": "1527839927278881561",
                  "path": "/dev/disk/by-partuuid/97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                },
                "e9ddba5f-f948-4734-a472-cb8aa5f0ff65": {
                  "name": "e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
                  "vdev_type": "disk",
                  "guid": "6982750226085199860",
                  "path": "/dev/disk/by-partuuid/e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                }
              }
            }
          }
        }
      },
      "dedup": {
        "mirror-2": {
          "name": "mirror-2",
          "vdev_type": "mirror",
          "guid": "2227766268377771003",
          "state": "HEALTHY",
          "alloc_space": "89.1M",
          "total_space": "3.62T",
          "def_space": "3.62T",
          "rep_dev_size": "3.62T",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vdevs": {
            "db017360-d8e9-4163-961b-144ca75293a3": {
              "name": "db017360-d8e9-4163-961b-144ca75293a3",
              "vdev_type": "disk",
              "guid": "17880913061695450307",
              "path": "/dev/disk/by-partuuid/db017360-d8e9-4163-961b-144ca75293a3",
              "state": "HEALTHY",
              "rep_dev_size": "3.63T",
              "phys_space": "3.64T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vendor": "ATA",
              "model": "WDC WD40EFZX-68AWUN0",
              "size": "3.6T"
            },
            "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f": {
              "name": "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
              "vdev_type": "disk",
              "guid": "10276374011610020557",
              "path": "/dev/disk/by-partuuid/952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
              "state": "HEALTHY",
              "rep_dev_size": "3.63T",
              "phys_space": "3.64T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vendor": "ATA",
              "model": "WDC WD40EFZX-68AWUN0",
              "size": "3.6T"
            }
          }
        }
      },
      "special": {
        "25d418f8-92bd-4327-b59f-7ef5d5f50d81": {
          "name": "25d418f8-92bd-4327-b59f-7ef5d5f50d81",
          "vdev_type": "disk",
          "guid": "3935742873387713123",
          "path": "/dev/disk/by-partuuid/25d418f8-92bd-4327-b59f-7ef5d5f50d81",
          "state": "HEALTHY",
          "alloc_space": "37.4M",
          "total_space": "444G",
          "def_space": "444G",
          "rep_dev_size": "444G",
          "phys_space": "447G",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vendor": "ATA",
          "model": "Micron_5300_MTFDDAK480TDS",
          "size": "447.1G"
        }
      },
      "error_count": "0"
    }
  }
}
.Ed
.
.Sh SEE ALSO
.Xr zpool-events 8 ,
.Xr zpool-history 8 ,
.Xr zpool-iostat 8 ,
.Xr zpool-list 8 ,
.Xr zpool-resilver 8 ,
.Xr zpool-scrub 8 ,
.Xr zpool-wait 8