1.\" SPDX-License-Identifier: CDDL-1.0
2.\"
3.\" CDDL HEADER START
4.\"
5.\" The contents of this file are subject to the terms of the
6.\" Common Development and Distribution License (the "License").
7.\" You may not use this file except in compliance with the License.
8.\"
9.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10.\" or https://opensource.org/licenses/CDDL-1.0.
11.\" See the License for the specific language governing permissions
12.\" and limitations under the License.
13.\"
14.\" When distributing Covered Code, include this CDDL HEADER in each
15.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16.\" If applicable, add the following below this CDDL HEADER, with the
17.\" fields enclosed by brackets "[]" replaced with your own identifying
18.\" information: Portions Copyright [yyyy] [name of copyright owner]
19.\"
20.\" CDDL HEADER END
21.\"
22.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
23.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
24.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
25.\" Copyright (c) 2017 Datto Inc.
26.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
27.\" Copyright 2017 Nexenta Systems, Inc.
28.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
29.\"
30.Dd February 14, 2024
31.Dt ZPOOL-STATUS 8
32.Os
.
.Sh NAME
.Nm zpool-status
.Nd show detailed health status for ZFS storage pools
.Sh SYNOPSIS
.Nm zpool
.Cm status
.Op Fl dDegiLpPstvx
.Op Fl T Sy u Ns | Ns Sy d
.Op Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns …
.Oo Ar pool Oc Ns …
.Op Ar interval Op Ar count
.Op Fl j Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid
.
.Sh DESCRIPTION
Displays the detailed health status for the given pools.
If no
.Ar pool
is specified, then the status of each pool in the system is displayed.
For more information on pool and device health, see the
.Sx Device Failure and Recovery
section of
.Xr zpoolconcepts 7 .
.Pp
If a scrub or resilver is in progress, this command reports the percentage done
and the estimated time to completion.
Both of these are only approximate, because the amount of data in the pool and
the other workloads on the system can change.
.Bl -tag -width Ds
.It Fl -power
Display vdev enclosure slot power status (on or off).
.It Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns …
Run a script (or scripts) on each vdev and include the output as a new column
in the
.Nm zpool Cm status
output.
See the
.Fl c
option of
.Nm zpool Cm iostat
for complete details.
.It Fl j , -json Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid
Display the status for ZFS pools in JSON format.
Specify
.Sy --json-int
to display numbers in integer format instead of strings.
Specify
.Sy --json-flat-vdevs
to display vdevs in a flat hierarchy instead of nested vdev objects.
Specify
.Sy --json-pool-key-guid
to use the pool GUID as the key for pool objects instead of the pool name.
.It Fl d
Display the number of Direct I/O read/write checksum verify errors that have
occurred on a top-level VDEV.
See
.Sx zfs_vdev_direct_write_verify
in
.Xr zfs 4
for details about the conditions that can cause Direct I/O write checksum
verify failures to occur.
Direct I/O read checksum verify errors can also occur if the contents of the
buffer are being manipulated after the I/O has been issued and is in flight.
In the case of Direct I/O read checksum verify errors, the I/O will be reissued
through the ARC.
.It Fl D
Display a histogram of deduplication statistics, showing the allocated
.Pq physically present on disk
and referenced
.Pq logically referenced in the pool
block counts and sizes by reference count.
If repeated
.Pq Fl DD ,
it also shows statistics on how much of the DDT is resident in the ARC.
.It Fl e
Only show unhealthy vdevs (not-ONLINE or with errors).
.It Fl g
Display vdev GUIDs instead of the normal device names.
These GUIDs can be used in place of device names for the zpool
detach/offline/remove/replace commands.
See Example 6 for an illustrative listing.
.It Fl i
Display vdev initialization status.
.It Fl L
Display real paths for vdevs, resolving all symbolic links.
This can be used to look up the current block device name regardless of the
.Pa /dev/disk/
path used to open it.
.It Fl p
Display numbers in parsable (exact) values.
.It Fl P
Display full paths for vdevs instead of only the last component of
the path.
This can be used in conjunction with the
.Fl L
flag.
.It Fl s
Display the number of leaf vdev slow I/O operations.
This is the number of I/O operations that didn't complete in
.Sy zio_slow_io_ms
milliseconds
.Pq Sy 30000 No by default .
This does not necessarily mean the I/O operations failed to complete, only
that they took an unreasonably long time.
This may indicate a problem with the underlying storage.
See Example 3 for an illustrative listing.
.It Fl t
Display vdev TRIM status.
.It Fl T Sy u Ns | Ns Sy d
Display a time stamp.
Specify
.Sy u
for a printed representation of the internal representation of time.
See
.Xr time 1 .
Specify
.Sy d
for standard date format.
See
.Xr date 1 .
Example 4 shows a sample timestamped run.
.It Fl v
Displays verbose data error information, printing out a complete list of all
data errors since the last complete pool scrub.
If the
.Sy head_errlog
feature is enabled and files containing errors have been removed, then the
respective filenames will not be reported in subsequent runs of this command.
.It Fl x
Only display status for pools that are exhibiting errors or are otherwise
unavailable.
Warnings about pools not using the latest on-disk format will not be included.
See Example 5 for its use as a quick health check.
.El
.
.Sh EXAMPLES
.\" These are, respectively, examples 16 from zpool.8
.\" Make sure to update them bidirectionally
.Ss Example 1 : No Adding output columns
Additional columns can be added to the
.Nm zpool Cm status No and Nm zpool Cm iostat No output with Fl c .
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl c Pa vendor , Ns Pa model , Ns Pa size
   NAME     STATE  READ WRITE CKSUM vendor  model        size
   tank     ONLINE 0    0     0
   mirror-0 ONLINE 0    0     0
   U1       ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U10      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U11      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U12      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U13      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U14      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T

.No # Nm zpool Cm iostat Fl vc Pa size
              capacity     operations     bandwidth
pool        alloc   free   read  write   read  write  size
----------  -----  -----  -----  -----  -----  -----  ----
rpool       14.6G  54.9G      4     55   250K  2.69M
  sda1      14.6G  54.9G      4     55   250K  2.69M   70G
----------  -----  -----  -----  -----  -----  -----  ----
.Ed
.
.Ss Example 2 : No Display the status output in JSON format
.Nm zpool Cm status No can output in JSON format if
.Fl j
is specified.
.Fl c
can be used to run a script on each VDEV.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl j Fl c Pa vendor , Ns Pa model , Ns Pa size | Nm jq
{
  "output_version": {
    "command": "zpool status",
    "vers_major": 0,
    "vers_minor": 1
  },
  "pools": {
    "tank": {
      "name": "tank",
      "state": "ONLINE",
      "guid": "3920273586464696295",
      "txg": "16597",
      "spa_version": "5000",
      "zpl_version": "5",
      "status": "OK",
      "vdevs": {
        "tank": {
          "name": "tank",
          "alloc_space": "62.6G",
          "total_space": "15.0T",
          "def_space": "11.3T",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vdevs": {
            "raidz1-0": {
              "name": "raidz1-0",
              "vdev_type": "raidz",
              "guid": "763132626387621737",
              "state": "HEALTHY",
              "alloc_space": "62.5G",
              "total_space": "10.9T",
              "def_space": "7.26T",
              "rep_dev_size": "10.9T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vdevs": {
                "ca1eb824-c371-491d-ac13-37637e35c683": {
                  "name": "ca1eb824-c371-491d-ac13-37637e35c683",
                  "vdev_type": "disk",
                  "guid": "12841765308123764671",
                  "path": "/dev/disk/by-partuuid/ca1eb824-c371-491d-ac13-37637e35c683",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                },
                "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7": {
                  "name": "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
                  "vdev_type": "disk",
                  "guid": "1527839927278881561",
                  "path": "/dev/disk/by-partuuid/97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                },
                "e9ddba5f-f948-4734-a472-cb8aa5f0ff65": {
                  "name": "e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
                  "vdev_type": "disk",
                  "guid": "6982750226085199860",
                  "path": "/dev/disk/by-partuuid/e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                }
              }
            }
          }
        }
      },
      "dedup": {
        "mirror-2": {
          "name": "mirror-2",
          "vdev_type": "mirror",
          "guid": "2227766268377771003",
          "state": "HEALTHY",
          "alloc_space": "89.1M",
          "total_space": "3.62T",
          "def_space": "3.62T",
          "rep_dev_size": "3.62T",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vdevs": {
            "db017360-d8e9-4163-961b-144ca75293a3": {
              "name": "db017360-d8e9-4163-961b-144ca75293a3",
              "vdev_type": "disk",
              "guid": "17880913061695450307",
              "path": "/dev/disk/by-partuuid/db017360-d8e9-4163-961b-144ca75293a3",
              "state": "HEALTHY",
              "rep_dev_size": "3.63T",
              "phys_space": "3.64T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vendor": "ATA",
              "model": "WDC WD40EFZX-68AWUN0",
              "size": "3.6T"
            },
            "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f": {
              "name": "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
              "vdev_type": "disk",
              "guid": "10276374011610020557",
              "path": "/dev/disk/by-partuuid/952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
              "state": "HEALTHY",
              "rep_dev_size": "3.63T",
              "phys_space": "3.64T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vendor": "ATA",
              "model": "WDC WD40EFZX-68AWUN0",
              "size": "3.6T"
            }
          }
        }
      },
      "special": {
        "25d418f8-92bd-4327-b59f-7ef5d5f50d81": {
          "name": "25d418f8-92bd-4327-b59f-7ef5d5f50d81",
          "vdev_type": "disk",
          "guid": "3935742873387713123",
          "path": "/dev/disk/by-partuuid/25d418f8-92bd-4327-b59f-7ef5d5f50d81",
          "state": "HEALTHY",
          "alloc_space": "37.4M",
          "total_space": "444G",
          "def_space": "444G",
          "rep_dev_size": "444G",
          "phys_space": "447G",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vendor": "ATA",
          "model": "Micron_5300_MTFDDAK480TDS",
          "size": "447.1G"
        }
      },
      "error_count": "0"
    }
  }
}
.Ed
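.Pp
The JSON output can be post-processed on the command line.
For instance, the following pipeline prints the state of each pool; the filter
matches the structure shown above, and the output line is illustrative:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl j | Nm jq Ar '.pools[].state'
"ONLINE"
.Ed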
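.
.Ss Example 3 : No Displaying slow I/O statistics
With
.Fl s ,
a
.Sy SLOW
column is appended to the per-vdev error counters.
The listing below is an illustrative sketch, not output captured from a real
pool:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl s Ar tank
  pool: tank
 state: ONLINE
config:

        NAME        STATE     READ WRITE CKSUM  SLOW
        tank        ONLINE       0     0     0     -
          mirror-0  ONLINE       0     0     0     -
            sda     ONLINE       0     0     0     0
            sdb     ONLINE       0     0     0     3

errors: No known data errors
.Ed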
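.
.Ss Example 4 : No Repeating the output with timestamps
When an
.Ar interval
and
.Ar count
are given, the status is reprinted every
.Ar interval
seconds for
.Ar count
iterations, and
.Fl T Sy d
prefixes each report with a time stamp in standard date format.
The reports below are illustrative:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl T Sy d Ar tank No 5 2
Wed Feb 14 12:00:00 UTC 2024
  pool: tank
 state: ONLINE
config:

        NAME        STATE     READ WRITE CKSUM
        tank        ONLINE       0     0     0
          mirror-0  ONLINE       0     0     0
            sda     ONLINE       0     0     0
            sdb     ONLINE       0     0     0

errors: No known data errors

Wed Feb 14 12:00:05 UTC 2024
  pool: tank
 state: ONLINE
\&...
.Ed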
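.
.Ss Example 5 : No Checking overall pool health
Because
.Fl x
restricts the output to pools that are exhibiting problems, it doubles as a
quick health check.
When every pool is healthy, only a summary line is printed:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl x
all pools are healthy
.Ed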
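.
.Ss Example 6 : No Showing vdev GUIDs
With
.Fl g ,
vdevs are listed by GUID rather than by device name, and those GUIDs can be
passed to commands such as
.Nm zpool Cm replace
in place of device names.
The GUID values below are illustrative:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl g Ar tank
  pool: tank
 state: ONLINE
config:

        NAME                     STATE     READ WRITE CKSUM
        tank                     ONLINE       0     0     0
          7360336957660907188    ONLINE       0     0     0
            4201968904861843543  ONLINE       0     0     0
            9463314891163434742  ONLINE       0     0     0

errors: No known data errors
.Ed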
.
.Sh SEE ALSO
.Xr zpool-events 8 ,
.Xr zpool-history 8 ,
.Xr zpool-iostat 8 ,
.Xr zpool-list 8 ,
.Xr zpool-resilver 8 ,
.Xr zpool-scrub 8 ,
.Xr zpool-wait 8