1.\" 2.\" CDDL HEADER START 3.\" 4.\" The contents of this file are subject to the terms of the 5.\" Common Development and Distribution License (the "License"). 6.\" You may not use this file except in compliance with the License. 7.\" 8.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9.\" or https://opensource.org/licenses/CDDL-1.0. 10.\" See the License for the specific language governing permissions 11.\" and limitations under the License. 12.\" 13.\" When distributing Covered Code, include this CDDL HEADER in each 14.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15.\" If applicable, add the following below this CDDL HEADER, with the 16.\" fields enclosed by brackets "[]" replaced with your own identifying 17.\" information: Portions Copyright [yyyy] [name of copyright owner] 18.\" 19.\" CDDL HEADER END 20.\" 21.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved. 22.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved. 23.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved. 24.\" Copyright (c) 2017 Datto Inc. 25.\" Copyright (c) 2018 George Melikov. All Rights Reserved. 26.\" Copyright 2017 Nexenta Systems, Inc. 27.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved. 28.\" 29.Dd February 14, 2024 30.Dt ZPOOL-STATUS 8 31.Os 32. 33.Sh NAME 34.Nm zpool-status 35.Nd show detailed health status for ZFS storage pools 36.Sh SYNOPSIS 37.Nm zpool 38.Cm status 39.Op Fl dDegiLpPstvx 40.Op Fl T Sy u Ns | Ns Sy d 41.Op Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns … 42.Oo Ar pool Oc Ns … 43.Op Ar interval Op Ar count 44.Op Fl j Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid 45. 46.Sh DESCRIPTION 47Displays the detailed health status for the given pools. 48If no 49.Ar pool 50is specified, then the status of each pool in the system is displayed. 51For more information on pool and device health, see the 52.Sx Device Failure and Recovery 53section of 54.Xr zpoolconcepts 7 . 55.Pp 56If a scrub or resilver is in progress, this command reports the percentage done 57and the estimated time to completion. 58Both of these are only approximate, because the amount of data in the pool and 59the other workloads on the system can change. 60.Bl -tag -width Ds 61.It Fl -power 62Display vdev enclosure slot power status (on or off). 63.It Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns … 64Run a script (or scripts) on each vdev and include the output as a new column 65in the 66.Nm zpool Cm status 67output. 68See the 69.Fl c 70option of 71.Nm zpool Cm iostat 72for complete details. 73.It Fl j Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid 74Display the status for ZFS pools in JSON format. 75Specify 76.Sy --json-int 77to display numbers in integer format instead of strings. 78Specify 79.Sy --json-flat-vdevs 80to display vdevs in flat hierarchy instead of nested vdev objects. 81Specify 82.Sy --json-pool-key-guid 83to set pool GUID as key for pool objects instead of pool names. 84.It Fl d 85Display the number of Direct I/O write checksum verify errors that have occured 86on a top-level VDEV. 87See 88.Sx zfs_vdev_direct_write_verify 89in 90.Xr zfs 4 91for details about the conditions that can cause Direct I/O write checksum 92verify failures to occur. 93.It Fl D 94Display a histogram of deduplication statistics, showing the allocated 95.Pq physically present on disk 96and referenced 97.Pq logically referenced in the pool 98block counts and sizes by reference count. 
If repeated,
.Fl DD ,
also shows statistics on how much of the DDT is resident in the ARC.
.It Fl e
Only show unhealthy vdevs (not-ONLINE or with errors).
.It Fl g
Display vdev GUIDs instead of the normal device names.
These GUIDs can be used in place of device names for the zpool
detach/offline/remove/replace commands.
.It Fl i
Display vdev initialization status.
.It Fl L
Display real paths for vdevs, resolving all symbolic links.
This can be used to look up the current block device name regardless of the
.Pa /dev/disk/
path used to open it.
.It Fl p
Display numbers in parsable (exact) values.
.It Fl P
Display full paths for vdevs instead of only the last component of
the path.
This can be used in conjunction with the
.Fl L
flag.
.It Fl s
Display the number of leaf vdev slow I/O operations.
This is the number of I/O operations that didn't complete in
.Sy zio_slow_io_ms
milliseconds
.Pq Sy 30000 No by default .
This does not necessarily mean the I/O operations failed to complete, just that
they took an unreasonably long time.
This may indicate a problem with the underlying storage.
.It Fl t
Display vdev TRIM status.
.It Fl T Sy u Ns | Ns Sy d
Display a time stamp.
Specify
.Sy u
for a printed representation of the internal representation of time.
See
.Xr time 1 .
Specify
.Sy d
for standard date format.
See
.Xr date 1 .
Example 3 below combines this option with an
.Ar interval
and
.Ar count .
.It Fl v
Displays verbose data error information, printing out a complete list of all
data errors since the last complete pool scrub.
If the head_errlog feature is enabled and files containing errors have been
removed, then the respective filenames will not be reported in subsequent runs
of this command.
.It Fl x
Only display status for pools that are exhibiting errors or are otherwise
unavailable.
Warnings about pools not using the latest on-disk format will not be included.
Example 2 below shows this used as a quick health check.
.El
.
.Sh EXAMPLES
.\" Example 1 corresponds to example 16 from zpool.8
.\" Make sure to update it bidirectionally
.Ss Example 1 : No Adding output columns
Additional columns can be added to the
.Nm zpool Cm status No and Nm zpool Cm iostat No output with Fl c .
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl c Pa vendor , Ns Pa model , Ns Pa size
   NAME     STATE  READ WRITE CKSUM vendor  model        size
   tank     ONLINE 0    0     0
   mirror-0 ONLINE 0    0     0
   U1       ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U10      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U11      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U12      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U13      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U14      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T

.No # Nm zpool Cm iostat Fl vc Pa size
              capacity     operations     bandwidth
pool        alloc   free   read  write   read  write  size
----------  -----  -----  -----  -----  -----  -----  ----
rpool       14.6G  54.9G      4     55   250K  2.69M
  sda1      14.6G  54.9G      4     55   250K  2.69M   70G
----------  -----  -----  -----  -----  -----  -----  ----
.Ed
.
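.Ss Example 2 : No Checking whether any pool needs attention
The
.Fl x
flag restricts the output to pools that are exhibiting errors or are otherwise
unavailable, which makes it a convenient health check for scripts and
monitoring jobs.
On a system where no pool has problems it prints a single line similar to:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl x
all pools are healthy
.Ed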
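.Ss Example 3 : No Periodic, timestamped status reports
An
.Ar interval
and
.Ar count
repeat the report at a fixed period, and
.Fl T Sy d
prefixes each report with a
.Xr date 1 Ns -style
time stamp.
The pool name, layout, and timestamps below are only illustrative; the
following prints the status of a hypothetical pool
.Ar tank
twice, five seconds apart:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl T Sy d Ar tank 5 2
Wed Feb 14 10:00:00 UTC 2024
  pool: tank
 state: ONLINE
config:

        NAME        STATE     READ WRITE CKSUM
        tank        ONLINE       0     0     0
          mirror-0  ONLINE       0     0     0
            sda     ONLINE       0     0     0
            sdb     ONLINE       0     0     0

errors: No known data errors
.Ed
.Pp
A second, identically formatted report follows five seconds later, after which
the command exits.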
.Ss Example 4 : No Display the status output in JSON format
.Nm zpool Cm status No can output in JSON format if
.Fl j
is specified.
.Fl c
can be used to run a script on each VDEV.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl j Fl c Pa vendor , Ns Pa model , Ns Pa size | Nm jq
{
  "output_version": {
    "command": "zpool status",
    "vers_major": 0,
    "vers_minor": 1
  },
  "pools": {
    "tank": {
      "name": "tank",
      "state": "ONLINE",
      "guid": "3920273586464696295",
      "txg": "16597",
      "spa_version": "5000",
      "zpl_version": "5",
      "status": "OK",
      "vdevs": {
        "tank": {
          "name": "tank",
          "alloc_space": "62.6G",
          "total_space": "15.0T",
          "def_space": "11.3T",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vdevs": {
            "raidz1-0": {
              "name": "raidz1-0",
              "vdev_type": "raidz",
              "guid": "763132626387621737",
              "state": "HEALTHY",
              "alloc_space": "62.5G",
              "total_space": "10.9T",
              "def_space": "7.26T",
              "rep_dev_size": "10.9T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vdevs": {
                "ca1eb824-c371-491d-ac13-37637e35c683": {
                  "name": "ca1eb824-c371-491d-ac13-37637e35c683",
                  "vdev_type": "disk",
                  "guid": "12841765308123764671",
                  "path": "/dev/disk/by-partuuid/ca1eb824-c371-491d-ac13-37637e35c683",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                },
                "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7": {
                  "name": "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
                  "vdev_type": "disk",
                  "guid": "1527839927278881561",
                  "path": "/dev/disk/by-partuuid/97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                },
                "e9ddba5f-f948-4734-a472-cb8aa5f0ff65": {
                  "name": "e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
                  "vdev_type": "disk",
                  "guid": "6982750226085199860",
                  "path": "/dev/disk/by-partuuid/e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                }
              }
            }
          }
        }
      },
      "dedup": {
        "mirror-2": {
          "name": "mirror-2",
          "vdev_type": "mirror",
          "guid": "2227766268377771003",
          "state": "HEALTHY",
          "alloc_space": "89.1M",
          "total_space": "3.62T",
          "def_space": "3.62T",
          "rep_dev_size": "3.62T",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vdevs": {
            "db017360-d8e9-4163-961b-144ca75293a3": {
              "name": "db017360-d8e9-4163-961b-144ca75293a3",
              "vdev_type": "disk",
              "guid": "17880913061695450307",
              "path": "/dev/disk/by-partuuid/db017360-d8e9-4163-961b-144ca75293a3",
              "state": "HEALTHY",
              "rep_dev_size": "3.63T",
              "phys_space": "3.64T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vendor": "ATA",
              "model": "WDC WD40EFZX-68AWUN0",
              "size": "3.6T"
            },
            "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f": {
              "name": "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
              "vdev_type": "disk",
              "guid": "10276374011610020557",
              "path": "/dev/disk/by-partuuid/952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
              "state": "HEALTHY",
              "rep_dev_size": "3.63T",
              "phys_space": "3.64T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vendor": "ATA",
              "model": "WDC WD40EFZX-68AWUN0",
              "size": "3.6T"
            }
          }
        }
      },
      "special": {
        "25d418f8-92bd-4327-b59f-7ef5d5f50d81": {
          "name": "25d418f8-92bd-4327-b59f-7ef5d5f50d81",
          "vdev_type": "disk",
          "guid": "3935742873387713123",
          "path": "/dev/disk/by-partuuid/25d418f8-92bd-4327-b59f-7ef5d5f50d81",
          "state": "HEALTHY",
          "alloc_space": "37.4M",
          "total_space": "444G",
          "def_space": "444G",
          "rep_dev_size": "444G",
          "phys_space": "447G",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vendor": "ATA",
          "model": "Micron_5300_MTFDDAK480TDS",
          "size": "447.1G"
        }
      },
      "error_count": "0"
    }
  }
}
.Ed
.
.Sh SEE ALSO
.Xr zpool-events 8 ,
.Xr zpool-history 8 ,
.Xr zpool-iostat 8 ,
.Xr zpool-list 8 ,
.Xr zpool-resilver 8 ,
.Xr zpool-scrub 8 ,
.Xr zpool-wait 8