1.\" 2.\" CDDL HEADER START 3.\" 4.\" The contents of this file are subject to the terms of the 5.\" Common Development and Distribution License (the "License"). 6.\" You may not use this file except in compliance with the License. 7.\" 8.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9.\" or https://opensource.org/licenses/CDDL-1.0. 10.\" See the License for the specific language governing permissions 11.\" and limitations under the License. 12.\" 13.\" When distributing Covered Code, include this CDDL HEADER in each 14.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15.\" If applicable, add the following below this CDDL HEADER, with the 16.\" fields enclosed by brackets "[]" replaced with your own identifying 17.\" information: Portions Copyright [yyyy] [name of copyright owner] 18.\" 19.\" CDDL HEADER END 20.\" 21.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved. 22.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved. 23.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved. 24.\" Copyright (c) 2017 Datto Inc. 25.\" Copyright (c) 2018 George Melikov. All Rights Reserved. 26.\" Copyright 2017 Nexenta Systems, Inc. 27.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved. 28.\" 29.Dd February 14, 2024 30.Dt ZPOOL-STATUS 8 31.Os 32. 33.Sh NAME 34.Nm zpool-status 35.Nd show detailed health status for ZFS storage pools 36.Sh SYNOPSIS 37.Nm zpool 38.Cm status 39.Op Fl dDegiLpPstvx 40.Op Fl T Sy u Ns | Ns Sy d 41.Op Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns … 42.Oo Ar pool Oc Ns … 43.Op Ar interval Op Ar count 44.Op Fl j Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid 45. 46.Sh DESCRIPTION 47Displays the detailed health status for the given pools. 48If no 49.Ar pool 50is specified, then the status of each pool in the system is displayed. 51For more information on pool and device health, see the 52.Sx Device Failure and Recovery 53section of 54.Xr zpoolconcepts 7 . 55.Pp 56If a scrub or resilver is in progress, this command reports the percentage done 57and the estimated time to completion. 58Both of these are only approximate, because the amount of data in the pool and 59the other workloads on the system can change. 60.Bl -tag -width Ds 61.It Fl -power 62Display vdev enclosure slot power status (on or off). 63.It Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns … 64Run a script (or scripts) on each vdev and include the output as a new column 65in the 66.Nm zpool Cm status 67output. 68See the 69.Fl c 70option of 71.Nm zpool Cm iostat 72for complete details. 73.It Fl j , -json Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid 74Display the status for ZFS pools in JSON format. 75Specify 76.Sy --json-int 77to display numbers in integer format instead of strings. 78Specify 79.Sy --json-flat-vdevs 80to display vdevs in flat hierarchy instead of nested vdev objects. 81Specify 82.Sy --json-pool-key-guid 83to set pool GUID as key for pool objects instead of pool names. 84.It Fl d 85Display the number of Direct I/O read/write checksum verify errors that have 86occured on a top-level VDEV. 87See 88.Sx zfs_vdev_direct_write_verify 89in 90.Xr zfs 4 91for details about the conditions that can cause Direct I/O write checksum 92verify failures to occur. 93Direct I/O reads checksum verify errors can also occur if the contents of the 94buffer are being manipulated after the I/O has been issued and is in flight. 95In the case of Direct I/O read checksum verify errors, the I/O will be reissued 96through the ARC. 
.It Fl D
Display a histogram of deduplication statistics, showing the allocated
.Pq physically present on disk
and referenced
.Pq logically referenced in the pool
block counts and sizes by reference count.
If repeated (-DD), also shows statistics on how much of the DDT is resident
in the ARC.
.It Fl e
Only show unhealthy vdevs (not-ONLINE or with errors).
.It Fl g
Display vdev GUIDs instead of the normal device names.
These GUIDs can be used in place of device names for the zpool
detach/offline/remove/replace commands, as shown in Example 4 below.
.It Fl i
Display vdev initialization status.
.It Fl L
Display real paths for vdevs, resolving all symbolic links.
This can be used to look up the current block device name regardless of the
.Pa /dev/disk/
path used to open it.
.It Fl p
Display numbers in parsable (exact) values.
.It Fl P
Display full paths for vdevs instead of only the last component of
the path.
This can be used in conjunction with the
.Fl L
flag.
.It Fl s
Display the number of leaf vdev slow I/O operations.
This is the number of I/O operations that didn't complete in
.Sy zio_slow_io_ms
milliseconds
.Pq Sy 30000 No by default .
This does not necessarily mean the I/O operations failed to complete, only that
they took an unreasonably long time.
This may indicate a problem with the underlying storage.
.It Fl t
Display vdev TRIM status.
.It Fl T Sy u Ns | Ns Sy d
Display a time stamp.
Specify
.Sy u
for a printed representation of the internal representation of time.
See
.Xr time 1 .
Specify
.Sy d
for standard date format.
See
.Xr date 1 .
.It Fl v
Displays verbose data error information, printing out a complete list of all
data errors since the last complete pool scrub.
If the head_errlog feature is enabled and files containing errors have been
removed, then the respective filenames will not be reported in subsequent runs
of this command.
.It Fl x
Only display status for pools that are exhibiting errors or are otherwise
unavailable, as demonstrated in Example 3 below.
Warnings about pools not using the latest on-disk format will not be included.
.El
.
.Sh EXAMPLES
.\" These are, respectively, examples 16 from zpool.8
.\" Make sure to update them bidirectionally
.Ss Example 1 : No Adding output columns
Additional columns can be added to the
.Nm zpool Cm status No and Nm zpool Cm iostat No output with Fl c .
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl c Pa vendor , Ns Pa model , Ns Pa size
   NAME     STATE  READ WRITE CKSUM vendor  model        size
   tank     ONLINE 0    0     0
   mirror-0 ONLINE 0    0     0
   U1       ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U10      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U11      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U12      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U13      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
   U14      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T

.No # Nm zpool Cm iostat Fl vc Pa size
              capacity     operations     bandwidth
pool        alloc   free   read  write   read  write  size
----------  -----  -----  -----  -----  -----  -----  ----
rpool       14.6G  54.9G      4     55   250K  2.69M
  sda1      14.6G  54.9G      4     55   250K  2.69M   70G
----------  -----  -----  -----  -----  -----  -----  ----
.Ed
.
.Ss Example 2 : No Display the status output in JSON format
.Nm zpool Cm status No can output in JSON format if
.Fl j
is specified.
.Fl c
can be used to run a script on each VDEV.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl j Fl c Pa vendor , Ns Pa model , Ns Pa size | Nm jq
{
  "output_version": {
    "command": "zpool status",
    "vers_major": 0,
    "vers_minor": 1
  },
  "pools": {
    "tank": {
      "name": "tank",
      "state": "ONLINE",
      "guid": "3920273586464696295",
      "txg": "16597",
      "spa_version": "5000",
      "zpl_version": "5",
      "status": "OK",
      "vdevs": {
        "tank": {
          "name": "tank",
          "alloc_space": "62.6G",
          "total_space": "15.0T",
          "def_space": "11.3T",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vdevs": {
            "raidz1-0": {
              "name": "raidz1-0",
              "vdev_type": "raidz",
              "guid": "763132626387621737",
              "state": "HEALTHY",
              "alloc_space": "62.5G",
              "total_space": "10.9T",
              "def_space": "7.26T",
              "rep_dev_size": "10.9T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vdevs": {
                "ca1eb824-c371-491d-ac13-37637e35c683": {
                  "name": "ca1eb824-c371-491d-ac13-37637e35c683",
                  "vdev_type": "disk",
                  "guid": "12841765308123764671",
                  "path": "/dev/disk/by-partuuid/ca1eb824-c371-491d-ac13-37637e35c683",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                },
                "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7": {
                  "name": "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
                  "vdev_type": "disk",
                  "guid": "1527839927278881561",
                  "path": "/dev/disk/by-partuuid/97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                },
                "e9ddba5f-f948-4734-a472-cb8aa5f0ff65": {
                  "name": "e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
                  "vdev_type": "disk",
                  "guid": "6982750226085199860",
                  "path": "/dev/disk/by-partuuid/e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                }
              }
            }
          }
        }
      },
      "dedup": {
        "mirror-2": {
          "name": "mirror-2",
          "vdev_type": "mirror",
          "guid": "2227766268377771003",
          "state": "HEALTHY",
          "alloc_space": "89.1M",
          "total_space": "3.62T",
          "def_space": "3.62T",
          "rep_dev_size": "3.62T",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vdevs": {
            "db017360-d8e9-4163-961b-144ca75293a3": {
              "name": "db017360-d8e9-4163-961b-144ca75293a3",
              "vdev_type": "disk",
              "guid": "17880913061695450307",
              "path": "/dev/disk/by-partuuid/db017360-d8e9-4163-961b-144ca75293a3",
              "state": "HEALTHY",
              "rep_dev_size": "3.63T",
              "phys_space": "3.64T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vendor": "ATA",
              "model": "WDC WD40EFZX-68AWUN0",
              "size": "3.6T"
            },
            "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f": {
              "name": "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
              "vdev_type": "disk",
              "guid": "10276374011610020557",
              "path": "/dev/disk/by-partuuid/952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
              "state": "HEALTHY",
              "rep_dev_size": "3.63T",
              "phys_space": "3.64T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vendor": "ATA",
              "model": "WDC WD40EFZX-68AWUN0",
              "size": "3.6T"
            }
          }
        }
      },
      "special": {
        "25d418f8-92bd-4327-b59f-7ef5d5f50d81": {
          "name": "25d418f8-92bd-4327-b59f-7ef5d5f50d81",
          "vdev_type": "disk",
          "guid": "3935742873387713123",
          "path": "/dev/disk/by-partuuid/25d418f8-92bd-4327-b59f-7ef5d5f50d81",
          "state": "HEALTHY",
          "alloc_space": "37.4M",
          "total_space": "444G",
          "def_space": "444G",
          "rep_dev_size": "444G",
          "phys_space": "447G",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vendor": "ATA",
          "model": "Micron_5300_MTFDDAK480TDS",
          "size": "447.1G"
        }
      },
      "error_count": "0"
    }
  }
}
.Ed
.
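.Ss Example 3 : No Checking whether all pools are healthy
The
.Fl x
flag restricts the output to pools that are exhibiting errors or are otherwise
unavailable.
If every imported pool is healthy, only a summary message is printed:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl x
all pools are healthy
.Ed
.
.Ss Example 4 : No Using vdev GUIDs instead of device names
The
.Fl g
flag replaces device names with vdev GUIDs, which can then be passed to
subcommands such as
.Xr zpool-offline 8
in place of device names.
The pool layout and GUID values below are illustrative:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl g Ar tank
   NAME                      STATE  READ WRITE CKSUM
   tank                      ONLINE 0    0     0
     7393490974946674596     ONLINE 0    0     0
       12918280186897920091  ONLINE 0    0     0
       3487914198632119111   ONLINE 0    0     0

.No # Nm zpool Cm offline Ar tank Ar 12918280186897920091
.Ed
.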
"3.64T", 322 "read_errors": "0", 323 "write_errors": "0", 324 "checksum_errors": "0", 325 "vendor": "ATA", 326 "model": "WDC WD40EFZX-68AWUN0", 327 "size": "3.6T" 328 } 329 } 330 } 331 }, 332 "special": { 333 "25d418f8-92bd-4327-b59f-7ef5d5f50d81": { 334 "name": "25d418f8-92bd-4327-b59f-7ef5d5f50d81", 335 "vdev_type": "disk", 336 "guid": "3935742873387713123", 337 "path": "/dev/disk/by-partuuid/25d418f8-92bd-4327-b59f-7ef5d5f50d81", 338 "state": "HEALTHY", 339 "alloc_space": "37.4M", 340 "total_space": "444G", 341 "def_space": "444G", 342 "rep_dev_size": "444G", 343 "phys_space": "447G", 344 "read_errors": "0", 345 "write_errors": "0", 346 "checksum_errors": "0", 347 "vendor": "ATA", 348 "model": "Micron_5300_MTFDDAK480TDS", 349 "size": "447.1G" 350 } 351 }, 352 "error_count": "0" 353 } 354 } 355} 356.Ed 357. 358.Sh SEE ALSO 359.Xr zpool-events 8 , 360.Xr zpool-history 8 , 361.Xr zpool-iostat 8 , 362.Xr zpool-list 8 , 363.Xr zpool-resilver 8 , 364.Xr zpool-scrub 8 , 365.Xr zpool-wait 8 366