1#!/bin/ksh -p 2# 3# CDDL HEADER START 4# 5# The contents of this file are subject to the terms of the 6# Common Development and Distribution License (the "License"). 7# You may not use this file except in compliance with the License. 8# 9# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10# or http://www.opensolaris.org/os/licensing. 11# See the License for the specific language governing permissions 12# and limitations under the License. 13# 14# When distributing Covered Code, include this CDDL HEADER in each 15# file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16# If applicable, add the following below this CDDL HEADER, with the 17# fields enclosed by brackets "[]" replaced with your own identifying 18# information: Portions Copyright [yyyy] [name of copyright owner] 19# 20# CDDL HEADER END 21# 22 23# 24# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved. 25# 26 27. $STF_SUITE/include/libtest.shlib 28. $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib 29 30# 31# DESCRIPTION: 32# 'zpool add' works with nested replacing/spare vdevs 33# 34# STRATEGY: 35# 1. Create a redundant pool with a spare device 36# 2. Manually fault a device, wait for the hot-spare and then replace it: 37# this creates a situation where replacing and spare vdevs are nested. 38# 3. Verify 'zpool add' is able to add new devices to the pool. 
#

verify_runnable "global"

#
# Return the environment to its initial state: cancel any outstanding
# fault injection, destroy the test pool if it still exists and remove
# the file-backed vdevs.
#
function cleanup
{
	log_must zinject -c all
	poolexists $TESTPOOL && \
		destroy_pool $TESTPOOL
	log_must rm -f $DATA_DEVS $SPARE_DEVS
}

log_assert "'zpool add' works with nested replacing/spare vdevs"
log_onexit cleanup

# Backing files for the vdevs: one device we fault on purpose, three that
# stay healthy, one replacement target, one device to 'zpool add' later and
# two hot spares.  These variables are intentionally left unquoted below so
# multi-device lists word-split into separate arguments.
TMPDIR='/var/tmp'
FAULT_DEV="$TMPDIR/fault-dev"
SAFE_DEV1="$TMPDIR/safe-dev1"
SAFE_DEV2="$TMPDIR/safe-dev2"
SAFE_DEV3="$TMPDIR/safe-dev3"
SAFE_DEVS="$SAFE_DEV1 $SAFE_DEV2 $SAFE_DEV3"
REPLACE_DEV="$TMPDIR/replace-dev"
ADD_DEV="$TMPDIR/add-dev"
DATA_DEVS="$FAULT_DEV $SAFE_DEVS $REPLACE_DEV $ADD_DEV"
SPARE_DEV1="$TMPDIR/spare-dev1"
SPARE_DEV2="$TMPDIR/spare-dev2"
SPARE_DEVS="$SPARE_DEV1 $SPARE_DEV2"

for type in "mirror" "raidz1" "raidz2" "raidz3"
do
	# 1. Create a redundant pool with a spare device
	# log_must on truncate: if the backing files cannot be created we
	# want to fail here, not with a confusing 'zpool create' error.
	log_must truncate -s $SPA_MINDEVSIZE $DATA_DEVS $SPARE_DEVS
	log_must zpool create $TESTPOOL $type $FAULT_DEV $SAFE_DEVS
	log_must zpool add $TESTPOOL spare $SPARE_DEV1

	# 2.1 Fault a device, verify the spare is kicked in
	# The scrub forces I/O to the injected device so the fault is
	# noticed and the hot spare is activated.
	log_must zinject -d $FAULT_DEV -e nxio -T all -f 100 $TESTPOOL
	log_must zpool scrub $TESTPOOL
	log_must wait_vdev_state $TESTPOOL $FAULT_DEV "UNAVAIL" 60
	log_must wait_vdev_state $TESTPOOL $SPARE_DEV1 "ONLINE" 60
	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV1 "INUSE"
	log_must check_state $TESTPOOL "$type-0" "DEGRADED"

	# 2.2 Replace the faulted device: this creates a 'replacing' vdev
	# nested inside the 'spare' vdev
	log_must zpool replace $TESTPOOL $FAULT_DEV $REPLACE_DEV
	log_must wait_vdev_state $TESTPOOL $REPLACE_DEV "ONLINE" 60
	# Walk the first four config lines starting at the pool name and
	# verify the vdev nesting is exactly
	# $TESTPOOL -> $type-0 -> spare-0 -> replacing-0.
	# Status output is scoped to $TESTPOOL so the regex match on the
	# pool name cannot pick up the config of an unrelated pool.
	zpool status $TESTPOOL | nawk -v poolname="$TESTPOOL" -v type="$type" 'BEGIN {s=""}
	    $1 ~ poolname {c=4}; (c && c--) { s=s$1":" }
	    END { if (s != poolname":"type"-0:spare-0:replacing-0:") exit 1; }'
	if [[ $? -ne 0 ]]; then
		log_fail "Pool does not contain nested replacing/spare vdevs"
	fi

	# 3. Verify 'zpool add' is able to add new devices
	log_must zpool add $TESTPOOL spare $SPARE_DEV2
	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV2 "AVAIL"
	log_must zpool add -f $TESTPOOL $ADD_DEV
	log_must wait_vdev_state $TESTPOOL $ADD_DEV "ONLINE" 60

	# Cleanup between loop iterations so the next vdev type starts
	# from a clean slate.
	cleanup
done

log_pass "'zpool add' works with nested replacing/spare vdevs"