xref: /illumos-gate/usr/src/test/zfs-tests/tests/functional/pool_checkpoint/checkpoint_sm_scale.ksh (revision a0b03b161c4df3cfc54fbc741db09b3bdc23ffba)
#!/usr/bin/ksh -p

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source.  A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2017, 2018 by Delphix. All rights reserved.
#

# Pull in the shared pool_checkpoint test library (helper functions used
# throughout this script).
. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib

#
# DESCRIPTION:
#	The maximum address that can be described by a single-word
#	space map entry limits the maximum allocatable space of any
#	top-level vdev to 64PB whenever a vdev-wide space map is used.
#
#	Since a vdev-wide space map is introduced for the checkpoint
#	we want to ensure that we cannot checkpoint a pool that does
#	not use the new space map encoding (V2) and has a top-level
#	vdev with more than 64PB of allocatable space.
#
#	Note: Since this is a pool created from file-based vdevs we
#	      are guaranteed that vdev_ashift is SPA_MINBLOCKSHIFT
#	      [which is currently 9 and (1 << 9) = 512], so the numbers
#	      work out for this test.
#
# STRATEGY:
#	1. Create pool with a disk of exactly 64PB
#	   (so ~63.5PB of allocatable space) and
#	   ensure that it has the checkpoint feature
#	   enabled but not space map V2
#	2. Ensure that you can checkpoint it
#	3. Create pool with a disk of exactly 65PB
#	   (so ~64.5PB of allocatable space) with
#	   the same setup
#	4. Ensure we fail trying to checkpoint it
#
# Note:
# This test used to create the two pools and attempt to checkpoint
# them at the same time, then destroy them. We later had to change
# this to test one pool at a time as the metaslabs (even though empty)
# consumed a lot of memory, especially on a machine that has been
# running with debug enabled. To give an example, each metaslab
# structure is ~1712 bytes (at the time of this writing), and each
# vdev has 128K metaslabs, which means that just the structures
# consume 131071 * 1712 = ~224M.
#

verify_runnable "global"

# Names of the two throwaway pools exercised by this test.
TESTPOOL1=testpool1
TESTPOOL2=testpool2

# Sparse backing files for the file-based vdevs; both live under
# /$DISKFS, the dataset this script creates below.
DISK64PB=/$DISKFS/disk64PB
DISK65PB=/$DISKFS/disk65PB

#
# Tear down everything this test may have left behind: either test
# pool (only one exists at a time), the sparse backing files, and the
# scaffolding created by setup_test_pool.
#
function test_cleanup
{
	typeset pool

	for pool in $TESTPOOL1 $TESTPOOL2; do
		poolexists $pool && destroy_pool $pool
	done
	log_must rm -f $DISK64PB $DISK65PB
	cleanup_test_pool
}

setup_test_pool
log_onexit test_cleanup

# Create the dataset that holds the backing files, then make two sparse
# files (mkfile -n): 64 * 1024 * 1024 GB = 64PB and 65PB respectively.
log_must zfs create $DISKFS
log_must mkfile -n $((64 * 1024 * 1024))g $DISK64PB
log_must mkfile -n $((65 * 1024 * 1024))g $DISK65PB

# 64PB vdev: create the pool with all features disabled (-d), then
# enable only zpool_checkpoint — space map V2 stays disabled.  A vdev
# of this size is still addressable by the old encoding, so the
# checkpoint must succeed.
log_must zpool create -d $TESTPOOL1 $DISK64PB
log_must zpool set feature@zpool_checkpoint=enabled $TESTPOOL1
log_must zpool checkpoint $TESTPOOL1
destroy_pool $TESTPOOL1

# 65PB vdev, same feature setup: the vdev now exceeds what the old
# space map encoding can describe, so checkpointing must fail.
log_must zpool create -d $TESTPOOL2 $DISK65PB
log_must zpool set feature@zpool_checkpoint=enabled $TESTPOOL2
log_mustnot zpool checkpoint $TESTPOOL2
destroy_pool $TESTPOOL2

# Note: no leading space on the continuation string — log_pass joins
# its arguments with a space, and the original leading space produced
# a doubled space in the pass message.
log_pass "Fail to checkpoint pool with old spacemap encoding" \
    "and a vdev that's more than 64PB."

93