#-
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2022-2023 The FreeBSD Foundation
#
# This software was developed by Mark Johnston under sponsorship from
# the FreeBSD Foundation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

# makefs invocation shared by all test cases; verify-txgs makes makefs
# double-check its own on-disk TXG bookkeeping.
MAKEFS="makefs -t zfs -o verify-txgs=true"
# Per-run pool name, unique via the shell PID so parallel runs don't collide.
ZFS_POOL_NAME="makefstest$$"
# File in which import_image records the imported pool's name so the
# cleanup routine can find it.
TEST_ZFS_POOL_NAME="$TMPDIR/poolname"

. "$(dirname "$0")/makefs_tests_common.sh"

# Undo whatever state a test body left behind: destroy the imported pool, if
# any, and detach the backing md(4) device.  Must be robust against bodies
# that failed before import_image ran, in which case the state files holding
# the pool name and md unit do not exist yet.
common_cleanup()
{
	local pool md

	# Try to force a TXG, this can help catch bugs by triggering a panic.
	sync

	# The state files may be missing if the body failed early; guard the
	# reads so we don't pass an empty pool name to zpool(8).
	pool=$(cat $TEST_ZFS_POOL_NAME 2>/dev/null)
	if [ -n "$pool" ] && zpool list "$pool" >/dev/null 2>&1; then
		zpool destroy "$pool"
	fi

	md=$(cat $TEST_MD_DEVICE_FILE 2>/dev/null)
	if [ -n "$md" ] && [ -c /dev/"$md" ]; then
		mdconfig -d -u "$md"
	fi
}

# Attach the generated image to an md(4) device, sanity-check the pool
# metadata with zdb(8), and import the pool with the test mount directory as
# its altroot.  Records the md unit and pool name for common_cleanup.
import_image()
{
	atf_check -e empty -o save:$TEST_MD_DEVICE_FILE -s exit:0 \
	    mdconfig -a -f $TEST_IMAGE
	atf_check -o ignore -e empty -s exit:0 \
	    zdb -e -p /dev/$(cat $TEST_MD_DEVICE_FILE) -mmm -ddddd $ZFS_POOL_NAME
	atf_check zpool import -R $TEST_MOUNT_DIR $ZFS_POOL_NAME
	echo "$ZFS_POOL_NAME" > $TEST_ZFS_POOL_NAME
}

#
# Test autoexpansion of the vdev.
#
# The pool is initially 10GB, so we get 10GB minus one metaslab's worth of
# usable space for data.  Then the pool is expanded to 50GB, and the amount of
# usable space is 50GB minus one metaslab.
#
atf_test_case autoexpand cleanup
autoexpand_body()
{
	local mssize poolsize poolsize1 newpoolsize

	create_test_inputs

	mssize=$((128 * 1024 * 1024))
	poolsize=$((10 * 1024 * 1024 * 1024))
	atf_check $MAKEFS -s $poolsize -o mssize=$mssize -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	# Grow the backing file without telling ZFS about it yet.
	newpoolsize=$((50 * 1024 * 1024 * 1024))
	truncate -s $newpoolsize $TEST_IMAGE

	import_image

	check_image_contents

	# Initially the pool should span only the original 10GB (one
	# metaslab is reserved, hence the adjustment).
	poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
	atf_check [ $((poolsize1 + $mssize)) -eq $poolsize ]

	# Trigger expansion into the enlarged device and re-verify.
	atf_check zpool online -e $ZFS_POOL_NAME /dev/$(cat $TEST_MD_DEVICE_FILE)

	check_image_contents

	poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
	atf_check [ $((poolsize1 + $mssize)) -eq $newpoolsize ]
}
autoexpand_cleanup()
{
	common_cleanup
}

#
# Test with some default layout defined by the common code.
#
atf_test_case basic cleanup
basic_body()
{
	create_test_inputs

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
basic_cleanup()
{
	common_cleanup
}

# Build an image containing a child dataset and verify that the dataset can
# be destroyed after import.
atf_test_case dataset_removal cleanup
dataset_removal_body()
{
	create_test_dirs

	cd $TEST_INPUTS_DIR
	mkdir dir
	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	atf_check zfs destroy ${ZFS_POOL_NAME}/dir
}
dataset_removal_cleanup()
{
	common_cleanup
}

#
# Make sure that we can handle some special file types.  Anything other than
# regular files, symlinks and directories are ignored.
#
atf_test_case devfs cleanup
devfs_body()
{
	atf_check mkdir dev
	atf_check mount -t devfs none ./dev

	# makefs is expected to warn about and skip device nodes.
	atf_check -e match:"skipping unhandled" $MAKEFS -s 1g -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME $TEST_IMAGE ./dev

	import_image
}
devfs_cleanup()
{
	common_cleanup
	umount -f ./dev
}

#
# Make sure that we can create and remove an empty directory.
#
atf_test_case empty_dir cleanup
empty_dir_body()
{
	create_test_dirs

	cd $TEST_INPUTS_DIR
	mkdir dir
	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# An empty directory in the image must be removable after import.
	atf_check rmdir ${TEST_MOUNT_DIR}/dir
}
empty_dir_cleanup()
{
	common_cleanup
}

# Build an image from a staging tree containing no files at all.
atf_test_case empty_fs cleanup
empty_fs_body()
{
	create_test_dirs

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
empty_fs_cleanup()
{
	common_cleanup
}

# Append single blocks past the end of a file in the imported pool and verify
# after each write that the original prefix of the file is untouched.
atf_test_case file_extend cleanup
file_extend_body()
{
	local i start

	create_test_dirs

	# Create a file slightly longer than the maximum block size.
	start=132
	dd if=/dev/random of=${TEST_INPUTS_DIR}/foo bs=1k count=$start
	md5 -q ${TEST_INPUTS_DIR}/foo > foo.md5

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	i=0
	while [ $i -lt 1000 ]; do
		dd if=/dev/random of=${TEST_MOUNT_DIR}/foo bs=1k count=1 \
		    seek=$(($i + $start)) conv=notrunc
		# Make sure that the first $start blocks are unmodified.
		dd if=${TEST_MOUNT_DIR}/foo bs=1k count=$start of=foo.copy
		atf_check -o file:foo.md5 md5 -q foo.copy
		i=$(($i + 1))
	done
}
file_extend_cleanup()
{
	common_cleanup
}

# Exercise a wide range of file sizes: every power of two below 1MB, plus
# one byte either side of each, to probe block-boundary handling.
atf_test_case file_sizes cleanup
file_sizes_body()
{
	local i

	create_test_dirs
	cd $TEST_INPUTS_DIR

	i=1
	while [ $i -lt $((1 << 20)) ]; do
		truncate -s $i ${i}.1
		truncate -s $(($i - 1)) ${i}.2
		truncate -s $(($i + 1)) ${i}.3
		i=$(($i << 1))
	done

	cd -

	# XXXMJ this creates sparse files, make sure makefs doesn't
	# preserve the sparseness.
	# XXXMJ need to test with larger files (at least 128MB for L2 indirs)
	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
file_sizes_cleanup()
{
	common_cleanup
}

# Verify that hard links in the staging tree come through as hard links in
# the image: same inode, same link count, same contents.
atf_test_case hard_links cleanup
hard_links_body()
{
	local f

	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	echo "hello" > 1
	ln 1 2
	ln 1 dir/1

	echo "goodbye" > dir/a
	ln dir/a dir/b
	ln dir/a a

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# All members of each link set must share one inode and link count.
	stat -f '%i' ${TEST_MOUNT_DIR}/1 > ./ino
	stat -f '%l' ${TEST_MOUNT_DIR}/1 > ./nlink
	for f in 1 2 dir/1; do
		atf_check -o file:./nlink -e empty -s exit:0 \
		    stat -f '%l' ${TEST_MOUNT_DIR}/${f}
		atf_check -o file:./ino -e empty -s exit:0 \
		    stat -f '%i' ${TEST_MOUNT_DIR}/${f}
		atf_check cmp -s ${TEST_INPUTS_DIR}/1 ${TEST_MOUNT_DIR}/${f}
	done

	stat -f '%i' ${TEST_MOUNT_DIR}/dir/a > ./ino
	stat -f '%l' ${TEST_MOUNT_DIR}/dir/a > ./nlink
	for f in dir/a dir/b a; do
		atf_check -o file:./nlink -e empty -s exit:0 \
		    stat -f '%l' ${TEST_MOUNT_DIR}/${f}
		atf_check -o file:./ino -e empty -s exit:0 \
		    stat -f '%i' ${TEST_MOUNT_DIR}/${f}
		atf_check cmp -s ${TEST_INPUTS_DIR}/dir/a ${TEST_MOUNT_DIR}/${f}
	done
}
hard_links_cleanup()
{
	common_cleanup
}

# Allocate enough dnodes from an object set that the meta dnode needs to use
# indirect blocks.
atf_test_case indirect_dnode_array cleanup
indirect_dnode_array_body()
{
	local count i

	# How many dnodes do we need to allocate?  Well, the data block size
	# for meta dnodes is always 16KB, so with a dnode size of 512B we get
	# 32 dnodes per direct block.  The maximum indirect block size is 128KB
	# and that can fit 1024 block pointers, so we need at least 32 * 1024
	# files to force the use of two levels of indirection.
	#
	# Unfortunately that number of files makes the test run quite slowly,
	# so we settle for a single indirect block for now...
	count=$(jot -r 1 32 1024)

	create_test_dirs
	cd $TEST_INPUTS_DIR
	for i in $(seq 1 $count); do
		touch $i
	done
	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
indirect_dnode_array_cleanup()
{
	common_cleanup
}

#
# Create some files with long names, so as to test fat ZAP handling.
#
atf_test_case long_file_name cleanup
long_file_name_body()
{
	local dir i

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# micro ZAP keys can be at most 50 bytes.
	for i in $(seq 1 60); do
		touch $(jot -s '' $i 1 1)
	done
	dir=$(jot -s '' 61 1 1)
	mkdir $dir
	for i in $(seq 1 60); do
		touch ${dir}/$(jot -s '' $i 1 1)
	done

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Add a directory entry in the hope that OpenZFS might catch a bug
	# in makefs' fat ZAP encoding.
	touch ${TEST_MOUNT_DIR}/foo
}
long_file_name_cleanup()
{
	common_cleanup
}

#
# Exercise handling of multiple datasets.
#
atf_test_case multi_dataset_1 cleanup
multi_dataset_1_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a
	mkdir dir2
	echo b > dir2/b

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1 -o fs=${ZFS_POOL_NAME}/dir2 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Make sure that we have three datasets with the expected mount points.
	atf_check -o inline:${ZFS_POOL_NAME}\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}
	atf_check -o inline:${TEST_MOUNT_DIR}\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}

	atf_check -o inline:${ZFS_POOL_NAME}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}/dir1
	atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	atf_check -o inline:${ZFS_POOL_NAME}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}/dir2
	atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2
}
multi_dataset_1_cleanup()
{
	common_cleanup
}

#
# Create a pool with two datasets, where the root dataset is mounted below
# the child dataset.
#
atf_test_case multi_dataset_2 cleanup
multi_dataset_2_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a
	mkdir dir2
	echo b > dir2/b

	cd -

	# The ';'-separated per-dataset properties invert the usual layout:
	# the child dataset is mounted at / and the root dataset at /dir1.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1\;mountpoint=/ \
	    -o fs=${ZFS_POOL_NAME}\;mountpoint=/dir1 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
multi_dataset_2_cleanup()
{
	common_cleanup
}

#
# Create a dataset with a non-existent mount point.
#
atf_test_case multi_dataset_3 cleanup
multi_dataset_3_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a

	cd -

	# dir2 exists as a dataset but has no counterpart in the staging tree.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1 \
	    -o fs=${ZFS_POOL_NAME}/dir2 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2

	# Mounting dir2 should have created a directory called dir2.  Go
	# back and create it in the staging tree before comparing.
	atf_check mkdir ${TEST_INPUTS_DIR}/dir2

	check_image_contents
}
multi_dataset_3_cleanup()
{
	common_cleanup
}

#
# Create an unmounted dataset.
#
atf_test_case multi_dataset_4 cleanup
multi_dataset_4_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1\;canmount=noauto\;mountpoint=none \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check -o inline:none\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	check_image_contents

	atf_check zfs set mountpoint=/dir1 ${ZFS_POOL_NAME}/dir1
	atf_check zfs mount ${ZFS_POOL_NAME}/dir1
	atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	# dir1/a should be part of the root dataset, not dir1.  Now that the
	# dir1 dataset is mounted on top of the root dataset's dir1
	# directory, the file must not be visible there.
	# Fixed: the path previously read "${TEST_MOUNT_DIR}dir1/a" (missing
	# '/'), so stat failed on a malformed path and the assertion passed
	# vacuously rather than checking the mount actually shadows the file.
	atf_check -s not-exit:0 -e not-empty stat ${TEST_MOUNT_DIR}/dir1/a
}
multi_dataset_4_cleanup()
{
	common_cleanup
}

#
# Validate handling of multiple staging directories.
#
atf_test_case multi_staging_1 cleanup
multi_staging_1_body()
{
	local tmpdir

	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > a
	echo a > dir1/a
	echo z > z

	cd -

	# Populate a second, disjoint staging directory.
	tmpdir=$(mktemp -d)
	cd $tmpdir

	mkdir dir2 dir2/dir3
	echo b > dir2/b
	echo c > dir2/dir3/c
	# NOTE(review): the link target "dir2/dir3c" looks like a typo for
	# "dir2/dir3/c" — confirm intent; a dangling symlink is still copied
	# faithfully either way, so the test is self-consistent as written.
	ln -s dir2/dir3c s

	cd -

	# Both directories are merged into a single image.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE ${TEST_INPUTS_DIR} $tmpdir

	import_image

	check_image_contents -d $tmpdir
}
multi_staging_1_cleanup()
{
	common_cleanup
}

atf_test_case multi_staging_2 cleanup
multi_staging_2_body()
{
	local tmpdir

	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	echo a > dir/foo
	echo b > dir/bar

	cd -

	# The second staging directory contains a directory with the same
	# name, whose contents must be merged into the image.
	tmpdir=$(mktemp -d)
	cd $tmpdir

	mkdir dir
	echo c > dir/baz

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE ${TEST_INPUTS_DIR} $tmpdir

	import_image

	# check_image_contents can't easily handle merged directories, so
	# just check that the merged directory contains the files we expect.
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/foo
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/bar
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/baz

	if [ "$(ls ${TEST_MOUNT_DIR}/dir | wc -l)" -ne 3 ]; then
		atf_fail "Expected 3 files in ${TEST_MOUNT_DIR}/dir"
	fi
}
multi_staging_2_cleanup()
{
	common_cleanup
}

#
# Rudimentary test to verify that two ZFS images created using the same
# parameters and input hierarchy are byte-identical.  In particular, makefs(1)
# does not preserve file access times.
643# 644atf_test_case reproducible cleanup 645reproducible_body() 646{ 647 create_test_inputs 648 649 atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \ 650 ${TEST_IMAGE}.1 $TEST_INPUTS_DIR 651 652 atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \ 653 ${TEST_IMAGE}.2 $TEST_INPUTS_DIR 654 655 # XXX-MJ cmp(1) is really slow 656 atf_check cmp ${TEST_IMAGE}.1 ${TEST_IMAGE}.2 657} 658reproducible_cleanup() 659{ 660} 661 662# 663# Verify that we can take a snapshot of a generated dataset. 664# 665atf_test_case snapshot cleanup 666snapshot_body() 667{ 668 create_test_dirs 669 cd $TEST_INPUTS_DIR 670 671 mkdir dir 672 echo "hello" > dir/hello 673 echo "goodbye" > goodbye 674 675 cd - 676 677 atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \ 678 $TEST_IMAGE $TEST_INPUTS_DIR 679 680 import_image 681 682 atf_check zfs snapshot ${ZFS_POOL_NAME}@1 683} 684snapshot_cleanup() 685{ 686 common_cleanup 687} 688 689# 690# Check handling of symbolic links. 691# 692atf_test_case soft_links cleanup 693soft_links_body() 694{ 695 create_test_dirs 696 cd $TEST_INPUTS_DIR 697 698 mkdir dir 699 ln -s a a 700 ln -s dir/../a a 701 ln -s dir/b b 702 echo 'c' > dir 703 ln -s dir/c c 704 # XXX-MJ overflows bonus buffer ln -s $(jot -s '' 320 1 1) 1 705 706 cd - 707 708 atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \ 709 $TEST_IMAGE $TEST_INPUTS_DIR 710 711 import_image 712 713 check_image_contents 714} 715soft_links_cleanup() 716{ 717 common_cleanup 718} 719 720# 721# Verify that we can set properties on the root dataset. 
#
atf_test_case root_props cleanup
root_props_body()
{
	create_test_inputs

	# Set two properties on the root dataset via the fs= option.
	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}\;atime=off\;setuid=off \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Both properties must have the requested value and, since they were
	# set explicitly, a "local" source.
	atf_check -o inline:off\\n -e empty -s exit:0 \
	    zfs get -H -o value atime $ZFS_POOL_NAME
	atf_check -o inline:local\\n -e empty -s exit:0 \
	    zfs get -H -o source atime $ZFS_POOL_NAME
	atf_check -o inline:off\\n -e empty -s exit:0 \
	    zfs get -H -o value setuid $ZFS_POOL_NAME
	atf_check -o inline:local\\n -e empty -s exit:0 \
	    zfs get -H -o source setuid $ZFS_POOL_NAME
}
root_props_cleanup()
{
	common_cleanup
}

#
# Verify that usedds and usedchild props are set properly.
#
atf_test_case used_space_props cleanup
used_space_props_body()
{
	local used usedds usedchild
	local rootmb childmb totalmb fudge

	create_test_dirs
	cd $TEST_INPUTS_DIR
	mkdir dir

	# File sizes for the root dataset and the child dataset; fudge is the
	# allowance for metadata overhead on top of the raw file data.
	rootmb=17
	childmb=39
	totalmb=$(($rootmb + $childmb))
	fudge=$((2 * 1024 * 1024))

	atf_check -e ignore dd if=/dev/random of=foo bs=1M count=$rootmb
	atf_check -e ignore dd if=/dev/random of=dir/bar bs=1M count=$childmb

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	# Make sure that each dataset's space usage is no more than 2MB larger
	# than their files.  This number is magic and might need to change
	# someday.
	usedds=$(zfs list -o usedds -Hp ${ZFS_POOL_NAME})
	atf_check test $usedds -gt $(($rootmb * 1024 * 1024)) -a \
	    $usedds -le $(($rootmb * 1024 * 1024 + $fudge))
	usedds=$(zfs list -o usedds -Hp ${ZFS_POOL_NAME}/dir)
	atf_check test $usedds -gt $(($childmb * 1024 * 1024)) -a \
	    $usedds -le $(($childmb * 1024 * 1024 + $fudge))

	# Make sure that the usedchild property value makes sense: the parent's
	# value corresponds to the size of the child, and the child has no
	# children.
	usedchild=$(zfs list -o usedchild -Hp ${ZFS_POOL_NAME})
	atf_check test $usedchild -gt $(($childmb * 1024 * 1024)) -a \
	    $usedchild -le $(($childmb * 1024 * 1024 + $fudge))
	atf_check -o inline:'0\n' \
	    zfs list -Hp -o usedchild ${ZFS_POOL_NAME}/dir

	# Make sure that the used property value makes sense: the parent's
	# value is the sum of the two sizes, and the child's value is the
	# same as its usedds value, which has already been checked.
	used=$(zfs list -o used -Hp ${ZFS_POOL_NAME})
	atf_check test $used -gt $(($totalmb * 1024 * 1024)) -a \
	    $used -le $(($totalmb * 1024 * 1024 + 2 * $fudge))
	used=$(zfs list -o used -Hp ${ZFS_POOL_NAME}/dir)
	atf_check -o inline:$used'\n' \
	    zfs list -Hp -o usedds ${ZFS_POOL_NAME}/dir

	# Both datasets do not have snapshots.
	atf_check -o inline:'0\n' zfs list -Hp -o usedsnap ${ZFS_POOL_NAME}
	atf_check -o inline:'0\n' zfs list -Hp -o usedsnap ${ZFS_POOL_NAME}/dir
}
used_space_props_cleanup()
{
	common_cleanup
}

# Verify that file permissions are set properly.  Make sure that non-executable
# files can't be executed.
atf_test_case perms cleanup
perms_body()
{
	local mode

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# Create one small shell script per permission combination, named
	# after its own octal mode (e.g. "0644").
	for mode in $(seq 0 511); do
		mode=$(printf "%04o\n" $mode)
		echo 'echo a' > $mode
		atf_check chmod $mode $mode
	done

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# NOTE(review): these execution checks run against the staging tree
	# rather than ${TEST_MOUNT_DIR}; confirm whether the mounted image
	# was the intended target.
	for mode in $(seq 0 511); do
		mode=$(printf "%04o\n" $mode)
		# Files without any execute bit must not run as the owner.
		if [ $(($mode & 0111)) -eq 0 ]; then
			atf_check -s not-exit:0 -e match:"Permission denied" \
			    ${TEST_INPUTS_DIR}/$mode
		fi
		# Files without the other-execute bit must not run as an
		# unprivileged user.
		if [ $(($mode & 0001)) -eq 0 ]; then
			atf_check -s not-exit:0 -e match:"Permission denied" \
			    su -m tests -c ${TEST_INPUTS_DIR}/$mode
		fi
	done
}
perms_cleanup()
{
	common_cleanup
}

atf_init_test_cases()
{
	atf_add_test_case autoexpand
	atf_add_test_case basic
	atf_add_test_case dataset_removal
	atf_add_test_case devfs
	atf_add_test_case empty_dir
	atf_add_test_case empty_fs
	atf_add_test_case file_extend
	atf_add_test_case file_sizes
	atf_add_test_case hard_links
	atf_add_test_case indirect_dnode_array
	atf_add_test_case long_file_name
	atf_add_test_case multi_dataset_1
	atf_add_test_case multi_dataset_2
	atf_add_test_case multi_dataset_3
	atf_add_test_case multi_dataset_4
	atf_add_test_case multi_staging_1
	atf_add_test_case multi_staging_2
	atf_add_test_case reproducible
	atf_add_test_case snapshot
	atf_add_test_case soft_links
	atf_add_test_case root_props
	atf_add_test_case used_space_props
	atf_add_test_case perms

	# XXXMJ tests:
	# - test with different ashifts (at least, 9 and 12), different image sizes
	# - create datasets in imported pool
}