#!/usr/bin/env bash

######################################################################
# 5) start test machines and load openzfs module
######################################################################

set -eu

# read our defined variables
source /var/tmp/env.txt

# wait for poweroff to succeed
PID=$(pidof /usr/bin/qemu-system-x86_64)
tail --pid=$PID -f /dev/null
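# the base "openzfs" VM from the previous step is no longer needed; undefine it
# (--nvram also removes its UEFI NVRAM file)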
sudo virsh undefine --nvram openzfs

# cpu pinning
CPUSET=("0,1" "2,3")
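# indexed per VM below via --cpuset: vm1 -> host CPUs 0,1 and vm2 -> host CPUs 2,3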

# additional options for virt-install
OPTS[0]=""
OPTS[1]=""

case "$OS" in
  freebsd*)
    # FreeBSD needs only 6GiB
    RAM=6
    ;;
  debian13)
    RAM=8
    # Boot Debian 13 with uefi=on and secureboot=off (the ZFS kernel module is not signed)
    OPTS[0]="--boot"
    OPTS[1]="firmware=efi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no"
    ;;
  *)
    # Linux needs more memory, but it can be shared between VMs via KSM
    RAM=8
    ;;
esac

# create a snapshot we can clone later
sudo zfs snapshot zpool/openzfs@now

# set up the testing VMs
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
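# this key is added to the zfs user's authorized_keys via cloud-init below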

# start testing VMs
for ((i=1; i<=VMs; i++)); do
  echo "Creating disk for vm$i..."
  DISK="/dev/zvol/zpool/vm$i"
  FORMAT="raw"
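  # per-VM zvols: the system disk is a clone of the snapshot above, the tests
  # disk a sparse (-s) 64 GiB volume with a 64k block size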
  sudo zfs clone zpool/openzfs@now zpool/vm$i-system
  sudo zfs create -ps -b 64k -V 64g zpool/vm$i-tests

  cat <<EOF > /tmp/user-data
#cloud-config

fqdn: vm$i

users:
- name: root
  shell: $BASH
- name: zfs
  sudo: ALL=(ALL) NOPASSWD:ALL
  shell: $BASH
  ssh_authorized_keys:
    - $PUBKEY

growpart:
  mode: auto
  devices: ['/']
  ignore_growroot_disabled: false
EOF

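  # give the VM a fixed address: add a static DHCP host entry (MAC -> 192.168.122.1$i)
  # to libvirt's default network, both live and in its persistent config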
  sudo virsh net-update default add ip-dhcp-host \
    "<host mac='52:54:00:83:79:0$i' ip='192.168.122.1$i'/>" --live --config

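  # import both zvols as virtio disks, pass the cloud-init user-data, and pin the
  # guest to its host CPU pair; OPTS adds the EFI boot options for Debian 13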
  sudo virt-install \
    --os-variant $OSv \
    --name "vm$i" \
    --cpu host-passthrough \
    --virt-type=kvm --hvm \
    --vcpus=$CPU,sockets=1 \
    --cpuset=${CPUSET[$((i-1))]} \
    --memory $((1024*RAM)) \
    --memballoon model=virtio \
    --graphics none \
    --cloud-init user-data=/tmp/user-data \
    --network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
    --disk $DISK-system,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --disk $DISK-tests,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --import --noautoconsole ${OPTS[0]} ${OPTS[1]}
done

# generate some memory and zfs usage stats
cat <<EOF > cronjob.sh
exec 1>>/var/tmp/stats.txt
exec 2>&1
echo "********************************************************************************"
uptime
free -m
zfs list
EOF

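# run the stats script from root's crontab every 5 minutes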
sudo chmod +x cronjob.sh
sudo mv -f cronjob.sh /root/cronjob.sh
echo '*/5 * * * *  /root/cronjob.sh' > crontab.txt
sudo crontab crontab.txt
rm crontab.txt

# check if the machines are okay
echo "Waiting for VMs to come up...  (${VMs}x CPU=$CPU RAM=$RAM)"
for ((i=1; i<=VMs; i++)); do
  .github/workflows/scripts/qemu-wait-for-vm.sh vm$i
done
echo "All $VMs VMs are up now."

# Save each VM's serial output (ttyS0) to $RESPATH/vmN/console.txt
# - ttyS0 on the VM corresponds to a local /dev/pts/N entry on the host
# - use 'virsh ttyconsole' to look up that /dev/pts/N entry
for ((i=1; i<=VMs; i++)); do
  mkdir -p $RESPATH/vm$i
  read "pty" <<< $(sudo virsh ttyconsole vm$i)
  sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
done
echo "Console logging for ${VMs}x $OS started."