xref: /freebsd/sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh (revision bd66c1b43e33540205dbc1187c2f2a15c58b57ba)
1#!/usr/bin/env bash
2
3######################################################################
4# 5) start test machines and load openzfs module
5######################################################################
6
7set -eu
8
9# read our defined variables
10source /var/tmp/env.txt
11
12# wait for poweroff to succeed
13PID=$(pidof /usr/bin/qemu-system-x86_64)
14tail --pid=$PID -f /dev/null
15sudo virsh undefine openzfs
16
17# definitions of per operating system
18case "$OS" in
19  freebsd*)
20    VMs=2
21    CPU=3
22    RAM=6
23    ;;
24  *)
25    VMs=2
26    CPU=3
27    RAM=7
28    ;;
29esac
30
31# this can be different for each distro
32echo "VMs=$VMs" >> $ENV
33
34# create snapshot we can clone later
35sudo zfs snapshot zpool/openzfs@now
36
37# setup the testing vm's
38PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
39for i in $(seq 1 $VMs); do
40
41  echo "Creating disk for vm$i..."
42  DISK="/dev/zvol/zpool/vm$i"
43  FORMAT="raw"
44  sudo zfs clone zpool/openzfs@now zpool/vm$i
45  sudo zfs create -ps -b 64k -V 80g zpool/vm$i-2
46
47  cat <<EOF > /tmp/user-data
48#cloud-config
49
50fqdn: vm$i
51
52users:
53- name: root
54  shell: $BASH
55- name: zfs
56  sudo: ALL=(ALL) NOPASSWD:ALL
57  shell: $BASH
58  ssh_authorized_keys:
59    - $PUBKEY
60
61growpart:
62  mode: auto
63  devices: ['/']
64  ignore_growroot_disabled: false
65EOF
66
67  sudo virsh net-update default add ip-dhcp-host \
68    "<host mac='52:54:00:83:79:0$i' ip='192.168.122.1$i'/>" --live --config
69
70  sudo virt-install \
71    --os-variant $OSv \
72    --name "vm$i" \
73    --cpu host-passthrough \
74    --virt-type=kvm --hvm \
75    --vcpus=$CPU,sockets=1 \
76    --memory $((1024*RAM)) \
77    --memballoon model=virtio \
78    --graphics none \
79    --cloud-init user-data=/tmp/user-data \
80    --network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
81    --disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
82    --disk $DISK-2,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
83    --import --noautoconsole >/dev/null
84done
85
86# check the memory state from time to time
87cat <<EOF > cronjob.sh
88# $OS
89exec 1>>/var/tmp/stats.txt
90exec 2>&1
91echo "*******************************************************"
92date
93uptime
94free -m
95df -h /mnt/tests
96zfs list
97EOF
98sudo chmod +x cronjob.sh
99sudo mv -f cronjob.sh /root/cronjob.sh
100echo '*/5 * * * *  /root/cronjob.sh' > crontab.txt
101sudo crontab crontab.txt
102rm crontab.txt
103
104# check if the machines are okay
105echo "Waiting for vm's to come up...  (${VMs}x CPU=$CPU RAM=$RAM)"
106for i in $(seq 1 $VMs); do
107  while true; do
108    ssh 2>/dev/null zfs@192.168.122.1$i "uname -a" && break
109  done
110done
111echo "All $VMs VMs are up now."
112
113# Save the VM's serial output (ttyS0) to /var/tmp/console.txt
114# - ttyS0 on the VM corresponds to a local /dev/pty/N entry
115# - use 'virsh ttyconsole' to lookup the /dev/pty/N entry
116for i in $(seq 1 $VMs); do
117  mkdir -p $RESPATH/vm$i
118  read "pty" <<< $(sudo virsh ttyconsole vm$i)
119  sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
120done
121echo "Console logging for ${VMs}x $OS started."
122