#!/usr/bin/env bash

######################################################################
# 5) start test machines and load openzfs module
######################################################################

set -eu

# read our defined variables
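# (env.txt is written by an earlier setup step and is expected to provide
# the variables used below: OS, OSv, VMs, CPU, NIC, BASH and RESPATH)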
source /var/tmp/env.txt

# wait for poweroff to succeed
PID=$(pidof /usr/bin/qemu-system-x86_64)
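# GNU tail exits once the watched PID terminates, so this blocks until
# the setup VM has fully powered off before we undefine it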
tail --pid=$PID -f /dev/null
sudo virsh undefine openzfs

# cpu pinning
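# (one entry per VM: vm1 runs on host cores 0,1 and vm2 on 2,3; the
# pinning is applied below via virt-install's --cpuset option)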
CPUSET=("0,1" "2,3")

case "$OS" in
  freebsd*)
    # FreeBSD needs only 6GiB
    RAM=6
    ;;
  *)
    # Linux needs more memory, but can be optimized to share it via KSM
    RAM=8
    ;;
esac
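# (KSM is a host-side knob; if desired it could be enabled with e.g.
#   echo 1 | sudo tee /sys/kernel/mm/ksm/run
# but this script assumes any such tuning happens elsewhere)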

# create snapshot we can clone later
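# (clones are copy-on-write: each VM gets a writable copy of the system
# image while unmodified blocks stay shared with this snapshot)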
sudo zfs snapshot zpool/openzfs@now

# set up the testing VMs
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)

# start testing VMs
for ((i=1; i<=VMs; i++)); do
  echo "Creating disk for vm$i..."
  DISK="/dev/zvol/zpool/vm$i"
  FORMAT="raw"
  sudo zfs clone zpool/openzfs@now zpool/vm$i-system
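  # (-p creates missing parent datasets, -s makes the 64 GiB test volume
  # sparse so space is only allocated as the tests write to it, and -b
  # sets a 64k volblocksize)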
  sudo zfs create -ps -b 64k -V 64g zpool/vm$i-tests

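  # cloud-init config: sets the hostname, creates a root login and a
  # passwordless-sudo 'zfs' user with our ssh key, and grows the root
  # partition to fill the cloned system disk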
  cat <<EOF > /tmp/user-data
#cloud-config

fqdn: vm$i

users:
- name: root
  shell: $BASH
- name: zfs
  sudo: ALL=(ALL) NOPASSWD:ALL
  shell: $BASH
  ssh_authorized_keys:
    - $PUBKEY

growpart:
  mode: auto
  devices: ['/']
  ignore_growroot_disabled: false
EOF

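  # give each VM a fixed DHCP lease, so vm$i is always reachable at the
  # predictable address 192.168.122.1$i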
  sudo virsh net-update default add ip-dhcp-host \
    "<host mac='52:54:00:83:79:0$i' ip='192.168.122.1$i'/>" --live --config

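  # --import boots the existing (cloned) system disk instead of running an
  # installer; --noautoconsole returns immediately instead of attaching to
  # the guest console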
  sudo virt-install \
    --os-variant $OSv \
    --name "vm$i" \
    --cpu host-passthrough \
    --virt-type=kvm --hvm \
    --vcpus=$CPU,sockets=1 \
    --cpuset=${CPUSET[$((i-1))]} \
    --memory $((1024*RAM)) \
    --memballoon model=virtio \
    --graphics none \
    --cloud-init user-data=/tmp/user-data \
    --network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
    --disk $DISK-system,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --disk $DISK-tests,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --import --noautoconsole >/dev/null
done

# generate some memory and pool stats
cat <<EOF > cronjob.sh
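# 'exec' without a command re-points this script's own stdout/stderr, so
# everything below appends to the stats file without per-line redirects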
exec 1>>/var/tmp/stats.txt
exec 2>&1
echo "********************************************************************************"
uptime
free -m
zfs list
EOF

sudo chmod +x cronjob.sh
sudo mv -f cronjob.sh /root/cronjob.sh
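# install it as root's crontab, running every 5 minutes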
echo '*/5 * * * *  /root/cronjob.sh' > crontab.txt
sudo crontab crontab.txt
rm crontab.txt

# check if the machines are okay
echo "Waiting for VMs to come up...  (${VMs}x CPU=$CPU RAM=$RAM)"
for ((i=1; i<=VMs; i++)); do
  .github/workflows/scripts/qemu-wait-for-vm.sh vm$i
done
echo "All $VMs VMs are up now."

# Save the VM's serial output (ttyS0) to /var/tmp/console.txt
# - ttyS0 on the VM corresponds to a local /dev/pts/N entry
# - use 'virsh ttyconsole' to look up the /dev/pts/N entry
for ((i=1; i<=VMs; i++)); do
  mkdir -p $RESPATH/vm$i
  read -r pty <<< "$(sudo virsh ttyconsole vm$i)"
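  # nohup keeps the logger running after this script exits, so the console
  # output is captured for the whole lifetime of the VM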
  sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
done
echo "Console logging for ${VMs}x $OS started."
117