#!/usr/bin/env bash

######################################################################
# 5) start test machines and load openzfs module
######################################################################

set -eu

# read our defined variables
source /var/tmp/env.txt
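# env.txt is expected to define the variables used below that are not
# set in this script ($OS, $OSv, $NIC, $ENV, $RESPATH); under 'set -u'
# a missing one would abort the run ($BASH is set by bash itself)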

# wait for the openzfs VM's poweroff to succeed
# ('tail --pid' blocks until the matching qemu process has exited)
PID=$(pidof /usr/bin/qemu-system-x86_64)
tail --pid=$PID -f /dev/null
# drop its libvirt domain; the zpool/openzfs dataset is kept for cloning
sudo virsh undefine openzfs

# default values per test vm:
VMs=2
CPU=2

# cpu pinning
CPUSET=("0,1" "2,3")
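# with two VMs this pins vm1 to host cores 0,1 and vm2 to cores 2,3
# (via --cpuset=${CPUSET[$((i-1))]} in the virt-install call below)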

case "$OS" in
  freebsd*)
    # FreeBSD can't be optimized via ksmtuned
    RAM=6
    ;;
  *)
    # Linux can be optimized via ksmtuned
    RAM=8
    ;;
esac
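# (ksmtuned manages Linux kernel samepage merging on the host, which
# can deduplicate identical pages across the Linux guests and allows
# giving them more RAM; the FreeBSD guests gain little from KSM)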

# the VM count can differ per distro, so record it in the shared env file
echo "VMs=$VMs" >> $ENV

# create snapshot we can clone later
sudo zfs snapshot zpool/openzfs@now
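# (ZFS clones are copy-on-write: each clone starts out sharing all of
# its blocks with this snapshot, so the per-VM root disks are cheap)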

# set up the testing VMs
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
for i in $(seq 1 $VMs); do

  echo "Creating disk for vm$i..."
  DISK="/dev/zvol/zpool/vm$i"
  FORMAT="raw"
  sudo zfs clone zpool/openzfs@now zpool/vm$i
  sudo zfs create -ps -b 64k -V 80g zpool/vm$i-2
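  # (-p above creates missing parent datasets, -s makes the 80G volume
  # sparse, i.e. no space reservation, and -b 64k sets its block size;
  # zpool/vm$i-2 becomes the VM's blank second disk, attached below)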

  cat <<EOF > /tmp/user-data
#cloud-config

fqdn: vm$i

users:
- name: root
  shell: $BASH
- name: zfs
  sudo: ALL=(ALL) NOPASSWD:ALL
  shell: $BASH
  ssh_authorized_keys:
    - $PUBKEY

growpart:
  mode: auto
  devices: ['/']
  ignore_growroot_disabled: false
EOF
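  # the rendered user-data can be sanity-checked on hosts that ship
  # cloud-init, e.g.: cloud-init schema --config-file /tmp/user-data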

  sudo virsh net-update default add ip-dhcp-host \
    "<host mac='52:54:00:83:79:0$i' ip='192.168.122.1$i'/>" --live --config
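  # for vm1 the fragment above expands to
  #   <host mac='52:54:00:83:79:01' ip='192.168.122.11'/>
  # so every VM gets a fixed DHCP lease on the default libvirt network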

  sudo virt-install \
    --os-variant $OSv \
    --name "vm$i" \
    --cpu host-passthrough \
    --virt-type=kvm --hvm \
    --vcpus=$CPU,sockets=1 \
    --cpuset=${CPUSET[$((i-1))]} \
    --memory $((1024*RAM)) \
    --memballoon model=virtio \
    --graphics none \
    --cloud-init user-data=/tmp/user-data \
    --network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
    --disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --disk $DISK-2,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --import --noautoconsole >/dev/null
done
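# (at this point 'sudo virsh list --all' should show vm1..vm$VMs running)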

# record memory, disk and pool usage from time to time
cat <<EOF > cronjob.sh
# $OS
exec 1>>/var/tmp/stats.txt
exec 2>&1
echo "*******************************************************"
date
uptime
free -m
df -h /mnt/tests
zfs list
EOF
sudo chmod +x cronjob.sh
sudo mv -f cronjob.sh /root/cronjob.sh
echo '*/5 * * * *  /root/cronjob.sh' > crontab.txt
sudo crontab crontab.txt
rm crontab.txt
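# the cron job appends a timestamped memory/disk/dataset report to
# /var/tmp/stats.txt on the host every 5 minutes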

# check if the machines are okay: retry ssh until each guest answers
echo "Waiting for VMs to come up...  (${VMs}x CPU=$CPU RAM=$RAM)"
for i in $(seq 1 $VMs); do
  while true; do
    ssh 2>/dev/null zfs@192.168.122.1$i "uname -a" && break
  done
done
echo "All $VMs VMs are up now."

# Save each VM's serial output (ttyS0) to $RESPATH/vm$i/console.txt
# - ttyS0 on the VM corresponds to a local /dev/pts/N entry
# - use 'virsh ttyconsole' to look up that /dev/pts/N entry
for i in $(seq 1 $VMs); do
  mkdir -p $RESPATH/vm$i
  read -r pty <<< "$(sudo virsh ttyconsole vm$i)"
  sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
done
echo "Console logging for ${VMs}x $OS started."
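
# the nohup'ed cat processes survive this script's exit and keep
# appending console output for as long as the VMs are running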
127