#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Run a series of udpgro functional tests.

source net_helper.sh

readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"

BPF_FILE="xdp_dummy.bpf.o"

# set global exit status, but never reset nonzero one.
check_err()
{
	if [ $ret -eq 0 ]; then
		ret=$1
	fi
}

cleanup() {
	local -r jobs="$(jobs -p)"
	local -r ns="$(ip netns list|grep $PEER_NS)"

	[ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
	[ -n "$ns" ] && ip netns del $ns 2>/dev/null
}
trap cleanup EXIT

cfg_veth() {
	ip netns add "${PEER_NS}"
	ip -netns "${PEER_NS}" link set lo up
	ip link add type veth
	ip link set dev veth0 up
	ip addr add dev veth0 192.168.1.2/24
	ip addr add dev veth0 2001:db8::2/64 nodad

	ip link set dev veth1 netns "${PEER_NS}"
	ip -netns "${PEER_NS}" addr add dev veth1 192.168.1.1/24
	ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
	ip -netns "${PEER_NS}" link set dev veth1 up
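	# attach a dummy XDP program: this moves veth1 to NAPI mode, which is
	# what lets the GRO engine aggregate the incoming GSO segments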
	ip -n "${PEER_NS}" link set veth1 xdp object ${BPF_FILE} section xdp
}

run_one() {
	# use 'rx' as separator between sender args and receiver args
	local -r all="$@"
	local -r tx_args=${all%rx*}
	local -r rx_args=${all#*rx}
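	# e.g. for "... -s 1400 rx -G -r -4 -n 10 -l 1400":
	#   tx_args = everything before the 'rx' token (udpgso_bench_tx options)
	#   rx_args = everything after it (udpgso_bench_rx options)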

	cfg_veth

	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} && \
		echo "ok" || \
		echo "failed" &

	wait_local_port_listen ${PEER_NS} 8000 udp
	./udpgso_bench_tx ${tx_args}
	ret=$?
	wait $(jobs -p)
	return $ret
}

run_test() {
	local -r args=$@

	printf " %-40s" "$1"
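	# re-exec this script via in_netns.sh in a fresh namespace; the
	# __subprocess marker makes it invoke run_one() with
	# "<tx args> rx -G -r <rx args>"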
	./in_netns.sh $0 __subprocess $2 rx -G -r $3
}

run_one_nat() {
	# use 'rx' as separator between sender args and receiver args
	local addr1 addr2 pid family="" ipt_cmd=ip6tables
	local -r all="$@"
	local -r tx_args=${all%rx*}
	local -r rx_args=${all#*rx}

	if [[ ${tx_args} = *-4* ]]; then
		ipt_cmd=iptables
		family=-4
		addr1=192.168.1.1
		addr2=192.168.1.3/24
	else
		addr1=2001:db8::1
		addr2="2001:db8::3/64 nodad"
	fi

	cfg_veth
	ip -netns "${PEER_NS}" addr add dev veth1 ${addr2}

	# fool the GRO engine by changing the destination address ...
	ip netns exec "${PEER_NS}" $ipt_cmd -t nat -I PREROUTING -d ${addr1} -j DNAT --to-destination ${addr2%/*}

	# ... so that GRO will match the UDP_GRO enabled socket, but packets
	# will land on the 'plain' one
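	# the plain receiver below is bound to the DNATed address and must end
	# up with the data; the 'bad GRO lookup' cases expect it to see the
	# original, un-aggregated segments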
	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
	pid=$!
	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} && \
		echo "ok" || \
		echo "failed" &

	wait_local_port_listen "${PEER_NS}" 8000 udp
	./udpgso_bench_tx ${tx_args}
	ret=$?
	kill -INT $pid
	wait $(jobs -p)
	return $ret
}

run_one_2sock() {
	# use 'rx' as separator between sender args and receiver args
	local -r all="$@"
	local -r tx_args=${all%rx*}
	local -r rx_args=${all#*rx}

	cfg_veth

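	# two GRO-enabled receivers in the peer ns: one bound to port 12345,
	# the other to the default port (8000); traffic is sent to each in turn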
	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 &
	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} && \
		echo "ok" || \
		echo "failed" &

	wait_local_port_listen "${PEER_NS}" 12345 udp
	./udpgso_bench_tx ${tx_args} -p 12345
	wait_local_port_listen "${PEER_NS}" 8000 udp
	./udpgso_bench_tx ${tx_args}
	ret=$?
	wait $(jobs -p)
	return $ret
}

run_nat_test() {
	local -r args=$@

	printf " %-40s" "$1"
	./in_netns.sh $0 __subprocess_nat $2 rx -r $3
}

run_2sock_test() {
	local -r args=$@

	printf " %-40s" "$1"
	./in_netns.sh $0 __subprocess_2sock $2 rx -G -r $3
}

run_all() {
	local -r core_args="-l 4"
	local -r ipv4_args="${core_args} -4 -D 192.168.1.1"
	local -r ipv6_args="${core_args} -6 -D 2001:db8::1"
	ret=0

	echo "ipv4"
	run_test "no GRO" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400"
	check_err $?

	# explicitly check that we are not receiving a UDP_SEGMENT cmsg (-S -1)
	# when GRO does not take place
	run_test "no GRO chk cmsg" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400 -S -1"
	check_err $?

	# the GSO packets are aggregated because:
	# * veth schedules napi after each xmit
	# * segmentation happens in BH context, so the veth napi poll is delayed
	#   until after the transmission of the last segment
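	# 14720 = 10 segments of 1472 bytes (1500 MTU - 20 byte IPv4 header -
	# 8 byte UDP header); the ipv6 cases below use 14520 = 10 x 1452
	# (1500 - 40 - 8)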
	run_test "GRO" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720"
	check_err $?
	run_test "GRO chk cmsg" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
	check_err $?
	run_test "GRO with custom segment size" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720"
	check_err $?
	run_test "GRO with custom segment size cmsg" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720 -S 500"
	check_err $?

	run_nat_test "bad GRO lookup" "${ipv4_args} -M 1 -s 14720 -S 0" "-n 10 -l 1472"
	check_err $?
	run_2sock_test "multiple GRO socks" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
	check_err $?

	echo "ipv6"
	run_test "no GRO" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400"
	check_err $?
	run_test "no GRO chk cmsg" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400 -S -1"
	check_err $?
	run_test "GRO" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520"
	check_err $?
	run_test "GRO chk cmsg" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520 -S 1452"
	check_err $?
	run_test "GRO with custom segment size" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520"
	check_err $?
	run_test "GRO with custom segment size cmsg" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520 -S 500"
	check_err $?

	run_nat_test "bad GRO lookup" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 10 -l 1452"
	check_err $?
	run_2sock_test "multiple GRO socks" "${ipv6_args} -M 1 -s 14520 -S 0 " "-n 1 -l 14520 -S 1452"
	check_err $?
	return $ret
}

if [ ! -f ${BPF_FILE} ]; then
	echo "Missing ${BPF_FILE}. Run 'make' first"
	exit -1
fi

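# with no arguments, run the full test matrix; the __subprocess* markers are
# used when the script re-executes itself via in_netns.sh (see run_*_test)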
if [[ $# -eq 0 ]]; then
	run_all
elif [[ $1 == "__subprocess" ]]; then
	shift
	run_one $@
elif [[ $1 == "__subprocess_nat" ]]; then
	shift
	run_one_nat $@
elif [[ $1 == "__subprocess_2sock" ]]; then
	shift
	run_one_2sock $@
fi

exit $?