#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# A test for switch behavior under MC overload. An issue in Spectrum chips
# causes the throughput of UC traffic to drop severely when the switch is
# under heavy MC load. This issue can be overcome by putting the switch into
# MC-aware mode. This test verifies that UC performance stays intact even as
# the switch is under MC flood, and therefore that MC-aware mode is enabled
# and correctly configured.
#
# Because mlxsw throttles the CPU port, the traffic can't actually reach
# userspace at full speed. That makes it impossible to simply measure the
# throughput with iperf3, because many packets (that reach $h3) don't get to
# the kernel at all even in UDP mode (the situation is even worse in TCP
# mode, where one can't hope to see more than a couple of Mbps).
#
# So instead we send traffic with mausezahn and read the RX ethtool counters
# at $h3. Multicast traffic is untagged, while unicast traffic is tagged with
# PCP 1. Therefore each gets a different priority and we can use per-prio
# ethtool counters to measure the throughput. In order to avoid prioritizing
# unicast traffic, a prio qdisc is installed on $swp3, mapping all priorities
# to the same band, #7 (and thus TC 0).
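#
# Reading such a counter boils down to something like the following sketch
# (this is roughly what the ethtool_stats_get helper from lib.sh wraps; the
# exact parsing is the helper's concern):
#
#   ethtool -S $h3 | grep rx_octets_prio_1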
#
# Mausezahn can't actually saturate the links unless it's using large frames.
# Thus we set the MTU to 10K on all involved interfaces. Then both unicast
# and multicast traffic use 8K frames.
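#
# For reference, a minimal sketch of the kind of mausezahn invocation that
# the start_traffic helper from qos_lib.sh issues for the unicast stream
# (the exact flags are the helper's concern and may differ):
#
#   mz $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 \
#      -a own -b $h3mac -t udp -q -c 0 &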
#
# +---------------------------+            +----------------------------------+
# | H1                        |            |                               H2 |
# |                           |            |  unicast --> + $h2.111           |
# |                 multicast |            |  traffic     | 192.0.2.129/28    |
# |                 traffic   |            |              | e-qos-map 0:1     |
# |           $h1 + <-----    |            |              |                   |
# | 192.0.2.65/28 |           |            |              + $h2               |
# +---------------|-----------+            +--------------|-------------------+
#                 |                                       |
# +---------------|---------------------------------------|-------------------+
# |         $swp1 +                                       + $swp2             |
# |        >1Gbps |                                       | >1Gbps            |
# | +-------------|------+                     +----------|----------------+  |
# | |     $swp1.1 +      |                     |          + $swp2.111      |  |
# | |                BR1 |             SW      | BR111                     |  |
# | |     $swp3.1 +      |                     |          + $swp3.111      |  |
# | +-------------|------+                     +----------|----------------+  |
# |               \_______________________________________/                   |
# |                                    |                                      |
# |                                    + $swp3                                |
# |                                    | 1Gbps bottleneck                     |
# |                                    | prio qdisc: {0..7} -> 7              |
# +------------------------------------|--------------------------------------+
#                                      |
#                                   +--|-----------------+
#                                   |  + $h3          H3 |
#                                   |  | 192.0.2.66/28   |
#                                   |  |                 |
#                                   |  + $h3.111         |
#                                   |    192.0.2.130/28  |
#                                   +--------------------+

ALL_TESTS="
	ping_ipv4
	test_mc_aware
	test_uc_aware
"

lib_dir=$(dirname $0)/../../../net/forwarding

NUM_NETIFS=6
source $lib_dir/lib.sh
source $lib_dir/devlink_lib.sh
source qos_lib.sh

h1_create()
{
	simple_if_init $h1 192.0.2.65/28
	mtu_set $h1 10000
}

h1_destroy()
{
	mtu_restore $h1
	simple_if_fini $h1 192.0.2.65/28
}

h2_create()
{
	simple_if_init $h2
	mtu_set $h2 10000

	vlan_create $h2 111 v$h2 192.0.2.129/28
	# Tag the egressing UC traffic with PCP 1, per the diagram above.
	ip link set dev $h2.111 type vlan egress-qos-map 0:1
}

h2_destroy()
{
	vlan_destroy $h2 111

	mtu_restore $h2
	simple_if_fini $h2
}

h3_create()
{
	simple_if_init $h3 192.0.2.66/28
	mtu_set $h3 10000

	vlan_create $h3 111 v$h3 192.0.2.130/28
}

h3_destroy()
{
	vlan_destroy $h3 111

	mtu_restore $h3
	simple_if_fini $h3 192.0.2.66/28
}

switch_create()
{
	ip link set dev $swp1 up
	mtu_set $swp1 10000

	ip link set dev $swp2 up
	mtu_set $swp2 10000

	ip link set dev $swp3 up
	mtu_set $swp3 10000

	vlan_create $swp2 111
	vlan_create $swp3 111

	# Create the 1Gbps bottleneck at $swp3 and direct all priorities to
	# the same band, so that UC traffic is not prioritized over MC.
	ethtool -s $swp3 speed 1000 autoneg off
	tc qdisc replace dev $swp3 root handle 3: \
	   prio bands 8 priomap 7 7 7 7 7 7 7 7

	ip link add name br1 type bridge vlan_filtering 0
	ip link set dev br1 up
	ip link set dev $swp1 master br1
	ip link set dev $swp3 master br1

	ip link add name br111 type bridge vlan_filtering 0
	ip link set dev br111 up
	ip link set dev $swp2.111 master br111
	ip link set dev $swp3.111 master br111

	# Make sure that ingress quotas are smaller than egress so that there
	# is room for both streams of traffic to be admitted to the shared
	# buffer.
	devlink_port_pool_th_save $swp1 0
	devlink_port_pool_th_set $swp1 0 5
	devlink_tc_bind_pool_th_save $swp1 0 ingress
	devlink_tc_bind_pool_th_set $swp1 0 ingress 0 5

	devlink_port_pool_th_save $swp2 0
	devlink_port_pool_th_set $swp2 0 5
	devlink_tc_bind_pool_th_save $swp2 1 ingress
	devlink_tc_bind_pool_th_set $swp2 1 ingress 0 5

	devlink_port_pool_th_save $swp3 4
	devlink_port_pool_th_set $swp3 4 12
}
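
# The devlink_* helpers above wrap "devlink sb" commands. Assuming pool 0 is
# an ingress pool and pool 4 an egress pool (as the hard-coded indices above
# suggest), the $swp1 configuration corresponds roughly to:
#
#   devlink sb port pool set <dev>/<port> pool 0 th 5
#   devlink sb tc bind set <dev>/<port> tc 0 type ingress pool 0 th 5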

switch_destroy()
{
	devlink_port_pool_th_restore $swp3 4

	devlink_tc_bind_pool_th_restore $swp2 1 ingress
	devlink_port_pool_th_restore $swp2 0

	devlink_tc_bind_pool_th_restore $swp1 0 ingress
	devlink_port_pool_th_restore $swp1 0

	ip link del dev br111
	ip link del dev br1

	tc qdisc del dev $swp3 root handle 3:
	ethtool -s $swp3 autoneg on

	vlan_destroy $swp3 111
	vlan_destroy $swp2 111

	mtu_restore $swp3
	ip link set dev $swp3 down

	mtu_restore $swp2
	ip link set dev $swp2 down

	mtu_restore $swp1
	ip link set dev $swp1 down
}

setup_prepare()
{
	h1=${NETIFS[p1]}
	swp1=${NETIFS[p2]}

	swp2=${NETIFS[p3]}
	h2=${NETIFS[p4]}

	swp3=${NETIFS[p5]}
	h3=${NETIFS[p6]}

	h3mac=$(mac_get $h3)

	vrf_prepare

	h1_create
	h2_create
	h3_create
	switch_create
}

cleanup()
{
	pre_cleanup

	switch_destroy
	h3_destroy
	h2_destroy
	h1_destroy

	vrf_cleanup
}

ping_ipv4()
{
	ping_test $h2 192.0.2.130
}

test_mc_aware()
{
	RET=0

	# Establish the baseline: UC throughput with no other traffic.
	local -a uc_rate
	start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac
	uc_rate=($(measure_rate $swp2 $h3 rx_octets_prio_1 "UC-only"))
	check_err $? "Could not get high enough UC-only ingress rate"
	stop_traffic
	local ucth1=${uc_rate[1]}

	# Now flood the switch with broadcast traffic from $h1 and measure
	# the UC throughput again under that load.
	start_traffic $h1 192.0.2.65 bc bc

	local d0=$(date +%s)
	local t0=$(ethtool_stats_get $h3 rx_octets_prio_0)
	local u0=$(ethtool_stats_get $swp1 rx_octets_prio_0)

	local -a uc_rate_2
	start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac
	uc_rate_2=($(measure_rate $swp2 $h3 rx_octets_prio_1 "UC+MC"))
	check_err $? "Could not get high enough UC+MC ingress rate"
	stop_traffic
	local ucth2=${uc_rate_2[1]}

	local d1=$(date +%s)
	local t1=$(ethtool_stats_get $h3 rx_octets_prio_0)
	local u1=$(ethtool_stats_get $swp1 rx_octets_prio_0)

	# Degradation of UC throughput due to the MC flood, in percent,
	# clamped at zero.
	local deg=$(bc <<< "
			scale=2
			ret = 100 * ($ucth1 - $ucth2) / $ucth1
			if (ret > 0) { ret } else { 0 }
		    ")
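
	# For example, ucth1=900 and ucth2=720 (in whichever unit measure_rate
	# reports) would give deg = 100 * (900 - 720) / 900 = 20.00, right in
	# the middle of the window checked below.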

	# A minimum shaper of 200Mbps on the MC TCs should cause about 20%
	# degradation on a 1Gbps link.
	check_err $(bc <<< "$deg < 15") "Minimum shaper not in effect"
	check_err $(bc <<< "$deg > 25") "MC traffic degrades UC performance too much"

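	# rate() from lib.sh converts the two octet-counter snapshots and the
	# elapsed time into a throughput figure, conceptually something like
	# $((8 * (u1 - u0) / interval)); the exact scaling is the helper's
	# concern.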
	local interval=$((d1 - d0))
	local mc_ir=$(rate $u0 $u1 $interval)
	local mc_er=$(rate $t0 $t1 $interval)

	stop_traffic

	log_test "UC performance under MC overload"

	echo "UC-only throughput  $(humanize $ucth1)"
	echo "UC+MC throughput    $(humanize $ucth2)"
	echo "Degradation         $deg %"
	echo
	echo "Full report:"
	echo "  UC only:"
	echo "    ingress UC throughput $(humanize ${uc_rate[0]})"
	echo "    egress UC throughput  $(humanize ${uc_rate[1]})"
	echo "  UC+MC:"
	echo "    ingress UC throughput $(humanize ${uc_rate_2[0]})"
	echo "    egress UC throughput  $(humanize ${uc_rate_2[1]})"
	echo "    ingress MC throughput $(humanize $mc_ir)"
	echo "    egress MC throughput  $(humanize $mc_er)"
	echo
}

test_uc_aware()
{
	RET=0

	# Flood the bottleneck with UC traffic, and verify that broadcast ARP
	# requests from $h1 still get through to $h3.
	start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac

	local d0=$(date +%s)
	local t0=$(ethtool_stats_get $h3 rx_octets_prio_1)
	local u0=$(ethtool_stats_get $swp2 rx_octets_prio_1)
	sleep 1

	local attempts=50
	local passes=0
	local i

	for ((i = 0; i < attempts; ++i)); do
		if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 1; then
			((passes++))
		fi

		sleep 0.1
	done

	local d1=$(date +%s)
	local t1=$(ethtool_stats_get $h3 rx_octets_prio_1)
	local u1=$(ethtool_stats_get $swp2 rx_octets_prio_1)

	local interval=$((d1 - d0))
	local uc_ir=$(rate $u0 $u1 $interval)
	local uc_er=$(rate $t0 $t1 $interval)

	# Every broadcast ARP request should have elicited a reply.
	((attempts == passes))
	check_err $?

	stop_traffic

	log_test "MC performance under UC overload"
	echo "    ingress UC throughput $(humanize ${uc_ir})"
	echo "    egress UC throughput  $(humanize ${uc_er})"
	echo "    sent $attempts BC ARPs, got $passes responses"
}

trap cleanup EXIT

setup_prepare
setup_wait

tests_run

exit $EXIT_STATUS