xref: /linux/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c (revision 8f109e91b852f159b917f5c565bcf43c26d974e2)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <test_progs.h>

#include "linux/filter.h"
#include "kptr_xchg_inline.skel.h"

void test_kptr_xchg_inline(void)
{
	struct kptr_xchg_inline *skel;
	struct bpf_insn *insn = NULL;
	struct bpf_insn exp;
	unsigned int cnt;
	int err;

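	/* The verifier only inlines bpf_kptr_xchg() on architectures whose
	 * JIT supports the rewrite (x86-64 and arm64 at the time of writing),
	 * so skip everywhere else.
	 */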
#if !(defined(__x86_64__) || defined(__aarch64__))
	test__skip();
	return;
#endif

	skel = kptr_xchg_inline__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_load"))
		return;

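	/* Fetch the post-verifier ("xlated") instruction stream; the helper
	 * allocates *insn, which is freed by the caller below.
	 */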
	err = get_xlated_program(bpf_program__fd(skel->progs.kptr_xchg_inline), &insn, &cnt);
	if (!ASSERT_OK(err, "prog insn"))
		goto out;

	/* The original instructions are:
	 * r1 = map[id:xxx][0]+0
	 * r2 = 0
	 * call bpf_kptr_xchg#yyy
	 *
	 * call bpf_kptr_xchg#yyy will be inlined as:
	 * r0 = r2
	 * r0 = atomic64_xchg((u64 *)(r1 +0), r0)
	 */
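	/* The ld_imm64 loading the map value address occupies two insn slots,
	 * so the inlined sequence is expected at insn[3] and insn[4]; with at
	 * least an exit insn following it, cnt must exceed 5.
	 */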
	if (!ASSERT_GT(cnt, 5, "insn cnt"))
		goto out;

	exp = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
	if (!ASSERT_OK(memcmp(&insn[3], &exp, sizeof(exp)), "mov"))
		goto out;

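	/* BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, dst, src, off) encodes an 8-byte
	 * atomic exchange of src_reg with *(u64 *)(dst_reg + off).
	 */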
	exp = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
	if (!ASSERT_OK(memcmp(&insn[4], &exp, sizeof(exp)), "xchg"))
		goto out;
out:
	free(insn);
	kptr_xchg_inline__destroy(skel);
}
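
/* For reference, a sketch of how the xlated image can be fetched directly;
 * this mirrors what the get_xlated_program() helper is expected to do, with
 * names local to the sketch rather than taken from testing_helpers.c:
 *
 *	struct bpf_prog_info info = {};
 *	__u32 info_len = sizeof(info);
 *	struct bpf_insn *buf;
 *	__u32 n;
 *
 *	// First call: no buffer supplied, only learn the xlated length.
 *	bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
 *	n = info.xlated_prog_len / sizeof(struct bpf_insn);
 *	buf = calloc(n, sizeof(struct bpf_insn));
 *
 *	// Second call: hand the kernel our buffer to copy the insns into.
 *	memset(&info, 0, sizeof(info));
 *	info.xlated_prog_len = n * sizeof(struct bpf_insn);
 *	info.xlated_prog_insns = (__u64)(uintptr_t)buf;
 *	bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
 *
 * The same stream is visible via "bpftool prog dump xlated", which is handy
 * when the memcmp() checks above fail and the actual insns need inspection.
 */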