// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <test_progs.h>

#include "linux/filter.h"
#include "kptr_xchg_inline.skel.h"

void test_kptr_xchg_inline(void)
{
	struct kptr_xchg_inline *skel;
	struct bpf_insn *insn = NULL;
	struct bpf_insn exp;
	unsigned int cnt;
	int err;

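	/* bpf_kptr_xchg() is only inlined when the underlying JIT supports
	 * it; skip on other architectures so the checks below are not run
	 * against a plain helper call.
	 */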
#if !(defined(__x86_64__) || defined(__aarch64__) || \
      (defined(__riscv) && __riscv_xlen == 64))
	test__skip();
	return;
#endif

	skel = kptr_xchg_inline__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_load"))
		return;

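	/* Fetch the instruction stream after verifier rewriting (the
	 * "xlated" program), which is where the inlined sequence shows up.
	 */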
	err = get_xlated_program(bpf_program__fd(skel->progs.kptr_xchg_inline), &insn, &cnt);
	if (!ASSERT_OK(err, "prog insn"))
		goto out;

	/* The original instructions are:
	 * r1 = map[id:xxx][0]+0
	 * r2 = 0
	 * call bpf_kptr_xchg#yyy
	 *
	 * call bpf_kptr_xchg#yyy will be inlined as:
	 * r0 = r2
	 * r0 = atomic64_xchg((u64 *)(r1 +0), r0)
	 */
	if (!ASSERT_GT(cnt, 5, "insn cnt"))
		goto out;

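	/* The map pointer load (BPF_LD_IMM64) occupies two instruction
	 * slots, so the inlined sequence is expected at insn[3]/insn[4].
	 */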
	exp = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
	if (!ASSERT_OK(memcmp(&insn[3], &exp, sizeof(exp)), "mov"))
		goto out;

	exp = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
	if (!ASSERT_OK(memcmp(&insn[4], &exp, sizeof(exp)), "xchg"))
		goto out;
out:
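	/* insn may still be NULL if get_xlated_program() failed;
	 * free(NULL) is a no-op, so the common exit path is safe.
	 */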
	free(insn);
	kptr_xchg_inline__destroy(skel);
}