xref: /linux/net/bpf/bpf_dummy_struct_ops.c (revision 80154575849778e40d9d87aa7ab14491ac401948)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021. Huawei Technologies Co., Ltd
 */
#include <linux/kernel.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>

extern struct bpf_struct_ops bpf_bpf_dummy_ops;

/* A common type for the test_N operations of bpf_dummy_ops that have a return value */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);

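/*
 * Stub with the common test_N signature. It is handed to
 * bpf_struct_ops_prepare_trampoline() so the generated trampoline image
 * carries a kCFI type hash matching the indirect call in dummy_ops_call_op().
 */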
static int dummy_ops_test_ret_function(struct bpf_dummy_ops_state *state, ...)
{
	return 0;
}

struct bpf_dummy_ops_test_args {
	u64 args[MAX_BPF_FUNC_ARGS];
	struct bpf_dummy_ops_state state;
};

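/*
 * Copy the nr u64 arguments for test_N from user space (kattr->test.ctx_in).
 * If args[0] carries a user pointer to a struct bpf_dummy_ops_state, that
 * state is copied into args->state as well.
 */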
static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
	__u32 size_in;
	struct bpf_dummy_ops_test_args *args;
	void __user *ctx_in;
	void __user *u_state;

	size_in = kattr->test.ctx_size_in;
	if (size_in != sizeof(u64) * nr)
		return ERR_PTR(-EINVAL);

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return ERR_PTR(-ENOMEM);

	ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	if (copy_from_user(args->args, ctx_in, size_in))
		goto out;

	/* args[0] == 0 means the state argument of test_N will be NULL */
	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_from_user(&args->state, u_state,
				      sizeof(args->state)))
		goto out;

	return args;
out:
	kfree(args);
	return ERR_PTR(-EFAULT);
}

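/* Copy the (possibly modified) state back to the user buffer named by args[0]. */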
static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args)
{
	void __user *u_state;

	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_to_user(u_state, &args->state, sizeof(args->state)))
		return -EFAULT;

	return 0;
}

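/*
 * Call the attached program through the generated trampoline image.
 * cfi_get_offset() skips the kCFI preamble at the start of the image; extra
 * arguments are simply ignored by ops that take fewer of them.
 */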
static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
{
	dummy_ops_test_ret_fn test = (void *)image + cfi_get_offset();
	struct bpf_dummy_ops_state *state = NULL;

	/* state needs to be NULL if args[0] is 0 */
	if (args->args[0])
		state = &args->state;
	return test(state, args->args[1], args->args[2],
		    args->args[3], args->args[4]);
}

extern const struct bpf_link_ops bpf_struct_ops_link_lops;

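/*
 * BPF_PROG_TEST_RUN entry point for bpf_dummy_ops programs: build a one-off
 * trampoline around the attached program, call it with the arguments taken
 * from ctx_in, copy the state back to user space and report the program's
 * return value in uattr->test.retval. ctx_in must hold one u64 per argument
 * of the attached op; args[0] is a user-space pointer to a struct
 * bpf_dummy_ops_state, or 0 for a NULL state.
 */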
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr)
{
	const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
	const struct btf_type *func_proto;
	struct bpf_dummy_ops_test_args *args;
	struct bpf_tramp_links *tlinks;
	struct bpf_tramp_link *link = NULL;
	void *image = NULL;
	unsigned int op_idx;
	int prog_ret;
	int err;

	if (prog->aux->attach_btf_id != st_ops->type_id)
		return -EOPNOTSUPP;

	func_proto = prog->aux->attach_func_proto;
	args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto));
	if (IS_ERR(args))
		return PTR_ERR(args);

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks) {
		err = -ENOMEM;
		goto out;
	}

	image = arch_alloc_bpf_trampoline(PAGE_SIZE);
	if (!image) {
		err = -ENOMEM;
		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out;
	}
	/* The caller keeps its reference on prog, so take an extra one for the link. */
	bpf_prog_inc(prog);
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog);

	op_idx = prog->expected_attach_type;
	err = bpf_struct_ops_prepare_trampoline(tlinks, link,
						&st_ops->func_models[op_idx],
						&dummy_ops_test_ret_function,
						image, image + PAGE_SIZE);
	if (err < 0)
		goto out;

	arch_protect_bpf_trampoline(image, PAGE_SIZE);
	prog_ret = dummy_ops_call_op(image, args);

	err = dummy_ops_copy_args(args);
	if (err)
		goto out;
	if (put_user(prog_ret, &uattr->test.retval))
		err = -EFAULT;
out:
	kfree(args);
	arch_free_bpf_trampoline(image, PAGE_SIZE);
	if (link)
		bpf_link_put(&link->link);
	kfree(tlinks);
	return err;
}

static int bpf_dummy_init(struct btf *btf)
{
	return 0;
}

static bool bpf_dummy_ops_is_valid_access(int off, int size,
					  enum bpf_access_type type,
					  const struct bpf_prog *prog,
					  struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

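/* Sleepable programs may only be attached to the test_sleepable member. */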
static int bpf_dummy_ops_check_member(const struct btf_type *t,
				      const struct btf_member *member,
				      const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct bpf_dummy_ops, test_sleepable):
		break;
	default:
		if (prog->aux->sleepable)
			return -EINVAL;
	}

	return 0;
}

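/*
 * Verifier hook for writes through the program's context pointer: only
 * accesses that target struct bpf_dummy_ops_state and stay within its
 * bounds are allowed.
 */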
static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
					   const struct bpf_reg_state *reg,
					   int off, int size)
{
	const struct btf_type *state;
	const struct btf_type *t;
	s32 type_id;

	type_id = btf_find_by_name_kind(reg->btf, "bpf_dummy_ops_state",
					BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	state = btf_type_by_id(reg->btf, type_id);
	if (t != state) {
		bpf_log(log, "only access to bpf_dummy_ops_state is supported\n");
		return -EACCES;
	}

	if (off + size > sizeof(struct bpf_dummy_ops_state)) {
		bpf_log(log, "write access at off %d with size %d\n", off, size);
		return -EACCES;
	}

	return NOT_INIT;
}

static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
	.is_valid_access = bpf_dummy_ops_is_valid_access,
	.btf_struct_access = bpf_dummy_ops_btf_struct_access,
};

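/*
 * bpf_dummy_ops cannot be registered as a real struct_ops map; it exists
 * only to be exercised through BPF_PROG_TEST_RUN above.
 */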
static int bpf_dummy_init_member(const struct btf_type *t,
				 const struct btf_member *member,
				 void *kdata, const void *udata)
{
	return -EOPNOTSUPP;
}

static int bpf_dummy_reg(void *kdata)
{
	return -EOPNOTSUPP;
}

static void bpf_dummy_unreg(void *kdata)
{
}

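/*
 * CFI stubs: never called, they only provide valid kCFI type hashes for the
 * corresponding ops (see .cfi_stubs below).
 */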
static int bpf_dummy_test_1(struct bpf_dummy_ops_state *cb)
{
	return 0;
}

static int bpf_dummy_test_2(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
			    char a3, unsigned long a4)
{
	return 0;
}

static int bpf_dummy_test_sleepable(struct bpf_dummy_ops_state *cb)
{
	return 0;
}

static struct bpf_dummy_ops __bpf_bpf_dummy_ops = {
	.test_1 = bpf_dummy_test_1,
	.test_2 = bpf_dummy_test_2,
	.test_sleepable = bpf_dummy_test_sleepable,
};

struct bpf_struct_ops bpf_bpf_dummy_ops = {
	.verifier_ops = &bpf_dummy_verifier_ops,
	.init = bpf_dummy_init,
	.check_member = bpf_dummy_ops_check_member,
	.init_member = bpf_dummy_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.name = "bpf_dummy_ops",
	.cfi_stubs = &__bpf_bpf_dummy_ops,
};