xref: /linux/net/bpf/bpf_dummy_struct_ops.c (revision 110d3047a3ec033de00322b1a8068b1215efa97a)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021. Huawei Technologies Co., Ltd
 */
#include <linux/kernel.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>

static struct bpf_struct_ops bpf_bpf_dummy_ops;

/* A common type for test_N with return value in bpf_dummy_ops */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);

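/*
 * Native stub with the test_N calling convention; it is handed to
 * bpf_struct_ops_prepare_trampoline() below as the stub function for the
 * generated trampoline image.
 */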
static int dummy_ops_test_ret_function(struct bpf_dummy_ops_state *state, ...)
{
	return 0;
}

struct bpf_dummy_ops_test_args {
	u64 args[MAX_BPF_FUNC_ARGS];
	struct bpf_dummy_ops_state state;
};

static struct btf *bpf_dummy_ops_btf;

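/*
 * Copy the test arguments from user space. ctx_in must contain exactly
 * @nr u64 values; args[0], when non-zero, is a user pointer from which
 * the initial bpf_dummy_ops_state is read.
 */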
static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
	__u32 size_in;
	struct bpf_dummy_ops_test_args *args;
	void __user *ctx_in;
	void __user *u_state;

	size_in = kattr->test.ctx_size_in;
	if (size_in != sizeof(u64) * nr)
		return ERR_PTR(-EINVAL);

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return ERR_PTR(-ENOMEM);

	ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	if (copy_from_user(args->args, ctx_in, size_in))
		goto out;

	/* args[0] == 0 means the state argument of test_N will be NULL */
	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_from_user(&args->state, u_state,
				      sizeof(args->state)))
		goto out;

	return args;
out:
	kfree(args);
	return ERR_PTR(-EFAULT);
}

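/* Copy the (possibly modified) state back to the user-provided buffer. */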
static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args)
{
	void __user *u_state;

	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_to_user(u_state, &args->state, sizeof(args->state)))
		return -EFAULT;

	return 0;
}

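/*
 * Call the trampoline image as if it were a test_N op. cfi_get_offset()
 * skips past the CFI preamble emitted at the start of the image; trailing
 * arguments the op does not take are simply ignored by the callee.
 */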
static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
{
	dummy_ops_test_ret_fn test = (void *)image + cfi_get_offset();
	struct bpf_dummy_ops_state *state = NULL;

	/* state needs to be NULL if args[0] is 0 */
	if (args->args[0])
		state = &args->state;
	return test(state, args->args[1], args->args[2],
		    args->args[3], args->args[4]);
}

extern const struct bpf_link_ops bpf_struct_ops_link_lops;

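/*
 * BPF_PROG_TEST_RUN handler for bpf_dummy_ops programs: build a one-off
 * trampoline around @prog, call it with the user-supplied arguments and
 * copy the resulting state and return value back to user space. Nothing
 * is ever registered; this path exists purely for testing struct_ops.
 */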
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr)
{
	const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
	const struct btf_type *func_proto;
	struct bpf_dummy_ops_test_args *args;
	struct bpf_tramp_links *tlinks;
	struct bpf_tramp_link *link = NULL;
	void *image = NULL;
	unsigned int op_idx;
	int prog_ret;
	s32 type_id;
	int err;

	type_id = btf_find_by_name_kind(bpf_dummy_ops_btf,
					bpf_bpf_dummy_ops.name,
					BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	if (prog->aux->attach_btf_id != type_id)
		return -EOPNOTSUPP;

	func_proto = prog->aux->attach_func_proto;
	args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto));
	if (IS_ERR(args))
		return PTR_ERR(args);

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks) {
		err = -ENOMEM;
		goto out;
	}

	image = arch_alloc_bpf_trampoline(PAGE_SIZE);
	if (!image) {
		err = -ENOMEM;
		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out;
	}
	/* the link below must not consume the caller's reference to prog, so take our own */
	bpf_prog_inc(prog);
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog);

	op_idx = prog->expected_attach_type;
	err = bpf_struct_ops_prepare_trampoline(tlinks, link,
						&st_ops->func_models[op_idx],
						&dummy_ops_test_ret_function,
						image, image + PAGE_SIZE);
	if (err < 0)
		goto out;

	arch_protect_bpf_trampoline(image, PAGE_SIZE);
	prog_ret = dummy_ops_call_op(image, args);

	err = dummy_ops_copy_args(args);
	if (err)
		goto out;
	if (put_user(prog_ret, &uattr->test.retval))
		err = -EFAULT;
out:
	kfree(args);
	arch_free_bpf_trampoline(image, PAGE_SIZE);
	if (link)
		bpf_link_put(&link->link);
	kfree(tlinks);
	return err;
}

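/*
 * Cache the kernel BTF handed in at struct_ops registration; it is used
 * by bpf_struct_ops_test_run() to resolve the bpf_dummy_ops type id.
 */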
static int bpf_dummy_init(struct btf *btf)
{
	bpf_dummy_ops_btf = btf;
	return 0;
}

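/* Context accesses are validated the same way as for tracing programs. */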
static bool bpf_dummy_ops_is_valid_access(int off, int size,
					  enum bpf_access_type type,
					  const struct bpf_prog *prog,
					  struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

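/* Only the test_sleepable op may be implemented by a sleepable program. */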
static int bpf_dummy_ops_check_member(const struct btf_type *t,
				      const struct btf_member *member,
				      const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct bpf_dummy_ops, test_sleepable):
		break;
	default:
		if (prog->aux->sleepable)
			return -EINVAL;
	}

	return 0;
}

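/*
 * Allow the program to write only into struct bpf_dummy_ops_state, and
 * only within its bounds.
 */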
static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
					   const struct bpf_reg_state *reg,
					   int off, int size)
{
	const struct btf_type *state;
	const struct btf_type *t;
	s32 type_id;

	type_id = btf_find_by_name_kind(reg->btf, "bpf_dummy_ops_state",
					BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	state = btf_type_by_id(reg->btf, type_id);
	if (t != state) {
		bpf_log(log, "only access to bpf_dummy_ops_state is supported\n");
		return -EACCES;
	}

	if (off + size > sizeof(struct bpf_dummy_ops_state)) {
		bpf_log(log, "write access at off %d with size %d\n", off, size);
		return -EACCES;
	}

	return NOT_INIT;
}

static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
	.is_valid_access = bpf_dummy_ops_is_valid_access,
	.btf_struct_access = bpf_dummy_ops_btf_struct_access,
};

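/*
 * bpf_dummy_ops cannot actually be registered; the callbacks below only
 * exist so the struct_ops infrastructure has something to call, and they
 * reject or ignore every operation.
 */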
static int bpf_dummy_init_member(const struct btf_type *t,
				 const struct btf_member *member,
				 void *kdata, const void *udata)
{
	return -EOPNOTSUPP;
}

static int bpf_dummy_reg(void *kdata)
{
	return -EOPNOTSUPP;
}

static void bpf_dummy_unreg(void *kdata)
{
}

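/*
 * Native implementations of the test ops. They are never called directly
 * in this file; they serve as the CFI stubs referenced by .cfi_stubs
 * below so that indirect calls to the BPF-implemented ops pass CFI checks.
 */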
static int bpf_dummy_test_1(struct bpf_dummy_ops_state *cb)
{
	return 0;
}

static int bpf_dummy_test_2(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
			    char a3, unsigned long a4)
{
	return 0;
}

static int bpf_dummy_test_sleepable(struct bpf_dummy_ops_state *cb)
{
	return 0;
}

static struct bpf_dummy_ops __bpf_bpf_dummy_ops = {
	.test_1 = bpf_dummy_test_1,
	.test_2 = bpf_dummy_test_2,
	.test_sleepable = bpf_dummy_test_sleepable,
};

static struct bpf_struct_ops bpf_bpf_dummy_ops = {
	.verifier_ops = &bpf_dummy_verifier_ops,
	.init = bpf_dummy_init,
	.check_member = bpf_dummy_ops_check_member,
	.init_member = bpf_dummy_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.name = "bpf_dummy_ops",
	.cfi_stubs = &__bpf_bpf_dummy_ops,
	.owner = THIS_MODULE,
};

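/* Register the dummy struct_ops so test programs can attach to it. */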
static int __init bpf_dummy_struct_ops_init(void)
{
	return register_bpf_struct_ops(&bpf_bpf_dummy_ops, bpf_dummy_ops);
}
late_initcall(bpf_dummy_struct_ops_init);