// SPDX-License-Identifier: GPL-2.0-only

/*
 *  HID-BPF support for Linux
 *
 *  Copyright (c) 2024 Benjamin Tissoires
 */

#include <linux/bitops.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/hid.h>
#include <linux/hid_bpf.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include "hid_bpf_dispatch.h"

static struct btf *hid_bpf_ops_btf;

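/* called at struct_ops registration time: keep the BTF around for later type lookups */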
static int hid_bpf_ops_init(struct btf *btf)
{
	hid_bpf_ops_btf = btf;
	return 0;
}

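/*
 * Context accesses from the struct_ops programs are checked like any
 * other tracing program, through the generic BTF-aware helper.
 */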
static bool hid_bpf_ops_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

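/*
 * Only hid_rdesc_fixup(), hid_hw_request() and hid_hw_output_report()
 * may be backed by sleepable programs; a sleepable program attached to
 * any other member is rejected.
 */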
static int hid_bpf_ops_check_member(const struct btf_type *t,
				    const struct btf_member *member,
				    const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct hid_bpf_ops, hid_rdesc_fixup):
	case offsetof(struct hid_bpf_ops, hid_hw_request):
	case offsetof(struct hid_bpf_ops, hid_hw_output_report):
		break;
	default:
		if (prog->sleepable)
			return -EINVAL;
	}

	return 0;
}

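/*
 * One byte range, inside the struct named struct_name, that BPF
 * programs are allowed to write to.
 */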
struct hid_bpf_offset_write_range {
	const char *struct_name;
	u32 struct_length;
	u32 start;
	u32 end;
};

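/*
 * Restrict writes to an explicit allow-list: the retval field of
 * struct hid_bpf_ctx plus the name/uniq/phys strings of
 * struct hid_device. For the string fields, WRITE_RANGE() subtracts
 * one byte from the end of the range so the terminating NUL stays
 * read-only.
 */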
static int hid_bpf_ops_btf_struct_access(struct bpf_verifier_log *log,
					 const struct bpf_reg_state *reg,
					 int off, int size)
{
#define WRITE_RANGE(_name, _field, _is_string)					\
	{									\
		.struct_name = #_name,						\
		.struct_length = sizeof(struct _name),				\
		.start = offsetof(struct _name, _field),			\
		.end = offsetofend(struct _name, _field) - !!(_is_string),	\
	}

	const struct hid_bpf_offset_write_range write_ranges[] = {
		WRITE_RANGE(hid_bpf_ctx, retval, false),
		WRITE_RANGE(hid_device, name, true),
		WRITE_RANGE(hid_device, uniq, true),
		WRITE_RANGE(hid_device, phys, true),
	};
#undef WRITE_RANGE
	const struct btf_type *state = NULL;
	const struct btf_type *t;
	const char *cur = NULL;
	int i;

	t = btf_type_by_id(reg->btf, reg->btf_id);

	for (i = 0; i < ARRAY_SIZE(write_ranges); i++) {
		const struct hid_bpf_offset_write_range *write_range = &write_ranges[i];
		s32 type_id;

		/* we already matched the accessed struct, but this entry
		 * starts a different one: every range for the match has
		 * been checked, so stop the scan.
		 */
		if (t == state && write_range->struct_name != cur)
			break;

		/* new struct to look up */
		if (write_range->struct_name != cur) {
			type_id = btf_find_by_name_kind(reg->btf, write_range->struct_name,
							BTF_KIND_STRUCT);
			if (type_id < 0)
				return -EINVAL;

			state = btf_type_by_id(reg->btf, type_id);
		}

		/* this is not the struct we are looking for */
		if (t != state) {
			cur = write_range->struct_name;
			continue;
		}

		/* first time we see this struct: check for out-of-bounds accesses */
		if (cur != write_range->struct_name &&
		    off + size > write_range->struct_length) {
			bpf_log(log, "write access for struct %s at off %d with size %d\n",
				write_range->struct_name, off, size);
			return -EACCES;
		}

		/* now check that the access falls within the writable range */
		if (off >= write_range->start && off + size <= write_range->end)
			return NOT_INIT;

		cur = write_range->struct_name;
	}

	if (t != state)
		bpf_log(log, "write access to this struct is not supported\n");
	else
		bpf_log(log,
			"write access at off %d with size %d on read-only part of %s\n",
			off, size, cur);

	return -EACCES;
}

static const struct bpf_verifier_ops hid_bpf_verifier_ops = {
	.get_func_proto = bpf_base_func_proto,
	.is_valid_access = hid_bpf_ops_is_valid_access,
	.btf_struct_access = hid_bpf_ops_btf_struct_access,
};

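/* copy and validate the scalar members (hid_id, flags) set from userspace */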
static int hid_bpf_ops_init_member(const struct btf_type *t,
				   const struct btf_member *member,
				   void *kdata, const void *udata)
{
	const struct hid_bpf_ops *uhid_bpf_ops;
	struct hid_bpf_ops *khid_bpf_ops;
	u32 moff;

	uhid_bpf_ops = (const struct hid_bpf_ops *)udata;
	khid_bpf_ops = (struct hid_bpf_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct hid_bpf_ops, hid_id):
		/* For the hid_id and flags fields, this function has to
		 * copy them and return 1 to indicate that the data has
		 * been handled by the struct_ops type; otherwise the
		 * verifier rejects the map if the value of those fields
		 * is not zero.
		 */
		khid_bpf_ops->hid_id = uhid_bpf_ops->hid_id;
		return 1;
	case offsetof(struct hid_bpf_ops, flags):
		if (uhid_bpf_ops->flags & ~BPF_F_BEFORE)
			return -EINVAL;
		khid_bpf_ops->flags = uhid_bpf_ops->flags;
		return 1;
	}
	return 0;
}

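/*
 * Attach a struct_ops to the HID device identified by ops->hid_id:
 * enforce the per-device program limit and the one-rdesc-fixup-per-device
 * rule, then insert the ops into the SRCU-protected prog_list.
 * BPF_F_BEFORE adds it at the head of the list, so it runs before the
 * already-attached ops.
 */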
static int hid_bpf_reg(void *kdata, struct bpf_link *link)
{
	struct hid_bpf_ops *ops = kdata;
	struct hid_device *hdev;
	int count, err = 0;

	/* prevent attaching the same struct_ops twice */
	if (ops->hdev)
		return -EINVAL;

	hdev = hid_get_device(ops->hid_id);
	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	ops->hdev = hdev;

	mutex_lock(&hdev->bpf.prog_list_lock);

	count = list_count_nodes(&hdev->bpf.prog_list);
	if (count >= HID_BPF_MAX_PROGS_PER_DEV) {
		err = -E2BIG;
		goto out_unlock;
	}

	if (ops->hid_rdesc_fixup) {
		if (hdev->bpf.rdesc_ops) {
			err = -EINVAL;
			goto out_unlock;
		}

		hdev->bpf.rdesc_ops = ops;
	}

	if (ops->hid_device_event) {
		err = hid_bpf_allocate_event_data(hdev);
		if (err)
			goto out_unlock;
	}

	if (ops->flags & BPF_F_BEFORE)
		list_add_rcu(&ops->list, &hdev->bpf.prog_list);
	else
		list_add_tail_rcu(&ops->list, &hdev->bpf.prog_list);
	synchronize_srcu(&hdev->bpf.srcu);

out_unlock:
	mutex_unlock(&hdev->bpf.prog_list_lock);

	if (err) {
		if (hdev->bpf.rdesc_ops == ops)
			hdev->bpf.rdesc_ops = NULL;
		hid_put_device(hdev);
	} else if (ops->hid_rdesc_fixup) {
		hid_bpf_reconnect(hdev);
	}

	return err;
}

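/*
 * Detach a struct_ops from its device: unlink it, wait for in-flight
 * SRCU readers, and reconnect the device if its report descriptor
 * fixup went away so that the original descriptor is restored.
 */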
static void hid_bpf_unreg(void *kdata, struct bpf_link *link)
{
	struct hid_bpf_ops *ops = kdata;
	struct hid_device *hdev;
	bool reconnect = false;

	hdev = ops->hdev;

	/* check if __hid_bpf_ops_destroy_device() has been called */
	if (!hdev)
		return;

	mutex_lock(&hdev->bpf.prog_list_lock);

	list_del_rcu(&ops->list);
	synchronize_srcu(&hdev->bpf.srcu);
	ops->hdev = NULL;

	reconnect = hdev->bpf.rdesc_ops == ops;
	if (reconnect)
		hdev->bpf.rdesc_ops = NULL;

	mutex_unlock(&hdev->bpf.prog_list_lock);

	if (reconnect)
		hid_bpf_reconnect(hdev);

	hid_put_device(hdev);
}

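/*
 * No-op stubs of the four hooks. They are not meant to be executed;
 * they only provide correctly-typed targets for the struct_ops CFI
 * machinery (see .cfi_stubs below).
 */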
static int __hid_bpf_device_event(struct hid_bpf_ctx *ctx, enum hid_report_type type, u64 source)
{
	return 0;
}

static int __hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
{
	return 0;
}

static int __hid_bpf_hw_request(struct hid_bpf_ctx *ctx, unsigned char reportnum,
				enum hid_report_type rtype, enum hid_class_request reqtype,
				u64 source)
{
	return 0;
}

static int __hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, u64 source)
{
	return 0;
}

static struct hid_bpf_ops __bpf_hid_bpf_ops = {
	.hid_device_event = __hid_bpf_device_event,
	.hid_rdesc_fixup = __hid_bpf_rdesc_fixup,
	.hid_hw_request = __hid_bpf_hw_request,
	.hid_hw_output_report = __hid_bpf_hw_output_report,
};

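/* the struct_ops type itself, as registered with the BPF core below */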
static struct bpf_struct_ops bpf_hid_bpf_ops = {
	.verifier_ops = &hid_bpf_verifier_ops,
	.init = hid_bpf_ops_init,
	.check_member = hid_bpf_ops_check_member,
	.init_member = hid_bpf_ops_init_member,
	.reg = hid_bpf_reg,
	.unreg = hid_bpf_unreg,
	.name = "hid_bpf_ops",
	.cfi_stubs = &__bpf_hid_bpf_ops,
	.owner = THIS_MODULE,
};

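/*
 * Called when the underlying HID device is destroyed: drop the device
 * reference held by each attached ops and clear its hdev pointer,
 * turning any later hid_bpf_unreg() into a no-op.
 */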
void __hid_bpf_ops_destroy_device(struct hid_device *hdev)
{
	struct hid_bpf_ops *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
		hid_put_device(hdev);
		e->hdev = NULL;
	}
	rcu_read_unlock();
}

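/*
 * Register the "hid_bpf_ops" struct_ops type once the BPF and HID cores
 * are up. For illustration, a BPF object would typically attach here
 * with something like the following sketch (modeled on the HID-BPF
 * selftests; "my_event"/"my_ops" are made-up names, and hid_id is
 * expected to be filled in by userspace before load):
 *
 *	SEC("struct_ops/hid_device_event")
 *	int BPF_PROG(my_event, struct hid_bpf_ctx *hctx,
 *		     enum hid_report_type type, __u64 source)
 *	{
 *		return 0;
 *	}
 *
 *	SEC(".struct_ops.link")
 *	struct hid_bpf_ops my_ops = {
 *		.hid_device_event = (void *)my_event,
 *	};
 *
 * Attaching the map with bpf_map__attach_struct_ops() then ends up in
 * hid_bpf_reg() above.
 */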
static int __init hid_bpf_struct_ops_init(void)
{
	return register_bpf_struct_ops(&bpf_hid_bpf_ops, hid_bpf_ops);
}
late_initcall(hid_bpf_struct_ops_init);