xref: /linux/drivers/hid/bpf/hid_bpf_dispatch.c (revision 05b3b8f19441b6bf039cec1990de3c75bb9dbbd9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 /*
4  *  HID-BPF support for Linux
5  *
6  *  Copyright (c) 2022-2024 Benjamin Tissoires
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/bitops.h>
11 #include <linux/btf.h>
12 #include <linux/btf_ids.h>
13 #include <linux/filter.h>
14 #include <linux/hid.h>
15 #include <linux/hid_bpf.h>
16 #include <linux/init.h>
17 #include <linux/kfifo.h>
18 #include <linux/minmax.h>
19 #include <linux/module.h>
20 #include "hid_bpf_dispatch.h"
21 
22 struct hid_ops *hid_ops;
23 EXPORT_SYMBOL(hid_ops);
24 
25 u8 *
26 dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
27 			      u32 *size, int interrupt)
28 {
29 	struct hid_bpf_ctx_kern ctx_kern = {
30 		.ctx = {
31 			.hid = hdev,
32 			.allocated_size = hdev->bpf.allocated_data,
33 			.size = *size,
34 		},
35 		.data = hdev->bpf.device_data,
36 	};
37 	struct hid_bpf_ops *e;
38 	int ret;
39 
40 	if (type >= HID_REPORT_TYPES)
41 		return ERR_PTR(-EINVAL);
42 
43 	/* no program has been attached yet */
44 	if (!hdev->bpf.device_data)
45 		return data;
46 
47 	memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
48 	memcpy(ctx_kern.data, data, *size);
49 
50 	rcu_read_lock();
51 	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
52 		if (e->hid_device_event) {
53 			ret = e->hid_device_event(&ctx_kern.ctx, type);
54 			if (ret < 0) {
55 				rcu_read_unlock();
56 				return ERR_PTR(ret);
57 			}
58 
59 			if (ret)
60 				ctx_kern.ctx.retval = ret;
61 		}
62 	}
63 	rcu_read_unlock();
64 
65 	if (ret) {
66 		if (ret > ctx_kern.ctx.allocated_size)
67 			return ERR_PTR(-EINVAL);
68 
69 		*size = ret;
70 	}
71 
72 	return ctx_kern.data;
73 }
74 EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);
75 
76 u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
77 {
78 	int ret;
79 	struct hid_bpf_ctx_kern ctx_kern = {
80 		.ctx = {
81 			.hid = hdev,
82 			.size = *size,
83 			.allocated_size = HID_MAX_DESCRIPTOR_SIZE,
84 		},
85 	};
86 
87 	if (!hdev->bpf.rdesc_ops)
88 		goto ignore_bpf;
89 
90 	ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
91 	if (!ctx_kern.data)
92 		goto ignore_bpf;
93 
94 	memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));
95 
96 	ret = hdev->bpf.rdesc_ops->hid_rdesc_fixup(&ctx_kern.ctx);
97 	if (ret < 0)
98 		goto ignore_bpf;
99 
100 	if (ret) {
101 		if (ret > ctx_kern.ctx.allocated_size)
102 			goto ignore_bpf;
103 
104 		*size = ret;
105 	}
106 
107 	rdesc = krealloc(ctx_kern.data, *size, GFP_KERNEL);
108 
109 	return rdesc;
110 
111  ignore_bpf:
112 	kfree(ctx_kern.data);
113 	return kmemdup(rdesc, *size, GFP_KERNEL);
114 }
115 EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
116 
117 static int device_match_id(struct device *dev, const void *id)
118 {
119 	struct hid_device *hdev = to_hid_device(dev);
120 
121 	return hdev->id == *(int *)id;
122 }
123 
124 struct hid_device *hid_get_device(unsigned int hid_id)
125 {
126 	struct device *dev;
127 
128 	if (!hid_ops)
129 		return ERR_PTR(-EINVAL);
130 
131 	dev = bus_find_device(hid_ops->bus_type, NULL, &hid_id, device_match_id);
132 	if (!dev)
133 		return ERR_PTR(-EINVAL);
134 
135 	return to_hid_device(dev);
136 }
137 
/*
 * hid_put_device - drop the reference on @hid taken by hid_get_device()
 * (bus_find_device() acquires it on lookup).
 */
void hid_put_device(struct hid_device *hid)
{
	put_device(&hid->dev);
}
142 
143 static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
144 {
145 	u8 *alloc_data;
146 	unsigned int i, j, max_report_len = 0;
147 	size_t alloc_size = 0;
148 
149 	/* compute the maximum report length for this device */
150 	for (i = 0; i < HID_REPORT_TYPES; i++) {
151 		struct hid_report_enum *report_enum = hdev->report_enum + i;
152 
153 		for (j = 0; j < HID_MAX_IDS; j++) {
154 			struct hid_report *report = report_enum->report_id_hash[j];
155 
156 			if (report)
157 				max_report_len = max(max_report_len, hid_report_len(report));
158 		}
159 	}
160 
161 	/*
162 	 * Give us a little bit of extra space and some predictability in the
163 	 * buffer length we create. This way, we can tell users that they can
164 	 * work on chunks of 64 bytes of memory without having the bpf verifier
165 	 * scream at them.
166 	 */
167 	alloc_size = DIV_ROUND_UP(max_report_len, 64) * 64;
168 
169 	alloc_data = kzalloc(alloc_size, GFP_KERNEL);
170 	if (!alloc_data)
171 		return -ENOMEM;
172 
173 	*data = alloc_data;
174 	*size = alloc_size;
175 
176 	return 0;
177 }
178 
/*
 * hid_bpf_allocate_event_data - ensure the BPF event scratch buffer exists
 *
 * No-op (returns 0) if the buffer was already allocated; otherwise delegates
 * to __hid_bpf_allocate_data() to size and allocate it.
 */
int hid_bpf_allocate_event_data(struct hid_device *hdev)
{
	/* hdev->bpf.device_data is already allocated, abort */
	if (hdev->bpf.device_data)
		return 0;

	return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data);
}
187 
188 int hid_bpf_reconnect(struct hid_device *hdev)
189 {
190 	if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
191 		return device_reprobe(&hdev->dev);
192 
193 	return 0;
194 }
195 
196 /* Disables missing prototype warnings */
197 __bpf_kfunc_start_defs();
198 
199 /**
200  * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
201  *
202  * @ctx: The HID-BPF context
203  * @offset: The offset within the memory
204  * @rdwr_buf_size: the const size of the buffer
205  *
206  * @returns %NULL on error, an %__u8 memory pointer on success
207  */
208 __bpf_kfunc __u8 *
209 hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
210 {
211 	struct hid_bpf_ctx_kern *ctx_kern;
212 
213 	if (!ctx)
214 		return NULL;
215 
216 	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
217 
218 	if (rdwr_buf_size + offset > ctx->allocated_size)
219 		return NULL;
220 
221 	return ctx_kern->data + offset;
222 }
223 
224 /**
225  * hid_bpf_allocate_context - Allocate a context to the given HID device
226  *
227  * @hid_id: the system unique identifier of the HID device
228  *
229  * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
230  */
231 __bpf_kfunc struct hid_bpf_ctx *
232 hid_bpf_allocate_context(unsigned int hid_id)
233 {
234 	struct hid_device *hdev;
235 	struct hid_bpf_ctx_kern *ctx_kern = NULL;
236 
237 	hdev = hid_get_device(hid_id);
238 	if (IS_ERR(hdev))
239 		return NULL;
240 
241 	ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
242 	if (!ctx_kern) {
243 		hid_put_device(hdev);
244 		return NULL;
245 	}
246 
247 	ctx_kern->ctx.hid = hdev;
248 
249 	return &ctx_kern->ctx;
250 }
251 
252 /**
253  * hid_bpf_release_context - Release the previously allocated context @ctx
254  *
255  * @ctx: the HID-BPF context to release
256  *
257  */
258 __bpf_kfunc void
259 hid_bpf_release_context(struct hid_bpf_ctx *ctx)
260 {
261 	struct hid_bpf_ctx_kern *ctx_kern;
262 	struct hid_device *hid;
263 
264 	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
265 	hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */
266 
267 	kfree(ctx_kern);
268 
269 	/* get_device() is called by bus_find_device() */
270 	hid_put_device(hid);
271 }
272 
273 static int
274 __hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz,
275 			  enum hid_report_type rtype)
276 {
277 	struct hid_report_enum *report_enum;
278 	struct hid_report *report;
279 	struct hid_device *hdev;
280 	u32 report_len;
281 
282 	/* check arguments */
283 	if (!ctx || !hid_ops || !buf)
284 		return -EINVAL;
285 
286 	switch (rtype) {
287 	case HID_INPUT_REPORT:
288 	case HID_OUTPUT_REPORT:
289 	case HID_FEATURE_REPORT:
290 		break;
291 	default:
292 		return -EINVAL;
293 	}
294 
295 	if (*buf__sz < 1)
296 		return -EINVAL;
297 
298 	hdev = (struct hid_device *)ctx->hid; /* discard const */
299 
300 	report_enum = hdev->report_enum + rtype;
301 	report = hid_ops->hid_get_report(report_enum, buf);
302 	if (!report)
303 		return -EINVAL;
304 
305 	report_len = hid_report_len(report);
306 
307 	if (*buf__sz > report_len)
308 		*buf__sz = report_len;
309 
310 	return 0;
311 }
312 
313 /**
314  * hid_bpf_hw_request - Communicate with a HID device
315  *
316  * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
317  * @buf: a %PTR_TO_MEM buffer
318  * @buf__sz: the size of the data to transfer
319  * @rtype: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
320  * @reqtype: the type of the request (%HID_REQ_GET_REPORT, %HID_REQ_SET_REPORT, ...)
321  *
322  * @returns %0 on success, a negative error code otherwise.
323  */
324 __bpf_kfunc int
325 hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
326 		   enum hid_report_type rtype, enum hid_class_request reqtype)
327 {
328 	struct hid_device *hdev;
329 	size_t size = buf__sz;
330 	u8 *dma_data;
331 	int ret;
332 
333 	/* check arguments */
334 	ret = __hid_bpf_hw_check_params(ctx, buf, &size, rtype);
335 	if (ret)
336 		return ret;
337 
338 	switch (reqtype) {
339 	case HID_REQ_GET_REPORT:
340 	case HID_REQ_GET_IDLE:
341 	case HID_REQ_GET_PROTOCOL:
342 	case HID_REQ_SET_REPORT:
343 	case HID_REQ_SET_IDLE:
344 	case HID_REQ_SET_PROTOCOL:
345 		break;
346 	default:
347 		return -EINVAL;
348 	}
349 
350 	hdev = (struct hid_device *)ctx->hid; /* discard const */
351 
352 	dma_data = kmemdup(buf, size, GFP_KERNEL);
353 	if (!dma_data)
354 		return -ENOMEM;
355 
356 	ret = hid_ops->hid_hw_raw_request(hdev,
357 					      dma_data[0],
358 					      dma_data,
359 					      size,
360 					      rtype,
361 					      reqtype);
362 
363 	if (ret > 0)
364 		memcpy(buf, dma_data, ret);
365 
366 	kfree(dma_data);
367 	return ret;
368 }
369 
370 /**
371  * hid_bpf_hw_output_report - Send an output report to a HID device
372  *
373  * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
374  * @buf: a %PTR_TO_MEM buffer
375  * @buf__sz: the size of the data to transfer
376  *
377  * Returns the number of bytes transferred on success, a negative error code otherwise.
378  */
379 __bpf_kfunc int
380 hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
381 {
382 	struct hid_device *hdev;
383 	size_t size = buf__sz;
384 	u8 *dma_data;
385 	int ret;
386 
387 	/* check arguments */
388 	ret = __hid_bpf_hw_check_params(ctx, buf, &size, HID_OUTPUT_REPORT);
389 	if (ret)
390 		return ret;
391 
392 	hdev = (struct hid_device *)ctx->hid; /* discard const */
393 
394 	dma_data = kmemdup(buf, size, GFP_KERNEL);
395 	if (!dma_data)
396 		return -ENOMEM;
397 
398 	ret = hid_ops->hid_hw_output_report(hdev,
399 						dma_data,
400 						size);
401 
402 	kfree(dma_data);
403 	return ret;
404 }
405 
406 /**
407  * hid_bpf_input_report - Inject a HID report in the kernel from a HID device
408  *
409  * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
410  * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
411  * @buf: a %PTR_TO_MEM buffer
412  * @buf__sz: the size of the data to transfer
413  *
414  * Returns %0 on success, a negative error code otherwise.
415  */
416 __bpf_kfunc int
417 hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
418 		     const size_t buf__sz)
419 {
420 	struct hid_device *hdev;
421 	size_t size = buf__sz;
422 	int ret;
423 
424 	/* check arguments */
425 	ret = __hid_bpf_hw_check_params(ctx, buf, &size, type);
426 	if (ret)
427 		return ret;
428 
429 	hdev = (struct hid_device *)ctx->hid; /* discard const */
430 
431 	return hid_ops->hid_input_report(hdev, type, buf, size, 0);
432 }
433 __bpf_kfunc_end_defs();
434 
435 /*
436  * The following set contains all functions we agree BPF programs
437  * can use.
438  */
BTF_KFUNCS_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
/* KF_ACQUIRE / KF_RELEASE: the verifier pairs context allocation and release */
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_request, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_input_report, KF_SLEEPABLE)
BTF_KFUNCS_END(hid_bpf_kfunc_ids)

/* registered for BPF_PROG_TYPE_STRUCT_OPS programs in hid_bpf_init() */
static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_kfunc_ids,
};
452 
/* for syscall HID-BPF */
BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
/* same kfuncs as above, minus hid_bpf_get_data and without KF_SLEEPABLE */
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report)
BTF_ID_FLAGS(func, hid_bpf_input_report)
BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)

/* registered for BPF_PROG_TYPE_SYSCALL programs in hid_bpf_init() */
static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_syscall_kfunc_ids,
};
466 
467 int hid_bpf_connect_device(struct hid_device *hdev)
468 {
469 	bool need_to_allocate = false;
470 	struct hid_bpf_ops *e;
471 
472 	rcu_read_lock();
473 	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
474 		if (e->hid_device_event) {
475 			need_to_allocate = true;
476 			break;
477 		}
478 	}
479 	rcu_read_unlock();
480 
481 	/* only allocate BPF data if there are programs attached */
482 	if (!need_to_allocate)
483 		return 0;
484 
485 	return hid_bpf_allocate_event_data(hdev);
486 }
487 EXPORT_SYMBOL_GPL(hid_bpf_connect_device);
488 
/*
 * hid_bpf_disconnect_device - tear down the BPF event scratch buffer
 *
 * Frees the buffer allocated by hid_bpf_allocate_event_data() and resets
 * the bookkeeping so a later reconnect can reallocate it.
 */
void hid_bpf_disconnect_device(struct hid_device *hdev)
{
	kfree(hdev->bpf.device_data);
	hdev->bpf.device_data = NULL;
	hdev->bpf.allocated_data = 0;
}
EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);
496 
497 void hid_bpf_destroy_device(struct hid_device *hdev)
498 {
499 	if (!hdev)
500 		return;
501 
502 	/* mark the device as destroyed in bpf so we don't reattach it */
503 	hdev->bpf.destroyed = true;
504 
505 	__hid_bpf_ops_destroy_device(hdev);
506 }
507 EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);
508 
/* Initialize the HID-BPF state (ops list and its lock) of a new hid_device. */
void hid_bpf_device_init(struct hid_device *hdev)
{
	INIT_LIST_HEAD(&hdev->bpf.prog_list);
	mutex_init(&hdev->bpf.prog_list_lock);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);
515 
516 static int __init hid_bpf_init(void)
517 {
518 	int err;
519 
520 	/* Note: if we exit with an error any time here, we would entirely break HID, which
521 	 * is probably not something we want. So we log an error and return success.
522 	 *
523 	 * This is not a big deal: nobody will be able to use the functionality.
524 	 */
525 
526 	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &hid_bpf_kfunc_set);
527 	if (err) {
528 		pr_warn("error while setting HID BPF tracing kfuncs: %d", err);
529 		return 0;
530 	}
531 
532 	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
533 	if (err) {
534 		pr_warn("error while setting HID BPF syscall kfuncs: %d", err);
535 		return 0;
536 	}
537 
538 	return 0;
539 }
540 
541 late_initcall(hid_bpf_init);
542 MODULE_AUTHOR("Benjamin Tissoires");
543 MODULE_LICENSE("GPL");
544