xref: /linux/drivers/hid/bpf/hid_bpf_dispatch.c (revision 015a4a2a439b285943da471d38b2721bbe4d8b39)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 /*
4  *  HID-BPF support for Linux
5  *
6  *  Copyright (c) 2022-2024 Benjamin Tissoires
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/bitops.h>
11 #include <linux/btf.h>
12 #include <linux/btf_ids.h>
13 #include <linux/filter.h>
14 #include <linux/hid.h>
15 #include <linux/hid_bpf.h>
16 #include <linux/init.h>
17 #include <linux/kfifo.h>
18 #include <linux/minmax.h>
19 #include <linux/module.h>
20 #include "hid_bpf_dispatch.h"
21 
/*
 * Set by the HID core at runtime; NULL until then. Callers in this file
 * check it before use (see hid_get_device() and __hid_bpf_hw_check_params()).
 */
struct hid_ops *hid_ops;
EXPORT_SYMBOL(hid_ops);
24 
/*
 * dispatch_hid_bpf_device_event() - run every attached HID-BPF program on an
 * incoming report.
 *
 * The report in @data is copied into the preallocated hdev->bpf.device_data
 * buffer and each ops' hid_device_event() callback is invoked on that copy
 * under rcu_read_lock(). A callback returning a negative value aborts the
 * dispatch with ERR_PTR(); a positive return updates the working report size.
 *
 * Returns: the (possibly modified) BPF data buffer with *size updated, the
 * original @data untouched when no program is attached, or an ERR_PTR on
 * error. @interrupt is currently unused here.
 */
u8 *
dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
			      u32 *size, int interrupt, u64 source)
{
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.allocated_size = hdev->bpf.allocated_data,
			.size = *size,
		},
		.data = hdev->bpf.device_data,
	};
	struct hid_bpf_ops *e;
	int ret;

	if (type >= HID_REPORT_TYPES)
		return ERR_PTR(-EINVAL);

	/* no program has been attached yet */
	if (!hdev->bpf.device_data)
		return data;

	/* zero the whole buffer so programs never see stale bytes past *size */
	memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
	memcpy(ctx_kern.data, data, *size);

	rcu_read_lock();
	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
		if (e->hid_device_event) {
			ret = e->hid_device_event(&ctx_kern.ctx, type, source);
			if (ret < 0) {
				rcu_read_unlock();
				return ERR_PTR(ret);
			}

			/* a positive return is the new report size for the next program */
			if (ret)
				ctx_kern.ctx.size = ret;
		}
	}
	rcu_read_unlock();

	ret = ctx_kern.ctx.size;
	if (ret) {
		/* a program may not grow the report past the allocation */
		if (ret > ctx_kern.ctx.allocated_size)
			return ERR_PTR(-EINVAL);

		*size = ret;
	}

	return ctx_kern.data;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);
76 
/*
 * dispatch_hid_bpf_raw_requests() - give attached HID-BPF programs a chance
 * to intercept a hid_hw_raw_request() call.
 *
 * The prog_list is walked under SRCU (sleepable: the callbacks may perform
 * blocking requests themselves). The first hid_hw_request() callback that
 * returns non-zero short-circuits the walk and its value is returned to the
 * caller; 0 from everybody means "not handled". @from_bpf is propagated into
 * the context so nested requests originating from BPF can be detected.
 */
int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
				  unsigned char reportnum, u8 *buf,
				  u32 size, enum hid_report_type rtype,
				  enum hid_class_request reqtype,
				  u64 source, bool from_bpf)
{
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.allocated_size = size,
			.size = size,
		},
		.data = buf,
		.from_bpf = from_bpf,
	};
	struct hid_bpf_ops *e;
	int ret, idx;

	if (rtype >= HID_REPORT_TYPES)
		return -EINVAL;

	idx = srcu_read_lock(&hdev->bpf.srcu);
	list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
				 srcu_read_lock_held(&hdev->bpf.srcu)) {
		if (!e->hid_hw_request)
			continue;

		ret = e->hid_hw_request(&ctx_kern.ctx, reportnum, rtype, reqtype, source);
		if (ret)
			goto out;
	}
	ret = 0;

out:
	srcu_read_unlock(&hdev->bpf.srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_raw_requests);
115 
116 u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
117 {
118 	int ret;
119 	struct hid_bpf_ctx_kern ctx_kern = {
120 		.ctx = {
121 			.hid = hdev,
122 			.size = *size,
123 			.allocated_size = HID_MAX_DESCRIPTOR_SIZE,
124 		},
125 	};
126 
127 	if (!hdev->bpf.rdesc_ops)
128 		goto ignore_bpf;
129 
130 	ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
131 	if (!ctx_kern.data)
132 		goto ignore_bpf;
133 
134 	memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));
135 
136 	ret = hdev->bpf.rdesc_ops->hid_rdesc_fixup(&ctx_kern.ctx);
137 	if (ret < 0)
138 		goto ignore_bpf;
139 
140 	if (ret) {
141 		if (ret > ctx_kern.ctx.allocated_size)
142 			goto ignore_bpf;
143 
144 		*size = ret;
145 	}
146 
147 	rdesc = krealloc(ctx_kern.data, *size, GFP_KERNEL);
148 
149 	return rdesc;
150 
151  ignore_bpf:
152 	kfree(ctx_kern.data);
153 	return kmemdup(rdesc, *size, GFP_KERNEL);
154 }
155 EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
156 
157 static int device_match_id(struct device *dev, const void *id)
158 {
159 	struct hid_device *hdev = to_hid_device(dev);
160 
161 	return hdev->id == *(int *)id;
162 }
163 
164 struct hid_device *hid_get_device(unsigned int hid_id)
165 {
166 	struct device *dev;
167 
168 	if (!hid_ops)
169 		return ERR_PTR(-EINVAL);
170 
171 	dev = bus_find_device(hid_ops->bus_type, NULL, &hid_id, device_match_id);
172 	if (!dev)
173 		return ERR_PTR(-EINVAL);
174 
175 	return to_hid_device(dev);
176 }
177 
/* Drop the device reference taken by hid_get_device()/bus_find_device(). */
void hid_put_device(struct hid_device *hid)
{
	put_device(&hid->dev);
}
182 
183 static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
184 {
185 	u8 *alloc_data;
186 	unsigned int i, j, max_report_len = 0;
187 	size_t alloc_size = 0;
188 
189 	/* compute the maximum report length for this device */
190 	for (i = 0; i < HID_REPORT_TYPES; i++) {
191 		struct hid_report_enum *report_enum = hdev->report_enum + i;
192 
193 		for (j = 0; j < HID_MAX_IDS; j++) {
194 			struct hid_report *report = report_enum->report_id_hash[j];
195 
196 			if (report)
197 				max_report_len = max(max_report_len, hid_report_len(report));
198 		}
199 	}
200 
201 	/*
202 	 * Give us a little bit of extra space and some predictability in the
203 	 * buffer length we create. This way, we can tell users that they can
204 	 * work on chunks of 64 bytes of memory without having the bpf verifier
205 	 * scream at them.
206 	 */
207 	alloc_size = DIV_ROUND_UP(max_report_len, 64) * 64;
208 
209 	alloc_data = kzalloc(alloc_size, GFP_KERNEL);
210 	if (!alloc_data)
211 		return -ENOMEM;
212 
213 	*data = alloc_data;
214 	*size = alloc_size;
215 
216 	return 0;
217 }
218 
219 int hid_bpf_allocate_event_data(struct hid_device *hdev)
220 {
221 	/* hdev->bpf.device_data is already allocated, abort */
222 	if (hdev->bpf.device_data)
223 		return 0;
224 
225 	return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data);
226 }
227 
/*
 * Trigger a driver reprobe of the device so a changed report descriptor
 * takes effect. test_and_set_bit() returns the previous bit value, so the
 * reprobe is requested at most once per device; ffs() turns the
 * HID_STAT_REPROBED mask into the bit number the atomic bitop expects.
 */
int hid_bpf_reconnect(struct hid_device *hdev)
{
	if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
		return device_reprobe(&hdev->dev);

	return 0;
}
235 
236 /* Disables missing prototype warnings */
237 __bpf_kfunc_start_defs();
238 
239 /**
240  * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
241  *
242  * @ctx: The HID-BPF context
243  * @offset: The offset within the memory
244  * @rdwr_buf_size: the const size of the buffer
245  *
246  * @returns %NULL on error, an %__u8 memory pointer on success
247  */
248 __bpf_kfunc __u8 *
249 hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
250 {
251 	struct hid_bpf_ctx_kern *ctx_kern;
252 
253 	if (!ctx)
254 		return NULL;
255 
256 	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
257 
258 	if (rdwr_buf_size + offset > ctx->allocated_size)
259 		return NULL;
260 
261 	return ctx_kern->data + offset;
262 }
263 
264 /**
265  * hid_bpf_allocate_context - Allocate a context to the given HID device
266  *
267  * @hid_id: the system unique identifier of the HID device
268  *
269  * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
270  */
271 __bpf_kfunc struct hid_bpf_ctx *
272 hid_bpf_allocate_context(unsigned int hid_id)
273 {
274 	struct hid_device *hdev;
275 	struct hid_bpf_ctx_kern *ctx_kern = NULL;
276 
277 	hdev = hid_get_device(hid_id);
278 	if (IS_ERR(hdev))
279 		return NULL;
280 
281 	ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
282 	if (!ctx_kern) {
283 		hid_put_device(hdev);
284 		return NULL;
285 	}
286 
287 	ctx_kern->ctx.hid = hdev;
288 
289 	return &ctx_kern->ctx;
290 }
291 
/**
 * hid_bpf_release_context - Release the previously allocated context @ctx
 *
 * @ctx: the HID-BPF context to release
 *
 */
__bpf_kfunc void
hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	struct hid_device *hid;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	/* save the device pointer BEFORE kfree() frees the context around it */
	hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */

	kfree(ctx_kern);

	/* get_device() is called by bus_find_device() */
	hid_put_device(hid);
}
312 
313 static int
314 __hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz,
315 			  enum hid_report_type rtype)
316 {
317 	struct hid_report_enum *report_enum;
318 	struct hid_report *report;
319 	struct hid_device *hdev;
320 	u32 report_len;
321 
322 	/* check arguments */
323 	if (!ctx || !hid_ops || !buf)
324 		return -EINVAL;
325 
326 	switch (rtype) {
327 	case HID_INPUT_REPORT:
328 	case HID_OUTPUT_REPORT:
329 	case HID_FEATURE_REPORT:
330 		break;
331 	default:
332 		return -EINVAL;
333 	}
334 
335 	if (*buf__sz < 1)
336 		return -EINVAL;
337 
338 	hdev = (struct hid_device *)ctx->hid; /* discard const */
339 
340 	report_enum = hdev->report_enum + rtype;
341 	report = hid_ops->hid_get_report(report_enum, buf);
342 	if (!report)
343 		return -EINVAL;
344 
345 	report_len = hid_report_len(report);
346 
347 	if (*buf__sz > report_len)
348 		*buf__sz = report_len;
349 
350 	return 0;
351 }
352 
/**
 * hid_bpf_hw_request - Communicate with a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 * @rtype: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @reqtype: the type of the request (%HID_REQ_GET_REPORT, %HID_REQ_SET_REPORT, ...)
 *
 * @returns %0 on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
		   enum hid_report_type rtype, enum hid_class_request reqtype)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	struct hid_device *hdev;
	size_t size = buf__sz;
	u8 *dma_data;
	int ret;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

	/* refuse requests issued from a context that itself came from BPF:
	 * this would re-enter dispatch_hid_bpf_raw_requests() */
	if (ctx_kern->from_bpf)
		return -EDEADLOCK;

	/* check arguments */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, rtype);
	if (ret)
		return ret;

	switch (reqtype) {
	case HID_REQ_GET_REPORT:
	case HID_REQ_GET_IDLE:
	case HID_REQ_GET_PROTOCOL:
	case HID_REQ_SET_REPORT:
	case HID_REQ_SET_IDLE:
	case HID_REQ_SET_PROTOCOL:
		break;
	default:
		return -EINVAL;
	}

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	/* copy the BPF buffer into kernel memory suitable for the transport */
	dma_data = kmemdup(buf, size, GFP_KERNEL);
	if (!dma_data)
		return -ENOMEM;

	/* dma_data[0] is the report number; ctx is passed as the source tag */
	ret = hid_ops->hid_hw_raw_request(hdev,
					      dma_data[0],
					      dma_data,
					      size,
					      rtype,
					      reqtype,
					      (__u64)ctx,
					      true); /* prevent infinite recursions */

	/* on a GET-type request, hand the received bytes back to the caller */
	if (ret > 0)
		memcpy(buf, dma_data, ret);

	kfree(dma_data);
	return ret;
}
417 
/**
 * hid_bpf_hw_output_report - Send an output report to a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns the number of bytes transferred on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
{
	struct hid_device *hdev;
	size_t size = buf__sz;
	u8 *dma_data;
	int ret;

	/* check arguments; also clamps size to the output report's length */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, HID_OUTPUT_REPORT);
	if (ret)
		return ret;

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	/* copy the BPF buffer into kernel memory suitable for the transport */
	dma_data = kmemdup(buf, size, GFP_KERNEL);
	if (!dma_data)
		return -ENOMEM;

	/* ctx is passed down as the source tag of this request */
	ret = hid_ops->hid_hw_output_report(hdev,
						dma_data,
						size,
						(__u64)ctx);

	kfree(dma_data);
	return ret;
}
454 
/**
 * hid_bpf_input_report - Inject a HID report in the kernel from a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns %0 on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
		     const size_t buf__sz)
{
	struct hid_device *hdev;
	size_t size = buf__sz;
	int ret;

	/* check arguments; also clamps size to the report's declared length */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, type);
	if (ret)
		return ret;

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	/* 0 presumably means "not from the interrupt path" — confirm against
	 * hid_ops->hid_input_report(); ctx is passed as the source tag */
	return hid_ops->hid_input_report(hdev, type, buf, size, 0, (__u64)ctx);
}
482 __bpf_kfunc_end_defs();
483 
484 /*
485  * The following set contains all functions we agree BPF programs
486  * can use.
487  */
/* kfuncs exposed to struct_ops (tracing-like) HID-BPF programs */
BTF_KFUNCS_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_request, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_input_report, KF_SLEEPABLE)
BTF_KFUNCS_END(hid_bpf_kfunc_ids)

static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_kfunc_ids,
};

/* for syscall HID-BPF */
/* same kfuncs for BPF_PROG_TYPE_SYSCALL, without the KF_SLEEPABLE flags */
BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report)
BTF_ID_FLAGS(func, hid_bpf_input_report)
BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)

static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_syscall_kfunc_ids,
};
515 
/*
 * Called when a HID device connects: allocate the BPF event buffer, but
 * only if at least one attached program actually handles device events.
 */
int hid_bpf_connect_device(struct hid_device *hdev)
{
	bool need_to_allocate = false;
	struct hid_bpf_ops *e;

	/* a single hid_device_event handler is enough to require the buffer */
	rcu_read_lock();
	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
		if (e->hid_device_event) {
			need_to_allocate = true;
			break;
		}
	}
	rcu_read_unlock();

	/* only allocate BPF data if there are programs attached */
	if (!need_to_allocate)
		return 0;

	return hid_bpf_allocate_event_data(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_connect_device);
537 
/* Called on disconnect: free the BPF event buffer and reset the bookkeeping. */
void hid_bpf_disconnect_device(struct hid_device *hdev)
{
	kfree(hdev->bpf.device_data);
	hdev->bpf.device_data = NULL;
	hdev->bpf.allocated_data = 0;
}
EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);
545 
/*
 * Tear down all HID-BPF state of a device being destroyed. The SRCU
 * synchronize/cleanup pair must run after __hid_bpf_ops_destroy_device()
 * so in-flight SRCU readers of prog_list have drained before the struct
 * is cleaned up.
 */
void hid_bpf_destroy_device(struct hid_device *hdev)
{
	if (!hdev)
		return;

	/* mark the device as destroyed in bpf so we don't reattach it */
	hdev->bpf.destroyed = true;

	__hid_bpf_ops_destroy_device(hdev);

	synchronize_srcu(&hdev->bpf.srcu);
	cleanup_srcu_struct(&hdev->bpf.srcu);
}
EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);
560 
/* Initialize the per-device HID-BPF state: prog list, its lock, and SRCU. */
int hid_bpf_device_init(struct hid_device *hdev)
{
	INIT_LIST_HEAD(&hdev->bpf.prog_list);
	mutex_init(&hdev->bpf.prog_list_lock);
	return init_srcu_struct(&hdev->bpf.srcu);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);
568 
569 static int __init hid_bpf_init(void)
570 {
571 	int err;
572 
573 	/* Note: if we exit with an error any time here, we would entirely break HID, which
574 	 * is probably not something we want. So we log an error and return success.
575 	 *
576 	 * This is not a big deal: nobody will be able to use the functionality.
577 	 */
578 
579 	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &hid_bpf_kfunc_set);
580 	if (err) {
581 		pr_warn("error while setting HID BPF tracing kfuncs: %d", err);
582 		return 0;
583 	}
584 
585 	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
586 	if (err) {
587 		pr_warn("error while setting HID BPF syscall kfuncs: %d", err);
588 		return 0;
589 	}
590 
591 	return 0;
592 }
593 
594 late_initcall(hid_bpf_init);
595 MODULE_AUTHOR("Benjamin Tissoires");
596 MODULE_LICENSE("GPL");
597