// SPDX-License-Identifier: GPL-2.0-only

/*
 * HID-BPF support for Linux
 *
 * Copyright (c) 2022-2024 Benjamin Tissoires
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/hid.h>
#include <linux/hid_bpf.h>
#include <linux/init.h>
#include <linux/kfifo.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include "hid_bpf_dispatch.h"

const struct hid_ops *hid_ops;
EXPORT_SYMBOL(hid_ops);
u8 *
dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
                              size_t *buf_size, u32 *size, int interrupt, u64 source,
                              bool from_bpf)
{
        struct hid_bpf_ctx_kern ctx_kern = {
                .ctx = {
                        .hid = hdev,
                        .allocated_size = hdev->bpf.allocated_data,
                        .size = *size,
                },
                .data = hdev->bpf.device_data,
                .from_bpf = from_bpf,
        };
        struct hid_bpf_ops *e;
        int ret;

        if (unlikely(hdev->bpf.destroyed))
                return ERR_PTR(-ENODEV);

        if (type >= HID_REPORT_TYPES)
                return ERR_PTR(-EINVAL);

        /* no program has been attached yet */
        if (!hdev->bpf.device_data)
                return data;

        memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
        memcpy(ctx_kern.data, data, *size);

        rcu_read_lock();
        list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
                if (e->hid_device_event) {
                        ret = e->hid_device_event(&ctx_kern.ctx, type, source);
                        if (ret < 0) {
                                rcu_read_unlock();
                                return ERR_PTR(ret);
                        }

                        if (ret)
                                ctx_kern.ctx.size = ret;
                }
        }
        rcu_read_unlock();

        ret = ctx_kern.ctx.size;
        if (ret) {
                if (ret > ctx_kern.ctx.allocated_size)
                        return ERR_PTR(-EINVAL);

                *size = ret;
        }

        *buf_size = ctx_kern.ctx.allocated_size;
        return ctx_kern.data;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);
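
/*
 * Illustrative sketch (not part of this file): a HID-BPF program plugs
 * into the dispatch loop above via struct hid_bpf_ops::hid_device_event.
 * A negative return discards the event, 0 keeps the current size, and a
 * positive return becomes the new report size. The hook and map names
 * (ignore_first_button, my_ops) and the report layout are hypothetical.
 *
 *      SEC("struct_ops/hid_device_event")
 *      int BPF_PROG(ignore_first_button, struct hid_bpf_ctx *hid_ctx)
 *      {
 *              __u8 *data = hid_bpf_get_data(hid_ctx, 0, 3);
 *
 *              if (!data)
 *                      return 0;       // not our buffer, leave the event alone
 *
 *              data[1] &= 0xfe;        // mask out button 1 (hypothetical layout)
 *              return 0;               // keep the original size
 *      }
 *
 *      SEC(".struct_ops.link")
 *      struct hid_bpf_ops my_ops = {
 *              .hid_device_event = (void *)ignore_first_button,
 *      };
 */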

int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
                                  unsigned char reportnum, u8 *buf,
                                  u32 size, enum hid_report_type rtype,
                                  enum hid_class_request reqtype,
                                  u64 source, bool from_bpf)
{
        struct hid_bpf_ctx_kern ctx_kern = {
                .ctx = {
                        .hid = hdev,
                        .allocated_size = size,
                        .size = size,
                },
                .data = buf,
                .from_bpf = from_bpf,
        };
        struct hid_bpf_ops *e;
        int ret, idx;

        if (unlikely(hdev->bpf.destroyed))
                return -ENODEV;

        if (rtype >= HID_REPORT_TYPES)
                return -EINVAL;

        idx = srcu_read_lock(&hdev->bpf.srcu);
        list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
                                 srcu_read_lock_held(&hdev->bpf.srcu)) {
                if (!e->hid_hw_request)
                        continue;

                ret = e->hid_hw_request(&ctx_kern.ctx, reportnum, rtype, reqtype, source);
                if (ret)
                        goto out;
        }
        ret = 0;

out:
        srcu_read_unlock(&hdev->bpf.srcu, idx);
        return ret;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_raw_requests);

int dispatch_hid_bpf_output_report(struct hid_device *hdev,
                                   __u8 *buf, u32 size, u64 source,
                                   bool from_bpf)
{
        struct hid_bpf_ctx_kern ctx_kern = {
                .ctx = {
                        .hid = hdev,
                        .allocated_size = size,
                        .size = size,
                },
                .data = buf,
                .from_bpf = from_bpf,
        };
        struct hid_bpf_ops *e;
        int ret, idx;

        if (unlikely(hdev->bpf.destroyed))
                return -ENODEV;

        idx = srcu_read_lock(&hdev->bpf.srcu);
        list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
                                 srcu_read_lock_held(&hdev->bpf.srcu)) {
                if (!e->hid_hw_output_report)
                        continue;

                ret = e->hid_hw_output_report(&ctx_kern.ctx, source);
                if (ret)
                        goto out;
        }
        ret = 0;

out:
        srcu_read_unlock(&hdev->bpf.srcu, idx);
        return ret;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_output_report);

const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size)
{
        int ret;
        struct hid_bpf_ctx_kern ctx_kern = {
                .ctx = {
                        .hid = hdev,
                        .size = *size,
                        .allocated_size = HID_MAX_DESCRIPTOR_SIZE,
                },
        };

        if (!hdev->bpf.rdesc_ops)
                goto ignore_bpf;

        ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
        if (!ctx_kern.data)
                goto ignore_bpf;

        memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));

        ret = hdev->bpf.rdesc_ops->hid_rdesc_fixup(&ctx_kern.ctx);
        if (ret < 0)
                goto ignore_bpf;

        if (ret) {
                if (ret > ctx_kern.ctx.allocated_size)
                        goto ignore_bpf;

                *size = ret;
        }

        return krealloc(ctx_kern.data, *size, GFP_KERNEL);

ignore_bpf:
        kfree(ctx_kern.data);
        return rdesc;
}
EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
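
/*
 * Illustrative sketch (not part of this file): a report-descriptor fixup
 * is a single struct hid_bpf_ops::hid_rdesc_fixup hook. Its return value
 * becomes the new descriptor size (0 keeps the original size); the data
 * window matches the HID_MAX_DESCRIPTOR_SIZE buffer allocated above. The
 * hook name and patched offset below are hypothetical.
 *
 *      SEC("struct_ops/hid_rdesc_fixup")
 *      int BPF_PROG(my_rdesc_fixup, struct hid_bpf_ctx *hid_ctx)
 *      {
 *              __u8 *data = hid_bpf_get_data(hid_ctx, 0, 4096);
 *
 *              if (!data)
 *                      return 0;
 *
 *              if (data[39] == 0x7f)   // hypothetical: fix a wrong logical max
 *                      data[39] = 0xff;
 *
 *              return 0;               // keep the original size
 *      }
 */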

static int device_match_id(struct device *dev, const void *id)
{
        struct hid_device *hdev = to_hid_device(dev);

        return hdev->id == *(int *)id;
}

struct hid_device *hid_get_device(unsigned int hid_id)
{
        struct device *dev;

        if (!hid_ops)
                return ERR_PTR(-EINVAL);

        dev = bus_find_device(hid_ops->bus_type, NULL, &hid_id, device_match_id);
        if (!dev)
                return ERR_PTR(-EINVAL);

        return to_hid_device(dev);
}

void hid_put_device(struct hid_device *hid)
{
        put_device(&hid->dev);
}

static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
{
        u8 *alloc_data;
        unsigned int i, j, max_report_len = 0;
        size_t alloc_size = 0;

        /* compute the maximum report length for this device */
        for (i = 0; i < HID_REPORT_TYPES; i++) {
                struct hid_report_enum *report_enum = hdev->report_enum + i;

                for (j = 0; j < HID_MAX_IDS; j++) {
                        struct hid_report *report = report_enum->report_id_hash[j];

                        if (report)
                                max_report_len = max(max_report_len, hid_report_len(report));
                }
        }
        /*
         * Give us a little bit of extra space and some predictability in the
         * buffer length we create. This way, we can tell users that they can
         * work on chunks of 64 bytes of memory without having the bpf verifier
         * scream at them.
         * For example, a device whose largest report is 92 bytes gets a
         * 128-byte buffer.
         */
        alloc_size = DIV_ROUND_UP(max_report_len, 64) * 64;

        alloc_data = kzalloc(alloc_size, GFP_KERNEL);
        if (!alloc_data)
                return -ENOMEM;

        *data = alloc_data;
        *size = alloc_size;

        return 0;
}

int hid_bpf_allocate_event_data(struct hid_device *hdev)
{
        /* hdev->bpf.device_data is already allocated, abort */
        if (hdev->bpf.device_data)
                return 0;

        return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data);
}

int hid_bpf_reconnect(struct hid_device *hdev)
{
        if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status)) {
                /* trigger call to call_hid_bpf_rdesc_fixup() during the next probe */
                hdev->bpf_rsize = 0;
                return device_reprobe(&hdev->dev);
        }

        return 0;
}

/* Disables missing prototype warnings */
__bpf_kfunc_start_defs();

/**
 * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
 *
 * @ctx: The HID-BPF context
 * @offset: The offset within the memory
 * @rdwr_buf_size: the const size of the buffer
 *
 * @returns %NULL on error, an %__u8 memory pointer on success
 */
__bpf_kfunc __u8 *
hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
{
        struct hid_bpf_ctx_kern *ctx_kern;

        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

        if (rdwr_buf_size + offset > ctx->allocated_size)
                return NULL;

        return ctx_kern->data + offset;
}
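
/*
 * Illustrative sketch (not part of this file): from a BPF program,
 * @rdwr_buf_size must be a build-time constant so the verifier can
 * bound the returned slice.
 *
 *      __u8 *data = hid_bpf_get_data(hid_ctx, 0, 8);   // 8 is a constant
 *
 *      if (!data)
 *              return 0;       // offset + size exceeded allocated_size
 *
 *      data[4] = 0x2a;         // read/write within the 8-byte window
 */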

/**
 * hid_bpf_allocate_context - Allocate a context for the given HID device
 *
 * @hid_id: the system unique identifier of the HID device
 *
 * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
 */
__bpf_kfunc struct hid_bpf_ctx *
hid_bpf_allocate_context(unsigned int hid_id)
{
        struct hid_device *hdev;
        struct hid_bpf_ctx_kern *ctx_kern = NULL;

        hdev = hid_get_device(hid_id);
        if (IS_ERR(hdev))
                return NULL;

        ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
        if (!ctx_kern) {
                hid_put_device(hdev);
                return NULL;
        }

        ctx_kern->ctx.hid = hdev;

        return &ctx_kern->ctx;
}

/**
 * hid_bpf_release_context - Release the previously allocated context @ctx
 *
 * @ctx: the HID-BPF context to release
 *
 */
__bpf_kfunc void
hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
        struct hid_bpf_ctx_kern *ctx_kern;
        struct hid_device *hid;

        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
        hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */

        kfree(ctx_kern);

        /* get_device() is called by bus_find_device() */
        hid_put_device(hid);
}
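
/*
 * Illustrative sketch (not part of this file): allocate/release always
 * come in pairs, typically from a sleepable SEC("syscall") program. The
 * program name and its argument struct are hypothetical.
 *
 *      SEC("syscall")
 *      int probe(struct my_args *args)         // hypothetical args struct
 *      {
 *              struct hid_bpf_ctx *ctx;
 *
 *              ctx = hid_bpf_allocate_context(args->hid_id);
 *              if (!ctx)
 *                      return -1;              // device vanished or OOM
 *
 *              // ... hid_bpf_hw_request() / hid_bpf_input_report() ...
 *
 *              hid_bpf_release_context(ctx);
 *              return 0;
 *      }
 */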

static int
__hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz,
                          enum hid_report_type rtype)
{
        struct hid_report_enum *report_enum;
        struct hid_report *report;
        u32 report_len;

        /* check arguments */
        if (!hid_ops)
                return -EINVAL;

        switch (rtype) {
        case HID_INPUT_REPORT:
        case HID_OUTPUT_REPORT:
        case HID_FEATURE_REPORT:
                break;
        default:
                return -EINVAL;
        }

        if (*buf__sz < 1)
                return -EINVAL;

        report_enum = ctx->hid->report_enum + rtype;
        report = hid_ops->hid_get_report(report_enum, buf);
        if (!report)
                return -EINVAL;

        report_len = hid_report_len(report);

        if (*buf__sz > report_len)
                *buf__sz = report_len;

        return 0;
}

/**
 * hid_bpf_hw_request - Communicate with a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 * @rtype: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @reqtype: the type of the request (%HID_REQ_GET_REPORT, %HID_REQ_SET_REPORT, ...)
 *
 * @returns the number of bytes transferred on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
                   enum hid_report_type rtype, enum hid_class_request reqtype)
{
        struct hid_bpf_ctx_kern *ctx_kern;
        size_t size = buf__sz;
        u8 *dma_data;
        int ret;

        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

        if (ctx_kern->from_bpf)
                return -EDEADLOCK;

        /* check arguments */
        ret = __hid_bpf_hw_check_params(ctx, buf, &size, rtype);
        if (ret)
                return ret;

        switch (reqtype) {
        case HID_REQ_GET_REPORT:
        case HID_REQ_GET_IDLE:
        case HID_REQ_GET_PROTOCOL:
        case HID_REQ_SET_REPORT:
        case HID_REQ_SET_IDLE:
        case HID_REQ_SET_PROTOCOL:
                break;
        default:
                return -EINVAL;
        }

        dma_data = kmemdup(buf, size, GFP_KERNEL);
        if (!dma_data)
                return -ENOMEM;

        ret = hid_ops->hid_hw_raw_request(ctx->hid,
                                          dma_data[0],
                                          dma_data,
                                          size,
                                          rtype,
                                          reqtype,
                                          (u64)(long)ctx,
                                          true); /* prevent infinite recursions */

        if (ret > size)
                ret = size;
        if (ret > 0)
                memcpy(buf, dma_data, ret);

        kfree(dma_data);
        return ret;
}
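
/*
 * Illustrative sketch (not part of this file): fetching a feature report
 * from a sleepable context. The report ID (5) and 8-byte length are
 * hypothetical; buf[0] must hold the report ID the device expects.
 *
 *      __u8 buf[8] = { 5 };    // report ID in the first byte
 *      int ret;
 *
 *      ret = hid_bpf_hw_request(ctx, buf, sizeof(buf),
 *                               HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *      if (ret < 0)
 *              return ret;     // transfer failed
 *
 *      // buf now holds up to ret bytes of the feature report
 */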

/**
 * hid_bpf_hw_output_report - Send an output report to a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns the number of bytes transferred on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
{
        struct hid_bpf_ctx_kern *ctx_kern;
        size_t size = buf__sz;
        u8 *dma_data;
        int ret;

        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
        if (ctx_kern->from_bpf)
                return -EDEADLOCK;

        /* check arguments */
        ret = __hid_bpf_hw_check_params(ctx, buf, &size, HID_OUTPUT_REPORT);
        if (ret)
                return ret;

        dma_data = kmemdup(buf, size, GFP_KERNEL);
        if (!dma_data)
                return -ENOMEM;

        ret = hid_ops->hid_hw_output_report(ctx->hid, dma_data, size, (u64)(long)ctx, true);

        kfree(dma_data);
        return ret;
}
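
/*
 * Illustrative sketch (not part of this file): sending a 2-byte output
 * report. The report ID (1) and payload meaning are hypothetical.
 *
 *      __u8 buf[2] = { 1, 0x03 };      // report ID 1, hypothetical LED bits
 *      int ret;
 *
 *      ret = hid_bpf_hw_output_report(ctx, buf, sizeof(buf));
 *      if (ret != sizeof(buf))
 *              return ret < 0 ? ret : -1;      // short or failed transfer
 */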

static int
__hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
                       size_t size, bool lock_already_taken)
{
        struct hid_bpf_ctx_kern *ctx_kern;
        int ret;

        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
        if (ctx_kern->from_bpf)
                return -EDEADLOCK;

        /* check arguments */
        ret = __hid_bpf_hw_check_params(ctx, buf, &size, type);
        if (ret)
                return ret;

        return hid_ops->hid_input_report(ctx->hid, type, buf, size, size, 0, (u64)(long)ctx, true,
                                         lock_already_taken);
}

/**
 * hid_bpf_try_input_report - Inject a HID report into the kernel from a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns %0 on success, a negative error code otherwise. This function will immediately
 * fail if the device is not available, thus can be safely used in IRQ context.
 */
__bpf_kfunc int
hid_bpf_try_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
                         const size_t buf__sz)
{
        struct hid_bpf_ctx_kern *ctx_kern;
        bool from_hid_event_hook;

        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
        from_hid_event_hook = ctx_kern->data && ctx_kern->data == ctx->hid->bpf.device_data;

        return __hid_bpf_input_report(ctx, type, buf, buf__sz, from_hid_event_hook);
}

/**
 * hid_bpf_input_report - Inject a HID report into the kernel from a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns %0 on success, a negative error code otherwise. This function will wait for the
 * device to be available before injecting the event, thus needs to be called in sleepable
 * context.
 */
__bpf_kfunc int
hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
                     const size_t buf__sz)
{
        int ret;

        ret = down_interruptible(&ctx->hid->driver_input_lock);
        if (ret)
                return ret;

        /* check arguments */
        ret = __hid_bpf_input_report(ctx, type, buf, buf__sz, true /* lock_already_taken */);

        up(&ctx->hid->driver_input_lock);

        return ret;
}
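
/*
 * Illustrative sketch (not part of this file): injecting a synthetic
 * 3-byte input report. The layout (report ID 1 plus two data bytes) is
 * hypothetical. Use hid_bpf_try_input_report() instead when running in
 * IRQ context, e.g. from a hid_device_event hook.
 *
 *      __u8 report[3] = { 1, 0x00, 0x10 };
 *
 *      return hid_bpf_input_report(ctx, HID_INPUT_REPORT,
 *                                  report, sizeof(report));
 */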
__bpf_kfunc_end_defs();

/*
 * The following set contains all functions we agree BPF programs
 * can use.
 */
BTF_KFUNCS_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_request, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_input_report, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_try_input_report)
BTF_KFUNCS_END(hid_bpf_kfunc_ids)

static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
        .owner = THIS_MODULE,
        .set   = &hid_bpf_kfunc_ids,
};

/* for syscall HID-BPF */
BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report)
BTF_ID_FLAGS(func, hid_bpf_input_report)
BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)

static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
        .owner = THIS_MODULE,
        .set   = &hid_bpf_syscall_kfunc_ids,
};

int hid_bpf_connect_device(struct hid_device *hdev)
{
        bool need_to_allocate = false;
        struct hid_bpf_ops *e;

        rcu_read_lock();
        list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
                if (e->hid_device_event) {
                        need_to_allocate = true;
                        break;
                }
        }
        rcu_read_unlock();

        /* only allocate BPF data if there are programs attached */
        if (!need_to_allocate)
                return 0;

        return hid_bpf_allocate_event_data(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_connect_device);

void hid_bpf_disconnect_device(struct hid_device *hdev)
{
        kfree(hdev->bpf.device_data);
        hdev->bpf.device_data = NULL;
        hdev->bpf.allocated_data = 0;
}
EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);

void hid_bpf_destroy_device(struct hid_device *hdev)
{
        if (!hdev)
                return;

        /* mark the device as destroyed in bpf so we don't reattach it */
        hdev->bpf.destroyed = true;

        __hid_bpf_ops_destroy_device(hdev);

        synchronize_srcu(&hdev->bpf.srcu);
        cleanup_srcu_struct(&hdev->bpf.srcu);
}
EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);

int hid_bpf_device_init(struct hid_device *hdev)
{
        INIT_LIST_HEAD(&hdev->bpf.prog_list);
        mutex_init(&hdev->bpf.prog_list_lock);
        return init_srcu_struct(&hdev->bpf.srcu);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);

static int __init hid_bpf_init(void)
{
        int err;

        /* Note: if we exit with an error any time here, we would entirely break HID, which
         * is probably not something we want. So we log an error and return success.
         *
         * This is not a big deal: nobody will be able to use the functionality.
         */

        err = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &hid_bpf_kfunc_set);
        if (err) {
                pr_warn("error while setting HID BPF tracing kfuncs: %d\n", err);
                return 0;
        }

        err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
        if (err) {
                pr_warn("error while setting HID BPF syscall kfuncs: %d\n", err);
                return 0;
        }

        return 0;
}

late_initcall(hid_bpf_init);
MODULE_AUTHOR("Benjamin Tissoires");
MODULE_LICENSE("GPL");