// SPDX-License-Identifier: GPL-2.0-only

/*
 *  HID-BPF support for Linux
 *
 *  Copyright (c) 2022 Benjamin Tissoires
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/hid.h>
#include <linux/hid_bpf.h>
#include <linux/init.h>
#include <linux/kfifo.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include "hid_bpf_dispatch.h"
#include "entrypoints/entrypoints.lskel.h"

struct hid_ops *hid_ops;
EXPORT_SYMBOL(hid_ops);

/**
 * hid_bpf_device_event - Called whenever an event is coming in from the device
 *
 * @ctx: The HID-BPF context
 *
 * @return %0 on success and keep processing; a positive value to change the
 * size of the incoming data buffer; a negative error code to interrupt the
 * processing of this event
 *
 * Declare an %fmod_ret tracing bpf program to this function and attach this
 * program through hid_bpf_attach_prog() to have this helper called for
 * any incoming event from the device itself.
 *
 * The function is called in IRQ context, so we cannot sleep.
 */
/* never used by the kernel but declared so we can load and attach a tracepoint */
__weak noinline int hid_bpf_device_event(struct hid_bpf_ctx *ctx)
{
	return 0;
}
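/*
 * A minimal sketch of such a program, as it could appear in a BPF object.
 * Illustrative only: the section name and the BPF_PROG() macro follow the
 * usual fmod_ret conventions, but the program name, offset, size and byte
 * manipulation below are made up:
 *
 *	SEC("fmod_ret/hid_bpf_device_event")
 *	int BPF_PROG(filter_event, struct hid_bpf_ctx *hctx)
 *	{
 *		__u8 *data = hid_bpf_get_data(hctx, 0, 4);
 *
 *		if (!data)
 *			return 0;	// EPERM-style check
 *
 *		data[1] = 0;		// e.g. zero out the X axis of a mouse
 *
 *		return 0;		// keep processing, size unchanged
 *	}
 */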
u8 *
dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
			      u32 *size, int interrupt)
{
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.report_type = type,
			.allocated_size = hdev->bpf.allocated_data,
			.size = *size,
		},
		.data = hdev->bpf.device_data,
	};
	struct hid_bpf_ops *e;
	int ret;

	if (type >= HID_REPORT_TYPES)
		return ERR_PTR(-EINVAL);

	/* no program has been attached yet */
	if (!hdev->bpf.device_data)
		return data;

	memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
	memcpy(ctx_kern.data, data, *size);

	rcu_read_lock();
	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
		if (e->hid_device_event) {
			ret = e->hid_device_event(&ctx_kern.ctx, type);
			if (ret < 0) {
				rcu_read_unlock();
				return ERR_PTR(ret);
			}

			if (ret)
				ctx_kern.ctx.retval = ret;
		}
	}
	rcu_read_unlock();

	ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_DEVICE_EVENT, &ctx_kern);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = ctx_kern.ctx.retval;
	if (ret) {
		if (ret > ctx_kern.ctx.allocated_size)
			return ERR_PTR(-EINVAL);

		*size = ret;
	}

	return ctx_kern.data;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);

/**
 * hid_bpf_rdesc_fixup - Called when the probe function parses the report
 * descriptor of the HID device
 *
 * @ctx: The HID-BPF context
 *
 * @return %0 on success and keep processing; a positive value to change the
 * size of the report descriptor; a negative error code to interrupt the
 * processing of this device
 *
 * Declare an %fmod_ret tracing bpf program to this function and attach this
 * program through hid_bpf_attach_prog() to have this helper called before any
 * parsing of the report descriptor by HID.
 */
/* never used by the kernel but declared so we can load and attach a tracepoint */
__weak noinline int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
{
	return 0;
}
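/*
 * A sketch of a report descriptor fixup. Illustrative only: the byte
 * offsets and the usage values being swapped are invented for the example,
 * a real program would match them against the actual descriptor:
 *
 *	SEC("fmod_ret/hid_bpf_rdesc_fixup")
 *	int BPF_PROG(rdesc_fixup, struct hid_bpf_ctx *hctx)
 *	{
 *		__u8 *data = hid_bpf_get_data(hctx, 0, 4096);
 *
 *		if (!data)
 *			return 0;
 *
 *		if (data[39] == 0x30 && data[41] == 0x31) {
 *			data[39] = 0x31;	// swap Usage (X)
 *			data[41] = 0x30;	// and Usage (Y)
 *		}
 *
 *		return 0;		// keep the descriptor size unchanged
 *	}
 */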
u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
{
	int ret;
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.size = *size,
			.allocated_size = HID_MAX_DESCRIPTOR_SIZE,
		},
	};

	ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
	if (!ctx_kern.data)
		goto ignore_bpf;

	memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));

	if (hdev->bpf.rdesc_ops)
		ret = hdev->bpf.rdesc_ops->hid_rdesc_fixup(&ctx_kern.ctx);
	else
		ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_RDESC_FIXUP, &ctx_kern);
	if (ret < 0)
		goto ignore_bpf;

	if (ret) {
		if (ret > ctx_kern.ctx.allocated_size)
			goto ignore_bpf;

		*size = ret;
	}

	rdesc = krealloc(ctx_kern.data, *size, GFP_KERNEL);

	return rdesc;

 ignore_bpf:
	kfree(ctx_kern.data);
	return kmemdup(rdesc, *size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);

static int device_match_id(struct device *dev, const void *id)
{
	struct hid_device *hdev = to_hid_device(dev);

	return hdev->id == *(int *)id;
}

struct hid_device *hid_get_device(unsigned int hid_id)
{
	struct device *dev;

	if (!hid_ops)
		return ERR_PTR(-EINVAL);

	dev = bus_find_device(hid_ops->bus_type, NULL, &hid_id, device_match_id);
	if (!dev)
		return ERR_PTR(-EINVAL);

	return to_hid_device(dev);
}

void hid_put_device(struct hid_device *hid)
{
	put_device(&hid->dev);
}

static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
{
	u8 *alloc_data;
	unsigned int i, j, max_report_len = 0;
	size_t alloc_size = 0;

	/* compute the maximum report length for this device */
	for (i = 0; i < HID_REPORT_TYPES; i++) {
		struct hid_report_enum *report_enum = hdev->report_enum + i;

		for (j = 0; j < HID_MAX_IDS; j++) {
			struct hid_report *report = report_enum->report_id_hash[j];

			if (report)
				max_report_len = max(max_report_len, hid_report_len(report));
		}
	}

	/*
	 * Give us a little bit of extra space and some predictability in the
	 * buffer length we create. This way, we can tell users that they can
	 * work on chunks of 64 bytes of memory without having the bpf verifier
	 * scream at them.
	 */
	alloc_size = DIV_ROUND_UP(max_report_len, 64) * 64;

	alloc_data = kzalloc(alloc_size, GFP_KERNEL);
	if (!alloc_data)
		return -ENOMEM;

	*data = alloc_data;
	*size = alloc_size;

	return 0;
}

int hid_bpf_allocate_event_data(struct hid_device *hdev)
{
	/* hdev->bpf.device_data is already allocated, nothing to do */
	if (hdev->bpf.device_data)
		return 0;

	return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data);
}

int hid_bpf_reconnect(struct hid_device *hdev)
{
	if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
		return device_reprobe(&hdev->dev);

	return 0;
}

static int do_hid_bpf_attach_prog(struct hid_device *hdev, int prog_fd, struct bpf_prog *prog,
				  __u32 flags)
{
	int fd, err, prog_type;

	prog_type = hid_bpf_get_prog_attach_type(prog);
	if (prog_type < 0)
		return prog_type;

	if (prog_type >= HID_BPF_PROG_TYPE_MAX)
		return -EINVAL;

	if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
		err = hid_bpf_allocate_event_data(hdev);
		if (err)
			return err;
	}

	fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, prog, flags);
	if (fd < 0)
		return fd;

	if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
		err = hid_bpf_reconnect(hdev);
		if (err) {
			close_fd(fd);
			return err;
		}
	}

	return fd;
}

/* Disables missing prototype warnings */
__bpf_kfunc_start_defs();

/**
 * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
 *
 * @ctx: The HID-BPF context
 * @offset: The offset within the memory
 * @rdwr_buf_size: the const size of the buffer
 *
 * @returns %NULL on error, an %__u8 memory pointer on success
 */
__bpf_kfunc __u8 *
hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
{
	struct hid_bpf_ctx_kern *ctx_kern;

	if (!ctx)
		return NULL;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

	if (rdwr_buf_size + offset > ctx->allocated_size)
		return NULL;

	return ctx_kern->data + offset;
}
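/*
 * A typical call site inside a device event program (a sketch: the offset
 * and size are arbitrary, but @rdwr_buf_size must be a compile-time
 * constant so the verifier can bound the access):
 *
 *	__u8 *data = hid_bpf_get_data(hctx, 0, 64);
 *
 *	if (!data)
 *		return 0;
 *
 * Thanks to the 64-byte rounding in __hid_bpf_allocate_data(), any read
 * or write within data[0..63] is then accepted by the verifier.
 */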
/**
 * hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
 *
 * @hid_id: the system unique identifier of the HID device
 * @prog_fd: an fd in the user process representing the program to attach
 * @flags: any logical OR combination of &enum hid_bpf_attach_flags
 *
 * @returns an fd of a bpf_link object on success (> %0), an error code otherwise.
 * Closing this fd will detach the program from the HID device (unless the bpf_link
 * is pinned to the BPF file system).
 */
/* called from syscall */
__bpf_kfunc int
hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
{
	struct hid_device *hdev;
	struct bpf_prog *prog;
	int err, fd;

	if ((flags & ~HID_BPF_FLAG_MASK))
		return -EINVAL;

	hdev = hid_get_device(hid_id);
	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	/*
	 * take a ref on the prog itself, it will be released
	 * on error or when the program is detached
	 */
	prog = bpf_prog_get(prog_fd);
	if (IS_ERR(prog)) {
		err = PTR_ERR(prog);
		goto out_dev_put;
	}

	fd = do_hid_bpf_attach_prog(hdev, prog_fd, prog, flags);
	if (fd < 0) {
		err = fd;
		goto out_prog_put;
	}

	return fd;

 out_prog_put:
	bpf_prog_put(prog);
 out_dev_put:
	hid_put_device(hdev);
	return err;
}
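/*
 * Because this is a kfunc, the attach request itself comes from a BPF
 * program of type SEC("syscall"), executed with BPF_PROG_TEST_RUN
 * (bpf_prog_test_run_opts() in libbpf). A sketch, assuming a user-defined
 * context struct; the struct name and its fields are illustrative, not
 * part of the kernel API:
 *
 *	struct attach_prog_args {
 *		int prog_fd;
 *		unsigned int hid;
 *		int retval;
 *	};
 *
 *	SEC("syscall")
 *	int attach_prog(struct attach_prog_args *args)
 *	{
 *		args->retval = hid_bpf_attach_prog(args->hid,
 *						   args->prog_fd,
 *						   0);
 *		return 0;
 *	}
 */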
/**
 * hid_bpf_allocate_context - Allocate a context for the given HID device
 *
 * @hid_id: the system unique identifier of the HID device
 *
 * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
 */
__bpf_kfunc struct hid_bpf_ctx *
hid_bpf_allocate_context(unsigned int hid_id)
{
	struct hid_device *hdev;
	struct hid_bpf_ctx_kern *ctx_kern = NULL;

	hdev = hid_get_device(hid_id);
	if (IS_ERR(hdev))
		return NULL;

	ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
	if (!ctx_kern) {
		hid_put_device(hdev);
		return NULL;
	}

	ctx_kern->ctx.hid = hdev;

	return &ctx_kern->ctx;
}

/**
 * hid_bpf_release_context - Release the previously allocated context @ctx
 *
 * @ctx: the HID-BPF context to release
 */
__bpf_kfunc void
hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	struct hid_device *hid;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */

	kfree(ctx_kern);

	/* get_device() is called by bus_find_device() */
	hid_put_device(hid);
}

static int
__hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz,
			  enum hid_report_type rtype)
{
	struct hid_report_enum *report_enum;
	struct hid_report *report;
	struct hid_device *hdev;
	u32 report_len;

	/* check arguments */
	if (!ctx || !hid_ops || !buf)
		return -EINVAL;

	switch (rtype) {
	case HID_INPUT_REPORT:
	case HID_OUTPUT_REPORT:
	case HID_FEATURE_REPORT:
		break;
	default:
		return -EINVAL;
	}

	if (*buf__sz < 1)
		return -EINVAL;

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	report_enum = hdev->report_enum + rtype;
	report = hid_ops->hid_get_report(report_enum, buf);
	if (!report)
		return -EINVAL;

	report_len = hid_report_len(report);

	/* never transfer more than the report length */
	if (*buf__sz > report_len)
		*buf__sz = report_len;

	return 0;
}

/**
 * hid_bpf_hw_request - Communicate with a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 * @rtype: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @reqtype: the type of the request (%HID_REQ_GET_REPORT, %HID_REQ_SET_REPORT, ...)
 *
 * @returns the number of bytes transferred on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
		   enum hid_report_type rtype, enum hid_class_request reqtype)
{
	struct hid_device *hdev;
	size_t size = buf__sz;
	u8 *dma_data;
	int ret;

	/* check arguments */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, rtype);
	if (ret)
		return ret;

	switch (reqtype) {
	case HID_REQ_GET_REPORT:
	case HID_REQ_GET_IDLE:
	case HID_REQ_GET_PROTOCOL:
	case HID_REQ_SET_REPORT:
	case HID_REQ_SET_IDLE:
	case HID_REQ_SET_PROTOCOL:
		break;
	default:
		return -EINVAL;
	}

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	dma_data = kmemdup(buf, size, GFP_KERNEL);
	if (!dma_data)
		return -ENOMEM;

	ret = hid_ops->hid_hw_raw_request(hdev,
					  dma_data[0], /* report ID: first byte of the buffer */
					  dma_data,
					  size,
					  rtype,
					  reqtype);

	if (ret > 0)
		memcpy(buf, dma_data, ret);

	kfree(dma_data);
	return ret;
}
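/*
 * Typical usage from a SEC("syscall") program, sketched under the same
 * assumptions as the attach example above (the args struct is
 * user-defined; error handling trimmed). The first byte of the buffer
 * must contain the report ID:
 *
 *	struct hid_bpf_ctx *ctx;
 *	__u8 buf[64] = { 2 };	// report ID 2, for instance
 *	int ret;
 *
 *	ctx = hid_bpf_allocate_context(args->hid);
 *	if (!ctx)
 *		return -1;
 *
 *	ret = hid_bpf_hw_request(ctx, buf, sizeof(buf),
 *				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *	// on success, ret bytes of the feature report are now in buf
 *
 *	hid_bpf_release_context(ctx);
 */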
/**
 * hid_bpf_hw_output_report - Send an output report to a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns the number of bytes transferred on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
{
	struct hid_device *hdev;
	size_t size = buf__sz;
	u8 *dma_data;
	int ret;

	/* check arguments */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, HID_OUTPUT_REPORT);
	if (ret)
		return ret;

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	dma_data = kmemdup(buf, size, GFP_KERNEL);
	if (!dma_data)
		return -ENOMEM;

	ret = hid_ops->hid_hw_output_report(hdev,
					    dma_data,
					    size);

	kfree(dma_data);
	return ret;
}

/**
 * hid_bpf_input_report - Inject a HID report in the kernel from a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns %0 on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
		     const size_t buf__sz)
{
	struct hid_device *hdev;
	size_t size = buf__sz;
	int ret;

	/* check arguments */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, type);
	if (ret)
		return ret;

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	return hid_ops->hid_input_report(hdev, type, buf, size, 0);
}
__bpf_kfunc_end_defs();
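/*
 * As with hid_bpf_hw_request(), injection is driven from a SEC("syscall")
 * program. A sketch; the report ID and payload bytes are invented for the
 * example and must match a report the device actually declares:
 *
 *	struct hid_bpf_ctx *ctx;
 *	__u8 buf[5] = { 1, 0, 0, 0x2a, 0 };	// report ID 1 + payload
 *
 *	ctx = hid_bpf_allocate_context(args->hid);
 *	if (!ctx)
 *		return -1;
 *
 *	hid_bpf_input_report(ctx, HID_INPUT_REPORT, buf, sizeof(buf));
 *	hid_bpf_release_context(ctx);
 */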
/*
 * The following set contains all functions we agree BPF programs
 * can use.
 */
BTF_KFUNCS_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_request, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_input_report, KF_SLEEPABLE)
BTF_KFUNCS_END(hid_bpf_kfunc_ids)

static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_kfunc_ids,
};

/* our HID-BPF entrypoints */
BTF_SET8_START(hid_bpf_fmodret_ids)
BTF_ID_FLAGS(func, hid_bpf_device_event)
BTF_ID_FLAGS(func, hid_bpf_rdesc_fixup)
BTF_ID_FLAGS(func, __hid_bpf_tail_call)
BTF_SET8_END(hid_bpf_fmodret_ids)

static const struct btf_kfunc_id_set hid_bpf_fmodret_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_fmodret_ids,
};

/* for syscall HID-BPF */
BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_attach_prog)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report)
BTF_ID_FLAGS(func, hid_bpf_input_report)
BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)

static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_syscall_kfunc_ids,
};

int hid_bpf_connect_device(struct hid_device *hdev)
{
	bool need_to_allocate = false;
	struct hid_bpf_ops *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
		if (e->hid_device_event) {
			need_to_allocate = true;
			break;
		}
	}
	if (rcu_dereference(hdev->bpf.progs[HID_BPF_PROG_TYPE_DEVICE_EVENT]))
		need_to_allocate = true;
	rcu_read_unlock();

	/* only allocate BPF data if there are programs attached */
	if (!need_to_allocate)
		return 0;

	return hid_bpf_allocate_event_data(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_connect_device);

void hid_bpf_disconnect_device(struct hid_device *hdev)
{
	kfree(hdev->bpf.device_data);
	hdev->bpf.device_data = NULL;
	hdev->bpf.allocated_data = 0;
}
EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);

void hid_bpf_destroy_device(struct hid_device *hdev)
{
	if (!hdev)
		return;

	/* mark the device as destroyed in bpf so we don't reattach it */
	hdev->bpf.destroyed = true;

	__hid_bpf_destroy_device(hdev);
	__hid_bpf_ops_destroy_device(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);

void hid_bpf_device_init(struct hid_device *hdev)
{
	spin_lock_init(&hdev->bpf.progs_lock);
	INIT_LIST_HEAD(&hdev->bpf.prog_list);
	mutex_init(&hdev->bpf.prog_list_lock);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);

static int __init hid_bpf_init(void)
{
	int err;

	/* Note: if we exit with an error any time here, we would entirely break HID,
	 * which is probably not something we want. So we log an error and return
	 * success.
	 *
	 * This is not a big deal: the syscall allowing to attach a BPF program to a
	 * HID device will not be available, so nobody will be able to use the
	 * functionality.
	 */

	err = register_btf_fmodret_id_set(&hid_bpf_fmodret_set);
	if (err) {
		pr_warn("error while registering fmodret entrypoints: %d\n", err);
		return 0;
	}

	err = hid_bpf_preload_skel();
	if (err) {
		pr_warn("error while preloading HID BPF dispatcher: %d\n", err);
		return 0;
	}

	/* register tracing kfuncs after we are sure we can load our preloaded bpf program */
	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &hid_bpf_kfunc_set);
	if (err) {
		pr_warn("error while setting HID BPF tracing kfuncs: %d\n", err);
		return 0;
	}

	/* register struct_ops kfuncs after we are sure we can load our preloaded bpf program */
	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &hid_bpf_kfunc_set);
	if (err) {
		pr_warn("error while setting HID BPF struct_ops kfuncs: %d\n", err);
		return 0;
	}

	/* register syscalls after we are sure we can load our preloaded bpf program */
	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
	if (err) {
		pr_warn("error while setting HID BPF syscall kfuncs: %d\n", err);
		return 0;
	}

	return 0;
}

static void __exit hid_bpf_exit(void)
{
	/* HID depends on us, so if we hit this code, we are guaranteed that hid
	 * has been removed and thus we do not need to clear the HID devices
	 */
	hid_bpf_free_links_and_skel();
}

late_initcall(hid_bpf_init);
module_exit(hid_bpf_exit);
MODULE_AUTHOR("Benjamin Tissoires");
MODULE_LICENSE("GPL");