// SPDX-License-Identifier: GPL-2.0-only

/*
 *  HID-BPF support for Linux
 *
 *  Copyright (c) 2022 Benjamin Tissoires
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/hid.h>
#include <linux/hid_bpf.h>
#include <linux/init.h>
#include <linux/kfifo.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include "hid_bpf_dispatch.h"
#include "entrypoints/entrypoints.lskel.h"

struct hid_bpf_ops *hid_bpf_ops;
EXPORT_SYMBOL(hid_bpf_ops);

/**
 * hid_bpf_device_event - Called whenever an event is coming in from the device
 *
 * @ctx: The HID-BPF context
 *
 * @return %0 on success and keep processing; a positive value to change the
 * size of the incoming data; a negative error code to interrupt the
 * processing of this event
 *
 * Declare an %fmod_ret tracing bpf program to this function and attach this
 * program through hid_bpf_attach_prog() to have this helper called for
 * any incoming event from the device itself.
 *
 * The function is called in IRQ context, so we cannot sleep.
 */
/* never used by the kernel but declared so we can load and attach a tracepoint */
__weak noinline int hid_bpf_device_event(struct hid_bpf_ctx *ctx)
{
	return 0;
}

u8 *
dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
			      u32 *size, int interrupt)
{
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.report_type = type,
			.allocated_size = hdev->bpf.allocated_data,
			.size = *size,
		},
		.data = hdev->bpf.device_data,
	};
	int ret;

	if (type >= HID_REPORT_TYPES)
		return ERR_PTR(-EINVAL);

	/* no program has been attached yet */
	if (!hdev->bpf.device_data)
		return data;

	memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
	memcpy(ctx_kern.data, data, *size);

	ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_DEVICE_EVENT, &ctx_kern);
	if (ret < 0)
		return ERR_PTR(ret);

	if (ret) {
		if (ret > ctx_kern.ctx.allocated_size)
			return ERR_PTR(-EINVAL);

		*size = ret;
	}

	return ctx_kern.data;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);
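/*
 * A minimal sketch of a program attached to hid_bpf_device_event() above.
 * Such a program lives in a separate BPF object (built with libbpf), not in
 * this file; the program name, the 9-byte report layout and the assumption
 * that the kfunc declarations come from a local helper header are purely
 * illustrative.
 *
 *	SEC("fmod_ret/hid_bpf_device_event")
 *	int BPF_PROG(hid_invert_y, struct hid_bpf_ctx *hid_ctx)
 *	{
 *		s16 y;
 *		// ask for a verifier-checked, fixed-size window into the event
 *		__u8 *data = hid_bpf_get_data(hid_ctx, 0, 9);
 *
 *		if (!data)
 *			return 0;	// EPERM check
 *
 *		// invert the Y coordinate assumed to live in bytes 3-4
 *		y = -(s16)(data[3] | (data[4] << 8));
 *		data[3] = y & 0xFF;
 *		data[4] = (y >> 8) & 0xFF;
 *
 *		return 0;	// keep processing, size unchanged
 *	}
 */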
/**
 * hid_bpf_rdesc_fixup - Called when the probe function parses the report
 * descriptor of the HID device
 *
 * @ctx: The HID-BPF context
 *
 * @return %0 on success and keep processing; a positive value to change the
 * size of the report descriptor; a negative error code to skip the fixup and
 * keep the original report descriptor
 *
 * Declare an %fmod_ret tracing bpf program to this function and attach this
 * program through hid_bpf_attach_prog() to have this helper called before any
 * parsing of the report descriptor by HID.
 */
/* never used by the kernel but declared so we can load and attach a tracepoint */
__weak noinline int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
{
	return 0;
}

u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
{
	int ret;
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.size = *size,
			.allocated_size = HID_MAX_DESCRIPTOR_SIZE,
		},
	};

	ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
	if (!ctx_kern.data)
		goto ignore_bpf;

	memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));

	ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_RDESC_FIXUP, &ctx_kern);
	if (ret < 0)
		goto ignore_bpf;

	if (ret) {
		if (ret > ctx_kern.ctx.allocated_size)
			goto ignore_bpf;

		*size = ret;
	}

	rdesc = krealloc(ctx_kern.data, *size, GFP_KERNEL);

	return rdesc;

ignore_bpf:
	kfree(ctx_kern.data);
	return kmemdup(rdesc, *size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
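/*
 * A minimal sketch of a report-descriptor fixup program attached to
 * hid_bpf_rdesc_fixup() above. It also lives in a separate BPF object; the
 * program name, the patched offset and the value are illustrative
 * assumptions, and 4096 mirrors HID_MAX_DESCRIPTOR_SIZE.
 *
 *	SEC("fmod_ret/hid_bpf_rdesc_fixup")
 *	int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hid_ctx)
 *	{
 *		__u8 *data = hid_bpf_get_data(hid_ctx, 0, 4096);
 *
 *		if (!data)
 *			return 0;	// EPERM check
 *
 *		// pretend byte 17 is a Logical Maximum we want to raise
 *		if (data[17] == 0x7f)
 *			data[17] = 0xff;
 *
 *		return 0;	// 0: keep the (possibly patched) descriptor size
 *	}
 */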
/**
 * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
 *
 * @ctx: The HID-BPF context
 * @offset: The offset within the memory
 * @rdwr_buf_size: the const size of the buffer
 *
 * @returns %NULL on error, an %__u8 memory pointer on success
 */
noinline __u8 *
hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
{
	struct hid_bpf_ctx_kern *ctx_kern;

	if (!ctx)
		return NULL;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

	if (rdwr_buf_size + offset > ctx->allocated_size)
		return NULL;

	return ctx_kern->data + offset;
}

/*
 * The following set contains all functions we agree BPF programs
 * can use.
 */
BTF_SET8_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
BTF_SET8_END(hid_bpf_kfunc_ids)

static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &hid_bpf_kfunc_ids,
};

static int device_match_id(struct device *dev, const void *id)
{
	struct hid_device *hdev = to_hid_device(dev);

	return hdev->id == *(int *)id;
}

static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
{
	u8 *alloc_data;
	unsigned int i, j, max_report_len = 0;
	size_t alloc_size = 0;

	/* compute the maximum report length for this device */
	for (i = 0; i < HID_REPORT_TYPES; i++) {
		struct hid_report_enum *report_enum = hdev->report_enum + i;

		for (j = 0; j < HID_MAX_IDS; j++) {
			struct hid_report *report = report_enum->report_id_hash[j];

			if (report)
				max_report_len = max(max_report_len, hid_report_len(report));
		}
	}

	/*
	 * Give us a little bit of extra space and some predictability in the
	 * buffer length we create. This way, we can tell users that they can
	 * work on chunks of 64 bytes of memory without having the bpf verifier
	 * scream at them.
	 */
	alloc_size = DIV_ROUND_UP(max_report_len, 64) * 64;

	alloc_data = kzalloc(alloc_size, GFP_KERNEL);
	if (!alloc_data)
		return -ENOMEM;

	*data = alloc_data;
	*size = alloc_size;

	return 0;
}

static int hid_bpf_allocate_event_data(struct hid_device *hdev)
{
	/* hdev->bpf.device_data is already allocated, abort */
	if (hdev->bpf.device_data)
		return 0;

	return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data);
}

int hid_bpf_reconnect(struct hid_device *hdev)
{
	if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
		return device_reprobe(&hdev->dev);

	return 0;
}

/**
 * hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
 *
 * @hid_id: the system unique identifier of the HID device
 * @prog_fd: an fd in the user process representing the program to attach
 * @flags: any logical OR combination of &enum hid_bpf_attach_flags
 *
 * @returns an fd of a bpf_link object on success (> %0), an error code otherwise.
 * Closing this fd will detach the program from the HID device (unless the bpf_link
 * is pinned to the BPF file system).
 */
/* called from syscall */
noinline int
hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
{
	struct hid_device *hdev;
	struct device *dev;
	int fd, err, prog_type = hid_bpf_get_prog_attach_type(prog_fd);

	if (!hid_bpf_ops)
		return -EINVAL;

	if (prog_type < 0)
		return prog_type;

	if (prog_type >= HID_BPF_PROG_TYPE_MAX)
		return -EINVAL;

	if ((flags & ~HID_BPF_FLAG_MASK))
		return -EINVAL;

	dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
	if (!dev)
		return -EINVAL;

	hdev = to_hid_device(dev);

	if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
		err = hid_bpf_allocate_event_data(hdev);
		if (err)
			return err;
	}

	fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, flags);
	if (fd < 0)
		return fd;

	if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
		err = hid_bpf_reconnect(hdev);
		if (err) {
			close_fd(fd);
			return err;
		}
	}

	return fd;
}
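/*
 * hid_bpf_attach_prog() is exposed as a syscall kfunc (see
 * hid_bpf_syscall_kfunc_ids below), so attaching is typically driven from a
 * SEC("syscall") program that user space runs with BPF_PROG_RUN. A sketch
 * only: struct attach_prog_args and the extern declaration are assumptions
 * local to the BPF object, not definitions from this file.
 *
 *	extern int hid_bpf_attach_prog(unsigned int hid_id, int prog_fd,
 *				       __u32 flags) __ksym;
 *
 *	struct attach_prog_args {
 *		int prog_fd;
 *		unsigned int hid;
 *		unsigned int flags;
 *		int retval;
 *	};
 *
 *	SEC("syscall")
 *	int attach_prog(struct attach_prog_args *ctx)
 *	{
 *		ctx->retval = hid_bpf_attach_prog(ctx->hid, ctx->prog_fd,
 *						  ctx->flags);
 *		return 0;
 *	}
 */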
/**
 * hid_bpf_allocate_context - Allocate a context to the given HID device
 *
 * @hid_id: the system unique identifier of the HID device
 *
 * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
 */
noinline struct hid_bpf_ctx *
hid_bpf_allocate_context(unsigned int hid_id)
{
	struct hid_device *hdev;
	struct hid_bpf_ctx_kern *ctx_kern = NULL;
	struct device *dev;

	if (!hid_bpf_ops)
		return NULL;

	dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
	if (!dev)
		return NULL;

	hdev = to_hid_device(dev);

	ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
	if (!ctx_kern)
		return NULL;

	ctx_kern->ctx.hid = hdev;

	return &ctx_kern->ctx;
}

/**
 * hid_bpf_release_context - Release the previously allocated context @ctx
 *
 * @ctx: the HID-BPF context to release
 *
 */
noinline void
hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
	struct hid_bpf_ctx_kern *ctx_kern;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

	kfree(ctx_kern);
}

/**
 * hid_bpf_hw_request - Communicate with a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 * @rtype: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @reqtype: the type of the request (%HID_REQ_GET_REPORT, %HID_REQ_SET_REPORT, ...)
 *
 * @returns %0 on success, a negative error code otherwise.
 */
noinline int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
		   enum hid_report_type rtype, enum hid_class_request reqtype)
{
	struct hid_device *hdev;
	struct hid_report *report;
	struct hid_report_enum *report_enum;
	u8 *dma_data;
	u32 report_len;
	int ret;

	/* check arguments */
	if (!ctx || !hid_bpf_ops || !buf)
		return -EINVAL;

	switch (rtype) {
	case HID_INPUT_REPORT:
	case HID_OUTPUT_REPORT:
	case HID_FEATURE_REPORT:
		break;
	default:
		return -EINVAL;
	}

	switch (reqtype) {
	case HID_REQ_GET_REPORT:
	case HID_REQ_GET_IDLE:
	case HID_REQ_GET_PROTOCOL:
	case HID_REQ_SET_REPORT:
	case HID_REQ_SET_IDLE:
	case HID_REQ_SET_PROTOCOL:
		break;
	default:
		return -EINVAL;
	}

	if (buf__sz < 1)
		return -EINVAL;

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	report_enum = hdev->report_enum + rtype;
	report = hid_bpf_ops->hid_get_report(report_enum, buf);
	if (!report)
		return -EINVAL;

	report_len = hid_report_len(report);

	if (buf__sz > report_len)
		buf__sz = report_len;

	dma_data = kmemdup(buf, buf__sz, GFP_KERNEL);
	if (!dma_data)
		return -ENOMEM;

	ret = hid_bpf_ops->hid_hw_raw_request(hdev,
					      dma_data[0],
					      dma_data,
					      buf__sz,
					      rtype,
					      reqtype);

	if (ret > 0)
		memcpy(buf, dma_data, ret);

	kfree(dma_data);
	return ret;
}
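/*
 * A sketch of how the three kfuncs above combine in a SEC("syscall") program
 * to issue a GET_REPORT request. struct hid_hw_request_args, the 10-byte
 * buffer and the program name are illustrative assumptions; buf[0] selects
 * the report ID, as hid_bpf_hw_request() passes it to hid_hw_raw_request().
 *
 *	struct hid_hw_request_args {
 *		unsigned int hid;
 *		__u8 data[10];
 *		int retval;
 *	};
 *
 *	SEC("syscall")
 *	int hid_get_feature(struct hid_hw_request_args *args)
 *	{
 *		struct hid_bpf_ctx *ctx;
 *
 *		ctx = hid_bpf_allocate_context(args->hid);
 *		if (!ctx)
 *			return 0;	// EPERM check
 *
 *		args->retval = hid_bpf_hw_request(ctx, args->data,
 *						  sizeof(args->data),
 *						  HID_FEATURE_REPORT,
 *						  HID_REQ_GET_REPORT);
 *
 *		hid_bpf_release_context(ctx);
 *		return 0;
 *	}
 */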
/* our HID-BPF entrypoints */
BTF_SET8_START(hid_bpf_fmodret_ids)
BTF_ID_FLAGS(func, hid_bpf_device_event)
BTF_ID_FLAGS(func, hid_bpf_rdesc_fixup)
BTF_ID_FLAGS(func, __hid_bpf_tail_call)
BTF_SET8_END(hid_bpf_fmodret_ids)

static const struct btf_kfunc_id_set hid_bpf_fmodret_set = {
	.owner = THIS_MODULE,
	.set = &hid_bpf_fmodret_ids,
};

/* for syscall HID-BPF */
BTF_SET8_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_attach_prog)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
BTF_SET8_END(hid_bpf_syscall_kfunc_ids)

static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &hid_bpf_syscall_kfunc_ids,
};

int hid_bpf_connect_device(struct hid_device *hdev)
{
	struct hid_bpf_prog_list *prog_list;

	rcu_read_lock();
	prog_list = rcu_dereference(hdev->bpf.progs[HID_BPF_PROG_TYPE_DEVICE_EVENT]);
	rcu_read_unlock();

	/* only allocate BPF data if there are programs attached */
	if (!prog_list)
		return 0;

	return hid_bpf_allocate_event_data(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_connect_device);

void hid_bpf_disconnect_device(struct hid_device *hdev)
{
	kfree(hdev->bpf.device_data);
	hdev->bpf.device_data = NULL;
	hdev->bpf.allocated_data = 0;
}
EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);

void hid_bpf_destroy_device(struct hid_device *hdev)
{
	if (!hdev)
		return;

	/* mark the device as destroyed in bpf so we don't reattach it */
	hdev->bpf.destroyed = true;

	__hid_bpf_destroy_device(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);

void hid_bpf_device_init(struct hid_device *hdev)
{
	spin_lock_init(&hdev->bpf.progs_lock);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);

static int __init hid_bpf_init(void)
{
	int err;

	/* Note: if we exit with an error any time here, we would entirely break HID, which
	 * is probably not something we want. So we log an error and return success.
	 *
	 * This is not a big deal: the syscall that allows attaching a BPF program to a HID
	 * device will not be available, so nobody will be able to use the functionality.
	 */

	err = register_btf_fmodret_id_set(&hid_bpf_fmodret_set);
	if (err) {
		pr_warn("error while registering fmodret entrypoints: %d\n", err);
		return 0;
	}

	err = hid_bpf_preload_skel();
	if (err) {
		pr_warn("error while preloading HID BPF dispatcher: %d\n", err);
		return 0;
	}

	/* register tracing kfuncs after we are sure we can load our preloaded bpf program */
	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &hid_bpf_kfunc_set);
	if (err) {
		pr_warn("error while setting HID BPF tracing kfuncs: %d\n", err);
		return 0;
	}

	/* register syscalls after we are sure we can load our preloaded bpf program */
	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
	if (err) {
		pr_warn("error while setting HID BPF syscall kfuncs: %d\n", err);
		return 0;
	}

	return 0;
}

static void __exit hid_bpf_exit(void)
{
	/* HID depends on us, so if we get here, we are guaranteed that HID
	 * has been removed and thus we do not need to clear the HID devices
	 */
	hid_bpf_free_links_and_skel();
}

late_initcall(hid_bpf_init);
module_exit(hid_bpf_exit);
MODULE_AUTHOR("Benjamin Tissoires");
MODULE_LICENSE("GPL");