1 // SPDX-License-Identifier: GPL-2.0-only 2 3 /* 4 * HID-BPF support for Linux 5 * 6 * Copyright (c) 2022 Benjamin Tissoires 7 */ 8 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 #include <linux/bitops.h> 11 #include <linux/btf.h> 12 #include <linux/btf_ids.h> 13 #include <linux/filter.h> 14 #include <linux/hid.h> 15 #include <linux/hid_bpf.h> 16 #include <linux/init.h> 17 #include <linux/kfifo.h> 18 #include <linux/module.h> 19 #include <linux/workqueue.h> 20 #include "hid_bpf_dispatch.h" 21 #include "entrypoints/entrypoints.lskel.h" 22 23 struct hid_bpf_ops *hid_bpf_ops; 24 EXPORT_SYMBOL(hid_bpf_ops); 25 26 /** 27 * hid_bpf_device_event - Called whenever an event is coming in from the device 28 * 29 * @ctx: The HID-BPF context 30 * 31 * @return %0 on success and keep processing; a positive value to change the 32 * incoming size buffer; a negative error code to interrupt the processing 33 * of this event 34 * 35 * Declare an %fmod_ret tracing bpf program to this function and attach this 36 * program through hid_bpf_attach_prog() to have this helper called for 37 * any incoming event from the device itself. 38 * 39 * The function is called while on IRQ context, so we can not sleep. 
40 */ 41 /* never used by the kernel but declared so we can load and attach a tracepoint */ 42 __weak noinline int hid_bpf_device_event(struct hid_bpf_ctx *ctx) 43 { 44 return 0; 45 } 46 ALLOW_ERROR_INJECTION(hid_bpf_device_event, ERRNO); 47 48 u8 * 49 dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data, 50 u32 *size, int interrupt) 51 { 52 struct hid_bpf_ctx_kern ctx_kern = { 53 .ctx = { 54 .hid = hdev, 55 .report_type = type, 56 .allocated_size = hdev->bpf.allocated_data, 57 .size = *size, 58 }, 59 .data = hdev->bpf.device_data, 60 }; 61 int ret; 62 63 if (type >= HID_REPORT_TYPES) 64 return ERR_PTR(-EINVAL); 65 66 /* no program has been attached yet */ 67 if (!hdev->bpf.device_data) 68 return data; 69 70 memset(ctx_kern.data, 0, hdev->bpf.allocated_data); 71 memcpy(ctx_kern.data, data, *size); 72 73 ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_DEVICE_EVENT, &ctx_kern); 74 if (ret < 0) 75 return ERR_PTR(ret); 76 77 if (ret) { 78 if (ret > ctx_kern.ctx.allocated_size) 79 return ERR_PTR(-EINVAL); 80 81 *size = ret; 82 } 83 84 return ctx_kern.data; 85 } 86 EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event); 87 88 /** 89 * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx 90 * 91 * @ctx: The HID-BPF context 92 * @offset: The offset within the memory 93 * @rdwr_buf_size: the const size of the buffer 94 * 95 * @returns %NULL on error, an %__u8 memory pointer on success 96 */ 97 noinline __u8 * 98 hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size) 99 { 100 struct hid_bpf_ctx_kern *ctx_kern; 101 102 if (!ctx) 103 return NULL; 104 105 ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx); 106 107 if (rdwr_buf_size + offset > ctx->allocated_size) 108 return NULL; 109 110 return ctx_kern->data + offset; 111 } 112 113 /* 114 * The following set contains all functions we agree BPF programs 115 * can use. 
116 */ 117 BTF_SET8_START(hid_bpf_kfunc_ids) 118 BTF_ID_FLAGS(func, call_hid_bpf_prog_put_deferred) 119 BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL) 120 BTF_SET8_END(hid_bpf_kfunc_ids) 121 122 static const struct btf_kfunc_id_set hid_bpf_kfunc_set = { 123 .owner = THIS_MODULE, 124 .set = &hid_bpf_kfunc_ids, 125 }; 126 127 static int device_match_id(struct device *dev, const void *id) 128 { 129 struct hid_device *hdev = to_hid_device(dev); 130 131 return hdev->id == *(int *)id; 132 } 133 134 static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size) 135 { 136 u8 *alloc_data; 137 unsigned int i, j, max_report_len = 0; 138 size_t alloc_size = 0; 139 140 /* compute the maximum report length for this device */ 141 for (i = 0; i < HID_REPORT_TYPES; i++) { 142 struct hid_report_enum *report_enum = hdev->report_enum + i; 143 144 for (j = 0; j < HID_MAX_IDS; j++) { 145 struct hid_report *report = report_enum->report_id_hash[j]; 146 147 if (report) 148 max_report_len = max(max_report_len, hid_report_len(report)); 149 } 150 } 151 152 /* 153 * Give us a little bit of extra space and some predictability in the 154 * buffer length we create. This way, we can tell users that they can 155 * work on chunks of 64 bytes of memory without having the bpf verifier 156 * scream at them. 
157 */ 158 alloc_size = DIV_ROUND_UP(max_report_len, 64) * 64; 159 160 alloc_data = kzalloc(alloc_size, GFP_KERNEL); 161 if (!alloc_data) 162 return -ENOMEM; 163 164 *data = alloc_data; 165 *size = alloc_size; 166 167 return 0; 168 } 169 170 static int hid_bpf_allocate_event_data(struct hid_device *hdev) 171 { 172 /* hdev->bpf.device_data is already allocated, abort */ 173 if (hdev->bpf.device_data) 174 return 0; 175 176 return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data); 177 } 178 179 /** 180 * hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device 181 * 182 * @hid_id: the system unique identifier of the HID device 183 * @prog_fd: an fd in the user process representing the program to attach 184 * @flags: any logical OR combination of &enum hid_bpf_attach_flags 185 * 186 * @returns %0 on success, an error code otherwise. 187 */ 188 /* called from syscall */ 189 noinline int 190 hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags) 191 { 192 struct hid_device *hdev; 193 struct device *dev; 194 int err, prog_type = hid_bpf_get_prog_attach_type(prog_fd); 195 196 if (!hid_bpf_ops) 197 return -EINVAL; 198 199 if (prog_type < 0) 200 return prog_type; 201 202 if (prog_type >= HID_BPF_PROG_TYPE_MAX) 203 return -EINVAL; 204 205 if ((flags & ~HID_BPF_FLAG_MASK)) 206 return -EINVAL; 207 208 dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id); 209 if (!dev) 210 return -EINVAL; 211 212 hdev = to_hid_device(dev); 213 214 if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) { 215 err = hid_bpf_allocate_event_data(hdev); 216 if (err) 217 return err; 218 } 219 220 return __hid_bpf_attach_prog(hdev, prog_type, prog_fd, flags); 221 } 222 223 /* for syscall HID-BPF */ 224 BTF_SET8_START(hid_bpf_syscall_kfunc_ids) 225 BTF_ID_FLAGS(func, hid_bpf_attach_prog) 226 BTF_SET8_END(hid_bpf_syscall_kfunc_ids) 227 228 static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = { 229 .owner = THIS_MODULE, 
	.set = &hid_bpf_syscall_kfunc_ids,
};

/*
 * Make sure the per-device BPF scratch buffer exists for a device that
 * already has device-event programs attached (e.g. attached before this
 * connect — NOTE(review): presumably called from hid-core on connect;
 * confirm against the caller).
 */
int hid_bpf_connect_device(struct hid_device *hdev)
{
	struct hid_bpf_prog_list *prog_list;

	/* snapshot only: we just need to know whether any program is attached */
	rcu_read_lock();
	prog_list = rcu_dereference(hdev->bpf.progs[HID_BPF_PROG_TYPE_DEVICE_EVENT]);
	rcu_read_unlock();

	/* only allocate BPF data if there are programs attached */
	if (!prog_list)
		return 0;

	return hid_bpf_allocate_event_data(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_connect_device);

/* release the per-device BPF scratch buffer and reset the bookkeeping */
void hid_bpf_disconnect_device(struct hid_device *hdev)
{
	kfree(hdev->bpf.device_data);
	hdev->bpf.device_data = NULL;
	hdev->bpf.allocated_data = 0;
}
EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);

void hid_bpf_destroy_device(struct hid_device *hdev)
{
	if (!hdev)
		return;

	/* mark the device as destroyed in bpf so we don't reattach it */
	hdev->bpf.destroyed = true;

	__hid_bpf_destroy_device(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);

/* one-time per-device setup of the lock protecting hdev->bpf.progs */
void hid_bpf_device_init(struct hid_device *hdev)
{
	spin_lock_init(&hdev->bpf.progs_lock);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);

static int __init hid_bpf_init(void)
{
	int err;

	/* Note: if we exit with an error any time here, we would entirely break HID, which
	 * is probably not something we want. So we log an error and return success.
	 *
	 * This is not a big deal: the syscall allowing to attach a BPF program to a HID device
	 * will not be available, so nobody will be able to use the functionality.
	 */

	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &hid_bpf_kfunc_set);
	if (err) {
		pr_warn("error while setting HID BPF tracing kfuncs: %d", err);
		return 0;
	}

	err = hid_bpf_preload_skel();
	if (err) {
		pr_warn("error while preloading HID BPF dispatcher: %d", err);
		return 0;
	}

	/* register syscalls after we are sure we can load our preloaded bpf program */
	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
	if (err) {
		pr_warn("error while setting HID BPF syscall kfuncs: %d", err);
		return 0;
	}

	return 0;
}

static void __exit hid_bpf_exit(void)
{
	/* HID depends on us, so if we hit that code, we are guaranteed that hid
	 * has been removed and thus we do not need to clear the HID devices
	 */
	hid_bpf_free_links_and_skel();
}

late_initcall(hid_bpf_init);
module_exit(hid_bpf_exit);
MODULE_AUTHOR("Benjamin Tissoires");
MODULE_LICENSE("GPL");