// SPDX-License-Identifier: GPL-2.0-only

/*
 * HID-BPF support for Linux
 *
 * Copyright (c) 2024 Benjamin Tissoires
 */

#include <linux/bitops.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/hid.h>
#include <linux/hid_bpf.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include "hid_bpf_dispatch.h"

static struct btf *hid_bpf_ops_btf;

static int hid_bpf_ops_init(struct btf *btf)
{
	hid_bpf_ops_btf = btf;
	return 0;
}

static bool hid_bpf_ops_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int hid_bpf_ops_check_member(const struct btf_type *t,
				    const struct btf_member *member,
				    const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct hid_bpf_ops, hid_rdesc_fixup):
	case offsetof(struct hid_bpf_ops, hid_hw_request):
	case offsetof(struct hid_bpf_ops, hid_hw_output_report):
		break;
	default:
		if (prog->sleepable)
			return -EINVAL;
	}

	return 0;
}
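/*
 * Illustration (not part of the original file): check_member() above means
 * that only hid_rdesc_fixup, hid_hw_request and hid_hw_output_report may be
 * backed by sleepable BPF programs. On the BPF side this corresponds to the
 * section the program is placed in; a sketch, assuming the usual libbpf
 * struct_ops section names:
 *
 *	SEC("struct_ops.s/hid_rdesc_fixup")	// sleepable: accepted
 *	int BPF_PROG(my_rdesc_fixup, struct hid_bpf_ctx *hctx)
 *	{
 *		return 0;
 *	}
 *
 *	SEC("struct_ops.s/hid_device_event")	// sleepable: rejected, -EINVAL
 *	SEC("struct_ops/hid_device_event")	// non-sleepable: accepted
 */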
struct hid_bpf_offset_write_range {
	const char *struct_name;
	u32 struct_length;
	u32 start;
	u32 end;
};

static int hid_bpf_ops_btf_struct_access(struct bpf_verifier_log *log,
					 const struct bpf_reg_state *reg,
					 int off, int size)
{
#define WRITE_RANGE(_name, _field, _is_string)					\
	{									\
		.struct_name = #_name,						\
		.struct_length = sizeof(struct _name),				\
		.start = offsetof(struct _name, _field),			\
		.end = offsetofend(struct _name, _field) - !!(_is_string),	\
	}

	const struct hid_bpf_offset_write_range write_ranges[] = {
		WRITE_RANGE(hid_bpf_ctx, retval, false),
		WRITE_RANGE(hid_device, name, true),
		WRITE_RANGE(hid_device, uniq, true),
		WRITE_RANGE(hid_device, phys, true),
		WRITE_RANGE(hid_device, quirks, false),
	};
#undef WRITE_RANGE
	const struct btf_type *state = NULL;
	const struct btf_type *t;
	const char *cur = NULL;
	int i;

	t = btf_type_by_id(reg->btf, reg->btf_id);

	for (i = 0; i < ARRAY_SIZE(write_ranges); i++) {
		const struct hid_bpf_offset_write_range *write_range = &write_ranges[i];
		s32 type_id;

		/* we already found a writeable struct, but there is a
		 * new one, let's break the loop.
		 */
		if (t == state && write_range->struct_name != cur)
			break;

		/* new struct to look for */
		if (write_range->struct_name != cur) {
			type_id = btf_find_by_name_kind(reg->btf, write_range->struct_name,
							BTF_KIND_STRUCT);
			if (type_id < 0)
				return -EINVAL;

			state = btf_type_by_id(reg->btf, type_id);
		}

		/* this is not the struct we are looking for */
		if (t != state) {
			cur = write_range->struct_name;
			continue;
		}

		/* first time we see this struct, check for out of bounds */
		if (cur != write_range->struct_name &&
		    off + size > write_range->struct_length) {
			bpf_log(log, "write access for struct %s at off %d with size %d\n",
				write_range->struct_name, off, size);
			return -EACCES;
		}

		/* now check if we are in our boundaries */
		if (off >= write_range->start && off + size <= write_range->end)
			return NOT_INIT;

		cur = write_range->struct_name;
	}

	if (t != state)
		bpf_log(log, "write access to this struct is not supported\n");
	else
		bpf_log(log,
			"write access at off %d with size %d on read-only part of %s\n",
			off, size, cur);

	return -EACCES;
}

static const struct bpf_verifier_ops hid_bpf_verifier_ops = {
	.get_func_proto = bpf_base_func_proto,
	.is_valid_access = hid_bpf_ops_is_valid_access,
	.btf_struct_access = hid_bpf_ops_btf_struct_access,
};
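/*
 * Illustration (not part of the original file): given the write_ranges table
 * above, a struct_ops program holding the context pointer may, for example,
 * do the following (a sketch; the field names come from struct hid_bpf_ctx
 * and struct hid_device):
 *
 *	hctx->retval = -EINVAL;			// inside hid_bpf_ctx.retval: allowed
 *	hctx->hid->quirks |= HID_QUIRK_NO_INIT_REPORTS;	// inside hid_device.quirks: allowed
 *	hctx->hid->name[0] = 'J';		// inside hid_device.name: allowed
 *
 * whereas a store to the last byte of name/uniq/phys (reserved for the
 * terminating NUL by the "- !!(_is_string)" in WRITE_RANGE) or to any offset
 * not covered by the table is rejected with -EACCES.
 */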
static int hid_bpf_ops_init_member(const struct btf_type *t,
				   const struct btf_member *member,
				   void *kdata, const void *udata)
{
	const struct hid_bpf_ops *uhid_bpf_ops;
	struct hid_bpf_ops *khid_bpf_ops;
	u32 moff;

	uhid_bpf_ops = (const struct hid_bpf_ops *)udata;
	khid_bpf_ops = (struct hid_bpf_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct hid_bpf_ops, hid_id):
		/* For the hid_id and flags fields, this function has to copy
		 * the value and return 1 to indicate that the data has been
		 * handled by the struct_ops type, or the verifier will reject
		 * the map if the value of those fields is not zero.
		 */
		khid_bpf_ops->hid_id = uhid_bpf_ops->hid_id;
		return 1;
	case offsetof(struct hid_bpf_ops, flags):
		if (uhid_bpf_ops->flags & ~BPF_F_BEFORE)
			return -EINVAL;
		khid_bpf_ops->flags = uhid_bpf_ops->flags;
		return 1;
	}
	return 0;
}

static int hid_bpf_reg(void *kdata, struct bpf_link *link)
{
	struct hid_bpf_ops *ops = kdata;
	struct hid_device *hdev;
	int count, err = 0;

	/* prevent multiple attach of the same struct_ops */
	if (ops->hdev)
		return -EINVAL;

	hdev = hid_get_device(ops->hid_id);
	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	ops->hdev = hdev;

	mutex_lock(&hdev->bpf.prog_list_lock);

	count = list_count_nodes(&hdev->bpf.prog_list);
	if (count >= HID_BPF_MAX_PROGS_PER_DEV) {
		err = -E2BIG;
		goto out_unlock;
	}

	if (ops->hid_rdesc_fixup) {
		if (hdev->bpf.rdesc_ops) {
			err = -EINVAL;
			goto out_unlock;
		}

		hdev->bpf.rdesc_ops = ops;
	}

	if (ops->hid_device_event) {
		err = hid_bpf_allocate_event_data(hdev);
		if (err)
			goto out_unlock;
	}

	if (ops->flags & BPF_F_BEFORE)
		list_add_rcu(&ops->list, &hdev->bpf.prog_list);
	else
		list_add_tail_rcu(&ops->list, &hdev->bpf.prog_list);
	synchronize_srcu(&hdev->bpf.srcu);

out_unlock:
	mutex_unlock(&hdev->bpf.prog_list_lock);

	if (err) {
		if (hdev->bpf.rdesc_ops == ops)
			hdev->bpf.rdesc_ops = NULL;
		hid_put_device(hdev);
	} else if (ops->hid_rdesc_fixup) {
		hid_bpf_reconnect(hdev);
	}

	return err;
}

static void hid_bpf_unreg(void *kdata, struct bpf_link *link)
{
	struct hid_bpf_ops *ops = kdata;
	struct hid_device *hdev;
	bool reconnect = false;

	hdev = ops->hdev;

	/* check if __hid_bpf_ops_destroy_device() has been called */
	if (!hdev)
		return;

	mutex_lock(&hdev->bpf.prog_list_lock);

	list_del_rcu(&ops->list);
	synchronize_srcu(&hdev->bpf.srcu);
	ops->hdev = NULL;

	reconnect = hdev->bpf.rdesc_ops == ops;
	if (reconnect)
		hdev->bpf.rdesc_ops = NULL;

	mutex_unlock(&hdev->bpf.prog_list_lock);

	if (reconnect)
		hid_bpf_reconnect(hdev);

	hid_put_device(hdev);
}
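/*
 * Illustration (not part of the original file): the struct_ops map that
 * hid_bpf_reg()/hid_bpf_unreg() manage is typically declared on the BPF side
 * as below (a sketch; "my_fixup" and "my_rdesc_fixup" are made-up names):
 *
 *	SEC(".struct_ops.link")
 *	struct hid_bpf_ops my_fixup = {
 *		.hid_rdesc_fixup = (void *)my_rdesc_fixup,
 *		.flags = BPF_F_BEFORE,	// optional: link at the head of prog_list
 *	};
 *
 * .hid_id is usually left at 0 in the object file and filled in by user
 * space before load, since hid_bpf_ops_init_member() copies it from udata
 * when the map is updated.
 */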
static int __hid_bpf_device_event(struct hid_bpf_ctx *ctx, enum hid_report_type type, u64 source)
{
	return 0;
}

static int __hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
{
	return 0;
}

static int __hid_bpf_hw_request(struct hid_bpf_ctx *ctx, unsigned char reportnum,
				enum hid_report_type rtype, enum hid_class_request reqtype,
				u64 source)
{
	return 0;
}

static int __hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, u64 source)
{
	return 0;
}

static struct hid_bpf_ops __bpf_hid_bpf_ops = {
	.hid_device_event = __hid_bpf_device_event,
	.hid_rdesc_fixup = __hid_bpf_rdesc_fixup,
	.hid_hw_request = __hid_bpf_hw_request,
	.hid_hw_output_report = __hid_bpf_hw_output_report,
};

static struct bpf_struct_ops bpf_hid_bpf_ops = {
	.verifier_ops = &hid_bpf_verifier_ops,
	.init = hid_bpf_ops_init,
	.check_member = hid_bpf_ops_check_member,
	.init_member = hid_bpf_ops_init_member,
	.reg = hid_bpf_reg,
	.unreg = hid_bpf_unreg,
	.name = "hid_bpf_ops",
	.cfi_stubs = &__bpf_hid_bpf_ops,
	.owner = THIS_MODULE,
};

void __hid_bpf_ops_destroy_device(struct hid_device *hdev)
{
	struct hid_bpf_ops *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
		hid_put_device(hdev);
		e->hdev = NULL;
	}
	rcu_read_unlock();
}

static int __init hid_bpf_struct_ops_init(void)
{
	return register_bpf_struct_ops(&bpf_hid_bpf_ops, hid_bpf_ops);
}
late_initcall(hid_bpf_struct_ops_init);
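/*
 * Illustration (not part of the original file): from user space, a libbpf
 * skeleton would drive the callbacks above roughly as follows (a sketch;
 * "my_prog" and "my_fixup" are made-up skeleton names):
 *
 *	skel->struct_ops.my_fixup->hid_id = hid_id;	// consumed by init_member()
 *	err = my_prog__load(skel);
 *	...
 *	link = bpf_map__attach_struct_ops(skel->maps.my_fixup);	// calls into hid_bpf_reg()
 *	...
 *	bpf_link__destroy(link);	// calls into hid_bpf_unreg()
 */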