// SPDX-License-Identifier: GPL-2.0-only

/*
 * HID-BPF support for Linux
 *
 * Copyright (c) 2024 Benjamin Tissoires
 */

#include <linux/bitops.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/hid.h>
#include <linux/hid_bpf.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include "hid_bpf_dispatch.h"

static struct btf *hid_bpf_ops_btf;

static int hid_bpf_ops_init(struct btf *btf)
{
	hid_bpf_ops_btf = btf;
	return 0;
}

static bool hid_bpf_ops_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int hid_bpf_ops_check_member(const struct btf_type *t,
				    const struct btf_member *member,
				    const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct hid_bpf_ops, hid_rdesc_fixup):
	case offsetof(struct hid_bpf_ops, hid_hw_request):
	case offsetof(struct hid_bpf_ops, hid_hw_output_report):
		break;
	default:
		if (prog->sleepable)
			return -EINVAL;
	}

	return 0;
}

struct hid_bpf_offset_write_range {
	const char *struct_name;
	u32 struct_length;
	u32 start;
	u32 end;
};

static int hid_bpf_ops_btf_struct_access(struct bpf_verifier_log *log,
					 const struct bpf_reg_state *reg,
					 int off, int size)
{
#define WRITE_RANGE(_name, _field, _is_string)				\
	{								\
		.struct_name = #_name,					\
		.struct_length = sizeof(struct _name),			\
		.start = offsetof(struct _name, _field),		\
		.end = offsetofend(struct _name, _field) - !!(_is_string), \
	}

	const struct hid_bpf_offset_write_range write_ranges[] = {
		WRITE_RANGE(hid_bpf_ctx, retval, false),
		WRITE_RANGE(hid_device, name, true),
		WRITE_RANGE(hid_device, uniq, true),
		WRITE_RANGE(hid_device, phys, true),
	};
#undef WRITE_RANGE
	const struct btf_type *state = NULL;
	const struct btf_type *t;
	const char *cur = NULL;
	int i;

	t = btf_type_by_id(reg->btf, reg->btf_id);

	for (i = 0; i < ARRAY_SIZE(write_ranges); i++) {
		const struct hid_bpf_offset_write_range *write_range = &write_ranges[i];
		s32 type_id;

		/* we already found a writeable struct, but there is a
		 * new one, let's break the loop.
		 */
		if (t == state && write_range->struct_name != cur)
			break;

		/* new struct to look for */
		if (write_range->struct_name != cur) {
			type_id = btf_find_by_name_kind(reg->btf, write_range->struct_name,
							BTF_KIND_STRUCT);
			if (type_id < 0)
				return -EINVAL;

			state = btf_type_by_id(reg->btf, type_id);
		}

		/* this is not the struct we are looking for */
		if (t != state) {
			cur = write_range->struct_name;
			continue;
		}

		/* first time we see this struct, check for out of bounds */
		if (cur != write_range->struct_name &&
		    off + size > write_range->struct_length) {
			bpf_log(log, "write access for struct %s at off %d with size %d\n",
				write_range->struct_name, off, size);
			return -EACCES;
		}

		/* now check if we are in our boundaries */
		if (off >= write_range->start && off + size <= write_range->end)
			return NOT_INIT;

		cur = write_range->struct_name;
	}

	if (t != state)
		bpf_log(log, "write access to this struct is not supported\n");
	else
		bpf_log(log,
			"write access at off %d with size %d on read-only part of %s\n",
			off, size, cur);

	return -EACCES;
}

static const struct bpf_verifier_ops hid_bpf_verifier_ops = {
	.get_func_proto = bpf_base_func_proto,
	.is_valid_access = hid_bpf_ops_is_valid_access,
	.btf_struct_access = hid_bpf_ops_btf_struct_access,
};

static int hid_bpf_ops_init_member(const struct btf_type *t,
				   const struct btf_member *member,
				   void *kdata, const void *udata)
{
	const struct hid_bpf_ops *uhid_bpf_ops;
	struct hid_bpf_ops *khid_bpf_ops;
	u32 moff;

	uhid_bpf_ops = (const struct hid_bpf_ops *)udata;
	khid_bpf_ops = (struct hid_bpf_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct hid_bpf_ops, hid_id):
		/* For hid_id and flags fields, this function has to copy it
		 * and return 1 to indicate that the data has been handled by
		 * the struct_ops type, or the verifier will reject the map if
		 * the value of those fields is not zero.
		 */
		khid_bpf_ops->hid_id = uhid_bpf_ops->hid_id;
		return 1;
	case offsetof(struct hid_bpf_ops, flags):
		if (uhid_bpf_ops->flags & ~BPF_F_BEFORE)
			return -EINVAL;
		khid_bpf_ops->flags = uhid_bpf_ops->flags;
		return 1;
	}
	return 0;
}

static int hid_bpf_reg(void *kdata, struct bpf_link *link)
{
	struct hid_bpf_ops *ops = kdata;
	struct hid_device *hdev;
	int count, err = 0;

	hdev = hid_get_device(ops->hid_id);
	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	ops->hdev = hdev;

	mutex_lock(&hdev->bpf.prog_list_lock);

	count = list_count_nodes(&hdev->bpf.prog_list);
	if (count >= HID_BPF_MAX_PROGS_PER_DEV) {
		err = -E2BIG;
		goto out_unlock;
	}

	if (ops->hid_rdesc_fixup) {
		if (hdev->bpf.rdesc_ops) {
			err = -EINVAL;
			goto out_unlock;
		}

		hdev->bpf.rdesc_ops = ops;
	}

	if (ops->hid_device_event) {
		err = hid_bpf_allocate_event_data(hdev);
		if (err)
			goto out_unlock;
	}

	if (ops->flags & BPF_F_BEFORE)
		list_add_rcu(&ops->list, &hdev->bpf.prog_list);
	else
		list_add_tail_rcu(&ops->list, &hdev->bpf.prog_list);
	synchronize_srcu(&hdev->bpf.srcu);

out_unlock:
	mutex_unlock(&hdev->bpf.prog_list_lock);

	if (err) {
		if (hdev->bpf.rdesc_ops == ops)
			hdev->bpf.rdesc_ops = NULL;
		hid_put_device(hdev);
	} else if (ops->hid_rdesc_fixup) {
		hid_bpf_reconnect(hdev);
	}

	return err;
}

static void hid_bpf_unreg(void *kdata, struct bpf_link *link)
{
	struct hid_bpf_ops *ops = kdata;
	struct hid_device *hdev;
	bool reconnect = false;

	hdev = ops->hdev;

	/* check if __hid_bpf_ops_destroy_device() has been called */
	if (!hdev)
		return;

	mutex_lock(&hdev->bpf.prog_list_lock);

	list_del_rcu(&ops->list);
	synchronize_srcu(&hdev->bpf.srcu);

	reconnect = hdev->bpf.rdesc_ops == ops;
	if (reconnect)
		hdev->bpf.rdesc_ops = NULL;

	mutex_unlock(&hdev->bpf.prog_list_lock);

	if (reconnect)
		hid_bpf_reconnect(hdev);

	hid_put_device(hdev);
}

static int __hid_bpf_device_event(struct hid_bpf_ctx *ctx, enum hid_report_type type, u64 source)
{
	return 0;
}

static int __hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
{
	return 0;
}

static struct hid_bpf_ops __bpf_hid_bpf_ops = {
	.hid_device_event = __hid_bpf_device_event,
	.hid_rdesc_fixup = __hid_bpf_rdesc_fixup,
};

static struct bpf_struct_ops bpf_hid_bpf_ops = {
	.verifier_ops = &hid_bpf_verifier_ops,
	.init = hid_bpf_ops_init,
	.check_member = hid_bpf_ops_check_member,
	.init_member = hid_bpf_ops_init_member,
	.reg = hid_bpf_reg,
	.unreg = hid_bpf_unreg,
	.name = "hid_bpf_ops",
	.cfi_stubs = &__bpf_hid_bpf_ops,
	.owner = THIS_MODULE,
};

void __hid_bpf_ops_destroy_device(struct hid_device *hdev)
{
	struct hid_bpf_ops *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
		hid_put_device(hdev);
		e->hdev = NULL;
	}
	rcu_read_unlock();
}

static int __init hid_bpf_struct_ops_init(void)
{
	return register_bpf_struct_ops(&bpf_hid_bpf_ops, hid_bpf_ops);
}
late_initcall(hid_bpf_struct_ops_init);