1 // SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Red Hat */
3 #include "hid_bpf_helpers.h"
4
5 char _license[] SEC("license") = "GPL";
6
/*
 * Arguments for attaching a program to a device.
 * NOTE(review): not referenced in this file — presumably consumed by the
 * userspace test harness; confirm before removing.
 */
struct attach_prog_args {
	int prog_fd;		/* fd of the BPF program to attach */
	unsigned int hid;	/* target HID device id */
	int retval;		/* result reported back to userspace */
	int insert_head;	/* non-zero: insert at the head of the hook list */
};

/* Globals read back by userspace to verify the callbacks actually ran. */
__u64 callback_check = 52;
__u64 callback2_check = 52;
16
17 SEC("?struct_ops/hid_device_event")
BPF_PROG(hid_first_event,struct hid_bpf_ctx * hid_ctx,enum hid_report_type type)18 int BPF_PROG(hid_first_event, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
19 {
20 __u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
21
22 if (!rw_data)
23 return 0; /* EPERM check */
24
25 callback_check = rw_data[1];
26
27 rw_data[2] = rw_data[1] + 5;
28
29 return hid_ctx->size;
30 }
31
/* struct_ops map attaching hid_first_event; .hid_id = 2 limits it to the
 * HID device with id 2.
 */
SEC(".struct_ops.link")
struct hid_bpf_ops first_event = {
	.hid_device_event = (void *)hid_first_event,
	.hid_id = 2,
};
37
__hid_subprog_first_event(struct hid_bpf_ctx * hid_ctx,enum hid_report_type type)38 int __hid_subprog_first_event(struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
39 {
40 __u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
41
42 if (!rw_data)
43 return 0; /* EPERM check */
44
45 rw_data[2] = rw_data[1] + 5;
46
47 return hid_ctx->size;
48 }
49
50 SEC("?struct_ops/hid_device_event")
BPF_PROG(hid_subprog_first_event,struct hid_bpf_ctx * hid_ctx,enum hid_report_type type)51 int BPF_PROG(hid_subprog_first_event, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
52 {
53 return __hid_subprog_first_event(hid_ctx, type);
54 }
55
/* struct_ops map attaching hid_subprog_first_event to HID device id 2. */
SEC(".struct_ops.link")
struct hid_bpf_ops subprog_first_event = {
	.hid_device_event = (void *)hid_subprog_first_event,
	.hid_id = 2,
};
61
62 SEC("?struct_ops/hid_device_event")
BPF_PROG(hid_second_event,struct hid_bpf_ctx * hid_ctx,enum hid_report_type type)63 int BPF_PROG(hid_second_event, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
64 {
65 __u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
66
67 if (!rw_data)
68 return 0; /* EPERM check */
69
70 rw_data[3] = rw_data[2] + 5;
71
72 return hid_ctx->size;
73 }
74
/* struct_ops map attaching hid_second_event; hid_id is left unset here. */
SEC(".struct_ops.link")
struct hid_bpf_ops second_event = {
	.hid_device_event = (void *)hid_second_event,
};
79
80 SEC("?struct_ops/hid_device_event")
BPF_PROG(hid_change_report_id,struct hid_bpf_ctx * hid_ctx,enum hid_report_type type)81 int BPF_PROG(hid_change_report_id, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
82 {
83 __u8 *rw_data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 3 /* size */);
84
85 if (!rw_data)
86 return 0; /* EPERM check */
87
88 rw_data[0] = 2;
89
90 return 9;
91 }
92
/* struct_ops map attaching hid_change_report_id. */
SEC(".struct_ops.link")
struct hid_bpf_ops change_report_id = {
	.hid_device_event = (void *)hid_change_report_id,
};
97
/* Arguments exchanged with the SEC("syscall") test programs below. */
struct hid_hw_request_syscall_args {
	/* data needs to come at offset 0 so we can use it in calls */
	__u8 data[10];
	unsigned int hid;	/* target HID device id */
	int retval;		/* result of the HW call, reported back */
	size_t size;		/* number of bytes of data[] to use */
	enum hid_report_type type;	/* report type for the raw request */
	__u8 request_type;	/* hid_class_request value for the raw request */
};
107
108 SEC("syscall")
hid_user_raw_request(struct hid_hw_request_syscall_args * args)109 int hid_user_raw_request(struct hid_hw_request_syscall_args *args)
110 {
111 struct hid_bpf_ctx *ctx;
112 const size_t size = args->size;
113 int i, ret = 0;
114
115 if (size > sizeof(args->data))
116 return -7; /* -E2BIG */
117
118 ctx = hid_bpf_allocate_context(args->hid);
119 if (!ctx)
120 return -1; /* EPERM check */
121
122 ret = hid_bpf_hw_request(ctx,
123 args->data,
124 size,
125 args->type,
126 args->request_type);
127 args->retval = ret;
128
129 hid_bpf_release_context(ctx);
130
131 return 0;
132 }
133
134 SEC("syscall")
hid_user_output_report(struct hid_hw_request_syscall_args * args)135 int hid_user_output_report(struct hid_hw_request_syscall_args *args)
136 {
137 struct hid_bpf_ctx *ctx;
138 const size_t size = args->size;
139 int i, ret = 0;
140
141 if (size > sizeof(args->data))
142 return -7; /* -E2BIG */
143
144 ctx = hid_bpf_allocate_context(args->hid);
145 if (!ctx)
146 return -1; /* EPERM check */
147
148 ret = hid_bpf_hw_output_report(ctx,
149 args->data,
150 size);
151 args->retval = ret;
152
153 hid_bpf_release_context(ctx);
154
155 return 0;
156 }
157
158 SEC("syscall")
hid_user_input_report(struct hid_hw_request_syscall_args * args)159 int hid_user_input_report(struct hid_hw_request_syscall_args *args)
160 {
161 struct hid_bpf_ctx *ctx;
162 const size_t size = args->size;
163 int i, ret = 0;
164
165 if (size > sizeof(args->data))
166 return -7; /* -E2BIG */
167
168 ctx = hid_bpf_allocate_context(args->hid);
169 if (!ctx)
170 return -1; /* EPERM check */
171
172 ret = hid_bpf_input_report(ctx, HID_INPUT_REPORT, args->data, size);
173 args->retval = ret;
174
175 hid_bpf_release_context(ctx);
176
177 return 0;
178 }
179
/* Report-descriptor fragment spliced into the device's descriptor by
 * hid_rdesc_fixup below: one relative Z-axis input plus vendor-defined
 * output and feature bits.
 */
static const __u8 rdesc[] = {
	0x05, 0x01,				/* USAGE_PAGE (Generic Desktop) */
	0x09, 0x32,				/* USAGE (Z) */
	0x95, 0x01,				/* REPORT_COUNT (1) */
	0x81, 0x06,				/* INPUT (Data,Var,Rel) */

	0x06, 0x00, 0xff,			/* Usage Page (Vendor Defined Page 1) */
	0x19, 0x01,				/* USAGE_MINIMUM (1) */
	0x29, 0x03,				/* USAGE_MAXIMUM (3) */
	0x15, 0x00,				/* LOGICAL_MINIMUM (0) */
	0x25, 0x01,				/* LOGICAL_MAXIMUM (1) */
	0x95, 0x03,				/* REPORT_COUNT (3) */
	0x75, 0x01,				/* REPORT_SIZE (1) */
	0x91, 0x02,				/* Output (Data,Var,Abs) */
	0x95, 0x01,				/* REPORT_COUNT (1) */
	0x75, 0x05,				/* REPORT_SIZE (5) */
	0x91, 0x01,				/* Output (Cnst,Var,Abs) */

	0x06, 0x00, 0xff,			/* Usage Page (Vendor Defined Page 1) */
	0x19, 0x06,				/* USAGE_MINIMUM (6) */
	0x29, 0x08,				/* USAGE_MAXIMUM (8) */
	0x15, 0x00,				/* LOGICAL_MINIMUM (0) */
	0x25, 0x01,				/* LOGICAL_MAXIMUM (1) */
	0x95, 0x03,				/* REPORT_COUNT (3) */
	0x75, 0x01,				/* REPORT_SIZE (1) */
	0xb1, 0x02,				/* Feature (Data,Var,Abs) */
	0x95, 0x01,				/* REPORT_COUNT (1) */
	0x75, 0x05,				/* REPORT_SIZE (5) */
	0x91, 0x01,				/* Output (Cnst,Var,Abs) */

	0xc0,					/* END_COLLECTION */
	0xc0,					/* END_COLLECTION */
};
213
214 /*
215 * the following program is marked as sleepable (struct_ops.s).
216 * This is not strictly mandatory but is a nice test for
217 * sleepable struct_ops
218 */
219 SEC("?struct_ops.s/hid_rdesc_fixup")
BPF_PROG(hid_rdesc_fixup,struct hid_bpf_ctx * hid_ctx)220 int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hid_ctx)
221 {
222 __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4096 /* size */);
223
224 if (!data)
225 return 0; /* EPERM check */
226
227 callback2_check = data[4];
228
229 /* insert rdesc at offset 73 */
230 __builtin_memcpy(&data[73], rdesc, sizeof(rdesc));
231
232 /* Change Usage Vendor globally */
233 data[4] = 0x42;
234
235 return sizeof(rdesc) + 73;
236 }
237
/* struct_ops map attaching the sleepable hid_rdesc_fixup hook. */
SEC(".struct_ops.link")
struct hid_bpf_ops rdesc_fixup = {
	.hid_rdesc_fixup = (void *)hid_rdesc_fixup,
};
242
243 SEC("?struct_ops/hid_device_event")
BPF_PROG(hid_test_insert1,struct hid_bpf_ctx * hid_ctx,enum hid_report_type type)244 int BPF_PROG(hid_test_insert1, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
245 {
246 __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
247
248 if (!data)
249 return 0; /* EPERM check */
250
251 /* we need to be run first */
252 if (data[2] || data[3])
253 return -1;
254
255 data[1] = 1;
256
257 return 0;
258 }
259
/* struct_ops map for hid_test_insert1; BPF_F_BEFORE requests insertion at
 * the head of the hook list (the program checks it runs first).
 */
SEC(".struct_ops.link")
struct hid_bpf_ops test_insert1 = {
	.hid_device_event = (void *)hid_test_insert1,
	.flags = BPF_F_BEFORE,
};
265
/* Second stage of the insertion-ordering test. */
SEC("?struct_ops/hid_device_event")
int BPF_PROG(hid_test_insert2, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
	__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);

	if (!data)
		return 0; /* EPERM check */

	/* after insert1 (its mark in data[1] must be set) and before insert3
	 * (data[3] must still be clear)
	 */
	if (!data[1] || data[3])
		return -1;

	data[2] = 2;

	return 0;
}
282
/* struct_ops map for hid_test_insert2 (default ordering, no flags). */
SEC(".struct_ops.link")
struct hid_bpf_ops test_insert2 = {
	.hid_device_event = (void *)hid_test_insert2,
};
287
288 SEC("?struct_ops/hid_device_event")
BPF_PROG(hid_test_insert3,struct hid_bpf_ctx * hid_ctx,enum hid_report_type type)289 int BPF_PROG(hid_test_insert3, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
290 {
291 __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);
292
293 if (!data)
294 return 0; /* EPERM check */
295
296 /* at the end */
297 if (!data[1] || !data[2])
298 return -1;
299
300 data[3] = 3;
301
302 return 0;
303 }
304
/* struct_ops map for hid_test_insert3 (default ordering, no flags). */
SEC(".struct_ops.link")
struct hid_bpf_ops test_insert3 = {
	.hid_device_event = (void *)hid_test_insert3,
};
309
SEC("?struct_ops/hid_hw_request")
int BPF_PROG(hid_test_filter_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,
	     enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
{
	/* unconditionally veto the request with a distinctive error value */
	return -20;
}
316
/* struct_ops map attaching the raw-request filter hook. */
SEC(".struct_ops.link")
struct hid_bpf_ops test_filter_raw_request = {
	.hid_hw_request = (void *)hid_test_filter_raw_request,
};
321
/* Last struct file seen opening a hidraw node; the struct_ops hooks below
 * compare it against 'source' to recognize requests coming from our own
 * hidraw test operations.
 */
static struct file *current_file;

SEC("fentry/hidraw_open")
int BPF_PROG(hidraw_open, struct inode *inode, struct file *file)
{
	/* remember the opener's file pointer for the source checks below */
	current_file = file;
	return 0;
}
330
331 SEC("?struct_ops.s/hid_hw_request")
BPF_PROG(hid_test_hidraw_raw_request,struct hid_bpf_ctx * hctx,unsigned char reportnum,enum hid_report_type rtype,enum hid_class_request reqtype,__u64 source)332 int BPF_PROG(hid_test_hidraw_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,
333 enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
334 {
335 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
336 int ret;
337
338 if (!data)
339 return 0; /* EPERM check */
340
341 /* check if the incoming request comes from our hidraw operation */
342 if (source == (__u64)current_file) {
343 data[0] = reportnum;
344
345 ret = hid_bpf_hw_request(hctx, data, 2, rtype, reqtype);
346 if (ret != 2)
347 return -1;
348 data[0] = reportnum + 1;
349 data[1] = reportnum + 2;
350 data[2] = reportnum + 3;
351 return 3;
352 }
353
354 return 0;
355 }
356
/* struct_ops map attaching the hidraw-aware raw-request hook. */
SEC(".struct_ops.link")
struct hid_bpf_ops test_hidraw_raw_request = {
	.hid_hw_request = (void *)hid_test_hidraw_raw_request,
};
361
362 SEC("?struct_ops.s/hid_hw_request")
BPF_PROG(hid_test_infinite_loop_raw_request,struct hid_bpf_ctx * hctx,unsigned char reportnum,enum hid_report_type rtype,enum hid_class_request reqtype,__u64 source)363 int BPF_PROG(hid_test_infinite_loop_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum,
364 enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
365 {
366 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
367 int ret;
368
369 if (!data)
370 return 0; /* EPERM check */
371
372 /* always forward the request as-is to the device, hid-bpf should prevent
373 * infinite loops.
374 */
375 data[0] = reportnum;
376
377 ret = hid_bpf_hw_request(hctx, data, 2, rtype, reqtype);
378 if (ret == 2)
379 return 3;
380
381 return 0;
382 }
383
/* struct_ops map attaching the self-forwarding raw-request hook. */
SEC(".struct_ops.link")
struct hid_bpf_ops test_infinite_loop_raw_request = {
	.hid_hw_request = (void *)hid_test_infinite_loop_raw_request,
};
388
SEC("?struct_ops/hid_hw_output_report")
int BPF_PROG(hid_test_filter_output_report, struct hid_bpf_ctx *hctx, unsigned char reportnum,
	     enum hid_report_type rtype, enum hid_class_request reqtype, __u64 source)
{
	/* unconditionally veto the output report with a distinctive error value */
	return -25;
}
395
/* struct_ops map attaching the output-report filter hook. */
SEC(".struct_ops.link")
struct hid_bpf_ops test_filter_output_report = {
	.hid_hw_output_report = (void *)hid_test_filter_output_report,
};
400
401 SEC("?struct_ops.s/hid_hw_output_report")
BPF_PROG(hid_test_hidraw_output_report,struct hid_bpf_ctx * hctx,__u64 source)402 int BPF_PROG(hid_test_hidraw_output_report, struct hid_bpf_ctx *hctx, __u64 source)
403 {
404 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
405 int ret;
406
407 if (!data)
408 return 0; /* EPERM check */
409
410 /* check if the incoming request comes from our hidraw operation */
411 if (source == (__u64)current_file)
412 return hid_bpf_hw_output_report(hctx, data, 2);
413
414 return 0;
415 }
416
/* struct_ops map attaching the hidraw-aware output-report hook. */
SEC(".struct_ops.link")
struct hid_bpf_ops test_hidraw_output_report = {
	.hid_hw_output_report = (void *)hid_test_hidraw_output_report,
};
421
422 SEC("?struct_ops.s/hid_hw_output_report")
BPF_PROG(hid_test_infinite_loop_output_report,struct hid_bpf_ctx * hctx,__u64 source)423 int BPF_PROG(hid_test_infinite_loop_output_report, struct hid_bpf_ctx *hctx, __u64 source)
424 {
425 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);
426 int ret;
427
428 if (!data)
429 return 0; /* EPERM check */
430
431 /* always forward the request as-is to the device, hid-bpf should prevent
432 * infinite loops.
433 */
434
435 ret = hid_bpf_hw_output_report(hctx, data, 2);
436 if (ret == 2)
437 return 2;
438
439 return 0;
440 }
441
/* struct_ops map attaching the self-forwarding output-report hook. */
SEC(".struct_ops.link")
struct hid_bpf_ops test_infinite_loop_output_report = {
	.hid_hw_output_report = (void *)hid_test_infinite_loop_output_report,
};
446
/* Work item stored in hmap; bpf_wq lets a sleepable callback run outside
 * the event context.
 */
struct elem {
	struct bpf_wq work;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, int);		/* HID device id (see the callers below) */
	__type(value, struct elem);
} hmap SEC(".maps");
457
wq_cb_sleepable(void * map,int * key,void * work)458 static int wq_cb_sleepable(void *map, int *key, void *work)
459 {
460 __u8 buf[9] = {2, 3, 4, 5, 6, 7, 8, 9, 10};
461 struct hid_bpf_ctx *hid_ctx;
462
463 hid_ctx = hid_bpf_allocate_context(*key);
464 if (!hid_ctx)
465 return 0; /* EPERM check */
466
467 hid_bpf_input_report(hid_ctx, HID_INPUT_REPORT, buf, sizeof(buf));
468
469 hid_bpf_release_context(hid_ctx);
470
471 return 0;
472 }
473
test_inject_input_report_callback(int * key)474 static int test_inject_input_report_callback(int *key)
475 {
476 struct elem init = {}, *val;
477 struct bpf_wq *wq;
478
479 if (bpf_map_update_elem(&hmap, key, &init, 0))
480 return -1;
481
482 val = bpf_map_lookup_elem(&hmap, key);
483 if (!val)
484 return -2;
485
486 wq = &val->work;
487 if (bpf_wq_init(wq, &hmap, 0) != 0)
488 return -3;
489
490 if (bpf_wq_set_callback(wq, wq_cb_sleepable, 0))
491 return -4;
492
493 if (bpf_wq_start(wq, 0))
494 return -5;
495
496 return 0;
497 }
498
499 SEC("?struct_ops/hid_device_event")
BPF_PROG(hid_test_multiply_events_wq,struct hid_bpf_ctx * hid_ctx,enum hid_report_type type)500 int BPF_PROG(hid_test_multiply_events_wq, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
501 {
502 __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 9 /* size */);
503 int hid = hid_ctx->hid->id;
504 int ret;
505
506 if (!data)
507 return 0; /* EPERM check */
508
509 if (data[0] != 1)
510 return 0;
511
512 ret = test_inject_input_report_callback(&hid);
513 if (ret)
514 return ret;
515
516 data[1] += 5;
517
518 return 0;
519 }
520
/* struct_ops map attaching the workqueue-based event multiplier. */
SEC(".struct_ops.link")
struct hid_bpf_ops test_multiply_events_wq = {
	.hid_device_event = (void *)hid_test_multiply_events_wq,
};
525
/* Device-event hook: for report ID 1, synchronously injects a second event
 * (report ID 2, byte 1 bumped by 5) and also bumps byte 1 of the original.
 */
SEC("?struct_ops/hid_device_event")
int BPF_PROG(hid_test_multiply_events, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
{
	__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 9 /* size */);
	__u8 buf[9];
	int ret;

	if (!data)
		return 0; /* EPERM check */

	/* only react to report ID 1 */
	if (data[0] != 1)
		return 0;

	/*
	 * we have to use an intermediate buffer as hid_bpf_input_report
	 * will memset data to \0
	 */
	__builtin_memcpy(buf, data, sizeof(buf));

	buf[0] = 2;
	buf[1] += 5;
	ret = hid_bpf_try_input_report(hid_ctx, HID_INPUT_REPORT, buf, sizeof(buf));
	if (ret < 0)
		return ret;

	/*
	 * In real world we should reset the original buffer as data might be garbage now,
	 * but it actually now has the content of 'buf'
	 */
	data[1] += 5;

	return 9;
}
559
/* struct_ops map attaching the synchronous event multiplier. */
SEC(".struct_ops.link")
struct hid_bpf_ops test_multiply_events = {
	.hid_device_event = (void *)hid_test_multiply_events,
};
564
/* Device-event hook that re-injects every incoming event; relies on hid-bpf
 * to break the resulting recursion.
 */
SEC("?struct_ops/hid_device_event")
int BPF_PROG(hid_test_infinite_loop_input_report, struct hid_bpf_ctx *hctx,
	     enum hid_report_type report_type, __u64 source)
{
	__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 6 /* size */);
	__u8 buf[6];

	if (!data)
		return 0; /* EPERM check */

	/*
	 * we have to use an intermediate buffer as hid_bpf_input_report
	 * will memset data to \0
	 */
	__builtin_memcpy(buf, data, sizeof(buf));

	/* always forward the request as-is to the device, hid-bpf should prevent
	 * infinite loops.
	 * the return value is ignored so the event is passing to userspace.
	 */

	hid_bpf_try_input_report(hctx, report_type, buf, sizeof(buf));

	/* each time we process the event, we increment by one data[1]:
	 * after each successful call to hid_bpf_try_input_report, buf
	 * has been memcopied into data by the kernel.
	 */
	data[1] += 1;

	return 0;
}
596
/* struct_ops map attaching the self-injecting input-report hook. */
SEC(".struct_ops.link")
struct hid_bpf_ops test_infinite_loop_input_report = {
	.hid_device_event = (void *)hid_test_infinite_loop_input_report,
};
601