Lines matching refs: ctx_kern. Each entry below shows the source line number, the matching line, and the enclosing function; all of these functions live in the kernel's HID-BPF dispatch code (drivers/hid/bpf/hid_bpf_dispatch.c).
29 struct hid_bpf_ctx_kern ctx_kern = { in dispatch_hid_bpf_device_event() local
48 memset(ctx_kern.data, 0, hdev->bpf.allocated_data); in dispatch_hid_bpf_device_event()
49 memcpy(ctx_kern.data, data, *size); in dispatch_hid_bpf_device_event()
54 ret = e->hid_device_event(&ctx_kern.ctx, type, source); in dispatch_hid_bpf_device_event()
61 ctx_kern.ctx.size = ret; in dispatch_hid_bpf_device_event()
66 ret = ctx_kern.ctx.size; in dispatch_hid_bpf_device_event()
68 if (ret > ctx_kern.ctx.allocated_size) in dispatch_hid_bpf_device_event()
74 return ctx_kern.data; in dispatch_hid_bpf_device_event()
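Every fragment in this group leans on one pattern: the BPF-visible struct hid_bpf_ctx is embedded as the first member of a kernel-private wrapper, so the dispatcher can hand &ctx_kern.ctx to programs and later recover the wrapper. A minimal sketch follows, assuming the wrapper layout below and condensing the elided control flow (the program-list walk, the error paths, and the helper name device_event_flow are illustrations, not verbatim kernel source):

/* assumed layout; ctx must stay the first member so recovering the
 * wrapper via container_of() works everywhere below */
struct hid_bpf_ctx_kern {
	struct hid_bpf_ctx ctx;	/* what the BPF program sees */
	u8 *data;		/* backing buffer for the report bytes */
	bool from_bpf;		/* set when the call originated in BPF */
};

/* hypothetical condensation of dispatch_hid_bpf_device_event() */
static u8 *device_event_flow(struct hid_device *hdev, enum hid_report_type type,
			     u8 *data, u32 *size, u64 source, struct hid_bpf_ops *e)
{
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.allocated_size = hdev->bpf.allocated_data,
			.size = *size,
		},
		.data = hdev->bpf.device_data,	/* preallocated per-device buffer */
	};
	int ret;

	/* stage the incoming report in the shared buffer (source lines 48-49) */
	memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
	memcpy(ctx_kern.data, data, *size);

	/* the real code runs this once per attached struct hid_bpf_ops */
	ret = e->hid_device_event(&ctx_kern.ctx, type, source);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret)
		ctx_kern.ctx.size = ret;	/* the program resized the report */

	/* never trust a size larger than the buffer backing it (source line 68) */
	ret = ctx_kern.ctx.size;
	if (ret > ctx_kern.ctx.allocated_size)
		return ERR_PTR(-EINVAL);

	*size = ret;
	return ctx_kern.data;	/* the caller consumes the mutated copy */
}

The memset/memcpy pair matters because a program may legitimately grow a report up to allocated_size, so the tail of the shared buffer has to be zeroed rather than left holding bytes from the previous event.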
84 struct hid_bpf_ctx_kern ctx_kern = { in dispatch_hid_bpf_raw_requests() local
105 ret = e->hid_hw_request(&ctx_kern.ctx, reportnum, rtype, reqtype, source); in dispatch_hid_bpf_raw_requests()
121 struct hid_bpf_ctx_kern ctx_kern = { in dispatch_hid_bpf_output_report() local
139 ret = e->hid_hw_output_report(&ctx_kern.ctx, source); in dispatch_hid_bpf_output_report()
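The two request-style dispatchers (source lines 84-105 and 121-139) build the same wrapper on the stack but skip the staging copy. A sketch under those assumptions; the signature and the helper name raw_request_flow are hypothetical:

/* hypothetical condensation of dispatch_hid_bpf_raw_requests() */
static int raw_request_flow(struct hid_device *hdev, unsigned char reportnum,
			    u8 *buf, u32 size, enum hid_report_type rtype,
			    enum hid_class_request reqtype, u64 source,
			    bool from_bpf, struct hid_bpf_ops *e)
{
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.allocated_size = size,
			.size = size,
		},
		.data = buf,		/* no copy: hooks edit the caller's buffer */
		.from_bpf = from_bpf,	/* lets the kfuncs refuse nested I/O */
	};

	/* dispatch_hid_bpf_output_report() is the same shape, invoking
	 * e->hid_hw_output_report(&ctx_kern.ctx, source) instead */
	return e->hid_hw_request(&ctx_kern.ctx, reportnum, rtype, reqtype, source);
}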
154 struct hid_bpf_ctx_kern ctx_kern = { in call_hid_bpf_rdesc_fixup() local
165 ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL); in call_hid_bpf_rdesc_fixup()
166 if (!ctx_kern.data) in call_hid_bpf_rdesc_fixup()
169 memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE)); in call_hid_bpf_rdesc_fixup()
171 ret = hdev->bpf.rdesc_ops->hid_rdesc_fixup(&ctx_kern.ctx); in call_hid_bpf_rdesc_fixup()
176 if (ret > ctx_kern.ctx.allocated_size) in call_hid_bpf_rdesc_fixup()
182 return krealloc(ctx_kern.data, *size, GFP_KERNEL); in call_hid_bpf_rdesc_fixup()
185 kfree(ctx_kern.data); in call_hid_bpf_rdesc_fixup()
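call_hid_bpf_rdesc_fixup() is the one caller that owns its buffer: the report descriptor is copied into a freshly allocated worst-case buffer, the fixup runs, and the result is either shrunk to its final size or thrown away. A sketch of that lifecycle; the ignore_bpf label and the fallback path are assumptions inferred from the kfree() at source line 185:

	/* allocated_size is presumably HID_MAX_DESCRIPTOR_SIZE, given the
	 * clamp in the memcpy below */
	ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
	if (!ctx_kern.data)
		goto ignore_bpf;

	/* clamp the copy so an oversized descriptor cannot overrun the buffer */
	memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));

	ret = hdev->bpf.rdesc_ops->hid_rdesc_fixup(&ctx_kern.ctx);
	if (ret < 0)
		goto ignore_bpf;
	if (ret) {
		if (ret > ctx_kern.ctx.allocated_size)
			goto ignore_bpf;	/* impossible size reported */
		*size = ret;
	}

	/* trim the worst-case allocation down to the final descriptor size */
	return krealloc(ctx_kern.data, *size, GFP_KERNEL);

ignore_bpf:
	kfree(ctx_kern.data);	/* kfree(NULL) is a no-op, so this is always safe */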
287 struct hid_bpf_ctx_kern *ctx_kern; in hid_bpf_get_data() local
292 ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx); in hid_bpf_get_data()
297 return ctx_kern->data + offset; in hid_bpf_get_data()
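hid_bpf_get_data() shows the reverse direction: a kfunc receives the BPF-visible ctx pointer back from a program and uses container_of() to recover the private wrapper, gating access with a bounds check before exposing the backing buffer. A sketch that stays close to the fragments; the NULL and bounds checks are reconstructions:

__u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset,
		       const size_t rdwr_buf_size)
{
	struct hid_bpf_ctx_kern *ctx_kern;

	if (!ctx)
		return NULL;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

	/* the requested window must fit inside the backing buffer */
	if (rdwr_buf_size + offset > ctx->allocated_size)
		return NULL;

	return ctx_kern->data + offset;	/* source line 297 */
}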
311 struct hid_bpf_ctx_kern *ctx_kern = NULL; in hid_bpf_allocate_context() local
317 ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL); in hid_bpf_allocate_context()
318 if (!ctx_kern) { in hid_bpf_allocate_context()
323 ctx_kern->ctx.hid = hdev; in hid_bpf_allocate_context()
325 return &ctx_kern->ctx; in hid_bpf_allocate_context()
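hid_bpf_allocate_context() is the one place the wrapper lives on the heap instead of the stack, because a BPF program may hold the context across invocations. A sketch of the allocation tail; the device lookup and its reference counting are assumptions:

	ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
	if (!ctx_kern) {
		/* presumably drops the reference taken by the device lookup */
		put_device(dev);
		return NULL;
	}

	ctx_kern->ctx.hid = hdev;

	return &ctx_kern->ctx;	/* BPF only ever sees the embedded ctx */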
337 struct hid_bpf_ctx_kern *ctx_kern; in hid_bpf_release_context() local
340 ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx); in hid_bpf_release_context()
341 hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */ in hid_bpf_release_context()
343 kfree(ctx_kern); in hid_bpf_release_context()
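hid_bpf_release_context() is the matching teardown. The cast on source line 341 suggests ctx.hid is const-qualified toward BPF; the kernel strips that qualifier to drop the device reference. A sketch, with the final put_device() being an assumption:

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	hid = (struct hid_device *)ctx_kern->ctx.hid;	/* ignore const */

	kfree(ctx_kern);
	/* presumably pairs with the reference taken in hid_bpf_allocate_context() */
	put_device(&hid->dev);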
401 struct hid_bpf_ctx_kern *ctx_kern; in hid_bpf_hw_request() local
406 ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx); in hid_bpf_hw_request()
408 if (ctx_kern->from_bpf) in hid_bpf_hw_request()
460 struct hid_bpf_ctx_kern *ctx_kern; in hid_bpf_hw_output_report() local
465 ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx); in hid_bpf_hw_output_report()
466 if (ctx_kern->from_bpf) in hid_bpf_hw_output_report()
488 struct hid_bpf_ctx_kern *ctx_kern; in __hid_bpf_input_report() local
491 ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx); in __hid_bpf_input_report()
492 if (ctx_kern->from_bpf) in __hid_bpf_input_report()
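The three fragments at source lines 408, 466 and 492 are the same guard repeated in hid_bpf_hw_request(), hid_bpf_hw_output_report() and __hid_bpf_input_report(): if the context was built on behalf of a BPF program, nested hardware I/O from inside the hook is refused instead of recursing back into the dispatchers. A sketch; the exact errno is an assumption:

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

	/* a hook that re-enters the HID transport could deadlock or recurse;
	 * the errno used here is an assumption */
	if (ctx_kern->from_bpf)
		return -EDEADLOCK;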
519 struct hid_bpf_ctx_kern *ctx_kern; in hid_bpf_try_input_report() local
522 ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx); in hid_bpf_try_input_report()
523 from_hid_event_hook = ctx_kern->data && ctx_kern->data == ctx->hid->bpf.device_data; in hid_bpf_try_input_report()
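Finally, hid_bpf_try_input_report() needs to know whether it was re-entered from the device-event hook. The test on source line 523 works because only dispatch_hid_bpf_device_event() points ctx_kern->data at the preallocated hdev->bpf.device_data buffer; every other path uses a caller buffer or a heap allocation. Restated as a sketch (what the flag is then used for, e.g. lock handling, is elided here):

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

	/* .data aliases the per-device buffer only on the event-hook path */
	from_hid_event_hook = ctx_kern->data &&
			      ctx_kern->data == ctx->hid->bpf.device_data;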