Searched refs:hctx (Results 1 – 25 of 44) sorted by relevance

/linux/block/
blk-mq.h
53 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
55 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
56 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
135 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
174 struct blk_mq_hw_ctx *hctx; member
199 struct blk_mq_hw_ctx *hctx) in bt_wait_ptr() argument
201 if (!hctx) in bt_wait_ptr()
203 return sbq_wait_ptr(bt, &hctx->wait_index); in bt_wait_ptr()
209 static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_busy() argument
211 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_tag_busy()
[all …]
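
The inline helpers above keep the hot path cheap: blk_mq_tag_busy() only drops into the out-of-line accounting when the tag set is shared across queues. A minimal sketch of that split, using only the names shown in the hits (the slow path lives in blk-mq-tag.c):

    /* Fast path: a single flag test; slow path is out of line. */
    static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
    {
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
            __blk_mq_tag_busy(hctx);
    }
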
blk-mq-sched.c
22 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_mark_restart_hctx() argument
24 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_mark_restart_hctx()
27 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_mark_restart_hctx()
31 void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in __blk_mq_sched_restart() argument
33 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in __blk_mq_sched_restart()
44 blk_mq_run_hw_queue(hctx, true); in __blk_mq_sched_restart()
58 struct blk_mq_hw_ctx *hctx = in blk_mq_dispatch_hctx_list() local
64 if (rq->mq_hctx != hctx) { in blk_mq_dispatch_hctx_list()
72 return blk_mq_dispatch_rq_list(hctx, &hctx_list, false); in blk_mq_dispatch_hctx_list()
85 static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) in __blk_mq_do_dispatch_sched() argument
[all …]
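
The two functions above form a restart handshake: a dispatcher that runs out of tags or budget marks the hctx, and a later completion consumes the mark and re-runs the queue. Both halves, condensed from the snippets above:

    /* Dispatch side: remember that this hctx wants to be restarted. */
    void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
    {
        if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
            set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
    }

    /* Completion side: consume the mark and kick the queue (async). */
    void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
    {
        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
        blk_mq_run_hw_queue(hctx, true);
    }
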
blk-mq.c
52 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
54 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
61 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending() argument
63 return !list_empty_careful(&hctx->dispatch) || in blk_mq_hctx_has_pending()
64 sbitmap_any_bit_set(&hctx->ctx_map) || in blk_mq_hctx_has_pending()
65 blk_mq_sched_has_work(hctx); in blk_mq_hctx_has_pending()
71 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
74 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_mark_pending()
76 if (!sbitmap_test_bit(&hctx->ctx_map, bit)) in blk_mq_hctx_mark_pending()
77 sbitmap_set_bit(&hctx->ctx_map, bit); in blk_mq_hctx_mark_pending()
80 blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx * hctx,struct blk_mq_ctx * ctx) blk_mq_hctx_clear_pending() argument
365 struct blk_mq_hw_ctx *hctx; blk_mq_wake_waiters() local
414 struct blk_mq_hw_ctx *hctx = data->hctx; blk_mq_rq_ctx_init() local
791 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; __blk_mq_free_request() local
1177 blk_mq_flush_tag_batch(struct blk_mq_hw_ctx * hctx,int * tag_array,int nr_tags) blk_mq_flush_tag_batch() argument
1439 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_execute_rq_nowait() local
1500 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_execute_rq() local
1732 struct blk_mq_hw_ctx *hctx; blk_mq_timeout_work() local
1785 struct blk_mq_hw_ctx *hctx; global() member
1792 struct blk_mq_hw_ctx *hctx = flush_data->hctx; flush_busy_ctx() local
1807 blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx * hctx,struct list_head * list) blk_mq_flush_busy_ctxs() argument
1818 struct blk_mq_hw_ctx *hctx; global() member
1826 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; dispatch_rq_from_ctx() local
1842 blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx * hctx,struct blk_mq_ctx * start) blk_mq_dequeue_from_ctx() argument
1885 struct blk_mq_hw_ctx *hctx; blk_mq_dispatch_wake() local
1909 blk_mq_mark_tag_wait(struct blk_mq_hw_ctx * hctx,struct request * rq) blk_mq_mark_tag_wait() argument
2003 blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx * hctx,bool busy) blk_mq_update_dispatch_busy() argument
2038 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_prep_dispatch_rq() local
2094 blk_mq_commit_rqs(struct blk_mq_hw_ctx * hctx,int queued,bool from_schedule) blk_mq_commit_rqs() argument
2106 blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx * hctx,struct list_head * list,bool get_budget) blk_mq_dispatch_rq_list() argument
2234 blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx * hctx) blk_mq_first_mapped_cpu() argument
2247 blk_mq_hctx_empty_cpumask(struct blk_mq_hw_ctx * hctx) blk_mq_hctx_empty_cpumask() argument
2258 blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx * hctx) blk_mq_hctx_next_cpu() argument
2306 blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx * hctx,unsigned long msecs) blk_mq_delay_run_hw_queue() argument
2315 blk_mq_hw_queue_need_run(struct blk_mq_hw_ctx * hctx) blk_mq_hw_queue_need_run() argument
2342 blk_mq_run_hw_queue(struct blk_mq_hw_ctx * hctx,bool async) blk_mq_run_hw_queue() argument
2395 struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT]; blk_mq_get_sq_hctx() local
2409 struct blk_mq_hw_ctx *hctx, *sq_hctx; blk_mq_run_hw_queues() local
2437 struct blk_mq_hw_ctx *hctx, *sq_hctx; blk_mq_delay_run_hw_queues() local
2475 blk_mq_stop_hw_queue(struct blk_mq_hw_ctx * hctx) blk_mq_stop_hw_queue() argument
2494 struct blk_mq_hw_ctx *hctx; blk_mq_stop_hw_queues() local
2502 blk_mq_start_hw_queue(struct blk_mq_hw_ctx * hctx) blk_mq_start_hw_queue() argument
2512 struct blk_mq_hw_ctx *hctx; blk_mq_start_hw_queues() local
2520 blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx * hctx,bool async) blk_mq_start_stopped_hw_queue() argument
2538 struct blk_mq_hw_ctx *hctx; blk_mq_start_stopped_hw_queues() local
2549 struct blk_mq_hw_ctx *hctx = blk_mq_run_work_fn() local
2566 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_request_bypass_insert() local
2576 blk_mq_insert_requests(struct blk_mq_hw_ctx * hctx,struct blk_mq_ctx * ctx,struct list_head * list,bool run_queue_async) blk_mq_insert_requests() argument
2617 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_insert_request() local
2700 __blk_mq_issue_directly(struct blk_mq_hw_ctx * hctx,struct request * rq,bool last) __blk_mq_issue_directly() argument
2758 blk_mq_try_issue_directly(struct blk_mq_hw_ctx * hctx,struct request * rq) blk_mq_try_issue_directly() argument
2792 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_request_issue_directly() local
2807 struct blk_mq_hw_ctx *hctx = NULL; blk_mq_issue_direct() local
2991 blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx * hctx,struct list_head * list) blk_mq_try_issue_list_directly() argument
3136 struct blk_mq_hw_ctx *hctx; blk_mq_submit_bio() local
3662 struct blk_mq_hw_ctx *hctx; global() member
3676 blk_mq_hctx_has_requests(struct blk_mq_hw_ctx * hctx) blk_mq_hctx_has_requests() argument
3692 blk_mq_hctx_has_online_cpu(struct blk_mq_hw_ctx * hctx,unsigned int this_cpu) blk_mq_hctx_has_online_cpu() argument
3720 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, blk_mq_hctx_notify_offline() local
3771 blk_mq_cpu_mapped_to_hctx(unsigned int cpu,const struct blk_mq_hw_ctx * hctx) blk_mq_cpu_mapped_to_hctx() argument
3781 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, blk_mq_hctx_notify_online() local
3796 struct blk_mq_hw_ctx *hctx; blk_mq_hctx_notify_dead() local
3826 __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx * hctx) __blk_mq_remove_cpuhp() argument
3844 blk_mq_remove_cpuhp(struct blk_mq_hw_ctx * hctx) blk_mq_remove_cpuhp() argument
3851 __blk_mq_add_cpuhp(struct blk_mq_hw_ctx * hctx) __blk_mq_add_cpuhp() argument
3867 struct blk_mq_hw_ctx *hctx; __blk_mq_remove_cpuhp_list() local
3904 struct blk_mq_hw_ctx *hctx; blk_mq_add_hw_queues_cpuhp() local
3943 blk_mq_exit_hctx(struct request_queue * q,struct blk_mq_tag_set * set,struct blk_mq_hw_ctx * hctx,unsigned int hctx_idx) blk_mq_exit_hctx() argument
3971 struct blk_mq_hw_ctx *hctx; blk_mq_exit_hw_queues() local
3984 blk_mq_init_hctx(struct request_queue * q,struct blk_mq_tag_set * set,struct blk_mq_hw_ctx * hctx,unsigned hctx_idx) blk_mq_init_hctx() argument
4020 struct blk_mq_hw_ctx *hctx; blk_mq_alloc_hctx() local
4085 struct blk_mq_hw_ctx *hctx; blk_mq_init_cpu_queues() local
4165 struct blk_mq_hw_ctx *hctx; blk_mq_map_swqueue() local
4281 struct blk_mq_hw_ctx *hctx; queue_set_hctx_shared() local
4381 struct blk_mq_hw_ctx *hctx, *next; blk_mq_release() local
4494 blk_mq_hctx_is_reusable(struct blk_mq_hw_ctx * hctx) blk_mq_hctx_is_reusable() argument
4504 struct blk_mq_hw_ctx *hctx = NULL, *tmp; blk_mq_alloc_and_init_hctx() local
4596 struct blk_mq_hw_ctx *hctx = hctxs[j]; __blk_mq_realloc_hw_ctxs() local
4978 struct blk_mq_hw_ctx *hctx; blk_mq_update_nr_requests() local
5192 blk_hctx_poll(struct request_queue * q,struct blk_mq_hw_ctx * hctx,struct io_comp_batch * iob,unsigned int flags) blk_hctx_poll() argument
5245 struct blk_mq_hw_ctx *hctx; blk_mq_cancel_work_sync() local
[all …]
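
Many of the blk-mq.c hits reduce to one question: does this hardware queue still have work? blk_mq_hctx_has_pending() (line 61 above) answers it from three sources, reproduced here with comments:

    static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
    {
        return !list_empty_careful(&hctx->dispatch) ||  /* deferred requests */
               sbitmap_any_bit_set(&hctx->ctx_map) ||   /* per-CPU sw queues */
               blk_mq_sched_has_work(hctx);             /* I/O scheduler */
    }
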
blk-mq-tag.c
41 void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_busy() argument
45 struct blk_mq_tags *tags = hctx->tags; in __blk_mq_tag_busy()
51 if (blk_mq_is_shared_tags(hctx->flags)) { in __blk_mq_tag_busy()
52 struct request_queue *q = hctx->queue; in __blk_mq_tag_busy()
58 if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) || in __blk_mq_tag_busy()
59 test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in __blk_mq_tag_busy()
84 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_idle() argument
86 struct blk_mq_tags *tags = hctx->tags; in __blk_mq_tag_idle()
89 if (blk_mq_is_shared_tags(hctx->flags)) { in __blk_mq_tag_idle()
90 struct request_queue *q = hctx->queue; in __blk_mq_tag_idle()
[all …]
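
__blk_mq_tag_busy() above branches on blk_mq_is_shared_tags() because the "active" state is tracked per queue for shared tag sets and per hctx otherwise. A hedged sketch of the idle-side counterpart under that reading (reconstructed, not excerpted; the active_queues bookkeeping may differ in detail):

    void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
    {
        struct blk_mq_tags *tags = hctx->tags;

        if (blk_mq_is_shared_tags(hctx->flags)) {
            struct request_queue *q = hctx->queue;

            if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                return;
        } else {
            if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;
        }

        atomic_dec(&tags->active_queues);   /* one fewer active sharer */
        blk_mq_tag_wakeup_all(tags, false); /* let waiters recompute share */
    }
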
blk-mq-debugfs.h
25 struct blk_mq_hw_ctx *hctx);
26 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
33 struct blk_mq_hw_ctx *hctx);
34 void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
44 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_hctx() argument
48 static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_hctx() argument
69 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_sched_hctx() argument
73 static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_sched_hctx() argument
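
The prototype/stub pairs above are the usual config-stub idiom: real implementations when CONFIG_BLK_DEBUG_FS is enabled, empty static inlines otherwise, so callers never need #ifdef. The shape, as a sketch:

    #ifdef CONFIG_BLK_DEBUG_FS
    void blk_mq_debugfs_register_hctx(struct request_queue *q,
                                      struct blk_mq_hw_ctx *hctx);
    #else
    static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
                                                    struct blk_mq_hw_ctx *hctx)
    {
    }
    #endif
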
blk-flush.c
341 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in mq_flush_data_end_io() local
365 blk_mq_sched_restart(hctx); in mq_flush_data_end_io()
533 void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_set_fq_lock_class() argument
536 lockdep_set_class(&hctx->fq->mq_flush_lock, key); in blk_mq_hctx_set_fq_lock_class()
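
blk_mq_hctx_set_fq_lock_class() above lets a driver assign each hctx's flush-queue lock a dedicated lockdep class. A hypothetical caller, wired through the blk_mq_ops ->init_hctx() hook (the driver name is illustrative, not from these results):

    static struct lock_class_key mydrv_fq_lock_key;

    static int mydrv_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                               unsigned int hctx_idx)
    {
        /* avoid false-positive lockdep reports on nested flush locks */
        blk_mq_hctx_set_fq_lock_class(hctx, &mydrv_fq_lock_key);
        return 0;
    }
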
/linux/samples/hid/
hid_mouse.bpf.c
8 static int hid_y_event(struct hid_bpf_ctx *hctx) in hid_y_event() argument
11 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */); in hid_y_event()
16 bpf_printk("event: size: %d", hctx->size); in hid_y_event()
53 static int hid_x_event(struct hid_bpf_ctx *hctx) in hid_x_event() argument
56 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */); in hid_x_event()
71 int BPF_PROG(hid_event, struct hid_bpf_ctx *hctx, enum hid_report_type type) in BPF_PROG() argument
73 int ret = hid_y_event(hctx); in BPF_PROG()
78 return hid_x_event(hctx); in BPF_PROG()
83 int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
85 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */); in BPF_PROG()
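
hid_y_event() above shows the core HID-BPF event pattern: map a window of the report with hid_bpf_get_data(), bail out if it is unavailable, then patch bytes in place. A minimal sketch under the sample's 9-byte mouse report (the Y-delta offset and the struct_ops section name are assumptions; includes and registration boilerplate are omitted):

    SEC("struct_ops/hid_device_event")
    int BPF_PROG(invert_y, struct hid_bpf_ctx *hctx, enum hid_report_type type)
    {
        __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */);

        if (!data)
            return 0;          /* leave the event untouched */

        data[3] = -data[3];    /* assumed offset of the signed Y delta */
        return 0;
    }
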
hid_surface_dial.bpf.c
14 int BPF_PROG(hid_event, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
16 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */); in BPF_PROG()
105 int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
107 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */); in BPF_PROG()
/linux/drivers/hid/bpf/progs/
XPPen__ACK05.bpf.c
218 int BPF_PROG(ack05_fix_rdesc, struct hid_bpf_ctx *hctx) in SEC()
220 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */); in SEC()
221 __s32 rdesc_size = hctx->size; in SEC()
235 hid_set_name(hctx->hid, "Disabled by HID-BPF Hanvon Ugee Shortcut Remote"); in SEC()
265 int BPF_PROG(ack05_fix_events, struct hid_bpf_ctx *hctx) in SEC()
267 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, PAD_REPORT_LENGTH); in SEC()
278 HID_BPF_ASYNC_DELAYED_CALL(switch_to_raw_mode, hctx, 10); in SEC()
315 struct hid_bpf_ctx *hctx = hid_bpf_allocate_context(ctx->hid); in probe() local
317 if (!hctx) { in probe()
323 switch_to_raw_mode(hctx); in probe()
[all …]
XPPen__DecoMini4.bpf.c
153 int BPF_PROG(hid_rdesc_fixup_xppen_deco_mini_4, struct hid_bpf_ctx *hctx) in SEC()
155 __u8 *data = hid_bpf_get_data(hctx, 0, HID_MAX_DESCRIPTOR_SIZE); in SEC()
160 if (hctx->size == RDESC_SIZE_PAD) { in SEC()
163 } else if (hctx->size == RDESC_SIZE_PEN) { in SEC()
172 int BPF_PROG(hid_device_event_xppen_deco_mini_4, struct hid_bpf_ctx *hctx) in SEC()
174 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 8 /* size */); in SEC()
FR-TEC__Raptor-Mach-2.bpf.c
137 int BPF_PROG(hid_fix_rdesc_raptor_mach_2, struct hid_bpf_ctx *hctx) in SEC()
139 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */); in SEC()
156 int BPF_PROG(raptor_mach_2_fix_hat_switch, struct hid_bpf_ctx *hctx) in SEC()
158 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 64 /* size */); in SEC()
XPPen__Artist24.bpf.c
93 int BPF_PROG(hid_fix_rdesc_xppen_artist24, struct hid_bpf_ctx *hctx) in SEC()
95 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */); in SEC()
154 int BPF_PROG(xppen_24_fix_eraser, struct hid_bpf_ctx *hctx) in SEC()
156 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */); in SEC()
Thrustmaster__TCA-Yoke-Boeing.bpf.c
100 int BPF_PROG(hid_fix_rdesc_tca_yoke, struct hid_bpf_ctx *hctx) in SEC()
104 if (hctx->size != expected_length) in SEC()
107 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */); in SEC()
HP__Elite-Presenter.bpf.c
34 int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx) in SEC()
36 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */); in SEC()
IOGEAR__Kaliber-MMOmentum.bpf.c
25 int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx) in SEC()
28 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */); in SEC()
Microsoft__Xbox-Elite-2.bpf.c
96 int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx) in SEC()
98 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */); in SEC()
Wacom__ArtPen.bpf.c
105 int BPF_PROG(artpen_pressure_interpolate, struct hid_bpf_ctx *hctx) in SEC()
107 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, PEN_REPORT_LEN /* size */); in SEC()
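
Each fixup above follows one template: request the descriptor window (up to HID_MAX_DESCRIPTOR_SIZE), verify it, patch bytes, then return 0 to keep the original size or a positive value for a new size. A hedged template (the checked and patched bytes are hypothetical):

    SEC("struct_ops/hid_rdesc_fixup")
    int BPF_PROG(example_rdesc_fixup, struct hid_bpf_ctx *hctx)
    {
        __u8 *data = hid_bpf_get_data(hctx, 0, HID_MAX_DESCRIPTOR_SIZE);

        if (!data)
            return 0;              /* keep the original descriptor */

        if (data[0] == 0x05 && data[1] == 0x01) /* hypothetical match */
            data[1] = 0x0c;                     /* hypothetical patch */

        return 0;                  /* 0: descriptor size unchanged */
    }
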
/linux/tools/testing/selftests/hid/progs/
hid.c
311 int BPF_PROG(hid_test_filter_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum, in BPF_PROG() argument
332 int BPF_PROG(hid_test_hidraw_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum, in BPF_PROG() argument
335 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */); in BPF_PROG()
345 ret = hid_bpf_hw_request(hctx, data, 2, rtype, reqtype); in BPF_PROG()
363 int BPF_PROG(hid_test_infinite_loop_raw_request, struct hid_bpf_ctx *hctx, unsigned char reportnum, in BPF_PROG() argument
366 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */); in BPF_PROG()
377 ret = hid_bpf_hw_request(hctx, data, 2, rtype, reqtype); in BPF_PROG()
390 int BPF_PROG(hid_test_filter_output_report, struct hid_bpf_ctx *hctx, unsigned char reportnum, in BPF_PROG() argument
402 int BPF_PROG(hid_test_hidraw_output_report, struct hid_bpf_ctx *hctx, __u64 source) in BPF_PROG() argument
404 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */); in BPF_PROG()
[all …]
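
The selftests above use hid_bpf_hw_request() for synchronous report round-trips. A hedged sketch of the GET_REPORT shape they exercise; the helper can sleep, so it must be called from a sleepable hook (hook wiring omitted; the report ID and sizes are illustrative):

    static int fetch_feature_report(struct hid_bpf_ctx *hctx)
    {
        __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 3 /* size */);

        if (!data)
            return -1;     /* report window not available */

        data[0] = 1;       /* report ID to query */
        return hid_bpf_hw_request(hctx, data, 2, HID_FEATURE_REPORT,
                                  HID_REQ_GET_REPORT);
    }
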
/linux/drivers/char/tpm/
tpm2-sessions.c
442 struct hmac_sha256_ctx hctx; in tpm2_KDFa() local
445 hmac_sha256_init_usingrawkey(&hctx, key, key_len); in tpm2_KDFa()
446 hmac_sha256_update(&hctx, (u8 *)&c, sizeof(c)); in tpm2_KDFa()
447 hmac_sha256_update(&hctx, label, strlen(label) + 1); in tpm2_KDFa()
448 hmac_sha256_update(&hctx, u, SHA256_DIGEST_SIZE); in tpm2_KDFa()
449 hmac_sha256_update(&hctx, v, SHA256_DIGEST_SIZE); in tpm2_KDFa()
450 hmac_sha256_update(&hctx, (u8 *)&bits, sizeof(bits)); in tpm2_KDFa()
451 hmac_sha256_final(&hctx, out); in tpm2_KDFa()
593 struct hmac_sha256_ctx hctx; in tpm_buf_fill_hmac_session() local
715 hmac_sha256_init_usingrawkey(&hctx, auth->session_key, in tpm_buf_fill_hmac_session()
[all …]
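
The tpm2_KDFa() excerpt above is an SP800-108 counter-mode KDF over HMAC-SHA256: each output block hashes a big-endian counter, the NUL-terminated label, the party values u and v, and the total output length in bits. One round, reassembled from the lines shown (the wrapper signature is this sketch's own; the hmac_sha256_* calls are the kernel's library API):

    static void kdfa_round(const u8 *key, size_t key_len, const char *label,
                           const u8 *u, const u8 *v, u32 bits_out, u32 i,
                           u8 out[SHA256_DIGEST_SIZE])
    {
        struct hmac_sha256_ctx hctx;
        __be32 c = cpu_to_be32(i);           /* block counter */
        __be32 bits = cpu_to_be32(bits_out); /* total output bits */

        hmac_sha256_init_usingrawkey(&hctx, key, key_len);
        hmac_sha256_update(&hctx, (const u8 *)&c, sizeof(c));
        hmac_sha256_update(&hctx, (const u8 *)label, strlen(label) + 1);
        hmac_sha256_update(&hctx, u, SHA256_DIGEST_SIZE);
        hmac_sha256_update(&hctx, v, SHA256_DIGEST_SIZE);
        hmac_sha256_update(&hctx, (const u8 *)&bits, sizeof(bits));
        hmac_sha256_final(&hctx, out);
    }
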
/linux/include/linux/
blk-mq.h
929 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
930 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
933 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
940 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
941 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
1020 struct blk_mq_hw_ctx *hctx; in queue_hctx() local
1023 hctx = rcu_dereference(q->queue_hw_ctx)[id]; in queue_hctx()
1026 return hctx; in queue_hctx()
1029 #define queue_for_each_hw_ctx(q, hctx, i) \ argument
1031 ({ hctx = queue_hctx((q), i); 1; }); (i)++)
[all …]
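
queue_hctx() and queue_for_each_hw_ctx() above are the public way to walk a queue's hardware contexts. Typical use, as a sketch (mirrors what blk_mq_run_hw_queues() does; the index type follows nr_hw_queues):

    struct blk_mq_hw_ctx *hctx;
    unsigned int i;

    queue_for_each_hw_ctx(q, hctx, i)
        if (!blk_mq_hctx_stopped(hctx))
            blk_mq_run_hw_queue(hctx, true /* async */);
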
/linux/net/dccp/ccids/
ccid3.h
/linux/drivers/block/null_blk/
main.c
1578 static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) in null_poll() argument
1580 struct nullb_queue *nq = hctx->driver_data; in null_poll()
1611 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in null_timeout_rq() local
1614 if (hctx->type == HCTX_TYPE_POLL) { in null_timeout_rq()
1615 struct nullb_queue *nq = hctx->driver_data; in null_timeout_rq()
1637 if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL) in null_timeout_rq()
1642 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, in null_queue_rq() argument
1647 struct nullb_queue *nq = hctx->driver_data; in null_queue_rq()
1650 const bool is_poll = hctx->type == HCTX_TYPE_POLL; in null_queue_rq()
1652 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); in null_queue_rq()
[all …]
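
null_queue_rq() above is a compact model of a ->queue_rq() implementation, down to the might_sleep_if() assertion for BLK_MQ_F_BLOCKING queues. A skeleton in the same shape (hypothetical driver; the hardware handoff is elided):

    static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                       const struct blk_mq_queue_data *bd)
    {
        struct request *rq = bd->rq;

        might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

        blk_mq_start_request(rq);
        /* ...submit rq to hardware; the completion path ends with
         *    blk_mq_end_request(rq, BLK_STS_OK); */
        return BLK_STS_OK;
    }
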
/linux/drivers/infiniband/hw/bnxt_re/
main.c
2073 struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx; in bnxt_re_get_stats_ctx() local
2077 rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats); in bnxt_re_get_stats_ctx()
2081 rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats); in bnxt_re_get_stats_ctx()
2087 bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats); in bnxt_re_get_stats_ctx()
2094 struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx; in bnxt_re_get_stats3_ctx() local
2101 rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats3); in bnxt_re_get_stats3_ctx()
2105 rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats3); in bnxt_re_get_stats3_ctx()
2111 bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3); in bnxt_re_get_stats3_ctx()
2118 struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx; in bnxt_re_put_stats3_ctx() local
2124 bnxt_re_net_stats_ctx_free(rdev, hctx->stats3.fw_id); in bnxt_re_put_stats3_ctx()
[all …]
/linux/Documentation/translations/zh_CN/block/
blk-mq.rst
95 If requests cannot be sent directly to the hardware, they are added to the request list (``hctx->dispatch``).
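
In code, that deferral is a locked splice onto hctx->dispatch, roughly (fragment based on the requeue path of blk_mq_dispatch_rq_list()):

    spin_lock(&hctx->lock);
    list_splice_tail_init(list, &hctx->dispatch);
    spin_unlock(&hctx->lock);
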
/linux/drivers/block/rnbd/
rnbd-clt.h
105 struct blk_mq_hw_ctx *hctx; member
