
Searched refs:rq (Results 1 – 25 of 512) sorted by relevance


/linux/drivers/scsi/fnic/
vnic_rq.c
15 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
18 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
22 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs()
23 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
30 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
33 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
34 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
36 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
39 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
47 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
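The vnic_rq_alloc_bufs() hits above show the driver carving its buffer bookkeeping into fixed-size blocks, pointing each buf->desc into the descriptor ring, and chaining buf->next into a circular list whose tail wraps back to bufs[0]. A minimal userspace sketch of that ring-of-buffer-blocks pattern follows; the names (ring, ring_buf, BUFS_PER_BLK) are illustrative, not the driver's, and error cleanup is left to the caller.

#include <stdlib.h>

#define BUFS_PER_BLK 64

struct ring_buf {
    void *desc;              /* points into the contiguous descriptor memory */
    unsigned int index;      /* descriptor index */
    struct ring_buf *next;   /* next buffer; the last one wraps to the first */
};

struct ring {
    unsigned int desc_count;
    size_t desc_size;
    void *descs;             /* descriptor ring memory, allocated by the caller */
    struct ring_buf **bufs;  /* array of buffer blocks */
    struct ring_buf *to_use;
    struct ring_buf *to_clean;
};

static int ring_alloc_bufs(struct ring *rq)
{
    unsigned int blks = (rq->desc_count + BUFS_PER_BLK - 1) / BUFS_PER_BLK;
    unsigned int i, j, idx = 0;

    rq->bufs = calloc(blks, sizeof(*rq->bufs));
    if (!rq->bufs)
        return -1;

    for (i = 0; i < blks; i++) {
        rq->bufs[i] = calloc(BUFS_PER_BLK, sizeof(struct ring_buf));
        if (!rq->bufs[i])
            return -1;  /* sketch: caller frees the partial allocation */
    }

    for (i = 0; i < blks; i++) {
        struct ring_buf *buf = rq->bufs[i];

        for (j = 0; j < BUFS_PER_BLK && idx < rq->desc_count; j++, buf++, idx++) {
            buf->index = idx;
            buf->desc = (char *)rq->descs + rq->desc_size * idx;
            if (idx + 1 == rq->desc_count)
                buf->next = rq->bufs[0];        /* last buffer wraps to the first */
            else if (j + 1 == BUFS_PER_BLK)
                buf->next = rq->bufs[i + 1];    /* step into the next block */
            else
                buf->next = buf + 1;            /* next buffer in this block */
        }
    }

    rq->to_use = rq->to_clean = rq->bufs[0];    /* both cursors start at buffer 0 */
    return 0;
}

int main(void)
{
    struct ring rq = { .desc_count = 128, .desc_size = 16 };

    rq.descs = calloc(rq.desc_count, rq.desc_size);
    return rq.descs ? ring_alloc_bufs(&rq) : 1;
}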
vnic_rq.h
93 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
96 return rq->ring.desc_avail; in vnic_rq_desc_avail()
99 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
102 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
105 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
107 return rq->to_use->desc; in vnic_rq_next_desc()
110 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
112 return rq->to_use->index; in vnic_rq_next_index()
115 static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) in vnic_rq_next_buf_index() argument
117 return rq->buf_index++; in vnic_rq_next_buf_index()
[all …]
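Note the vnic_rq_desc_used() hit above: used descriptors are computed as desc_count - desc_avail - 1. A plausible reading (an assumption, not stated in this header) is that one descriptor is always held in reserve so a completely full ring never looks identical to an empty one. The classic head/tail form of that arithmetic, with illustrative names and a power-of-two ring size, is sketched below.

#include <assert.h>

struct idx_ring {
    unsigned int size;   /* total slots; one is kept in reserve */
    unsigned int head;   /* next slot software will post */
    unsigned int tail;   /* next slot the consumer will clean */
};

static unsigned int ring_used(const struct idx_ring *r)
{
    return (r->head - r->tail) & (r->size - 1);   /* size must be a power of two */
}

static unsigned int ring_avail(const struct idx_ring *r)
{
    /* one slot stays unused: "full" means used == size - 1, never head == tail */
    return r->size - 1 - ring_used(r);
}

int main(void)
{
    struct idx_ring r = { .size = 16, .head = 0, .tail = 0 };

    assert(ring_avail(&r) == 15);                   /* empty ring reports size - 1 */
    r.head = 15;                                    /* post 15 descriptors */
    assert(ring_used(&r) == 15 && ring_avail(&r) == 0);
    return 0;
}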
/linux/drivers/net/ethernet/cisco/enic/
vnic_rq.c
18 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
21 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
25 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL); in vnic_rq_alloc_bufs()
26 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
31 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
34 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
35 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
37 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
40 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
48 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
vnic_rq.h
86 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
89 return rq->ring.desc_avail; in vnic_rq_desc_avail()
92 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
95 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
98 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
100 return rq->to_use->desc; in vnic_rq_next_desc()
103 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
105 return rq->to_use->index; in vnic_rq_next_index()
108 static inline void vnic_rq_post(struct vnic_rq *rq, in vnic_rq_post() argument
113 struct vnic_rq_buf *buf = rq->to_use; in vnic_rq_post()
[all …]
/linux/kernel/sched/
sched.h
83 struct rq;
107 extern void calc_global_load_tick(struct rq *this_rq);
108 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
110 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
364 extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec);
415 extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
419 extern void fair_server_init(struct rq *rq);
420 extern void ext_server_init(struct rq *rq);
421 extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq);
674 void (*func)(struct rq *rq);
[all …]
pelt.h
11 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
12 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
13 bool update_other_load_avgs(struct rq *rq);
16 int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity);
18 static inline u64 hw_load_avg(struct rq *rq) in hw_load_avg() argument
20 return READ_ONCE(rq->avg_hw.load_avg); in hw_load_avg()
24 update_hw_load_avg(u64 now, struct rq *rq, u64 capacity) in update_hw_load_avg() argument
29 static inline u64 hw_load_avg(struct rq *rq) in hw_load_avg() argument
36 int update_irq_load_avg(struct rq *rq, u64 running);
39 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
[all …]
deadline.c
71 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq()
73 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
76 static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se) in rq_of_dl_se()
78 struct rq *rq = dl_se->rq; in rq_of_dl_se() local
81 rq = task_rq(dl_task_of(dl_se)); in rq_of_dl_se()
83 return rq; in rq_of_dl_se()
183 struct rq *rq = cpu_rq(i); in __dl_update() local
185 rq->dl.extra_bw += bw; in __dl_update()
288 static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_se, u64 new_bw) in dl_rq_change_utilization() argument
291 sub_running_bw(dl_se, &rq->dl); in dl_rq_change_utilization()
[all …]
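rq_of_dl_rq() above recovers the enclosing struct rq from a pointer to its embedded dl_rq via container_of(). A self-contained userspace sketch of the same pointer arithmetic follows; struct outer/struct inner are illustrative stand-ins, and this macro omits the kernel version's extra type checking.

#include <stddef.h>
#include <stdio.h>

/* Given a pointer to a member embedded in a larger struct, subtract the
 * member's offset to recover the enclosing object. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner {
    int value;
};

struct outer {
    int id;
    struct inner in;   /* embedded member, like dl_rq inside struct rq */
};

static struct outer *outer_of_inner(struct inner *in)
{
    return container_of(in, struct outer, in);
}

int main(void)
{
    struct outer o = { .id = 42, .in = { .value = 7 } };

    printf("id=%d value=%d\n", outer_of_inner(&o.in)->id, o.in.value);  /* id=42 value=7 */
    return 0;
}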
stats.h
13 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
15 if (rq) { in rq_sched_info_arrive()
16 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
17 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
25 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
27 if (rq) in rq_sched_info_depart()
28 rq->rq_cpu_time += delta; in rq_sched_info_depart()
32 rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeue() argument
72 rq_sched_info_arrive(struct rq * rq,unsigned long long delta) rq_sched_info_arrive() argument
73 rq_sched_info_dequeue(struct rq * rq,unsigned long long delta) rq_sched_info_dequeue() argument
74 rq_sched_info_depart(struct rq * rq,unsigned long long delta) rq_sched_info_depart() argument
85 __update_stats_wait_start(rq,p,stats) global() argument
86 __update_stats_wait_end(rq,p,stats) global() argument
87 __update_stats_enqueue_sleeper(rq,p,stats) global() argument
116 psi_account_irqtime(struct rq * rq,struct task_struct * curr,struct task_struct * prev) psi_account_irqtime() argument
210 struct rq *rq; psi_ttwu_dequeue() local
235 psi_account_irqtime(struct rq * rq,struct task_struct * curr,struct task_struct * prev) psi_account_irqtime() argument
246 sched_info_dequeue(struct rq * rq,struct task_struct * t) sched_info_dequeue() argument
268 sched_info_arrive(struct rq * rq,struct task_struct * t) sched_info_arrive() argument
294 sched_info_enqueue(struct rq * rq,struct task_struct * t) sched_info_enqueue() argument
308 sched_info_depart(struct rq * rq,struct task_struct * t) sched_info_depart() argument
324 sched_info_switch(struct rq * rq,struct task_struct * prev,struct task_struct * next) sched_info_switch() argument
339 sched_info_enqueue(rq,t) global() argument
340 sched_info_dequeue(rq,t) global() argument
341 sched_info_switch(rq,t,next) global() argument
[all …]
rt.c
178 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
182 return rt_rq->rq; in rq_of_rt_rq()
191 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
196 return rt_rq->rq; in rq_of_rt_se()
230 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
234 rt_rq->rq = rq; in init_tg_rt_entry()
244 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
304 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
306 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
309 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
[all …]
core.c
126 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
302 void sched_core_enqueue(struct rq *rq, struct task_struct *p) in sched_core_enqueue() argument
307 rq->core->core_task_seq++; in sched_core_enqueue()
312 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
315 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) in sched_core_dequeue() argument
320 rq->core->core_task_seq++; in sched_core_dequeue()
323 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
332 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 && in sched_core_dequeue()
333 rq->core->core_forceidle_count && rq->curr == rq->idle) in sched_core_dequeue()
334 resched_curr(rq); in sched_core_dequeue()
[all …]
stop_task.c
19 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
21 return sched_stop_runnable(rq); in balance_stop()
25 wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags) in wakeup_preempt_stop() argument
30 static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) in set_next_task_stop() argument
32 stop->se.exec_start = rq_clock_task(rq); in set_next_task_stop()
35 static struct task_struct *pick_task_stop(struct rq *rq, struc argument
44 enqueue_task_stop(struct rq * rq,struct task_struct * p,int flags) enqueue_task_stop() argument
50 dequeue_task_stop(struct rq * rq,struct task_struct * p,int flags) dequeue_task_stop() argument
56 yield_task_stop(struct rq * rq) yield_task_stop() argument
61 put_prev_task_stop(struct rq * rq,struct task_struct * prev,struct task_struct * next) put_prev_task_stop() argument
74 task_tick_stop(struct rq * rq,struct task_struct * curr,int queued) task_tick_stop() argument
78 switching_to_stop(struct rq * rq,struct task_struct * p) switching_to_stop() argument
84 prio_changed_stop(struct rq * rq,struct task_struct * p,u64 oldprio) prio_changed_stop() argument
92 update_curr_stop(struct rq * rq) update_curr_stop() argument
[all …]
pelt.c
347 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running) in update_rt_rq_load_avg() argument
349 if (___update_load_sum(now, &rq->avg_rt, in update_rt_rq_load_avg()
354 ___update_load_avg(&rq->avg_rt, 1); in update_rt_rq_load_avg()
355 trace_pelt_rt_tp(rq); in update_rt_rq_load_avg()
373 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) in update_dl_rq_load_avg() argument
375 if (___update_load_sum(now, &rq->avg_dl, in update_dl_rq_load_avg()
380 ___update_load_avg(&rq->avg_dl, 1); in update_dl_rq_load_avg()
381 trace_pelt_dl_tp(rq); in update_dl_rq_load_avg()
404 int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity) in update_hw_load_avg() argument
406 if (___update_load_sum(now, &rq->avg_hw, in update_hw_load_avg()
[all …]
idle.c
444 balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_idle() argument
452 static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags) in wakeup_preempt_idle() argument
454 resched_curr(rq); in wakeup_preempt_idle()
457 static void update_curr_idle(struct rq *rq);
459 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next) in put_prev_task_idle() argument
461 update_curr_idle(rq); in put_prev_task_idle()
462 scx_update_idle(rq, false, true); in put_prev_task_idle()
463 update_rq_avg_idle(rq); in put_prev_task_idle()
466 static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first) in set_next_task_idle() argument
468 update_idle_core(rq); in set_next_task_idle()
[all …]
/linux/drivers/scsi/esas2r/
esas2r_disc.c
49 struct esas2r_request *rq);
51 struct esas2r_request *rq);
55 struct esas2r_request *rq);
59 struct esas2r_request *rq);
61 struct esas2r_request *rq);
63 struct esas2r_request *rq);
65 struct esas2r_request *rq);
67 struct esas2r_request *rq);
69 struct esas2r_request *rq);
71 struct esas2r_request *rq);
[all …]
esas2r_int.c
173 struct esas2r_request *rq, in esas2r_handle_outbound_rsp_err() argument
181 if (unlikely(rq->req_stat != RS_SUCCESS)) { in esas2r_handle_outbound_rsp_err()
182 memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp)); in esas2r_handle_outbound_rsp_err()
184 if (rq->req_stat == RS_ABORTED) { in esas2r_handle_outbound_rsp_err()
185 if (rq->timeout > RQ_MAX_TIMEOUT) in esas2r_handle_outbound_rsp_err()
186 rq->req_stat = RS_TIMEOUT; in esas2r_handle_outbound_rsp_err()
187 } else if (rq->req_stat == RS_SCSI_ERROR) { in esas2r_handle_outbound_rsp_err()
188 u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat; in esas2r_handle_outbound_rsp_err()
197 rq->req_stat = RS_SUCCESS; in esas2r_handle_outbound_rsp_err()
198 rq->func_rsp.scsi_rsp.scsi_stat = in esas2r_handle_outbound_rsp_err()
[all …]
/linux/drivers/gpu/drm/i915/gt/
gen6_engine_cs.c
55 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) in gen6_emit_post_sync_nonzero_flush() argument
58 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_post_sync_nonzero_flush()
62 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
72 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
74 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
84 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
89 int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen6_emit_flush_rcs() argument
92 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_flush_rcs()
98 ret = gen6_emit_post_sync_nonzero_flush(rq); in gen6_emit_flush_rcs()
130 cs = intel_ring_begin(rq, 4); in gen6_emit_flush_rcs()
[all …]
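The gen6_emit_post_sync_nonzero_flush() hits follow a reserve/write/advance discipline: intel_ring_begin() reserves space for a fixed number of dwords and returns a cursor, the caller writes exactly that many, and intel_ring_advance() commits them. A toy, non-i915 sketch of that pattern is below; cmd_ring and friends are made-up names, and a real ring would handle wrapping and waiting for space.

#include <assert.h>
#include <stdint.h>

#define RING_DWORDS 256   /* toy ring size */

struct cmd_ring {
    uint32_t buf[RING_DWORDS];
    unsigned int tail;       /* write offset in dwords */
    unsigned int reserved;   /* dwords handed out by cmd_ring_begin() */
};

static uint32_t *cmd_ring_begin(struct cmd_ring *ring, unsigned int ndwords)
{
    assert(ring->tail + ndwords <= RING_DWORDS);   /* real code would wait or wrap */
    ring->reserved = ndwords;
    return &ring->buf[ring->tail];
}

static void cmd_ring_advance(struct cmd_ring *ring, uint32_t *cs)
{
    /* the cursor must land exactly at the end of the reservation */
    assert(cs == &ring->buf[ring->tail + ring->reserved]);
    ring->tail += ring->reserved;
    ring->reserved = 0;
}

int main(void)
{
    struct cmd_ring ring = { .tail = 0 };
    uint32_t *cs = cmd_ring_begin(&ring, 4);

    *cs++ = 0x1;   /* opcode */
    *cs++ = 0x2;   /* address */
    *cs++ = 0x3;   /* value */
    *cs++ = 0x0;   /* padding/noop */
    cmd_ring_advance(&ring, cs);
    return 0;
}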
selftest_execlists.c
31 static bool is_active(struct i915_request *rq) in is_active() argument
33 if (i915_request_is_active(rq)) in is_active()
36 if (i915_request_on_hold(rq)) in is_active()
39 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq)) in is_active()
46 struct i915_request *rq, in wait_for_submit() argument
56 if (i915_request_completed(rq)) /* that was quick! */ in wait_for_submit()
61 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq)) in wait_for_submit()
72 struct i915_request *rq, in wait_for_reset() argument
84 if (i915_request_completed(rq)) in wait_for_reset()
87 if (READ_ONCE(rq->fence.error)) in wait_for_reset()
[all …]
intel_breadcrumbs.c
113 check_signal_order(struct intel_context *ce, struct i915_request *rq) in check_signal_order() argument
115 if (rq->context != ce) in check_signal_order()
118 if (!list_is_last(&rq->signal_link, &ce->signals) && in check_signal_order()
119 i915_seqno_passed(rq->fence.seqno, in check_signal_order()
120 list_next_entry(rq, signal_link)->fence.seqno)) in check_signal_order()
123 if (!list_is_first(&rq->signal_link, &ce->signals) && in check_signal_order()
124 i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno, in check_signal_order()
125 rq->fence.seqno)) in check_signal_order()
214 struct i915_request *rq; in signal_irq_work() local
216 list_for_each_entry_rcu(rq, &ce->signals, signal_link) { in signal_irq_work()
[all …]
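check_signal_order() above compares neighbouring fence sequence numbers with i915_seqno_passed(), which stays correct across 32-bit wraparound by comparing through a signed difference. A minimal sketch of that comparison (seqno_passed is an illustrative name, and the signed cast relies on two's-complement arithmetic):

#include <assert.h>
#include <stdint.h>

static int seqno_passed(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) >= 0;   /* true if a is at or after b */
}

int main(void)
{
    assert(seqno_passed(10, 5));
    assert(!seqno_passed(5, 10));
    assert(seqno_passed(3, 0xfffffffeu));   /* still ordered across wraparound */
    return 0;
}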
selftest_timeline.c
454 static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value) in emit_ggtt_store_dw() argument
458 cs = intel_ring_begin(rq, 4); in emit_ggtt_store_dw()
462 if (GRAPHICS_VER(rq->i915) >= 8) { in emit_ggtt_store_dw()
467 } else if (GRAPHICS_VER(rq->i915) >= 4) { in emit_ggtt_store_dw()
479 intel_ring_advance(rq, cs); in emit_ggtt_store_dw()
487 struct i915_request *rq; in checked_tl_write() local
492 rq = ERR_PTR(err); in checked_tl_write()
503 rq = intel_engine_create_kernel_request(engine); in checked_tl_write()
504 if (IS_ERR(rq)) in checked_tl_write()
507 i915_request_get(rq); in checked_tl_write()
[all …]
intel_execlists_submission.c
219 struct i915_request *rq, in __active_request() argument
222 struct i915_request *active = rq; in __active_request()
224 list_for_each_entry_from_reverse(rq, &tl->requests, link) { in __active_request()
225 if (__i915_request_is_complete(rq)) in __active_request()
229 i915_request_set_error_once(rq, error); in __active_request()
230 __i915_request_skip(rq); in __active_request()
232 active = rq; in __active_request()
239 active_request(const struct intel_timeline * const tl, struct i915_request *rq) in active_request() argument
241 return __active_request(tl, rq, 0); in active_request()
262 static int rq_prio(const struct i915_request *rq) in rq_prio() argument
[all …]
selftest_hangcheck.c
97 const struct i915_request *rq) in hws_address() argument
100 offset_in_page(sizeof(u32) * rq->fence.context); in hws_address()
109 struct i915_request *rq = NULL; in hang_create_request() local
157 rq = igt_request_alloc(h->ctx, engine); in hang_create_request()
158 if (IS_ERR(rq)) { in hang_create_request()
159 err = PTR_ERR(rq); in hang_create_request()
163 err = igt_vma_move_to_active_unlocked(vma, rq, 0); in hang_create_request()
167 err = igt_vma_move_to_active_unlocked(hws, rq, 0); in hang_create_request()
174 *batch++ = lower_32_bits(hws_address(hws, rq)); in hang_create_request()
175 *batch++ = upper_32_bits(hws_address(hws, rq)); in hang_create_request()
[all …]
/linux/block/
blk-mq.c
49 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
50 static void blk_mq_request_bypass_insert(struct request *rq,
93 static bool blk_mq_check_in_driver(struct request *rq, void *priv) in blk_mq_check_in_driver() argument
97 if (rq->rq_flags & RQF_IO_STAT && in blk_mq_check_in_driver()
98 (!bdev_is_partition(mi->part) || rq->part == mi->part) && in blk_mq_check_in_driver()
99 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) in blk_mq_check_in_driver()
100 mi->inflight[rq_data_dir(rq)]++; in blk_mq_check_in_driver()
373 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
375 memset(rq, 0, sizeof(*rq)); in blk_rq_init()
377 INIT_LIST_HEAD(&rq->queuelist); in blk_rq_init()
[all …]
/linux/fs/erofs/
fileio.c
19 struct erofs_fileio_rq *rq; member
24 struct erofs_fileio_rq *rq = in erofs_fileio_ki_complete() local
28 if (ret >= 0 && ret != rq->bio.bi_iter.bi_size) { in erofs_fileio_ki_complete()
29 bio_advance(&rq->bio, ret); in erofs_fileio_ki_complete()
30 zero_fill_bio(&rq->bio); in erofs_fileio_ki_complete()
32 if (!rq->bio.bi_end_io) { in erofs_fileio_ki_complete()
33 bio_for_each_folio_all(fi, &rq->bio) { in erofs_fileio_ki_complete()
37 } else if (ret < 0 && !rq->bio.bi_status) { in erofs_fileio_ki_complete()
38 rq->bio.bi_status = errno_to_blk_status(ret); in erofs_fileio_ki_complete()
40 bio_endio(&rq->bio); in erofs_fileio_ki_complete()
[all …]
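erofs_fileio_ki_complete() above handles a short read by advancing the bio past the bytes actually returned and zero-filling the remainder, while a negative return is recorded as the bio's error status. A plain-buffer analogue of that short-read handling, assuming an ordinary read() source (function name is illustrative):

#include <errno.h>
#include <string.h>
#include <unistd.h>

/* Read up to len bytes into buf; zero-fill whatever the read did not cover.
 * Returns 0 on (possibly short) success, -errno on error. */
static int read_or_zero_fill(int fd, void *buf, size_t len)
{
    ssize_t ret = read(fd, buf, len);

    if (ret < 0)
        return -errno;                                    /* record the failure, like bi_status */
    if ((size_t)ret < len)
        memset((char *)buf + ret, 0, len - (size_t)ret);  /* short read: zero the tail */
    return 0;
}

int main(void)
{
    char buf[64];

    return read_or_zero_fill(0, buf, sizeof(buf)) < 0 ? 1 : 0;
}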
/linux/drivers/gpu/drm/i915/selftests/
i915_perf.c
161 static int write_timestamp(struct i915_request *rq, int slot) in write_timestamp() argument
166 cs = intel_ring_begin(rq, 6); in write_timestamp()
171 if (GRAPHICS_VER(rq->i915) >= 8) in write_timestamp()
183 intel_ring_advance(rq, cs); in write_timestamp()
188 static ktime_t poll_status(struct i915_request *rq, int slot) in poll_status() argument
190 while (!intel_read_status_page(rq->engine, slot) && in poll_status()
191 !i915_request_completed(rq)) in poll_status()
201 struct i915_request *rq; in live_noa_delay() local
224 rq = intel_engine_create_kernel_request(stream->engine); in live_noa_delay()
225 if (IS_ERR(rq)) { in live_noa_delay()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
reporter_rx.c
84 struct mlx5e_rq *rq; in mlx5e_rx_reporter_err_icosq_cqe_recover() local
93 rq = &icosq->channel->rq; in mlx5e_rx_reporter_err_icosq_cqe_recover()
108 mlx5e_deactivate_rq(rq); in mlx5e_rx_reporter_err_icosq_cqe_recover()
126 mlx5e_free_rx_missing_descs(rq); in mlx5e_rx_reporter_err_icosq_cqe_recover()
133 mlx5e_activate_rq(rq); in mlx5e_rx_reporter_err_icosq_cqe_recover()
134 rq->stats->recover++; in mlx5e_rx_reporter_err_icosq_cqe_recover()
154 struct mlx5e_rq *rq = ctx; in mlx5e_rx_reporter_err_rq_cqe_recover() local
157 mlx5e_deactivate_rq(rq); in mlx5e_rx_reporter_err_rq_cqe_recover()
158 err = mlx5e_flush_rq(rq, MLX5_RQC_STATE_ERR); in mlx5e_rx_reporter_err_rq_cqe_recover()
159 clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state); in mlx5e_rx_reporter_err_rq_cqe_recover()
[all …]
