
Searched refs:rqs (Results 1 – 12 of 12) sorted by relevance

/linux/drivers/scsi/elx/efct/
efct_hw_queues.c
21 struct hw_rq *rqs[EFCT_HW_MAX_NUM_EQ]; in efct_hw_init_queues() local
79 if (efct_hw_new_rq_set(cqs, rqs, i, EFCT_HW_RQ_ENTRIES_DEF)) { in efct_hw_init_queues()
85 rqs[j]->filter_mask = 0; in efct_hw_init_queues()
86 rqs[j]->is_mrq = true; in efct_hw_init_queues()
87 rqs[j]->base_mrq_id = rqs[0]->hdr->id; in efct_hw_init_queues()
310 efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[], in efct_hw_new_rq_set() argument
320 rqs[i] = NULL; in efct_hw_new_rq_set()
331 rqs[i] = rq; in efct_hw_new_rq_set()
356 rqs[0]->entry_count, in efct_hw_new_rq_set()
357 rqs[0]->hdr_entry_size, in efct_hw_new_rq_set()
[all …]
efct_hw.h
746 efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
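
Taken together, the efct hits show the driver building an "RQ set": efct_hw_new_rq_set() fills an rqs[] array, and each member is then marked is_mrq = true with base_mrq_id taken from the first queue's id. A minimal userspace C sketch of that marking pattern follows; struct rq, new_rq_set and the fake ids are stand-ins invented here, not the driver's hw_rq types.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_RQ_PAIRS 4 /* illustrative set size, not the driver's value */

/* Stand-in for the driver's struct hw_rq: only the fields the hits show. */
struct rq {
	int id;              /* hardware queue id */
	unsigned filter_mask;
	bool is_mrq;         /* queue is a member of a multi-RQ set */
	int base_mrq_id;     /* id of the first queue in the set */
};

/* Allocate a set of RQs and mark each one as a member of the same MRQ set. */
static int new_rq_set(struct rq *rqs[], int count)
{
	for (int i = 0; i < count; i++) {
		rqs[i] = calloc(1, sizeof(**rqs));
		if (!rqs[i])
			return -1;
		rqs[i]->id = 100 + i; /* fake hardware ids */
	}
	for (int i = 0; i < count; i++) {
		rqs[i]->filter_mask = 0;
		rqs[i]->is_mrq = true;
		/* every member points back at the first queue of the set */
		rqs[i]->base_mrq_id = rqs[0]->id;
	}
	return 0;
}

int main(void)
{
	struct rq *rqs[NUM_RQ_PAIRS];

	if (new_rq_set(rqs, NUM_RQ_PAIRS))
		return 1;
	for (int i = 0; i < NUM_RQ_PAIRS; i++) {
		printf("rq %d: base_mrq_id=%d is_mrq=%d\n",
		       rqs[i]->id, rqs[i]->base_mrq_id, rqs[i]->is_mrq);
		free(rqs[i]);
	}
	return 0;
}
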
/linux/tools/perf/scripts/python/
sched-migration.py
170 self.rqs = prev.rqs.copy()
172 self.rqs = defaultdict(RunqueueSnapshot)
180 old_rq = self.prev.rqs[cpu]
186 self.rqs[cpu] = new_rq
194 old_rq = self.prev.rqs[old_cpu]
196 self.rqs[old_cpu] = out_rq
199 new_rq = self.prev.rqs[new_cpu]
201 self.rqs[new_cpu] = in_rq
211 old_rq = self.prev.rqs[cpu]
219 self.rqs[cpu] = new_rq
[all …]
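
In this perf script, rqs is a per-CPU mapping of runqueue snapshots: each new time slice starts as a copy of the previous slice's rqs, and an event then overwrites only the CPUs it touched. A rough C sketch of that copy-then-patch idea follows, using a fixed array in place of the Python defaultdict; time_slice, rq_snapshot and nr_running are names invented for illustration.

#include <stdio.h>
#include <string.h>

#define NR_CPUS 4

/* Stand-in for the script's RunqueueSnapshot: just a depth counter here. */
struct rq_snapshot {
	int nr_running;
};

struct time_slice {
	struct rq_snapshot rqs[NR_CPUS];
};

/* A new slice starts as a copy of the previous one... */
static void slice_init(struct time_slice *ts, const struct time_slice *prev)
{
	if (prev)
		memcpy(ts->rqs, prev->rqs, sizeof(ts->rqs));
	else
		memset(ts->rqs, 0, sizeof(ts->rqs));
}

/* ...and a migration event only patches the two CPUs involved. */
static void slice_migrate(struct time_slice *ts, const struct time_slice *prev,
			  int old_cpu, int new_cpu)
{
	ts->rqs[old_cpu] = prev->rqs[old_cpu];
	ts->rqs[old_cpu].nr_running--;
	ts->rqs[new_cpu] = prev->rqs[new_cpu];
	ts->rqs[new_cpu].nr_running++;
}

int main(void)
{
	struct time_slice a = { .rqs = { {2}, {1}, {0}, {3} } }, b;

	slice_init(&b, &a);
	slice_migrate(&b, &a, 0, 2);
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: %d -> %d\n",
		       cpu, a.rqs[cpu].nr_running, b.rqs[cpu].nr_running);
	return 0;
}
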
/linux/arch/arm/boot/dts/nxp/imx/
imx6dl-icore-rqs.dts
10 #include "imx6qdl-icore-rqs.dtsi"
14 compatible = "engicam,imx6-icore-rqs", "fsl,imx6dl";
imx6q-icore-rqs.dts
10 #include "imx6qdl-icore-rqs.dtsi"
14 compatible = "engicam,imx6-icore-rqs", "fsl,imx6q";
Makefile
106 imx6dl-icore-rqs.dtb \
220 imx6q-icore-rqs.dtb \
imx6qdl-icore-rqs.dtsi
91 simple-audio-card,name = "imx6qdl-icore-rqs-sgtl5000";
/linux/block/
blk-mq.c
1377 rq->mq_hctx->tags->rqs[rq->tag] = rq; in blk_mq_start_request()
2805 static void blk_mq_issue_direct(struct rq_list *rqs) in blk_mq_issue_direct() argument
2812 while ((rq = rq_list_pop(rqs))) { in blk_mq_issue_direct()
2813 bool last = rq_list_empty(rqs); in blk_mq_issue_direct()
2844 static void __blk_mq_flush_list(struct request_queue *q, struct rq_list *rqs) in __blk_mq_flush_list() argument
2848 q->mq_ops->queue_rqs(rqs); in __blk_mq_flush_list()
2851 static unsigned blk_mq_extract_queue_requests(struct rq_list *rqs, in blk_mq_extract_queue_requests() argument
2854 struct request *rq = rq_list_pop(rqs); in blk_mq_extract_queue_requests()
2856 struct request **prev = &rqs->head; in blk_mq_extract_queue_requests()
2864 /* move rq from rqs t in blk_mq_extract_queue_requests()
2880 blk_mq_dispatch_queue_requests(struct rq_list *rqs, unsigned depth) in blk_mq_dispatch_queue_requests() argument
2901 blk_mq_dispatch_list(struct rq_list *rqs, bool from_sched) in blk_mq_dispatch_list() argument
2946 blk_mq_dispatch_multiple_queue_requests(struct rq_list *rqs) in blk_mq_dispatch_multiple_queue_requests() argument
[all …]
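
The blk-mq.c hits trace how a plugged rq_list is drained: blk_mq_issue_direct() pops one request at a time and flags the final one by checking whether the list just became empty, while __blk_mq_flush_list() hands the whole list to the driver's queue_rqs callback. A simplified userspace sketch of the pop-and-flag-last loop follows; the request/rq_list types here are minimal stand-ins, not the kernel's.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's struct request / struct rq_list. */
struct request {
	int tag;
	struct request *next;
};

struct rq_list {
	struct request *head;
};

static bool rq_list_empty(const struct rq_list *rqs)
{
	return rqs->head == NULL;
}

static struct request *rq_list_pop(struct rq_list *rqs)
{
	struct request *rq = rqs->head;

	if (rq)
		rqs->head = rq->next;
	return rq;
}

/* Pop every request and tell the issuer whether it is the last of the batch,
 * mirroring the shape of blk_mq_issue_direct() in the hits above. */
static void issue_direct(struct rq_list *rqs)
{
	struct request *rq;

	while ((rq = rq_list_pop(rqs))) {
		bool last = rq_list_empty(rqs);

		printf("issue tag %d%s\n", rq->tag, last ? " (last)" : "");
	}
}

int main(void)
{
	struct request r2 = { .tag = 2, .next = NULL };
	struct request r1 = { .tag = 1, .next = &r2 };
	struct rq_list rqs = { .head = &r1 };

	issue_direct(&rqs);
	return 0;
}
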
blk-mq-tag.c
260 rq = tags->rqs[bitnr]; in blk_mq_find_and_get_req()
390 if (tags->rqs) in bt_tags_for_each()
/linux/include/linux/
blk-mq.h
780 struct request **rqs; member
796 prefetch(tags->rqs[tag]); in blk_mq_tag_to_rq()
797 return tags->rqs[tag]; in blk_mq_tag_to_rq()
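
The blk-mq.h hits show that struct blk_mq_tags keeps rqs as a flat array of request pointers indexed by driver tag, so blk_mq_tag_to_rq() is essentially a bounds check plus an array load (with a prefetch), and blk_mq_start_request() is what stores the pointer into that slot. A small userspace sketch of the tag-to-request mapping follows, with hypothetical struct names standing in for the kernel's.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	unsigned int tag;
};

/* Stand-in for struct blk_mq_tags: only the rqs[] mapping matters here. */
struct tags {
	unsigned int nr_tags;
	struct request **rqs; /* indexed by tag, as in the blk-mq.h hit above */
};

static struct request *tag_to_rq(struct tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags)
		return tags->rqs[tag]; /* the kernel also prefetches this slot */
	return NULL;
}

int main(void)
{
	struct tags tags = { .nr_tags = 4 };
	struct request rq = { .tag = 2 };

	tags.rqs = calloc(tags.nr_tags, sizeof(*tags.rqs));
	if (!tags.rqs)
		return 1;
	tags.rqs[rq.tag] = &rq;          /* what blk_mq_start_request() does */
	printf("tag 2 -> %p\n", (void *)tag_to_rq(&tags, 2));
	printf("tag 3 -> %p\n", (void *)tag_to_rq(&tags, 3));
	free(tags.rqs);
	return 0;
}
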
/linux/drivers/net/ethernet/cisco/enic/
enic_main.c
924 struct enic_rq_stats *rqs = &enic->rq[i].stats; in enic_get_stats() local
928 pkt_truncated += rqs->pkt_truncated; in enic_get_stats()
929 bad_fcs += rqs->bad_fcs; in enic_get_stats()
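
In the enic driver, rqs is simply a pointer to one receive queue's statistics block, and enic_get_stats() sums those per-queue counters into device-wide totals. A toy C sketch of that aggregation follows; the rq_stats struct here is a stand-in holding only the two counters the hits mention.

#include <stdint.h>
#include <stdio.h>

#define NR_RQ 3

/* Stand-in for struct enic_rq_stats: only the counters the hits mention. */
struct rq_stats {
	uint64_t pkt_truncated;
	uint64_t bad_fcs;
};

int main(void)
{
	struct rq_stats rq_stats[NR_RQ] = {
		{ .pkt_truncated = 1, .bad_fcs = 0 },
		{ .pkt_truncated = 0, .bad_fcs = 2 },
		{ .pkt_truncated = 3, .bad_fcs = 1 },
	};
	uint64_t pkt_truncated = 0, bad_fcs = 0;

	/* Sum per-receive-queue counters into device-wide totals,
	 * as enic_get_stats() does across enic->rq[i].stats. */
	for (int i = 0; i < NR_RQ; i++) {
		struct rq_stats *rqs = &rq_stats[i];

		pkt_truncated += rqs->pkt_truncated;
		bad_fcs += rqs->bad_fcs;
	}
	printf("truncated=%llu bad_fcs=%llu\n",
	       (unsigned long long)pkt_truncated, (unsigned long long)bad_fcs);
	return 0;
}
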
/linux/Documentation/scheduler/
sched-util-clamp.rst
463 rqs are restricted too. IOW, the whole system is capped to half its performance