/linux/drivers/scsi/elx/efct/

efct_hw_queues.c
     21  struct hw_rq *rqs[EFCT_HW_MAX_NUM_EQ];  in efct_hw_init_queues() local
     79  if (efct_hw_new_rq_set(cqs, rqs, i, EFCT_HW_RQ_ENTRIES_DEF)) {  in efct_hw_init_queues()
     85  rqs[j]->filter_mask = 0;  in efct_hw_init_queues()
     86  rqs[j]->is_mrq = true;  in efct_hw_init_queues()
     87  rqs[j]->base_mrq_id = rqs[0]->hdr->id;  in efct_hw_init_queues()
    310  efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],  in efct_hw_new_rq_set() argument
    320  rqs[i] = NULL;  in efct_hw_new_rq_set()
    331  rqs[i] = rq;  in efct_hw_new_rq_set()
    356  rqs[0]->entry_count,  in efct_hw_new_rq_set()
    357  rqs[0]->hdr_entry_size,  in efct_hw_new_rq_set()
    [all …]
|
efct_hw.h
    746  efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
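The hits above trace the multi-RQ (MRQ) setup: efct_hw_new_rq_set() populates an array of receive queues, and efct_hw_init_queues() then tags every member of the set, pointing base_mrq_id at the first queue's header-queue id. A minimal userspace sketch of that tagging step, with stand-in struct definitions (the real struct hw_rq and struct hw_cq layouts live in efct_hw.h):

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in types; the real definitions live in efct_hw.h. */
struct sli4_queue { int id; };
struct hw_rq {
        struct sli4_queue *hdr;   /* header buffer queue */
        unsigned int filter_mask; /* protocol filter, 0 = accept all */
        bool is_mrq;              /* member of a multi-RQ set */
        int base_mrq_id;          /* id of the set's first RQ */
};

#define NUM_RQS 4

/* Mirror of the efct_hw_init_queues() loop: after the RQ set is
 * created, every member is tagged with the id of rqs[0]. */
static void mark_mrq_set(struct hw_rq *rqs[], int count)
{
        for (int j = 0; j < count; j++) {
                rqs[j]->filter_mask = 0;
                rqs[j]->is_mrq = true;
                rqs[j]->base_mrq_id = rqs[0]->hdr->id;
        }
}

int main(void)
{
        struct hw_rq *rqs[NUM_RQS];

        for (int i = 0; i < NUM_RQS; i++) {
                rqs[i] = calloc(1, sizeof(*rqs[i]));
                rqs[i]->hdr = calloc(1, sizeof(*rqs[i]->hdr));
                rqs[i]->hdr->id = 100 + i; /* fake hardware queue ids */
        }

        mark_mrq_set(rqs, NUM_RQS);

        for (int i = 0; i < NUM_RQS; i++)
                printf("rq %d: base_mrq_id=%d\n", rqs[i]->hdr->id,
                       rqs[i]->base_mrq_id);
        return 0;
}
```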
|
/linux/tools/perf/scripts/python/

sched-migration.py
    170  self.rqs = prev.rqs.copy()
    172  self.rqs = defaultdict(RunqueueSnapshot)
    180  old_rq = self.prev.rqs[cpu]
    186  self.rqs[cpu] = new_rq
    194  old_rq = self.prev.rqs[old_cpu]
    196  self.rqs[old_cpu] = out_rq
    199  new_rq = self.prev.rqs[new_cpu]
    201  self.rqs[new_cpu] = in_rq
    211  old_rq = self.prev.rqs[cpu]
    219  self.rqs[cpu] = new_rq
    [all …]
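Here rqs is a per-CPU dict of RunqueueSnapshot objects: each scheduler event forks the previous timeslice's mapping and replaces only the entry for the CPU that changed, so unchanged CPUs keep sharing the old snapshots. A sketch of that copy-on-write idea, rendered in C for consistency with the other examples (all names here are invented for illustration):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4

/* Stand-in for the script's RunqueueSnapshot. */
struct rq_snapshot {
        int nr_running;
};

/* A timeslice shares unchanged per-CPU snapshots with its
 * predecessor and allocates a fresh one only for the CPU that
 * generated the event -- the same copy-on-write idea as
 * sched-migration.py's `self.rqs = prev.rqs.copy()`. */
struct timeslice {
        struct rq_snapshot *rqs[NR_CPUS];
};

static struct timeslice *timeslice_fork(const struct timeslice *prev)
{
        struct timeslice *ts = malloc(sizeof(*ts));

        memcpy(ts->rqs, prev->rqs, sizeof(ts->rqs)); /* share, don't copy */
        return ts;
}

static void timeslice_wake_up(struct timeslice *ts, int cpu)
{
        struct rq_snapshot *new_rq = malloc(sizeof(*new_rq));

        *new_rq = *ts->rqs[cpu];     /* clone only the touched CPU */
        new_rq->nr_running++;
        ts->rqs[cpu] = new_rq;
}

int main(void)
{
        struct timeslice base = { 0 };
        struct rq_snapshot idle = { .nr_running = 0 };

        for (int i = 0; i < NR_CPUS; i++)
                base.rqs[i] = &idle;

        struct timeslice *next = timeslice_fork(&base);

        timeslice_wake_up(next, 2);
        printf("cpu2: base=%d next=%d\n",
               base.rqs[2]->nr_running, next->rqs[2]->nr_running);
        free(next->rqs[2]);
        free(next);
        return 0;
}
```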
|
/linux/drivers/net/ethernet/huawei/hinic3/

hinic3_nic_io.c
    404  struct hinic3_io_queue *rqs;  in hinic3_alloc_qps() local
    417  rqs = kzalloc_objs(*rqs, qp_params->num_qps);  in hinic3_alloc_qps()
    418  if (!rqs) {  in hinic3_alloc_qps()
    424  err = hinic3_create_qp(hwdev, &sqs[q_id], &rqs[q_id], q_id,  in hinic3_alloc_qps()
    435  qp_params->rqs = rqs;  in hinic3_alloc_qps()
    442  hinic3_destroy_qp(hwdev, &sqs[q_id], &rqs[q_id]);  in hinic3_alloc_qps()
    444  kfree(rqs);  in hinic3_alloc_qps()
    459  &qp_params->rqs[q_id]);  in hinic3_free_qps()
    462  kfree(qp_params->rqs);  in hinic3_free_qps()
    470  struct hinic3_io_queue *rqs = qp_params->rqs;  in hinic3_init_qps() local
    [all …]
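hinic3_alloc_qps() allocates parallel SQ and RQ arrays, creates the queue pairs one by one, and on failure unwinds only the pairs already created before freeing both arrays. A userspace sketch of that error-unwind shape (create_qp() and destroy_qp() are hypothetical stand-ins for hinic3_create_qp()/hinic3_destroy_qp(), and plain calloc() replaces the kernel allocator):

```c
#include <stdio.h>
#include <stdlib.h>

struct io_queue { int q_id; };

/* Hypothetical stand-in for hinic3_create_qp(). */
static int create_qp(struct io_queue *sq, struct io_queue *rq, int q_id)
{
        if (q_id == 3)          /* simulate a failure on queue 3 */
                return -1;
        sq->q_id = rq->q_id = q_id;
        return 0;
}

/* Hypothetical stand-in for hinic3_destroy_qp(). */
static void destroy_qp(struct io_queue *sq, struct io_queue *rq)
{
        printf("destroyed qp %d\n", sq->q_id);
}

static int alloc_qps(int num_qps)
{
        struct io_queue *sqs, *rqs;
        int q_id, err;

        sqs = calloc(num_qps, sizeof(*sqs));
        rqs = calloc(num_qps, sizeof(*rqs));
        if (!sqs || !rqs)
                goto err_free;

        for (q_id = 0; q_id < num_qps; q_id++) {
                err = create_qp(&sqs[q_id], &rqs[q_id], q_id);
                if (err)
                        goto err_destroy;
        }
        /* Success: the kernel code hands both arrays to qp_params
         * here; the sketch just frees them since there is no owner. */
        free(sqs);
        free(rqs);
        return 0;

err_destroy:
        /* Unwind only the pairs created so far, newest first. */
        while (q_id > 0) {
                q_id--;
                destroy_qp(&sqs[q_id], &rqs[q_id]);
        }
err_free:
        free(sqs);
        free(rqs);
        return -1;
}

int main(void)
{
        return alloc_qps(5) ? 1 : 0;
}
```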
|
/linux/arch/arm/boot/dts/nxp/imx/

imx6dl-icore-rqs.dts
     10  #include "imx6qdl-icore-rqs.dtsi"
     14  compatible = "engicam,imx6-icore-rqs", "fsl,imx6dl";
|
imx6q-icore-rqs.dts
     10  #include "imx6qdl-icore-rqs.dtsi"
     14  compatible = "engicam,imx6-icore-rqs", "fsl,imx6q";
|
Makefile
    106  imx6dl-icore-rqs.dtb \
    220  imx6q-icore-rqs.dtb \
|
imx6qdl-icore-rqs.dtsi
     91  simple-audio-card,name = "imx6qdl-icore-rqs-sgtl5000";
|
/linux/block/

blk-iolatency.c
    135  struct blk_rq_stat rqs;  member
    205  blk_rq_stat_init(&stat->rqs);  in latency_stat_init()
    216  blk_rq_stat_sum(&sum->rqs, &stat->rqs);  in latency_stat_sum()
    228  blk_rq_stat_add(&stat->rqs, req_time);  in latency_stat_record_time()
    240  return stat->rqs.mean <= iolat->min_lat_nsec;  in latency_sum_ok()
    248  return stat->rqs.nr_samples;  in latency_stat_samples()
    271  stat->rqs.mean);  in iolat_update_total_lat_avg()
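blk-iolatency embeds a struct blk_rq_stat and drives it through a small lifecycle: initialize, record per-request times, fold per-CPU buckets together, then compare the resulting mean against the cgroup's min_lat_nsec target. A simplified, self-contained model of that accumulator (the real blk_rq_stat in the block layer batches samples and tracks min/max as well, which this sketch omits):

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified model of struct blk_rq_stat: just a running sum and
 * sample count, with the mean derived on demand. */
struct rq_stat {
        uint64_t sum_ns;
        uint64_t nr_samples;
};

static void stat_init(struct rq_stat *s)
{
        s->sum_ns = 0;
        s->nr_samples = 0;
}

static void stat_add(struct rq_stat *s, uint64_t req_time_ns)
{
        s->sum_ns += req_time_ns;
        s->nr_samples++;
}

static void stat_sum(struct rq_stat *dst, const struct rq_stat *src)
{
        dst->sum_ns += src->sum_ns;
        dst->nr_samples += src->nr_samples;
}

static uint64_t stat_mean(const struct rq_stat *s)
{
        return s->nr_samples ? s->sum_ns / s->nr_samples : 0;
}

int main(void)
{
        /* Two per-CPU buckets folded into one, as latency_stat_sum()
         * does, then checked against a latency target the way
         * latency_sum_ok() checks iolat->min_lat_nsec. */
        struct rq_stat cpu0, cpu1, total;
        uint64_t min_lat_nsec = 2000000; /* 2 ms target */

        stat_init(&cpu0);
        stat_init(&cpu1);
        stat_init(&total);

        stat_add(&cpu0, 1500000);
        stat_add(&cpu1, 1800000);

        stat_sum(&total, &cpu0);
        stat_sum(&total, &cpu1);

        printf("mean=%llu ok=%d\n",
               (unsigned long long)stat_mean(&total),
               stat_mean(&total) <= min_lat_nsec);
        return 0;
}
```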
|
blk-mq.c
   1386  rq->mq_hctx->tags->rqs[rq->tag] = rq;  in blk_mq_start_request()
   2815  static void blk_mq_issue_direct(struct rq_list *rqs)  in blk_mq_issue_direct() argument
   2822  while ((rq = rq_list_pop(rqs))) {  in blk_mq_issue_direct()
   2823  bool last = rq_list_empty(rqs);  in blk_mq_issue_direct()
   2854  static void __blk_mq_flush_list(struct request_queue *q, struct rq_list *rqs)  in __blk_mq_flush_list() argument
   2858  q->mq_ops->queue_rqs(rqs);  in __blk_mq_flush_list()
   2861  static unsigned blk_mq_extract_queue_requests(struct rq_list *rqs,  in blk_mq_extract_queue_requests() argument
   2864  struct request *rq = rq_list_pop(rqs);  in blk_mq_extract_queue_requests()
   2866  struct request **prev = &rqs->head;  in blk_mq_extract_queue_requests()
   2885  rqs->tail = last;  in blk_mq_extract_queue_requests()
   [all …]
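blk_mq_issue_direct() drains a struct rq_list (an intrusive singly linked list with head and tail pointers) and passes a "last" hint on the final request so the driver can ring the hardware doorbell once per batch instead of once per request. A standalone sketch of that drain loop, with a minimal rq_list model standing in for the kernel's:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal model of the block layer's struct rq_list: an intrusive
 * singly linked list with head and tail pointers. */
struct request {
        int tag;
        struct request *rq_next;
};

struct rq_list {
        struct request *head;
        struct request *tail;
};

static bool rq_list_empty(const struct rq_list *l)
{
        return l->head == NULL;
}

static struct request *rq_list_pop(struct rq_list *l)
{
        struct request *rq = l->head;

        if (rq) {
                l->head = rq->rq_next;
                if (!l->head)
                        l->tail = NULL;
        }
        return rq;
}

/* The blk_mq_issue_direct() shape: drain the list and pass a
 * "last" hint so the driver can ring the doorbell once. */
static void issue_direct(struct rq_list *rqs)
{
        struct request *rq;

        while ((rq = rq_list_pop(rqs))) {
                bool last = rq_list_empty(rqs);

                printf("issue tag %d%s\n", rq->tag,
                       last ? " (last, ring doorbell)" : "");
        }
}

int main(void)
{
        struct request a = { .tag = 0 }, b = { .tag = 1 }, c = { .tag = 2 };
        struct rq_list rqs = { .head = &a, .tail = &c };

        a.rq_next = &b;
        b.rq_next = &c;
        issue_direct(&rqs);
        return 0;
}
```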
|
blk-mq-tag.c
    260  rq = tags->rqs[bitnr];  in blk_mq_find_and_get_req()
    390  if (tags->rqs)  in bt_tags_for_each()
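tags->rqs is the tag-to-request lookup table that blk_mq_start_request() fills in above; readers guard against the array being absent and tolerate empty slots. A simplified sketch of that lookup (the real blk_mq_find_and_get_req() also takes a reference on the request under a lock to cope with concurrent completion, which this model omits):

```c
#include <stddef.h>
#include <stdio.h>

struct request { int tag; };

/* Minimal model of struct blk_mq_tags: rqs[] maps an allocated tag
 * back to its request, and may not be populated at all for tag sets
 * that haven't started any requests yet. */
struct tags {
        struct request **rqs;
        unsigned int nr_tags;
};

/* The blk_mq_find_and_get_req()/bt_tags_for_each() shape: check the
 * array exists, then tolerate NULL slots for tags not yet started. */
static struct request *find_req(struct tags *t, unsigned int bitnr)
{
        if (!t->rqs || bitnr >= t->nr_tags)
                return NULL;
        return t->rqs[bitnr];
}

int main(void)
{
        struct request r = { .tag = 1 };
        struct request *slots[4] = { NULL, &r, NULL, NULL };
        struct tags t = { .rqs = slots, .nr_tags = 4 };

        printf("tag 1 -> %s\n", find_req(&t, 1) ? "request" : "(none)");
        printf("tag 3 -> %s\n", find_req(&t, 3) ? "request" : "(none)");
        return 0;
}
```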
|
/linux/drivers/net/ethernet/cisco/enic/

enic_main.c
    924  struct enic_rq_stats *rqs = &enic->rq[i].stats;  in enic_get_stats() local
    928  pkt_truncated += rqs->pkt_truncated;  in enic_get_stats()
    929  bad_fcs += rqs->bad_fcs;  in enic_get_stats()
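enic_get_stats() walks every receive queue and folds its per-queue software counters into device-wide totals. A self-contained sketch of that aggregation loop (field names follow the hits above; the full struct enic_rq_stats carries many more counters):

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified per-receive-queue counters, modeled on the two
 * struct enic_rq_stats fields visible in the hits. */
struct rq_stats {
        uint64_t pkt_truncated;
        uint64_t bad_fcs;
};

struct rq {
        struct rq_stats stats;
};

int main(void)
{
        struct rq rq[3] = {
                { .stats = { .pkt_truncated = 1, .bad_fcs = 0 } },
                { .stats = { .pkt_truncated = 0, .bad_fcs = 2 } },
                { .stats = { .pkt_truncated = 4, .bad_fcs = 1 } },
        };
        uint64_t pkt_truncated = 0, bad_fcs = 0;

        /* The enic_get_stats() shape: walk every RQ and fold its
         * software counters into the netdev-wide totals. */
        for (int i = 0; i < 3; i++) {
                struct rq_stats *rqs = &rq[i].stats;

                pkt_truncated += rqs->pkt_truncated;
                bad_fcs += rqs->bad_fcs;
        }

        printf("pkt_truncated=%llu bad_fcs=%llu\n",
               (unsigned long long)pkt_truncated,
               (unsigned long long)bad_fcs);
        return 0;
}
```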
|
/linux/Documentation/scheduler/

sched-util-clamp.rst
    463  rqs are restricted too. IOW, the whole system is capped to half its performance
|