/linux/drivers/mmc/core/
queue.c
     26  static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
     29      return mq->in_flight[MMC_ISSUE_DCMD];
     32  void mmc_cqe_check_busy(struct mmc_queue *mq)
     34      if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
     35          mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
     60  enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
     62      struct mmc_host *host = mq->card->host;
     73  static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
     75      if (!mq->recovery_needed) {
     76          mq->recovery_needed = true;
    [all …]

block.c
    188  struct mmc_queue *mq);
    in power_ro_lock_store():
    258      struct mmc_queue *mq;
    269      mq = &md->queue;
    272      req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
    in mmc_blk_ioctl_cmd():
    672      struct mmc_queue *mq;
    692      mq = &md->queue;
    693      req = blk_mq_alloc_request(mq->queue,
    in mmc_blk_ioctl_multi_cmd():
    723      struct mmc_queue *mq;
    765      mq = &md->queue;
    766      req = blk_mq_alloc_request(mq->queue,
    [all …]

queue.h
     97  struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
    105  void mmc_cqe_check_busy(struct mmc_queue *mq);
    108  enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);
    110  static inline int mmc_tot_in_flight(struct mmc_queue *mq)
    112      return mq->in_flight[MMC_ISSUE_SYNC] +
    113             mq->in_flight[MMC_ISSUE_DCMD] +
    114             mq->in_flight[MMC_ISSUE_ASYNC];
    117  static inline int mmc_cqe_qcnt(struct mmc_queue *mq)
    119      return mq->in_flight[MMC_ISSUE_DCMD] +
    120             mq->in_flight[MMC_ISSUE_ASYNC];
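
The two files above together show how the MMC queue tracks outstanding requests: per-issue-type in_flight counters are summed for totals, and a DCMD busy bit in cqe_busy is cleared once the last in-flight DCMD completes. A minimal userspace model of that accounting, with simplified stand-in types rather than the kernel's structures:

/* Minimal userspace model of the in-flight accounting shown above.
 * Types and names are simplified stand-ins, not the kernel's. */
#include <stdbool.h>
#include <stdio.h>

enum mmc_issue_type { MMC_ISSUE_SYNC, MMC_ISSUE_DCMD, MMC_ISSUE_ASYNC, MMC_ISSUE_MAX };

#define MMC_CQE_DCMD_BUSY (1 << 0)

struct mmc_queue_model {
    int in_flight[MMC_ISSUE_MAX];
    unsigned int cqe_busy;
};

static bool dcmd_busy(struct mmc_queue_model *mq)
{
    return mq->in_flight[MMC_ISSUE_DCMD] > 0;
}

/* Mirrors mmc_cqe_check_busy(): drop the DCMD busy bit once the last
 * in-flight DCMD request has completed. */
static void check_busy(struct mmc_queue_model *mq)
{
    if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !dcmd_busy(mq))
        mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
}

/* Mirrors mmc_tot_in_flight(): total across all issue types. */
static int tot_in_flight(struct mmc_queue_model *mq)
{
    return mq->in_flight[MMC_ISSUE_SYNC] +
           mq->in_flight[MMC_ISSUE_DCMD] +
           mq->in_flight[MMC_ISSUE_ASYNC];
}

int main(void)
{
    struct mmc_queue_model mq = { .cqe_busy = MMC_CQE_DCMD_BUSY };

    mq.in_flight[MMC_ISSUE_DCMD] = 1;
    check_busy(&mq);    /* one DCMD still in flight: bit stays set */
    printf("busy=%u total=%d\n", mq.cqe_busy, tot_in_flight(&mq));

    mq.in_flight[MMC_ISSUE_DCMD] = 0;
    check_busy(&mq);    /* last DCMD done: bit is cleared */
    printf("busy=%u total=%d\n", mq.cqe_busy, tot_in_flight(&mq));
    return 0;
}

Run, the second check_busy() clears the bit because no DCMD remains in flight.
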
block.h
      8  void mmc_blk_cqe_recovery(struct mmc_queue *mq);
     12  enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
     14  void mmc_blk_mq_recovery(struct mmc_queue *mq);

/linux/drivers/scsi/arm/
msgqueue.c
    in mqe_alloc():
     24      struct msgqueue_entry *mq;
     26      if ((mq = msgq->free) != NULL)
     27          msgq->free = mq->next;
     29      return mq;
     38  static void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq)
     40      if (mq) {
     41          mq->next = msgq->free;
     42          msgq->free = mq;
    in msgqueue_msglength():
     82      struct msgqueue_entry *mq = msgq->qe;
     85      for (mq = msgq->qe; mq; mq = mq->next)
    [all …]
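
mqe_alloc() and mqe_free() above implement a classic intrusive free list: entries pop off and push back onto a singly linked list, avoiding allocator calls on the message path. A self-contained sketch of the same pattern, with a fixed pool standing in for the driver's storage:

/* Userspace sketch of the mqe_alloc()/mqe_free() free-list pattern:
 * entries are popped from and pushed back onto a singly linked free
 * list, so the hot path never calls the allocator. Simplified types. */
#include <stddef.h>
#include <stdio.h>

struct entry {
    struct entry *next;
    int payload;
};

struct queue {
    struct entry *free;     /* head of the free list */
    struct entry pool[4];   /* backing storage */
};

static void queue_init(struct queue *q)
{
    size_t i;

    q->free = NULL;
    for (i = 0; i < sizeof(q->pool) / sizeof(q->pool[0]); i++) {
        q->pool[i].next = q->free;
        q->free = &q->pool[i];
    }
}

static struct entry *entry_alloc(struct queue *q)
{
    struct entry *e = q->free;

    if (e)
        q->free = e->next;  /* pop the head */
    return e;
}

static void entry_free(struct queue *q, struct entry *e)
{
    if (e) {
        e->next = q->free;  /* push back onto the head */
        q->free = e;
    }
}

int main(void)
{
    struct queue q;
    struct entry *e;

    queue_init(&q);
    e = entry_alloc(&q);
    printf("got entry %p\n", (void *)e);
    entry_free(&q, e);
    return 0;
}
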
/linux/drivers/sh/maple/
maple.c
    in maple_getcond_callback():
    122      void (*callback)(struct mapleq *mq),
    in maple_release_device():
    140      struct mapleq *mq;
    143      mq = mdev->mq;
    144      kmem_cache_free(maple_queue_cache, mq->recvbuf);
    145      kfree(mq);
    in maple_add_packet():
    172      mdev->mq->command = command;
    173      mdev->mq->length = length;
    176      mdev->mq->sendbuf = sendbuf;
    179      list_add_tail(&mdev->mq->list, &maple_waitq);
    in maple_allocq():
    188      struct mapleq *mq;
    [all …]
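
maple_add_packet() fills in the device's mapleq (command, length, sendbuf) and appends it to the global maple_waitq; the bus later walks that list and invokes the owning device's callback. A hedged userspace model of that flow, using a hand-rolled tail pointer in place of the kernel's list_head and invented names throughout:

/* Model of the maple bus pattern above: a packet is filled in and
 * appended to a wait list; when the "hardware" answers, the owning
 * device's callback is invoked with the queue entry. Illustrative. */
#include <stdio.h>

struct mapleq_model;

struct device_model {
    struct mapleq_model *mq;
    void (*callback)(struct mapleq_model *mq);
};

struct mapleq_model {
    struct device_model *dev;
    unsigned int command;
    unsigned int length;
    void *sendbuf;
    struct mapleq_model *next;  /* stand-in for the list_head */
};

static struct mapleq_model *waitq_head, **waitq_tail = &waitq_head;

static void add_packet(struct device_model *mdev, unsigned int command,
                       unsigned int length, void *sendbuf)
{
    mdev->mq->command = command;
    mdev->mq->length = length;
    mdev->mq->sendbuf = sendbuf;
    mdev->mq->next = NULL;
    *waitq_tail = mdev->mq;     /* list_add_tail(..., &maple_waitq) */
    waitq_tail = &mdev->mq->next;
}

/* Walk the wait list as if every queued packet just got its reply. */
static void drain_waitq(void)
{
    struct mapleq_model *mq;

    for (mq = waitq_head; mq; mq = mq->next)
        if (mq->dev->callback)
            mq->dev->callback(mq);
    waitq_head = NULL;
    waitq_tail = &waitq_head;
}

static void on_reply(struct mapleq_model *mq)
{
    printf("reply for command %u\n", mq->command);
}

int main(void)
{
    struct mapleq_model mq = { 0 };
    struct device_model mdev = { .mq = &mq, .callback = on_reply };

    mq.dev = &mdev;
    add_packet(&mdev, 1 /* e.g. a GETCOND-style query */, 0, NULL);
    drain_waitq();
    return 0;
}
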
/linux/Documentation/translations/zh_CN/block/
blk-mq.rst
      4  :Original: Documentation/block/blk-mq.rst
     15  Multi-queue block device I/O queueing mechanism (blk-mq)
     40  … congestion problems. To solve these problems, the blk-mq API introduced multiple queues, each queue local to a CPU
     47  When user space performs I/O on a block device (for example, reading or writing a file), blk-mq steps in: it will store and
     51  blk-mq consists of two groups of queues: software staging queues and hardware dispatch queues. When a request arrives at the block layer, it will try
     57  … more requests, blk-mq will place the requests on a temporary queue and send them once hardware resources are sufficient.
    117  - `Linux Block IO: Introducing Multi-queue SSD Access on Multi-core Systems <http://kernel.dk/blk-mq.pdf>`_
    128  include/linux/blk-mq.h
    130  block/blk-mq.c

/linux/drivers/misc/sgi-gru/
grukservices.c
    134  #define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h]))
    in gru_create_message_queue():
    546      struct message_queue *mq = p;
    550      memset(mq, 0, bytes);
    551      mq->start = &mq->data;
    552      mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
    553      mq->next = &mq->data;
    554      mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
    555      mq->qlines = qlines;
    556      mq->hstatus[0] = 0;
    557      mq->hstatus[1] = 1;
    [all …]
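
gru_create_message_queue() above lays out one contiguous buffer as a two-half message queue, deriving the start2 boundary and the limit cursor from the number of cache lines. A sketch that reproduces just that pointer arithmetic, assuming a 64-byte cache line and simplified fields:

/* Sketch of the pointer layout set up by gru_create_message_queue():
 * one contiguous buffer is split into two halves (start/start2) with
 * next/limit cursors derived from the cache-line count. Sizes and
 * field names are simplified stand-ins. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CACHE_LINE_BYTES 64

struct message_queue_model {
    char *start, *start2, *next, *limit;
    int qlines;
    int hstatus[2];
    char data[];            /* qlines * CACHE_LINE_BYTES of payload */
};

static struct message_queue_model *create_mq(int qlines)
{
    size_t bytes = sizeof(struct message_queue_model) +
                   (size_t)qlines * CACHE_LINE_BYTES;
    struct message_queue_model *mq = malloc(bytes);

    if (!mq)
        return NULL;
    memset(mq, 0, bytes);
    mq->start = mq->data;
    mq->start2 = mq->data + (qlines / 2 - 1) * CACHE_LINE_BYTES;
    mq->next = mq->data;
    mq->limit = mq->data + (qlines - 2) * CACHE_LINE_BYTES;
    mq->qlines = qlines;
    mq->hstatus[0] = 0;
    mq->hstatus[1] = 1;
    return mq;
}

int main(void)
{
    struct message_queue_model *mq = create_mq(16);

    if (mq) {
        printf("half boundary at %ld bytes, limit at %ld bytes\n",
               (long)(mq->start2 - mq->start),
               (long)(mq->limit - mq->start));
        free(mq);
    }
    return 0;
}

For 16 lines this puts the second half at byte 448 and the limit at byte 896, mirroring the (qlines / 2 - 1) and (qlines - 2) expressions above.
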
/linux/drivers/scsi/elx/efct/
efct_hw_queues.c
    in efct_hw_init_queues():
     17      struct hw_mq *mq = NULL;
     51      mq = efct_hw_new_mq(cq, EFCT_HW_MQ_DEPTH);
     52      if (!mq) {
    in efct_hw_new_mq():
    247      struct hw_mq *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
    249      if (!mq)
    252      mq->cq = cq;
    253      mq->type = SLI4_QTYPE_MQ;
    254      mq->instance = cq->eq->hw->mq_count++;
    255      mq->entry_count = entry_count;
    256      mq->entry_size = EFCT_HW_MQ_DEPTH;
    [all …]

/linux/drivers/misc/sgi-xp/
xpc_uv.c
     90  xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
     92      int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
     94      mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
     96      if (mq->irq < 0)
     97          return mq->irq;
     99      mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
    105  xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
    107      uv_teardown_irq(mq->irq);
    111  xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
    115      ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
    [all …]

/linux/Documentation/admin-guide/device-mapper/
cache-policies.rst
     29  multiqueue (mq)
     48  with the multiqueue (mq) policy.
     50  The smq policy (vs mq) offers the promise of less memory utilization,
     54  Users may switch from "mq" to "smq" simply by appropriately reloading a
     56  mq policy's hints to be dropped. Also, performance of the cache may
     63  The mq policy used a lot of memory; 88 bytes per cache block on a 64
     78  mq placed entries in different levels of the multiqueue structures
     91  The mq policy maintained a hit count for each cache block. For a
    105  Testing smq shows substantially better performance than mq.
    129  /dev/sdd 512 0 mq 4 sequential_threshold 1024 random_threshold 8"
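
The excerpt quotes mq's cost of 88 bytes of policy state per cache block, which is why smq's smaller footprint matters. A worked-arithmetic sketch of that figure; the cache geometry below is an arbitrary illustration, not from the document:

/* Worked example of the memory figure quoted above: with the mq
 * policy's 88 bytes of state per cache block, the policy overhead is
 * simply nr_blocks * 88. The 1 TiB / 512 KiB geometry is invented
 * for illustration. */
#include <stdio.h>

int main(void)
{
    unsigned long long cache_bytes = 1ULL << 40;        /* 1 TiB cache */
    unsigned long long block_bytes = 512ULL * 1024;     /* 512 KiB blocks */
    unsigned long long nr_blocks = cache_bytes / block_bytes;
    unsigned long long mq_bytes = nr_blocks * 88;       /* mq policy state */

    printf("%llu blocks -> ~%llu MiB of mq policy state\n",
           nr_blocks, mq_bytes >> 20);
    return 0;
}

That comes to roughly 176 MiB of policy state for such a cache, which motivates the document's point about smq's lower memory use.
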
/linux/drivers/net/ethernet/netronome/nfp/abm/
qdisc.c
    in nfp_abm_qdisc_clear_mq():
    304      struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
    307      if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
    309      for (i = 0; i < mq->num_children; i++)
    310          if (mq->children[i] == qdisc) {
    311              mq->children[i] = NULL;
    in nfp_abm_mq_stats():
    791      memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
    792      memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));
    803      nfp_abm_stats_propagate(&qdisc->mq.stats,
    805      nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
    810      nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
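
nfp_abm_mq_stats() above zeroes both stats and prev_stats, accumulates child qdisc counters into each, and then derives per-interval figures from the difference. A generic sketch of that stats/prev_stats delta idiom, with illustrative field names:

/* Generic sketch of the stats/prev_stats idiom used above: absolute
 * counters are accumulated from children, and the per-interval figure
 * is the difference against the snapshot taken at the previous read.
 * Field names are illustrative, not the driver's. */
#include <stdio.h>

struct stats { unsigned long long pkts, drops; };

static void stats_propagate(struct stats *sum, const struct stats *child)
{
    sum->pkts += child->pkts;
    sum->drops += child->drops;
}

static void stats_calculate(const struct stats *now, struct stats *prev,
                            struct stats *delta)
{
    delta->pkts = now->pkts - prev->pkts;
    delta->drops = now->drops - prev->drops;
    *prev = *now;               /* snapshot for the next interval */
}

int main(void)
{
    struct stats child0 = { 100, 2 }, child1 = { 50, 1 };
    struct stats sum = { 0, 0 }, prev = { 120, 1 }, delta;

    stats_propagate(&sum, &child0);
    stats_propagate(&sum, &child1);
    stats_calculate(&sum, &prev, &delta);
    printf("interval: %llu pkts, %llu drops\n", delta.pkts, delta.drops);
    return 0;
}
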
/linux/drivers/mtd/maps/
vmu-flash.c
     89  static void vmu_blockread(struct mapleq *mq)
     94      mdev = mq->dev;
    101      memcpy(card->blockread, mq->recvbuf->buf + 12,
    in maple_vmu_read_block():
    191      list_del_init(&(mdev->mq->list));
    192      kfree(mdev->mq->sendbuf);
    193      mdev->mq->sendbuf = NULL;
    in maple_vmu_write_block():
    283      kfree(mdev->mq->sendbuf);
    284      mdev->mq->sendbuf = NULL;
    285      list_del_init(&(mdev->mq->list));
    499  static void vmu_queryblocks(struct mapleq *mq)
    [all …]

/linux/include/linux/
maple.h
     69      struct mapleq *mq;
     70      void (*callback) (struct mapleq * mq);
     89      void (*callback) (struct mapleq * mq),

/linux/Documentation/block/
switching-sched.rst
     17  the fly to select one of mq-deadline, none, bfq, or kyber schedulers -
     32  [mq-deadline] kyber bfq none
     35  [none] mq-deadline kyber bfq
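
The document describes switching the active scheduler by writing its name to the queue/scheduler sysfs file, with the current choice shown in brackets on read-back. The same operation from C; "sda" is an assumption, substitute your device, and root privileges are needed:

/* C equivalent of "echo mq-deadline > /sys/block/sda/queue/scheduler".
 * The device name is an assumption for illustration. */
#include <stdio.h>

int main(void)
{
    const char *path = "/sys/block/sda/queue/scheduler";
    char line[256];
    FILE *f = fopen(path, "w");

    if (!f) {
        perror(path);
        return 1;
    }
    fputs("mq-deadline", f);
    fclose(f);

    /* Read back: the active scheduler is the one in brackets. */
    f = fopen(path, "r");
    if (f && fgets(line, sizeof(line), f))
        fputs(line, stdout);
    if (f)
        fclose(f);
    return 0;
}
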
blk-mq.rst
      4  Multi-Queue Block IO Queueing Mechanism (blk-mq)
     36  to different CPUs) wanted to perform block IO. Instead of this, the blk-mq API
     45  for instance), blk-mq takes action: it will store and manage IO requests to
     49  blk-mq has two groups of queues: software staging queues and hardware dispatch
     59  resources to accept more requests, blk-mq will place requests on a temporary
    142  … Block IO: Introducing Multi-queue SSD Access on Multi-core Systems <http://kernel.dk/blk-mq.pdf>`_
    151  .. kernel-doc:: include/linux/blk-mq.h
    153  .. kernel-doc:: block/blk-mq.c
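
The lines above summarize blk-mq's core structure: per-CPU software staging queues feed bounded hardware dispatch queues, and requests the device cannot yet accept are held back. A purely conceptual userspace model of that flow, not the kernel API:

/* Conceptual model of the two queue groups described above: per-CPU
 * software staging queues are flushed into a bounded hardware dispatch
 * queue; whatever the "hardware" cannot accept stays staged for a
 * later flush. Purely illustrative. */
#include <stdio.h>

#define NR_CPUS     2
#define STAGE_DEPTH 8
#define HW_DEPTH    4

struct staging { int req[STAGE_DEPTH]; int count; };

static struct staging stage[NR_CPUS];
static int hw_queue[HW_DEPTH], hw_count;

/* Queue a request on the submitting CPU's software staging queue. */
static void submit(int cpu, int req)
{
    struct staging *s = &stage[cpu];

    if (s->count < STAGE_DEPTH)
        s->req[s->count++] = req;
}

/* Move staged requests to the dispatch queue until the device is full;
 * the rest stays staged for a later flush, which is what blk-mq does
 * when the hardware has no free slots. */
static void flush(int cpu)
{
    struct staging *s = &stage[cpu];
    int i, moved = 0;

    while (moved < s->count && hw_count < HW_DEPTH)
        hw_queue[hw_count++] = s->req[moved++];
    for (i = moved; i < s->count; i++)
        s->req[i - moved] = s->req[i];
    s->count -= moved;
    printf("cpu%d: dispatched %d, %d still staged\n", cpu, moved, s->count);
}

int main(void)
{
    int r;

    for (r = 0; r < 6; r++)
        submit(0, r);
    flush(0);   /* only HW_DEPTH requests fit; two stay staged */
    return 0;
}
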
null_blk.rst
    114  0    nullb* use default blk-mq io scheduler
    122  0    Register as a non-blocking blk-mq driver device.
    123  1    Register as a blocking blk-mq driver device, null_blk will set
    133  1    Tag set shared between devices for blk-mq. Only makes sense with

/linux/drivers/input/mouse/
maplemouse.c
     27  static void dc_mouse_callback(struct mapleq *mq)
     30      struct maple_device *mapledev = mq->dev;
     33      unsigned char *res = mq->recvbuf->buf;

/linux/arch/arm/boot/dts/allwinner/
sun8i-t113s-mangopi-mq-r-t113.dts
      9  #include "sunxi-d1s-t113-mangopi-mq-r.dtsi"
     13  compatible = "widora,mangopi-mq-r-t113", "allwinner,sun8i-t113s";

/linux/drivers/net/wireless/intel/iwlwifi/dvm/
main.c
    in iwl_stop_sw_queue():
    2058     int mq = priv->queue_to_mac80211[queue];
    2060     if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
    2063     if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) {
    2066         queue, mq);
    2070     set_bit(mq, &priv->transport_queue_stop);
    2071     ieee80211_stop_queue(priv->hw, mq);
    in iwl_wake_sw_queue():
    2077     int mq = priv->queue_to_mac80211[queue];
    2079     if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
    2082     if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) {
    2085         queue, mq);
    [all …]
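
iwl_stop_sw_queue() and iwl_wake_sw_queue() above refcount stops per mac80211 queue: only the 0-to-1 transition of queue_stop_count actually stops the queue, and only the final decrement wakes it, since several transport queues can map to one software queue. A sketch of that pattern using C11 atomics in place of the kernel's atomic_t:

/* Sketch of the refcounted stop/wake pattern above: only the first
 * stop and the last wake act on the software queue. C11 atomics
 * stand in for the kernel's atomic_inc_return()/atomic_dec_return(). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int stop_count;

static void stop_sw_queue(void)
{
    /* fetch_add returns the old value; old + 1 is the new count. */
    if (atomic_fetch_add(&stop_count, 1) + 1 > 1) {
        printf("already stopped, refcount only\n");
        return;
    }
    printf("stopping software queue\n");   /* ieee80211_stop_queue() */
}

static void wake_sw_queue(void)
{
    if (atomic_fetch_sub(&stop_count, 1) - 1 > 0) {
        printf("still stopped by another HW queue\n");
        return;
    }
    printf("waking software queue\n");     /* ieee80211_wake_queue() */
}

int main(void)
{
    stop_sw_queue();   /* stops */
    stop_sw_queue();   /* refcount only */
    wake_sw_queue();   /* still stopped */
    wake_sw_queue();   /* wakes */
    return 0;
}
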
/linux/arch/riscv/boot/dts/allwinner/
Makefile
      9  dtb-$(CONFIG_ARCH_SUNXI) += sun20i-d1-mangopi-mq-pro.dtb
     11  dtb-$(CONFIG_ARCH_SUNXI) += sun20i-d1s-mangopi-mq.dtb

/linux/Documentation/devicetree/bindings/powerpc/4xx/
ppc440spe-adma.txt
     82  - compatible : "ibm,mq-440spe";
     87  MQ0: mq {
     88      compatible = "ibm,mq-440spe";

/linux/drivers/input/joystick/
maplecontrol.c
     27  static void dc_pad_callback(struct mapleq *mq)
     30      struct maple_device *mapledev = mq->dev;
     33      unsigned char *res = mq->recvbuf->buf;

/linux/drivers/infiniband/hw/ocrdma/
ocrdma_hw.c
    in ocrdma_get_mcqe():
    124      (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
    in ocrdma_mcq_inc_tail():
    133      dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
    in ocrdma_get_mqe():
    138      return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
    in ocrdma_mq_inc_head():
    143      dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
    in ocrdma_get_mqe_rsp():
    148      return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
    in ocrdma_ring_mq_db():
    325      val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
    in ocrdma_mbx_create_mq():
    565      struct ocrdma_queue_info *mq,
    574      num_pages = PAGES_4K_SPANNED(mq->va, mq->size);
    589      cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
    594      ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
    [all …]
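
Every index update above advances head or tail by ANDing with (length - 1), which wraps correctly only because OCRDMA_MQ_LEN and OCRDMA_MQ_CQ_LEN are powers of two. A minimal sketch of that ring arithmetic with an illustrative length:

/* Sketch of the ring-index arithmetic above: head/tail advance with a
 * mask, which wraps correctly as long as the ring length is a power
 * of two. The length here is illustrative. */
#include <stdio.h>

#define MQ_LEN 16               /* must be a power of two */

struct ring { unsigned int head, tail; };

static void inc_head(struct ring *r)
{
    r->head = (r->head + 1) & (MQ_LEN - 1);
}

static void inc_tail(struct ring *r)
{
    r->tail = (r->tail + 1) & (MQ_LEN - 1);
}

int main(void)
{
    struct ring r = { .head = MQ_LEN - 1, .tail = MQ_LEN - 1 };

    inc_head(&r);
    inc_tail(&r);
    printf("head wrapped to %u, tail wrapped to %u\n", r.head, r.tail);
    return 0;
}

The mask replaces a compare-and-reset branch; the same trick only works for power-of-two lengths, which is why both queue-length constants are defined that way.
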
/linux/Documentation/ABI/testing/
sysfs-block-dm
     45  Description: Request-based Device-mapper blk-mq I/O path mode.
     46               Contains the value 1 if the device is using blk-mq.