Search results for refs:tag_set (results 1 – 25 of 46), sorted by relevance.

/linux/rust/kernel/block/mq/
tag_set.rs
43 let tag_set: bindings::blk_mq_tag_set = unsafe { core::mem::zeroed() }; in new() localVariable
44 let tag_set: Result<_> = core::mem::size_of::<RequestDataWrapper>() in new() localVariable
57 ..tag_set in new()
64 inner <- tag_set.pin_chain(|tag_set| { in new()
66 let tag_set: &mut Opaque<_> = unsafe { Pin::get_unchecked_mut(tag_set) }; in new() localVariable
68 error::to_result( unsafe { bindings::blk_mq_alloc_tag_set(tag_set.get())}) in new()
/linux/drivers/md/
dm-rq.c
543 md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id); in dm_mq_init_request_queue()
544 if (!md->tag_set) in dm_mq_init_request_queue()
547 md->tag_set->ops = &dm_mq_ops; in dm_mq_init_request_queue()
548 md->tag_set->queue_depth = dm_get_blk_mq_queue_depth(); in dm_mq_init_request_queue()
549 md->tag_set->numa_node = md->numa_node_id; in dm_mq_init_request_queue()
550 md->tag_set->flags = BLK_MQ_F_STACKING; in dm_mq_init_request_queue()
551 md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues(); in dm_mq_init_request_queue()
552 md->tag_set->driver_data = md; in dm_mq_init_request_queue()
554 md->tag_set->cmd_size = sizeof(struct dm_rq_target_io); in dm_mq_init_request_queue()
558 md->tag_set->cmd_size += immutable_tgt->per_io_data_size; in dm_mq_init_request_queue()
[all …]
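
The dm-rq.c hits above show the most common pattern in these results: the driver fills the blk_mq_tag_set fields (ops, queue_depth, numa_node, flags, nr_hw_queues, driver_data, cmd_size) and then registers the set with blk_mq_alloc_tag_set(). A minimal sketch of that pattern follows; my_mq_ops, my_queue_rq and my_alloc_tag_set are hypothetical names, and the depth and queue counts are placeholders, not values taken from dm-rq.c.

#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical ->queue_rq: start the request and complete it immediately.
 * A real driver would submit the I/O here instead. */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                const struct blk_mq_queue_data *bd)
{
        blk_mq_start_request(bd->rq);
        blk_mq_end_request(bd->rq, BLK_STS_OK);
        return BLK_STS_OK;
}

static const struct blk_mq_ops my_mq_ops = {
        .queue_rq = my_queue_rq,
};

/* Allocate, fill and register a tag set, in the order dm-rq.c uses. */
static struct blk_mq_tag_set *my_alloc_tag_set(void *driver_data, int node)
{
        struct blk_mq_tag_set *set;
        int ret;

        set = kzalloc_node(sizeof(*set), GFP_KERNEL, node);
        if (!set)
                return ERR_PTR(-ENOMEM);

        set->ops = &my_mq_ops;
        set->nr_hw_queues = 1;
        set->queue_depth = 128;
        set->numa_node = node;
        set->cmd_size = 0;              /* size of per-request driver data, if any */
        set->driver_data = driver_data;

        ret = blk_mq_alloc_tag_set(set);        /* allocates tags for every hw queue */
        if (ret) {
                kfree(set);
                return ERR_PTR(ret);
        }
        return set;
}

Teardown is the mirror image seen in the nbd and null_blk hits further down: blk_mq_free_tag_set() followed by freeing the containing allocation.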
/linux/drivers/mmc/core/
queue.c
384 disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq); in mmc_alloc_disk()
432 memset(&mq->tag_set, 0, sizeof(mq->tag_set)); in mmc_init_queue()
433 mq->tag_set.ops = &mmc_mq_ops; in mmc_init_queue()
439 mq->tag_set.queue_depth = in mmc_init_queue()
442 mq->tag_set.queue_depth = MMC_QUEUE_DEPTH; in mmc_init_queue()
443 mq->tag_set.numa_node = NUMA_NO_NODE; in mmc_init_queue()
444 mq->tag_set.flags = BLK_MQ_F_BLOCKING; in mmc_init_queue()
445 mq->tag_set.nr_hw_queues = 1; in mmc_init_queue()
446 mq->tag_set.cmd_size = sizeof(struct mmc_queue_req); in mmc_init_queue()
447 mq->tag_set.driver_data = mq; in mmc_init_queue()
[all …]
queue.h
77 struct blk_mq_tag_set tag_set; member
/linux/block/
bsg-lib.c
22 struct blk_mq_tag_set tag_set; member
279 container_of(q->tag_set, struct bsg_set, tag_set); in bsg_queue_rq()
324 container_of(q->tag_set, struct bsg_set, tag_set); in bsg_remove_queue()
329 blk_mq_free_tag_set(&bset->tag_set); in bsg_remove_queue()
338 container_of(rq->q->tag_set, struct bsg_set, tag_set); in bsg_timeout()
378 set = &bset->tag_set; in bsg_setup_queue()
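
bsg-lib.c shows the other half of the embedding idiom: the tag set lives inside a driver-private structure, and callbacks that only get a request or a request_queue recover that structure with container_of() on q->tag_set. Below is a sketch of the same idiom with hypothetical names (struct my_set, my_bsg_queue_rq); only the container_of() on the embedded member is taken from the hits above.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/container_of.h>

struct my_set {
        struct blk_mq_tag_set tag_set;  /* embedded, like struct bsg_set */
        bool online;                    /* hypothetical driver state */
};

static blk_status_t my_bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
{
        struct request_queue *q = hctx->queue;
        /* q->tag_set points at the embedded member, so container_of()
         * recovers the surrounding driver structure, as bsg_queue_rq() does. */
        struct my_set *ms = container_of(q->tag_set, struct my_set, tag_set);

        if (!ms->online)
                return BLK_STS_IOERR;   /* block layer fails the request */

        blk_mq_start_request(bd->rq);
        blk_mq_end_request(bd->rq, BLK_STS_OK);
        return BLK_STS_OK;
}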
blk-mq-tag.c
271 struct blk_mq_tag_set *set = q->tag_set; in bt_iter()
509 srcu_idx = srcu_read_lock(&q->tag_set->tags_srcu); in blk_mq_queue_tag_busy_iter()
510 if (blk_mq_is_shared_tags(q->tag_set->flags)) { in blk_mq_queue_tag_busy_iter()
511 struct blk_mq_tags *tags = q->tag_set->shared_tags; in blk_mq_queue_tag_busy_iter()
539 srcu_read_unlock(&q->tag_set->tags_srcu, srcu_idx); in blk_mq_queue_tag_busy_iter()
629 nr - q->tag_set->reserved_tags); in blk_mq_tag_update_sched_shared_tags()
blk-mq.h
87 return queue_hctx((q), (q->tag_set->map[type].mq_map[cpu])); in blk_mq_map_queue_type()
441 if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \
442 struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
462 q->tag_set->map[HCTX_TYPE_POLL].nr_queues; in blk_mq_can_poll()
blk-mq-sched.c
552 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_alloc_sched_res()
616 unsigned int flags = q->tag_set->flags; in blk_mq_init_sched()
674 if (blk_mq_is_shared_tags(q->tag_set->flags)) { in blk_mq_sched_free_rqs()
675 blk_mq_free_rqs(q->tag_set, q->sched_shared_tags, in blk_mq_sched_free_rqs()
680 blk_mq_free_rqs(q->tag_set, in blk_mq_sched_free_rqs()
/linux/drivers/block/null_blk/
main.c
72 static struct blk_mq_tag_set tag_set; variable
407 set = dev->nullb->tag_set; in nullb_update_nr_hw_queues()
1770 if (nullb->tag_set == &nullb->__tag_set) in null_del_dev()
1771 blk_mq_free_tag_set(nullb->tag_set); in null_del_dev()
1836 if (tag_set.ops) in null_init_global_tag_set()
1839 tag_set.nr_hw_queues = g_submit_queues; in null_init_global_tag_set()
1840 tag_set.queue_depth = g_hw_queue_depth; in null_init_global_tag_set()
1841 tag_set.numa_node = g_home_node; in null_init_global_tag_set()
1843 tag_set.flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT; in null_init_global_tag_set()
1845 tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED; in null_init_global_tag_set()
[all …]
/linux/include/scsi/
scsi_tcq.h
32 if (hwq < shost->tag_set.nr_hw_queues) { in scsi_host_find_tag()
33 req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq], in scsi_host_find_tag()
/linux/drivers/block/
nbd.c
117 struct blk_mq_tag_set tag_set; member
269 blk_mq_free_tag_set(&nbd->tag_set); in nbd_dev_remove()
480 (config->num_connections == 1 && nbd->tag_set.timeout)) { in nbd_xmit_timeout()
513 if (!nbd->tag_set.timeout) { in nbd_xmit_timeout()
886 if (hwq < nbd->tag_set.nr_hw_queues) in nbd_handle_reply()
887 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], in nbd_handle_reply()
1053 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); in nbd_clear_que()
1343 if (nbd->tag_set.timeout) in nbd_reconnect_socket()
1344 sock->sk->sk_sndtimeo = nbd->tag_set.timeout; in nbd_reconnect_socket()
1463 nbd->tag_set.timeout = 0; in nbd_config_put()
[all …]
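
The nbd.c hits illustrate the two lookup/iteration helpers that recur in these results: blk_mq_tag_to_rq() maps a (hardware queue, tag) pair back to its request through tag_set.tags[hwq], and blk_mq_tagset_busy_iter() visits every in-flight request of a tag set. A sketch of both, assuming the two-argument busy_tag_iter_fn callback of recent kernels; my_dev, my_find_rq, my_count_rq and my_busy are hypothetical names.

#include <linux/blk-mq.h>

struct my_dev {
        struct blk_mq_tag_set tag_set;
};

/* Resolve a (hwq, tag) pair to its request, as nbd_handle_reply() does,
 * after bounds-checking the hardware queue index. */
static struct request *my_find_rq(struct my_dev *d, u16 hwq, u32 tag)
{
        if (hwq >= d->tag_set.nr_hw_queues)
                return NULL;
        return blk_mq_tag_to_rq(d->tag_set.tags[hwq], tag);
}

/* Called once per in-flight request; return true to keep iterating. */
static bool my_count_rq(struct request *rq, void *data)
{
        (*(unsigned int *)data)++;
        return true;
}

/* Count outstanding requests, the way scsi_host_busy() in hosts.c does. */
static unsigned int my_busy(struct my_dev *d)
{
        unsigned int busy = 0;

        blk_mq_tagset_busy_iter(&d->tag_set, my_count_rq, &busy);
        return busy;
}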
ataflop.c
305 struct blk_mq_tag_set tag_set; member
2001 disk = blk_mq_alloc_disk(&unit[drive].tag_set, &lim, NULL); in ataflop_alloc_disk()
2055 blk_mq_free_tag_set(&unit[i].tag_set); in atari_floppy_cleanup()
2073 blk_mq_free_tag_set(&fs->tag_set); in atari_cleanup_floppy_disk()
2086 memset(&unit[i].tag_set, 0, sizeof(unit[i].tag_set)); in atari_floppy_init()
2087 unit[i].tag_set.ops = &ataflop_mq_ops; in atari_floppy_init()
2088 unit[i].tag_set.nr_hw_queues = 1; in atari_floppy_init()
2089 unit[i].tag_set.nr_maps = 1; in atari_floppy_init()
2090 unit[i].tag_set.queue_depth = 2; in atari_floppy_init()
2091 unit[i].tag_set.numa_node = NUMA_NO_NODE; in atari_floppy_init()
[all …]
ublk_drv.c
215 struct blk_mq_tag_set tag_set; member
1886 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
1982 blk_mq_tagset_busy_iter(&ub->tag_set, in ublk_force_abort_dev()
2506 req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag); in __ublk_check_and_get_req()
2691 if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[cpu] == q_id) in ublk_init_queue()
2796 blk_mq_free_tag_set(&ub->tag_set); in ublk_add_chdev()
2844 ub->tag_set.ops = &ublk_mq_ops; in ublk_remove()
2845 ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues; in ublk_remove()
2846 ub->tag_set.queue_depth = ub->dev_info.queue_depth; in ublk_remove()
2847 ub->tag_set in ublk_remove()
[all …]
/linux/drivers/scsi/
scsi_lib.c
2003 struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set); in scsi_map_queues()
2097 struct blk_mq_tag_set *tag_set = &shost->tag_set; in scsi_mq_setup_tags() local
2106 memset(tag_set, 0, sizeof(*tag_set)); in scsi_mq_setup_tags()
2108 tag_set->ops = &scsi_mq_ops; in scsi_mq_setup_tags()
2110 tag_set->ops = &scsi_mq_ops_no_commit; in scsi_mq_setup_tags()
2111 tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1; in scsi_mq_setup_tags()
2112 tag_set->nr_maps = shost->nr_maps ? : 1; in scsi_mq_setup_tags()
2113 tag_set->queue_depth = shost->can_queue + shost->nr_reserved_cmds; in scsi_mq_setup_tags()
2114 tag_set->reserved_tags = shost->nr_reserved_cmds; in scsi_mq_setup_tags()
2115 tag_set->cmd_size = cmd_size; in scsi_mq_setup_tags()
[all …]
hosts.c
629 if (shost->tag_set.ops) in scsi_host_busy()
630 blk_mq_tagset_busy_iter(&shost->tag_set, in scsi_host_busy()
731 blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter, in scsi_host_complete_all_commands()
767 blk_mq_tagset_busy_iter(&shost->tag_set, __scsi_host_busy_iter_fn, in scsi_host_busy_iter()
/linux/arch/um/drivers/
ubd_kern.c
159 struct blk_mq_tag_set tag_set; member
784 blk_mq_free_tag_set(&ubd_dev->tag_set); in ubd_device_release()
865 ubd_dev->tag_set.ops = &ubd_mq_ops; in ubd_add()
866 ubd_dev->tag_set.queue_depth = 64; in ubd_add()
867 ubd_dev->tag_set.numa_node = NUMA_NO_NODE; in ubd_add()
868 ubd_dev->tag_set.driver_data = ubd_dev; in ubd_add()
869 ubd_dev->tag_set.nr_hw_queues = 1; in ubd_add()
871 err = blk_mq_alloc_tag_set(&ubd_dev->tag_set); in ubd_add()
875 disk = blk_mq_alloc_disk(&ubd_dev->tag_set, &lim, ubd_dev); in ubd_add()
907 blk_mq_free_tag_set(&ubd_dev->tag_set); in ubd_add()
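
ubd_kern.c shows the whole lifecycle in one function: fill the embedded set, blk_mq_alloc_tag_set(), pass the set to blk_mq_alloc_disk(), and blk_mq_free_tag_set() on the error path (and again on device release). A condensed sketch of that order; my_dev and my_add_disk are hypothetical, the field values mirror the ubd_add() hits, and passing NULL queue limits to blk_mq_alloc_disk() to get defaults is an assumption about recent kernels (otherwise pass a populated struct queue_limits, as the mmc and gdrom hits do).

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/numa.h>

struct my_dev {
        struct blk_mq_tag_set tag_set;
        struct gendisk *disk;
};

static int my_add_disk(struct my_dev *d, const struct blk_mq_ops *ops)
{
        int err;

        d->tag_set.ops = ops;
        d->tag_set.queue_depth = 64;
        d->tag_set.numa_node = NUMA_NO_NODE;
        d->tag_set.nr_hw_queues = 1;
        d->tag_set.driver_data = d;

        err = blk_mq_alloc_tag_set(&d->tag_set);
        if (err)
                return err;

        /* NULL limits: assumed to mean "use default queue limits". */
        d->disk = blk_mq_alloc_disk(&d->tag_set, NULL, d);
        if (IS_ERR(d->disk)) {
                err = PTR_ERR(d->disk);
                blk_mq_free_tag_set(&d->tag_set);       /* undo on error, like ubd_add() */
                return err;
        }
        return 0;
}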
/linux/drivers/cdrom/
gdrom.c
104 struct blk_mq_tag_set tag_set; member
779 err = blk_mq_alloc_sq_tag_set(&gd.tag_set, &gdrom_mq_ops, 1, in probe_gdrom()
784 gd.disk = blk_mq_alloc_disk(&gd.tag_set, &lim, NULL); in probe_gdrom()
825 blk_mq_free_tag_set(&gd.tag_set); in probe_gdrom()
837 blk_mq_free_tag_set(&gd.tag_set); in remove_gdrom()
/linux/drivers/s390/block/
scm_blk.h
20 struct blk_mq_tag_set tag_set; member
/linux/include/linux/mtd/
blktrans.h
34 struct blk_mq_tag_set *tag_set; member
/linux/drivers/block/rnbd/
rnbd-clt.h
89 struct blk_mq_tag_set tag_set; member
/linux/drivers/memstick/core/
ms_block.h
150 struct blk_mq_tag_set tag_set; member
mspro_block.c
140 struct blk_mq_tag_set tag_set; member
1139 rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2, 0); in mspro_block_init_disk()
1143 msb->disk = blk_mq_alloc_disk(&msb->tag_set, &lim, card); in mspro_block_init_disk()
1176 blk_mq_free_tag_set(&msb->tag_set); in mspro_block_init_disk()
1259 blk_mq_free_tag_set(&msb->tag_set); in mspro_block_remove()
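
For simple single-queue devices like mspro_block and gdrom, blk_mq_alloc_sq_tag_set() replaces the field-by-field setup: it takes the ops, a queue depth and BLK_MQ_F_* flags, fills a one-hw-queue set, and allocates it in a single call. A sketch with hypothetical names (my_sq_ops, my_sq_queue_rq, my_sq_init); depth 2 matches the mspro_block call above, and BLK_MQ_F_BLOCKING is only an example flag (mspro_block passes 0).

#include <linux/blk-mq.h>

static blk_status_t my_sq_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        blk_mq_start_request(bd->rq);
        blk_mq_end_request(bd->rq, BLK_STS_OK);
        return BLK_STS_OK;
}

static const struct blk_mq_ops my_sq_ops = {
        .queue_rq = my_sq_queue_rq,
};

static struct blk_mq_tag_set my_sq_set;

static int my_sq_init(void)
{
        /* One hardware queue, depth 2, ->queue_rq allowed to block. */
        return blk_mq_alloc_sq_tag_set(&my_sq_set, &my_sq_ops, 2,
                                       BLK_MQ_F_BLOCKING);
}

static void my_sq_exit(void)
{
        blk_mq_free_tag_set(&my_sq_set);
}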
/linux/lib/
radix-tree.c
100 static inline void tag_set(struct radix_tree_node *node, unsigned int tag, in tag_set() function
440 tag_set(node, tag, 0); in radix_tree_extend()
945 tag_set(node, tag, offset); in node_tag_set()
984 tag_set(parent, tag, offset); in radix_tree_tag_set()
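
This group is unrelated to blk-mq: lib/radix-tree.c defines its own static tag_set() helper that simply sets one tag bit on a radix-tree node, roughly as sketched below (an approximation of the in-tree helper, not a verbatim copy).

#include <linux/bitops.h>
#include <linux/radix-tree.h>

/* Approximate shape of the static helper hit above: mark the slot at
 * 'offset' as carrying 'tag' by setting a bit in the node's per-tag bitmap. */
static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
                           int offset)
{
        __set_bit(offset, node->tags[tag]);
}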
/linux/drivers/ufs/core/
ufshcd-priv.h
381 struct blk_mq_tags *tags = hba->host->tag_set.shared_tags ?: in ufshcd_tag_to_cmd()
382 hba->host->tag_set.tags[0]; in ufshcd_tag_to_cmd()
/linux/drivers/block/aoe/
aoe.h
175 struct blk_mq_tag_set tag_set; member
