Lines Matching refs:ubq

77 struct ublk_queue *ubq; member
192 static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq);
195 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
207 static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq) in ublk_queue_is_zoned() argument
209 return ubq->flags & UBLK_F_ZONED; in ublk_queue_is_zoned()
391 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, in ublk_setup_iod_zoned() argument
394 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); in ublk_setup_iod_zoned()
395 struct ublk_io *io = &ubq->ios[req->tag]; in ublk_setup_iod_zoned()
465 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, in ublk_setup_iod_zoned() argument
577 static inline bool ublk_support_user_copy(const struct ublk_queue *ubq) in ublk_support_user_copy() argument
579 return ubq->flags & UBLK_F_USER_COPY; in ublk_support_user_copy()
582 static inline bool ublk_need_req_ref(const struct ublk_queue *ubq) in ublk_need_req_ref() argument
588 return ublk_support_user_copy(ubq); in ublk_need_req_ref()
591 static inline void ublk_init_req_ref(const struct ublk_queue *ubq, in ublk_init_req_ref() argument
594 if (ublk_need_req_ref(ubq)) { in ublk_init_req_ref()
601 static inline bool ublk_get_req_ref(const struct ublk_queue *ubq, in ublk_get_req_ref() argument
604 if (ublk_need_req_ref(ubq)) { in ublk_get_req_ref()
613 static inline void ublk_put_req_ref(const struct ublk_queue *ubq, in ublk_put_req_ref() argument
616 if (ublk_need_req_ref(ubq)) { in ublk_put_req_ref()
625 static inline bool ublk_need_get_data(const struct ublk_queue *ubq) in ublk_need_get_data() argument
627 return ubq->flags & UBLK_F_NEED_GET_DATA; in ublk_need_get_data()
655 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq, in ublk_get_iod() argument
659 &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]); in ublk_get_iod()
669 struct ublk_queue *ubq = ublk_get_queue(ub, q_id); in ublk_queue_cmd_buf_size() local
671 return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc), in ublk_queue_cmd_buf_size()
676 struct ublk_queue *ubq) in ublk_queue_can_use_recovery_reissue() argument
678 return (ubq->flags & UBLK_F_USER_RECOVERY) && in ublk_queue_can_use_recovery_reissue()
679 (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE); in ublk_queue_can_use_recovery_reissue()
683 struct ublk_queue *ubq) in ublk_queue_can_use_recovery() argument
685 return ubq->flags & UBLK_F_USER_RECOVERY; in ublk_queue_can_use_recovery()
865 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req, in ublk_map_io() argument
870 if (ublk_support_user_copy(ubq)) in ublk_map_io()
888 static int ublk_unmap_io(const struct ublk_queue *ubq, in ublk_unmap_io() argument
894 if (ublk_support_user_copy(ubq)) in ublk_unmap_io()
937 static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req) in ublk_setup_iod() argument
939 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); in ublk_setup_iod()
940 struct ublk_io *io = &ubq->ios[req->tag]; in ublk_setup_iod()
944 if (!ublk_queue_is_zoned(ubq) && in ublk_setup_iod()
965 if (ublk_queue_is_zoned(ubq)) in ublk_setup_iod()
966 return ublk_setup_iod_zoned(ubq, req); in ublk_setup_iod()
985 static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq) in ubq_daemon_is_dying() argument
987 return ubq->ubq_daemon->flags & PF_EXITING; in ubq_daemon_is_dying()
993 struct ublk_queue *ubq = req->mq_hctx->driver_data; in __ublk_complete_rq() local
994 struct ublk_io *io = &ubq->ios[req->tag]; in __ublk_complete_rq()
1024 unmapped_bytes = ublk_unmap_io(ubq, req, io); in __ublk_complete_rq()
1061 static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io, in __ublk_fail_req() argument
1066 if (ublk_queue_can_use_recovery_reissue(ubq)) in __ublk_fail_req()
1069 ublk_put_req_ref(ubq, req); in __ublk_fail_req()
1090 static inline void __ublk_abort_rq(struct ublk_queue *ubq, in __ublk_abort_rq() argument
1094 if (ublk_queue_can_use_recovery(ubq)) in __ublk_abort_rq()
1103 struct ublk_queue *ubq = req->mq_hctx->driver_data; in __ublk_rq_task_work() local
1105 struct ublk_io *io = &ubq->ios[tag]; in __ublk_rq_task_work()
1109 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags, in __ublk_rq_task_work()
1110 ublk_get_iod(ubq, req->tag)->addr); in __ublk_rq_task_work()
1121 if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) { in __ublk_rq_task_work()
1122 __ublk_abort_rq(ubq, req); in __ublk_rq_task_work()
1126 if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) { in __ublk_rq_task_work()
1135 __func__, io->cmd->cmd_op, ubq->q_id, in __ublk_rq_task_work()
1147 ublk_get_iod(ubq, req->tag)->addr = io->addr; in __ublk_rq_task_work()
1149 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags, in __ublk_rq_task_work()
1150 ublk_get_iod(ubq, req->tag)->addr); in __ublk_rq_task_work()
1153 mapped_bytes = ublk_map_io(ubq, req, io); in __ublk_rq_task_work()
1171 ublk_get_iod(ubq, req->tag)->nr_sectors = in __ublk_rq_task_work()
1175 ublk_init_req_ref(ubq, req); in __ublk_rq_task_work()
1179 static inline void ublk_forward_io_cmds(struct ublk_queue *ubq, in ublk_forward_io_cmds() argument
1182 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds); in ublk_forward_io_cmds()
1193 struct ublk_queue *ubq = pdu->ubq; in ublk_rq_task_work_cb() local
1195 ublk_forward_io_cmds(ubq, issue_flags); in ublk_rq_task_work_cb()
1198 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq) in ublk_queue_cmd() argument
1202 if (llist_add(&data->node, &ubq->io_cmds)) { in ublk_queue_cmd()
1203 struct ublk_io *io = &ubq->ios[rq->tag]; in ublk_queue_cmd()
1211 struct ublk_queue *ubq = rq->mq_hctx->driver_data; in ublk_timeout() local
1215 if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) { in ublk_timeout()
1216 if (!ubq->timeout) { in ublk_timeout()
1217 send_sig(SIGKILL, ubq->ubq_daemon, 0); in ublk_timeout()
1218 ubq->timeout = true; in ublk_timeout()
1224 if (!ubq_daemon_is_dying(ubq)) in ublk_timeout()
1227 for (i = 0; i < ubq->q_depth; i++) { in ublk_timeout()
1228 struct ublk_io *io = &ubq->ios[i]; in ublk_timeout()
1235 if (nr_inflight == ubq->q_depth) { in ublk_timeout()
1236 struct ublk_device *ub = ubq->dev; in ublk_timeout()
1238 if (ublk_abort_requests(ub, ubq)) { in ublk_timeout()
1253 struct ublk_queue *ubq = hctx->driver_data; in ublk_queue_rq() local
1258 res = ublk_setup_iod(ubq, rq); in ublk_queue_rq()
1271 if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort)) in ublk_queue_rq()
1274 if (unlikely(ubq->canceling)) { in ublk_queue_rq()
1275 __ublk_abort_rq(ubq, rq); in ublk_queue_rq()
1280 ublk_queue_cmd(ubq, rq); in ublk_queue_rq()
1289 struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num); in ublk_init_hctx() local
1291 hctx->driver_data = ubq; in ublk_init_hctx()
1362 struct ublk_queue *ubq = ublk_get_queue(ub, qid); in ublk_commit_completion() local
1363 struct ublk_io *io = &ubq->ios[tag]; in ublk_commit_completion()
1379 ublk_put_req_ref(ubq, req); in ublk_commit_completion()
1387 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq) in ublk_abort_queue() argument
1391 for (i = 0; i < ubq->q_depth; i++) { in ublk_abort_queue()
1392 struct ublk_io *io = &ubq->ios[i]; in ublk_abort_queue()
1401 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i); in ublk_abort_queue()
1404 __ublk_fail_req(ubq, io, rq); in ublk_abort_queue()
1410 static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq) in ublk_abort_requests() argument
1414 spin_lock(&ubq->cancel_lock); in ublk_abort_requests()
1415 if (ubq->canceling) { in ublk_abort_requests()
1416 spin_unlock(&ubq->cancel_lock); in ublk_abort_requests()
1419 ubq->canceling = true; in ublk_abort_requests()
1420 spin_unlock(&ubq->cancel_lock); in ublk_abort_requests()
1435 ublk_abort_queue(ub, ubq); in ublk_abort_requests()
1442 static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io, in ublk_cancel_cmd() argument
1450 spin_lock(&ubq->cancel_lock); in ublk_cancel_cmd()
1454 spin_unlock(&ubq->cancel_lock); in ublk_cancel_cmd()
1468 struct ublk_queue *ubq = pdu->ubq; in ublk_uring_cmd_cancel_fn() local
1474 if (WARN_ON_ONCE(!ubq)) in ublk_uring_cmd_cancel_fn()
1477 if (WARN_ON_ONCE(pdu->tag >= ubq->q_depth)) in ublk_uring_cmd_cancel_fn()
1481 if (WARN_ON_ONCE(task && task != ubq->ubq_daemon)) in ublk_uring_cmd_cancel_fn()
1484 ub = ubq->dev; in ublk_uring_cmd_cancel_fn()
1485 need_schedule = ublk_abort_requests(ub, ubq); in ublk_uring_cmd_cancel_fn()
1487 io = &ubq->ios[pdu->tag]; in ublk_uring_cmd_cancel_fn()
1489 ublk_cancel_cmd(ubq, io, issue_flags); in ublk_uring_cmd_cancel_fn()
1499 static inline bool ublk_queue_ready(struct ublk_queue *ubq) in ublk_queue_ready() argument
1501 return ubq->nr_io_ready == ubq->q_depth; in ublk_queue_ready()
1504 static void ublk_cancel_queue(struct ublk_queue *ubq) in ublk_cancel_queue() argument
1508 for (i = 0; i < ubq->q_depth; i++) in ublk_cancel_queue()
1509 ublk_cancel_cmd(ubq, &ubq->ios[i], IO_URING_F_UNLOCKED); in ublk_cancel_queue()
1621 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq) in ublk_mark_io_ready() argument
1624 ubq->nr_io_ready++; in ublk_mark_io_ready()
1625 if (ublk_queue_ready(ubq)) { in ublk_mark_io_ready()
1626 ubq->ubq_daemon = current; in ublk_mark_io_ready()
1627 get_task_struct(ubq->ubq_daemon); in ublk_mark_io_ready()
1641 struct ublk_queue *ubq = ublk_get_queue(ub, q_id); in ublk_handle_need_get_data() local
1644 ublk_queue_cmd(ubq, req); in ublk_handle_need_get_data()
1670 struct ublk_queue *ubq, unsigned int tag) in ublk_prep_cancel() argument
1678 pdu->ubq = ubq; in ublk_prep_cancel()
1688 struct ublk_queue *ubq; in __ublk_ch_uring_cmd() local
1702 ubq = ublk_get_queue(ub, ub_cmd->q_id); in __ublk_ch_uring_cmd()
1703 if (!ubq || ub_cmd->q_id != ubq->q_id) in __ublk_ch_uring_cmd()
1706 if (ubq->ubq_daemon && ubq->ubq_daemon != current) in __ublk_ch_uring_cmd()
1709 if (tag >= ubq->q_depth) in __ublk_ch_uring_cmd()
1712 io = &ubq->ios[tag]; in __ublk_ch_uring_cmd()
1736 if (ublk_queue_ready(ubq)) { in __ublk_ch_uring_cmd()
1747 if (!ublk_support_user_copy(ubq)) { in __ublk_ch_uring_cmd()
1752 if (!ub_cmd->addr && !ublk_need_get_data(ubq)) in __ublk_ch_uring_cmd()
1761 ublk_mark_io_ready(ub, ubq); in __ublk_ch_uring_cmd()
1769 if (!ublk_support_user_copy(ubq)) { in __ublk_ch_uring_cmd()
1774 if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || in __ublk_ch_uring_cmd()
1798 ublk_prep_cancel(cmd, issue_flags, ubq, tag); in __ublk_ch_uring_cmd()
1809 struct ublk_queue *ubq, int tag, size_t offset) in __ublk_check_and_get_req() argument
1813 if (!ublk_need_req_ref(ubq)) in __ublk_check_and_get_req()
1816 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag); in __ublk_check_and_get_req()
1820 if (!ublk_get_req_ref(ubq, req)) in __ublk_check_and_get_req()
1834 ublk_put_req_ref(ubq, req); in __ublk_check_and_get_req()
1901 struct ublk_queue *ubq; in ublk_check_and_get_req() local
1922 ubq = ublk_get_queue(ub, q_id); in ublk_check_and_get_req()
1923 if (!ubq) in ublk_check_and_get_req()
1926 if (tag >= ubq->q_depth) in ublk_check_and_get_req()
1929 req = __ublk_check_and_get_req(ub, ubq, tag, buf_off); in ublk_check_and_get_req()
1942 ublk_put_req_ref(ubq, req); in ublk_check_and_get_req()
1948 struct ublk_queue *ubq; in ublk_ch_read_iter() local
1958 ubq = req->mq_hctx->driver_data; in ublk_ch_read_iter()
1959 ublk_put_req_ref(ubq, req); in ublk_ch_read_iter()
1966 struct ublk_queue *ubq; in ublk_ch_write_iter() local
1976 ubq = req->mq_hctx->driver_data; in ublk_ch_write_iter()
1977 ublk_put_req_ref(ubq, req); in ublk_ch_write_iter()
1995 struct ublk_queue *ubq = ublk_get_queue(ub, q_id); in ublk_deinit_queue() local
1997 if (ubq->ubq_daemon) in ublk_deinit_queue()
1998 put_task_struct(ubq->ubq_daemon); in ublk_deinit_queue()
1999 if (ubq->io_cmd_buf) in ublk_deinit_queue()
2000 free_pages((unsigned long)ubq->io_cmd_buf, get_order(size)); in ublk_deinit_queue()
2005 struct ublk_queue *ubq = ublk_get_queue(ub, q_id); in ublk_init_queue() local
2010 spin_lock_init(&ubq->cancel_lock); in ublk_init_queue()
2011 ubq->flags = ub->dev_info.flags; in ublk_init_queue()
2012 ubq->q_id = q_id; in ublk_init_queue()
2013 ubq->q_depth = ub->dev_info.queue_depth; in ublk_init_queue()
2020 ubq->io_cmd_buf = ptr; in ublk_init_queue()
2021 ubq->dev = ub; in ublk_init_queue()
2669 static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq) in ublk_queue_reinit() argument
2673 WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq))); in ublk_queue_reinit()
2676 ubq->nr_io_ready = 0; in ublk_queue_reinit()
2678 put_task_struct(ubq->ubq_daemon); in ublk_queue_reinit()
2680 ubq->ubq_daemon = NULL; in ublk_queue_reinit()
2681 ubq->timeout = false; in ublk_queue_reinit()
2682 ubq->canceling = false; in ublk_queue_reinit()
2684 for (i = 0; i < ubq->q_depth; i++) { in ublk_queue_reinit()
2685 struct ublk_io *io = &ubq->ios[i]; in ublk_queue_reinit()