/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef BLK_MQ_DMA_H
#define BLK_MQ_DMA_H

#include <linux/blk-mq.h>
#include <linux/pci-p2pdma.h>

struct blk_dma_iter {
	/* Output address range for this iteration */
	dma_addr_t		addr;
	u32			len;

	/* Status code. Only valid when blk_rq_dma_map_iter_* returned false */
	blk_status_t		status;

	/* Internal to blk_rq_dma_map_iter_* */
	struct req_iterator	iter;
	struct pci_p2pdma_map_state p2pdma;
};

bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
		struct dma_iova_state *state, struct blk_dma_iter *iter);
bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
		struct dma_iova_state *state, struct blk_dma_iter *iter);
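
/*
 * Usage sketch (illustrative, not part of this header): a driver maps a
 * request by walking every returned address range. The dma_dev and state
 * variables and the descriptor-programming step are assumptions made for
 * the example:
 *
 *	struct blk_dma_iter iter;
 *
 *	if (!blk_rq_dma_map_iter_start(req, dma_dev, &state, &iter))
 *		return iter.status;
 *	do {
 *		program iter.addr / iter.len into the device's next
 *		data descriptor here (hardware specific)
 *	} while (blk_rq_dma_map_iter_next(req, dma_dev, &state, &iter));
 *	if (iter.status != BLK_STS_OK)
 *		return iter.status;
 */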

/**
 * blk_rq_dma_map_coalesce - were all segments coalesced?
 * @state: DMA state to check
 *
 * Returns true if blk_rq_dma_map_iter_start coalesced all segments into a
 * single DMA range.
 */
static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
{
	return dma_use_iova(state);
}
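
/*
 * Illustrative check (an assumption, not mandated by this header): after a
 * successful mapping, a driver can choose a simpler single-range hardware
 * format when all segments were merged into one IOVA range:
 *
 *	if (blk_rq_dma_map_coalesce(&state))
 *		use one contiguous data pointer (hypothetical driver path)
 *	else
 *		build a scatter/gather list (hypothetical driver path)
 */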

/**
 * blk_rq_dma_unmap - try to DMA unmap a request
 * @req: request to unmap
 * @dma_dev: device to unmap from
 * @state: DMA IOVA state
 * @mapped_len: number of bytes to unmap
 *
 * Returns %false if the caller needs to manually unmap every DMA segment
 * mapped using @iter, or %true if no work is left to be done.
 */
static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
		struct dma_iova_state *state, size_t mapped_len)
{
	if (req->cmd_flags & REQ_P2PDMA)
		return true;

	if (dma_use_iova(state)) {
		dma_iova_destroy(dma_dev, state, mapped_len, rq_dma_dir(req),
				 0);
		return true;
	}

	return !dma_need_unmap(dma_dev);
}
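
/*
 * Fallback sketch (the per-segment bookkeeping below is an assumption;
 * seg_addr, seg_len and nr_segs are invented names): when blk_rq_dma_unmap()
 * returns false, the driver has to unmap each range it recorded while
 * mapping, e.g.:
 *
 *	if (!blk_rq_dma_unmap(req, dma_dev, &state, mapped_len)) {
 *		for (i = 0; i < nr_segs; i++)
 *			dma_unmap_page(dma_dev, seg_addr[i], seg_len[i],
 *				       rq_dma_dir(req));
 *	}
 */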

#endif /* BLK_MQ_DMA_H */