// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

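/* Resolve a generic dmaengine channel to the idxd work queue backing it. */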
static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
	struct idxd_dma_chan *idxd_chan;

	idxd_chan = container_of(c, struct idxd_dma_chan, chan);
	return idxd_chan->wq;
}

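/*
 * Translate the DSA completion record of a finished descriptor into a
 * dmaengine result, complete the cookie and invoke the client callback. A
 * descriptor that failed with an invalid interrupt handle may instead be
 * queued for resubmission when the device hands out interrupt handles on
 * request.
 */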
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type,
			   bool free_desc, void *ctx, u32 *status)
{
	struct idxd_device *idxd = desc->wq->idxd;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_result res;
	int complete = 1;

	if (desc->completion->status == DSA_COMP_SUCCESS) {
		res.result = DMA_TRANS_NOERROR;
	} else if (desc->completion->status) {
		if (idxd->request_int_handles && comp_type != IDXD_COMPLETE_ABORT &&
		    desc->completion->status == DSA_COMP_INT_HANDLE_INVAL &&
		    idxd_queue_int_handle_resubmit(desc))
			return;
		res.result = DMA_TRANS_WRITE_FAILED;
	} else if (comp_type == IDXD_COMPLETE_ABORT) {
		res.result = DMA_TRANS_ABORTED;
	} else {
		complete = 0;
	}

	tx = &desc->txd;
	if (complete && tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	if (free_desc)
		idxd_free_desc(desc->wq, desc);
}

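/*
 * Every kernel descriptor requests a completion record (CRAV | RCR); a
 * completion interrupt (RCI) is added only when the client passed
 * DMA_PREP_INTERRUPT.
 */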
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
	*desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	if (flags & DMA_PREP_INTERRUPT)
		*desc_flags |= IDXD_OP_FLAG_RCI;
}

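/* Fill the fields common to every DSA hardware descriptor issued here. */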
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
					 struct dsa_hw_desc *hw, char opcode,
					 u64 addr_f1, u64 addr_f2, u64 len,
					 u64 compl, u32 flags)
{
	hw->flags = flags;
	hw->opcode = opcode;
	hw->src_addr = addr_f1;
	hw->dst_addr = addr_f2;
	hw->xfer_size = len;
	/*
	 * For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
	 * field instead. This field should be set to 0 for kernel descriptors
	 * since kernel DMA on VT-d supports "user" privilege only.
	 */
	hw->priv = 0;
	hw->completion_addr = compl;
}

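/*
 * DMA_INTERRUPT support: an interrupt-only transaction is expressed as a
 * DSA no-op descriptor carrying the completion flags requested by the client.
 */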
static struct dma_async_tx_descriptor *
idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
			      0, 0, 0, desc->compl_dma, desc_flags);
	desc->txd.flags = flags;
	return &desc->txd;
}

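/*
 * memcpy is mapped onto the DSA memory move opcode. Requests larger than the
 * device's max_xfer_bytes are rejected rather than split.
 */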
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		       dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_device *idxd = wq->idxd;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	if (len > idxd->max_xfer_bytes)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
			      dma_src, dma_dest, len, desc->compl_dma,
			      desc_flags);

	desc->txd.flags = flags;

	return &desc->txd;
}

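/* Each dmaengine client takes a reference on the backing work queue. */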
static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_get(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
	return 0;
}

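/* Drop the client reference taken in idxd_dma_alloc_chan_resources(). */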
static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_put(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
}

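/*
 * Completions arrive out of order with respect to cookies and are reported
 * through the descriptor callback, so cookie-based polling is not supported.
 */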
static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return DMA_OUT_OF_ORDER;
}

/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}

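/*
 * Assign the cookie and hand the descriptor to the work queue immediately;
 * this is why issue_pending() above has nothing left to do.
 */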
static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct idxd_wq *wq = to_idxd_wq(c);
	dma_cookie_t cookie;
	int rc;
	struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

	cookie = dma_cookie_assign(tx);

	rc = idxd_submit_desc(wq, desc);
	if (rc < 0) {
		idxd_free_desc(wq, desc);
		return rc;
	}

	return cookie;
}

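/* Called by the dmaengine core once the last reference to the dma_device is dropped. */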
static void idxd_dma_release(struct dma_device *device)
{
	struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);

	kfree(idxd_dma);
}

static int idxd_dma_terminate_all(struct dma_chan *c)
{
	struct idxd_wq *wq = to_idxd_wq(c);

	idxd_wq_flush_descs(wq);

	return 0;
}

static void idxd_dma_synchronize(struct dma_chan *c)
{
	struct idxd_wq *wq = to_idxd_wq(c);

	idxd_wq_drain(wq);
}

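/*
 * Register one dma_device per idxd device. Channels are added separately, one
 * per work queue bound to the dmaengine sub-driver; DMA_MEMCPY is only
 * advertised when the hardware reports the memory move opcode as supported.
 */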
int idxd_register_dma_device(struct idxd_device *idxd)
{
	struct idxd_dma_dev *idxd_dma;
	struct dma_device *dma;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_dma)
		return -ENOMEM;

	dma = &idxd_dma->dma;
	INIT_LIST_HEAD(&dma->channels);
	dma->dev = dev;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
	dma->device_release = idxd_dma_release;

	dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
	if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
		dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
	}

	dma->device_tx_status = idxd_dma_tx_status;
	dma->device_issue_pending = idxd_dma_issue_pending;
	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
	dma->device_free_chan_resources = idxd_dma_free_chan_resources;
	dma->device_terminate_all = idxd_dma_terminate_all;
	dma->device_synchronize = idxd_dma_synchronize;

	rc = dma_async_device_register(dma);
	if (rc < 0) {
		kfree(idxd_dma);
		return rc;
	}

	idxd_dma->idxd = idxd;
	/*
	 * This pointer is protected by the refs taken by the dma_chan. It will remain valid
	 * as long as there are outstanding channels.
	 */
	idxd->idxd_dma = idxd_dma;
	return 0;
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
	dma_async_device_unregister(&idxd->idxd_dma->dma);
}

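/*
 * Expose a work queue as a DMA channel: hook the wq's preallocated
 * descriptors up as transmit descriptors, register the channel and pin the
 * wq's config device for the channel's lifetime.
 */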
static int idxd_register_dma_channel(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct dma_device *dma = &idxd->idxd_dma->dma;
	struct device *dev = &idxd->pdev->dev;
	struct idxd_dma_chan *idxd_chan;
	struct dma_chan *chan;
	int rc, i;

	idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_chan)
		return -ENOMEM;

	chan = &idxd_chan->chan;
	chan->device = dma;
	list_add_tail(&chan->device_node, &dma->channels);

	for (i = 0; i < wq->num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	rc = dma_async_device_channel_register(dma, chan, NULL);
	if (rc < 0) {
		kfree(idxd_chan);
		return rc;
	}

	wq->idxd_chan = idxd_chan;
	idxd_chan->wq = wq;
	get_device(wq_confdev(wq));

	return 0;
}

static void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
	struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
	struct dma_chan *chan = &idxd_chan->chan;
	struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;

	dma_async_device_channel_unregister(&idxd_dma->dma, chan);
	list_del(&chan->device_node);
	kfree(wq->idxd_chan);
	wq->idxd_chan = NULL;
	put_device(wq_confdev(wq));
}

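/*
 * Bind a work queue to the "dmaengine" sub-driver: mark it as a kernel wq,
 * enable it and register it as a DMA channel.
 */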
static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -ENXIO;

	mutex_lock(&wq->wq_lock);
	if (!idxd_wq_driver_name_match(wq, dev)) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
		rc = -ENODEV;
		goto err;
	}

	wq->type = IDXD_WQT_KERNEL;

	rc = idxd_drv_enable_wq(wq);
	if (rc < 0) {
		dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
		rc = -ENXIO;
		goto err;
	}

	rc = idxd_register_dma_channel(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
		dev_dbg(dev, "Failed to register dma channel\n");
		goto err_dma;
	}

	idxd->cmd_status = 0;
	mutex_unlock(&wq->wq_lock);
	return 0;

err_dma:
	idxd_drv_disable_wq(wq);
err:
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
	return rc;
}

static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	mutex_lock(&wq->wq_lock);
	__idxd_wq_quiesce(wq);
	idxd_unregister_dma_channel(wq);
	idxd_drv_disable_wq(wq);
	mutex_unlock(&wq->wq_lock);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_dmaengine_drv = {
	.probe = idxd_dmaengine_drv_probe,
	.remove = idxd_dmaengine_drv_remove,
	.desc_complete = idxd_dma_complete_txd,
	.name = "dmaengine",
	.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);
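
/*
 * Minimal client usage sketch (illustrative only, not part of this driver):
 * channels are registered with DMA_PRIVATE and DMA_COMPLETION_NO_ORDER, so a
 * kernel client requests a dedicated channel and relies on the completion
 * callback rather than polling the cookie. dst_dma, src_dma, len and my_done
 * are placeholders for the caller's DMA mappings and completion handler.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	tx->callback_result = my_done;
 *	dmaengine_submit(tx);
 *	...
 *	dma_release_channel(chan);
 */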