1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 /* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
4 /* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
5 
6 #include <linux/bitfield.h>
7 #include <linux/bits.h>
8 #include <linux/completion.h>
9 #include <linux/delay.h>
10 #include <linux/dma-buf.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/interrupt.h>
13 #include <linux/kref.h>
14 #include <linux/list.h>
15 #include <linux/math64.h>
16 #include <linux/mm.h>
17 #include <linux/moduleparam.h>
18 #include <linux/scatterlist.h>
19 #include <linux/spinlock.h>
20 #include <linux/srcu.h>
21 #include <linux/string.h>
22 #include <linux/types.h>
23 #include <linux/uaccess.h>
24 #include <linux/wait.h>
25 #include <drm/drm_file.h>
26 #include <drm/drm_gem.h>
27 #include <drm/drm_prime.h>
28 #include <drm/drm_print.h>
29 #include <uapi/drm/qaic_accel.h>
30 
31 #include "qaic.h"
32 
33 #define SEM_VAL_MASK	GENMASK_ULL(11, 0)
34 #define SEM_INDEX_MASK	GENMASK_ULL(4, 0)
35 #define BULK_XFER	BIT(3)
36 #define GEN_COMPLETION	BIT(4)
37 #define INBOUND_XFER	1
38 #define OUTBOUND_XFER	2
39 #define REQHP_OFF	0x0 /* we read this */
40 #define REQTP_OFF	0x4 /* we write this */
41 #define RSPHP_OFF	0x8 /* we write this */
42 #define RSPTP_OFF	0xc /* we read this */
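/*
 * The request and response queues are circular buffers shared with the
 * device. For the request queue the host is the producer: it reads REQHP to
 * learn how far the device has consumed and writes REQTP after copying in
 * new dbc_req elements. For the response queue the roles flip: the device
 * produces entries and advances RSPTP while the host consumes them and
 * advances RSPHP.
 */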
43 
44 #define ENCODE_SEM(val, index, sync, cmd, flags)			\
45 		({							\
46 			FIELD_PREP(GENMASK(11, 0), (val)) |		\
47 			FIELD_PREP(GENMASK(20, 16), (index)) |		\
48 			FIELD_PREP(BIT(22), (sync)) |			\
49 			FIELD_PREP(GENMASK(26, 24), (cmd)) |		\
50 			FIELD_PREP(GENMASK(30, 29), (flags)) |		\
51 			FIELD_PREP(BIT(31), (cmd) ? 1 : 0);		\
52 		})
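/*
 * Worked example of the encoding above, matching the sem_cmdX layout
 * documented in struct dbc_req below (values are illustrative only):
 * ENCODE_SEM(5, 2, 1, 4, 0) places 5 in bits 11:0, 2 in bits 20:16, the
 * presync bit in bit 22, command 4 in bits 26:24, no fence flags in bits
 * 30:29, and sets bit 31 because the command is non-zero, giving 0x84420005.
 */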
53 #define NUM_EVENTS	128
54 #define NUM_DELAYS	10
55 #define fifo_at(base, offset) ((base) + (offset) * get_dbc_req_elem_size())
56 
57 static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */
58 module_param(wait_exec_default_timeout_ms, uint, 0600);
59 MODULE_PARM_DESC(wait_exec_default_timeout_ms, "Default timeout for DRM_IOCTL_QAIC_WAIT_BO");
60 
61 static unsigned int datapath_poll_interval_us = 100; /* 100 usec default */
62 module_param(datapath_poll_interval_us, uint, 0600);
63 MODULE_PARM_DESC(datapath_poll_interval_us,
64 		 "Amount of time to sleep between activity when datapath polling is enabled");
65 
66 struct dbc_req {
67 	/*
68 	 * A request ID is assigned to each memory handle going into the DMA
69 	 * queue. Since a single memory handle can enqueue multiple elements in
70 	 * the DMA queue, all of them will have the same request ID.
71 	 */
72 	__le16	req_id;
73 	/* Future use */
74 	__u8	seq_id;
75 	/*
76 	 * Special encoded variable
77 	 * 7	0 - Do not force to generate MSI after DMA is completed
78 	 *	1 - Force to generate MSI after DMA is completed
79 	 * 6:5	Reserved
80 	 * 4	1 - Generate completion element in the response queue
81 	 *	0 - No Completion Code
82 	 * 3	0 - DMA request is a Link list transfer
83 	 *	1 - DMA request is a Bulk transfer
84 	 * 2	Reserved
85 	 * 1:0	00 - No DMA transfer involved
86 	 *	01 - DMA transfer is part of inbound transfer
87 	 *	10 - DMA transfer is part of outbound transfer
88 	 *	11 - NA
89 	 */
90 	__u8	cmd;
91 	__le32	resv;
92 	/* Source address for the transfer */
93 	__le64	src_addr;
94 	/* Destination address for the transfer */
95 	__le64	dest_addr;
96 	/* Length of transfer request */
97 	__le32	len;
98 	__le32	resv2;
99 	/* Doorbell address */
100 	__le64	db_addr;
101 	/*
102 	 * Special encoded variable
103 	 * 7	1 - Doorbell(db) write
104 	 *	0 - No doorbell write
105 	 * 6:2	Reserved
106 	 * 1:0	00 - 32 bit access, db address must be aligned to 32bit-boundary
107 	 *	01 - 16 bit access, db address must be aligned to 16bit-boundary
108 	 *	10 - 8 bit access, db address must be aligned to 8bit-boundary
109 	 *	11 - Reserved
110 	 */
111 	__u8	db_len;
112 	__u8	resv3;
113 	__le16	resv4;
114 	/* 32 bit data written to doorbell address */
115 	__le32	db_data;
116 	/*
117 	 * Special encoded variable
118 	 * All the fields of sem_cmdX are passed from user and all are ORed
119 	 * together to form sem_cmd.
120 	 * 0:11		Semaphore value
121 	 * 15:12	Reserved
122 	 * 20:16	Semaphore index
123 	 * 21		Reserved
124 	 * 22		Semaphore Sync
125 	 * 23		Reserved
126 	 * 26:24	Semaphore command
127 	 * 28:27	Reserved
128 	 * 29		Semaphore DMA out bound sync fence
129 	 * 30		Semaphore DMA in bound sync fence
130 	 * 31		Enable semaphore command
131 	 */
132 	__le32	sem_cmd0;
133 	__le32	sem_cmd1;
134 	__le32	sem_cmd2;
135 	__le32	sem_cmd3;
136 } __packed;
137 
138 struct dbc_rsp {
139 	/* Request ID of the memory handle whose DMA transaction is completed */
140 	__le16	req_id;
141 	/* Status of the DMA transaction. 0 : Success otherwise failure */
142 	__le16	status;
143 } __packed;
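/*
 * The device is expected to write one dbc_rsp element for each request that
 * set GEN_COMPLETION. encode_reqs() sets GEN_COMPLETION only on the last
 * request of a slice, so a BO attached with N slices should produce N
 * responses; dbc_irq_threaded_fn() counts them in nr_slice_xfer_done before
 * marking the BO complete.
 */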
144 
145 static inline bool bo_queued(struct qaic_bo *bo)
146 {
147 	return !list_empty(&bo->xfer_list);
148 }
149 
150 inline int get_dbc_req_elem_size(void)
151 {
152 	return sizeof(struct dbc_req);
153 }
154 
155 inline int get_dbc_rsp_elem_size(void)
156 {
157 	return sizeof(struct dbc_rsp);
158 }
159 
160 static void free_slice(struct kref *kref)
161 {
162 	struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);
163 
164 	slice->bo->total_slice_nents -= slice->nents;
165 	list_del(&slice->slice);
166 	drm_gem_object_put(&slice->bo->base);
167 	sg_free_table(slice->sgt);
168 	kfree(slice->sgt);
169 	kvfree(slice->reqs);
170 	kfree(slice);
171 }
172 
173 static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
174 					struct sg_table *sgt_in, u64 size, u64 offset)
175 {
176 	struct scatterlist *sg, *sgn, *sgf, *sgl;
177 	unsigned int len, nents, offf, offl;
178 	struct sg_table *sgt;
179 	size_t total_len;
180 	int ret, j;
181 
182 	/* find out number of relevant nents needed for this mem */
183 	total_len = 0;
184 	sgf = NULL;
185 	sgl = NULL;
186 	nents = 0;
187 	offf = 0;
188 	offl = 0;
189 
190 	size = size ? size : PAGE_SIZE;
191 	for_each_sgtable_dma_sg(sgt_in, sg, j) {
192 		len = sg_dma_len(sg);
193 
194 		if (!len)
195 			continue;
196 		if (offset >= total_len && offset < total_len + len) {
197 			sgf = sg;
198 			offf = offset - total_len;
199 		}
200 		if (sgf)
201 			nents++;
202 		if (offset + size >= total_len &&
203 		    offset + size <= total_len + len) {
204 			sgl = sg;
205 			offl = offset + size - total_len;
206 			break;
207 		}
208 		total_len += len;
209 	}
210 
211 	if (!sgf || !sgl) {
212 		ret = -EINVAL;
213 		goto out;
214 	}
215 
216 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
217 	if (!sgt) {
218 		ret = -ENOMEM;
219 		goto out;
220 	}
221 
222 	ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
223 	if (ret)
224 		goto free_sgt;
225 
226 	/* copy relevant sg node and fix page and length */
227 	sgn = sgf;
228 	for_each_sgtable_dma_sg(sgt, sg, j) {
229 		memcpy(sg, sgn, sizeof(*sg));
230 		if (sgn == sgf) {
231 			sg_dma_address(sg) += offf;
232 			sg_dma_len(sg) -= offf;
233 			sg_set_page(sg, sg_page(sgn), sg_dma_len(sg), offf);
234 		} else {
235 			offf = 0;
236 		}
237 		if (sgn == sgl) {
238 			sg_dma_len(sg) = offl - offf;
239 			sg_set_page(sg, sg_page(sgn), offl - offf, offf);
240 			sg_mark_end(sg);
241 			break;
242 		}
243 		sgn = sg_next(sgn);
244 	}
245 
246 	*sgt_out = sgt;
247 	return ret;
248 
249 free_sgt:
250 	kfree(sgt);
251 out:
252 	*sgt_out = NULL;
253 	return ret;
254 }
255 
256 static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
257 		       struct qaic_attach_slice_entry *req)
258 {
259 	__le64 db_addr = cpu_to_le64(req->db_addr);
260 	__le32 db_data = cpu_to_le32(req->db_data);
261 	struct scatterlist *sg;
262 	__u8 cmd = BULK_XFER;
263 	int presync_sem;
264 	u64 dev_addr;
265 	__u8 db_len;
266 	int i;
267 
268 	if (!slice->no_xfer)
269 		cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER);
270 
271 	if (req->db_len && !IS_ALIGNED(req->db_addr, req->db_len / 8))
272 		return -EINVAL;
273 
274 	presync_sem = req->sem0.presync + req->sem1.presync + req->sem2.presync + req->sem3.presync;
275 	if (presync_sem > 1)
276 		return -EINVAL;
277 
278 	presync_sem = req->sem0.presync << 0 | req->sem1.presync << 1 |
279 		      req->sem2.presync << 2 | req->sem3.presync << 3;
280 
281 	switch (req->db_len) {
282 	case 32:
283 		db_len = BIT(7);
284 		break;
285 	case 16:
286 		db_len = BIT(7) | 1;
287 		break;
288 	case 8:
289 		db_len = BIT(7) | 2;
290 		break;
291 	case 0:
292 		db_len = 0; /* doorbell is not active for this command */
293 		break;
294 	default:
295 		return -EINVAL; /* should never hit this */
296 	}
297 
298 	/*
299 	 * When we end up splitting up a single request (ie a buf slice) into
300 	 * multiple DMA requests, we have to manage the sync data carefully.
301 	 * There can only be one presync sem. That needs to be on every xfer
302 	 * so that the DMA engine doesn't transfer data before the receiver is
303 	 * ready. We only do the doorbell and postsync sems after the xfer.
304 	 * To guarantee previous xfers for the request are complete, we use a
305 	 * fence.
306 	 */
307 	dev_addr = req->dev_addr;
308 	for_each_sgtable_dma_sg(slice->sgt, sg, i) {
309 		slice->reqs[i].cmd = cmd;
310 		slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
311 						      sg_dma_address(sg) : dev_addr);
312 		slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
313 						       dev_addr : sg_dma_address(sg));
314 		/*
315 		 * sg_dma_len(sg) returns size of a DMA segment, maximum DMA
316 		 * segment size is set to UINT_MAX by qaic and hence return
317 		 * values of sg_dma_len(sg) can never exceed u32 range. So,
318 		 * by down sizing we are not corrupting the value.
319 		 */
320 		slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg));
321 		switch (presync_sem) {
322 		case BIT(0):
323 			slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val,
324 									 req->sem0.index,
325 									 req->sem0.presync,
326 									 req->sem0.cmd,
327 									 req->sem0.flags));
328 			break;
329 		case BIT(1):
330 			slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val,
331 									 req->sem1.index,
332 									 req->sem1.presync,
333 									 req->sem1.cmd,
334 									 req->sem1.flags));
335 			break;
336 		case BIT(2):
337 			slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val,
338 									 req->sem2.index,
339 									 req->sem2.presync,
340 									 req->sem2.cmd,
341 									 req->sem2.flags));
342 			break;
343 		case BIT(3):
344 			slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val,
345 									 req->sem3.index,
346 									 req->sem3.presync,
347 									 req->sem3.cmd,
348 									 req->sem3.flags));
349 			break;
350 		}
351 		dev_addr += sg_dma_len(sg);
352 	}
353 	/* add post transfer stuff to last segment */
354 	i--;
355 	slice->reqs[i].cmd |= GEN_COMPLETION;
356 	slice->reqs[i].db_addr = db_addr;
357 	slice->reqs[i].db_len = db_len;
358 	slice->reqs[i].db_data = db_data;
359 	/*
360 	 * Add a fence if we have more than one request going to the hardware
361 	 * representing the entirety of the user request, and the user request
362 	 * has no presync condition.
363 	 * Fences are expensive, so we try to avoid them. We rely on the
364 	 * hardware behavior to avoid needing one when there is a presync
365 	 * condition. When a presync exists, all requests for that same
366 	 * presync will be queued into a fifo. Thus, since we queue the
367 	 * post xfer activity only on the last request we queue, the hardware
368 	 * will ensure that the last queued request is processed last, thus
369 	 * making sure the post xfer activity happens at the right time without
370 	 * a fence.
371 	 */
372 	if (i && !presync_sem)
373 		req->sem0.flags |= (slice->dir == DMA_TO_DEVICE ?
374 				    QAIC_SEM_INSYNCFENCE : QAIC_SEM_OUTSYNCFENCE);
375 	slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, req->sem0.index,
376 							 req->sem0.presync, req->sem0.cmd,
377 							 req->sem0.flags));
378 	slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, req->sem1.index,
379 							 req->sem1.presync, req->sem1.cmd,
380 							 req->sem1.flags));
381 	slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, req->sem2.index,
382 							 req->sem2.presync, req->sem2.cmd,
383 							 req->sem2.flags));
384 	slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, req->sem3.index,
385 							 req->sem3.presync, req->sem3.cmd,
386 							 req->sem3.flags));
387 
388 	return 0;
389 }
390 
391 static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
392 			      struct qaic_attach_slice_entry *slice_ent)
393 {
394 	struct sg_table *sgt = NULL;
395 	struct bo_slice *slice;
396 	int ret;
397 
398 	ret = clone_range_of_sgt_for_slice(qdev, &sgt, bo->sgt, slice_ent->size, slice_ent->offset);
399 	if (ret)
400 		goto out;
401 
402 	slice = kmalloc(sizeof(*slice), GFP_KERNEL);
403 	if (!slice) {
404 		ret = -ENOMEM;
405 		goto free_sgt;
406 	}
407 
408 	slice->reqs = kvcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
409 	if (!slice->reqs) {
410 		ret = -ENOMEM;
411 		goto free_slice;
412 	}
413 
414 	slice->no_xfer = !slice_ent->size;
415 	slice->sgt = sgt;
416 	slice->nents = sgt->nents;
417 	slice->dir = bo->dir;
418 	slice->bo = bo;
419 	slice->size = slice_ent->size;
420 	slice->offset = slice_ent->offset;
421 
422 	ret = encode_reqs(qdev, slice, slice_ent);
423 	if (ret)
424 		goto free_req;
425 
426 	bo->total_slice_nents += sgt->nents;
427 	kref_init(&slice->ref_count);
428 	drm_gem_object_get(&bo->base);
429 	list_add_tail(&slice->slice, &bo->slices);
430 
431 	return 0;
432 
433 free_req:
434 	kvfree(slice->reqs);
435 free_slice:
436 	kfree(slice);
437 free_sgt:
438 	sg_free_table(sgt);
439 	kfree(sgt);
440 out:
441 	return ret;
442 }
443 
444 static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 size)
445 {
446 	struct scatterlist *sg;
447 	struct sg_table *sgt;
448 	struct page **pages;
449 	int *pages_order;
450 	int buf_extra;
451 	int max_order;
452 	int nr_pages;
453 	int ret = 0;
454 	int i, j, k;
455 	int order;
456 
457 	if (size) {
458 		nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
459 		/*
460 		 * calculate how much extra we are going to allocate, to remove
461 		 * later
462 		 */
463 		buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE;
464 		max_order = min(MAX_PAGE_ORDER, get_order(size));
465 	} else {
466 		/* allocate a single page for book keeping */
467 		nr_pages = 1;
468 		buf_extra = 0;
469 		max_order = 0;
470 	}
471 
472 	pages = kvmalloc_array(nr_pages, sizeof(*pages) + sizeof(*pages_order), GFP_KERNEL);
473 	if (!pages) {
474 		ret = -ENOMEM;
475 		goto out;
476 	}
477 	pages_order = (void *)pages + sizeof(*pages) * nr_pages;
478 
479 	/*
480 	 * Allocate requested memory using alloc_pages. It is possible to allocate
481 	 * the requested memory in multiple chunks by calling alloc_pages
482 	 * multiple times. Use SG table to handle multiple allocated pages.
483 	 */
484 	i = 0;
485 	while (nr_pages > 0) {
486 		order = min(get_order(nr_pages * PAGE_SIZE), max_order);
487 		while (1) {
488 			pages[i] = alloc_pages(GFP_KERNEL | GFP_HIGHUSER |
489 					       __GFP_NOWARN | __GFP_ZERO |
490 					       (order ? __GFP_NORETRY : __GFP_RETRY_MAYFAIL),
491 					       order);
492 			if (pages[i])
493 				break;
494 			if (!order--) {
495 				ret = -ENOMEM;
496 				goto free_partial_alloc;
497 			}
498 		}
499 
500 		max_order = order;
501 		pages_order[i] = order;
502 
503 		nr_pages -= 1 << order;
504 		if (nr_pages <= 0)
505 			/* account for over allocation */
506 			buf_extra += abs(nr_pages) * PAGE_SIZE;
507 		i++;
508 	}
509 
510 	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
511 	if (!sgt) {
512 		ret = -ENOMEM;
513 		goto free_partial_alloc;
514 	}
515 
516 	if (sg_alloc_table(sgt, i, GFP_KERNEL)) {
517 		ret = -ENOMEM;
518 		goto free_sgt;
519 	}
520 
521 	/* Populate the SG table with the allocated memory pages */
522 	sg = sgt->sgl;
523 	for (k = 0; k < i; k++, sg = sg_next(sg)) {
524 		/* Last entry requires special handling */
525 		if (k < i - 1) {
526 			sg_set_page(sg, pages[k], PAGE_SIZE << pages_order[k], 0);
527 		} else {
528 			sg_set_page(sg, pages[k], (PAGE_SIZE << pages_order[k]) - buf_extra, 0);
529 			sg_mark_end(sg);
530 		}
531 	}
532 
533 	kvfree(pages);
534 	*sgt_out = sgt;
535 	return ret;
536 
537 free_sgt:
538 	kfree(sgt);
539 free_partial_alloc:
540 	for (j = 0; j < i; j++)
541 		__free_pages(pages[j], pages_order[j]);
542 	kvfree(pages);
543 out:
544 	*sgt_out = NULL;
545 	return ret;
546 }
547 
548 static bool invalid_sem(struct qaic_sem *sem)
549 {
550 	if (sem->val & ~SEM_VAL_MASK || sem->index & ~SEM_INDEX_MASK ||
551 	    !(sem->presync == 0 || sem->presync == 1) || sem->pad ||
552 	    sem->flags & ~(QAIC_SEM_INSYNCFENCE | QAIC_SEM_OUTSYNCFENCE) ||
553 	    sem->cmd > QAIC_SEM_WAIT_GT_0)
554 		return true;
555 	return false;
556 }
557 
558 static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
559 			     u32 count, u64 total_size)
560 {
561 	u64 total;
562 	int i;
563 
564 	for (i = 0; i < count; i++) {
565 		if (!(slice_ent[i].db_len == 32 || slice_ent[i].db_len == 16 ||
566 		      slice_ent[i].db_len == 8 || slice_ent[i].db_len == 0) ||
567 		      invalid_sem(&slice_ent[i].sem0) || invalid_sem(&slice_ent[i].sem1) ||
568 		      invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
569 			return -EINVAL;
570 
571 		if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) ||
572 		    total > total_size)
573 			return -EINVAL;
574 	}
575 
576 	return 0;
577 }
578 
579 static void qaic_free_sgt(struct sg_table *sgt)
580 {
581 	struct scatterlist *sg;
582 
583 	if (!sgt)
584 		return;
585 
586 	for (sg = sgt->sgl; sg; sg = sg_next(sg))
587 		if (sg_page(sg))
588 			__free_pages(sg_page(sg), get_order(sg->length));
589 	sg_free_table(sgt);
590 	kfree(sgt);
591 }
592 
593 static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent,
594 				const struct drm_gem_object *obj)
595 {
596 	struct qaic_bo *bo = to_qaic_bo(obj);
597 
598 	drm_printf_indent(p, indent, "BO DMA direction %d\n", bo->dir);
599 }
600 
601 static const struct vm_operations_struct drm_vm_ops = {
602 	.open = drm_gem_vm_open,
603 	.close = drm_gem_vm_close,
604 };
605 
606 static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
607 {
608 	struct qaic_bo *bo = to_qaic_bo(obj);
609 	unsigned long remap_start;
610 	unsigned long offset = 0;
611 	unsigned long remap_end;
612 	struct scatterlist *sg;
613 	unsigned long length;
614 	int ret = 0;
615 
616 	if (drm_gem_is_imported(obj))
617 		return -EINVAL;
618 
619 	for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
620 		if (sg_page(sg)) {
621 			/* If the sg is too large for the VMA, truncate it to fit */
622 			if (check_add_overflow(vma->vm_start, offset, &remap_start))
623 				return -EINVAL;
624 			if (check_add_overflow(remap_start, sg->length, &remap_end))
625 				return -EINVAL;
626 
627 			if (remap_end > vma->vm_end) {
628 				if (check_sub_overflow(vma->vm_end, remap_start, &length))
629 					return -EINVAL;
630 			} else {
631 				length = sg->length;
632 			}
633 
634 			if (length == 0)
635 				goto out;
636 
637 			ret = remap_pfn_range(vma, vma->vm_start + offset, page_to_pfn(sg_page(sg)),
638 					      length, vma->vm_page_prot);
639 			if (ret)
640 				goto out;
641 			offset += length;
642 		}
643 	}
644 
645 out:
646 	return ret;
647 }
648 
649 static void qaic_free_object(struct drm_gem_object *obj)
650 {
651 	struct qaic_bo *bo = to_qaic_bo(obj);
652 
653 	if (drm_gem_is_imported(obj)) {
654 		/* DMABUF/PRIME Path */
655 		drm_prime_gem_destroy(obj, NULL);
656 	} else {
657 		/* Private buffer allocation path */
658 		qaic_free_sgt(bo->sgt);
659 	}
660 
661 	mutex_destroy(&bo->lock);
662 	drm_gem_object_release(obj);
663 	kfree(bo);
664 }
665 
666 static struct sg_table *qaic_get_sg_table(struct drm_gem_object *obj)
667 {
668 	struct qaic_bo *bo = to_qaic_bo(obj);
669 	struct scatterlist *sg, *sg_in;
670 	struct sg_table *sgt, *sgt_in;
671 	int i;
672 
673 	sgt_in = bo->sgt;
674 
675 	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
676 	if (!sgt)
677 		return ERR_PTR(-ENOMEM);
678 
679 	if (sg_alloc_table(sgt, sgt_in->orig_nents, GFP_KERNEL)) {
680 		kfree(sgt);
681 		return ERR_PTR(-ENOMEM);
682 	}
683 
684 	sg = sgt->sgl;
685 	for_each_sgtable_sg(sgt_in, sg_in, i) {
686 		memcpy(sg, sg_in, sizeof(*sg));
687 		sg = sg_next(sg);
688 	}
689 
690 	return sgt;
691 }
692 
693 static const struct drm_gem_object_funcs qaic_gem_funcs = {
694 	.free = qaic_free_object,
695 	.get_sg_table = qaic_get_sg_table,
696 	.print_info = qaic_gem_print_info,
697 	.mmap = qaic_gem_object_mmap,
698 	.vm_ops = &drm_vm_ops,
699 };
700 
701 static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
702 {
703 	if (reinit) {
704 		bo->sliced = false;
705 		reinit_completion(&bo->xfer_done);
706 	} else {
707 		mutex_init(&bo->lock);
708 		init_completion(&bo->xfer_done);
709 	}
710 	complete_all(&bo->xfer_done);
711 	INIT_LIST_HEAD(&bo->slices);
712 	INIT_LIST_HEAD(&bo->xfer_list);
713 }
714 
715 static struct qaic_bo *qaic_alloc_init_bo(void)
716 {
717 	struct qaic_bo *bo;
718 
719 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
720 	if (!bo)
721 		return ERR_PTR(-ENOMEM);
722 
723 	qaic_init_bo(bo, false);
724 
725 	return bo;
726 }
727 
728 int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
729 {
730 	struct qaic_create_bo *args = data;
731 	int usr_rcu_id, qdev_rcu_id;
732 	struct drm_gem_object *obj;
733 	struct qaic_device *qdev;
734 	struct qaic_user *usr;
735 	struct qaic_bo *bo;
736 	size_t size;
737 	int ret;
738 
739 	if (args->pad)
740 		return -EINVAL;
741 
742 	size = PAGE_ALIGN(args->size);
743 	if (size == 0)
744 		return -EINVAL;
745 
746 	usr = file_priv->driver_priv;
747 	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
748 	if (!usr->qddev) {
749 		ret = -ENODEV;
750 		goto unlock_usr_srcu;
751 	}
752 
753 	qdev = usr->qddev->qdev;
754 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
755 	if (qdev->dev_state != QAIC_ONLINE) {
756 		ret = -ENODEV;
757 		goto unlock_dev_srcu;
758 	}
759 
760 	bo = qaic_alloc_init_bo();
761 	if (IS_ERR(bo)) {
762 		ret = PTR_ERR(bo);
763 		goto unlock_dev_srcu;
764 	}
765 	obj = &bo->base;
766 
767 	drm_gem_private_object_init(dev, obj, size);
768 
769 	obj->funcs = &qaic_gem_funcs;
770 	ret = create_sgt(qdev, &bo->sgt, size);
771 	if (ret)
772 		goto free_bo;
773 
774 	ret = drm_gem_create_mmap_offset(obj);
775 	if (ret)
776 		goto free_bo;
777 
778 	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
779 	if (ret)
780 		goto free_bo;
781 
782 	drm_gem_object_put(obj);
783 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
784 	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
785 
786 	return 0;
787 
788 free_bo:
789 	drm_gem_object_put(obj);
790 unlock_dev_srcu:
791 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
792 unlock_usr_srcu:
793 	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
794 	return ret;
795 }
796 
797 int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
798 {
799 	struct qaic_mmap_bo *args = data;
800 	int usr_rcu_id, qdev_rcu_id;
801 	struct drm_gem_object *obj;
802 	struct qaic_device *qdev;
803 	struct qaic_user *usr;
804 	int ret = 0;
805 
806 	usr = file_priv->driver_priv;
807 	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
808 	if (!usr->qddev) {
809 		ret = -ENODEV;
810 		goto unlock_usr_srcu;
811 	}
812 
813 	qdev = usr->qddev->qdev;
814 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
815 	if (qdev->dev_state != QAIC_ONLINE) {
816 		ret = -ENODEV;
817 		goto unlock_dev_srcu;
818 	}
819 
820 	obj = drm_gem_object_lookup(file_priv, args->handle);
821 	if (!obj) {
822 		ret = -ENOENT;
823 		goto unlock_dev_srcu;
824 	}
825 
826 	args->offset = drm_vma_node_offset_addr(&obj->vma_node);
827 
828 	drm_gem_object_put(obj);
829 
830 unlock_dev_srcu:
831 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
832 unlock_usr_srcu:
833 	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
834 	return ret;
835 }
836 
837 struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
838 {
839 	struct dma_buf_attachment *attach;
840 	struct drm_gem_object *obj;
841 	struct qaic_bo *bo;
842 	int ret;
843 
844 	bo = qaic_alloc_init_bo();
845 	if (IS_ERR(bo)) {
846 		ret = PTR_ERR(bo);
847 		goto out;
848 	}
849 
850 	obj = &bo->base;
851 	get_dma_buf(dma_buf);
852 
853 	attach = dma_buf_attach(dma_buf, dev->dev);
854 	if (IS_ERR(attach)) {
855 		ret = PTR_ERR(attach);
856 		goto attach_fail;
857 	}
858 
859 	if (!attach->dmabuf->size) {
860 		ret = -EINVAL;
861 		goto size_align_fail;
862 	}
863 
864 	drm_gem_private_object_init(dev, obj, attach->dmabuf->size);
865 	/*
866 	 * skipping dma_buf_map_attachment() as we do not know the direction
867 	 * just yet. Once the direction is known in the subsequent IOCTL to
868 	 * attach slicing, we can do it then.
869 	 */
870 
871 	obj->funcs = &qaic_gem_funcs;
872 	obj->import_attach = attach;
873 	obj->resv = dma_buf->resv;
874 
875 	return obj;
876 
877 size_align_fail:
878 	dma_buf_detach(dma_buf, attach);
879 attach_fail:
880 	dma_buf_put(dma_buf);
881 	kfree(bo);
882 out:
883 	return ERR_PTR(ret);
884 }
885 
886 static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_hdr *hdr)
887 {
888 	struct drm_gem_object *obj = &bo->base;
889 	struct sg_table *sgt;
890 	int ret;
891 
892 	sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir);
893 	if (IS_ERR(sgt)) {
894 		ret = PTR_ERR(sgt);
895 		return ret;
896 	}
897 
898 	bo->sgt = sgt;
899 
900 	return 0;
901 }
902 
903 static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
904 				  struct qaic_attach_slice_hdr *hdr)
905 {
906 	int ret;
907 
908 	ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
909 	if (ret)
910 		return -EFAULT;
911 
912 	return 0;
913 }
914 
915 static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
916 			   struct qaic_attach_slice_hdr *hdr)
917 {
918 	int ret;
919 
920 	if (drm_gem_is_imported(&bo->base))
921 		ret = qaic_prepare_import_bo(bo, hdr);
922 	else
923 		ret = qaic_prepare_export_bo(qdev, bo, hdr);
924 	bo->dir = hdr->dir;
925 	bo->dbc = &qdev->dbc[hdr->dbc_id];
926 	bo->nr_slice = hdr->count;
927 
928 	return ret;
929 }
930 
931 static void qaic_unprepare_import_bo(struct qaic_bo *bo)
932 {
933 	dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, bo->dir);
934 	bo->sgt = NULL;
935 }
936 
937 static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo)
938 {
939 	dma_unmap_sgtable(&qdev->pdev->dev, bo->sgt, bo->dir, 0);
940 }
941 
942 static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
943 {
944 	if (drm_gem_is_imported(&bo->base))
945 		qaic_unprepare_import_bo(bo);
946 	else
947 		qaic_unprepare_export_bo(qdev, bo);
948 
949 	bo->dir = 0;
950 	bo->dbc = NULL;
951 	bo->nr_slice = 0;
952 }
953 
954 static void qaic_free_slices_bo(struct qaic_bo *bo)
955 {
956 	struct bo_slice *slice, *temp;
957 
958 	list_for_each_entry_safe(slice, temp, &bo->slices, slice)
959 		kref_put(&slice->ref_count, free_slice);
960 	if (WARN_ON_ONCE(bo->total_slice_nents != 0))
961 		bo->total_slice_nents = 0;
962 	bo->nr_slice = 0;
963 }
964 
965 static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
966 				  struct qaic_attach_slice_hdr *hdr,
967 				  struct qaic_attach_slice_entry *slice_ent)
968 {
969 	int ret, i;
970 
971 	for (i = 0; i < hdr->count; i++) {
972 		ret = qaic_map_one_slice(qdev, bo, &slice_ent[i]);
973 		if (ret) {
974 			qaic_free_slices_bo(bo);
975 			return ret;
976 		}
977 	}
978 
979 	if (bo->total_slice_nents > bo->dbc->nelem) {
980 		qaic_free_slices_bo(bo);
981 		return -ENOSPC;
982 	}
983 
984 	return 0;
985 }
986 
987 int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
988 {
989 	struct qaic_attach_slice_entry *slice_ent;
990 	struct qaic_attach_slice *args = data;
991 	int rcu_id, usr_rcu_id, qdev_rcu_id;
992 	struct dma_bridge_chan	*dbc;
993 	struct drm_gem_object *obj;
994 	struct qaic_device *qdev;
995 	unsigned long arg_size;
996 	struct qaic_user *usr;
997 	u8 __user *user_data;
998 	struct qaic_bo *bo;
999 	int ret;
1000 
1001 	if (args->hdr.count == 0)
1002 		return -EINVAL;
1003 
1004 	if (check_mul_overflow((unsigned long)args->hdr.count,
1005 			       (unsigned long)sizeof(*slice_ent),
1006 			       &arg_size))
1007 		return -EINVAL;
1008 
1009 	if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
1010 		return -EINVAL;
1011 
1012 	if (args->data == 0)
1013 		return -EINVAL;
1014 
1015 	usr = file_priv->driver_priv;
1016 	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1017 	if (!usr->qddev) {
1018 		ret = -ENODEV;
1019 		goto unlock_usr_srcu;
1020 	}
1021 
1022 	qdev = usr->qddev->qdev;
1023 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1024 	if (qdev->dev_state != QAIC_ONLINE) {
1025 		ret = -ENODEV;
1026 		goto unlock_dev_srcu;
1027 	}
1028 
1029 	if (args->hdr.dbc_id >= qdev->num_dbc) {
1030 		ret = -EINVAL;
1031 		goto unlock_dev_srcu;
1032 	}
1033 
1034 	user_data = u64_to_user_ptr(args->data);
1035 
1036 	slice_ent = memdup_user(user_data, arg_size);
1037 	if (IS_ERR(slice_ent)) {
1038 		ret = PTR_ERR(slice_ent);
1039 		goto unlock_dev_srcu;
1040 	}
1041 
1042 	obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
1043 	if (!obj) {
1044 		ret = -ENOENT;
1045 		goto free_slice_ent;
1046 	}
1047 
1048 	ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, obj->size);
1049 	if (ret)
1050 		goto put_bo;
1051 
1052 	bo = to_qaic_bo(obj);
1053 	ret = mutex_lock_interruptible(&bo->lock);
1054 	if (ret)
1055 		goto put_bo;
1056 
1057 	if (bo->sliced) {
1058 		ret = -EINVAL;
1059 		goto unlock_bo;
1060 	}
1061 
1062 	dbc = &qdev->dbc[args->hdr.dbc_id];
1063 	rcu_id = srcu_read_lock(&dbc->ch_lock);
1064 	if (dbc->usr != usr) {
1065 		ret = -EINVAL;
1066 		goto unlock_ch_srcu;
1067 	}
1068 
1069 	if (dbc->id == qdev->ssr_dbc) {
1070 		ret = -EPIPE;
1071 		goto unlock_ch_srcu;
1072 	}
1073 
1074 	ret = qaic_prepare_bo(qdev, bo, &args->hdr);
1075 	if (ret)
1076 		goto unlock_ch_srcu;
1077 
1078 	ret = qaic_attach_slicing_bo(qdev, bo, &args->hdr, slice_ent);
1079 	if (ret)
1080 		goto unprepare_bo;
1081 
1082 	if (args->hdr.dir == DMA_TO_DEVICE)
1083 		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);
1084 
1085 	bo->sliced = true;
1086 	list_add_tail(&bo->bo_list, &bo->dbc->bo_lists);
1087 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
1088 	mutex_unlock(&bo->lock);
1089 	kfree(slice_ent);
1090 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1091 	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1092 
1093 	return 0;
1094 
1095 unprepare_bo:
1096 	qaic_unprepare_bo(qdev, bo);
1097 unlock_ch_srcu:
1098 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
1099 unlock_bo:
1100 	mutex_unlock(&bo->lock);
1101 put_bo:
1102 	drm_gem_object_put(obj);
1103 free_slice_ent:
1104 	kfree(slice_ent);
1105 unlock_dev_srcu:
1106 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1107 unlock_usr_srcu:
1108 	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1109 	return ret;
1110 }
1111 
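/*
 * Free space in the circular request queue, keeping one element unused so
 * that head == tail unambiguously means "empty". For example, with
 * q_size = 8: head = 2, tail = 6 gives 3 free slots (2 - 6 - 1 + 8), and
 * head = 6, tail = 2 also gives 3 free slots (6 - 2 - 1).
 */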
1112 static inline u32 fifo_space_avail(u32 head, u32 tail, u32 q_size)
1113 {
1114 	u32 avail = head - tail - 1;
1115 
1116 	if (head <= tail)
1117 		avail += q_size;
1118 
1119 	return avail;
1120 }
1121 
1122 static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id,
1123 				 u32 head, u32 *ptail)
1124 {
1125 	struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
1126 	struct dbc_req *reqs = slice->reqs;
1127 	u32 tail = *ptail;
1128 	u32 avail;
1129 
1130 	avail = fifo_space_avail(head, tail, dbc->nelem);
1131 	if (avail < slice->nents)
1132 		return -EAGAIN;
1133 
1134 	if (tail + slice->nents > dbc->nelem) {
1135 		avail = dbc->nelem - tail;
1136 		avail = min_t(u32, avail, slice->nents);
1137 		memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
1138 		reqs += avail;
1139 		avail = slice->nents - avail;
1140 		if (avail)
1141 			memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail);
1142 	} else {
1143 		memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents);
1144 	}
1145 
1146 	*ptail = (tail + slice->nents) % dbc->nelem;
1147 
1148 	return 0;
1149 }
1150 
1151 static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice,
1152 					 u64 resize, struct dma_bridge_chan *dbc, u32 head,
1153 					 u32 *ptail)
1154 {
1155 	struct dbc_req *reqs = slice->reqs;
1156 	struct dbc_req *last_req;
1157 	u32 tail = *ptail;
1158 	u64 last_bytes;
1159 	u32 first_n;
1160 	u32 avail;
1161 
1162 	avail = fifo_space_avail(head, tail, dbc->nelem);
1163 
1164 	/*
1165 	 * After this for loop is complete, first_n represents the index
1166 	 * of the last DMA request of this slice that needs to be
1167 	 * transferred after resizing and last_bytes represents DMA size
1168 	 * of that request.
1169 	 */
1170 	last_bytes = resize;
1171 	for (first_n = 0; first_n < slice->nents; first_n++)
1172 		if (last_bytes > le32_to_cpu(reqs[first_n].len))
1173 			last_bytes -= le32_to_cpu(reqs[first_n].len);
1174 		else
1175 			break;
1176 
1177 	if (avail < (first_n + 1))
1178 		return -EAGAIN;
1179 
1180 	if (first_n) {
1181 		if (tail + first_n > dbc->nelem) {
1182 			avail = dbc->nelem - tail;
1183 			avail = min_t(u32, avail, first_n);
1184 			memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
1185 			last_req = reqs + avail;
1186 			avail = first_n - avail;
1187 			if (avail)
1188 				memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail);
1189 		} else {
1190 			memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * first_n);
1191 		}
1192 	}
1193 
1194 	/*
1195 	 * Copy over the last entry. Here we need to adjust len to the left over
1196 	 * size, and set src and dst to the entry it is copied to.
1197 	 */
1198 	last_req = fifo_at(dbc->req_q_base, (tail + first_n) % dbc->nelem);
1199 	memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs));
1200 
1201 	/*
1202 	 * last_bytes holds size of a DMA segment, maximum DMA segment size is
1203 	 * set to UINT_MAX by qaic and hence last_bytes can never exceed u32
1204 	 * range. So, by down sizing we are not corrupting the value.
1205 	 */
1206 	last_req->len = cpu_to_le32((u32)last_bytes);
1207 	last_req->src_addr = reqs[first_n].src_addr;
1208 	last_req->dest_addr = reqs[first_n].dest_addr;
1209 	if (!last_bytes)
1210 		/* Disable DMA transfer */
1211 		last_req->cmd = GENMASK(7, 2) & reqs[first_n].cmd;
1212 
1213 	*ptail = (tail + first_n + 1) % dbc->nelem;
1214 
1215 	return 0;
1216 }
1217 
1218 static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *file_priv,
1219 				  struct qaic_execute_entry *exec, unsigned int count,
1220 				  bool is_partial, struct dma_bridge_chan *dbc, u32 head,
1221 				  u32 *tail)
1222 {
1223 	struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
1224 	struct drm_gem_object *obj;
1225 	struct bo_slice *slice;
1226 	unsigned long flags;
1227 	struct qaic_bo *bo;
1228 	int i, j;
1229 	int ret;
1230 
1231 	for (i = 0; i < count; i++) {
1232 		/*
1233 		 * The ref count will be decremented when the transfer of this
1234 		 * buffer completes, inside dbc_irq_threaded_fn().
1235 		 */
1236 		obj = drm_gem_object_lookup(file_priv,
1237 					    is_partial ? pexec[i].handle : exec[i].handle);
1238 		if (!obj) {
1239 			ret = -ENOENT;
1240 			goto failed_to_send_bo;
1241 		}
1242 
1243 		bo = to_qaic_bo(obj);
1244 		ret = mutex_lock_interruptible(&bo->lock);
1245 		if (ret)
1246 			goto failed_to_send_bo;
1247 
1248 		if (!bo->sliced) {
1249 			ret = -EINVAL;
1250 			goto unlock_bo;
1251 		}
1252 
1253 		if (is_partial && pexec[i].resize > bo->base.size) {
1254 			ret = -EINVAL;
1255 			goto unlock_bo;
1256 		}
1257 
1258 		spin_lock_irqsave(&dbc->xfer_lock, flags);
1259 		if (bo_queued(bo)) {
1260 			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1261 			ret = -EINVAL;
1262 			goto unlock_bo;
1263 		}
1264 
1265 		bo->req_id = dbc->next_req_id++;
1266 
1267 		list_for_each_entry(slice, &bo->slices, slice) {
1268 			for (j = 0; j < slice->nents; j++)
1269 				slice->reqs[j].req_id = cpu_to_le16(bo->req_id);
1270 
1271 			if (is_partial && (!pexec[i].resize || pexec[i].resize <= slice->offset))
1272 				/* Configure the slice for no DMA transfer */
1273 				ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail);
1274 			else if (is_partial && pexec[i].resize < slice->offset + slice->size)
1275 				/* Configure the slice to be partially DMA transferred */
1276 				ret = copy_partial_exec_reqs(qdev, slice,
1277 							     pexec[i].resize - slice->offset, dbc,
1278 							     head, tail);
1279 			else
1280 				ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
1281 			if (ret) {
1282 				spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1283 				goto unlock_bo;
1284 			}
1285 		}
1286 		reinit_completion(&bo->xfer_done);
1287 		list_add_tail(&bo->xfer_list, &dbc->xfer_list);
1288 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1289 		dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir);
1290 		mutex_unlock(&bo->lock);
1291 	}
1292 
1293 	return 0;
1294 
1295 unlock_bo:
1296 	mutex_unlock(&bo->lock);
1297 failed_to_send_bo:
1298 	if (likely(obj))
1299 		drm_gem_object_put(obj);
1300 	for (j = 0; j < i; j++) {
1301 		spin_lock_irqsave(&dbc->xfer_lock, flags);
1302 		bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
1303 		obj = &bo->base;
1304 		list_del_init(&bo->xfer_list);
1305 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1306 		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
1307 		drm_gem_object_put(obj);
1308 	}
1309 	return ret;
1310 }
1311 
1312 static void update_profiling_data(struct drm_file *file_priv,
1313 				  struct qaic_execute_entry *exec, unsigned int count,
1314 				  bool is_partial, u64 received_ts, u64 submit_ts, u32 queue_level)
1315 {
1316 	struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
1317 	struct drm_gem_object *obj;
1318 	struct qaic_bo *bo;
1319 	int i;
1320 
1321 	for (i = 0; i < count; i++) {
1322 		/*
1323 		 * Since we already committed the BO to hardware, the only way
1324 		 * this should fail is a pending signal. We can't cancel the
1325 		 * submit to hardware, so we have to just skip the profiling
1326 		 * data. In case the signal is not fatal to the process, we
1327 		 * return success so that the user doesn't try to resubmit.
1328 		 */
1329 		obj = drm_gem_object_lookup(file_priv,
1330 					    is_partial ? pexec[i].handle : exec[i].handle);
1331 		if (!obj)
1332 			break;
1333 		bo = to_qaic_bo(obj);
1334 		bo->perf_stats.req_received_ts = received_ts;
1335 		bo->perf_stats.req_submit_ts = submit_ts;
1336 		bo->perf_stats.queue_level_before = queue_level;
1337 		queue_level += bo->total_slice_nents;
1338 		drm_gem_object_put(obj);
1339 	}
1340 }
1341 
1342 static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv,
1343 				   bool is_partial)
1344 {
1345 	struct qaic_execute *args = data;
1346 	struct qaic_execute_entry *exec;
1347 	struct dma_bridge_chan *dbc;
1348 	int usr_rcu_id, qdev_rcu_id;
1349 	struct qaic_device *qdev;
1350 	struct qaic_user *usr;
1351 	u64 received_ts;
1352 	u32 queue_level;
1353 	u64 submit_ts;
1354 	int rcu_id;
1355 	u32 head;
1356 	u32 tail;
1357 	u64 size;
1358 	int ret;
1359 
1360 	received_ts = ktime_get_ns();
1361 
1362 	size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec);
1363 	if (args->hdr.count == 0)
1364 		return -EINVAL;
1365 
1366 	exec = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, size);
1367 	if (IS_ERR(exec))
1368 		return PTR_ERR(exec);
1369 
1370 	usr = file_priv->driver_priv;
1371 	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1372 	if (!usr->qddev) {
1373 		ret = -ENODEV;
1374 		goto unlock_usr_srcu;
1375 	}
1376 
1377 	qdev = usr->qddev->qdev;
1378 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1379 	if (qdev->dev_state != QAIC_ONLINE) {
1380 		ret = -ENODEV;
1381 		goto unlock_dev_srcu;
1382 	}
1383 
1384 	if (args->hdr.dbc_id >= qdev->num_dbc) {
1385 		ret = -EINVAL;
1386 		goto unlock_dev_srcu;
1387 	}
1388 
1389 	dbc = &qdev->dbc[args->hdr.dbc_id];
1390 
1391 	rcu_id = srcu_read_lock(&dbc->ch_lock);
1392 	if (!dbc->usr || dbc->usr->handle != usr->handle) {
1393 		ret = -EPERM;
1394 		goto release_ch_rcu;
1395 	}
1396 
1397 	if (dbc->id == qdev->ssr_dbc) {
1398 		ret = -EPIPE;
1399 		goto release_ch_rcu;
1400 	}
1401 
1402 	ret = mutex_lock_interruptible(&dbc->req_lock);
1403 	if (ret)
1404 		goto release_ch_rcu;
1405 
1406 	head = readl(dbc->dbc_base + REQHP_OFF);
1407 	tail = readl(dbc->dbc_base + REQTP_OFF);
1408 
1409 	if (head == U32_MAX || tail == U32_MAX) {
1410 		/* PCI link error */
1411 		ret = -ENODEV;
1412 		goto unlock_req_lock;
1413 	}
1414 
1415 	queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);
1416 
1417 	ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
1418 				     head, &tail);
1419 	if (ret)
1420 		goto unlock_req_lock;
1421 
1422 	/* Finalize commit to hardware */
1423 	submit_ts = ktime_get_ns();
1424 	writel(tail, dbc->dbc_base + REQTP_OFF);
1425 	mutex_unlock(&dbc->req_lock);
1426 
1427 	update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
1428 			      submit_ts, queue_level);
1429 
1430 	if (datapath_polling)
1431 		schedule_work(&dbc->poll_work);
1432 
1433 unlock_req_lock:
1434 	if (ret)
1435 		mutex_unlock(&dbc->req_lock);
1436 release_ch_rcu:
1437 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
1438 unlock_dev_srcu:
1439 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1440 unlock_usr_srcu:
1441 	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1442 	kfree(exec);
1443 	return ret;
1444 }
1445 
1446 int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1447 {
1448 	return __qaic_execute_bo_ioctl(dev, data, file_priv, false);
1449 }
1450 
1451 int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1452 {
1453 	return __qaic_execute_bo_ioctl(dev, data, file_priv, true);
1454 }
1455 
1456 /*
1457  * Our interrupt handling is a bit more complicated than a simple ideal, but
1458  * sadly necessary.
1459  *
1460  * Each dbc has a completion queue. Entries in the queue correspond to DMA
1461  * requests which the device has processed. The hardware already has a built
1462  * in irq mitigation. When the device puts an entry into the queue, it will
1463  * only trigger an interrupt if the queue was empty. Therefore, when adding
1464  * the Nth event to a non-empty queue, the hardware doesn't trigger an
1465  * interrupt. This means the host doesn't get additional interrupts signaling
1466  * the same thing - the queue has something to process.
1467  * This behavior can be overridden in the DMA request.
1468  * This means that when the host receives an interrupt, it is required to
1469  * drain the queue.
1470  *
1471  * This behavior is what NAPI attempts to accomplish, although we can't use
1472  * NAPI as we don't have a netdev. We use threaded irqs instead.
1473  *
1474  * However, there is a situation where the host drains the queue fast enough
1475  * that every event causes an interrupt. Typically this is not a problem as
1476  * the rate of events would be low. However, that is not the case with
1477  * lprnet for example. On an Intel Xeon D-2191 where we run 8 instances of
1478  * lprnet, the host receives roughly 80k interrupts per second from the device
1479  * (per /proc/interrupts). While NAPI documentation indicates the host should
1480  * just chug along, sadly that behavior causes instability in some hosts.
1481  *
1482  * Therefore, we implement an interrupt disable scheme similar to NAPI. The
1483  * key difference is that we will delay after draining the queue for a small
1484  * time to allow additional events to come in via polling. Using the above
1485  * lprnet workload, this reduces the number of interrupts processed from
1486  * ~80k/sec to about 64 in 5 minutes and appears to solve the system
1487  * instability.
1488  */
1489 irqreturn_t dbc_irq_handler(int irq, void *data)
1490 {
1491 	struct dma_bridge_chan *dbc = data;
1492 	int rcu_id;
1493 	u32 head;
1494 	u32 tail;
1495 
1496 	rcu_id = srcu_read_lock(&dbc->ch_lock);
1497 
1498 	if (datapath_polling) {
1499 		srcu_read_unlock(&dbc->ch_lock, rcu_id);
1500 		/*
1501 		 * Normally datapath_polling will not have irqs enabled, but
1502 		 * when running with only one MSI the interrupt is shared with
1503 		 * MHI so it cannot be disabled. Return ASAP instead.
1504 		 */
1505 		return IRQ_HANDLED;
1506 	}
1507 
1508 	if (!dbc->usr) {
1509 		srcu_read_unlock(&dbc->ch_lock, rcu_id);
1510 		return IRQ_HANDLED;
1511 	}
1512 
1513 	head = readl(dbc->dbc_base + RSPHP_OFF);
1514 	if (head == U32_MAX) { /* PCI link error */
1515 		srcu_read_unlock(&dbc->ch_lock, rcu_id);
1516 		return IRQ_NONE;
1517 	}
1518 
1519 	tail = readl(dbc->dbc_base + RSPTP_OFF);
1520 	if (tail == U32_MAX) { /* PCI link error */
1521 		srcu_read_unlock(&dbc->ch_lock, rcu_id);
1522 		return IRQ_NONE;
1523 	}
1524 
1525 	if (head == tail) { /* queue empty */
1526 		srcu_read_unlock(&dbc->ch_lock, rcu_id);
1527 		return IRQ_NONE;
1528 	}
1529 
1530 	if (!dbc->qdev->single_msi)
1531 		disable_irq_nosync(irq);
1532 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
1533 	return IRQ_WAKE_THREAD;
1534 }
1535 
1536 void qaic_irq_polling_work(struct work_struct *work)
1537 {
1538 	struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan,  poll_work);
1539 	unsigned long flags;
1540 	int rcu_id;
1541 	u32 head;
1542 	u32 tail;
1543 
1544 	rcu_id = srcu_read_lock(&dbc->ch_lock);
1545 
1546 	while (1) {
1547 		if (dbc->qdev->dev_state != QAIC_ONLINE) {
1548 			srcu_read_unlock(&dbc->ch_lock, rcu_id);
1549 			return;
1550 		}
1551 		if (!dbc->usr) {
1552 			srcu_read_unlock(&dbc->ch_lock, rcu_id);
1553 			return;
1554 		}
1555 		spin_lock_irqsave(&dbc->xfer_lock, flags);
1556 		if (list_empty(&dbc->xfer_list)) {
1557 			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1558 			srcu_read_unlock(&dbc->ch_lock, rcu_id);
1559 			return;
1560 		}
1561 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1562 
1563 		head = readl(dbc->dbc_base + RSPHP_OFF);
1564 		if (head == U32_MAX) { /* PCI link error */
1565 			srcu_read_unlock(&dbc->ch_lock, rcu_id);
1566 			return;
1567 		}
1568 
1569 		tail = readl(dbc->dbc_base + RSPTP_OFF);
1570 		if (tail == U32_MAX) { /* PCI link error */
1571 			srcu_read_unlock(&dbc->ch_lock, rcu_id);
1572 			return;
1573 		}
1574 
1575 		if (head != tail) {
1576 			irq_wake_thread(dbc->irq, dbc);
1577 			srcu_read_unlock(&dbc->ch_lock, rcu_id);
1578 			return;
1579 		}
1580 
1581 		cond_resched();
1582 		usleep_range(datapath_poll_interval_us, 2 * datapath_poll_interval_us);
1583 	}
1584 }
1585 
1586 irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
1587 {
1588 	struct dma_bridge_chan *dbc = data;
1589 	int event_count = NUM_EVENTS;
1590 	int delay_count = NUM_DELAYS;
1591 	struct qaic_device *qdev;
1592 	struct qaic_bo *bo, *i;
1593 	struct dbc_rsp *rsp;
1594 	unsigned long flags;
1595 	int rcu_id;
1596 	u16 status;
1597 	u16 req_id;
1598 	u32 head;
1599 	u32 tail;
1600 
1601 	rcu_id = srcu_read_lock(&dbc->ch_lock);
1602 	qdev = dbc->qdev;
1603 
1604 	head = readl(dbc->dbc_base + RSPHP_OFF);
1605 	if (head == U32_MAX) /* PCI link error */
1606 		goto error_out;
1607 
1608 read_fifo:
1609 
1610 	if (!event_count) {
1611 		event_count = NUM_EVENTS;
1612 		cond_resched();
1613 	}
1614 
1615 	/*
1616 	 * if this channel isn't assigned or gets unassigned during processing
1617 	 * we have nothing further to do
1618 	 */
1619 	if (!dbc->usr)
1620 		goto error_out;
1621 
1622 	tail = readl(dbc->dbc_base + RSPTP_OFF);
1623 	if (tail == U32_MAX) /* PCI link error */
1624 		goto error_out;
1625 
1626 	if (head == tail) { /* queue empty */
1627 		if (delay_count) {
1628 			--delay_count;
1629 			usleep_range(100, 200);
1630 			goto read_fifo; /* check for a new event */
1631 		}
1632 		goto normal_out;
1633 	}
1634 
1635 	delay_count = NUM_DELAYS;
1636 	while (head != tail) {
1637 		if (!event_count)
1638 			break;
1639 		--event_count;
1640 		rsp = dbc->rsp_q_base + head * sizeof(*rsp);
1641 		req_id = le16_to_cpu(rsp->req_id);
1642 		status = le16_to_cpu(rsp->status);
1643 		if (status)
1644 			pci_dbg(qdev->pdev, "req_id %d failed with status %d\n", req_id, status);
1645 		spin_lock_irqsave(&dbc->xfer_lock, flags);
1646 		/*
1647 		 * A BO can receive multiple interrupts, since a BO can be
1648 		 * divided into multiple slices and a buffer receives as many
1649 		 * interrupts as slices. So until it receives interrupts for
1650 		 * all the slices we cannot mark that buffer complete.
1651 		 */
1652 		list_for_each_entry_safe(bo, i, &dbc->xfer_list, xfer_list) {
1653 			if (bo->req_id == req_id)
1654 				bo->nr_slice_xfer_done++;
1655 			else
1656 				continue;
1657 
1658 			if (bo->nr_slice_xfer_done < bo->nr_slice)
1659 				break;
1660 
1661 			/*
1662 			 * At this point we have received all the interrupts for
1663 			 * BO, which means BO execution is complete.
1664 			 */
1665 			dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
1666 			bo->nr_slice_xfer_done = 0;
1667 			list_del_init(&bo->xfer_list);
1668 			bo->perf_stats.req_processed_ts = ktime_get_ns();
1669 			complete_all(&bo->xfer_done);
1670 			drm_gem_object_put(&bo->base);
1671 			break;
1672 		}
1673 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1674 		head = (head + 1) % dbc->nelem;
1675 	}
1676 
1677 	/*
1678 	 * Update the head pointer of response queue and let the device know
1679 	 * that we have consumed elements from the queue.
1680 	 */
1681 	writel(head, dbc->dbc_base + RSPHP_OFF);
1682 
1683 	/* elements might have been put in the queue while we were processing */
1684 	goto read_fifo;
1685 
1686 normal_out:
1687 	if (!qdev->single_msi && likely(!datapath_polling))
1688 		enable_irq(irq);
1689 	else if (unlikely(datapath_polling))
1690 		schedule_work(&dbc->poll_work);
1691 	/* checking the fifo and enabling irqs is a race, missed event check */
1692 	tail = readl(dbc->dbc_base + RSPTP_OFF);
1693 	if (tail != U32_MAX && head != tail) {
1694 		if (!qdev->single_msi && likely(!datapath_polling))
1695 			disable_irq_nosync(irq);
1696 		goto read_fifo;
1697 	}
1698 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
1699 	return IRQ_HANDLED;
1700 
1701 error_out:
1702 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
1703 	if (!qdev->single_msi && likely(!datapath_polling))
1704 		enable_irq(irq);
1705 	else if (unlikely(datapath_polling))
1706 		schedule_work(&dbc->poll_work);
1707 
1708 	return IRQ_HANDLED;
1709 }
1710 
1711 int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1712 {
1713 	struct qaic_wait *args = data;
1714 	int usr_rcu_id, qdev_rcu_id;
1715 	struct dma_bridge_chan *dbc;
1716 	struct drm_gem_object *obj;
1717 	struct qaic_device *qdev;
1718 	unsigned long timeout;
1719 	struct qaic_user *usr;
1720 	struct qaic_bo *bo;
1721 	int rcu_id;
1722 	int ret;
1723 
1724 	if (args->pad != 0)
1725 		return -EINVAL;
1726 
1727 	usr = file_priv->driver_priv;
1728 	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1729 	if (!usr->qddev) {
1730 		ret = -ENODEV;
1731 		goto unlock_usr_srcu;
1732 	}
1733 
1734 	qdev = usr->qddev->qdev;
1735 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1736 	if (qdev->dev_state != QAIC_ONLINE) {
1737 		ret = -ENODEV;
1738 		goto unlock_dev_srcu;
1739 	}
1740 
1741 	if (args->dbc_id >= qdev->num_dbc) {
1742 		ret = -EINVAL;
1743 		goto unlock_dev_srcu;
1744 	}
1745 
1746 	dbc = &qdev->dbc[args->dbc_id];
1747 
1748 	rcu_id = srcu_read_lock(&dbc->ch_lock);
1749 	if (dbc->usr != usr) {
1750 		ret = -EPERM;
1751 		goto unlock_ch_srcu;
1752 	}
1753 
1754 	if (dbc->id == qdev->ssr_dbc) {
1755 		ret = -EPIPE;
1756 		goto unlock_ch_srcu;
1757 	}
1758 
1759 	obj = drm_gem_object_lookup(file_priv, args->handle);
1760 	if (!obj) {
1761 		ret = -ENOENT;
1762 		goto unlock_ch_srcu;
1763 	}
1764 
1765 	bo = to_qaic_bo(obj);
1766 	timeout = args->timeout ? args->timeout : wait_exec_default_timeout_ms;
1767 	timeout = msecs_to_jiffies(timeout);
1768 	ret = wait_for_completion_interruptible_timeout(&bo->xfer_done, timeout);
1769 	if (!ret) {
1770 		ret = -ETIMEDOUT;
1771 		goto put_obj;
1772 	}
1773 	if (ret > 0)
1774 		ret = 0;
1775 
1776 	if (!dbc->usr)
1777 		ret = -EPERM;
1778 
1779 	if (dbc->id == qdev->ssr_dbc)
1780 		ret = -EPIPE;
1781 
1782 put_obj:
1783 	drm_gem_object_put(obj);
1784 unlock_ch_srcu:
1785 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
1786 unlock_dev_srcu:
1787 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1788 unlock_usr_srcu:
1789 	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1790 	return ret;
1791 }
1792 
1793 int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1794 {
1795 	struct qaic_perf_stats_entry *ent = NULL;
1796 	struct qaic_perf_stats *args = data;
1797 	int usr_rcu_id, qdev_rcu_id;
1798 	struct drm_gem_object *obj;
1799 	struct qaic_device *qdev;
1800 	struct qaic_user *usr;
1801 	struct qaic_bo *bo;
1802 	int ret = 0;
1803 	int i;
1804 
1805 	usr = file_priv->driver_priv;
1806 	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1807 	if (!usr->qddev) {
1808 		ret = -ENODEV;
1809 		goto unlock_usr_srcu;
1810 	}
1811 
1812 	qdev = usr->qddev->qdev;
1813 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1814 	if (qdev->dev_state != QAIC_ONLINE) {
1815 		ret = -ENODEV;
1816 		goto unlock_dev_srcu;
1817 	}
1818 
1819 	if (args->hdr.dbc_id >= qdev->num_dbc) {
1820 		ret = -EINVAL;
1821 		goto unlock_dev_srcu;
1822 	}
1823 
1824 	ent = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, sizeof(*ent));
1825 	if (IS_ERR(ent)) {
1826 		ret = PTR_ERR(ent);
1827 		goto unlock_dev_srcu;
1828 	}
1829 
1830 	for (i = 0; i < args->hdr.count; i++) {
1831 		obj = drm_gem_object_lookup(file_priv, ent[i].handle);
1832 		if (!obj) {
1833 			ret = -ENOENT;
1834 			goto free_ent;
1835 		}
1836 		bo = to_qaic_bo(obj);
1837 		if (!bo->sliced) {
1838 			drm_gem_object_put(obj);
1839 			ret = -EINVAL;
1840 			goto free_ent;
1841 		}
1842 		if (bo->dbc->id != args->hdr.dbc_id) {
1843 			drm_gem_object_put(obj);
1844 			ret = -EINVAL;
1845 			goto free_ent;
1846 		}
1847 		/*
1848 		 * If the perf stats ioctl is called before the wait ioctl has
1849 		 * completed, the latency information is invalid.
1850 		 */
1851 		if (bo->perf_stats.req_processed_ts < bo->perf_stats.req_submit_ts) {
1852 			ent[i].device_latency_us = 0;
1853 		} else {
1854 			ent[i].device_latency_us = div_u64((bo->perf_stats.req_processed_ts -
1855 							    bo->perf_stats.req_submit_ts), 1000);
1856 		}
1857 		ent[i].submit_latency_us = div_u64((bo->perf_stats.req_submit_ts -
1858 						    bo->perf_stats.req_received_ts), 1000);
1859 		ent[i].queue_level_before = bo->perf_stats.queue_level_before;
1860 		ent[i].num_queue_element = bo->total_slice_nents;
1861 		drm_gem_object_put(obj);
1862 	}
1863 
1864 	if (copy_to_user(u64_to_user_ptr(args->data), ent, args->hdr.count * sizeof(*ent)))
1865 		ret = -EFAULT;
1866 
1867 free_ent:
1868 	kfree(ent);
1869 unlock_dev_srcu:
1870 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1871 unlock_usr_srcu:
1872 	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1873 	return ret;
1874 }
1875 
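/*
 * Undo the slicing/attachment of a BO: free the slices, undo the prepare
 * step, reinitialize the BO state, remove it from the DBC's BO list, and drop
 * the reference taken when the slice was attached. Caller must hold bo->lock.
 */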
1876 static void detach_slice_bo(struct qaic_device *qdev, struct qaic_bo *bo)
1877 {
1878 	qaic_free_slices_bo(bo);
1879 	qaic_unprepare_bo(qdev, bo);
1880 	qaic_init_bo(bo, true);
1881 	list_del(&bo->bo_list);
1882 	drm_gem_object_put(&bo->base);
1883 }
1884 
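/*
 * Handle the QAIC detach slice BO ioctl. Detaches a sliced BO from its DBC,
 * provided the caller owns the DBC and the BO is not currently queued to the
 * hardware.
 */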
1885 int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1886 {
1887 	struct qaic_detach_slice *args = data;
1888 	int rcu_id, usr_rcu_id, qdev_rcu_id;
1889 	struct dma_bridge_chan *dbc;
1890 	struct drm_gem_object *obj;
1891 	struct qaic_device *qdev;
1892 	struct qaic_user *usr;
1893 	unsigned long flags;
1894 	struct qaic_bo *bo;
1895 	int ret;
1896 
1897 	if (args->pad != 0)
1898 		return -EINVAL;
1899 
1900 	usr = file_priv->driver_priv;
1901 	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1902 	if (!usr->qddev) {
1903 		ret = -ENODEV;
1904 		goto unlock_usr_srcu;
1905 	}
1906 
1907 	qdev = usr->qddev->qdev;
1908 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1909 	if (qdev->dev_state != QAIC_ONLINE) {
1910 		ret = -ENODEV;
1911 		goto unlock_dev_srcu;
1912 	}
1913 
1914 	obj = drm_gem_object_lookup(file_priv, args->handle);
1915 	if (!obj) {
1916 		ret = -ENOENT;
1917 		goto unlock_dev_srcu;
1918 	}
1919 
1920 	bo = to_qaic_bo(obj);
1921 	ret = mutex_lock_interruptible(&bo->lock);
1922 	if (ret)
1923 		goto put_bo;
1924 
1925 	if (!bo->sliced) {
1926 		ret = -EINVAL;
1927 		goto unlock_bo;
1928 	}
1929 
1930 	dbc = bo->dbc;
1931 	rcu_id = srcu_read_lock(&dbc->ch_lock);
1932 	if (dbc->usr != usr) {
1933 		ret = -EINVAL;
1934 		goto unlock_ch_srcu;
1935 	}
1936 
1937 	/* Check if BO is committed to H/W for DMA */
1938 	spin_lock_irqsave(&dbc->xfer_lock, flags);
1939 	if (bo_queued(bo)) {
1940 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1941 		ret = -EBUSY;
1942 		goto unlock_ch_srcu;
1943 	}
1944 	spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1945 
1946 	detach_slice_bo(qdev, bo);
1947 
1948 unlock_ch_srcu:
1949 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
1950 unlock_bo:
1951 	mutex_unlock(&bo->lock);
1952 put_bo:
1953 	drm_gem_object_put(obj);
1954 unlock_dev_srcu:
1955 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1956 unlock_usr_srcu:
1957 	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1958 	return ret;
1959 }
1960 
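/*
 * Drain all BOs from the DBC's transfer list. Each BO is reset, synced back
 * for CPU access, marked complete so that any waiters wake up, and has its
 * transfer reference dropped.
 */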
1961 static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
1962 {
1963 	unsigned long flags;
1964 	struct qaic_bo *bo;
1965 
1966 	spin_lock_irqsave(&dbc->xfer_lock, flags);
1967 	while (!list_empty(&dbc->xfer_list)) {
1968 		bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
1969 		list_del_init(&bo->xfer_list);
1970 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1971 		bo->nr_slice_xfer_done = 0;
1972 		bo->req_id = 0;
1973 		bo->perf_stats.req_received_ts = 0;
1974 		bo->perf_stats.req_submit_ts = 0;
1975 		bo->perf_stats.req_processed_ts = 0;
1976 		bo->perf_stats.queue_level_before = 0;
1977 		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
1978 		complete_all(&bo->xfer_done);
1979 		drm_gem_object_put(&bo->base);
1980 		spin_lock_irqsave(&dbc->xfer_lock, flags);
1981 	}
1982 	spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1983 }
1984 
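/*
 * Drain the transfer list twice around an SRCU synchronize so that BOs queued
 * by threads still holding the channel lock are also flushed out.
 */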
1985 static void sync_empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
1986 {
1987 	empty_xfer_list(qdev, dbc);
1988 	synchronize_srcu(&dbc->ch_lock);
1989 	/*
1990 	 * Threads holding the channel lock may have added more elements to the
1991 	 * xfer_list. Flush those elements out as well.
1992 	 */
1993 	empty_xfer_list(qdev, dbc);
1994 }
1995 
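/**
 * disable_dbc - Disable the DBC by removing the user context from it.
 * @qdev: qaic device handle
 * @dbc_id: ID of the DBC
 * @usr: User context
 *
 * Return: 0 on success, -EPERM if @usr does not currently own the DBC.
 */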
1996 int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
1997 {
1998 	if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle)
1999 		return -EPERM;
2000 
2001 	qdev->dbc[dbc_id].usr = NULL;
2002 	synchronize_srcu(&qdev->dbc[dbc_id].ch_lock);
2003 	return 0;
2004 }
2005 
2006 /**
2007  * enable_dbc - Enable the DBC. DBCs are disabled by removing the user
2008  * context from them; adding the user context back to the DBC enables it.
2009  * This function trusts the DBC ID passed and expects the DBC to be disabled.
2010  * @qdev: qaic device handle
2011  * @dbc_id: ID of the DBC
2012  * @usr: User context
2013  */
2014 void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
2015 {
2016 	qdev->dbc[dbc_id].usr = usr;
2017 }
2018 
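/*
 * Detach the user from the DBC and complete all outstanding transfers so that
 * any waiters are woken up.
 */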
2019 void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
2020 {
2021 	struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
2022 
2023 	dbc->usr = NULL;
2024 	sync_empty_xfer_list(qdev, dbc);
2025 }
2026 
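/*
 * Tear down an in-use DBC: wake any waiters, free the queue memory, detach
 * all attached BOs, and mark the DBC available for reuse.
 */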
2027 void release_dbc(struct qaic_device *qdev, u32 dbc_id)
2028 {
2029 	struct qaic_bo *bo, *bo_temp;
2030 	struct dma_bridge_chan *dbc;
2031 
2032 	dbc = &qdev->dbc[dbc_id];
2033 	if (!dbc->in_use)
2034 		return;
2035 
2036 	wakeup_dbc(qdev, dbc_id);
2037 
2038 	dma_free_coherent(&qdev->pdev->dev, dbc->total_size, dbc->req_q_base, dbc->dma_addr);
2039 	dbc->total_size = 0;
2040 	dbc->req_q_base = NULL;
2041 	dbc->dma_addr = 0;
2042 	dbc->nelem = 0;
2043 	dbc->usr = NULL;
2044 
2045 	list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) {
2046 		drm_gem_object_get(&bo->base);
2047 		mutex_lock(&bo->lock);
2048 		detach_slice_bo(qdev, bo);
2049 		mutex_unlock(&bo->lock);
2050 		drm_gem_object_put(&bo->base);
2051 	}
2052 
2053 	dbc->in_use = false;
2054 	wake_up(&dbc->dbc_release);
2055 }
2056 
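/* Report the current request queue head and tail pointers for a DBC. */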
2057 void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail)
2058 {
2059 	if (!dbc || !head || !tail)
2060 		return;
2061 
2062 	*head = readl(dbc->dbc_base + REQHP_OFF);
2063 	*tail = readl(dbc->dbc_base + REQTP_OFF);
2064 }
2065 
2066 /*
2067  * qaic_dbc_enter_ssr - Prepare to enter subsystem reset (SSR) for the given DBC ID.
2068  * @qdev: qaic device handle
2069  * @dbc_id: ID of the DBC which will enter SSR
2070  *
2071  * The device will automatically deactivate the workload as not
2072  * all errors can be silently recovered. The user will be
2073  * notified and will need to decide the required recovery
2074  * action to take.
2075  */
2076 void qaic_dbc_enter_ssr(struct qaic_device *qdev, u32 dbc_id)
2077 {
2078 	qdev->ssr_dbc = dbc_id;
2079 	release_dbc(qdev, dbc_id);
2080 }
2081 
2082 /*
2083  * qaic_dbc_exit_ssr - Prepare to exit subsystem reset (SSR) for the DBC currently in SSR.
2084  * @qdev: qaic device handle
2085  *
2086  * The DBC returns to an operational state and begins accepting work after exiting SSR.
2087  */
2088 void qaic_dbc_exit_ssr(struct qaic_device *qdev)
2089 {
2090 	qdev->ssr_dbc = QAIC_SSR_DBC_SENTINEL;
2091 }
2092