xref: /linux/drivers/accel/qaic/qaic_control.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 /* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
4 /* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
5 
6 #include <asm/byteorder.h>
7 #include <linux/completion.h>
8 #include <linux/crc32.h>
9 #include <linux/delay.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/kref.h>
12 #include <linux/list.h>
13 #include <linux/mhi.h>
14 #include <linux/mm.h>
15 #include <linux/moduleparam.h>
16 #include <linux/mutex.h>
17 #include <linux/overflow.h>
18 #include <linux/pci.h>
19 #include <linux/scatterlist.h>
20 #include <linux/sched/signal.h>
21 #include <linux/types.h>
22 #include <linux/uaccess.h>
23 #include <linux/workqueue.h>
24 #include <linux/wait.h>
25 #include <drm/drm_device.h>
26 #include <drm/drm_file.h>
27 #include <uapi/drm/qaic_accel.h>
28 
29 #include "qaic.h"
30 
31 #define MANAGE_MAGIC_NUMBER		((__force __le32)0x43494151) /* "QAIC" in little endian */
32 #define QAIC_DBC_Q_GAP			SZ_256
33 #define QAIC_DBC_Q_BUF_ALIGN		SZ_4K
34 #define QAIC_MANAGE_WIRE_MSG_LENGTH	SZ_64K /* Max DMA message length */
35 #define QAIC_WRAPPER_MAX_SIZE		SZ_4K
36 #define QAIC_MHI_RETRY_WAIT_MS		100
37 #define QAIC_MHI_RETRY_MAX		20
38 
39 static unsigned int control_resp_timeout_s = 60; /* 60 sec default */
40 module_param(control_resp_timeout_s, uint, 0600);
41 MODULE_PARM_DESC(control_resp_timeout_s, "Timeout for NNC responses from QSM");
42 
43 struct manage_msg {
44 	u32 len;
45 	u32 count;
46 	u8 data[];
47 };
48 
49 /*
50  * Wire encoding structures for the manage protocol.
51  * All fields are little endian on the wire.
52  */
53 struct wire_msg_hdr {
54 	__le32 crc32; /* crc of everything following this field in the message */
55 	__le32 magic_number;
56 	__le32 sequence_number;
57 	__le32 len; /* length of this message */
58 	__le32 count; /* number of transactions in this message */
59 	__le32 handle; /* unique id to track the resources consumed */
60 	__le32 partition_id; /* partition id for the request (signed) */
61 	__le32 padding; /* must be 0 */
62 } __packed;
63 
64 struct wire_msg {
65 	struct wire_msg_hdr hdr;
66 	u8 data[];
67 } __packed;
68 
69 struct wire_trans_hdr {
70 	__le32 type;
71 	__le32 len;
72 } __packed;
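
/*
 * On the wire a manage message is a wire_msg_hdr immediately followed by
 * hdr.count transactions. Each transaction starts with a wire_trans_hdr whose
 * len covers the whole transaction including that header, while hdr.len of
 * the message covers everything including the wire_msg_hdr itself. For
 * example, a message carrying a single status query is a wire_msg_hdr with
 * len = sizeof(wire_msg_hdr) + 8 and count = 1, followed by one 8-byte
 * wire_trans_hdr with type = QAIC_TRANS_STATUS_TO_DEV.
 */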
73 
74 /* Each message sent from the driver to the device is organized as a list of wrapper_msg */
75 struct wrapper_msg {
76 	struct list_head list;
77 	struct kref ref_count;
78 	u32 len; /* length of data to transfer */
79 	struct wrapper_list *head;
80 	union {
81 		struct wire_msg msg;
82 		struct wire_trans_hdr trans;
83 	};
84 };
85 
86 struct wrapper_list {
87 	struct list_head list;
88 	spinlock_t lock; /* Protects the list state during additions and removals */
89 };
90 
91 struct wire_trans_passthrough {
92 	struct wire_trans_hdr hdr;
93 	u8 data[];
94 } __packed;
95 
96 struct wire_addr_size_pair {
97 	__le64 addr;
98 	__le64 size;
99 } __packed;
100 
101 struct wire_trans_dma_xfer {
102 	struct wire_trans_hdr hdr;
103 	__le32 tag;
104 	__le32 count;
105 	__le32 dma_chunk_id;
106 	__le32 padding;
107 	struct wire_addr_size_pair data[];
108 } __packed;
109 
110 /* Initiated by device to continue the DMA xfer of a large piece of data */
111 struct wire_trans_dma_xfer_cont {
112 	struct wire_trans_hdr hdr;
113 	__le32 dma_chunk_id;
114 	__le32 padding;
115 	__le64 xferred_size;
116 } __packed;
117 
118 struct wire_trans_activate_to_dev {
119 	struct wire_trans_hdr hdr;
120 	__le64 req_q_addr;
121 	__le64 rsp_q_addr;
122 	__le32 req_q_size;
123 	__le32 rsp_q_size;
124 	__le32 buf_len;
125 	__le32 options; /* unused, but BIT(16) has meaning to the device */
126 } __packed;
127 
128 struct wire_trans_activate_from_dev {
129 	struct wire_trans_hdr hdr;
130 	__le32 status;
131 	__le32 dbc_id;
132 	__le64 options; /* unused */
133 } __packed;
134 
135 struct wire_trans_deactivate_from_dev {
136 	struct wire_trans_hdr hdr;
137 	__le32 status;
138 	__le32 dbc_id;
139 } __packed;
140 
141 struct wire_trans_terminate_to_dev {
142 	struct wire_trans_hdr hdr;
143 	__le32 handle;
144 	__le32 padding;
145 } __packed;
146 
147 struct wire_trans_terminate_from_dev {
148 	struct wire_trans_hdr hdr;
149 	__le32 status;
150 	__le32 padding;
151 } __packed;
152 
153 struct wire_trans_status_to_dev {
154 	struct wire_trans_hdr hdr;
155 } __packed;
156 
157 struct wire_trans_status_from_dev {
158 	struct wire_trans_hdr hdr;
159 	__le16 major;
160 	__le16 minor;
161 	__le32 status;
162 	__le64 status_flags;
163 } __packed;
164 
165 struct wire_trans_validate_part_to_dev {
166 	struct wire_trans_hdr hdr;
167 	__le32 part_id;
168 	__le32 padding;
169 } __packed;
170 
171 struct wire_trans_validate_part_from_dev {
172 	struct wire_trans_hdr hdr;
173 	__le32 status;
174 	__le32 padding;
175 } __packed;
176 
177 struct xfer_queue_elem {
178 	/*
179 	 * Node in the list of ongoing transfer requests on the control channel.
180 	 * Maintained by root device struct.
181 	 */
182 	struct list_head list;
183 	/* Sequence number of this transfer request */
184 	u32 seq_num;
185 	/* Waited on until completion of the transfer request */
186 	struct completion xfer_done;
187 	/* Received data from device */
188 	void *buf;
189 };
190 
191 struct dma_xfer {
192 	/* Node in list of DMA transfers which is used for cleanup */
193 	struct list_head list;
194 	/* SG table of memory used for DMA */
195 	struct sg_table *sgt;
196 	/* Array of pages used for DMA */
197 	struct page **page_list;
198 	/* Number of pages used for DMA */
199 	unsigned long nr_pages;
200 };
201 
202 struct ioctl_resources {
203 	/* List of all DMA transfers which is used later for cleanup */
204 	struct list_head dma_xfers;
205 	/* Base address of request queue which belongs to a DBC */
206 	void *buf;
207 	/*
208 	 * Base bus address of the request queue which belongs to a DBC. The
209 	 * response queue base bus address can be calculated by adding the size
210 	 * of the request queue to the base bus address of the request queue.
211 	 */
212 	dma_addr_t dma_addr;
213 	/* Total size of the request queue and response queue in bytes */
214 	u32 total_size;
215 	/* Total number of elements that can be queued in each of the request and response queues */
216 	u32 nelem;
217 	/* Base address of response queue which belongs to a DBC */
218 	void *rsp_q_base;
219 	/* Status of the NNC message received */
220 	u32 status;
221 	/* DBC id of the DBC received from device */
222 	u32 dbc_id;
223 	/*
224 	 * DMA transfer request messages can be large, and it may not be
225 	 * possible to send them in one shot. In such cases the message is
226 	 * broken into chunks; this field stores the ID of such a chunk.
227 	 */
228 	u32 dma_chunk_id;
229 	/* Total number of bytes transferred for a DMA xfer request */
230 	u64 xferred_dma_size;
231 	/* Header of transaction message received from user. Used during DMA xfer request. */
232 	void *trans_hdr;
233 };
234 
235 struct resp_work {
236 	struct work_struct work;
237 	struct qaic_device *qdev;
238 	void *buf;
239 };
240 
241 /*
242  * Since we're working with little endian messages, it's useful to be able to
243  * increment without filling a whole line with conversions back and forth just
244  * to add one (1) to a message count.
245  */
246 static __le32 incr_le32(__le32 val)
247 {
248 	return cpu_to_le32(le32_to_cpu(val) + 1);
249 }
250 
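/*
 * CRC helpers for the wire message header. gen_crc() runs a CRC32 over every
 * wrapper in the message and is used to fill wire_msg_hdr.crc32; valid_crc()
 * recomputes the CRC over a received message with the crc32 field zeroed.
 * The _stub variants are installed instead when the device reports CRC
 * disabled (gen_crc_stub, see get_cntl_version()) or before the device's
 * setting is known (valid_crc_stub, see qaic_control_open()).
 */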
251 static u32 gen_crc(void *msg)
252 {
253 	struct wrapper_list *wrappers = msg;
254 	struct wrapper_msg *w;
255 	u32 crc = ~0;
256 
257 	list_for_each_entry(w, &wrappers->list, list)
258 		crc = crc32(crc, &w->msg, w->len);
259 
260 	return crc ^ ~0;
261 }
262 
263 static u32 gen_crc_stub(void *msg)
264 {
265 	return 0;
266 }
267 
268 static bool valid_crc(void *msg)
269 {
270 	struct wire_msg_hdr *hdr = msg;
271 	bool ret;
272 	u32 crc;
273 
274 	/*
275 	 * The output of this algorithm is always converted to the native
276 	 * endianness.
277 	 */
278 	crc = le32_to_cpu(hdr->crc32);
279 	hdr->crc32 = 0;
280 	ret = (crc32(~0, msg, le32_to_cpu(hdr->len)) ^ ~0) == crc;
281 	hdr->crc32 = cpu_to_le32(crc);
282 	return ret;
283 }
284 
285 static bool valid_crc_stub(void *msg)
286 {
287 	return true;
288 }
289 
290 static void free_wrapper(struct kref *ref)
291 {
292 	struct wrapper_msg *wrapper = container_of(ref, struct wrapper_msg, ref_count);
293 
294 	list_del(&wrapper->list);
295 	kfree(wrapper);
296 }
297 
298 static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources,
299 			 struct qaic_user *usr)
300 {
301 	u32 dbc_id = resources->dbc_id;
302 
303 	if (resources->buf) {
304 		wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use);
305 		qdev->dbc[dbc_id].req_q_base = resources->buf;
306 		qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base;
307 		qdev->dbc[dbc_id].dma_addr = resources->dma_addr;
308 		qdev->dbc[dbc_id].total_size = resources->total_size;
309 		qdev->dbc[dbc_id].nelem = resources->nelem;
310 		enable_dbc(qdev, dbc_id, usr);
311 		qdev->dbc[dbc_id].in_use = true;
312 		resources->buf = NULL;
313 	}
314 }
315 
316 static void free_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources)
317 {
318 	if (resources->buf)
319 		dma_free_coherent(&qdev->pdev->dev, resources->total_size, resources->buf,
320 				  resources->dma_addr);
321 	resources->buf = NULL;
322 }
323 
324 static void free_dma_xfers(struct qaic_device *qdev, struct ioctl_resources *resources)
325 {
326 	struct dma_xfer *xfer;
327 	struct dma_xfer *x;
328 	int i;
329 
330 	list_for_each_entry_safe(xfer, x, &resources->dma_xfers, list) {
331 		dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
332 		sg_free_table(xfer->sgt);
333 		kfree(xfer->sgt);
334 		for (i = 0; i < xfer->nr_pages; ++i)
335 			put_page(xfer->page_list[i]);
336 		kfree(xfer->page_list);
337 		list_del(&xfer->list);
338 		kfree(xfer);
339 	}
340 }
341 
342 static struct wrapper_msg *add_wrapper(struct wrapper_list *wrappers, u32 size)
343 {
344 	struct wrapper_msg *w = kzalloc(size, GFP_KERNEL);
345 
346 	if (!w)
347 		return NULL;
348 	list_add_tail(&w->list, &wrappers->list);
349 	kref_init(&w->ref_count);
350 	w->head = wrappers;
351 	return w;
352 }
353 
354 static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
355 			      u32 *user_len)
356 {
357 	struct qaic_manage_trans_passthrough *in_trans = trans;
358 	struct wire_trans_passthrough *out_trans;
359 	struct wrapper_msg *trans_wrapper;
360 	struct wrapper_msg *wrapper;
361 	struct wire_msg *msg;
362 	u32 msg_hdr_len;
363 
364 	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
365 	msg = &wrapper->msg;
366 	msg_hdr_len = le32_to_cpu(msg->hdr.len);
367 
368 	if (in_trans->hdr.len % 8 != 0)
369 		return -EINVAL;
370 
371 	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH)
372 		return -ENOSPC;
373 
374 	trans_wrapper = add_wrapper(wrappers,
375 				    offsetof(struct wrapper_msg, trans) + in_trans->hdr.len);
376 	if (!trans_wrapper)
377 		return -ENOMEM;
378 	trans_wrapper->len = in_trans->hdr.len;
379 	out_trans = (struct wire_trans_passthrough *)&trans_wrapper->trans;
380 
381 	memcpy(out_trans->data, in_trans->data, in_trans->hdr.len - sizeof(in_trans->hdr));
382 	msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
383 	msg->hdr.count = incr_le32(msg->hdr.count);
384 	*user_len += in_trans->hdr.len;
385 	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_PASSTHROUGH_TO_DEV);
386 	out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);
387 
388 	return 0;
389 }
390 
391 /* Returns a negative error code on failure, 0 if enough pages were alloc'd, 1 if dma_cont is needed */
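/*
 * Pinning note: the page array is sized for the whole remaining transfer, but
 * if kmalloc_array() fails the request is halved repeatedly until an
 * allocation succeeds, so only a prefix of the user buffer may get pinned and
 * mapped. Returning 1 tells the caller that a DMA continuation
 * (wire_trans_dma_xfer_cont) will be needed to send the rest.
 */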
392 static int find_and_map_user_pages(struct qaic_device *qdev,
393 				   struct qaic_manage_trans_dma_xfer *in_trans,
394 				   struct ioctl_resources *resources, struct dma_xfer *xfer)
395 {
396 	u64 xfer_start_addr, remaining, end, total;
397 	unsigned long need_pages;
398 	struct page **page_list;
399 	unsigned long nr_pages;
400 	struct sg_table *sgt;
401 	int ret;
402 	int i;
403 
404 	if (check_add_overflow(in_trans->addr, resources->xferred_dma_size, &xfer_start_addr))
405 		return -EINVAL;
406 
407 	if (in_trans->size < resources->xferred_dma_size)
408 		return -EINVAL;
409 	remaining = in_trans->size - resources->xferred_dma_size;
410 	if (remaining == 0)
411 		return -EINVAL;
412 
413 	if (check_add_overflow(xfer_start_addr, remaining, &end))
414 		return -EINVAL;
415 
416 	total = remaining + offset_in_page(xfer_start_addr);
417 	if (total >= SIZE_MAX)
418 		return -EINVAL;
419 
420 	need_pages = DIV_ROUND_UP(total, PAGE_SIZE);
421 
422 	nr_pages = need_pages;
423 
424 	while (1) {
425 		page_list = kmalloc_array(nr_pages, sizeof(*page_list), GFP_KERNEL | __GFP_NOWARN);
426 		if (!page_list) {
427 			nr_pages = nr_pages / 2;
428 			if (!nr_pages)
429 				return -ENOMEM;
430 		} else {
431 			break;
432 		}
433 	}
434 
435 	ret = get_user_pages_fast(xfer_start_addr, nr_pages, 0, page_list);
436 	if (ret < 0)
437 		goto free_page_list;
438 	if (ret != nr_pages) {
439 		nr_pages = ret;
440 		ret = -EFAULT;
441 		goto put_pages;
442 	}
443 
444 	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
445 	if (!sgt) {
446 		ret = -ENOMEM;
447 		goto put_pages;
448 	}
449 
450 	ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
451 					offset_in_page(xfer_start_addr),
452 					remaining, GFP_KERNEL);
453 	if (ret) {
454 		ret = -ENOMEM;
455 		goto free_sgt;
456 	}
457 
458 	ret = dma_map_sgtable(&qdev->pdev->dev, sgt, DMA_TO_DEVICE, 0);
459 	if (ret)
460 		goto free_table;
461 
462 	xfer->sgt = sgt;
463 	xfer->page_list = page_list;
464 	xfer->nr_pages = nr_pages;
465 
466 	return need_pages > nr_pages ? 1 : 0;
467 
468 free_table:
469 	sg_free_table(sgt);
470 free_sgt:
471 	kfree(sgt);
472 put_pages:
473 	for (i = 0; i < nr_pages; ++i)
474 		put_page(page_list[i]);
475 free_page_list:
476 	kfree(page_list);
477 	return ret;
478 }
479 
480 /* Returns a negative error code on failure, 0 if everything was encoded, 1 if dma_cont is needed */
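/*
 * Each DMA-mapped scatterlist entry is encoded as a wire_addr_size_pair.
 * Pairs are packed into QAIC_WRAPPER_MAX_SIZE wrappers, chaining a new
 * wrapper whenever the current one fills up. If the wire message budget
 * (minus 1K reserved for follow-up transactions) cannot hold every entry,
 * only the first nents_dma entries are encoded and 1 is returned so the
 * remainder goes out in a DMA continuation.
 */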
481 static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wrappers,
482 				  struct ioctl_resources *resources, u32 msg_hdr_len, u32 *size,
483 				  struct wire_trans_dma_xfer **out_trans)
484 {
485 	struct wrapper_msg *trans_wrapper;
486 	struct sg_table *sgt = xfer->sgt;
487 	struct wire_addr_size_pair *asp;
488 	struct scatterlist *sg;
489 	struct wrapper_msg *w;
490 	unsigned int dma_len;
491 	u64 dma_chunk_len;
492 	void *boundary;
493 	int nents_dma;
494 	int nents;
495 	int i;
496 
497 	nents = sgt->nents;
498 	nents_dma = nents;
499 	*size = QAIC_MANAGE_WIRE_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
500 	for_each_sgtable_dma_sg(sgt, sg, i) {
501 		*size -= sizeof(*asp);
502 		/* Save 1K for possible follow-up transactions. */
503 		if (*size < SZ_1K) {
504 			nents_dma = i;
505 			break;
506 		}
507 	}
508 
509 	trans_wrapper = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
510 	if (!trans_wrapper)
511 		return -ENOMEM;
512 	*out_trans = (struct wire_trans_dma_xfer *)&trans_wrapper->trans;
513 
514 	asp = (*out_trans)->data;
515 	boundary = (void *)trans_wrapper + QAIC_WRAPPER_MAX_SIZE;
516 	*size = 0;
517 
518 	dma_len = 0;
519 	w = trans_wrapper;
520 	dma_chunk_len = 0;
521 	for_each_sg(sgt->sgl, sg, nents_dma, i) {
522 		asp->size = cpu_to_le64(dma_len);
523 		dma_chunk_len += dma_len;
524 		if (dma_len) {
525 			asp++;
526 			if ((void *)asp + sizeof(*asp) > boundary) {
527 				w->len = (void *)asp - (void *)&w->msg;
528 				*size += w->len;
529 				w = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
530 				if (!w)
531 					return -ENOMEM;
532 				boundary = (void *)w + QAIC_WRAPPER_MAX_SIZE;
533 				asp = (struct wire_addr_size_pair *)&w->msg;
534 			}
535 		}
536 		asp->addr = cpu_to_le64(sg_dma_address(sg));
537 		dma_len = sg_dma_len(sg);
538 	}
539 	/* finalize the last segment */
540 	asp->size = cpu_to_le64(dma_len);
541 	w->len = (void *)asp + sizeof(*asp) - (void *)&w->msg;
542 	*size += w->len;
543 	dma_chunk_len += dma_len;
544 	resources->xferred_dma_size += dma_chunk_len;
545 
546 	return nents_dma < nents ? 1 : 0;
547 }
548 
549 static void cleanup_xfer(struct qaic_device *qdev, struct dma_xfer *xfer)
550 {
551 	int i;
552 
553 	dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
554 	sg_free_table(xfer->sgt);
555 	kfree(xfer->sgt);
556 	for (i = 0; i < xfer->nr_pages; ++i)
557 		put_page(xfer->page_list[i]);
558 	kfree(xfer->page_list);
559 }
560 
561 static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
562 		      u32 *user_len, struct ioctl_resources *resources, struct qaic_user *usr)
563 {
564 	struct qaic_manage_trans_dma_xfer *in_trans = trans;
565 	struct wire_trans_dma_xfer *out_trans;
566 	struct wrapper_msg *wrapper;
567 	struct dma_xfer *xfer;
568 	struct wire_msg *msg;
569 	bool need_cont_dma;
570 	u32 msg_hdr_len;
571 	u32 size;
572 	int ret;
573 
574 	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
575 	msg = &wrapper->msg;
576 	msg_hdr_len = le32_to_cpu(msg->hdr.len);
577 
578 	/* There should be enough space to hold at least one ASP entry. */
579 	if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) >
580 	    QAIC_MANAGE_WIRE_MSG_LENGTH)
581 		return -ENOMEM;
582 
583 	xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
584 	if (!xfer)
585 		return -ENOMEM;
586 
587 	ret = find_and_map_user_pages(qdev, in_trans, resources, xfer);
588 	if (ret < 0)
589 		goto free_xfer;
590 
591 	need_cont_dma = (bool)ret;
592 
593 	ret = encode_addr_size_pairs(xfer, wrappers, resources, msg_hdr_len, &size, &out_trans);
594 	if (ret < 0)
595 		goto cleanup_xfer;
596 
597 	need_cont_dma = need_cont_dma || (bool)ret;
598 
599 	msg->hdr.len = cpu_to_le32(msg_hdr_len + size);
600 	msg->hdr.count = incr_le32(msg->hdr.count);
601 
602 	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
603 	out_trans->hdr.len = cpu_to_le32(size);
604 	out_trans->tag = cpu_to_le32(in_trans->tag);
605 	out_trans->count = cpu_to_le32((size - sizeof(*out_trans)) /
606 								sizeof(struct wire_addr_size_pair));
607 
608 	*user_len += in_trans->hdr.len;
609 
610 	if (resources->dma_chunk_id) {
611 		out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
612 	} else if (need_cont_dma) {
613 		while (resources->dma_chunk_id == 0)
614 			resources->dma_chunk_id = atomic_inc_return(&usr->chunk_id);
615 
616 		out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
617 	}
618 	resources->trans_hdr = trans;
619 
620 	list_add(&xfer->list, &resources->dma_xfers);
621 	return 0;
622 
623 cleanup_xfer:
624 	cleanup_xfer(qdev, xfer);
625 free_xfer:
626 	kfree(xfer);
627 	return ret;
628 }
629 
630 static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
631 			   u32 *user_len, struct ioctl_resources *resources)
632 {
633 	struct qaic_manage_trans_activate_to_dev *in_trans = trans;
634 	struct wire_trans_activate_to_dev *out_trans;
635 	struct wrapper_msg *trans_wrapper;
636 	struct wrapper_msg *wrapper;
637 	struct wire_msg *msg;
638 	dma_addr_t dma_addr;
639 	u32 msg_hdr_len;
640 	void *buf;
641 	u32 nelem;
642 	u32 size;
643 	int ret;
644 
645 	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
646 	msg = &wrapper->msg;
647 	msg_hdr_len = le32_to_cpu(msg->hdr.len);
648 
649 	if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_WIRE_MSG_LENGTH)
650 		return -ENOSPC;
651 
652 	if (!in_trans->queue_size)
653 		return -EINVAL;
654 
655 	if (in_trans->pad)
656 		return -EINVAL;
657 
658 	nelem = in_trans->queue_size;
659 	if (check_mul_overflow((u32)(get_dbc_req_elem_size() + get_dbc_rsp_elem_size()),
660 			       nelem,
661 			       &size))
662 		return -EINVAL;
663 
664 	if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size)
665 		return -EINVAL;
666 
667 	size = ALIGN((size + QAIC_DBC_Q_GAP), QAIC_DBC_Q_BUF_ALIGN);
668 
669 	buf = dma_alloc_coherent(&qdev->pdev->dev, size, &dma_addr, GFP_KERNEL);
670 	if (!buf)
671 		return -ENOMEM;
672 
673 	trans_wrapper = add_wrapper(wrappers,
674 				    offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
675 	if (!trans_wrapper) {
676 		ret = -ENOMEM;
677 		goto free_dma;
678 	}
679 	trans_wrapper->len = sizeof(*out_trans);
680 	out_trans = (struct wire_trans_activate_to_dev *)&trans_wrapper->trans;
681 
682 	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_ACTIVATE_TO_DEV);
683 	out_trans->hdr.len = cpu_to_le32(sizeof(*out_trans));
684 	out_trans->buf_len = cpu_to_le32(size);
685 	out_trans->req_q_addr = cpu_to_le64(dma_addr);
686 	out_trans->req_q_size = cpu_to_le32(nelem);
687 	out_trans->rsp_q_addr = cpu_to_le64(dma_addr + size - nelem * get_dbc_rsp_elem_size());
688 	out_trans->rsp_q_size = cpu_to_le32(nelem);
689 	out_trans->options = cpu_to_le32(in_trans->options);
690 
691 	*user_len += in_trans->hdr.len;
692 	msg->hdr.len = cpu_to_le32(msg_hdr_len + sizeof(*out_trans));
693 	msg->hdr.count = incr_le32(msg->hdr.count);
694 
695 	resources->buf = buf;
696 	resources->dma_addr = dma_addr;
697 	resources->total_size = size;
698 	resources->nelem = nelem;
699 	resources->rsp_q_base = buf + size - nelem * get_dbc_rsp_elem_size();
700 	return 0;
701 
702 free_dma:
703 	dma_free_coherent(&qdev->pdev->dev, size, buf, dma_addr);
704 	return ret;
705 }
706 
707 static int encode_deactivate(struct qaic_device *qdev, void *trans,
708 			     u32 *user_len, struct qaic_user *usr)
709 {
710 	struct qaic_manage_trans_deactivate *in_trans = trans;
711 
712 	if (in_trans->dbc_id >= qdev->num_dbc || in_trans->pad)
713 		return -EINVAL;
714 
715 	*user_len += in_trans->hdr.len;
716 
717 	return disable_dbc(qdev, in_trans->dbc_id, usr);
718 }
719 
720 static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
721 			 u32 *user_len)
722 {
723 	struct qaic_manage_trans_status_to_dev *in_trans = trans;
724 	struct wire_trans_status_to_dev *out_trans;
725 	struct wrapper_msg *trans_wrapper;
726 	struct wrapper_msg *wrapper;
727 	struct wire_msg *msg;
728 	u32 msg_hdr_len;
729 
730 	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
731 	msg = &wrapper->msg;
732 	msg_hdr_len = le32_to_cpu(msg->hdr.len);
733 
734 	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH)
735 		return -ENOSPC;
736 
737 	trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
738 	if (!trans_wrapper)
739 		return -ENOMEM;
740 
741 	trans_wrapper->len = sizeof(*out_trans);
742 	out_trans = (struct wire_trans_status_to_dev *)&trans_wrapper->trans;
743 
744 	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_STATUS_TO_DEV);
745 	out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);
746 	msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
747 	msg->hdr.count = incr_le32(msg->hdr.count);
748 	*user_len += in_trans->hdr.len;
749 
750 	return 0;
751 }
752 
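/*
 * encode_message() - Convert a user manage message into its wire form.
 *
 * Walks the transactions in user_msg and appends the corresponding
 * wire_trans_* encoding of each one to the wrapper list, updating the wire
 * message header length and count as it goes. If this is a DMA continuation
 * (resources->dma_chunk_id is set), only the saved DMA transaction is
 * re-encoded. On any failure the DMA mappings and DBC buffer collected so
 * far are released.
 */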
753 static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
754 			  struct wrapper_list *wrappers, struct ioctl_resources *resources,
755 			  struct qaic_user *usr)
756 {
757 	struct qaic_manage_trans_hdr *trans_hdr;
758 	struct wrapper_msg *wrapper;
759 	struct wire_msg *msg;
760 	u32 user_len = 0;
761 	int ret;
762 	int i;
763 
764 	if (!user_msg->count ||
765 	    user_msg->len < sizeof(*trans_hdr)) {
766 		ret = -EINVAL;
767 		goto out;
768 	}
769 
770 	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
771 	msg = &wrapper->msg;
772 
773 	msg->hdr.len = cpu_to_le32(sizeof(msg->hdr));
774 
775 	if (resources->dma_chunk_id) {
776 		ret = encode_dma(qdev, resources->trans_hdr, wrappers, &user_len, resources, usr);
777 		msg->hdr.count = cpu_to_le32(1);
778 		goto out;
779 	}
780 
781 	for (i = 0; i < user_msg->count; ++i) {
782 		if (user_len > user_msg->len - sizeof(*trans_hdr)) {
783 			ret = -EINVAL;
784 			break;
785 		}
786 		trans_hdr = (struct qaic_manage_trans_hdr *)(user_msg->data + user_len);
787 		if (trans_hdr->len < sizeof(trans_hdr) ||
788 		    size_add(user_len, trans_hdr->len) > user_msg->len) {
789 			ret = -EINVAL;
790 			break;
791 		}
792 
793 		switch (trans_hdr->type) {
794 		case QAIC_TRANS_PASSTHROUGH_FROM_USR:
795 			ret = encode_passthrough(qdev, trans_hdr, wrappers, &user_len);
796 			break;
797 		case QAIC_TRANS_DMA_XFER_FROM_USR:
798 			ret = encode_dma(qdev, trans_hdr, wrappers, &user_len, resources, usr);
799 			break;
800 		case QAIC_TRANS_ACTIVATE_FROM_USR:
801 			ret = encode_activate(qdev, trans_hdr, wrappers, &user_len, resources);
802 			break;
803 		case QAIC_TRANS_DEACTIVATE_FROM_USR:
804 			ret = encode_deactivate(qdev, trans_hdr, &user_len, usr);
805 			break;
806 		case QAIC_TRANS_STATUS_FROM_USR:
807 			ret = encode_status(qdev, trans_hdr, wrappers, &user_len);
808 			break;
809 		default:
810 			ret = -EINVAL;
811 			break;
812 		}
813 
814 		if (ret)
815 			goto out;
816 	}
817 
818 	if (user_len != user_msg->len)
819 		ret = -EINVAL;
820 out:
821 	if (ret) {
822 		free_dma_xfers(qdev, resources);
823 		free_dbc_buf(qdev, resources);
824 		return ret;
825 	}
826 
827 	return 0;
828 }
829 
830 static int decode_passthrough(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
831 			      u32 *msg_len)
832 {
833 	struct qaic_manage_trans_passthrough *out_trans;
834 	struct wire_trans_passthrough *in_trans = trans;
835 	u32 len;
836 
837 	out_trans = (void *)user_msg->data + user_msg->len;
838 
839 	len = le32_to_cpu(in_trans->hdr.len);
840 	if (len % 8 != 0)
841 		return -EINVAL;
842 
843 	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
844 		return -ENOSPC;
845 
846 	memcpy(out_trans->data, in_trans->data, len - sizeof(in_trans->hdr));
847 	user_msg->len += len;
848 	*msg_len += len;
849 	out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
850 	out_trans->hdr.len = len;
851 
852 	return 0;
853 }
854 
855 static int decode_activate(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
856 			   u32 *msg_len, struct ioctl_resources *resources, struct qaic_user *usr)
857 {
858 	struct qaic_manage_trans_activate_from_dev *out_trans;
859 	struct wire_trans_activate_from_dev *in_trans = trans;
860 	u32 len;
861 
862 	out_trans = (void *)user_msg->data + user_msg->len;
863 
864 	len = le32_to_cpu(in_trans->hdr.len);
865 	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
866 		return -ENOSPC;
867 
868 	user_msg->len += len;
869 	*msg_len += len;
870 	out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
871 	out_trans->hdr.len = len;
872 	out_trans->status = le32_to_cpu(in_trans->status);
873 	out_trans->dbc_id = le32_to_cpu(in_trans->dbc_id);
874 	out_trans->options = le64_to_cpu(in_trans->options);
875 
876 	if (!resources->buf)
877 		/* how did we get an activate response without a request? */
878 		return -EINVAL;
879 
880 	if (out_trans->dbc_id >= qdev->num_dbc)
881 		/*
882 		 * The device assigned an invalid resource, which should never
883 		 * happen. Return an error so the user can try to recover.
884 		 */
885 		return -ENODEV;
886 
887 	if (out_trans->status)
888 		/*
889 		 * Allocating resources failed on the device side. This is not
890 		 * expected behaviour; the user is expected to handle this situation.
891 		 */
892 		return -ECANCELED;
893 
894 	resources->status = out_trans->status;
895 	resources->dbc_id = out_trans->dbc_id;
896 	save_dbc_buf(qdev, resources, usr);
897 
898 	return 0;
899 }
900 
901 static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len,
902 			     struct qaic_user *usr)
903 {
904 	struct wire_trans_deactivate_from_dev *in_trans = trans;
905 	u32 dbc_id = le32_to_cpu(in_trans->dbc_id);
906 	u32 status = le32_to_cpu(in_trans->status);
907 
908 	if (dbc_id >= qdev->num_dbc)
909 		/*
910 		 * The device assigned an invalid resource, which should never
911 		 * happen. Inject an error so the user can try to recover.
912 		 */
913 		return -ENODEV;
914 
915 	if (status) {
916 		/*
917 		 * Releasing resources failed on the device side, which puts
918 		 * us in a bind since they may still be in use, so enable the
919 		 * dbc. The user is expected to retry the deactivation.
920 		 */
921 		enable_dbc(qdev, dbc_id, usr);
922 		return -ECANCELED;
923 	}
924 
925 	release_dbc(qdev, dbc_id);
926 	*msg_len += sizeof(*in_trans);
927 
928 	return 0;
929 }
930 
931 static int decode_status(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
932 			 u32 *user_len, struct wire_msg *msg)
933 {
934 	struct qaic_manage_trans_status_from_dev *out_trans;
935 	struct wire_trans_status_from_dev *in_trans = trans;
936 	u32 len;
937 
938 	out_trans = (void *)user_msg->data + user_msg->len;
939 
940 	len = le32_to_cpu(in_trans->hdr.len);
941 	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
942 		return -ENOSPC;
943 
944 	out_trans->hdr.type = QAIC_TRANS_STATUS_FROM_DEV;
945 	out_trans->hdr.len = len;
946 	out_trans->major = le16_to_cpu(in_trans->major);
947 	out_trans->minor = le16_to_cpu(in_trans->minor);
948 	out_trans->status_flags = le64_to_cpu(in_trans->status_flags);
949 	out_trans->status = le32_to_cpu(in_trans->status);
950 	*user_len += le32_to_cpu(in_trans->hdr.len);
951 	user_msg->len += len;
952 
953 	if (out_trans->status)
954 		return -ECANCELED;
955 	if (out_trans->status_flags & BIT(0) && !valid_crc(msg))
956 		return -EPIPE;
957 
958 	return 0;
959 }
960 
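/*
 * decode_message() - Convert a wire response from the device back into the
 * user representation in user_msg. The header length is validated against
 * QAIC_MANAGE_MAX_MSG_LENGTH, then each wire transaction is decoded by type;
 * activate/deactivate responses also update the driver's DBC state.
 */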
961 static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
962 			  struct wire_msg *msg, struct ioctl_resources *resources,
963 			  struct qaic_user *usr)
964 {
965 	u32 msg_hdr_len = le32_to_cpu(msg->hdr.len);
966 	struct wire_trans_hdr *trans_hdr;
967 	u32 msg_len = 0;
968 	int ret;
969 	int i;
970 
971 	if (msg_hdr_len < sizeof(*trans_hdr) ||
972 	    msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
973 		return -EINVAL;
974 
975 	user_msg->len = 0;
976 	user_msg->count = le32_to_cpu(msg->hdr.count);
977 
978 	for (i = 0; i < user_msg->count; ++i) {
979 		u32 hdr_len;
980 
981 		if (msg_len > msg_hdr_len - sizeof(*trans_hdr))
982 			return -EINVAL;
983 
984 		trans_hdr = (struct wire_trans_hdr *)(msg->data + msg_len);
985 		hdr_len = le32_to_cpu(trans_hdr->len);
986 		if (hdr_len < sizeof(*trans_hdr) ||
987 		    size_add(msg_len, hdr_len) > msg_hdr_len)
988 			return -EINVAL;
989 
990 		switch (le32_to_cpu(trans_hdr->type)) {
991 		case QAIC_TRANS_PASSTHROUGH_FROM_DEV:
992 			ret = decode_passthrough(qdev, trans_hdr, user_msg, &msg_len);
993 			break;
994 		case QAIC_TRANS_ACTIVATE_FROM_DEV:
995 			ret = decode_activate(qdev, trans_hdr, user_msg, &msg_len, resources, usr);
996 			break;
997 		case QAIC_TRANS_DEACTIVATE_FROM_DEV:
998 			ret = decode_deactivate(qdev, trans_hdr, &msg_len, usr);
999 			break;
1000 		case QAIC_TRANS_STATUS_FROM_DEV:
1001 			ret = decode_status(qdev, trans_hdr, user_msg, &msg_len, msg);
1002 			break;
1003 		default:
1004 			return -EINVAL;
1005 		}
1006 
1007 		if (ret)
1008 			return ret;
1009 	}
1010 
1011 	if (msg_len != (msg_hdr_len - sizeof(msg->hdr)))
1012 		return -EINVAL;
1013 
1014 	return 0;
1015 }
1016 
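/*
 * msg_xfer() - Send an encoded message to the device and wait for the reply.
 *
 * Called with cntl_mutex held; the mutex is released before waiting (and on
 * every error path). A receive buffer is queued first, then every wrapper is
 * queued on the control channel, and the caller sleeps on elem.xfer_done
 * until resp_worker() matches the response by sequence number or the
 * control_resp_timeout_s timeout expires. Returns the response buffer (the
 * caller must free it) or an ERR_PTR().
 */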
1017 static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 seq_num,
1018 		      bool ignore_signal)
1019 {
1020 	struct xfer_queue_elem elem;
1021 	struct wire_msg *out_buf;
1022 	struct wrapper_msg *w;
1023 	long ret = -EAGAIN;
1024 	int xfer_count = 0;
1025 	int retry_count;
1026 
1027 	/* Allow QAIC_BOOT state since we need to check control protocol version */
1028 	if (qdev->dev_state == QAIC_OFFLINE) {
1029 		mutex_unlock(&qdev->cntl_mutex);
1030 		return ERR_PTR(-ENODEV);
1031 	}
1032 
1033 	/* Attempt to avoid a partial commit of a message */
1034 	list_for_each_entry(w, &wrappers->list, list)
1035 		xfer_count++;
1036 
1037 	for (retry_count = 0; retry_count < QAIC_MHI_RETRY_MAX; retry_count++) {
1038 		if (xfer_count <= mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) {
1039 			ret = 0;
1040 			break;
1041 		}
1042 		msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS);
1043 		if (signal_pending(current))
1044 			break;
1045 	}
1046 
1047 	if (ret) {
1048 		mutex_unlock(&qdev->cntl_mutex);
1049 		return ERR_PTR(ret);
1050 	}
1051 
1052 	elem.seq_num = seq_num;
1053 	elem.buf = NULL;
1054 	init_completion(&elem.xfer_done);
1055 	if (likely(!qdev->cntl_lost_buf)) {
1056 		/*
1057 		 * The max size of request to device is QAIC_MANAGE_WIRE_MSG_LENGTH.
1058 		 * The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH.
1059 		 */
1060 		out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL);
1061 		if (!out_buf) {
1062 			mutex_unlock(&qdev->cntl_mutex);
1063 			return ERR_PTR(-ENOMEM);
1064 		}
1065 
1066 		ret = mhi_queue_buf(qdev->cntl_ch, DMA_FROM_DEVICE, out_buf,
1067 				    QAIC_MANAGE_MAX_MSG_LENGTH, MHI_EOT);
1068 		if (ret) {
1069 			mutex_unlock(&qdev->cntl_mutex);
1070 			return ERR_PTR(ret);
1071 		}
1072 	} else {
1073 		/*
1074 		 * We lost a buffer because we queued a recv buf, but then
1075 		 * queuing the corresponding tx buf failed. To try to avoid
1076 		 * a memory leak, let's reclaim it and use it for this
1077 		 * transaction.
1078 		 */
1079 		qdev->cntl_lost_buf = false;
1080 	}
1081 
1082 	list_for_each_entry(w, &wrappers->list, list) {
1083 		kref_get(&w->ref_count);
1084 		ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
1085 				    list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
1086 		if (ret) {
1087 			qdev->cntl_lost_buf = true;
1088 			kref_put(&w->ref_count, free_wrapper);
1089 			mutex_unlock(&qdev->cntl_mutex);
1090 			return ERR_PTR(ret);
1091 		}
1092 	}
1093 
1094 	list_add_tail(&elem.list, &qdev->cntl_xfer_list);
1095 	mutex_unlock(&qdev->cntl_mutex);
1096 
1097 	if (ignore_signal)
1098 		ret = wait_for_completion_timeout(&elem.xfer_done, control_resp_timeout_s * HZ);
1099 	else
1100 		ret = wait_for_completion_interruptible_timeout(&elem.xfer_done,
1101 								control_resp_timeout_s * HZ);
1102 	/*
1103 	 * Not using _interruptible because we have to clean up or we'll
1104 	 * likely cause memory corruption
1105 	 */
1106 	mutex_lock(&qdev->cntl_mutex);
1107 	if (!list_empty(&elem.list))
1108 		list_del(&elem.list);
1109 	if (!ret && !elem.buf)
1110 		ret = -ETIMEDOUT;
1111 	else if (ret > 0 && !elem.buf)
1112 		ret = -EIO;
1113 	mutex_unlock(&qdev->cntl_mutex);
1114 
1115 	if (ret < 0) {
1116 		kfree(elem.buf);
1117 		return ERR_PTR(ret);
1118 	} else if (!qdev->valid_crc(elem.buf)) {
1119 		kfree(elem.buf);
1120 		return ERR_PTR(-EPIPE);
1121 	}
1122 
1123 	return elem.buf;
1124 }
1125 
1126 /* Add a transaction to abort the outstanding DMA continuation */
1127 static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 dma_chunk_id)
1128 {
1129 	struct wire_trans_dma_xfer *out_trans;
1130 	u32 size = sizeof(*out_trans);
1131 	struct wrapper_msg *wrapper;
1132 	struct wrapper_msg *w;
1133 	struct wire_msg *msg;
1134 
1135 	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
1136 	msg = &wrapper->msg;
1137 
1138 	/* Remove all but the first wrapper, which has the msg header */
1139 	list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
1140 		if (!list_is_first(&wrapper->list, &wrappers->list))
1141 			kref_put(&wrapper->ref_count, free_wrapper);
1142 
1143 	wrapper = add_wrapper(wrappers, sizeof(*wrapper));
1144 
1145 	if (!wrapper)
1146 		return -ENOMEM;
1147 
1148 	out_trans = (struct wire_trans_dma_xfer *)&wrapper->trans;
1149 	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
1150 	out_trans->hdr.len = cpu_to_le32(size);
1151 	out_trans->tag = cpu_to_le32(0);
1152 	out_trans->count = cpu_to_le32(0);
1153 	out_trans->dma_chunk_id = cpu_to_le32(dma_chunk_id);
1154 
1155 	msg->hdr.len = cpu_to_le32(size + sizeof(*msg));
1156 	msg->hdr.count = cpu_to_le32(1);
1157 	wrapper->len = size;
1158 
1159 	return 0;
1160 }
1161 
1162 static struct wrapper_list *alloc_wrapper_list(void)
1163 {
1164 	struct wrapper_list *wrappers;
1165 
1166 	wrappers = kmalloc(sizeof(*wrappers), GFP_KERNEL);
1167 	if (!wrappers)
1168 		return NULL;
1169 	INIT_LIST_HEAD(&wrappers->list);
1170 	spin_lock_init(&wrappers->lock);
1171 
1172 	return wrappers;
1173 }
1174 
1175 static int qaic_manage_msg_xfer(struct qaic_device *qdev, struct qaic_user *usr,
1176 				struct manage_msg *user_msg, struct ioctl_resources *resources,
1177 				struct wire_msg **rsp)
1178 {
1179 	struct wrapper_list *wrappers;
1180 	struct wrapper_msg *wrapper;
1181 	struct wrapper_msg *w;
1182 	bool all_done = false;
1183 	struct wire_msg *msg;
1184 	int ret;
1185 
1186 	wrappers = alloc_wrapper_list();
1187 	if (!wrappers)
1188 		return -ENOMEM;
1189 
1190 	wrapper = add_wrapper(wrappers, sizeof(*wrapper));
1191 	if (!wrapper) {
1192 		kfree(wrappers);
1193 		return -ENOMEM;
1194 	}
1195 
1196 	msg = &wrapper->msg;
1197 	wrapper->len = sizeof(*msg);
1198 
1199 	ret = encode_message(qdev, user_msg, wrappers, resources, usr);
1200 	if (ret && resources->dma_chunk_id)
1201 		ret = abort_dma_cont(qdev, wrappers, resources->dma_chunk_id);
1202 	if (ret)
1203 		goto encode_failed;
1204 
1205 	ret = mutex_lock_interruptible(&qdev->cntl_mutex);
1206 	if (ret)
1207 		goto lock_failed;
1208 
1209 	msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
1210 	msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
1211 
1212 	if (usr) {
1213 		msg->hdr.handle = cpu_to_le32(usr->handle);
1214 		msg->hdr.partition_id = cpu_to_le32(usr->qddev->partition_id);
1215 	} else {
1216 		msg->hdr.handle = 0;
1217 		msg->hdr.partition_id = cpu_to_le32(QAIC_NO_PARTITION);
1218 	}
1219 
1220 	msg->hdr.padding = cpu_to_le32(0);
1221 	msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));
1222 
1223 	/* msg_xfer releases the mutex */
1224 	*rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, false);
1225 	if (IS_ERR(*rsp))
1226 		ret = PTR_ERR(*rsp);
1227 
1228 lock_failed:
1229 	free_dma_xfers(qdev, resources);
1230 encode_failed:
1231 	spin_lock(&wrappers->lock);
1232 	list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
1233 		kref_put(&wrapper->ref_count, free_wrapper);
1234 	all_done = list_empty(&wrappers->list);
1235 	spin_unlock(&wrappers->lock);
1236 	if (all_done)
1237 		kfree(wrappers);
1238 
1239 	return ret;
1240 }
1241 
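/*
 * qaic_manage() - Top level handler for one manage message.
 *
 * Encodes and sends the user's message, then either decodes the device's
 * response into user_msg or, if the device answered with a
 * QAIC_TRANS_DMA_XFER_CONT transaction, loops to send the next chunk of the
 * outstanding DMA transfer until the device has received it all.
 */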
1242 static int qaic_manage(struct qaic_device *qdev, struct qaic_user *usr, struct manage_msg *user_msg)
1243 {
1244 	struct wire_trans_dma_xfer_cont *dma_cont = NULL;
1245 	struct ioctl_resources resources;
1246 	struct wire_msg *rsp = NULL;
1247 	int ret;
1248 
1249 	memset(&resources, 0, sizeof(struct ioctl_resources));
1250 
1251 	INIT_LIST_HEAD(&resources.dma_xfers);
1252 
1253 	if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH ||
1254 	    user_msg->count > QAIC_MANAGE_MAX_MSG_LENGTH / sizeof(struct qaic_manage_trans_hdr))
1255 		return -EINVAL;
1256 
1257 dma_xfer_continue:
1258 	ret = qaic_manage_msg_xfer(qdev, usr, user_msg, &resources, &rsp);
1259 	if (ret)
1260 		return ret;
1261 	/* dma_cont should be the only transaction if present */
1262 	if (le32_to_cpu(rsp->hdr.count) == 1) {
1263 		dma_cont = (struct wire_trans_dma_xfer_cont *)rsp->data;
1264 		if (le32_to_cpu(dma_cont->hdr.type) != QAIC_TRANS_DMA_XFER_CONT)
1265 			dma_cont = NULL;
1266 	}
1267 	if (dma_cont) {
1268 		if (le32_to_cpu(dma_cont->dma_chunk_id) == resources.dma_chunk_id &&
1269 		    le64_to_cpu(dma_cont->xferred_size) == resources.xferred_dma_size) {
1270 			kfree(rsp);
1271 			goto dma_xfer_continue;
1272 		}
1273 
1274 		ret = -EINVAL;
1275 		goto dma_cont_failed;
1276 	}
1277 
1278 	ret = decode_message(qdev, user_msg, rsp, &resources, usr);
1279 
1280 dma_cont_failed:
1281 	free_dbc_buf(qdev, &resources);
1282 	kfree(rsp);
1283 	return ret;
1284 }
1285 
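/*
 * qaic_manage_ioctl() - DRM_IOCTL_QAIC_MANAGE entry point.
 *
 * An illustrative userspace sketch of a status query (not taken from this
 * file; error handling omitted, 'fd' is assumed to be an open accel node for
 * this device, and buf[] must be large enough for the device's response):
 *
 *	char buf[256] = {};
 *	struct qaic_manage_trans_status_to_dev *query = (void *)buf;
 *	struct qaic_manage_msg msg = {};
 *
 *	query->hdr.type = QAIC_TRANS_STATUS_FROM_USR;
 *	query->hdr.len = sizeof(query->hdr);
 *	msg.count = 1;
 *	msg.len = sizeof(*query);
 *	msg.data = (__u64)(uintptr_t)buf;
 *	ioctl(fd, DRM_IOCTL_QAIC_MANAGE, &msg);
 *
 * On success (or -ECANCELED) the transformed message, including the device's
 * reply transactions, is copied back to the same buffer.
 */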
1286 int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1287 {
1288 	struct qaic_manage_msg *user_msg = data;
1289 	struct qaic_device *qdev;
1290 	struct manage_msg *msg;
1291 	struct qaic_user *usr;
1292 	u8 __user *user_data;
1293 	int qdev_rcu_id;
1294 	int usr_rcu_id;
1295 	int ret;
1296 
1297 	if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH)
1298 		return -EINVAL;
1299 
1300 	usr = file_priv->driver_priv;
1301 
1302 	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1303 	if (!usr->qddev) {
1304 		srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1305 		return -ENODEV;
1306 	}
1307 
1308 	qdev = usr->qddev->qdev;
1309 
1310 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1311 	if (qdev->dev_state != QAIC_ONLINE) {
1312 		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1313 		srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1314 		return -ENODEV;
1315 	}
1316 
1317 	msg = kzalloc(QAIC_MANAGE_MAX_MSG_LENGTH + sizeof(*msg), GFP_KERNEL);
1318 	if (!msg) {
1319 		ret = -ENOMEM;
1320 		goto out;
1321 	}
1322 
1323 	msg->len = user_msg->len;
1324 	msg->count = user_msg->count;
1325 
1326 	user_data = u64_to_user_ptr(user_msg->data);
1327 
1328 	if (copy_from_user(msg->data, user_data, user_msg->len)) {
1329 		ret = -EFAULT;
1330 		goto free_msg;
1331 	}
1332 
1333 	ret = qaic_manage(qdev, usr, msg);
1334 
1335 	/*
1336 	 * If qaic_manage() is successful then we copy the message back to
1337 	 * userspace memory, with one exception for -ECANCELED.
1338 	 * -ECANCELED means that the device has NACKed the message with a
1339 	 * status error code which userspace would like to know.
1340 	 */
1341 	if (ret == -ECANCELED || !ret) {
1342 		if (copy_to_user(user_data, msg->data, msg->len)) {
1343 			ret = -EFAULT;
1344 		} else {
1345 			user_msg->len = msg->len;
1346 			user_msg->count = msg->count;
1347 		}
1348 	}
1349 
1350 free_msg:
1351 	kfree(msg);
1352 out:
1353 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1354 	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1355 	return ret;
1356 }
1357 
1358 int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor)
1359 {
1360 	struct qaic_manage_trans_status_from_dev *status_result;
1361 	struct qaic_manage_trans_status_to_dev *status_query;
1362 	struct manage_msg *user_msg;
1363 	int ret;
1364 
1365 	user_msg = kmalloc(sizeof(*user_msg) + sizeof(*status_result), GFP_KERNEL);
1366 	if (!user_msg) {
1367 		ret = -ENOMEM;
1368 		goto out;
1369 	}
1370 	user_msg->len = sizeof(*status_query);
1371 	user_msg->count = 1;
1372 
1373 	status_query = (struct qaic_manage_trans_status_to_dev *)user_msg->data;
1374 	status_query->hdr.type = QAIC_TRANS_STATUS_FROM_USR;
1375 	status_query->hdr.len = sizeof(status_query->hdr);
1376 
1377 	ret = qaic_manage(qdev, usr, user_msg);
1378 	if (ret)
1379 		goto kfree_user_msg;
1380 	status_result = (struct qaic_manage_trans_status_from_dev *)user_msg->data;
1381 	*major = status_result->major;
1382 	*minor = status_result->minor;
1383 
1384 	if (status_result->status_flags & BIT(0)) { /* device is using CRC */
1385 		/* By default qdev->gen_crc is programmed to generate CRC */
1386 		qdev->valid_crc = valid_crc;
1387 	} else {
1388 		/* By default qdev->valid_crc is programmed to bypass CRC */
1389 		qdev->gen_crc = gen_crc_stub;
1390 	}
1391 
1392 kfree_user_msg:
1393 	kfree(user_msg);
1394 out:
1395 	return ret;
1396 }
1397 
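/*
 * resp_worker() - Match a received control response to its waiter.
 *
 * Runs on cntl_wq. Finds the xfer_queue_elem whose sequence number matches
 * the response, hands it the buffer and completes xfer_done. If no waiter is
 * found the request already timed out, so the buffer is dropped.
 */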
1398 static void resp_worker(struct work_struct *work)
1399 {
1400 	struct resp_work *resp = container_of(work, struct resp_work, work);
1401 	struct qaic_device *qdev = resp->qdev;
1402 	struct wire_msg *msg = resp->buf;
1403 	struct xfer_queue_elem *elem;
1404 	struct xfer_queue_elem *i;
1405 	bool found = false;
1406 
1407 	mutex_lock(&qdev->cntl_mutex);
1408 	list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
1409 		if (elem->seq_num == le32_to_cpu(msg->hdr.sequence_number)) {
1410 			found = true;
1411 			list_del_init(&elem->list);
1412 			elem->buf = msg;
1413 			complete_all(&elem->xfer_done);
1414 			break;
1415 		}
1416 	}
1417 	mutex_unlock(&qdev->cntl_mutex);
1418 
1419 	if (!found)
1420 		/* request must have timed out, drop packet */
1421 		kfree(msg);
1422 
1423 	kfree(resp);
1424 }
1425 
1426 static void free_wrapper_from_list(struct wrapper_list *wrappers, struct wrapper_msg *wrapper)
1427 {
1428 	bool all_done = false;
1429 
1430 	spin_lock(&wrappers->lock);
1431 	kref_put(&wrapper->ref_count, free_wrapper);
1432 	all_done = list_empty(&wrappers->list);
1433 	spin_unlock(&wrappers->lock);
1434 
1435 	if (all_done)
1436 		kfree(wrappers);
1437 }
1438 
1439 void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
1440 {
1441 	struct wire_msg *msg = mhi_result->buf_addr;
1442 	struct wrapper_msg *wrapper = container_of(msg, struct wrapper_msg, msg);
1443 
1444 	free_wrapper_from_list(wrapper->head, wrapper);
1445 }
1446 
1447 void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
1448 {
1449 	struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
1450 	struct wire_msg *msg = mhi_result->buf_addr;
1451 	struct resp_work *resp;
1452 
1453 	if (mhi_result->transaction_status || msg->hdr.magic_number != MANAGE_MAGIC_NUMBER) {
1454 		kfree(msg);
1455 		return;
1456 	}
1457 
1458 	resp = kmalloc(sizeof(*resp), GFP_ATOMIC);
1459 	if (!resp) {
1460 		kfree(msg);
1461 		return;
1462 	}
1463 
1464 	INIT_WORK(&resp->work, resp_worker);
1465 	resp->qdev = qdev;
1466 	resp->buf = msg;
1467 	queue_work(qdev->cntl_wq, &resp->work);
1468 }
1469 
1470 int qaic_control_open(struct qaic_device *qdev)
1471 {
1472 	if (!qdev->cntl_ch)
1473 		return -ENODEV;
1474 
1475 	qdev->cntl_lost_buf = false;
1476 	/*
1477 	 * By default qaic should assume that the device has CRC enabled.
1478 	 * Qaic learns whether the device has CRC enabled or disabled during
1479 	 * the device status transaction, which is the first transaction
1480 	 * performed on the control channel.
1481 	 *
1482 	 * So CRC validation of the first device status transaction response is
1483 	 * skipped (by calling valid_crc_stub) and is done later during decoding
1484 	 * if the device has CRC enabled.
1485 	 * Once qaic knows whether the device has CRC enabled or not, it acts
1486 	 * accordingly.
1487 	 */
1488 	qdev->gen_crc = gen_crc;
1489 	qdev->valid_crc = valid_crc_stub;
1490 
1491 	return mhi_prepare_for_transfer(qdev->cntl_ch);
1492 }
1493 
1494 void qaic_control_close(struct qaic_device *qdev)
1495 {
1496 	mhi_unprepare_from_transfer(qdev->cntl_ch);
1497 }
1498 
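/*
 * qaic_release_usr() - Tell the device that a user is going away.
 *
 * Sends a terminate transaction for usr->handle so the device can release
 * the resources tied to that user. Signals are ignored (ignore_signal is
 * true) so that a killed process still gives the device a chance to stop any
 * in-flight DMA before we return.
 */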
1499 void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr)
1500 {
1501 	struct wire_trans_terminate_to_dev *trans;
1502 	struct wrapper_list *wrappers;
1503 	struct wrapper_msg *wrapper;
1504 	struct wire_msg *msg;
1505 	struct wire_msg *rsp;
1506 
1507 	wrappers = alloc_wrapper_list();
1508 	if (!wrappers)
1509 		return;
1510 
1511 	wrapper = add_wrapper(wrappers, sizeof(*wrapper) + sizeof(*msg) + sizeof(*trans));
1512 	if (!wrapper)
1513 		return;
1514 
1515 	msg = &wrapper->msg;
1516 
1517 	trans = (struct wire_trans_terminate_to_dev *)msg->data;
1518 
1519 	trans->hdr.type = cpu_to_le32(QAIC_TRANS_TERMINATE_TO_DEV);
1520 	trans->hdr.len = cpu_to_le32(sizeof(*trans));
1521 	trans->handle = cpu_to_le32(usr->handle);
1522 
1523 	mutex_lock(&qdev->cntl_mutex);
1524 	wrapper->len = sizeof(msg->hdr) + sizeof(*trans);
1525 	msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
1526 	msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
1527 	msg->hdr.len = cpu_to_le32(wrapper->len);
1528 	msg->hdr.count = cpu_to_le32(1);
1529 	msg->hdr.handle = cpu_to_le32(usr->handle);
1530 	msg->hdr.padding = cpu_to_le32(0);
1531 	msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));
1532 
1533 	/*
1534 	 * msg_xfer releases the mutex
1535 	 * We don't care about the return of msg_xfer since we will not do
1536 	 * anything different based on what happens.
1537 	 * We ignore pending signals since one will be set if the user is
1538 	 * killed, and we need to give the device a chance to clean up, otherwise
1539 	 * DMA may still be in progress when we return.
1540 	 */
1541 	rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, true);
1542 	if (!IS_ERR(rsp))
1543 		kfree(rsp);
1544 	free_wrapper_from_list(wrappers, wrapper);
1545 }
1546 
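/*
 * wake_all_cntl() - Complete every outstanding control transfer.
 *
 * Each waiter in msg_xfer() is woken without a response buffer attached and
 * will therefore report an error instead of sleeping until its timeout
 * expires; typically used when the device is not expected to respond.
 */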
1547 void wake_all_cntl(struct qaic_device *qdev)
1548 {
1549 	struct xfer_queue_elem *elem;
1550 	struct xfer_queue_elem *i;
1551 
1552 	mutex_lock(&qdev->cntl_mutex);
1553 	list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
1554 		list_del_init(&elem->list);
1555 		complete_all(&elem->xfer_done);
1556 	}
1557 	mutex_unlock(&qdev->cntl_mutex);
1558 }
1559