// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <asm/byteorder.h>
#include <linux/completion.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <uapi/drm/qaic_accel.h>

#include "qaic.h"

#define MANAGE_MAGIC_NUMBER		((__force __le32)0x43494151) /* "QAIC" in little endian */
#define QAIC_DBC_Q_GAP			SZ_256
#define QAIC_DBC_Q_BUF_ALIGN		SZ_4K
#define QAIC_MANAGE_WIRE_MSG_LENGTH	SZ_64K /* Max DMA message length */
#define QAIC_WRAPPER_MAX_SIZE		SZ_4K
#define QAIC_MHI_RETRY_WAIT_MS		100
#define QAIC_MHI_RETRY_MAX		20

static unsigned int control_resp_timeout_s = 60; /* 60 sec default */
module_param(control_resp_timeout_s, uint, 0600);
MODULE_PARM_DESC(control_resp_timeout_s, "Timeout for NNC responses from QSM");

struct manage_msg {
	u32 len;
	u32 count;
	u8 data[];
};

/*
 * Wire encoding structures for the manage protocol.
 * All fields are little endian on the wire.
 */
struct wire_msg_hdr {
	__le32 crc32; /* crc of everything following this field in the message */
	__le32 magic_number;
	__le32 sequence_number;
	__le32 len; /* length of this message */
	__le32 count; /* number of transactions in this message */
	__le32 handle; /* unique id to track the resources consumed */
	__le32 partition_id; /* partition id for the request (signed) */
	__le32 padding; /* must be 0 */
} __packed;

struct wire_msg {
	struct wire_msg_hdr hdr;
	u8 data[];
} __packed;

struct wire_trans_hdr {
	__le32 type;
	__le32 len;
} __packed;

/* Each message sent from driver to device is organized in a list of wrapper_msg */
struct wrapper_msg {
	struct list_head list;
	struct kref ref_count;
	u32 len; /* length of data to transfer */
	struct wrapper_list *head;
	union {
		struct wire_msg msg;
		struct wire_trans_hdr trans;
	};
};

struct wrapper_list {
	struct list_head list;
	spinlock_t lock; /* Protects the list state during additions and removals */
};

struct wire_trans_passthrough {
	struct wire_trans_hdr hdr;
	u8 data[];
} __packed;

struct wire_addr_size_pair {
	__le64 addr;
	__le64 size;
} __packed;

struct wire_trans_dma_xfer {
	struct wire_trans_hdr hdr;
	__le32 tag;
	__le32 count;
	__le32 dma_chunk_id;
	__le32 padding;
	struct wire_addr_size_pair data[];
} __packed;

/* Initiated by device to continue the DMA xfer of a large piece of data */
struct wire_trans_dma_xfer_cont {
	struct wire_trans_hdr hdr;
	__le32 dma_chunk_id;
	__le32 padding;
	__le64 xferred_size;
} __packed;

struct wire_trans_activate_to_dev {
	struct wire_trans_hdr hdr;
	__le64 req_q_addr;
	__le64 rsp_q_addr;
	__le32 req_q_size;
	__le32 rsp_q_size;
	__le32 buf_len;
	__le32 options; /* unused, but BIT(16) has meaning to the device */
} __packed;

struct wire_trans_activate_from_dev {
	struct wire_trans_hdr hdr;
	__le32 status;
	__le32 dbc_id;
	__le64 options; /* unused */
} __packed;

struct wire_trans_deactivate_from_dev {
	struct wire_trans_hdr hdr;
	__le32 status;
	__le32 dbc_id;
} __packed;

struct wire_trans_terminate_to_dev {
	struct wire_trans_hdr hdr;
	__le32 handle;
	__le32 padding;
} __packed;

struct wire_trans_terminate_from_dev {
	struct wire_trans_hdr hdr;
	__le32 status;
	__le32 padding;
} __packed;

struct wire_trans_status_to_dev {
	struct wire_trans_hdr hdr;
} __packed;

struct wire_trans_status_from_dev {
	struct wire_trans_hdr hdr;
	__le16 major;
	__le16 minor;
	__le32 status;
	__le64 status_flags;
} __packed;

struct wire_trans_validate_part_to_dev {
	struct wire_trans_hdr hdr;
	__le32 part_id;
	__le32 padding;
} __packed;

struct wire_trans_validate_part_from_dev {
	struct wire_trans_hdr hdr;
	__le32 status;
	__le32 padding;
} __packed;

struct xfer_queue_elem {
	/*
	 * Node in the list of ongoing transfer requests on the control
	 * channel. Maintained by the root device struct.
	 */
	struct list_head list;
	/* Sequence number of this transfer request */
	u32 seq_num;
	/* This is used to wait on until completion of transfer request */
	struct completion xfer_done;
	/* Received data from device */
	void *buf;
};

struct dma_xfer {
	/* Node in list of DMA transfers which is used for cleanup */
	struct list_head list;
	/* SG table of memory used for DMA */
	struct sg_table *sgt;
	/* Array of pages used for DMA */
	struct page **page_list;
	/* Number of pages used for DMA */
	unsigned long nr_pages;
};

struct ioctl_resources {
	/* List of all DMA transfers which is used later for cleanup */
	struct list_head dma_xfers;
	/* Base address of request queue which belongs to a DBC */
	void *buf;
	/*
	 * Base bus address of request queue which belongs to a DBC. Response
	 * queue base bus address can be calculated by adding size of request
	 * queue to base bus address of request queue.
	 */
	dma_addr_t dma_addr;
	/* Total size of request queue and response queue in bytes */
	u32 total_size;
	/* Total number of elements that can be queued in each of request and response queue */
	u32 nelem;
	/* Base address of response queue which belongs to a DBC */
	void *rsp_q_base;
	/* Status of the NNC message received */
	u32 status;
	/* DBC id of the DBC received from device */
	u32 dbc_id;
	/*
	 * DMA transfer request messages can be large, and it may not be
	 * possible to send them in one shot. In such cases the messages are
	 * broken into chunks; this field stores the ID of such chunks.
	 */
	u32 dma_chunk_id;
	/* Total number of bytes transferred for a DMA xfer request */
	u64 xferred_dma_size;
	/* Header of transaction message received from user. Used during DMA xfer request. */
	void *trans_hdr;
};

struct resp_work {
	struct work_struct work;
	struct qaic_device *qdev;
	void *buf;
};

/*
 * Since we're working with little endian messages, it's useful to be able to
 * increment without filling a whole line with conversions back and forth just
 * to add one to a message count.
 */
static __le32 incr_le32(__le32 val)
{
	return cpu_to_le32(le32_to_cpu(val) + 1);
}

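/* Compute the CRC32 of an entire outgoing message by walking its wrapper list */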
static u32 gen_crc(void *msg)
{
	struct wrapper_list *wrappers = msg;
	struct wrapper_msg *w;
	u32 crc = ~0;

	list_for_each_entry(w, &wrappers->list, list)
		crc = crc32(crc, &w->msg, w->len);

	return crc ^ ~0;
}

static u32 gen_crc_stub(void *msg)
{
	return 0;
}

static bool valid_crc(void *msg)
{
	struct wire_msg_hdr *hdr = msg;
	bool ret;
	u32 crc;

	/*
	 * The output of this algorithm is always converted to the native
	 * endianness.
	 */
	crc = le32_to_cpu(hdr->crc32);
	hdr->crc32 = 0;
	ret = (crc32(~0, msg, le32_to_cpu(hdr->len)) ^ ~0) == crc;
	hdr->crc32 = cpu_to_le32(crc);
	return ret;
}

static bool valid_crc_stub(void *msg)
{
	return true;
}

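/* kref release callback: unlink a wrapper from its list and free it */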
static void free_wrapper(struct kref *ref)
{
	struct wrapper_msg *wrapper = container_of(ref, struct wrapper_msg, ref_count);

	list_del(&wrapper->list);
	kfree(wrapper);
}

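/*
 * Hand the queue memory from a successful activate response over to the
 * assigned DBC, waiting for any previous user of that DBC to release it first.
 */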
static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources,
			 struct qaic_user *usr)
{
	u32 dbc_id = resources->dbc_id;

	if (resources->buf) {
		wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use);
		qdev->dbc[dbc_id].req_q_base = resources->buf;
		qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base;
		qdev->dbc[dbc_id].dma_addr = resources->dma_addr;
		qdev->dbc[dbc_id].total_size = resources->total_size;
		qdev->dbc[dbc_id].nelem = resources->nelem;
		enable_dbc(qdev, dbc_id, usr);
		qdev->dbc[dbc_id].in_use = true;
		resources->buf = NULL;
		set_dbc_state(qdev, dbc_id, DBC_STATE_ASSIGNED);
	}
}

static void free_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources)
{
	if (resources->buf)
		dma_free_coherent(&qdev->pdev->dev, resources->total_size, resources->buf,
				  resources->dma_addr);
	resources->buf = NULL;
}

static void free_dma_xfers(struct qaic_device *qdev, struct ioctl_resources *resources)
{
	struct dma_xfer *xfer;
	struct dma_xfer *x;
	int i;

	list_for_each_entry_safe(xfer, x, &resources->dma_xfers, list) {
		dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
		sg_free_table(xfer->sgt);
		kfree(xfer->sgt);
		for (i = 0; i < xfer->nr_pages; ++i)
			put_page(xfer->page_list[i]);
		kfree(xfer->page_list);
		list_del(&xfer->list);
		kfree(xfer);
	}
}

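/* Allocate a zeroed wrapper of the given size and append it to the wrapper list */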
static struct wrapper_msg *add_wrapper(struct wrapper_list *wrappers, u32 size)
{
	struct wrapper_msg *w = kzalloc(size, GFP_KERNEL);

	if (!w)
		return NULL;
	list_add_tail(&w->list, &wrappers->list);
	kref_init(&w->ref_count);
	w->head = wrappers;
	return w;
}

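/*
 * Encode a passthrough transaction: copy the opaque payload from the user
 * message verbatim into a new wrapper and account for it in the message header.
 */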
static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
			      u32 *user_len)
{
	struct qaic_manage_trans_passthrough *in_trans = trans;
	struct wire_trans_passthrough *out_trans;
	struct wrapper_msg *trans_wrapper;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	u32 msg_hdr_len;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;
	msg_hdr_len = le32_to_cpu(msg->hdr.len);

	if (in_trans->hdr.len % 8 != 0)
		return -EINVAL;

	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH)
		return -ENOSPC;

	trans_wrapper = add_wrapper(wrappers,
				    offsetof(struct wrapper_msg, trans) + in_trans->hdr.len);
	if (!trans_wrapper)
		return -ENOMEM;
	trans_wrapper->len = in_trans->hdr.len;
	out_trans = (struct wire_trans_passthrough *)&trans_wrapper->trans;

	memcpy(out_trans->data, in_trans->data, in_trans->hdr.len - sizeof(in_trans->hdr));
	msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
	msg->hdr.count = incr_le32(msg->hdr.count);
	*user_len += in_trans->hdr.len;
	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_PASSTHROUGH_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);

	return 0;
}

/* returns error code for failure, 0 if enough pages alloc'd, 1 if dma_cont is needed */
static int find_and_map_user_pages(struct qaic_device *qdev,
				   struct qaic_manage_trans_dma_xfer *in_trans,
				   struct ioctl_resources *resources, struct dma_xfer *xfer)
{
	u64 xfer_start_addr, remaining, end, total;
	unsigned long need_pages;
	struct page **page_list;
	unsigned long nr_pages;
	struct sg_table *sgt;
	int ret;
	int i;

	if (check_add_overflow(in_trans->addr, resources->xferred_dma_size, &xfer_start_addr))
		return -EINVAL;

	if (in_trans->size < resources->xferred_dma_size)
		return -EINVAL;
	remaining = in_trans->size - resources->xferred_dma_size;
	if (remaining == 0)
		return -EINVAL;

	if (check_add_overflow(xfer_start_addr, remaining, &end))
		return -EINVAL;

	total = remaining + offset_in_page(xfer_start_addr);
	if (total >= SIZE_MAX)
		return -EINVAL;

	need_pages = DIV_ROUND_UP(total, PAGE_SIZE);

	nr_pages = need_pages;

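	/*
	 * If the page array doesn't fit in memory, halve the page count and
	 * retry; mapping fewer pages than needed is made up for by a DMA
	 * continuation (the 1 return below).
	 */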
	while (1) {
		page_list = kmalloc_objs(*page_list, nr_pages,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!page_list) {
			nr_pages = nr_pages / 2;
			if (!nr_pages)
				return -ENOMEM;
		} else {
			break;
		}
	}

	ret = get_user_pages_fast(xfer_start_addr, nr_pages, 0, page_list);
	if (ret < 0)
		goto free_page_list;
	if (ret != nr_pages) {
		nr_pages = ret;
		ret = -EFAULT;
		goto put_pages;
	}

	sgt = kmalloc_obj(*sgt);
	if (!sgt) {
		ret = -ENOMEM;
		goto put_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
					offset_in_page(xfer_start_addr),
					remaining, GFP_KERNEL);
	if (ret) {
		ret = -ENOMEM;
		goto free_sgt;
	}

	ret = dma_map_sgtable(&qdev->pdev->dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		goto free_table;

	xfer->sgt = sgt;
	xfer->page_list = page_list;
	xfer->nr_pages = nr_pages;

	return need_pages > nr_pages ? 1 : 0;

free_table:
	sg_free_table(sgt);
free_sgt:
	kfree(sgt);
put_pages:
	for (i = 0; i < nr_pages; ++i)
		put_page(page_list[i]);
free_page_list:
	kfree(page_list);
	return ret;
}

/* returns error code for failure, 0 if everything was encoded, 1 if dma_cont is needed */
static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wrappers,
				  struct ioctl_resources *resources, u32 msg_hdr_len, u32 *size,
				  struct wire_trans_dma_xfer **out_trans)
{
	struct wrapper_msg *trans_wrapper;
	struct sg_table *sgt = xfer->sgt;
	struct wire_addr_size_pair *asp;
	struct scatterlist *sg;
	struct wrapper_msg *w;
	unsigned int dma_len;
	u64 dma_chunk_len;
	void *boundary;
	int nents_dma;
	int nents;
	int i;

	nents = sgt->nents;
	nents_dma = nents;
	*size = QAIC_MANAGE_WIRE_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
	for_each_sgtable_dma_sg(sgt, sg, i) {
		*size -= sizeof(*asp);
		/* Save 1K for possible follow-up transactions. */
		if (*size < SZ_1K) {
			nents_dma = i;
			break;
		}
	}

	trans_wrapper = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
	if (!trans_wrapper)
		return -ENOMEM;
	*out_trans = (struct wire_trans_dma_xfer *)&trans_wrapper->trans;

	asp = (*out_trans)->data;
	boundary = (void *)trans_wrapper + QAIC_WRAPPER_MAX_SIZE;
	*size = 0;

	dma_len = 0;
	w = trans_wrapper;
	dma_chunk_len = 0;
	for_each_sg(sgt->sgl, sg, nents_dma, i) {
		asp->size = cpu_to_le64(dma_len);
		dma_chunk_len += dma_len;
		if (dma_len) {
			asp++;
			if ((void *)asp + sizeof(*asp) > boundary) {
				w->len = (void *)asp - (void *)&w->msg;
				*size += w->len;
				w = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
				if (!w)
					return -ENOMEM;
				boundary = (void *)w + QAIC_WRAPPER_MAX_SIZE;
				asp = (struct wire_addr_size_pair *)&w->msg;
			}
		}
		asp->addr = cpu_to_le64(sg_dma_address(sg));
		dma_len = sg_dma_len(sg);
	}
	/* finalize the last segment */
	asp->size = cpu_to_le64(dma_len);
	w->len = (void *)asp + sizeof(*asp) - (void *)&w->msg;
	*size += w->len;
	dma_chunk_len += dma_len;
	resources->xferred_dma_size += dma_chunk_len;

	return nents_dma < nents ? 1 : 0;
}

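/* Undo find_and_map_user_pages(): unmap, free the SG table, and unpin the pages */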
static void cleanup_xfer(struct qaic_device *qdev, struct dma_xfer *xfer)
{
	int i;

	dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
	sg_free_table(xfer->sgt);
	kfree(xfer->sgt);
	for (i = 0; i < xfer->nr_pages; ++i)
		put_page(xfer->page_list[i]);
	kfree(xfer->page_list);
}

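/*
 * Encode a DMA transfer transaction: pin the user memory, encode it as
 * address/size pairs, and assign a chunk ID if the transfer is too large for
 * one wire message and must be continued in a follow-up message.
 */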
static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
		      u32 *user_len, struct ioctl_resources *resources, struct qaic_user *usr)
{
	struct qaic_manage_trans_dma_xfer *in_trans = trans;
	struct wire_trans_dma_xfer *out_trans;
	struct wrapper_msg *wrapper;
	struct dma_xfer *xfer;
	struct wire_msg *msg;
	bool need_cont_dma;
	u32 msg_hdr_len;
	u32 size;
	int ret;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;
	msg_hdr_len = le32_to_cpu(msg->hdr.len);

	/* There should be enough space to hold at least one ASP entry. */
	if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) >
	    QAIC_MANAGE_WIRE_MSG_LENGTH)
		return -ENOMEM;

	xfer = kmalloc_obj(*xfer);
	if (!xfer)
		return -ENOMEM;

	ret = find_and_map_user_pages(qdev, in_trans, resources, xfer);
	if (ret < 0)
		goto free_xfer;

	need_cont_dma = (bool)ret;

	ret = encode_addr_size_pairs(xfer, wrappers, resources, msg_hdr_len, &size, &out_trans);
	if (ret < 0)
		goto cleanup_xfer;

	need_cont_dma = need_cont_dma || (bool)ret;

	msg->hdr.len = cpu_to_le32(msg_hdr_len + size);
	msg->hdr.count = incr_le32(msg->hdr.count);

	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(size);
	out_trans->tag = cpu_to_le32(in_trans->tag);
	out_trans->count = cpu_to_le32((size - sizeof(*out_trans)) /
				       sizeof(struct wire_addr_size_pair));

	*user_len += in_trans->hdr.len;

	if (resources->dma_chunk_id) {
		out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
	} else if (need_cont_dma) {
		while (resources->dma_chunk_id == 0)
			resources->dma_chunk_id = atomic_inc_return(&usr->chunk_id);

		out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
	}
	resources->trans_hdr = trans;

	list_add(&xfer->list, &resources->dma_xfers);
	return 0;

cleanup_xfer:
	cleanup_xfer(qdev, xfer);
free_xfer:
	kfree(xfer);
	return ret;
}

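/*
 * Encode an activate transaction: allocate one coherent buffer that holds the
 * request queue at its base and the response queue at its tail, and pass both
 * queue addresses to the device.
 */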
static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
			   u32 *user_len, struct ioctl_resources *resources)
{
	struct qaic_manage_trans_activate_to_dev *in_trans = trans;
	struct wire_trans_activate_to_dev *out_trans;
	struct wrapper_msg *trans_wrapper;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	dma_addr_t dma_addr;
	u32 msg_hdr_len;
	void *buf;
	u32 nelem;
	u32 size;
	int ret;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;
	msg_hdr_len = le32_to_cpu(msg->hdr.len);

	if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_WIRE_MSG_LENGTH)
		return -ENOSPC;

	if (!in_trans->queue_size)
		return -EINVAL;

	if (in_trans->pad)
		return -EINVAL;

	nelem = in_trans->queue_size;
	if (check_mul_overflow((u32)(get_dbc_req_elem_size() + get_dbc_rsp_elem_size()),
			       nelem,
			       &size))
		return -EINVAL;

	if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size)
		return -EINVAL;

	size = ALIGN((size + QAIC_DBC_Q_GAP), QAIC_DBC_Q_BUF_ALIGN);

	buf = dma_alloc_coherent(&qdev->pdev->dev, size, &dma_addr, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	trans_wrapper = add_wrapper(wrappers,
				    offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
	if (!trans_wrapper) {
		ret = -ENOMEM;
		goto free_dma;
	}
	trans_wrapper->len = sizeof(*out_trans);
	out_trans = (struct wire_trans_activate_to_dev *)&trans_wrapper->trans;

	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_ACTIVATE_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(sizeof(*out_trans));
	out_trans->buf_len = cpu_to_le32(size);
	out_trans->req_q_addr = cpu_to_le64(dma_addr);
	out_trans->req_q_size = cpu_to_le32(nelem);
	out_trans->rsp_q_addr = cpu_to_le64(dma_addr + size - nelem * get_dbc_rsp_elem_size());
	out_trans->rsp_q_size = cpu_to_le32(nelem);
	out_trans->options = cpu_to_le32(in_trans->options);

	*user_len += in_trans->hdr.len;
	msg->hdr.len = cpu_to_le32(msg_hdr_len + sizeof(*out_trans));
	msg->hdr.count = incr_le32(msg->hdr.count);

	resources->buf = buf;
	resources->dma_addr = dma_addr;
	resources->total_size = size;
	resources->nelem = nelem;
	resources->rsp_q_base = buf + size - nelem * get_dbc_rsp_elem_size();
	return 0;

free_dma:
	dma_free_coherent(&qdev->pdev->dev, size, buf, dma_addr);
	return ret;
}

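/* Deactivate adds no wire transaction; validate the request and disable the DBC locally */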
static int encode_deactivate(struct qaic_device *qdev, void *trans,
			     u32 *user_len, struct qaic_user *usr)
{
	struct qaic_manage_trans_deactivate *in_trans = trans;

	if (in_trans->dbc_id >= qdev->num_dbc || in_trans->pad)
		return -EINVAL;

	*user_len += in_trans->hdr.len;

	return disable_dbc(qdev, in_trans->dbc_id, usr);
}

static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
			 u32 *user_len)
{
	struct qaic_manage_trans_status_to_dev *in_trans = trans;
	struct wire_trans_status_to_dev *out_trans;
	struct wrapper_msg *trans_wrapper;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	u32 msg_hdr_len;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;
	msg_hdr_len = le32_to_cpu(msg->hdr.len);

	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH)
		return -ENOSPC;

	trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
	if (!trans_wrapper)
		return -ENOMEM;

	trans_wrapper->len = sizeof(*out_trans);
	out_trans = (struct wire_trans_status_to_dev *)&trans_wrapper->trans;

	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_STATUS_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);
	msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
	msg->hdr.count = incr_le32(msg->hdr.count);
	*user_len += in_trans->hdr.len;

	return 0;
}

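/*
 * Walk the transactions in a user message and encode each one into its wire
 * format, appending wrappers to the list headed by the message header.
 */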
static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
			  struct wrapper_list *wrappers, struct ioctl_resources *resources,
			  struct qaic_user *usr)
{
	struct qaic_manage_trans_hdr *trans_hdr;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	u32 user_len = 0;
	int ret;
	int i;

	if (!user_msg->count ||
	    user_msg->len < sizeof(*trans_hdr)) {
		ret = -EINVAL;
		goto out;
	}

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;

	msg->hdr.len = cpu_to_le32(sizeof(msg->hdr));

	if (resources->dma_chunk_id) {
		ret = encode_dma(qdev, resources->trans_hdr, wrappers, &user_len, resources, usr);
		msg->hdr.count = cpu_to_le32(1);
		goto out;
	}

	for (i = 0; i < user_msg->count; ++i) {
		if (user_len > user_msg->len - sizeof(*trans_hdr)) {
			ret = -EINVAL;
			break;
		}
		trans_hdr = (struct qaic_manage_trans_hdr *)(user_msg->data + user_len);
		if (trans_hdr->len < sizeof(*trans_hdr) ||
		    size_add(user_len, trans_hdr->len) > user_msg->len) {
			ret = -EINVAL;
			break;
		}

		switch (trans_hdr->type) {
		case QAIC_TRANS_PASSTHROUGH_FROM_USR:
			ret = encode_passthrough(qdev, trans_hdr, wrappers, &user_len);
			break;
		case QAIC_TRANS_DMA_XFER_FROM_USR:
			ret = encode_dma(qdev, trans_hdr, wrappers, &user_len, resources, usr);
			break;
		case QAIC_TRANS_ACTIVATE_FROM_USR:
			ret = encode_activate(qdev, trans_hdr, wrappers, &user_len, resources);
			break;
		case QAIC_TRANS_DEACTIVATE_FROM_USR:
			ret = encode_deactivate(qdev, trans_hdr, &user_len, usr);
			break;
		case QAIC_TRANS_STATUS_FROM_USR:
			ret = encode_status(qdev, trans_hdr, wrappers, &user_len);
			break;
		default:
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto out;
	}

	if (user_len != user_msg->len)
		ret = -EINVAL;
out:
	if (ret) {
		free_dma_xfers(qdev, resources);
		free_dbc_buf(qdev, resources);
		return ret;
	}

	return 0;
}

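/* Copy a passthrough payload from the device response back into the user message */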
static int decode_passthrough(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
			      u32 *msg_len)
{
	struct qaic_manage_trans_passthrough *out_trans;
	struct wire_trans_passthrough *in_trans = trans;
	u32 len;

	out_trans = (void *)user_msg->data + user_msg->len;

	len = le32_to_cpu(in_trans->hdr.len);
	if (len % 8 != 0)
		return -EINVAL;

	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -ENOSPC;

	memcpy(out_trans->data, in_trans->data, len - sizeof(in_trans->hdr));
	user_msg->len += len;
	*msg_len += len;
	out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
	out_trans->hdr.len = len;

	return 0;
}

static int decode_activate(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
			   u32 *msg_len, struct ioctl_resources *resources, struct qaic_user *usr)
{
	struct qaic_manage_trans_activate_from_dev *out_trans;
	struct wire_trans_activate_from_dev *in_trans = trans;
	u32 len;

	out_trans = (void *)user_msg->data + user_msg->len;

	len = le32_to_cpu(in_trans->hdr.len);
	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -ENOSPC;

	user_msg->len += len;
	*msg_len += len;
	out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
	out_trans->hdr.len = len;
	out_trans->status = le32_to_cpu(in_trans->status);
	out_trans->dbc_id = le32_to_cpu(in_trans->dbc_id);
	out_trans->options = le64_to_cpu(in_trans->options);

	if (!resources->buf)
		/* how did we get an activate response without a request? */
		return -EINVAL;

	if (out_trans->dbc_id >= qdev->num_dbc)
		/*
		 * The device assigned an invalid resource, which should never
		 * happen. Return an error so the user can try to recover.
		 */
		return -ENODEV;

	if (out_trans->status)
		/*
		 * Allocating resources failed on the device side. This is not
		 * expected behavior; the user is expected to handle it.
		 */
		return -ECANCELED;

	resources->status = out_trans->status;
	resources->dbc_id = out_trans->dbc_id;
	save_dbc_buf(qdev, resources, usr);

	return 0;
}

static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len,
			     struct qaic_user *usr)
{
	struct wire_trans_deactivate_from_dev *in_trans = trans;
	u32 dbc_id = le32_to_cpu(in_trans->dbc_id);
	u32 status = le32_to_cpu(in_trans->status);

	if (dbc_id >= qdev->num_dbc)
		/*
		 * The device assigned an invalid resource, which should never
		 * happen. Inject an error so the user can try to recover.
		 */
		return -ENODEV;

	if (status) {
		/*
		 * Releasing resources failed on the device side, which puts
		 * us in a bind since they may still be in use, so enable the
		 * dbc. The user is expected to retry deactivation.
		 */
		enable_dbc(qdev, dbc_id, usr);
		return -ECANCELED;
	}

	release_dbc(qdev, dbc_id);
	set_dbc_state(qdev, dbc_id, DBC_STATE_IDLE);
	*msg_len += sizeof(*in_trans);

	return 0;
}

static int decode_status(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
			 u32 *user_len, struct wire_msg *msg)
{
	struct qaic_manage_trans_status_from_dev *out_trans;
	struct wire_trans_status_from_dev *in_trans = trans;
	u32 len;

	out_trans = (void *)user_msg->data + user_msg->len;

	len = le32_to_cpu(in_trans->hdr.len);
	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -ENOSPC;

	out_trans->hdr.type = QAIC_TRANS_STATUS_FROM_DEV;
	out_trans->hdr.len = len;
	out_trans->major = le16_to_cpu(in_trans->major);
	out_trans->minor = le16_to_cpu(in_trans->minor);
	out_trans->status_flags = le64_to_cpu(in_trans->status_flags);
	out_trans->status = le32_to_cpu(in_trans->status);
	*user_len += le32_to_cpu(in_trans->hdr.len);
	user_msg->len += len;

	if (out_trans->status)
		return -ECANCELED;
	if (out_trans->status_flags & BIT(0) && !valid_crc(msg))
		return -EPIPE;

	return 0;
}

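/*
 * Validate a response message from the device and decode each transaction in
 * it back into the user message buffer.
 */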
static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
			  struct wire_msg *msg, struct ioctl_resources *resources,
			  struct qaic_user *usr)
{
	u32 msg_hdr_len = le32_to_cpu(msg->hdr.len);
	struct wire_trans_hdr *trans_hdr;
	u32 msg_len = 0;
	int ret;
	int i;

	if (msg_hdr_len < sizeof(*trans_hdr) ||
	    msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -EINVAL;

	user_msg->len = 0;
	user_msg->count = le32_to_cpu(msg->hdr.count);

	for (i = 0; i < user_msg->count; ++i) {
		u32 hdr_len;

		if (msg_len > msg_hdr_len - sizeof(*trans_hdr))
			return -EINVAL;

		trans_hdr = (struct wire_trans_hdr *)(msg->data + msg_len);
		hdr_len = le32_to_cpu(trans_hdr->len);
		if (hdr_len < sizeof(*trans_hdr) ||
		    size_add(msg_len, hdr_len) > msg_hdr_len)
			return -EINVAL;

		switch (le32_to_cpu(trans_hdr->type)) {
		case QAIC_TRANS_PASSTHROUGH_FROM_DEV:
			ret = decode_passthrough(qdev, trans_hdr, user_msg, &msg_len);
			break;
		case QAIC_TRANS_ACTIVATE_FROM_DEV:
			ret = decode_activate(qdev, trans_hdr, user_msg, &msg_len, resources, usr);
			break;
		case QAIC_TRANS_DEACTIVATE_FROM_DEV:
			ret = decode_deactivate(qdev, trans_hdr, &msg_len, usr);
			break;
		case QAIC_TRANS_STATUS_FROM_DEV:
			ret = decode_status(qdev, trans_hdr, user_msg, &msg_len, msg);
			break;
		default:
			return -EINVAL;
		}

		if (ret)
			return ret;
	}

	if (msg_len != (msg_hdr_len - sizeof(msg->hdr)))
		return -EINVAL;

	return 0;
}

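/*
 * Queue the encoded wrappers on the control channel and wait for the matching
 * response. Must be called with cntl_mutex held; releases the mutex on all
 * paths. Returns the response buffer or an ERR_PTR.
 */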
static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 seq_num,
		      bool ignore_signal)
{
	struct xfer_queue_elem elem;
	struct wire_msg *out_buf;
	struct wrapper_msg *w;
	long ret = -EAGAIN;
	int xfer_count = 0;
	int retry_count;

	/* Allow QAIC_BOOT state since we need to check control protocol version */
	if (qdev->dev_state == QAIC_OFFLINE) {
		mutex_unlock(&qdev->cntl_mutex);
		return ERR_PTR(-ENODEV);
	}

	/* Attempt to avoid a partial commit of a message */
	list_for_each_entry(w, &wrappers->list, list)
		xfer_count++;

	for (retry_count = 0; retry_count < QAIC_MHI_RETRY_MAX; retry_count++) {
		if (xfer_count <= mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) {
			ret = 0;
			break;
		}
		msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS);
		if (signal_pending(current))
			break;
	}

	if (ret) {
		mutex_unlock(&qdev->cntl_mutex);
		return ERR_PTR(ret);
	}

	elem.seq_num = seq_num;
	elem.buf = NULL;
	init_completion(&elem.xfer_done);
	if (likely(!qdev->cntl_lost_buf)) {
		/*
		 * The max size of request to device is QAIC_MANAGE_WIRE_MSG_LENGTH.
		 * The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH.
		 */
		out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL);
		if (!out_buf) {
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(-ENOMEM);
		}

		ret = mhi_queue_buf(qdev->cntl_ch, DMA_FROM_DEVICE, out_buf,
				    QAIC_MANAGE_MAX_MSG_LENGTH, MHI_EOT);
		if (ret) {
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(ret);
		}
	} else {
		/*
		 * We lost a buffer because we queued a recv buf, but then
		 * queuing the corresponding tx buf failed. To try to avoid
		 * a memory leak, let's reclaim it and use it for this
		 * transaction.
		 */
		qdev->cntl_lost_buf = false;
	}

	list_for_each_entry(w, &wrappers->list, list) {
		kref_get(&w->ref_count);
		ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
				    list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
		if (ret) {
			qdev->cntl_lost_buf = true;
			kref_put(&w->ref_count, free_wrapper);
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(ret);
		}
	}

	list_add_tail(&elem.list, &qdev->cntl_xfer_list);
	mutex_unlock(&qdev->cntl_mutex);

	if (ignore_signal)
		ret = wait_for_completion_timeout(&elem.xfer_done, control_resp_timeout_s * HZ);
	else
		ret = wait_for_completion_interruptible_timeout(&elem.xfer_done,
								control_resp_timeout_s * HZ);
	/*
	 * Not using _interruptible here because we have to clean up or we'll
	 * likely cause memory corruption.
	 */
	mutex_lock(&qdev->cntl_mutex);
	if (!list_empty(&elem.list))
		list_del(&elem.list);
	if (!ret && !elem.buf)
		ret = -ETIMEDOUT;
	else if (ret > 0 && !elem.buf)
		ret = -EIO;
	mutex_unlock(&qdev->cntl_mutex);

	if (ret < 0) {
		kfree(elem.buf);
		return ERR_PTR(ret);
	} else if (!qdev->valid_crc(elem.buf)) {
		kfree(elem.buf);
		return ERR_PTR(-EPIPE);
	}

	return elem.buf;
}

/* Add a transaction to abort the outstanding DMA continuation */
static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 dma_chunk_id)
{
	struct wire_trans_dma_xfer *out_trans;
	u32 size = sizeof(*out_trans);
	struct wrapper_msg *wrapper;
	struct wrapper_msg *w;
	struct wire_msg *msg;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;

	/* Remove all but the first wrapper which has the msg header */
	list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
		if (!list_is_first(&wrapper->list, &wrappers->list))
			kref_put(&wrapper->ref_count, free_wrapper);

	wrapper = add_wrapper(wrappers, sizeof(*wrapper));

	if (!wrapper)
		return -ENOMEM;

	out_trans = (struct wire_trans_dma_xfer *)&wrapper->trans;
	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(size);
	out_trans->tag = cpu_to_le32(0);
	out_trans->count = cpu_to_le32(0);
	out_trans->dma_chunk_id = cpu_to_le32(dma_chunk_id);

	msg->hdr.len = cpu_to_le32(size + sizeof(*msg));
	msg->hdr.count = cpu_to_le32(1);
	wrapper->len = size;

	return 0;
}

static struct wrapper_list *alloc_wrapper_list(void)
{
	struct wrapper_list *wrappers;

	wrappers = kmalloc_obj(*wrappers);
	if (!wrappers)
		return NULL;
	INIT_LIST_HEAD(&wrappers->list);
	spin_lock_init(&wrappers->lock);

	return wrappers;
}

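/*
 * Encode the user message, send it to the device, and hand back the raw
 * response. The wrapper list is freed once every wrapper has been released,
 * either here or by the MHI UL callback as the device consumes the buffers.
 */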
static int qaic_manage_msg_xfer(struct qaic_device *qdev, struct qaic_user *usr,
				struct manage_msg *user_msg, struct ioctl_resources *resources,
				struct wire_msg **rsp)
{
	struct wrapper_list *wrappers;
	struct wrapper_msg *wrapper;
	struct wrapper_msg *w;
	bool all_done = false;
	struct wire_msg *msg;
	int ret;

	wrappers = alloc_wrapper_list();
	if (!wrappers)
		return -ENOMEM;

	wrapper = add_wrapper(wrappers, sizeof(*wrapper));
	if (!wrapper) {
		kfree(wrappers);
		return -ENOMEM;
	}

	msg = &wrapper->msg;
	wrapper->len = sizeof(*msg);

	ret = encode_message(qdev, user_msg, wrappers, resources, usr);
	if (ret && resources->dma_chunk_id)
		ret = abort_dma_cont(qdev, wrappers, resources->dma_chunk_id);
	if (ret)
		goto encode_failed;

	ret = mutex_lock_interruptible(&qdev->cntl_mutex);
	if (ret)
		goto lock_failed;

	msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
	msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);

	if (usr) {
		msg->hdr.handle = cpu_to_le32(usr->handle);
		msg->hdr.partition_id = cpu_to_le32(usr->qddev->partition_id);
	} else {
		msg->hdr.handle = 0;
		msg->hdr.partition_id = cpu_to_le32(QAIC_NO_PARTITION);
	}

	msg->hdr.padding = cpu_to_le32(0);
	msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));

	/* msg_xfer releases the mutex */
	*rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, false);
	if (IS_ERR(*rsp))
		ret = PTR_ERR(*rsp);

lock_failed:
	free_dma_xfers(qdev, resources);
encode_failed:
	spin_lock(&wrappers->lock);
	list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
		kref_put(&wrapper->ref_count, free_wrapper);
	all_done = list_empty(&wrappers->list);
	spin_unlock(&wrappers->lock);
	if (all_done)
		kfree(wrappers);

	return ret;
}

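/*
 * Top-level send/receive loop for a manage message. If the device responds
 * with a DMA continuation, re-encode and resend the pending DMA transaction
 * until the full transfer has been described.
 */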
static int qaic_manage(struct qaic_device *qdev, struct qaic_user *usr, struct manage_msg *user_msg)
{
	struct wire_trans_dma_xfer_cont *dma_cont = NULL;
	struct ioctl_resources resources;
	struct wire_msg *rsp = NULL;
	int ret;

	memset(&resources, 0, sizeof(struct ioctl_resources));

	INIT_LIST_HEAD(&resources.dma_xfers);

	if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH ||
	    user_msg->count > QAIC_MANAGE_MAX_MSG_LENGTH / sizeof(struct qaic_manage_trans_hdr))
		return -EINVAL;

dma_xfer_continue:
	ret = qaic_manage_msg_xfer(qdev, usr, user_msg, &resources, &rsp);
	if (ret)
		return ret;
	/* dma_cont should be the only transaction if present */
	if (le32_to_cpu(rsp->hdr.count) == 1) {
		dma_cont = (struct wire_trans_dma_xfer_cont *)rsp->data;
		if (le32_to_cpu(dma_cont->hdr.type) != QAIC_TRANS_DMA_XFER_CONT)
			dma_cont = NULL;
	}
	if (dma_cont) {
		if (le32_to_cpu(dma_cont->dma_chunk_id) == resources.dma_chunk_id &&
		    le64_to_cpu(dma_cont->xferred_size) == resources.xferred_dma_size) {
			kfree(rsp);
			goto dma_xfer_continue;
		}

		ret = -EINVAL;
		goto dma_cont_failed;
	}

	ret = decode_message(qdev, user_msg, rsp, &resources, usr);

dma_cont_failed:
	free_dbc_buf(qdev, &resources);
	kfree(rsp);
	return ret;
}

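/* DRM ioctl handler: copy the user's manage message in, process it, and copy the result back */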
int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_manage_msg *user_msg = data;
	struct qaic_device *qdev;
	struct manage_msg *msg;
	struct qaic_user *usr;
	u8 __user *user_data;
	int qdev_rcu_id;
	int usr_rcu_id;
	int ret;

	if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -EINVAL;

	usr = file_priv->driver_priv;

	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
		return -ENODEV;
	}

	qdev = usr->qddev->qdev;

	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
		srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
		return -ENODEV;
	}

	msg = kzalloc(QAIC_MANAGE_MAX_MSG_LENGTH + sizeof(*msg), GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto out;
	}

	msg->len = user_msg->len;
	msg->count = user_msg->count;

	user_data = u64_to_user_ptr(user_msg->data);

	if (copy_from_user(msg->data, user_data, user_msg->len)) {
		ret = -EFAULT;
		goto free_msg;
	}

	ret = qaic_manage(qdev, usr, msg);

	/*
	 * If qaic_manage() is successful then we copy the message back to
	 * userspace memory, but we make an exception for -ECANCELED.
	 * -ECANCELED means the device NACKed the message with a status
	 * error code which userspace would like to know.
	 */
	if (ret == -ECANCELED || !ret) {
		if (copy_to_user(user_data, msg->data, msg->len)) {
			ret = -EFAULT;
		} else {
			user_msg->len = msg->len;
			user_msg->count = msg->count;
		}
	}

free_msg:
	kfree(msg);
out:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

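/*
 * Query the device's NNC protocol version with a STATUS transaction, and
 * switch the CRC generate/validate hooks based on whether the device reports
 * CRC support (BIT(0) of status_flags).
 */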
int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor)
{
	struct qaic_manage_trans_status_from_dev *status_result;
	struct qaic_manage_trans_status_to_dev *status_query;
	struct manage_msg *user_msg;
	int ret;

	user_msg = kmalloc(sizeof(*user_msg) + sizeof(*status_result), GFP_KERNEL);
	if (!user_msg) {
		ret = -ENOMEM;
		goto out;
	}
	user_msg->len = sizeof(*status_query);
	user_msg->count = 1;

	status_query = (struct qaic_manage_trans_status_to_dev *)user_msg->data;
	status_query->hdr.type = QAIC_TRANS_STATUS_FROM_USR;
	status_query->hdr.len = sizeof(status_query->hdr);

	ret = qaic_manage(qdev, usr, user_msg);
	if (ret)
		goto kfree_user_msg;
	status_result = (struct qaic_manage_trans_status_from_dev *)user_msg->data;
	*major = status_result->major;
	*minor = status_result->minor;

	if (status_result->status_flags & BIT(0)) { /* device is using CRC */
		/* By default qdev->gen_crc is programmed to generate CRC */
		qdev->valid_crc = valid_crc;
	} else {
		/* By default qdev->valid_crc is programmed to bypass CRC */
		qdev->gen_crc = gen_crc_stub;
	}

kfree_user_msg:
	kfree(user_msg);
out:
	return ret;
}

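/* Workqueue handler: match a received response to its waiting request by sequence number */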
static void resp_worker(struct work_struct *work)
{
	struct resp_work *resp = container_of(work, struct resp_work, work);
	struct qaic_device *qdev = resp->qdev;
	struct wire_msg *msg = resp->buf;
	struct xfer_queue_elem *elem;
	struct xfer_queue_elem *i;
	bool found = false;

	mutex_lock(&qdev->cntl_mutex);
	list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
		if (elem->seq_num == le32_to_cpu(msg->hdr.sequence_number)) {
			found = true;
			list_del_init(&elem->list);
			elem->buf = msg;
			complete_all(&elem->xfer_done);
			break;
		}
	}
	mutex_unlock(&qdev->cntl_mutex);

	if (!found)
		/* request must have timed out, drop packet */
		kfree(msg);

	kfree(resp);
}

static void free_wrapper_from_list(struct wrapper_list *wrappers, struct wrapper_msg *wrapper)
{
	bool all_done = false;

	spin_lock(&wrappers->lock);
	kref_put(&wrapper->ref_count, free_wrapper);
	all_done = list_empty(&wrappers->list);
	spin_unlock(&wrappers->lock);

	if (all_done)
		kfree(wrappers);
}

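/* MHI finished sending one of our wrappers to the device; drop its reference */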
void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	struct wire_msg *msg = mhi_result->buf_addr;
	struct wrapper_msg *wrapper = container_of(msg, struct wrapper_msg, msg);

	free_wrapper_from_list(wrapper->head, wrapper);
}

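/*
 * MHI delivered a message from the device. Validate it and punt processing to
 * the workqueue (note the GFP_ATOMIC allocation; this callback may run in
 * atomic context).
 */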
void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
	struct wire_msg *msg = mhi_result->buf_addr;
	struct resp_work *resp;

	if (mhi_result->transaction_status || msg->hdr.magic_number != MANAGE_MAGIC_NUMBER) {
		kfree(msg);
		return;
	}

	resp = kmalloc_obj(*resp, GFP_ATOMIC);
	if (!resp) {
		kfree(msg);
		return;
	}

	INIT_WORK(&resp->work, resp_worker);
	resp->qdev = qdev;
	resp->buf = msg;
	queue_work(qdev->cntl_wq, &resp->work);
}

int qaic_control_open(struct qaic_device *qdev)
{
	if (!qdev->cntl_ch)
		return -ENODEV;

	qdev->cntl_lost_buf = false;
	/*
	 * By default qaic should assume that the device has CRC enabled.
	 * Qaic learns whether the device has CRC enabled or disabled during
	 * the device status transaction, which is the first transaction
	 * performed on the control channel.
	 *
	 * So CRC validation of the first device status transaction response
	 * is skipped (by calling valid_crc_stub) and is done later during
	 * decoding if the device has CRC enabled.
	 * Once qaic knows whether the device has CRC enabled or not, it acts
	 * accordingly.
	 */
	qdev->gen_crc = gen_crc;
	qdev->valid_crc = valid_crc_stub;

	return mhi_prepare_for_transfer(qdev->cntl_ch);
}

void qaic_control_close(struct qaic_device *qdev)
{
	mhi_unprepare_from_transfer(qdev->cntl_ch);
}

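/*
 * Send a terminate transaction for the given user so the device releases
 * every resource tied to that user's handle.
 */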
void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr)
{
	struct wire_trans_terminate_to_dev *trans;
	struct wrapper_list *wrappers;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	struct wire_msg *rsp;

	wrappers = alloc_wrapper_list();
	if (!wrappers)
		return;

	wrapper = add_wrapper(wrappers, sizeof(*wrapper) + sizeof(*msg) + sizeof(*trans));
	if (!wrapper) {
		kfree(wrappers);
		return;
	}

	msg = &wrapper->msg;

	trans = (struct wire_trans_terminate_to_dev *)msg->data;

	trans->hdr.type = cpu_to_le32(QAIC_TRANS_TERMINATE_TO_DEV);
	trans->hdr.len = cpu_to_le32(sizeof(*trans));
	trans->handle = cpu_to_le32(usr->handle);

	mutex_lock(&qdev->cntl_mutex);
	wrapper->len = sizeof(msg->hdr) + sizeof(*trans);
	msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
	msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
	msg->hdr.len = cpu_to_le32(wrapper->len);
	msg->hdr.count = cpu_to_le32(1);
	msg->hdr.handle = cpu_to_le32(usr->handle);
	msg->hdr.padding = cpu_to_le32(0);
	msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));

	/*
	 * msg_xfer releases the mutex.
	 * We don't care about the return of msg_xfer since we will not do
	 * anything different based on what happens.
	 * We ignore pending signals since one will be set if the user is
	 * killed, and we need to give the device a chance to clean up,
	 * otherwise DMA may still be in progress when we return.
	 */
	rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, true);
	if (!IS_ERR(rsp))
		kfree(rsp);
	free_wrapper_from_list(wrappers, wrapper);
}

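/*
 * Complete every outstanding control transfer without a response buffer so
 * that waiters in msg_xfer() can bail out.
 */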
void wake_all_cntl(struct qaic_device *qdev)
{
	struct xfer_queue_elem *elem;
	struct xfer_queue_elem *i;

	mutex_lock(&qdev->cntl_mutex);
	list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
		list_del_init(&elem->list);
		complete_all(&elem->xfer_done);
	}
	mutex_unlock(&qdev->cntl_mutex);
}