xref: /linux/drivers/accel/qaic/sahara.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 /* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
4 
5 #include <linux/devcoredump.h>
6 #include <linux/firmware.h>
7 #include <linux/limits.h>
8 #include <linux/mhi.h>
9 #include <linux/minmax.h>
10 #include <linux/mod_devicetable.h>
11 #include <linux/overflow.h>
12 #include <linux/types.h>
13 #include <linux/vmalloc.h>
14 #include <linux/workqueue.h>
15 
16 #include "sahara.h"
17 
/* Sahara protocol command identifiers (wire values, little-endian) */
#define SAHARA_HELLO_CMD		0x1  /* Min protocol version 1.0 */
#define SAHARA_HELLO_RESP_CMD		0x2  /* Min protocol version 1.0 */
#define SAHARA_READ_DATA_CMD		0x3  /* Min protocol version 1.0 */
#define SAHARA_END_OF_IMAGE_CMD		0x4  /* Min protocol version 1.0 */
#define SAHARA_DONE_CMD			0x5  /* Min protocol version 1.0 */
#define SAHARA_DONE_RESP_CMD		0x6  /* Min protocol version 1.0 */
#define SAHARA_RESET_CMD		0x7  /* Min protocol version 1.0 */
#define SAHARA_RESET_RESP_CMD		0x8  /* Min protocol version 1.0 */
#define SAHARA_MEM_DEBUG_CMD		0x9  /* Min protocol version 2.0 */
#define SAHARA_MEM_READ_CMD		0xa  /* Min protocol version 2.0 */
#define SAHARA_CMD_READY_CMD		0xb  /* Min protocol version 2.1 */
#define SAHARA_SWITCH_MODE_CMD		0xc  /* Min protocol version 2.1 */
#define SAHARA_EXECUTE_CMD		0xd  /* Min protocol version 2.1 */
#define SAHARA_EXECUTE_RESP_CMD		0xe  /* Min protocol version 2.1 */
#define SAHARA_EXECUTE_DATA_CMD		0xf  /* Min protocol version 2.1 */
#define SAHARA_MEM_DEBUG64_CMD		0x10 /* Min protocol version 2.5 */
#define SAHARA_MEM_READ64_CMD		0x11 /* Min protocol version 2.5 */
#define SAHARA_READ_DATA64_CMD		0x12 /* Min protocol version 2.8 */
#define SAHARA_RESET_STATE_CMD		0x13 /* Min protocol version 2.9 */
#define SAHARA_WRITE_DATA_CMD		0x14 /* Min protocol version 3.0 */

/* Transfer sizing constants */
#define SAHARA_PACKET_MAX_SIZE		0xffffU /* MHI_MAX_MTU */
#define SAHARA_TRANSFER_MAX_SIZE	0x80000
#define SAHARA_READ_MAX_SIZE		0xfff0U /* Avoid unaligned requests */
#define SAHARA_NUM_TX_BUF		DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,\
							SAHARA_PACKET_MAX_SIZE)
#define SAHARA_IMAGE_ID_NONE		U32_MAX

#define SAHARA_VERSION			2
#define SAHARA_SUCCESS			0
#define SAHARA_TABLE_ENTRY_STR_LEN	20

/* Modes a device can request in its HELLO packet */
#define SAHARA_MODE_IMAGE_TX_PENDING	0x0
#define SAHARA_MODE_IMAGE_TX_COMPLETE	0x1
#define SAHARA_MODE_MEMORY_DEBUG	0x2
#define SAHARA_MODE_COMMAND		0x3

/* Total packet lengths (header + payload) for each packet type we use */
#define SAHARA_HELLO_LENGTH		0x30
#define SAHARA_READ_DATA_LENGTH		0x14
#define SAHARA_END_OF_IMAGE_LENGTH	0x10
#define SAHARA_DONE_LENGTH		0x8
#define SAHARA_RESET_LENGTH		0x8
#define SAHARA_MEM_DEBUG64_LENGTH	0x18
#define SAHARA_MEM_READ64_LENGTH	0x18
62 
/*
 * On-the-wire Sahara packet. 'cmd' selects which union member is valid and
 * 'length' is the total packet size in bytes including this 8-byte header.
 * All fields are little-endian on the wire.
 */
struct sahara_packet {
	__le32 cmd;
	__le32 length;

	union {
		/* SAHARA_HELLO_CMD - device opens the session */
		struct {
			__le32 version;
			__le32 version_compat;
			__le32 max_length;
			__le32 mode;
		} hello;
		/* SAHARA_HELLO_RESP_CMD - host accepts the session */
		struct {
			__le32 version;
			__le32 version_compat;
			__le32 status;
			__le32 mode;
		} hello_resp;
		/* SAHARA_READ_DATA_CMD - device requests a firmware chunk */
		struct {
			__le32 image;
			__le32 offset;
			__le32 length;
		} read_data;
		/* SAHARA_END_OF_IMAGE_CMD - device reports transfer result */
		struct {
			__le32 image;
			__le32 status;
		} end_of_image;
		/* SAHARA_MEM_DEBUG64_CMD - device advertises a crashdump table */
		struct {
			__le64 table_address;
			__le64 table_length;
		} memory_debug64;
		/* SAHARA_MEM_READ64_CMD - host requests raw device memory */
		struct {
			__le64 memory_address;
			__le64 memory_length;
		} memory_read64;
	};
};
99 
/*
 * One entry of the crashdump table as laid out by the device (little-endian).
 * Each entry describes a region of device memory to fetch in memory-debug
 * mode. The strings come from the device and are not trusted to be
 * NUL-terminated.
 */
struct sahara_debug_table_entry64 {
	__le64	type;
	__le64	address;
	__le64	length;
	char	description[SAHARA_TABLE_ENTRY_STR_LEN];
	char	filename[SAHARA_TABLE_ENTRY_STR_LEN];
};
107 
/*
 * CPU-endian copy of a crashdump table entry, embedded in the devcoredump
 * blob handed to userspace. Field-for-field mirror of
 * struct sahara_debug_table_entry64.
 */
struct sahara_dump_table_entry {
	u64	type;
	u64	address;
	u64	length;
	char	description[SAHARA_TABLE_ENTRY_STR_LEN];
	char	filename[SAHARA_TABLE_ENTRY_STR_LEN];
};
115 
#define SAHARA_DUMP_V1_MAGIC 0x1234567890abcdef
#define SAHARA_DUMP_V1_VER   1
/* Header at the start of the devcoredump blob; see the layout diagram below */
struct sahara_memory_dump_meta_v1 {
	u64	magic;		/* SAHARA_DUMP_V1_MAGIC - content identifier */
	u64	version;	/* SAHARA_DUMP_V1_VER */
	u64	dump_size;	/* header + table + all images, in bytes */
	u64	table_size;	/* size of the table that follows, in bytes */
};
124 
125 /*
126  * Layout of crashdump provided to user via devcoredump
127  *              +------------------------------------------+
128  *              |         Crashdump Meta structure         |
129  *              | type: struct sahara_memory_dump_meta_v1  |
130  *              +------------------------------------------+
131  *              |             Crashdump Table              |
132  *              | type: array of struct                    |
133  *              |       sahara_dump_table_entry            |
134  *              |                                          |
135  *              |                                          |
136  *              +------------------------------------------+
137  *              |                Crashdump                 |
138  *              |                                          |
139  *              |                                          |
140  *              |                                          |
141  *              |                                          |
142  *              |                                          |
143  *              +------------------------------------------+
144  *
145  * First is the metadata header. Userspace can use the magic number to verify
146  * the content type, and then check the version for the rest of the format.
147  * New versions should keep the magic number location/value, and version
148  * location, but increment the version value.
149  *
150  * For v1, the metadata lists the size of the entire dump (header + table +
151  * dump) and the size of the table. Then the dump image table, which describes
152  * the contents of the dump. Finally all the images are listed in order, with
153  * no deadspace in between. Userspace can use the sizes listed in the image
154  * table to reconstruct the individual images.
155  */
156 
/* Per-MHI-device state for the Sahara protocol handler */
struct sahara_context {
	struct sahara_packet		*tx[SAHARA_NUM_TX_BUF];	/* outbound packet buffers */
	struct sahara_packet		*rx;		/* single inbound packet buffer */
	struct work_struct		fw_work;	/* firmware-transfer-mode processing */
	struct work_struct		dump_work;	/* memory-dump-mode processing */
	struct mhi_device		*mhi_dev;
	const char			**image_table;	/* image id -> firmware file name */
	u32				table_size;	/* number of entries in image_table */
	u32				active_image_id;/* SAHARA_IMAGE_ID_NONE if none loaded */
	const struct firmware		*firmware;	/* currently loaded image, if any */
	u64				dump_table_address;	/* device address of dump table */
	u64				dump_table_length;	/* zeroed once table is parsed */
	size_t				rx_size;	/* bytes received in last transfer */
	size_t				rx_size_requested;	/* bytes expected from last mem read */
	void				*mem_dump;	/* devcoredump blob under construction */
	size_t				mem_dump_sz;
	struct sahara_dump_table_entry	*dump_image;	/* image currently being fetched */
	u64				dump_image_offset;	/* next offset to request in image */
	void				*mem_dump_freespace;	/* write cursor within mem_dump */
	u64				dump_images_left;
	bool				is_mem_dump_mode;	/* routes rx to dump_work vs fw_work */
};
179 
/*
 * Firmware files AIC100 may request, indexed by Sahara image id.
 * Gaps are image ids this driver does not provide.
 */
static const char *aic100_image_table[] = {
	[1]  = "qcom/aic100/fw1.bin",
	[2]  = "qcom/aic100/fw2.bin",
	[4]  = "qcom/aic100/fw4.bin",
	[5]  = "qcom/aic100/fw5.bin",
	[6]  = "qcom/aic100/fw6.bin",
	[8]  = "qcom/aic100/fw8.bin",
	[9]  = "qcom/aic100/fw9.bin",
	[10] = "qcom/aic100/fw10.bin",
};
190 
/*
 * Resolve a device-requested image id to a firmware file and load it.
 * Only one image may be active at a time; requesting a different id while
 * another is active is a protocol violation. Returns 0 on success or a
 * negative errno.
 */
static int sahara_find_image(struct sahara_context *context, u32 image_id)
{
	int ret;

	/* Device may request additional chunks of the already-loaded image */
	if (image_id == context->active_image_id)
		return 0;

	if (context->active_image_id != SAHARA_IMAGE_ID_NONE) {
		dev_err(&context->mhi_dev->dev, "image id %d is not valid as %d is active\n",
			image_id, context->active_image_id);
		return -EINVAL;
	}

	if (image_id >= context->table_size || !context->image_table[image_id]) {
		dev_err(&context->mhi_dev->dev, "request for unknown image: %d\n", image_id);
		return -EINVAL;
	}

	/*
	 * This image might be optional. The device may continue without it.
	 * Only the device knows. Suppress error messages that could suggest
	 * a problem when we were actually able to continue.
	 */
	ret = firmware_request_nowarn(&context->firmware,
				      context->image_table[image_id],
				      &context->mhi_dev->dev);
	if (ret) {
		dev_dbg(&context->mhi_dev->dev, "request for image id %d / file %s failed %d\n",
			image_id, context->image_table[image_id], ret);
		return ret;
	}

	context->active_image_id = image_id;

	return 0;
}
227 
228 static void sahara_release_image(struct sahara_context *context)
229 {
230 	if (context->active_image_id != SAHARA_IMAGE_ID_NONE)
231 		release_firmware(context->firmware);
232 	context->active_image_id = SAHARA_IMAGE_ID_NONE;
233 }
234 
/*
 * Send SAHARA_RESET_CMD to the device, aborting the current operation.
 * Also exits memory dump mode, since a reset terminates any dump transfer.
 */
static void sahara_send_reset(struct sahara_context *context)
{
	int ret;

	context->is_mem_dump_mode = false;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_RESET_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to send reset response %d\n", ret);
}
249 
250 static void sahara_hello(struct sahara_context *context)
251 {
252 	int ret;
253 
254 	dev_dbg(&context->mhi_dev->dev,
255 		"HELLO cmd received. length:%d version:%d version_compat:%d max_length:%d mode:%d\n",
256 		le32_to_cpu(context->rx->length),
257 		le32_to_cpu(context->rx->hello.version),
258 		le32_to_cpu(context->rx->hello.version_compat),
259 		le32_to_cpu(context->rx->hello.max_length),
260 		le32_to_cpu(context->rx->hello.mode));
261 
262 	if (le32_to_cpu(context->rx->length) != SAHARA_HELLO_LENGTH) {
263 		dev_err(&context->mhi_dev->dev, "Malformed hello packet - length %d\n",
264 			le32_to_cpu(context->rx->length));
265 		return;
266 	}
267 	if (le32_to_cpu(context->rx->hello.version) != SAHARA_VERSION) {
268 		dev_err(&context->mhi_dev->dev, "Unsupported hello packet - version %d\n",
269 			le32_to_cpu(context->rx->hello.version));
270 		return;
271 	}
272 
273 	if (le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_PENDING &&
274 	    le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE &&
275 	    le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_MEMORY_DEBUG) {
276 		dev_err(&context->mhi_dev->dev, "Unsupported hello packet - mode %d\n",
277 			le32_to_cpu(context->rx->hello.mode));
278 		return;
279 	}
280 
281 	context->tx[0]->cmd = cpu_to_le32(SAHARA_HELLO_RESP_CMD);
282 	context->tx[0]->length = cpu_to_le32(SAHARA_HELLO_LENGTH);
283 	context->tx[0]->hello_resp.version = cpu_to_le32(SAHARA_VERSION);
284 	context->tx[0]->hello_resp.version_compat = cpu_to_le32(SAHARA_VERSION);
285 	context->tx[0]->hello_resp.status = cpu_to_le32(SAHARA_SUCCESS);
286 	context->tx[0]->hello_resp.mode = context->rx->hello_resp.mode;
287 
288 	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
289 			    SAHARA_HELLO_LENGTH, MHI_EOT);
290 	if (ret)
291 		dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret);
292 }
293 
/*
 * Handle SAHARA_READ_DATA_CMD: the device asks for a byte range of a
 * firmware image. Validate the request against the firmware file, then
 * stream the data as a chain of MHI buffers ending with EOT, since the
 * device expects a single transfer of exactly the requested size.
 */
static void sahara_read_data(struct sahara_context *context)
{
	u32 image_id, data_offset, data_len, pkt_data_len;
	int ret;
	int i;

	dev_dbg(&context->mhi_dev->dev,
		"READ_DATA cmd received. length:%d image:%d offset:%d data_length:%d\n",
		le32_to_cpu(context->rx->length),
		le32_to_cpu(context->rx->read_data.image),
		le32_to_cpu(context->rx->read_data.offset),
		le32_to_cpu(context->rx->read_data.length));

	if (le32_to_cpu(context->rx->length) != SAHARA_READ_DATA_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}

	image_id = le32_to_cpu(context->rx->read_data.image);
	data_offset = le32_to_cpu(context->rx->read_data.offset);
	data_len = le32_to_cpu(context->rx->read_data.length);

	ret = sahara_find_image(context, image_id);
	if (ret) {
		sahara_send_reset(context);
		return;
	}

	/*
	 * Image is released when the device is done with it via
	 * SAHARA_END_OF_IMAGE_CMD. sahara_send_reset() will either cause the
	 * device to retry the operation with a modification, or decide to be
	 * done with the image and trigger SAHARA_END_OF_IMAGE_CMD.
	 * release_image() is called from SAHARA_END_OF_IMAGE_CMD processing
	 * and is not needed here on error.
	 */

	if (data_len > SAHARA_TRANSFER_MAX_SIZE) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n",
			data_len, SAHARA_TRANSFER_MAX_SIZE);
		sahara_send_reset(context);
		return;
	}

	if (data_offset >= context->firmware->size) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d exceeds file size %zu\n",
			data_offset, context->firmware->size);
		sahara_send_reset(context);
		return;
	}

	/* size_add() saturates, guarding against u32 offset+len overflow */
	if (size_add(data_offset, data_len) > context->firmware->size) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d and length %d exceeds file size %zu\n",
			data_offset, data_len, context->firmware->size);
		sahara_send_reset(context);
		return;
	}

	/*
	 * Split the request across the tx buffers, chaining all but the last
	 * (MHI_CHAIN) so the device sees one transfer terminated by MHI_EOT.
	 * SAHARA_NUM_TX_BUF is sized to always cover SAHARA_TRANSFER_MAX_SIZE.
	 */
	for (i = 0; i < SAHARA_NUM_TX_BUF && data_len; ++i) {
		pkt_data_len = min(data_len, SAHARA_PACKET_MAX_SIZE);

		memcpy(context->tx[i], &context->firmware->data[data_offset], pkt_data_len);

		data_offset += pkt_data_len;
		data_len -= pkt_data_len;

		ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
				    context->tx[i], pkt_data_len,
				    !data_len ? MHI_EOT : MHI_CHAIN);
		if (ret) {
			dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n",
				ret);
			return;
		}
	}
}
371 
/*
 * Handle SAHARA_END_OF_IMAGE_CMD: the device is done with the active image.
 * Release the firmware and, if the device reported success, send DONE to
 * indicate we have no more images queued for it.
 */
static void sahara_end_of_image(struct sahara_context *context)
{
	int ret;

	dev_dbg(&context->mhi_dev->dev,
		"END_OF_IMAGE cmd received. length:%d image:%d status:%d\n",
		le32_to_cpu(context->rx->length),
		le32_to_cpu(context->rx->end_of_image.image),
		le32_to_cpu(context->rx->end_of_image.status));

	if (le32_to_cpu(context->rx->length) != SAHARA_END_OF_IMAGE_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}

	if (context->active_image_id != SAHARA_IMAGE_ID_NONE &&
	    le32_to_cpu(context->rx->end_of_image.image) != context->active_image_id) {
		dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - image %d is not the active image\n",
			le32_to_cpu(context->rx->end_of_image.image));
		return;
	}

	sahara_release_image(context);

	/* Non-zero status: device hit an error; let it drive the next step */
	if (le32_to_cpu(context->rx->end_of_image.status))
		return;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_DONE_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_DONE_LENGTH);

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_DONE_LENGTH, MHI_EOT);
	if (ret)
		dev_dbg(&context->mhi_dev->dev, "Unable to send done response %d\n", ret);
}
408 
/*
 * Handle SAHARA_MEM_DEBUG64_CMD: the device has crashed and advertises a
 * table describing the memory regions of its crashdump. Validate the table
 * geometry, switch into memory dump mode, and issue a MEM_READ64 for the
 * table itself.
 */
static void sahara_memory_debug64(struct sahara_context *context)
{
	int ret;

	dev_dbg(&context->mhi_dev->dev,
		"MEMORY DEBUG64 cmd received. length:%d table_address:%#llx table_length:%#llx\n",
		le32_to_cpu(context->rx->length),
		le64_to_cpu(context->rx->memory_debug64.table_address),
		le64_to_cpu(context->rx->memory_debug64.table_length));

	if (le32_to_cpu(context->rx->length) != SAHARA_MEM_DEBUG64_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}

	context->dump_table_address = le64_to_cpu(context->rx->memory_debug64.table_address);
	context->dump_table_length = le64_to_cpu(context->rx->memory_debug64.table_length);

	/* The table must be a non-empty whole number of 64-bit entries */
	if (context->dump_table_length % sizeof(struct sahara_debug_table_entry64) != 0 ||
	    !context->dump_table_length) {
		dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - table length %lld\n",
			context->dump_table_length);
		return;
	}

	/*
	 * From this point, the protocol flips. We make memory_read requests to
	 * the device, and the device responds with the raw data. If the device
	 * has an error, it will send an End of Image command. First we need to
	 * request the memory dump table so that we know where all the pieces
	 * of the dump are that we can consume.
	 */

	context->is_mem_dump_mode = true;

	/*
	 * Assume that the table is smaller than our MTU so that we can read it
	 * in one shot. The spec does not put an upper limit on the table, but
	 * no known device will exceed this.
	 */
	if (context->dump_table_length > SAHARA_PACKET_MAX_SIZE) {
		dev_err(&context->mhi_dev->dev, "Memory dump table length %lld exceeds supported size. Discarding dump\n",
			context->dump_table_length);
		sahara_send_reset(context);
		return;
	}

	context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
	context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_table_address);
	context->tx[0]->memory_read64.memory_length = cpu_to_le64(context->dump_table_length);

	/* Raw data replies carry no header; remember how much we expect */
	context->rx_size_requested = context->dump_table_length;

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_MEM_READ64_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to send read for dump table %d\n", ret);
}
469 
470 static void sahara_processing(struct work_struct *work)
471 {
472 	struct sahara_context *context = container_of(work, struct sahara_context, fw_work);
473 	int ret;
474 
475 	switch (le32_to_cpu(context->rx->cmd)) {
476 	case SAHARA_HELLO_CMD:
477 		sahara_hello(context);
478 		break;
479 	case SAHARA_READ_DATA_CMD:
480 		sahara_read_data(context);
481 		break;
482 	case SAHARA_END_OF_IMAGE_CMD:
483 		sahara_end_of_image(context);
484 		break;
485 	case SAHARA_DONE_RESP_CMD:
486 		/* Intentional do nothing as we don't need to exit an app */
487 		break;
488 	case SAHARA_RESET_RESP_CMD:
489 		/* Intentional do nothing as we don't need to exit an app */
490 		break;
491 	case SAHARA_MEM_DEBUG64_CMD:
492 		sahara_memory_debug64(context);
493 		break;
494 	default:
495 		dev_err(&context->mhi_dev->dev, "Unknown command %d\n",
496 			le32_to_cpu(context->rx->cmd));
497 		break;
498 	}
499 
500 	ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
501 			    SAHARA_PACKET_MAX_SIZE, MHI_EOT);
502 	if (ret)
503 		dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
504 }
505 
/*
 * The rx buffer holds the raw crashdump table just read from the device.
 * Validate it, size and allocate the devcoredump blob (meta + CPU-endian
 * table + images), populate the meta/table portions, then request the first
 * chunk of the first image. Any failure discards the dump via reset.
 */
static void sahara_parse_dump_table(struct sahara_context *context)
{
	struct sahara_dump_table_entry *image_out_table;
	struct sahara_debug_table_entry64 *dev_table;
	struct sahara_memory_dump_meta_v1 *dump_meta;
	u64 table_nents;
	u64 dump_length;
	int ret;
	u64 i;

	table_nents = context->dump_table_length / sizeof(*dev_table);
	context->dump_images_left = table_nents;
	dump_length = 0;

	dev_table = (struct sahara_debug_table_entry64 *)(context->rx);
	for (i = 0; i < table_nents; ++i) {
		/* Do not trust the device, ensure the strings are terminated */
		dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
		dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;

		/* size_add() saturates to SIZE_MAX on overflow */
		dump_length = size_add(dump_length, le64_to_cpu(dev_table[i].length));
		if (dump_length == SIZE_MAX) {
			/* Discard the dump */
			sahara_send_reset(context);
			return;
		}

		dev_dbg(&context->mhi_dev->dev,
			"Memory dump table entry %lld type: %lld address: %#llx length: %#llx description: \"%s\" filename \"%s\"\n",
			i,
			le64_to_cpu(dev_table[i].type),
			le64_to_cpu(dev_table[i].address),
			le64_to_cpu(dev_table[i].length),
			dev_table[i].description,
			dev_table[i].filename);
	}

	/* Account for the meta header and the userspace-visible table */
	dump_length = size_add(dump_length, sizeof(*dump_meta));
	if (dump_length == SIZE_MAX) {
		/* Discard the dump */
		sahara_send_reset(context);
		return;
	}
	dump_length = size_add(dump_length, size_mul(sizeof(*image_out_table), table_nents));
	if (dump_length == SIZE_MAX) {
		/* Discard the dump */
		sahara_send_reset(context);
		return;
	}

	context->mem_dump_sz = dump_length;
	context->mem_dump = vzalloc(dump_length);
	if (!context->mem_dump) {
		/* Discard the dump */
		sahara_send_reset(context);
		return;
	}

	/* Populate the dump metadata and table for userspace */
	dump_meta = context->mem_dump;
	dump_meta->magic = SAHARA_DUMP_V1_MAGIC;
	dump_meta->version = SAHARA_DUMP_V1_VER;
	dump_meta->dump_size = dump_length;
	dump_meta->table_size = context->dump_table_length;

	image_out_table = context->mem_dump + sizeof(*dump_meta);
	for (i = 0; i < table_nents; ++i) {
		image_out_table[i].type = le64_to_cpu(dev_table[i].type);
		image_out_table[i].address = le64_to_cpu(dev_table[i].address);
		image_out_table[i].length = le64_to_cpu(dev_table[i].length);
		strscpy(image_out_table[i].description, dev_table[i].description,
			SAHARA_TABLE_ENTRY_STR_LEN);
		strscpy(image_out_table[i].filename,
			dev_table[i].filename,
			SAHARA_TABLE_ENTRY_STR_LEN);
	}

	/* Image data is written immediately after the table */
	context->mem_dump_freespace = &image_out_table[i];

	/* Done parsing the table, switch to image dump mode */
	context->dump_table_length = 0;

	/* Request the first chunk of the first image */
	context->dump_image = &image_out_table[0];
	dump_length = min(context->dump_image->length, SAHARA_READ_MAX_SIZE);
	/* Avoid requesting EOI sized data so that we can identify errors */
	if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
		dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;

	context->dump_image_offset = dump_length;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
	context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_image->address);
	context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);

	context->rx_size_requested = dump_length;

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_MEM_READ64_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to send read for dump content %d\n", ret);
}
609 
/*
 * The rx buffer holds a raw chunk of the current dump image. Append it to
 * the devcoredump blob, advance to the next image when the current one is
 * complete, and either hand the finished dump to devcoredump or request the
 * next chunk from the device.
 */
static void sahara_parse_dump_image(struct sahara_context *context)
{
	u64 dump_length;
	int ret;

	memcpy(context->mem_dump_freespace, context->rx, context->rx_size);
	context->mem_dump_freespace += context->rx_size;

	if (context->dump_image_offset >= context->dump_image->length) {
		/* Need to move to next image */
		context->dump_image++;
		context->dump_images_left--;
		context->dump_image_offset = 0;

		if (!context->dump_images_left) {
			/* Dump done */
			/* devcoredump takes ownership of mem_dump and frees it */
			dev_coredumpv(context->mhi_dev->mhi_cntrl->cntrl_dev,
				      context->mem_dump,
				      context->mem_dump_sz,
				      GFP_KERNEL);
			context->mem_dump = NULL;
			sahara_send_reset(context);
			return;
		}
	}

	/* Get next image chunk */
	dump_length = context->dump_image->length - context->dump_image_offset;
	dump_length = min(dump_length, SAHARA_READ_MAX_SIZE);
	/* Avoid requesting EOI sized data so that we can identify errors */
	if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
		dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
	context->tx[0]->memory_read64.memory_address =
		cpu_to_le64(context->dump_image->address + context->dump_image_offset);
	context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);

	context->dump_image_offset += dump_length;
	context->rx_size_requested = dump_length;

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_MEM_READ64_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev,
			"Unable to send read for dump content %d\n", ret);
}
658 
/*
 * Workqueue handler for memory-dump mode. Replies to our MEM_READ64
 * requests are raw data with no packet header, so we validate by size:
 * anything other than the requested size or an EOI-sized error packet is
 * fatal to the dump.
 */
static void sahara_dump_processing(struct work_struct *work)
{
	struct sahara_context *context = container_of(work, struct sahara_context, dump_work);
	int ret;

	/*
	 * We should get the expected raw data, but if the device has an error
	 * it is supposed to send EOI with an error code.
	 */
	if (context->rx_size != context->rx_size_requested &&
	    context->rx_size != SAHARA_END_OF_IMAGE_LENGTH) {
		dev_err(&context->mhi_dev->dev,
			"Unexpected response to read_data. Expected size: %#zx got: %#zx\n",
			context->rx_size_requested,
			context->rx_size);
		goto error;
	}

	/* EOI-sized and carrying the EOI command: device reported an error */
	if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
	    le32_to_cpu(context->rx->cmd) == SAHARA_END_OF_IMAGE_CMD) {
		dev_err(&context->mhi_dev->dev,
			"Unexpected EOI response to read_data. Status: %d\n",
			le32_to_cpu(context->rx->end_of_image.status));
		goto error;
	}

	/* EOI-sized but not an EOI packet: cannot be valid raw data either */
	if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
	    le32_to_cpu(context->rx->cmd) != SAHARA_END_OF_IMAGE_CMD) {
		dev_err(&context->mhi_dev->dev,
			"Invalid EOI response to read_data. CMD: %d\n",
			le32_to_cpu(context->rx->cmd));
		goto error;
	}

	/*
	 * Need to know if we received the dump table, or part of a dump image.
	 * Since we get raw data, we cannot tell from the data itself. Instead,
	 * we use the stored dump_table_length, which we zero after we read and
	 * process the entire table.
	 */
	if (context->dump_table_length)
		sahara_parse_dump_table(context);
	else
		sahara_parse_dump_image(context);

	ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
			    SAHARA_PACKET_MAX_SIZE, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);

	return;

error:
	vfree(context->mem_dump);
	context->mem_dump = NULL;
	sahara_send_reset(context);
}
716 
/*
 * Bind to the QAIC_SAHARA MHI channel: allocate the rx buffer and the tx
 * buffer pool, initialize per-device state, and queue the first receive.
 * All allocations are devm-managed. Returns 0 on success or a negative
 * errno.
 */
static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	struct sahara_context *context;
	int ret;
	int i;

	context = devm_kzalloc(&mhi_dev->dev, sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->rx = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
	if (!context->rx)
		return -ENOMEM;

	/*
	 * AIC100 defines SAHARA_TRANSFER_MAX_SIZE as the largest value it
	 * will request for READ_DATA. This is larger than
	 * SAHARA_PACKET_MAX_SIZE, and we need 9x SAHARA_PACKET_MAX_SIZE to
	 * cover SAHARA_TRANSFER_MAX_SIZE. When the remote side issues a
	 * READ_DATA, it requires a transfer of the exact size requested. We
	 * can use MHI_CHAIN to link multiple buffers into a single transfer
	 * but the remote side will not consume the buffers until it sees an
	 * EOT, thus we need to allocate enough buffers to put in the tx fifo
	 * to cover an entire READ_DATA request of the max size.
	 */
	for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) {
		context->tx[i] = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
		if (!context->tx[i])
			return -ENOMEM;
	}

	context->mhi_dev = mhi_dev;
	INIT_WORK(&context->fw_work, sahara_processing);
	INIT_WORK(&context->dump_work, sahara_dump_processing);
	context->image_table = aic100_image_table;
	context->table_size = ARRAY_SIZE(aic100_image_table);
	context->active_image_id = SAHARA_IMAGE_ID_NONE;
	dev_set_drvdata(&mhi_dev->dev, context);

	ret = mhi_prepare_for_transfer(mhi_dev);
	if (ret)
		return ret;

	/* Arm the receive path; the device initiates the protocol with HELLO */
	ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, context->rx, SAHARA_PACKET_MAX_SIZE, MHI_EOT);
	if (ret) {
		mhi_unprepare_from_transfer(mhi_dev);
		return ret;
	}

	return 0;
}
768 
769 static void sahara_mhi_remove(struct mhi_device *mhi_dev)
770 {
771 	struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
772 
773 	cancel_work_sync(&context->fw_work);
774 	cancel_work_sync(&context->dump_work);
775 	if (context->mem_dump)
776 		vfree(context->mem_dump);
777 	sahara_release_image(context);
778 	mhi_unprepare_from_transfer(mhi_dev);
779 }
780 
/*
 * TX completion callback. Nothing to do - tx buffers are owned by the
 * context and reused per command, so no per-transfer cleanup is needed.
 */
static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
}
784 
785 static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
786 {
787 	struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
788 
789 	if (!mhi_result->transaction_status) {
790 		context->rx_size = mhi_result->bytes_xferd;
791 		if (context->is_mem_dump_mode)
792 			schedule_work(&context->dump_work);
793 		else
794 			schedule_work(&context->fw_work);
795 	}
796 
797 }
798 
/* Bind to the dedicated Sahara channel exposed by the QAIC MHI controller */
static const struct mhi_device_id sahara_mhi_match_table[] = {
	{ .chan = "QAIC_SAHARA", },
	{},
};
803 
/* MHI client driver servicing the Sahara protocol channel */
static struct mhi_driver sahara_mhi_driver = {
	.id_table = sahara_mhi_match_table,
	.remove = sahara_mhi_remove,
	.probe = sahara_mhi_probe,
	.ul_xfer_cb = sahara_mhi_ul_xfer_cb,
	.dl_xfer_cb = sahara_mhi_dl_xfer_cb,
	.driver = {
		.name = "sahara",
	},
};
814 
/* Register the Sahara MHI client driver; called from qaic module init */
int sahara_register(void)
{
	return mhi_driver_register(&sahara_mhi_driver);
}
819 
/* Unregister the Sahara MHI client driver; called from qaic module exit */
void sahara_unregister(void)
{
	mhi_driver_unregister(&sahara_mhi_driver);
}
824