xref: /linux/drivers/accel/qaic/sahara.c (revision 906fd46a65383cd639e5eec72a047efc33045d86)
// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/firmware.h>
#include <linux/limits.h>
#include <linux/mhi.h>
#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "sahara.h"

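/*
 * Host-side handling of Qualcomm's Sahara protocol, used here to feed
 * firmware images to AIC100 over the QAIC_SAHARA MHI channel. The device
 * drives the exchange; this driver only answers requests. A typical image
 * transfer looks like (device request on the left, host response on the
 * right):
 *
 *   HELLO        -> HELLO_RESP
 *   READ_DATA    -> <requested bytes of the image>  (repeated as needed)
 *   END_OF_IMAGE -> DONE
 *   DONE_RESP    -> (no action)
 *
 * Only this image-transfer subset of the protocol is handled; the remaining
 * command IDs below are listed for reference.
 */
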
#define SAHARA_HELLO_CMD		0x1  /* Min protocol version 1.0 */
#define SAHARA_HELLO_RESP_CMD		0x2  /* Min protocol version 1.0 */
#define SAHARA_READ_DATA_CMD		0x3  /* Min protocol version 1.0 */
#define SAHARA_END_OF_IMAGE_CMD		0x4  /* Min protocol version 1.0 */
#define SAHARA_DONE_CMD			0x5  /* Min protocol version 1.0 */
#define SAHARA_DONE_RESP_CMD		0x6  /* Min protocol version 1.0 */
#define SAHARA_RESET_CMD		0x7  /* Min protocol version 1.0 */
#define SAHARA_RESET_RESP_CMD		0x8  /* Min protocol version 1.0 */
#define SAHARA_MEM_DEBUG_CMD		0x9  /* Min protocol version 2.0 */
#define SAHARA_MEM_READ_CMD		0xa  /* Min protocol version 2.0 */
#define SAHARA_CMD_READY_CMD		0xb  /* Min protocol version 2.1 */
#define SAHARA_SWITCH_MODE_CMD		0xc  /* Min protocol version 2.1 */
#define SAHARA_EXECUTE_CMD		0xd  /* Min protocol version 2.1 */
#define SAHARA_EXECUTE_RESP_CMD		0xe  /* Min protocol version 2.1 */
#define SAHARA_EXECUTE_DATA_CMD		0xf  /* Min protocol version 2.1 */
#define SAHARA_MEM_DEBUG64_CMD		0x10 /* Min protocol version 2.5 */
#define SAHARA_MEM_READ64_CMD		0x11 /* Min protocol version 2.5 */
#define SAHARA_READ_DATA64_CMD		0x12 /* Min protocol version 2.8 */
#define SAHARA_RESET_STATE_CMD		0x13 /* Min protocol version 2.9 */
#define SAHARA_WRITE_DATA_CMD		0x14 /* Min protocol version 3.0 */

#define SAHARA_PACKET_MAX_SIZE		0xffffU /* MHI_MAX_MTU */
#define SAHARA_TRANSFER_MAX_SIZE	0x80000
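/*
 * Worst case: a maximum size READ_DATA (SAHARA_TRANSFER_MAX_SIZE) split into
 * SAHARA_PACKET_MAX_SIZE chunks, i.e. DIV_ROUND_UP(0x80000, 0xffff) = 9 TX
 * buffers.
 */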
#define SAHARA_NUM_TX_BUF		DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,\
							SAHARA_PACKET_MAX_SIZE)
#define SAHARA_IMAGE_ID_NONE		U32_MAX

#define SAHARA_VERSION			2
#define SAHARA_SUCCESS			0

#define SAHARA_MODE_IMAGE_TX_PENDING	0x0
#define SAHARA_MODE_IMAGE_TX_COMPLETE	0x1
#define SAHARA_MODE_MEMORY_DEBUG	0x2
#define SAHARA_MODE_COMMAND		0x3

#define SAHARA_HELLO_LENGTH		0x30
#define SAHARA_READ_DATA_LENGTH		0x14
#define SAHARA_END_OF_IMAGE_LENGTH	0x10
#define SAHARA_DONE_LENGTH		0x8
#define SAHARA_RESET_LENGTH		0x8

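/*
 * On-the-wire Sahara packet. Every packet starts with a command ID and the
 * total packet length; the union holds the command-specific payload. All
 * fields are little-endian.
 */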
struct sahara_packet {
	__le32 cmd;
	__le32 length;

	union {
		struct {
			__le32 version;
			__le32 version_compat;
			__le32 max_length;
			__le32 mode;
		} hello;
		struct {
			__le32 version;
			__le32 version_compat;
			__le32 status;
			__le32 mode;
		} hello_resp;
		struct {
			__le32 image;
			__le32 offset;
			__le32 length;
		} read_data;
		struct {
			__le32 image;
			__le32 status;
		} end_of_image;
	};
};

struct sahara_context {
	struct sahara_packet		*tx[SAHARA_NUM_TX_BUF];
	struct sahara_packet		*rx;
	struct work_struct		work;
	struct mhi_device		*mhi_dev;
	const char			**image_table;
	u32				table_size;
	u32				active_image_id;
	const struct firmware		*firmware;
};

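/*
 * Maps Sahara image IDs, as requested by the device, to firmware file names
 * for AIC100. IDs without an entry are rejected by sahara_find_image().
 */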
static const char *aic100_image_table[] = {
	[1]  = "qcom/aic100/fw1.bin",
	[2]  = "qcom/aic100/fw2.bin",
	[4]  = "qcom/aic100/fw4.bin",
	[5]  = "qcom/aic100/fw5.bin",
	[6]  = "qcom/aic100/fw6.bin",
	[8]  = "qcom/aic100/fw8.bin",
	[9]  = "qcom/aic100/fw9.bin",
	[10] = "qcom/aic100/fw10.bin",
};

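/*
 * Resolve a device-requested image ID to a firmware file and make it the
 * active image. Only one image can be active at a time; it stays loaded until
 * the device signals END_OF_IMAGE for it.
 */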
static int sahara_find_image(struct sahara_context *context, u32 image_id)
{
	int ret;

	if (image_id == context->active_image_id)
		return 0;

	if (context->active_image_id != SAHARA_IMAGE_ID_NONE) {
		dev_err(&context->mhi_dev->dev, "image id %d is not valid as %d is active\n",
			image_id, context->active_image_id);
		return -EINVAL;
	}

	if (image_id >= context->table_size || !context->image_table[image_id]) {
		dev_err(&context->mhi_dev->dev, "request for unknown image: %d\n", image_id);
		return -EINVAL;
	}

	/*
	 * This image might be optional. The device may continue without it.
	 * Only the device knows. Suppress error messages that could suggest a
	 * problem when we were actually able to continue.
	 */
	ret = firmware_request_nowarn(&context->firmware,
				      context->image_table[image_id],
				      &context->mhi_dev->dev);
	if (ret) {
		dev_dbg(&context->mhi_dev->dev, "request for image id %d / file %s failed %d\n",
			image_id, context->image_table[image_id], ret);
		return ret;
	}

	context->active_image_id = image_id;

	return 0;
}

static void sahara_release_image(struct sahara_context *context)
{
	if (context->active_image_id != SAHARA_IMAGE_ID_NONE)
		release_firmware(context->firmware);
	context->active_image_id = SAHARA_IMAGE_ID_NONE;
}

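/*
 * Send a RESET packet to the device. Used to bail out when a request cannot
 * be serviced (unknown image, malformed or oversized READ_DATA).
 */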
static void sahara_send_reset(struct sahara_context *context)
{
	int ret;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_RESET_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to send reset response %d\n", ret);
}

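/*
 * Handle the device's HELLO packet - validate the length, protocol version,
 * and mode, then answer with a HELLO_RESP that echoes the requested mode.
 */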
static void sahara_hello(struct sahara_context *context)
{
	int ret;

	dev_dbg(&context->mhi_dev->dev,
		"HELLO cmd received. length:%d version:%d version_compat:%d max_length:%d mode:%d\n",
		le32_to_cpu(context->rx->length),
		le32_to_cpu(context->rx->hello.version),
		le32_to_cpu(context->rx->hello.version_compat),
		le32_to_cpu(context->rx->hello.max_length),
		le32_to_cpu(context->rx->hello.mode));

	if (le32_to_cpu(context->rx->length) != SAHARA_HELLO_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed hello packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}
	if (le32_to_cpu(context->rx->hello.version) != SAHARA_VERSION) {
		dev_err(&context->mhi_dev->dev, "Unsupported hello packet - version %d\n",
			le32_to_cpu(context->rx->hello.version));
		return;
	}

	if (le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_PENDING &&
	    le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE) {
		dev_err(&context->mhi_dev->dev, "Unsupported hello packet - mode %d\n",
			le32_to_cpu(context->rx->hello.mode));
		return;
	}

	context->tx[0]->cmd = cpu_to_le32(SAHARA_HELLO_RESP_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_HELLO_LENGTH);
	context->tx[0]->hello_resp.version = cpu_to_le32(SAHARA_VERSION);
	context->tx[0]->hello_resp.version_compat = cpu_to_le32(SAHARA_VERSION);
	context->tx[0]->hello_resp.status = cpu_to_le32(SAHARA_SUCCESS);
	context->tx[0]->hello_resp.mode = context->rx->hello_resp.mode;

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_HELLO_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret);
}

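/*
 * Handle READ_DATA - the device requests a byte range of a named image. The
 * range is validated against the firmware file and then streamed to the
 * device as a chain of TX buffers terminated with MHI_EOT.
 */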
static void sahara_read_data(struct sahara_context *context)
{
	u32 image_id, data_offset, data_len, pkt_data_len;
	int ret;
	int i;

	dev_dbg(&context->mhi_dev->dev,
		"READ_DATA cmd received. length:%d image:%d offset:%d data_length:%d\n",
		le32_to_cpu(context->rx->length),
		le32_to_cpu(context->rx->read_data.image),
		le32_to_cpu(context->rx->read_data.offset),
		le32_to_cpu(context->rx->read_data.length));

	if (le32_to_cpu(context->rx->length) != SAHARA_READ_DATA_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}

	image_id = le32_to_cpu(context->rx->read_data.image);
	data_offset = le32_to_cpu(context->rx->read_data.offset);
	data_len = le32_to_cpu(context->rx->read_data.length);

	ret = sahara_find_image(context, image_id);
	if (ret) {
		sahara_send_reset(context);
		return;
	}

	/*
	 * The image is released when the device is done with it via
	 * SAHARA_END_OF_IMAGE_CMD. sahara_send_reset() will either cause the
	 * device to retry the operation with a modification, or to decide it
	 * is done with the image and trigger SAHARA_END_OF_IMAGE_CMD.
	 * sahara_release_image() is called from SAHARA_END_OF_IMAGE_CMD
	 * processing and is not needed here on error.
	 */

	if (data_len > SAHARA_TRANSFER_MAX_SIZE) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n",
			data_len, SAHARA_TRANSFER_MAX_SIZE);
		sahara_send_reset(context);
		return;
	}

	if (data_offset >= context->firmware->size) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d exceeds file size %zu\n",
			data_offset, context->firmware->size);
		sahara_send_reset(context);
		return;
	}

	if (size_add(data_offset, data_len) > context->firmware->size) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d and length %d exceeds file size %zu\n",
			data_offset, data_len, context->firmware->size);
		sahara_send_reset(context);
		return;
	}

	for (i = 0; i < SAHARA_NUM_TX_BUF && data_len; ++i) {
		pkt_data_len = min(data_len, SAHARA_PACKET_MAX_SIZE);

		memcpy(context->tx[i], &context->firmware->data[data_offset], pkt_data_len);

		data_offset += pkt_data_len;
		data_len -= pkt_data_len;

		ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
				    context->tx[i], pkt_data_len,
				    !data_len ? MHI_EOT : MHI_CHAIN);
		if (ret) {
			dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n",
				ret);
			return;
		}
	}
}

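/*
 * Handle END_OF_IMAGE - the device is finished with the active image. Release
 * the firmware and, if the device reported success, send DONE.
 */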
static void sahara_end_of_image(struct sahara_context *context)
{
	int ret;

	dev_dbg(&context->mhi_dev->dev,
		"END_OF_IMAGE cmd received. length:%d image:%d status:%d\n",
		le32_to_cpu(context->rx->length),
		le32_to_cpu(context->rx->end_of_image.image),
		le32_to_cpu(context->rx->end_of_image.status));

	if (le32_to_cpu(context->rx->length) != SAHARA_END_OF_IMAGE_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}

	if (context->active_image_id != SAHARA_IMAGE_ID_NONE &&
	    le32_to_cpu(context->rx->end_of_image.image) != context->active_image_id) {
		dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - image %d is not the active image\n",
			le32_to_cpu(context->rx->end_of_image.image));
		return;
	}

	sahara_release_image(context);

	if (le32_to_cpu(context->rx->end_of_image.status))
		return;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_DONE_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_DONE_LENGTH);

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_DONE_LENGTH, MHI_EOT);
	if (ret)
		dev_dbg(&context->mhi_dev->dev, "Unable to send done response %d\n", ret);
}

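/*
 * Workqueue handler - dispatch on the command in the packet that was just
 * received, then requeue the RX buffer for the device's next packet.
 */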
static void sahara_processing(struct work_struct *work)
{
	struct sahara_context *context = container_of(work, struct sahara_context, work);
	int ret;

	switch (le32_to_cpu(context->rx->cmd)) {
	case SAHARA_HELLO_CMD:
		sahara_hello(context);
		break;
	case SAHARA_READ_DATA_CMD:
		sahara_read_data(context);
		break;
	case SAHARA_END_OF_IMAGE_CMD:
		sahara_end_of_image(context);
		break;
	case SAHARA_DONE_RESP_CMD:
		/* Intentionally do nothing as we don't need to exit an app */
		break;
	default:
		dev_err(&context->mhi_dev->dev, "Unknown command %d\n",
			le32_to_cpu(context->rx->cmd));
		break;
	}

	ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
			    SAHARA_PACKET_MAX_SIZE, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
}

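/*
 * Set up the Sahara channel - allocate one RX buffer and enough TX buffers to
 * satisfy a maximum size READ_DATA, then post the first RX buffer and wait
 * for the device to speak first.
 */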
static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	struct sahara_context *context;
	int ret;
	int i;

	context = devm_kzalloc(&mhi_dev->dev, sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->rx = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
	if (!context->rx)
		return -ENOMEM;

	/*
	 * AIC100 defines SAHARA_TRANSFER_MAX_SIZE as the largest value it
	 * will request for READ_DATA. This is larger than
	 * SAHARA_PACKET_MAX_SIZE, and we need 9x SAHARA_PACKET_MAX_SIZE to
	 * cover SAHARA_TRANSFER_MAX_SIZE. When the remote side issues a
	 * READ_DATA, it requires a transfer of the exact size requested. We
	 * can use MHI_CHAIN to link multiple buffers into a single transfer
	 * but the remote side will not consume the buffers until it sees an
	 * EOT, thus we need to allocate enough buffers to put in the tx fifo
	 * to cover an entire READ_DATA request of the max size.
	 */
	for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) {
		context->tx[i] = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
		if (!context->tx[i])
			return -ENOMEM;
	}

	context->mhi_dev = mhi_dev;
	INIT_WORK(&context->work, sahara_processing);
	context->image_table = aic100_image_table;
	context->table_size = ARRAY_SIZE(aic100_image_table);
	context->active_image_id = SAHARA_IMAGE_ID_NONE;
	dev_set_drvdata(&mhi_dev->dev, context);

	ret = mhi_prepare_for_transfer(mhi_dev);
	if (ret)
		return ret;

	ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, context->rx, SAHARA_PACKET_MAX_SIZE, MHI_EOT);
	if (ret) {
		mhi_unprepare_from_transfer(mhi_dev);
		return ret;
	}

	return 0;
}

static void sahara_mhi_remove(struct mhi_device *mhi_dev)
{
	struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);

	cancel_work_sync(&context->work);
	sahara_release_image(context);
	mhi_unprepare_from_transfer(mhi_dev);
}

static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
}

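/*
 * A packet arrived from the device. Handling is deferred to the workqueue;
 * responding may involve request_firmware(), which can sleep, and is likely
 * not safe from this callback context.
 */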
static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);

	if (!mhi_result->transaction_status)
		schedule_work(&context->work);
}

static const struct mhi_device_id sahara_mhi_match_table[] = {
	{ .chan = "QAIC_SAHARA", },
	{},
};

static struct mhi_driver sahara_mhi_driver = {
	.id_table = sahara_mhi_match_table,
	.remove = sahara_mhi_remove,
	.probe = sahara_mhi_probe,
	.ul_xfer_cb = sahara_mhi_ul_xfer_cb,
	.dl_xfer_cb = sahara_mhi_dl_xfer_cb,
	.driver = {
		.name = "sahara",
	},
};

int sahara_register(void)
{
	return mhi_driver_register(&sahara_mhi_driver);
}

void sahara_unregister(void)
{
	mhi_driver_unregister(&sahara_mhi_driver);
}