// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
#include "hw.h"

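/* Re-arm the BMI session: clearing done_sent allows BMI commands to be
 * issued again, e.g. after the target has been (re)powered.
 */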
void ath10k_bmi_start(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;
}
EXPORT_SYMBOL(ath10k_bmi_start);

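/* Send BMI_DONE to tell the target that the BMI phase is finished.
 * Once done_sent is set, the remaining BMI helpers below refuse to run
 * and return -EBUSY.
 */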
int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

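/* Query the target version and type with BMI_GET_TARGET_INFO and copy
 * the CPU-endian values into @target_info.
 */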
int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

#define TARGET_VERSION_SENTINAL 0xffffffffu

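/* SDIO variant of ath10k_bmi_get_target_info(): the response may be
 * preceded by a sentinel word and an explicit length, so it is read in
 * several small exchanges instead of one.
 */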
int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
				    struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen, ver_len;
	__le32 tmp;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	/* Step 1: Read 4 bytes of the target info and check if it is
	 * the special sentinel version word or the first word in the
	 * version response.
	 */
	resplen = sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	/* Some SDIO boards have a special sentinel word before the real
	 * version response.
	 */
	if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
		/* Step 1b: Read the version length */
		resplen = sizeof(u32);
		ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
						  &resplen);
		if (ret) {
			ath10k_warn(ar, "unable to read from device\n");
			return ret;
		}
	}

	ver_len = __le32_to_cpu(tmp);

	/* Step 2: Check the target info length */
	if (ver_len != sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
			    ver_len, sizeof(resp.get_target_info));
		return -EINVAL;
	}

	/* Step 3: Read the rest of the version response */
	resplen = sizeof(resp.get_target_info) - sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
					  &resp.get_target_info.version,
					  &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

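/* Read @length bytes of target memory at @address into @buffer, in
 * chunks of at most BMI_MAX_DATA_SIZE bytes per BMI exchange.
 */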
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id            = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len  = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer  += rxlen;
		length  -= rxlen;
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_bmi_read_memory);

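/* Write a 32-bit value to a target SoC register via
 * BMI_WRITE_SOC_REGISTER.
 */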
int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi write soc register 0x%08x val 0x%08x\n",
		   address, reg_val);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi write soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
	cmd.write_soc_reg.addr = __cpu_to_le32(address);
	cmd.write_soc_reg.value = __cpu_to_le32(reg_val);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "Unable to write soc register to device: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

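/* Read a 32-bit target SoC register via BMI_READ_SOC_REGISTER and
 * return its value through @reg_val.
 */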
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
	u32 resplen = sizeof(resp.read_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi read soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
	cmd.read_soc_reg.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "Unable to read soc register from device: %d\n",
			    ret);
		return ret;
	}

	*reg_val = __le32_to_cpu(resp.read_soc_reg.value);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
		   *reg_val);

	return 0;
}

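/* Write @length bytes from @buffer to target memory at @address. Each
 * chunk is padded up to a 4-byte boundary on the wire; the original
 * chunk size is restored afterwards so the loop terminates correctly.
 */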
int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id             = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len  = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fixup roundup() so `length` zeroes out for last chunk */
		txlen = min(txlen, length);

		address += txlen;
		buffer  += txlen;
		length  -= txlen;
	}

	return 0;
}

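/* Ask the target to execute code at @address with argument @param and
 * return the 32-bit result through @result.
 */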
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr  = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}

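/* Same as ath10k_bmi_lz_data(), but with a heap-allocated command
 * buffer so that up to BMI_MAX_LARGE_DATA_SIZE bytes can be sent per
 * exchange on targets that support large BMI downloads.
 */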
static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd *cmd;
	u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
	u32 txlen;
	int ret;
	size_t buf_len;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
	cmd = kzalloc(buf_len, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	while (length) {
		txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd->id          = __cpu_to_le32(BMI_LZ_DATA);
		cmd->lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd->lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			kfree(cmd);
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	kfree(cmd);

	return 0;
}

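/* Stream LZ payload data to the target in chunks of at most
 * BMI_MAX_DATA_SIZE bytes; an LZ stream is expected to have been opened
 * with ath10k_bmi_lz_stream_start() beforehand.
 */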
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id          = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

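/* Open an LZ stream on the target at @address so that subsequent
 * BMI_LZ_DATA payloads are written there.
 */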
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
		return ret;
	}

	return 0;
}

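/* Download an image using the LZ stream commands: send the 4-byte
 * aligned head of @buffer, then any remaining bytes in a zero-padded
 * trailer word, and finally restart the stream to flush target caches.
 */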
int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%pK length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	if (ar->hw_params.bmi_large_size_download)
		ret = ath10k_bmi_lz_data_large(ar, buffer, head_len);
	else
		ret = ath10k_bmi_lz_data(ar, buffer, head_len);

	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}

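/* Set the address at which the target application starts executing
 * after BMI is done.
 */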
int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi set start command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_SET_APP_START);
	cmd.set_app_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to set start to the device:%d\n", ret);
		return ret;
	}

	return 0;
}