// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <linux/export.h>
#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
#include "hw.h"

void ath10k_bmi_start(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

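	/* Clear done_sent so BMI commands are accepted again; it is set
	 * once BMI_DONE has been issued to the target.
	 */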
	ar->bmi.done_sent = false;
}
EXPORT_SYMBOL(ath10k_bmi_start);

int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

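	/* Mark BMI as finished before sending BMI_DONE, which tells the
	 * target that the BMI phase is over and it may continue booting.
	 */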
	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

#define TARGET_VERSION_SENTINAL 0xffffffffu

int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
				    struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen, ver_len;
	__le32 tmp;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	/* Step 1: Read 4 bytes of the target info and check if it is
	 * the special sentinel version word or the first word in the
	 * version response.
	 */
	resplen = sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	/* Some SDIO boards have a special sentinel word before the real
	 * version response.
	 */
	if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
		/* Step 1b: Read the version length */
		resplen = sizeof(u32);
		ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
						  &resplen);
		if (ret) {
			ath10k_warn(ar, "unable to read from device\n");
			return ret;
		}
	}

	ver_len = __le32_to_cpu(tmp);

	/* Step 2: Check the target info length */
	if (ver_len != sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
			    ver_len, sizeof(resp.get_target_info));
		return -EINVAL;
	}

	/* Step 3: Read the rest of the version response */
	resplen = sizeof(resp.get_target_info) - sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
					  &resp.get_target_info.version,
					  &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

int ath10k_bmi_read_memory(struct ath10k *ar,
#if defined(__linux__)
			   u32 address, void *buffer, u32 length)
#elif defined(__FreeBSD__)
			   u32 address, u8 *buffer, u32 length)
#endif
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

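	/* Large reads are split into chunks of at most BMI_MAX_DATA_SIZE
	 * bytes; each BMI exchange returns one chunk of payload.
	 */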
	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer += rxlen;
		length -= rxlen;
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_bmi_read_memory);

int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi write soc register 0x%08x val 0x%08x\n",
		   address, reg_val);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi write soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
	cmd.write_soc_reg.addr = __cpu_to_le32(address);
	cmd.write_soc_reg.value = __cpu_to_le32(reg_val);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "Unable to write soc register to device: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
	u32 resplen = sizeof(resp.read_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi read soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
	cmd.read_soc_reg.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "Unable to read soc register from device: %d\n",
			    ret);
		return ret;
	}

	*reg_val = __le32_to_cpu(resp.read_soc_reg.value);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
		   *reg_val);

	return 0;
}

int ath10k_bmi_write_memory(struct ath10k *ar,
#if defined(__linux__)
			    u32 address, const void *buffer, u32 length)
#elif defined(__FreeBSD__)
			    u32 address, const u8 *buffer, u32 length)
#endif
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

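	/* Writes are chunked so that each BMI exchange, including the
	 * command header, fits within BMI_MAX_DATA_SIZE.
	 */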
	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fixup roundup() so `length` zeroes out for last chunk */
		txlen = min(txlen, length);

		address += txlen;
		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}

#if defined(__linux__)
static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
#elif defined(__FreeBSD__)
static int ath10k_bmi_lz_data_large(struct ath10k *ar, const u8 *buffer, u32 length)
#endif
{
	struct bmi_cmd *cmd;
	u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
	u32 txlen;
	int ret;
	size_t buf_len;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%p length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

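	/* The large payload does not fit in an on-stack struct bmi_cmd,
	 * so allocate a command buffer sized for BMI_MAX_LARGE_DATA_SIZE.
	 */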
	buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
	cmd = kzalloc(buf_len, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	while (length) {
		txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd->id = __cpu_to_le32(BMI_LZ_DATA);
		cmd->lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd->lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			kfree(cmd);
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	kfree(cmd);

	return 0;
}

#if defined(__linux__)
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
#elif defined(__FreeBSD__)
static
int ath10k_bmi_lz_data(struct ath10k *ar, const u8 *buffer, u32 length)
#endif
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
		return ret;
	}

	return 0;
}

int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%p length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
#if defined(__linux__)
		memcpy(trailer, buffer + head_len, trailer_len);
#elif defined(__FreeBSD__)
		memcpy(trailer, (const u8 *)buffer + head_len, trailer_len);
#endif

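	/* Stream the 4-byte aligned head of the image; chips that support
	 * it use the larger BMI transfer buffer.
	 */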
	if (ar->hw_params.bmi_large_size_download)
		ret = ath10k_bmi_lz_data_large(ar, buffer, head_len);
	else
		ret = ath10k_bmi_lz_data(ar, buffer, head_len);

	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}

int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi set start command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_SET_APP_START);
	cmd.set_app_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to set start to the device:%d\n", ret);
		return ret;
	}

	return 0;
}