// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
#include "hw.h"

void ath10k_bmi_start(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;
}
EXPORT_SYMBOL(ath10k_bmi_start);

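/* Send BMI_DONE to the target to leave the BMI phase. The command is only
 * sent once; later calls are skipped because done_sent is already set.
 */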
int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

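/* Query the target's version and type via BMI_GET_TARGET_INFO. Only valid
 * while the BMI phase is still open, i.e. before BMI_DONE has been sent.
 */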
int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

#define TARGET_VERSION_SENTINAL 0xffffffffu

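/* SDIO variant of the target info query. The first word of the response is
 * either the target info length or the TARGET_VERSION_SENTINAL marker; in
 * the latter case the length follows in the next word. The remainder of
 * struct bmi_target_info is then read in a final transfer.
 */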
int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
				    struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen, ver_len;
	__le32 tmp;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	/* Step 1: Read 4 bytes of the target info and check if it is
	 * the special sentinel version word or the first word in the
	 * version response.
	 */
	resplen = sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	/* Some SDIO boards have a special sentinel word before the real
	 * version response.
	 */
	if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
		/* Step 1b: Read the version length */
		resplen = sizeof(u32);
		ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
						  &resplen);
		if (ret) {
			ath10k_warn(ar, "unable to read from device\n");
			return ret;
		}
	}

	ver_len = __le32_to_cpu(tmp);

	/* Step 2: Check the target info length */
	if (ver_len != sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
			    ver_len, sizeof(resp.get_target_info));
		return -EINVAL;
	}

	/* Step 3: Read the rest of the version response */
	resplen = sizeof(resp.get_target_info) - sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
					  &resp.get_target_info.version,
					  &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

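/* Read a block of target memory, splitting the request into chunks of at
 * most BMI_MAX_DATA_SIZE bytes per BMI_READ_MEMORY exchange.
 */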
int ath10k_bmi_read_memory(struct ath10k *ar,
#if defined(__linux__)
			   u32 address, void *buffer, u32 length)
#elif defined(__FreeBSD__)
			   u32 address, u8 *buffer, u32 length)
#endif
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer += rxlen;
		length -= rxlen;
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_bmi_read_memory);

int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi write soc register 0x%08x val 0x%08x\n",
		   address, reg_val);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi write soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
	cmd.write_soc_reg.addr = __cpu_to_le32(address);
	cmd.write_soc_reg.value = __cpu_to_le32(reg_val);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "Unable to write soc register to device: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
	u32 resplen = sizeof(resp.read_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi read soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
	cmd.read_soc_reg.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "Unable to read soc register from device: %d\n",
			    ret);
		return ret;
	}

	*reg_val = __le32_to_cpu(resp.read_soc_reg.value);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
		   *reg_val);

	return 0;
}

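/* Write a block of target memory via BMI_WRITE_MEMORY, chunked to fit the
 * command buffer. Each payload is padded to a 4-byte multiple before the
 * transfer; the advance is then clamped to the remaining length so the
 * loop terminates cleanly on the last chunk.
 */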
int ath10k_bmi_write_memory(struct ath10k *ar,
#if defined(__linux__)
			    u32 address, const void *buffer, u32 length)
#elif defined(__FreeBSD__)
			    u32 address, const u8 *buffer, u32 length)
#endif
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fixup roundup() so `length` zeroes out for last chunk */
		txlen = min(txlen, length);

		address += txlen;
		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

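/* Ask the target to execute code at @address with @param and return the
 * 32-bit result in @result.
 */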
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}

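/* Same as ath10k_bmi_lz_data() but uses a heap-allocated command buffer so
 * that chunks of up to BMI_MAX_LARGE_DATA_SIZE bytes can be sent per
 * exchange.
 */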
#if defined(__linux__)
static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
#elif defined(__FreeBSD__)
static int ath10k_bmi_lz_data_large(struct ath10k *ar, const u8 *buffer, u32 length)
#endif
{
	struct bmi_cmd *cmd;
	u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
	u32 txlen;
	int ret;
	size_t buf_len;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
	cmd = kzalloc(buf_len, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	while (length) {
		txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd->id = __cpu_to_le32(BMI_LZ_DATA);
		cmd->lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd->lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			kfree(cmd);
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	kfree(cmd);

	return 0;
}

#if defined(__linux__)
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
#elif defined(__FreeBSD__)
static
int ath10k_bmi_lz_data(struct ath10k *ar, const u8 *buffer, u32 length)
#endif
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to start LZ stream on the device\n");
		return ret;
	}

	return 0;
}

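/* Download an image through the LZ stream interface: start a stream at
 * @address, push the 4-byte-aligned head of the buffer, send any remaining
 * tail bytes in a zero-padded word, and finally restart the stream at
 * address 0 to flush the target's caches.
 */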
int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%pK length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
#if defined(__linux__)
		memcpy(trailer, buffer + head_len, trailer_len);
#elif defined(__FreeBSD__)
		memcpy(trailer, (const u8 *)buffer + head_len, trailer_len);
#endif

	if (ar->hw_params.bmi_large_size_download)
		ret = ath10k_bmi_lz_data_large(ar, buffer, head_len);
	else
		ret = ath10k_bmi_lz_data(ar, buffer, head_len);

	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}

int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi set start command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_SET_APP_START);
	cmd.set_app_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to set start address on the device: %d\n",
			    ret);
		return ret;
	}

	return 0;
}