1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5 */
6
7 #include <linux/export.h>
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/remoteproc.h>
11 #include <linux/firmware.h>
12 #include <linux/of.h>
13 #include <linux/of_graph.h>
14 #include "ahb.h"
15 #include "core.h"
16 #include "dp_tx.h"
17 #include "dp_rx.h"
18 #include "debug.h"
19 #include "debugfs.h"
20 #include "fw.h"
21 #include "hif.h"
22 #include "pci.h"
23 #include "wow.h"
24 #include "dp_cmn.h"
25 #include "peer.h"
26
/* Debug category mask consumed by ath12k_dbg(); runtime-writable through
 * /sys/module/ath12k/parameters/debug_mask.
 */
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
EXPORT_SYMBOL(ath12k_debug_mask);

/* When set at load time the firmware is booted in factory test mode
 * (read-only parameter, see ath12k_core_soc_create()).
 */
bool ath12k_ftm_mode;
module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
EXPORT_SYMBOL(ath12k_ftm_mode);

/* protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);

static DEFINE_MUTEX(ath12k_hw_group_mutex);
41
/* Per-memory-profile tuning table: vdev/client limits and DP ring/descriptor
 * sizes, scaled down for the 512 MB low-memory profile relative to the
 * default profile.
 */
static const struct
ath12k_mem_profile_based_param ath12k_mem_profile_based_param[] = {
	[ATH12K_QMI_MEMORY_MODE_DEFAULT] = {
		.num_vdevs = 17,
		.max_client_single = 512,
		.max_client_dbs = 128,
		.max_client_dbs_sbs = 128,
		.dp_params = {
			.tx_comp_ring_size = 32768,
			.rxdma_monitor_buf_ring_size = 4096,
			/* NOTE(review): 8092 is not a power of two — looks
			 * like a typo for 8192; confirm against hardware
			 * ring-size constraints before changing.
			 */
			.rxdma_monitor_dst_ring_size = 8092,
			.num_pool_tx_desc = 32768,
			.rx_desc_count = 12288,
		},
	},
	[ATH12K_QMI_MEMORY_MODE_LOW_512_M] = {
		.num_vdevs = 9,
		.max_client_single = 128,
		.max_client_dbs = 64,
		.max_client_dbs_sbs = 64,
		.dp_params = {
			.tx_comp_ring_size = 16384,
			.rxdma_monitor_buf_ring_size = 256,
			.rxdma_monitor_dst_ring_size = 512,
			.num_pool_tx_desc = 16384,
			.rx_desc_count = 6144,
		},
	},
};
71
/* Push the rfkill configuration to every radio of the device.
 *
 * No-op when the firmware does not advertise WMI_SYS_CAP_INFO_RFKILL or
 * when ACPI asks rfkill to be disabled. Radios answering -EOPNOTSUPP are
 * tolerated; any other error aborts and is returned.
 */
static int ath12k_core_rfkill_config(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret = 0, i;

	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
		return 0;

	if (ath12k_acpi_get_disable_rfkill(ab))
		return 0;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;

		ret = ath12k_mac_rfkill_config(ar);
		if (ret && ret != -EOPNOTSUPP) {
			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
			return ret;
		}
	}

	/* NOTE(review): may return -EOPNOTSUPP from the last radio; the
	 * caller in ath12k_core_hw_group_start() explicitly treats that
	 * value as success.
	 */
	return ret;
}
95
96 /* Check if we need to continue with suspend/resume operation.
97 * Return:
98 * a negative value: error happens and don't continue.
99 * 0: no error but don't continue.
100 * positive value: no error and do continue.
101 */
ath12k_core_continue_suspend_resume(struct ath12k_base * ab)102 static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
103 {
104 struct ath12k *ar;
105
106 if (!ab->hw_params->supports_suspend)
107 return -EOPNOTSUPP;
108
109 /* so far single_pdev_only chips have supports_suspend as true
110 * so pass 0 as a dummy pdev_id here.
111 */
112 ar = ab->pdevs[0].ar;
113 if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
114 return 0;
115
116 return 1;
117 }
118
/* System suspend entry point: drain pending tx on every radio, then
 * pre-complete restart_completed so a later ath12k_core_resume() cannot
 * block forever (see the comment before complete() below).
 */
int ath12k_core_suspend(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret, i;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		if (!ar)
			continue;

		/* the tx-flush path runs under the wiphy lock */
		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);

		ret = ath12k_mac_wait_tx_complete(ar);
		if (ret) {
			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
			return ret;
		}

		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
	}

	/* PM framework skips suspend_late/resume_early callbacks
	 * if other devices report errors in their suspend callbacks.
	 * However ath12k_core_resume() would still be called because
	 * here we return success thus kernel put us on dpm_suspended_list.
	 * Since we won't go through a power down/up cycle, there is
	 * no chance to call complete(&ab->restart_completed) in
	 * ath12k_core_restart(), making ath12k_core_resume() timeout.
	 * So call it here to avoid this issue. This also works in case
	 * no error happens thus suspend_late/resume_early get called,
	 * because it will be reinitialized in ath12k_core_resume_early().
	 */
	complete(&ab->restart_completed);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend);
161
/* Late suspend stage: quiesce ACPI notifications, mask both data-path and
 * copy-engine interrupts, then power the device down.
 */
int ath12k_core_suspend_late(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	ath12k_acpi_stop(ab);

	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	/* NOTE(review): the second argument marks this as the suspend
	 * variant of power-down — confirm exact semantics against the
	 * ath12k_hif_power_down() implementation.
	 */
	ath12k_hif_power_down(ab, true);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend_late);
180
/* Early resume stage: re-arm restart_completed (it was pre-completed in
 * ath12k_core_suspend() and is waited on by ath12k_core_resume()), then
 * power the device back up.
 */
int ath12k_core_resume_early(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	reinit_completion(&ab->restart_completed);
	ret = ath12k_hif_power_up(ab);
	if (ret)
		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);

	return ret;
}
EXPORT_SYMBOL(ath12k_core_resume_early);
197
ath12k_core_resume(struct ath12k_base * ab)198 int ath12k_core_resume(struct ath12k_base *ab)
199 {
200 long time_left;
201 int ret;
202
203 ret = ath12k_core_continue_suspend_resume(ab);
204 if (ret <= 0)
205 return ret;
206
207 time_left = wait_for_completion_timeout(&ab->restart_completed,
208 ATH12K_RESET_TIMEOUT_HZ);
209 if (time_left == 0) {
210 ath12k_warn(ab, "timeout while waiting for restart complete");
211 return -ETIMEDOUT;
212 }
213
214 return 0;
215 }
216 EXPORT_SYMBOL(ath12k_core_resume);
217
/* Compose the board-file lookup name into @name.
 *
 * @with_variant:  append ",variant=<bdf_ext>" when the SMBIOS/DT variant
 *                 string is present.
 * @bus_type_mode: emit only "bus=<bus>" (used for regdb default lookup).
 * @with_default:  substitute ATH12K_BOARD_ID_DEFAULT for the QMI board id.
 *
 * Always returns 0; the output is truncated to @name_len via scnprintf().
 */
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					   size_t name_len, bool with_variant,
					   bool bus_type_mode, bool with_default)
{
	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = {};

	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
		scnprintf(variant, sizeof(variant), ",variant=%s",
			  ab->qmi.target.bdf_ext);

	switch (ab->id.bdf_search) {
	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
		/* PCI-style search: include vendor/device/subsystem ids */
		if (bus_type_mode)
			scnprintf(name, name_len,
				  "bus=%s",
				  ath12k_bus_str(ab->hif.bus));
		else
			scnprintf(name, name_len,
				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
				  ath12k_bus_str(ab->hif.bus),
				  ab->id.vendor, ab->id.device,
				  ab->id.subsystem_vendor,
				  ab->id.subsystem_device,
				  ab->qmi.target.chip_id,
				  ab->qmi.target.board_id,
				  variant);
		break;
	default:
		/* QMI-only search keyed by chip id and board id */
		scnprintf(name, name_len,
			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
			  ath12k_bus_str(ab->hif.bus),
			  ab->qmi.target.chip_id,
			  with_default ?
			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
			  variant);
		break;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);

	return 0;
}
261
/* Primary board name: full id set plus the ",variant=" suffix when set */
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					 size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len,
					       true, false, false);
}
267
/* Fallback board name: no variant suffix, default board id substituted */
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len,
					       false, false, true);
}
273
/* Bus-only board name ("bus=<bus>"), used for the regdb default lookup */
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len,
					       false, true, true);
}
279
ath12k_core_firmware_request(struct ath12k_base * ab,const char * file)280 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
281 const char *file)
282 {
283 const struct firmware *fw;
284 char path[100];
285 int ret;
286
287 if (!file)
288 return ERR_PTR(-ENOENT);
289
290 ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
291
292 ret = firmware_request_nowarn(&fw, path, ab->dev);
293 if (ret)
294 return ERR_PTR(ret);
295
296 ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
297 path, fw->size);
298
299 return fw;
300 }
301
ath12k_core_free_bdf(struct ath12k_base * ab,struct ath12k_board_data * bd)302 void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
303 {
304 if (!IS_ERR(bd->fw))
305 release_firmware(bd->fw);
306
307 memset(bd, 0, sizeof(*bd));
308 }
309
/* Scan a BOARD/REGDB container IE for the entry matching @boardname.
 *
 * The container holds alternating name (@name_id) and data (@data_id) IEs;
 * a data IE applies to the most recent matching name IE. On success
 * bd->data/bd->len point INTO @buf (no copy — the firmware blob must stay
 * alive while bd is used).
 *
 * Returns 0 on match, -ENOENT when no entry matches, -EINVAL on a
 * malformed container.
 */
static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
					 struct ath12k_board_data *bd,
					 const void *buf, size_t buf_len,
					 const char *boardname,
					 int ie_id,
					 int name_id,
					 int data_id)
{
	const struct ath12k_fw_ie *hdr;
	bool name_match_found;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	name_match_found = false;

	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath12k_fw_ie)) {
		hdr = buf;
		board_ie_id = le32_to_cpu(hdr->id);
		board_ie_len = le32_to_cpu(hdr->len);
		board_ie_data = hdr->data;

		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		/* payloads are 4-byte aligned; reject truncated IEs */
		if (buf_len < ALIGN(board_ie_len, 4)) {
			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
				   ath12k_bd_ie_type_str(ie_id),
				   buf_len, ALIGN(board_ie_len, 4));
			ret = -EINVAL;
			goto out;
		}

		if (board_ie_id == name_id) {
			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
					board_ie_data, board_ie_len);

			/* exact-length compare: the stored name carries no
			 * NUL terminator
			 */
			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, strlen(boardname));
			if (ret)
				goto next;

			name_match_found = true;
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found match %s for name '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);
		} else if (board_ie_id == data_id) {
			if (!name_match_found)
				/* no match found */
				goto next;

			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found %s for '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);

			/* borrow the data in place; see function comment */
			bd->data = board_ie_data;
			bd->len = board_ie_len;

			ret = 0;
			goto out;
		} else {
			ath12k_warn(ab, "unknown %s id found: %d\n",
				    ath12k_bd_ie_type_str(ie_id),
				    board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = ALIGN(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	ret = -ENOENT;

out:
	return ret;
}
394
/* Fetch board/regdb data for @boardname from the API-2 container file
 * (board-2.bin): verify the magic, then walk top-level IEs looking for
 * @ie_id_match and delegate the inner name/data search to
 * ath12k_core_parse_bd_ie_board().
 *
 * On success bd->fw stays held (bd->data points into it); on any failure
 * the firmware is released via ath12k_core_free_bdf().
 */
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
					      struct ath12k_board_data *bd,
					      const char *boardname,
					      int ie_id_match,
					      int name_id,
					      int data_id)
{
	size_t len, magic_len;
	const u8 *data;
	char *filename, filepath[100];
	size_t ie_len;
	struct ath12k_fw_ie *hdr;
	int ret, ie_id;

	filename = ATH12K_BOARD_API2_FILE;

	/* reuse an already-requested blob across successive lookups */
	if (!bd->fw)
		bd->fw = ath12k_core_firmware_request(ab, filename);

	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	data = bd->fw->data;
	len = bd->fw->size;

	ath12k_core_create_firmware_path(ab, filename,
					 filepath, sizeof(filepath));

	/* magic has extra null byte padded */
	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
		ath12k_err(ab, "found invalid board magic\n");
		ret = -EINVAL;
		goto err;
	}

	/* magic is padded to 4 bytes */
	magic_len = ALIGN(magic_len, 4);
	if (len < magic_len) {
		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	data += magic_len;
	len -= magic_len;

	/* walk the top-level IE stream */
	while (len > sizeof(struct ath12k_fw_ie)) {
		hdr = (struct ath12k_fw_ie *)data;
		ie_id = le32_to_cpu(hdr->id);
		ie_len = le32_to_cpu(hdr->len);

		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < ALIGN(ie_len, 4)) {
			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
				   ie_id, ie_len, len);
			ret = -EINVAL;
			goto err;
		}

		if (ie_id == ie_id_match) {
			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
							    ie_len,
							    boardname,
							    ie_id_match,
							    name_id,
							    data_id);
			if (ret == -ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				goto err;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = ALIGN(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	if (!bd->data || !bd->len) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to fetch %s for %s from %s\n",
			   ath12k_bd_ie_type_str(ie_id_match),
			   boardname, filepath);
		ret = -ENODATA;
		goto err;
	}

	return 0;

err:
	ath12k_core_free_bdf(ab, bd);
	return ret;
}
505
ath12k_core_fetch_board_data_api_1(struct ath12k_base * ab,struct ath12k_board_data * bd,char * filename)506 int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
507 struct ath12k_board_data *bd,
508 char *filename)
509 {
510 bd->fw = ath12k_core_firmware_request(ab, filename);
511 if (IS_ERR(bd->fw))
512 return PTR_ERR(bd->fw);
513
514 bd->data = bd->fw->data;
515 bd->len = bd->fw->size;
516
517 return 0;
518 }
519
#define BOARD_NAME_SIZE 200
/* Resolve board data with a three-stage fallback:
 * 1. API-2 container lookup by the full board name (with variant),
 * 2. API-2 lookup by the fallback name (default board id, no variant),
 * 3. legacy API-1 board.bin.
 */
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
	char *filename, filepath[100];
	int bd_api;
	int ret;

	filename = ATH12K_BOARD_API2_FILE;

	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
	if (ret) {
		ath12k_err(ab, "failed to create board name: %d", ret);
		return ret;
	}

	bd_api = 2;
	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
						     sizeof(fallback_boardname));
	if (ret) {
		ath12k_err(ab, "failed to create fallback board name: %d", ret);
		return ret;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	bd_api = 1;
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
	if (ret) {
		ath12k_core_create_firmware_path(ab, filename,
						 filepath, sizeof(filepath));
		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
			   boardname, filepath);
		/* only mention the fallback name when it differs */
		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
				   fallback_boardname, filepath);

		ath12k_err(ab, "failed to fetch board.bin from %s\n",
			   ab->hw_params->fw.dir);
		return ret;
	}

success:
	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
	return 0;
}
578
/* Resolve the regulatory database with a three-stage fallback mirroring
 * ath12k_core_fetch_bdf(): full board name, bus-only default name, then
 * the standalone regdb file.
 */
int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
	int ret;

	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
						     BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create default board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	/* last resort: dedicated regdb file (regdb is optional, so only
	 * debug-log failures)
	 */
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
	if (ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);

exit:
	if (!ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");

	return ret;
}
624
ath12k_core_get_max_station_per_radio(struct ath12k_base * ab)625 u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
626 {
627 if (ab->num_radios == 2)
628 return TARGET_NUM_STATIONS(ab, DBS);
629 if (ab->num_radios == 3)
630 return TARGET_NUM_STATIONS(ab, DBS_SBS);
631 return TARGET_NUM_STATIONS(ab, SINGLE);
632 }
633
ath12k_core_get_max_peers_per_radio(struct ath12k_base * ab)634 u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
635 {
636 return ath12k_core_get_max_station_per_radio(ab) + TARGET_NUM_VDEVS(ab);
637 }
638 EXPORT_SYMBOL(ath12k_core_get_max_peers_per_radio);
639
ath12k_core_get_reserved_mem(struct ath12k_base * ab,int index)640 struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
641 int index)
642 {
643 struct device *dev = ab->dev;
644 struct reserved_mem *rmem;
645 struct device_node *node;
646
647 node = of_parse_phandle(dev->of_node, "memory-region", index);
648 if (!node) {
649 ath12k_dbg(ab, ATH12K_DBG_BOOT,
650 "failed to parse memory-region for index %d\n", index);
651 return NULL;
652 }
653
654 rmem = of_reserved_mem_lookup(node);
655 of_node_put(node);
656 if (!rmem) {
657 ath12k_dbg(ab, ATH12K_DBG_BOOT,
658 "unable to get memory-region for index %d\n", index);
659 return NULL;
660 }
661
662 return rmem;
663 }
664
665 static inline
ath12k_core_to_group_ref_get(struct ath12k_base * ab)666 void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
667 {
668 struct ath12k_hw_group *ag = ab->ag;
669
670 lockdep_assert_held(&ag->mutex);
671
672 if (ab->hw_group_ref) {
673 ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
674 ag->id);
675 return;
676 }
677
678 ab->hw_group_ref = true;
679 ag->num_started++;
680
681 ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
682 ag->id, ag->num_started);
683 }
684
685 static inline
ath12k_core_to_group_ref_put(struct ath12k_base * ab)686 void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
687 {
688 struct ath12k_hw_group *ag = ab->ag;
689
690 lockdep_assert_held(&ag->mutex);
691
692 if (!ab->hw_group_ref) {
693 ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
694 ag->id);
695 return;
696 }
697
698 ab->hw_group_ref = false;
699 ag->num_started--;
700
701 ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
702 ag->id, ag->num_started);
703 }
704
/* Tear down a started core: reverse of the ath12k_core_start() sequence.
 * After a crash (CRASH_FLUSH set) the firmware-stop QMI exchange is
 * skipped since firmware is already gone.
 */
static void ath12k_core_stop(struct ath12k_base *ab)
{
	ath12k_link_sta_rhash_tbl_destroy(ab);

	/* drop this device's started reference in its hw group */
	ath12k_core_to_group_ref_put(ab);

	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_qmi_firmware_stop(ab);

	ath12k_acpi_stop(ab);

	ath12k_dp_rx_pdev_reo_cleanup(ab);
	ath12k_hif_stop(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));

	/* De-Init of components as needed */
}
723
/* dmi_walk() callback: extract the regulatory country code and the BDF
 * variant string from the vendor SMBIOS table entry.
 *
 * @data is the struct ath12k_base. The first valid entry wins (bdf_ext
 * already set => return early). Updates ab->new_alpha2 under base_lock
 * and ab->qmi.target.bdf_ext.
 */
static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
	struct ath12k_base *ab = data;
	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
	ssize_t copied;
	size_t len;
	int i;

	if (ab->qmi.target.bdf_ext[0] != '\0')
		return;

	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
		return;

	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "wrong smbios bdf ext type length (%d).\n",
			   hdr->length);
		return;
	}

	spin_lock_bh(&ab->base_lock);

	switch (smbios->country_code_flag) {
	case ATH12K_SMBIOS_CC_ISO:
		/* two ISO-3166 alpha2 characters packed into a u16 */
		ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
		ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
			   ab->new_alpha2[0], ab->new_alpha2[1]);
		break;
	case ATH12K_SMBIOS_CC_WW:
		/* "00" selects the worldwide regulatory domain */
		ab->new_alpha2[0] = '0';
		ab->new_alpha2[1] = '0';
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
			   smbios->country_code_flag);
		break;
	}

	spin_unlock_bh(&ab->base_lock);

	if (!smbios->bdf_enabled) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
		return;
	}

	/* Only one string exists (per spec) */
	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant magic does not match.\n");
		return;
	}

	/* reject non-printable characters before accepting the string */
	len = min_t(size_t,
		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
	for (i = 0; i < len; i++) {
		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "bdf variant name contains non ascii chars.\n");
			return;
		}
	}

	/* Copy extension name without magic prefix */
	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
			 sizeof(ab->qmi.target.bdf_ext));
	if (copied < 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant string is longer than the buffer can accommodate\n");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT,
		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
}
803
ath12k_core_check_smbios(struct ath12k_base * ab)804 int ath12k_core_check_smbios(struct ath12k_base *ab)
805 {
806 ab->qmi.target.bdf_ext[0] = '\0';
807 dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
808
809 if (ab->qmi.target.bdf_ext[0] == '\0')
810 return -ENODATA;
811
812 return 0;
813 }
814
/* One-time SoC bring-up: start the QMI service, create debugfs and power
 * the device up. On power-up failure the QMI service and debugfs are
 * unwound again.
 */
static int ath12k_core_soc_create(struct ath12k_base *ab)
{
	int ret;

	if (ath12k_ftm_mode) {
		/* module parameter: boot firmware in factory test mode */
		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
		ath12k_info(ab, "Booting in ftm mode\n");
	}

	ret = ath12k_qmi_init_service(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
		return ret;
	}

	ath12k_debugfs_soc_create(ab);

	ret = ath12k_hif_power_up(ab);
	if (ret) {
		ath12k_err(ab, "failed to power up :%d\n", ret);
		goto err_qmi_deinit;
	}

	return 0;

err_qmi_deinit:
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
	return ret;
}
845
/* Reverse of ath12k_core_soc_create(): power down (non-suspend variant),
 * then free regulatory data, debugfs and the QMI service.
 */
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
	ath12k_hif_power_down(ab, false);
	ath12k_reg_free(ab);
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
}
853
/* Per-device pdev bring-up: DP pdev attach, thermal registration and
 * debugfs. DP is unwound if thermal registration fails.
 */
static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_dp_pdev_alloc(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
		return ret;
	}

	ret = ath12k_thermal_register(ab);
	if (ret) {
		ath12k_err(ab, "could not register thermal device: %d\n", ret);
		goto err_dp_pdev_free;
	}

	ath12k_debugfs_pdev_create(ab);

	return 0;

err_dp_pdev_free:
	ath12k_dp_pdev_free(ab);
	return ret;
}
878
/* Reverse of ath12k_core_pdev_create() */
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
	ath12k_thermal_unregister(ab);
	ath12k_dp_pdev_free(ab);
}
884
/* Bring the device from powered-up firmware to a fully started core:
 * WMI/HTC/HIF attach, service-ready handshake, REO setup, WMI init and
 * unified-ready handshake, hw-mode selection, and HTT version exchange.
 * The order of these steps follows the firmware's expected handshake
 * sequence. Errors unwind through the goto chain at the bottom.
 * Caller must hold ab->core_lock.
 */
static int ath12k_core_start(struct ath12k_base *ab)
{
	int ret;

	lockdep_assert_held(&ab->core_lock);

	ret = ath12k_wmi_attach(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
		return ret;
	}

	ret = ath12k_htc_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init htc: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_hif_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start HIF: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_htc_wait_target(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_dp_htt_connect(ath12k_ab_to_dp(ab));
	if (ret) {
		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_connect(ab);
	if (ret) {
		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_htc_start(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to start HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_wait_for_service_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
			   ret);
		goto err_hif_stop;
	}

	ath12k_hal_cc_config(ab);

	ret = ath12k_dp_rx_pdev_reo_setup(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_cmd_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
		goto err_reo_cleanup;
	}

	ret = ath12k_wmi_wait_for_unified_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	/* put hardware to DBS mode */
	if (ab->hw_params->single_pdev_only) {
		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
		if (ret) {
			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
			goto err_reo_cleanup;
		}
	}

	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
	if (ret) {
		ath12k_err(ab, "failed to send htt version request message: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	ath12k_acpi_set_dsm_func(ab);

	/* Indicate the core start in the appropriate group */
	ath12k_core_to_group_ref_get(ab);

	ret = ath12k_link_sta_rhash_tbl_init(ab);
	if (ret) {
		ath12k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
		goto err_reo_cleanup;
	}

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);
err_hif_stop:
	ath12k_hif_stop(ab);
err_wmi_detach:
	ath12k_wmi_detach(ab);
	return ret;
}
998
/* Per-device teardown used during group stop: mask interrupts and destroy
 * the pdev state, serialized by core_lock.
 */
static void ath12k_core_device_cleanup(struct ath12k_base *ab)
{
	mutex_lock(&ab->core_lock);

	ath12k_hif_irq_disable(ab);
	ath12k_core_pdev_destroy(ab);

	mutex_unlock(&ab->core_lock);
}
1008
/* Stop an entire hw group: unregister from mac80211, tear down MLO links,
 * clean up each member device in reverse order, then destroy the mac
 * state. Caller must hold ag->mutex.
 */
static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	lockdep_assert_held(&ag->mutex);

	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

	ath12k_mac_unregister(ag);

	ath12k_mac_mlo_teardown(ag);

	/* reverse of the start order in ath12k_core_hw_group_start() */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ath12k_core_device_cleanup(ab);
	}

	ath12k_mac_destroy(ag);
}
1034
ath12k_get_num_partner_link(struct ath12k * ar)1035 u8 ath12k_get_num_partner_link(struct ath12k *ar)
1036 {
1037 struct ath12k_base *partner_ab, *ab = ar->ab;
1038 struct ath12k_hw_group *ag = ab->ag;
1039 struct ath12k_pdev *pdev;
1040 u8 num_link = 0;
1041 int i, j;
1042
1043 lockdep_assert_held(&ag->mutex);
1044
1045 for (i = 0; i < ag->num_devices; i++) {
1046 partner_ab = ag->ab[i];
1047
1048 for (j = 0; j < partner_ab->num_radios; j++) {
1049 pdev = &partner_ab->pdevs[j];
1050
1051 /* Avoid the self link */
1052 if (ar == pdev->ar)
1053 continue;
1054
1055 num_link++;
1056 }
1057 }
1058
1059 return num_link;
1060 }
1061
__ath12k_mac_mlo_ready(struct ath12k * ar)1062 static int __ath12k_mac_mlo_ready(struct ath12k *ar)
1063 {
1064 u8 num_link = ath12k_get_num_partner_link(ar);
1065 int ret;
1066
1067 if (num_link == 0)
1068 return 0;
1069
1070 ret = ath12k_wmi_mlo_ready(ar);
1071 if (ret) {
1072 ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
1073 ar->pdev_idx, ret);
1074 return ret;
1075 }
1076
1077 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
1078 ar->pdev_idx);
1079
1080 return 0;
1081 }
1082
ath12k_mac_mlo_ready(struct ath12k_hw_group * ag)1083 int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
1084 {
1085 struct ath12k_hw *ah;
1086 struct ath12k *ar;
1087 int ret;
1088 int i, j;
1089
1090 for (i = 0; i < ag->num_hw; i++) {
1091 ah = ag->ah[i];
1092 if (!ah)
1093 continue;
1094
1095 for_each_ar(ah, ar, j) {
1096 ar = &ah->radio[j];
1097 ret = __ath12k_mac_mlo_ready(ar);
1098 if (ret)
1099 return ret;
1100 }
1101 }
1102
1103 return 0;
1104 }
1105
/* Establish MLO across the hw group: mac-level setup, cross-device DP
 * cookie-conversion init, then the per-radio MLO-ready handshake. No-op
 * for groups that are not MLO capable. MLO is torn down again if the
 * ready step fails.
 */
static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
{
	int ret, i;

	if (!ag->mlo_capable)
		return 0;

	ret = ath12k_mac_mlo_setup(ag);
	if (ret)
		return ret;

	for (i = 0; i < ag->num_devices; i++)
		ath12k_dp_partner_cc_init(ag->ab[i]);

	ret = ath12k_mac_mlo_ready(ag);
	if (ret)
		goto err_mlo_teardown;

	return 0;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

	return ret;
}
1131
/* Start (or restart) an entire hw group.
 *
 * For a group that was already registered with mac80211 (restart path),
 * only MLO setup and per-device pdev creation are redone. For a fresh
 * group, mac state is allocated and registered first.
 *
 * Two distinct unwind paths exist: before registration errors unwind via
 * err_mlo_teardown/err_mac_destroy; after the group is registered they go
 * through full ath12k_core_hw_group_stop(). Caller must hold ag->mutex.
 */
static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int ret, i;

	lockdep_assert_held(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags)) {
		/* restart path: mac80211 registration still valid */
		ret = ath12k_core_mlo_setup(ag);
		if (WARN_ON(ret)) {
			ath12k_mac_unregister(ag);
			goto err_mac_destroy;
		}
		goto core_pdev_create;
	}

	ret = ath12k_mac_allocate(ag);
	if (WARN_ON(ret))
		return ret;

	ret = ath12k_core_mlo_setup(ag);
	if (WARN_ON(ret))
		goto err_mac_destroy;

	ret = ath12k_mac_register(ag);
	if (WARN_ON(ret))
		goto err_mlo_teardown;

	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

core_pdev_create:
	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ret = ath12k_core_pdev_create(ab);
		if (ret) {
			ath12k_err(ab, "failed to create pdev core %d\n", ret);
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		ath12k_hif_irq_enable(ab);

		/* -EOPNOTSUPP means the radio has no rfkill support */
		ret = ath12k_core_rfkill_config(ab);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

err:
	ath12k_core_hw_group_stop(ag);
	return ret;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

err_mac_destroy:
	ath12k_mac_destroy(ag);

	return ret;
}
1204
ath12k_core_start_firmware(struct ath12k_base * ab,enum ath12k_firmware_mode mode)1205 static int ath12k_core_start_firmware(struct ath12k_base *ab,
1206 enum ath12k_firmware_mode mode)
1207 {
1208 int ret;
1209
1210 ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
1211 &ab->qmi.ce_cfg.shadow_reg_v3_len);
1212
1213 ret = ath12k_qmi_firmware_start(ab, mode);
1214 if (ret) {
1215 ath12k_err(ab, "failed to send firmware start: %d\n", ret);
1216 return ret;
1217 }
1218
1219 return ret;
1220 }
1221
1222 static inline
ath12k_core_hw_group_start_ready(struct ath12k_hw_group * ag)1223 bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
1224 {
1225 lockdep_assert_held(&ag->mutex);
1226
1227 return (ag->num_started == ag->num_devices);
1228 }
1229
ath12k_fw_stats_pdevs_free(struct list_head * head)1230 static void ath12k_fw_stats_pdevs_free(struct list_head *head)
1231 {
1232 struct ath12k_fw_stats_pdev *i, *tmp;
1233
1234 list_for_each_entry_safe(i, tmp, head, list) {
1235 list_del(&i->list);
1236 kfree(i);
1237 }
1238 }
1239
ath12k_fw_stats_bcn_free(struct list_head * head)1240 void ath12k_fw_stats_bcn_free(struct list_head *head)
1241 {
1242 struct ath12k_fw_stats_bcn *i, *tmp;
1243
1244 list_for_each_entry_safe(i, tmp, head, list) {
1245 list_del(&i->list);
1246 kfree(i);
1247 }
1248 }
1249
ath12k_fw_stats_vdevs_free(struct list_head * head)1250 static void ath12k_fw_stats_vdevs_free(struct list_head *head)
1251 {
1252 struct ath12k_fw_stats_vdev *i, *tmp;
1253
1254 list_for_each_entry_safe(i, tmp, head, list) {
1255 list_del(&i->list);
1256 kfree(i);
1257 }
1258 }
1259
ath12k_fw_stats_init(struct ath12k * ar)1260 void ath12k_fw_stats_init(struct ath12k *ar)
1261 {
1262 INIT_LIST_HEAD(&ar->fw_stats.vdevs);
1263 INIT_LIST_HEAD(&ar->fw_stats.pdevs);
1264 INIT_LIST_HEAD(&ar->fw_stats.bcn);
1265 init_completion(&ar->fw_stats_complete);
1266 init_completion(&ar->fw_stats_done);
1267 }
1268
ath12k_fw_stats_free(struct ath12k_fw_stats * stats)1269 void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
1270 {
1271 ath12k_fw_stats_pdevs_free(&stats->pdevs);
1272 ath12k_fw_stats_vdevs_free(&stats->vdevs);
1273 ath12k_fw_stats_bcn_free(&stats->bcn);
1274 }
1275
/* Drop all collected firmware stats and restart vdev-response
 * accounting, serialized against the stats receive path via data_lock.
 */
void ath12k_fw_stats_reset(struct ath12k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ath12k_fw_stats_free(&ar->fw_stats);
	ar->fw_stats.num_vdev_recvd = 0;
	spin_unlock_bh(&ar->data_lock);
}
1283
ath12k_core_trigger_partner(struct ath12k_base * ab)1284 static void ath12k_core_trigger_partner(struct ath12k_base *ab)
1285 {
1286 struct ath12k_hw_group *ag = ab->ag;
1287 struct ath12k_base *partner_ab;
1288 bool found = false;
1289 int i;
1290
1291 for (i = 0; i < ag->num_devices; i++) {
1292 partner_ab = ag->ab[i];
1293 if (!partner_ab)
1294 continue;
1295
1296 if (found)
1297 ath12k_qmi_trigger_host_cap(partner_ab);
1298
1299 found = (partner_ab == ab);
1300 }
1301 }
1302
/* Called when QMI reports firmware ready: start firmware, bring up
 * CE and DP, start this device's core and — if this is the last group
 * member to come up — start the whole hardware group; otherwise nudge
 * the next partner device via QMI host-cap.
 *
 * Locking order: ag->mutex, then ab->core_lock.
 */
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	int ret, i;

	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
	if (ret) {
		ath12k_err(ab, "failed to start firmware: %d\n", ret);
		return ret;
	}

	ret = ath12k_ce_init_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
		goto err_firmware_stop;
	}

	ret = ath12k_dp_cmn_device_init(ath12k_ab_to_dp(ab));
	if (ret) {
		ath12k_err(ab, "failed to init DP: %d\n", ret);
		goto err_firmware_stop;
	}

	mutex_lock(&ag->mutex);
	mutex_lock(&ab->core_lock);

	ret = ath12k_core_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start core: %d\n", ret);
		goto err_deinit;
	}

	mutex_unlock(&ab->core_lock);

	if (ath12k_core_hw_group_start_ready(ag)) {
		/* Last member up: start the group as a whole */
		ret = ath12k_core_hw_group_start(ag);
		if (ret) {
			ath12k_warn(ab, "unable to start hw group\n");
			goto err_core_stop;
		}
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
	} else {
		ath12k_core_trigger_partner(ab);
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_core_stop:
	/* Stop every started device of the group in reverse order.
	 * NOTE: ab is repurposed as the loop cursor from here on.
	 */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}
	mutex_unlock(&ag->mutex);
	goto exit;

err_deinit:
	ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));
	mutex_unlock(&ab->core_lock);
	mutex_unlock(&ag->mutex);

err_firmware_stop:
	ath12k_qmi_firmware_stop(ab);

exit:
	return ret;
}
1376
/* Rebuild driver state after a firmware crash: tear down per-device
 * DP/CE/WMI state, reinitialize SRNG and restart firmware via QMI.
 * Returns 0 on success; on firmware-restart failure SRNG is deinited
 * again and the error is returned.
 */
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
	int ret, total_vdev;

	mutex_lock(&ab->core_lock);
	ath12k_link_sta_rhash_tbl_destroy(ab);
	ath12k_thermal_unregister(ab);
	ath12k_dp_pdev_free(ab);
	ath12k_ce_cleanup_pipes(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_rx_pdev_reo_cleanup(ab);
	mutex_unlock(&ab->core_lock);

	ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));
	ath12k_hal_srng_deinit(ab);

	/* All vdev ids become free again after the restart.
	 * NOTE(review): 1LL << total_vdev is undefined if total_vdev can
	 * reach 64 — presumed bounded well below that; confirm.
	 */
	total_vdev = ab->num_radios * TARGET_NUM_VDEVS(ab);
	ab->free_vdev_map = (1LL << total_vdev) - 1;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		return ret;

	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	ret = ath12k_core_qmi_firmware_ready(ab);
	if (ret)
		goto err_hal_srng_deinit;

	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);

	return 0;

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);
	return ret;
}
1413
ath12k_rfkill_work(struct work_struct * work)1414 static void ath12k_rfkill_work(struct work_struct *work)
1415 {
1416 struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1417 struct ath12k_hw_group *ag = ab->ag;
1418 struct ath12k *ar;
1419 struct ath12k_hw *ah;
1420 struct ieee80211_hw *hw;
1421 bool rfkill_radio_on;
1422 int i, j;
1423
1424 spin_lock_bh(&ab->base_lock);
1425 rfkill_radio_on = ab->rfkill_radio_on;
1426 spin_unlock_bh(&ab->base_lock);
1427
1428 for (i = 0; i < ag->num_hw; i++) {
1429 ah = ath12k_ag_to_ah(ag, i);
1430 if (!ah)
1431 continue;
1432
1433 for (j = 0; j < ah->num_radio; j++) {
1434 ar = &ah->radio[j];
1435 if (!ar)
1436 continue;
1437
1438 ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1439 }
1440
1441 hw = ah->hw;
1442 wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1443 }
1444 }
1445
/* Quiesce one radio for recovery: reset vdev accounting, drop all peers,
 * cancel deferred work and detach the pdev from the active-pdev RCU
 * array.  Caller must hold the wiphy lock.
 */
void ath12k_core_halt(struct ath12k *ar)
{
	struct list_head *pos, *n;
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ar->num_created_vdevs = 0;
	ar->allocated_vdev_map = 0;

	ath12k_mac_scan_finish(ar);
	ath12k_mac_peer_cleanup_all(ar);
	cancel_delayed_work_sync(&ar->scan.timeout);
	cancel_work_sync(&ar->regd_update_work);
	cancel_work_sync(&ar->regd_channel_update_work);
	cancel_work_sync(&ab->rfkill_work);
	cancel_work_sync(&ab->update_11d_work);

	/* Hide the pdev from new RCU readers, then wait for existing
	 * readers to finish before state is torn down.
	 */
	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
	synchronize_rcu();

	/* Unlink all arvifs under data_lock; only the list linkage is
	 * touched here, the vif objects are managed elsewhere.
	 */
	spin_lock_bh(&ar->data_lock);
	list_for_each_safe(pos, n, &ar->arvifs)
		list_del_init(pos);
	spin_unlock_bh(&ar->data_lock);

	idr_init(&ar->txmgmt_idr);
}
1474
/* First phase of crash recovery: bump the crash counter, freeze mac80211
 * queues, drain tx and complete every potentially-pending waiter so no
 * thread stays blocked on firmware responses that will never arrive.
 */
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_hw *ah;
	int i, j;

	spin_lock_bh(&ab->base_lock);
	ab->stats.fw_crash_counter++;
	spin_unlock_bh(&ab->base_lock);

	if (ab->is_reset)
		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
		    ah->state == ATH12K_HW_STATE_TM)
			continue;

		wiphy_lock(ah->hw->wiphy);

		/* If queue 0 is stopped, it is safe to assume that all
		 * other queues are stopped by driver via
		 * ieee80211_stop_queues() below. This means, there is
		 * no need to stop it again and hence continue
		 */
		if (ieee80211_queue_stopped(ah->hw, 0)) {
			wiphy_unlock(ah->hw->wiphy);
			continue;
		}

		ieee80211_stop_queues(ah->hw);

		for (j = 0; j < ah->num_radio; j++) {
			ar = &ah->radio[j];

			ath12k_mac_drain_tx(ar);
			ar->state_11d = ATH12K_11D_IDLE;
			/* Unblock every waiter that could be stuck on a
			 * firmware response.
			 */
			complete(&ar->completed_11d_scan);
			complete(&ar->scan.started);
			complete_all(&ar->scan.completed);
			complete(&ar->scan.on_channel);
			complete(&ar->peer_assoc_done);
			complete(&ar->peer_delete_done);
			complete(&ar->install_key_done);
			complete(&ar->vdev_setup_done);
			complete(&ar->vdev_delete_done);
			complete(&ar->bss_survey_done);
			complete_all(&ar->regd_update_completed);
			complete_all(&ar->thermal.wmi_sync);

			wake_up(&ar->dp.tx_empty_waitq);
			/* Release all pending tx-mgmt entries and destroy
			 * the idr (re-created later in ath12k_core_halt()).
			 */
			idr_for_each(&ar->txmgmt_idr,
				     ath12k_mac_tx_mgmt_pending_free, ar);
			idr_destroy(&ar->txmgmt_idr);
			wake_up(&ar->txmgmt_empty_waitq);

			ar->monitor_vdev_id = -1;
			ar->monitor_vdev_created = false;
			ar->monitor_started = false;
		}

		wiphy_unlock(ah->hw->wiphy);
	}

	wake_up(&ab->wmi_ab.tx_credits_wq);
	wake_up(&ab->peer_mapping_wq);
}
1544
/* Deferred worker: push the newly learnt country code (ab->new_alpha2)
 * down to firmware on every radio of this device.
 */
static void ath12k_update_11d(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
	struct ath12k *ar;
	struct ath12k_pdev *pdev;
	struct wmi_set_current_country_arg arg = {};
	int ret, i;

	/* Copy the two-letter alpha2 under base_lock; it may be updated
	 * concurrently by the path that scheduled this work.
	 */
	spin_lock_bh(&ab->base_lock);
	memcpy(&arg.alpha2, &ab->new_alpha2, 2);
	spin_unlock_bh(&ab->base_lock);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
		   arg.alpha2[0], arg.alpha2[1]);

	for (i = 0; i < ab->num_radios; i++) {
		pdev = &ab->pdevs[i];
		ar = pdev->ar;

		memcpy(&ar->alpha2, &arg.alpha2, 2);

		/* A regd update is expected to follow the country change */
		reinit_completion(&ar->regd_update_completed);

		ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
		if (ret)
			ath12k_warn(ar->ab,
				    "pdev id %d failed set current country code: %d\n",
				    i, ret);
	}
}
1575
/* Second phase of crash recovery: move every hardware of the group into
 * a restartable state (halting all its radios) and signal completion of
 * the driver-side recovery.
 */
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	struct ath12k *ar;
	int i, j;

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
			continue;

		wiphy_lock(ah->hw->wiphy);
		mutex_lock(&ah->hw_mutex);

		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;

			for (j = 0; j < ah->num_radio; j++) {
				ar = &ah->radio[j];
				ath12k_core_halt(ar);
			}

			ath12k_mac_dp_peer_cleanup(ah);
			break;
		case ATH12K_HW_STATE_OFF:
			/* Filtered out above; kept for switch completeness */
			ath12k_warn(ab,
				    "cannot restart hw %d that hasn't been started\n",
				    i);
			break;
		case ATH12K_HW_STATE_RESTARTING:
			break;
		case ATH12K_HW_STATE_RESTARTED:
			/* Crashed again before the restart finished */
			ah->state = ATH12K_HW_STATE_WEDGED;
			fallthrough;
		case ATH12K_HW_STATE_WEDGED:
			ath12k_warn(ab,
				    "device is wedged, will not restart hw %d\n", i);
			break;
		case ATH12K_HW_STATE_TM:
			ath12k_warn(ab, "fw mode reset done radio %d\n", i);
			break;
		}

		mutex_unlock(&ah->hw_mutex);
		wiphy_unlock(ah->hw->wiphy);
	}

	complete(&ab->driver_recovery);
}
1627
/* Deferred restart worker: reconfigure the crashed device and, once all
 * group members have restarted, ask mac80211 to restart each hardware.
 */
static void ath12k_core_restart(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	int ret, i;

	ret = ath12k_core_reconfigure_on_crash(ab);
	if (ret) {
		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
		return;
	}

	if (ab->is_reset) {
		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
			/* mac80211 never registered: nothing to restart,
			 * declare the reset finished right here.
			 */
			atomic_dec(&ab->reset_count);
			complete(&ab->reset_complete);
			ab->is_reset = false;
			atomic_set(&ab->fail_cont_count, 0);
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
		}

		mutex_lock(&ag->mutex);

		/* Only the last device of the group to come back kicks
		 * the mac80211 restart for all hardware.
		 */
		if (!ath12k_core_hw_group_start_ready(ag)) {
			mutex_unlock(&ag->mutex);
			goto exit_restart;
		}

		for (i = 0; i < ag->num_hw; i++) {
			ah = ath12k_ag_to_ah(ag, i);
			ieee80211_restart_hw(ah->hw);
		}

		mutex_unlock(&ag->mutex);
	}

exit_restart:
	complete(&ab->restart_completed);
}
1668
/* Deferred reset worker: rate-limits recovery storms, serializes against
 * a concurrently running reset, then powers the device down and back up.
 * The last group member to get here powers the whole group back up.
 */
static void ath12k_core_reset(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
	struct ath12k_hw_group *ag = ab->ag;
	int reset_count, fail_cont_count, i;
	long time_left;

	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
		return;
	}

	/* Sometimes the recovery will fail and then the next all recovery fail,
	 * this is to avoid infinite recovery since it can not recovery success
	 */
	fail_cont_count = atomic_read(&ab->fail_cont_count);

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
		return;

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
	    time_before(jiffies, ab->reset_fail_timeout))
		return;

	reset_count = atomic_inc_return(&ab->reset_count);

	if (reset_count > 1) {
		/* Sometimes it happened another reset worker before the previous one
		 * completed, then the second reset worker will destroy the previous one,
		 * thus below is to avoid that.
		 */
		ath12k_warn(ab, "already resetting count %d\n", reset_count);

		reinit_completion(&ab->reset_complete);
		time_left = wait_for_completion_timeout(&ab->reset_complete,
							ATH12K_RESET_TIMEOUT_HZ);
		if (time_left) {
			/* Previous reset finished in time: this one is redundant */
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
			atomic_dec(&ab->reset_count);
			return;
		}

		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
		/* Record the continuous recovery fail count when recovery failed*/
		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");

	ab->is_reset = true;
	atomic_set(&ab->recovery_count, 0);

	ath12k_coredump_collect(ab);
	ath12k_core_pre_reconfigure_recovery(ab);

	ath12k_core_post_reconfigure_recovery(ab);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");

	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	ath12k_hif_power_down(ab, false);

	/* prepare for power up */
	ab->qmi.num_radios = U8_MAX;

	mutex_lock(&ag->mutex);
	ath12k_core_to_group_ref_put(ab);

	if (ag->num_started > 0) {
		/* Not the last member down yet: the final one will power
		 * the group back up.
		 */
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "waiting for %d partner device(s) to reset\n",
			   ag->num_started);
		mutex_unlock(&ag->mutex);
		return;
	}

	/* Prepare MLO global memory region for power up */
	ath12k_qmi_reset_mlo_mem(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		ath12k_hif_power_up(ab);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
	}

	mutex_unlock(&ag->mutex);
}
1761
ath12k_core_get_memory_mode(struct ath12k_base * ab)1762 enum ath12k_qmi_mem_mode ath12k_core_get_memory_mode(struct ath12k_base *ab)
1763 {
1764 unsigned long total_ram;
1765 struct sysinfo si;
1766
1767 si_meminfo(&si);
1768 total_ram = si.totalram * si.mem_unit;
1769
1770 if (total_ram < SZ_512M)
1771 return ATH12K_QMI_MEMORY_MODE_LOW_512_M;
1772
1773 return ATH12K_QMI_MEMORY_MODE_DEFAULT;
1774 }
1775 EXPORT_SYMBOL(ath12k_core_get_memory_mode);
1776
/* Early per-device init: select the memory-profile parameter set that
 * matches the chosen target memory mode and map the firmware.
 *
 * NOTE(review): ab->target_mem_mode is used as an array index without a
 * bounds check — presumed validated before this point; confirm.
 *
 * Always returns 0.
 */
int ath12k_core_pre_init(struct ath12k_base *ab)
{
	const struct ath12k_mem_profile_based_param *param;

	param = &ath12k_mem_profile_based_param[ab->target_mem_mode];
	ab->profile_param = param;
	ath12k_fw_map(ab);

	return 0;
}
1787
/* Panic notifier callback: let the bus layer capture state before the
 * system goes down.  Return value is the HIF handler's notifier verdict.
 */
static int ath12k_core_panic_handler(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
					      panic_nb);

	return ath12k_hif_panic_handler(ab);
}
1796
/* Hook this device's panic handler into the global panic notifier chain. */
static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
{
	ab->panic_nb.notifier_call = ath12k_core_panic_handler;

	return atomic_notifier_chain_register(&panic_notifier_list,
					      &ab->panic_nb);
}
1804
/* Remove this device's panic handler from the global notifier chain. */
static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &ab->panic_nb);
}
1810
1811 static inline
ath12k_core_hw_group_create_ready(struct ath12k_hw_group * ag)1812 bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1813 {
1814 lockdep_assert_held(&ag->mutex);
1815
1816 return (ag->num_probed == ag->num_devices);
1817 }
1818
ath12k_core_hw_group_alloc(struct ath12k_base * ab)1819 static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
1820 {
1821 struct ath12k_hw_group *ag;
1822 int count = 0;
1823
1824 lockdep_assert_held(&ath12k_hw_group_mutex);
1825
1826 list_for_each_entry(ag, &ath12k_hw_group_list, list)
1827 count++;
1828
1829 ag = kzalloc_obj(*ag);
1830 if (!ag)
1831 return NULL;
1832
1833 ag->id = count;
1834 list_add(&ag->list, &ath12k_hw_group_list);
1835 mutex_init(&ag->mutex);
1836 ag->mlo_capable = false;
1837
1838 return ag;
1839 }
1840
ath12k_core_free_wsi_info(struct ath12k_hw_group * ag)1841 static void ath12k_core_free_wsi_info(struct ath12k_hw_group *ag)
1842 {
1843 int i;
1844
1845 for (i = 0; i < ag->num_devices; i++) {
1846 of_node_put(ag->wsi_node[i]);
1847 ag->wsi_node[i] = NULL;
1848 }
1849 ag->num_devices = 0;
1850 }
1851
/* Release a hardware group: drop its WSI DT references, unlink it from
 * the global group list (under ath12k_hw_group_mutex) and free it.
 */
static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
{
	mutex_lock(&ath12k_hw_group_mutex);

	ath12k_core_free_wsi_info(ag);
	list_del(&ag->list);
	kfree(ag);

	mutex_unlock(&ath12k_hw_group_mutex);
}
1862
ath12k_core_hw_group_find_by_dt(struct ath12k_base * ab)1863 static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
1864 {
1865 struct ath12k_hw_group *ag;
1866 int i;
1867
1868 if (!ab->dev->of_node)
1869 return NULL;
1870
1871 list_for_each_entry(ag, &ath12k_hw_group_list, list)
1872 for (i = 0; i < ag->num_devices; i++)
1873 if (ag->wsi_node[i] == ab->dev->of_node)
1874 return ag;
1875
1876 return NULL;
1877 }
1878
/* Walk the WSI daisy chain described in the device tree, starting at
 * @ab's node, collecting every device node of the ring into
 * ag->wsi_node[].  Each hop follows the node's tx endpoint to the remote
 * rx endpoint until the walk arrives back at the starting node.
 *
 * On success ag->num_devices holds the ring length and each wsi_node[]
 * entry holds an of_node_get() reference.  On failure all references
 * taken so far are dropped and a negative errno is returned.
 */
static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
				    struct ath12k_base *ab)
{
	struct device_node *next_wsi_dev;
	int device_count = 0, ret = 0;
	struct device_node *wsi_dev;

	wsi_dev = of_node_get(ab->dev->of_node);
	if (!wsi_dev)
		return -ENODEV;

	do {
		if (device_count >= ATH12K_MAX_DEVICES) {
			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
				    device_count, ATH12K_MAX_DEVICES);
			ret = -EINVAL;
			break;
		}

		/* Record this ring member; reference dropped on error below */
		ag->wsi_node[device_count++] = of_node_get(wsi_dev);

		struct device_node *tx_endpoint __free(device_node) =
			of_graph_get_endpoint_by_regs(wsi_dev, 0, -1);
		if (!tx_endpoint) {
			ret = -ENODEV;
			break;
		}

		/* The tx endpoint's remote is the next device's rx endpoint */
		struct device_node *next_rx_endpoint __free(device_node) =
			of_graph_get_remote_endpoint(tx_endpoint);
		if (!next_rx_endpoint) {
			ret = -ENODEV;
			break;
		}

		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
		if (!next_wsi_dev) {
			ret = -ENODEV;
			break;
		}

		of_node_put(wsi_dev);
		wsi_dev = next_wsi_dev;
	} while (ab->dev->of_node != wsi_dev);

	if (ret) {
		/* Roll back: release every collected node plus the cursor */
		while (--device_count >= 0) {
			of_node_put(ag->wsi_node[device_count]);
			ag->wsi_node[device_count] = NULL;
		}

		of_node_put(wsi_dev);
		return ret;
	}

	of_node_put(wsi_dev);
	ag->num_devices = device_count;

	return 0;
}
1939
ath12k_core_get_wsi_index(struct ath12k_hw_group * ag,struct ath12k_base * ab)1940 static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
1941 struct ath12k_base *ab)
1942 {
1943 int i, wsi_controller_index = -1, node_index = -1;
1944 bool control;
1945
1946 for (i = 0; i < ag->num_devices; i++) {
1947 control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
1948 if (control)
1949 wsi_controller_index = i;
1950
1951 if (ag->wsi_node[i] == ab->dev->of_node)
1952 node_index = i;
1953 }
1954
1955 if (wsi_controller_index == -1) {
1956 ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
1957 return -EINVAL;
1958 }
1959
1960 if (node_index == -1) {
1961 ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
1962 return -EINVAL;
1963 }
1964
1965 ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
1966 ag->num_devices;
1967
1968 return 0;
1969 }
1970
/* Attach a newly probed device to a hardware group: reuse the DT-derived
 * group when one exists, otherwise create one (either a real multi-device
 * group from WSI info, or a single-device "invalid" group as fallback).
 * Caller must hold ath12k_hw_group_mutex.  Returns the group, or NULL on
 * allocation failure.
 */
static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
{
	struct ath12k_wsi_info *wsi = &ab->wsi_info;
	struct ath12k_hw_group *ag;

	lockdep_assert_held(&ath12k_hw_group_mutex);

	/* No grouping in factory test mode */
	if (ath12k_ftm_mode)
		goto invalid_group;

	/* The grouping of multiple devices will be done based on device tree file.
	 * The platforms that do not have any valid group information would have
	 * each device to be part of its own invalid group.
	 *
	 * We use group id ATH12K_INVALID_GROUP_ID for single device group
	 * which didn't have dt entry or wrong dt entry, there could be many
	 * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So
	 * default group id of ATH12K_INVALID_GROUP_ID combined with
	 * num devices in ath12k_hw_group determines if the group is
	 * multi device or single device group
	 */

	ag = ath12k_core_hw_group_find_by_dt(ab);
	if (!ag) {
		ag = ath12k_core_hw_group_alloc(ab);
		if (!ag) {
			ath12k_warn(ab, "unable to create new hw group\n");
			return NULL;
		}

		/* Incomplete/absent WSI description: degrade to a
		 * single-device group instead of failing the probe.
		 */
		if (ath12k_core_get_wsi_info(ag, ab) ||
		    ath12k_core_get_wsi_index(ag, ab)) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "unable to get wsi info from dt, grouping single device");
			ath12k_core_free_wsi_info(ag);
			ag->id = ATH12K_INVALID_GROUP_ID;
			ag->num_devices = 1;
			wsi->index = 0;
		}

		goto exit;
	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
			   ag->id);
		goto invalid_group;
	} else {
		if (ath12k_core_get_wsi_index(ag, ab))
			goto invalid_group;
		goto exit;
	}

invalid_group:
	/* Fallback: a fresh single-device group with the invalid group id */
	ag = ath12k_core_hw_group_alloc(ab);
	if (!ag) {
		ath12k_warn(ab, "unable to create new hw group\n");
		return NULL;
	}

	ag->id = ATH12K_INVALID_GROUP_ID;
	ag->num_devices = 1;
	wsi->index = 0;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");

exit:
	if (ag->num_probed >= ag->num_devices) {
		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
		goto invalid_group;
	}

	ab->device_id = ag->num_probed++;
	ag->ab[ab->device_id] = ab;
	ab->ag = ag;

	ath12k_dp_cmn_hw_group_assign(ath12k_ab_to_dp(ab), ag);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
		   ag->id, ag->num_devices, wsi->index);

	return ag;
}
2052
/* Detach @ab from its hardware group.  The last device to leave also
 * frees the group (outside ag->mutex, which the free path re-takes the
 * global list mutex for).
 */
void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	u8 device_id = ab->device_id;
	int num_probed;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (WARN_ON(device_id >= ag->num_devices)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	if (WARN_ON(ag->ab[device_id] != ab)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	ath12k_dp_cmn_hw_group_unassign(ath12k_ab_to_dp(ab), ag);

	ag->ab[device_id] = NULL;
	ab->ag = NULL;
	ab->device_id = ATH12K_INVALID_DEVICE_ID;

	if (ag->num_probed)
		ag->num_probed--;

	/* Snapshot under the mutex; free the group after dropping it */
	num_probed = ag->num_probed;

	mutex_unlock(&ag->mutex);

	if (!num_probed)
		ath12k_core_hw_group_free(ag);
}
2090
ath12k_core_hw_group_destroy(struct ath12k_hw_group * ag)2091 static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
2092 {
2093 struct ath12k_base *ab;
2094 int i;
2095
2096 if (WARN_ON(!ag))
2097 return;
2098
2099 for (i = 0; i < ag->num_devices; i++) {
2100 ab = ag->ab[i];
2101 if (!ab)
2102 continue;
2103
2104 ath12k_core_soc_destroy(ab);
2105 }
2106 }
2107
/* Stop a hardware group exactly once: mark it UNREGISTER (subsequent
 * callers bail out), stop the group, then stop each device core.
 */
void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	/* Already cleaned up by an earlier caller */
	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);

	ath12k_core_hw_group_stop(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}

	mutex_unlock(&ag->mutex);
}
2139
/* Create per-SoC core state for every device of the group.  On failure
 * the SoCs created so far are destroyed in reverse order and the error
 * is returned.  Caller must hold ag->mutex.
 */
static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i, ret;

	lockdep_assert_held(&ag->mutex);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		ret = ath12k_core_soc_create(ab);
		if (ret) {
			mutex_unlock(&ab->core_lock);
			ath12k_err(ab, "failed to create soc %d core: %d\n", i, ret);
			goto destroy;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

destroy:
	/* Unwind only the devices created before the failing one */
	for (i--; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_soc_destroy(ab);
		mutex_unlock(&ab->core_lock);
	}

	return ret;
}
2179
/* Decide whether the group advertises MLO.  Single-device groups rely on
 * firmware capability reporting; multi-device groups require every member
 * to support MLO.  Left untouched (disabled) in factory test mode.
 * Caller must hold ag->mutex.
 */
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (ath12k_ftm_mode)
		return;

	lockdep_assert_held(&ag->mutex);

	if (ag->num_devices == 1) {
		ab = ag->ab[0];
		/* QCN9274 firmware uses firmware IE for MLO advertisement */
		if (ab->fw.fw_features_valid) {
			ag->mlo_capable =
				ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
			return;
		}

		/* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
		ag->mlo_capable = ab->single_chip_mlo_support;
		return;
	}

	ag->mlo_capable = true;

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		/* even if 1 device's firmware feature indicates MLO
		 * unsupported, make MLO unsupported for the whole group
		 */
		if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
			ag->mlo_capable = false;
			return;
		}
	}
}
2220
/* Device-level driver init: register a panic notifier (best effort —
 * failure is only warned about), attach the device to a hardware group
 * and, once the group is fully probed, create per-SoC state for all of
 * its member devices.  Returns 0 on success or a negative errno.
 */
int ath12k_core_init(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag;
	int ret;

	ret = ath12k_core_panic_notifier_register(ab);
	if (ret)
		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);

	mutex_lock(&ath12k_hw_group_mutex);

	ag = ath12k_core_hw_group_assign(ab);
	if (!ag) {
		mutex_unlock(&ath12k_hw_group_mutex);
		ath12k_warn(ab, "unable to get hw group\n");
		ret = -ENODEV;
		goto err_unregister_notifier;
	}

	mutex_unlock(&ath12k_hw_group_mutex);

	mutex_lock(&ag->mutex);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
		   ag->num_devices, ag->num_probed);

	/* Only the last device of the group to probe creates the SoCs */
	if (ath12k_core_hw_group_create_ready(ag)) {
		ret = ath12k_core_hw_group_create(ag);
		if (ret) {
			mutex_unlock(&ag->mutex);
			ath12k_warn(ab, "unable to create hw group\n");
			goto err_unassign_hw_group;
		}
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_unassign_hw_group:
	ath12k_core_hw_group_unassign(ab);
err_unregister_notifier:
	ath12k_core_panic_notifier_unregister(ab);

	return ret;
}
2267
/* Device-level teardown: destroy the group's SoC state, detach the
 * device from its group and drop the panic notifier.
 */
void ath12k_core_deinit(struct ath12k_base *ab)
{
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
	ath12k_core_panic_notifier_unregister(ab);
}
2274
/* Final release of an ath12k_base from ath12k_core_alloc().  Order
 * matters: stop the replenish timer and destroy both workqueues before
 * freeing the structure their callbacks reference.
 */
void ath12k_core_free(struct ath12k_base *ab)
{
	timer_delete_sync(&ab->rx_replenish_retry);
	destroy_workqueue(ab->workqueue_aux);
	destroy_workqueue(ab->workqueue);
	kfree(ab);
}
2282
/* Allocate and minimally initialize an ath12k_base plus @priv_size bytes
 * of bus-private area.  Sets up locks, completions, work items and the
 * driver's two single-threaded workqueues.  Returns NULL on any
 * allocation failure (partially created workqueues are cleaned up).
 */
struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
				      enum ath12k_bus bus)
{
	struct ath12k_base *ab;

	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
	if (!ab)
		return NULL;

	init_completion(&ab->driver_recovery);

	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
	if (!ab->workqueue)
		goto err_sc_free;

	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
	if (!ab->workqueue_aux)
		goto err_free_wq;

	mutex_init(&ab->core_lock);
	spin_lock_init(&ab->base_lock);
	init_completion(&ab->reset_complete);

	init_waitqueue_head(&ab->peer_mapping_wq);
	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
	INIT_WORK(&ab->restart_work, ath12k_core_restart);
	INIT_WORK(&ab->reset_work, ath12k_core_reset);
	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
	INIT_WORK(&ab->update_11d_work, ath12k_update_11d);

	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
	init_completion(&ab->htc_suspend);
	init_completion(&ab->restart_completed);
	init_completion(&ab->wow.wakeup_completed);

	ab->dev = dev;
	ab->hif.bus = bus;
	/* U8_MAX marks the radio count as "not yet reported by firmware" */
	ab->qmi.num_radios = U8_MAX;
	ab->single_chip_mlo_support = false;

	/* Device index used to identify the devices in a group.
	 *
	 * In Intra-device MLO, only one device present in a group,
	 * so it is always zero.
	 *
	 * In Inter-device MLO, Multiple device present in a group,
	 * expect non-zero value.
	 */
	ab->device_id = 0;

	return ab;

err_free_wq:
	destroy_workqueue(ab->workqueue);
err_sc_free:
	kfree(ab);
	return NULL;
}
2342
2343 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies WLAN devices");
2344 MODULE_LICENSE("Dual BSD/GPL");
2345