1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
6 */
7
8 #include <linux/export.h>
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/remoteproc.h>
12 #include <linux/firmware.h>
13 #include <linux/of.h>
14 #include <linux/of_graph.h>
15 #include "ahb.h"
16 #include "core.h"
17 #include "dp_tx.h"
18 #include "dp_rx.h"
19 #include "debug.h"
20 #include "debugfs.h"
21 #include "fw.h"
22 #include "hif.h"
23 #include "pci.h"
24 #include "wow.h"
25
/* Per-bus registration results; presumably combined by the module
 * init/exit code, which is outside this chunk — TODO confirm.
 */
static int ahb_err, pci_err;

/* Debug message bitmask; runtime-writable via
 * /sys/module/ath12k/parameters/debug_mask (mode 0644).
 */
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");

/* When set, ath12k_core_soc_create() boots the firmware in factory test
 * mode; read-only after module load (mode 0444).
 */
bool ath12k_ftm_mode;
module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");

/* List of all hardware groups; protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);

static DEFINE_MUTEX(ath12k_hw_group_mutex);
39
/* Target and datapath sizing, indexed by QMI memory mode. The LOW_512_M
 * profile halves (or more) the vdev/client limits and ring/descriptor
 * counts relative to the default profile to fit memory-constrained hosts.
 */
static const struct
ath12k_mem_profile_based_param ath12k_mem_profile_based_param[] = {
	[ATH12K_QMI_MEMORY_MODE_DEFAULT] = {
		.num_vdevs = 17,
		.max_client_single = 512,
		.max_client_dbs = 128,
		.max_client_dbs_sbs = 128,
		.dp_params = {
			.tx_comp_ring_size = 32768,
			.rxdma_monitor_buf_ring_size = 4096,
			/* NOTE(review): 8092 is not a power of two — possibly
			 * meant 8192; confirm against firmware requirements.
			 */
			.rxdma_monitor_dst_ring_size = 8092,
			.num_pool_tx_desc = 32768,
			.rx_desc_count = 12288,
		},
	},
	[ATH12K_QMI_MEMORY_MODE_LOW_512_M] = {
		.num_vdevs = 9,
		.max_client_single = 128,
		.max_client_dbs = 64,
		.max_client_dbs_sbs = 64,
		.dp_params = {
			.tx_comp_ring_size = 16384,
			.rxdma_monitor_buf_ring_size = 256,
			.rxdma_monitor_dst_ring_size = 512,
			.num_pool_tx_desc = 16384,
			.rx_desc_count = 6144,
		},
	},
};
69
ath12k_core_rfkill_config(struct ath12k_base * ab)70 static int ath12k_core_rfkill_config(struct ath12k_base *ab)
71 {
72 struct ath12k *ar;
73 int ret = 0, i;
74
75 if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
76 return 0;
77
78 if (ath12k_acpi_get_disable_rfkill(ab))
79 return 0;
80
81 for (i = 0; i < ab->num_radios; i++) {
82 ar = ab->pdevs[i].ar;
83
84 ret = ath12k_mac_rfkill_config(ar);
85 if (ret && ret != -EOPNOTSUPP) {
86 ath12k_warn(ab, "failed to configure rfkill: %d", ret);
87 return ret;
88 }
89 }
90
91 return ret;
92 }
93
94 /* Check if we need to continue with suspend/resume operation.
95 * Return:
96 * a negative value: error happens and don't continue.
97 * 0: no error but don't continue.
98 * positive value: no error and do continue.
99 */
ath12k_core_continue_suspend_resume(struct ath12k_base * ab)100 static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
101 {
102 struct ath12k *ar;
103
104 if (!ab->hw_params->supports_suspend)
105 return -EOPNOTSUPP;
106
107 /* so far single_pdev_only chips have supports_suspend as true
108 * so pass 0 as a dummy pdev_id here.
109 */
110 ar = ab->pdevs[0].ar;
111 if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
112 return 0;
113
114 return 1;
115 }
116
/* System suspend callback: flush pending tx on all radios, then signal
 * restart_completed so a possible resume without a power cycle does not
 * time out. Returns 0 on success or when suspend is skipped, negative
 * errno on failure.
 */
int ath12k_core_suspend(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret, i;

	/* <= 0 means "error" (<0) or "skip suspend" (0); propagate as-is. */
	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	/* Drain every radio's tx queues under its wiphy lock before the
	 * device is powered down in suspend_late.
	 */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		if (!ar)
			continue;

		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);

		ret = ath12k_mac_wait_tx_complete(ar);
		if (ret) {
			/* Drop the lock before bailing out. */
			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
			return ret;
		}

		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
	}

	/* PM framework skips suspend_late/resume_early callbacks
	 * if other devices report errors in their suspend callbacks.
	 * However ath12k_core_resume() would still be called because
	 * here we return success thus kernel put us on dpm_suspended_list.
	 * Since we won't go through a power down/up cycle, there is
	 * no chance to call complete(&ab->restart_completed) in
	 * ath12k_core_restart(), making ath12k_core_resume() timeout.
	 * So call it here to avoid this issue. This also works in case
	 * no error happens thus suspend_late/resume_early get called,
	 * because it will be reinitialized in ath12k_core_resume_early().
	 */
	complete(&ab->restart_completed);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend);
159
/* Late suspend callback: quiesce ACPI notifications and all interrupt
 * sources, then power the device down. Returns 0 or the skip/error value
 * from ath12k_core_continue_suspend_resume().
 */
int ath12k_core_suspend_late(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	ath12k_acpi_stop(ab);

	/* Disable both data-path and copy-engine interrupts before
	 * cutting power.
	 */
	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	/* Second argument presumably marks a suspend-time power down —
	 * confirm against the hif implementation.
	 */
	ath12k_hif_power_down(ab, true);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend_late);
178
/* Early resume callback: re-arm restart_completed (it was completed in
 * ath12k_core_suspend()) and power the device back up. Returns 0 on
 * success, negative errno on power-up failure.
 */
int ath12k_core_resume_early(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	/* ath12k_core_resume() will wait on this completion, which the
	 * restart path signals once the device is back.
	 */
	reinit_completion(&ab->restart_completed);
	ret = ath12k_hif_power_up(ab);
	if (ret)
		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);

	return ret;
}
EXPORT_SYMBOL(ath12k_core_resume_early);
195
ath12k_core_resume(struct ath12k_base * ab)196 int ath12k_core_resume(struct ath12k_base *ab)
197 {
198 long time_left;
199 int ret;
200
201 ret = ath12k_core_continue_suspend_resume(ab);
202 if (ret <= 0)
203 return ret;
204
205 time_left = wait_for_completion_timeout(&ab->restart_completed,
206 ATH12K_RESET_TIMEOUT_HZ);
207 if (time_left == 0) {
208 ath12k_warn(ab, "timeout while waiting for restart complete");
209 return -ETIMEDOUT;
210 }
211
212 return 0;
213 }
214 EXPORT_SYMBOL(ath12k_core_resume);
215
/* Compose the board-file lookup name from bus/device/QMI identifiers.
 *
 * @with_variant: append ",variant=<bdf_ext>" if a BDF extension is known
 * @bus_type_mode: emit only "bus=<bus>" (bus-level fallback name)
 * @with_default: use ATH12K_BOARD_ID_DEFAULT instead of the QMI board id
 *                (only affects the non-BUS_AND_BOARD branch)
 *
 * Always returns 0.
 */
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					   size_t name_len, bool with_variant,
					   bool bus_type_mode, bool with_default)
{
	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = {};

	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
		scnprintf(variant, sizeof(variant), ",variant=%s",
			  ab->qmi.target.bdf_ext);

	switch (ab->id.bdf_search) {
	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
		if (bus_type_mode)
			scnprintf(name, name_len,
				  "bus=%s",
				  ath12k_bus_str(ab->hif.bus));
		else
			/* Fully-qualified name including PCI-style IDs. */
			scnprintf(name, name_len,
				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
				  ath12k_bus_str(ab->hif.bus),
				  ab->id.vendor, ab->id.device,
				  ab->id.subsystem_vendor,
				  ab->id.subsystem_device,
				  ab->qmi.target.chip_id,
				  ab->qmi.target.board_id,
				  variant);
		break;
	default:
		/* QMI-only identification. */
		scnprintf(name, name_len,
			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
			  ath12k_bus_str(ab->hif.bus),
			  ab->qmi.target.chip_id,
			  with_default ?
			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
			  variant);
		break;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);

	return 0;
}
259
/* Primary board name: with variant suffix, exact QMI board id. */
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					 size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
}
265
/* Fallback board name: no variant suffix, default board id. */
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
}
271
/* Bus-only board name ("bus=<bus>"), used as last-resort regdb lookup. */
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
}
277
ath12k_core_firmware_request(struct ath12k_base * ab,const char * file)278 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
279 const char *file)
280 {
281 const struct firmware *fw;
282 char path[100];
283 int ret;
284
285 if (!file)
286 return ERR_PTR(-ENOENT);
287
288 ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
289
290 ret = firmware_request_nowarn(&fw, path, ab->dev);
291 if (ret)
292 return ERR_PTR(ret);
293
294 ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
295 path, fw->size);
296
297 return fw;
298 }
299
/* Release board-data firmware (bd->fw may hold an ERR_PTR from a failed
 * request, hence the IS_ERR() check) and clear the descriptor.
 */
void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	if (!IS_ERR(bd->fw))
		release_firmware(bd->fw);

	memset(bd, 0, sizeof(*bd));
}
307
/* Walk the IEs inside one BOARD/REGDB container IE looking for a name IE
 * matching @boardname followed by its data IE.
 *
 * On success bd->data/bd->len point INTO @buf (no copy; the backing
 * firmware buffer must stay alive). Returns 0 on match, -ENOENT if no
 * entry matches, -EINVAL on a malformed IE.
 */
static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
					 struct ath12k_board_data *bd,
					 const void *buf, size_t buf_len,
					 const char *boardname,
					 int ie_id,
					 int name_id,
					 int data_id)
{
	const struct ath12k_fw_ie *hdr;
	bool name_match_found;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	name_match_found = false;

	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath12k_fw_ie)) {
		hdr = buf;
		board_ie_id = le32_to_cpu(hdr->id);
		board_ie_len = le32_to_cpu(hdr->len);
		board_ie_data = hdr->data;

		/* Consume the header; the payload is consumed at "next". */
		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		/* Payloads are 4-byte aligned in the file; reject headers
		 * claiming more data than remains.
		 */
		if (buf_len < ALIGN(board_ie_len, 4)) {
			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
				   ath12k_bd_ie_type_str(ie_id),
				   buf_len, ALIGN(board_ie_len, 4));
			ret = -EINVAL;
			goto out;
		}

		if (board_ie_id == name_id) {
			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
					board_ie_data, board_ie_len);

			/* Exact-length, exact-content name comparison. */
			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, strlen(boardname));
			if (ret)
				goto next;

			/* Arm the following data IE for capture. */
			name_match_found = true;
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found match %s for name '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);
		} else if (board_ie_id == data_id) {
			if (!name_match_found)
				/* no match found */
				goto next;

			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found %s for '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);

			/* Hand back a pointer into the firmware image. */
			bd->data = board_ie_data;
			bd->len = board_ie_len;

			ret = 0;
			goto out;
		} else {
			ath12k_warn(ab, "unknown %s id found: %d\n",
				    ath12k_bd_ie_type_str(ie_id),
				    board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = ALIGN(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	ret = -ENOENT;

out:
	return ret;
}
392
/* Fetch board/regdb data matching @boardname from the API-2 combined
 * board file (board-2.bin layout: magic, then a sequence of IEs).
 *
 * Reuses an already-loaded bd->fw if present. On success bd->data/bd->len
 * reference the matching payload inside bd->fw; on failure the firmware
 * is released via ath12k_core_free_bdf(). Returns 0, -EINVAL on a
 * malformed file, or -ENODATA if nothing matched.
 */
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
					      struct ath12k_board_data *bd,
					      const char *boardname,
					      int ie_id_match,
					      int name_id,
					      int data_id)
{
	size_t len, magic_len;
	const u8 *data;
	char *filename, filepath[100];
	size_t ie_len;
	struct ath12k_fw_ie *hdr;
	int ret, ie_id;

	filename = ATH12K_BOARD_API2_FILE;

	/* A previous call may have left the file loaded in bd->fw. */
	if (!bd->fw)
		bd->fw = ath12k_core_firmware_request(ab, filename);

	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	data = bd->fw->data;
	len = bd->fw->size;

	/* Full path only used for error/debug messages below. */
	ath12k_core_create_firmware_path(ab, filename,
					 filepath, sizeof(filepath));

	/* magic has extra null byte padded */
	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
		ath12k_err(ab, "found invalid board magic\n");
		ret = -EINVAL;
		goto err;
	}

	/* magic is padded to 4 bytes */
	magic_len = ALIGN(magic_len, 4);
	if (len < magic_len) {
		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	data += magic_len;
	len -= magic_len;

	/* Walk the top-level IEs until one matches @ie_id_match. */
	while (len > sizeof(struct ath12k_fw_ie)) {
		hdr = (struct ath12k_fw_ie *)data;
		ie_id = le32_to_cpu(hdr->id);
		ie_len = le32_to_cpu(hdr->len);

		/* Advance past the header; the payload is skipped at "next". */
		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < ALIGN(ie_len, 4)) {
			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
				   ie_id, ie_len, len);
			ret = -EINVAL;
			goto err;
		}

		if (ie_id == ie_id_match) {
			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
							    ie_len,
							    boardname,
							    ie_id_match,
							    name_id,
							    data_id);
			if (ret == -ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				goto err;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = ALIGN(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	/* Reaching here without captured data means no IE matched. */
	if (!bd->data || !bd->len) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to fetch %s for %s from %s\n",
			   ath12k_bd_ie_type_str(ie_id_match),
			   boardname, filepath);
		ret = -ENODATA;
		goto err;
	}

	return 0;

err:
	ath12k_core_free_bdf(ab, bd);
	return ret;
}
503
ath12k_core_fetch_board_data_api_1(struct ath12k_base * ab,struct ath12k_board_data * bd,char * filename)504 int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
505 struct ath12k_board_data *bd,
506 char *filename)
507 {
508 bd->fw = ath12k_core_firmware_request(ab, filename);
509 if (IS_ERR(bd->fw))
510 return PTR_ERR(bd->fw);
511
512 bd->data = bd->fw->data;
513 bd->len = bd->fw->size;
514
515 return 0;
516 }
517
#define BOARD_NAME_SIZE 200
/* Locate board data, trying in order: API-2 file with the exact board
 * name, API-2 with the fallback (default board id) name, then the plain
 * API-1 board.bin. Returns 0 on success, negative errno if all fail.
 */
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
	char *filename, filepath[100];
	int bd_api;
	int ret;

	filename = ATH12K_BOARD_API2_FILE;

	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
	if (ret) {
		ath12k_err(ab, "failed to create board name: %d", ret);
		return ret;
	}

	/* Attempt 1: API-2 file, exact board name. */
	bd_api = 2;
	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
						     sizeof(fallback_boardname));
	if (ret) {
		ath12k_err(ab, "failed to create fallback board name: %d", ret);
		return ret;
	}

	/* Attempt 2: API-2 file, fallback board name (default board id). */
	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	/* Attempt 3: legacy single-board file. */
	bd_api = 1;
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
	if (ret) {
		ath12k_core_create_firmware_path(ab, filename,
						 filepath, sizeof(filepath));
		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
			   boardname, filepath);
		/* Only report the fallback name separately if it differs. */
		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
				   fallback_boardname, filepath);

		ath12k_err(ab, "failed to fetch board.bin from %s\n",
			   ab->hw_params->fw.dir);
		return ret;
	}

success:
	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
	return 0;
}
576
/* Locate regulatory database, trying in order: API-2 file with the exact
 * board name, API-2 with the bus-only default name, then the standalone
 * regdb file. Returns 0 on success, negative errno otherwise.
 */
int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
	int ret;

	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	/* Exact name missed; retry with the bus-level default name. */
	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
						     BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create default board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	/* Last resort: dedicated regdb file. */
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
	if (ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);

exit:
	if (!ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");

	return ret;
}
622
ath12k_core_get_max_station_per_radio(struct ath12k_base * ab)623 u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
624 {
625 if (ab->num_radios == 2)
626 return TARGET_NUM_STATIONS(ab, DBS);
627 if (ab->num_radios == 3)
628 return TARGET_NUM_STATIONS(ab, DBS_SBS);
629 return TARGET_NUM_STATIONS(ab, SINGLE);
630 }
631
ath12k_core_get_max_peers_per_radio(struct ath12k_base * ab)632 u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
633 {
634 return ath12k_core_get_max_station_per_radio(ab) + TARGET_NUM_VDEVS(ab);
635 }
636
ath12k_core_get_reserved_mem(struct ath12k_base * ab,int index)637 struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
638 int index)
639 {
640 struct device *dev = ab->dev;
641 struct reserved_mem *rmem;
642 struct device_node *node;
643
644 node = of_parse_phandle(dev->of_node, "memory-region", index);
645 if (!node) {
646 ath12k_dbg(ab, ATH12K_DBG_BOOT,
647 "failed to parse memory-region for index %d\n", index);
648 return NULL;
649 }
650
651 rmem = of_reserved_mem_lookup(node);
652 of_node_put(node);
653 if (!rmem) {
654 ath12k_dbg(ab, ATH12K_DBG_BOOT,
655 "unable to get memory-region for index %d\n", index);
656 return NULL;
657 }
658
659 return rmem;
660 }
661
662 static inline
ath12k_core_to_group_ref_get(struct ath12k_base * ab)663 void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
664 {
665 struct ath12k_hw_group *ag = ab->ag;
666
667 lockdep_assert_held(&ag->mutex);
668
669 if (ab->hw_group_ref) {
670 ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
671 ag->id);
672 return;
673 }
674
675 ab->hw_group_ref = true;
676 ag->num_started++;
677
678 ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
679 ag->id, ag->num_started);
680 }
681
682 static inline
ath12k_core_to_group_ref_put(struct ath12k_base * ab)683 void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
684 {
685 struct ath12k_hw_group *ag = ab->ag;
686
687 lockdep_assert_held(&ag->mutex);
688
689 if (!ab->hw_group_ref) {
690 ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
691 ag->id);
692 return;
693 }
694
695 ab->hw_group_ref = false;
696 ag->num_started--;
697
698 ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
699 ag->id, ag->num_started);
700 }
701
/* Stop one device: drop its group reference, stop firmware (unless it
 * already crashed), then tear components down in reverse start order.
 */
static void ath12k_core_stop(struct ath12k_base *ab)
{
	ath12k_core_to_group_ref_put(ab);

	/* Skip the QMI stop handshake when the firmware has crashed. */
	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_qmi_firmware_stop(ab);

	ath12k_acpi_stop(ab);

	ath12k_dp_rx_pdev_reo_cleanup(ab);
	ath12k_hif_stop(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_free(ab);

	/* De-Init of components as needed */
}
718
/* dmi_walk() callback: read the vendor SMBIOS entry carrying the
 * regulatory country code and the board-data-file (BDF) variant string,
 * storing them in ab->new_alpha2 and ab->qmi.target.bdf_ext.
 */
static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
	struct ath12k_base *ab = data;
	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
	ssize_t copied;
	size_t len;
	int i;

	/* A variant was already found in an earlier entry; keep it. */
	if (ab->qmi.target.bdf_ext[0] != '\0')
		return;

	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
		return;

	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "wrong smbios bdf ext type length (%d).\n",
			   hdr->length);
		return;
	}

	spin_lock_bh(&ab->base_lock);

	switch (smbios->country_code_flag) {
	case ATH12K_SMBIOS_CC_ISO:
		/* Two ISO alpha2 characters packed into a u16. */
		ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
		ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
			   ab->new_alpha2[0], ab->new_alpha2[1]);
		break;
	case ATH12K_SMBIOS_CC_WW:
		/* "00" denotes the worldwide regulatory domain. */
		ab->new_alpha2[0] = '0';
		ab->new_alpha2[1] = '0';
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
			   smbios->country_code_flag);
		break;
	}

	spin_unlock_bh(&ab->base_lock);

	if (!smbios->bdf_enabled) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
		return;
	}

	/* Only one string exists (per spec) */
	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant magic does not match.\n");
		return;
	}

	/* Reject strings with non-printable/non-ASCII characters. */
	len = min_t(size_t,
		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
	for (i = 0; i < len; i++) {
		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "bdf variant name contains non ascii chars.\n");
			return;
		}
	}

	/* Copy extension name without magic prefix */
	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
			 sizeof(ab->qmi.target.bdf_ext));
	if (copied < 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant string is longer than the buffer can accommodate\n");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT,
		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
}
798
ath12k_core_check_smbios(struct ath12k_base * ab)799 int ath12k_core_check_smbios(struct ath12k_base *ab)
800 {
801 ab->qmi.target.bdf_ext[0] = '\0';
802 dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
803
804 if (ab->qmi.target.bdf_ext[0] == '\0')
805 return -ENODATA;
806
807 return 0;
808 }
809
/* Bring up the SoC-level pieces: QMI service, debugfs, and first power
 * up. Returns 0 on success; on power-up failure the QMI service and
 * debugfs are unwound. Honors the ftm_mode module parameter.
 */
static int ath12k_core_soc_create(struct ath12k_base *ab)
{
	int ret;

	if (ath12k_ftm_mode) {
		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
		ath12k_info(ab, "Booting in ftm mode\n");
	}

	ret = ath12k_qmi_init_service(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
		return ret;
	}

	ath12k_debugfs_soc_create(ab);

	ret = ath12k_hif_power_up(ab);
	if (ret) {
		ath12k_err(ab, "failed to power up :%d\n", ret);
		goto err_qmi_deinit;
	}

	ath12k_debugfs_pdev_create(ab);

	return 0;

err_qmi_deinit:
	/* Unwind in reverse order of creation. */
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
	return ret;
}
842
/* Tear down the SoC-level pieces created by ath12k_core_soc_create()
 * (plus regulatory state), in reverse order of setup.
 */
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
	/* false: not a suspend-path power down */
	ath12k_hif_power_down(ab, false);
	ath12k_reg_free(ab);
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
}
850
/* Per-device pdev setup: currently only the DP pdev allocation.
 * Returns 0 on success, negative errno otherwise.
 */
static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
	int ret = ath12k_dp_pdev_alloc(ab);

	if (ret)
		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);

	return ret;
}
863
/* Undo ath12k_core_pdev_create(): free the DP pdev state. */
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
	ath12k_dp_pdev_free(ab);
}
868
/* Start one device: attach WMI/HTC, start the HIF, complete the HTC/HTT/
 * WMI handshakes, set up REO and datapath, send WMI init, and take the
 * group reference. Caller holds ab->core_lock. On failure everything
 * brought up so far is unwound via the goto ladder. Returns 0 or errno.
 */
static int ath12k_core_start(struct ath12k_base *ab)
{
	int ret;

	lockdep_assert_held(&ab->core_lock);

	ret = ath12k_wmi_attach(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
		return ret;
	}

	ret = ath12k_htc_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init htc: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_hif_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start HIF: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_htc_wait_target(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
		goto err_hif_stop;
	}

	/* Connect the HTT (datapath) and WMI (control) HTC endpoints. */
	ret = ath12k_dp_htt_connect(&ab->dp);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_connect(ab);
	if (ret) {
		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_htc_start(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to start HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_wait_for_service_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
			   ret);
		goto err_hif_stop;
	}

	ath12k_dp_cc_config(ab);

	ret = ath12k_dp_rx_pdev_reo_setup(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
		goto err_hif_stop;
	}

	ath12k_dp_hal_rx_desc_init(ab);

	ret = ath12k_wmi_cmd_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
		goto err_reo_cleanup;
	}

	ret = ath12k_wmi_wait_for_unified_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	/* put hardware to DBS mode */
	if (ab->hw_params->single_pdev_only) {
		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
		if (ret) {
			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
			goto err_reo_cleanup;
		}
	}

	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
	if (ret) {
		ath12k_err(ab, "failed to send htt version request message: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	ath12k_acpi_set_dsm_func(ab);

	/* Indicate the core start in the appropriate group */
	ath12k_core_to_group_ref_get(ab);

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);
err_hif_stop:
	ath12k_hif_stop(ab);
err_wmi_detach:
	ath12k_wmi_detach(ab);
	return ret;
}
978
/* Per-device cleanup used during group stop: disable interrupts and
 * destroy the pdev state, all under ab->core_lock.
 */
static void ath12k_core_device_cleanup(struct ath12k_base *ab)
{
	mutex_lock(&ab->core_lock);

	ath12k_hif_irq_disable(ab);
	ath12k_core_pdev_destroy(ab);

	mutex_unlock(&ab->core_lock);
}
988
/* Stop an entire hardware group: unregister from mac80211, clean up each
 * member device in reverse order, then destroy the mac layer. Caller
 * holds ag->mutex.
 */
static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	lockdep_assert_held(&ag->mutex);

	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

	ath12k_mac_unregister(ag);

	/* Reverse device order mirrors the start sequence. */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ath12k_core_device_cleanup(ab);
	}

	ath12k_mac_destroy(ag);
}
1012
/* Count the radios in @ar's hardware group other than @ar itself, i.e.
 * the number of potential MLO partner links. Caller holds ag->mutex.
 *
 * NOTE(review): unlike the other group iterators in this file, ag->ab[i]
 * is dereferenced without a NULL check — confirm all slots are populated
 * whenever this runs.
 */
u8 ath12k_get_num_partner_link(struct ath12k *ar)
{
	struct ath12k_base *partner_ab, *ab = ar->ab;
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_pdev *pdev;
	u8 num_link = 0;
	int i, j;

	lockdep_assert_held(&ag->mutex);

	for (i = 0; i < ag->num_devices; i++) {
		partner_ab = ag->ab[i];

		for (j = 0; j < partner_ab->num_radios; j++) {
			pdev = &partner_ab->pdevs[j];

			/* Avoid the self link */
			if (ar == pdev->ar)
				continue;

			num_link++;
		}
	}

	return num_link;
}
1039
__ath12k_mac_mlo_ready(struct ath12k * ar)1040 static int __ath12k_mac_mlo_ready(struct ath12k *ar)
1041 {
1042 u8 num_link = ath12k_get_num_partner_link(ar);
1043 int ret;
1044
1045 if (num_link == 0)
1046 return 0;
1047
1048 ret = ath12k_wmi_mlo_ready(ar);
1049 if (ret) {
1050 ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
1051 ar->pdev_idx, ret);
1052 return ret;
1053 }
1054
1055 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
1056 ar->pdev_idx);
1057
1058 return 0;
1059 }
1060
ath12k_mac_mlo_ready(struct ath12k_hw_group * ag)1061 int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
1062 {
1063 struct ath12k_hw *ah;
1064 struct ath12k *ar;
1065 int ret;
1066 int i, j;
1067
1068 for (i = 0; i < ag->num_hw; i++) {
1069 ah = ag->ah[i];
1070 if (!ah)
1071 continue;
1072
1073 for_each_ar(ah, ar, j) {
1074 ar = &ah->radio[j];
1075 ret = __ath12k_mac_mlo_ready(ar);
1076 if (ret)
1077 return ret;
1078 }
1079 }
1080
1081 return 0;
1082 }
1083
ath12k_core_mlo_setup(struct ath12k_hw_group * ag)1084 static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
1085 {
1086 int ret, i;
1087
1088 if (!ag->mlo_capable)
1089 return 0;
1090
1091 ret = ath12k_mac_mlo_setup(ag);
1092 if (ret)
1093 return ret;
1094
1095 for (i = 0; i < ag->num_devices; i++)
1096 ath12k_dp_partner_cc_init(ag->ab[i]);
1097
1098 ret = ath12k_mac_mlo_ready(ag);
1099 if (ret)
1100 goto err_mlo_teardown;
1101
1102 return 0;
1103
1104 err_mlo_teardown:
1105 ath12k_mac_mlo_teardown(ag);
1106
1107 return ret;
1108 }
1109
/* Start a hardware group: on first start, allocate the mac layer, run
 * MLO setup and register with mac80211; then (also on re-start of an
 * already-registered group) create pdevs and enable interrupts for each
 * member device. Caller holds ag->mutex.
 *
 * Note there are two error ladders: pre-registration failures unwind
 * MLO/mac individually, while per-device failures after registration
 * stop the whole group.
 */
static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int ret, i;

	lockdep_assert_held(&ag->mutex);

	/* Already registered: only the per-device part needs doing. */
	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
		goto core_pdev_create;

	ret = ath12k_mac_allocate(ag);
	if (WARN_ON(ret))
		return ret;

	ret = ath12k_core_mlo_setup(ag);
	if (WARN_ON(ret))
		goto err_mac_destroy;

	ret = ath12k_mac_register(ag);
	if (WARN_ON(ret))
		goto err_mlo_teardown;

	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

core_pdev_create:
	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ret = ath12k_core_pdev_create(ab);
		if (ret) {
			ath12k_err(ab, "failed to create pdev core %d\n", ret);
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		ath12k_hif_irq_enable(ab);

		/* -EOPNOTSUPP from rfkill config is tolerated. */
		ret = ath12k_core_rfkill_config(ab);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

err:
	/* Per-device failure after registration: stop the whole group. */
	ath12k_core_hw_group_stop(ag);
	return ret;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

err_mac_destroy:
	ath12k_mac_destroy(ag);

	return ret;
}
1176
ath12k_core_start_firmware(struct ath12k_base * ab,enum ath12k_firmware_mode mode)1177 static int ath12k_core_start_firmware(struct ath12k_base *ab,
1178 enum ath12k_firmware_mode mode)
1179 {
1180 int ret;
1181
1182 ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
1183 &ab->qmi.ce_cfg.shadow_reg_v3_len);
1184
1185 ret = ath12k_qmi_firmware_start(ab, mode);
1186 if (ret) {
1187 ath12k_err(ab, "failed to send firmware start: %d\n", ret);
1188 return ret;
1189 }
1190
1191 return ret;
1192 }
1193
1194 static inline
ath12k_core_hw_group_start_ready(struct ath12k_hw_group * ag)1195 bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
1196 {
1197 lockdep_assert_held(&ag->mutex);
1198
1199 return (ag->num_started == ag->num_devices);
1200 }
1201
ath12k_fw_stats_pdevs_free(struct list_head * head)1202 static void ath12k_fw_stats_pdevs_free(struct list_head *head)
1203 {
1204 struct ath12k_fw_stats_pdev *i, *tmp;
1205
1206 list_for_each_entry_safe(i, tmp, head, list) {
1207 list_del(&i->list);
1208 kfree(i);
1209 }
1210 }
1211
ath12k_fw_stats_bcn_free(struct list_head * head)1212 void ath12k_fw_stats_bcn_free(struct list_head *head)
1213 {
1214 struct ath12k_fw_stats_bcn *i, *tmp;
1215
1216 list_for_each_entry_safe(i, tmp, head, list) {
1217 list_del(&i->list);
1218 kfree(i);
1219 }
1220 }
1221
ath12k_fw_stats_vdevs_free(struct list_head * head)1222 static void ath12k_fw_stats_vdevs_free(struct list_head *head)
1223 {
1224 struct ath12k_fw_stats_vdev *i, *tmp;
1225
1226 list_for_each_entry_safe(i, tmp, head, list) {
1227 list_del(&i->list);
1228 kfree(i);
1229 }
1230 }
1231
ath12k_fw_stats_init(struct ath12k * ar)1232 void ath12k_fw_stats_init(struct ath12k *ar)
1233 {
1234 INIT_LIST_HEAD(&ar->fw_stats.vdevs);
1235 INIT_LIST_HEAD(&ar->fw_stats.pdevs);
1236 INIT_LIST_HEAD(&ar->fw_stats.bcn);
1237 init_completion(&ar->fw_stats_complete);
1238 init_completion(&ar->fw_stats_done);
1239 }
1240
/* Free all pdev, vdev and beacon entries held in @stats.  Typically
 * called with ar->data_lock held (see ath12k_fw_stats_reset()) when the
 * lists can be updated concurrently.
 */
void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
{
	ath12k_fw_stats_pdevs_free(&stats->pdevs);
	ath12k_fw_stats_vdevs_free(&stats->vdevs);
	ath12k_fw_stats_bcn_free(&stats->bcn);
}
1247
/* Drop all collected firmware stats and reset the received-entry
 * counters, under ar->data_lock to serialize against stat updaters.
 */
void ath12k_fw_stats_reset(struct ath12k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ath12k_fw_stats_free(&ar->fw_stats);
	ar->fw_stats.num_vdev_recvd = 0;
	ar->fw_stats.num_bcn_recvd = 0;
	spin_unlock_bh(&ar->data_lock);
}
1256
/* Trigger the QMI host-capability exchange on every partner device that
 * comes after @ab in the group's device array.  `found` flips to true
 * once @ab itself is reached, so only the devices positioned after it
 * get triggered.
 */
static void ath12k_core_trigger_partner(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_base *partner_ab;
	bool found = false;
	int i;

	for (i = 0; i < ag->num_devices; i++) {
		partner_ab = ag->ab[i];
		if (!partner_ab)
			continue;

		if (found)
			ath12k_qmi_trigger_host_cap(partner_ab);

		found = (partner_ab == ab);
	}
}
1275
/* QMI "firmware ready" event handler: start firmware, initialize CE
 * pipes and DP, then start this device's core.  If this was the last
 * device in the group to start, bring up the whole hardware group;
 * otherwise trigger the remaining partner devices.
 *
 * Lock order is ag->mutex then ab->core_lock.  Returns 0 on success or
 * a negative error after unwinding whatever had been set up.
 */
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	int ret, i;

	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
	if (ret) {
		ath12k_err(ab, "failed to start firmware: %d\n", ret);
		return ret;
	}

	ret = ath12k_ce_init_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
		goto err_firmware_stop;
	}

	ret = ath12k_dp_alloc(ab);
	if (ret) {
		ath12k_err(ab, "failed to init DP: %d\n", ret);
		goto err_firmware_stop;
	}

	mutex_lock(&ag->mutex);
	mutex_lock(&ab->core_lock);

	ret = ath12k_core_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start core: %d\n", ret);
		goto err_dp_free;
	}

	mutex_unlock(&ab->core_lock);

	/* Last device to start brings up the entire group */
	if (ath12k_core_hw_group_start_ready(ag)) {
		ret = ath12k_core_hw_group_start(ag);
		if (ret) {
			ath12k_warn(ab, "unable to start hw group\n");
			goto err_core_stop;
		}
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
	} else {
		ath12k_core_trigger_partner(ab);
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_core_stop:
	/* Group start failed: stop every started device in the group,
	 * in reverse order.  Note `ab` is reused as the loop cursor here.
	 */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}
	mutex_unlock(&ag->mutex);
	goto exit;

err_dp_free:
	ath12k_dp_free(ab);
	mutex_unlock(&ab->core_lock);
	mutex_unlock(&ag->mutex);

err_firmware_stop:
	ath12k_qmi_firmware_stop(ab);

exit:
	return ret;
}
1349
/* Rebuild driver state after a firmware crash: tear down pdev-level DP,
 * CE pipes, WMI and REO state, free the DP context and HAL rings, then
 * re-init the rings and restart firmware via the QMI-ready path.
 *
 * Returns 0 on success; on failure the re-initialized HAL rings are
 * deinitialized again before returning the error.
 */
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
	int ret, total_vdev;

	mutex_lock(&ab->core_lock);
	ath12k_dp_pdev_free(ab);
	ath12k_ce_cleanup_pipes(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_rx_pdev_reo_cleanup(ab);
	mutex_unlock(&ab->core_lock);

	ath12k_dp_free(ab);
	ath12k_hal_srng_deinit(ab);
	/* All vdev ids become free again after the crash */
	total_vdev = ab->num_radios * TARGET_NUM_VDEVS(ab);
	ab->free_vdev_map = (1LL << total_vdev) - 1;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		return ret;

	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	ret = ath12k_core_qmi_firmware_ready(ab);
	if (ret)
		goto err_hal_srng_deinit;

	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);

	return 0;

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);
	return ret;
}
1384
ath12k_rfkill_work(struct work_struct * work)1385 static void ath12k_rfkill_work(struct work_struct *work)
1386 {
1387 struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1388 struct ath12k_hw_group *ag = ab->ag;
1389 struct ath12k *ar;
1390 struct ath12k_hw *ah;
1391 struct ieee80211_hw *hw;
1392 bool rfkill_radio_on;
1393 int i, j;
1394
1395 spin_lock_bh(&ab->base_lock);
1396 rfkill_radio_on = ab->rfkill_radio_on;
1397 spin_unlock_bh(&ab->base_lock);
1398
1399 for (i = 0; i < ag->num_hw; i++) {
1400 ah = ath12k_ag_to_ah(ag, i);
1401 if (!ah)
1402 continue;
1403
1404 for (j = 0; j < ah->num_radio; j++) {
1405 ar = &ah->radio[j];
1406 if (!ar)
1407 continue;
1408
1409 ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1410 }
1411
1412 hw = ah->hw;
1413 wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1414 }
1415 }
1416
/* Halt a single radio: reset vdev accounting, finish/cancel outstanding
 * work, unpublish the pdev from the RCU-visible active array and empty
 * the arvif list.  Caller must hold the wiphy mutex.
 */
void ath12k_core_halt(struct ath12k *ar)
{
	struct list_head *pos, *n;
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ar->num_created_vdevs = 0;
	ar->allocated_vdev_map = 0;

	ath12k_mac_scan_finish(ar);
	ath12k_mac_peer_cleanup_all(ar);
	cancel_delayed_work_sync(&ar->scan.timeout);
	cancel_work_sync(&ar->regd_update_work);
	cancel_work_sync(&ar->regd_channel_update_work);
	cancel_work_sync(&ab->rfkill_work);
	cancel_work_sync(&ab->update_11d_work);

	/* Stop readers from seeing this pdev before tearing down state */
	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
	synchronize_rcu();

	spin_lock_bh(&ar->data_lock);
	list_for_each_safe(pos, n, &ar->arvifs)
		list_del_init(pos);
	spin_unlock_bh(&ar->data_lock);

	/* Fresh IDR for tx mgmt frames after the halt */
	idr_init(&ar->txmgmt_idr);
}
1445
/* Pre-recovery quiesce after a firmware crash: bump the crash counter,
 * stop mac80211 queues, drain tx, and complete every outstanding
 * completion so no waiter blocks across the recovery.  Finally wake the
 * WMI credit and peer-mapping waiters.
 */
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_hw *ah;
	int i, j;

	spin_lock_bh(&ab->base_lock);
	ab->stats.fw_crash_counter++;
	spin_unlock_bh(&ab->base_lock);

	if (ab->is_reset)
		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		/* Skip hw that never started or is in factory test mode */
		if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
		    ah->state == ATH12K_HW_STATE_TM)
			continue;

		wiphy_lock(ah->hw->wiphy);

		/* If queue 0 is stopped, it is safe to assume that all
		 * other queues are stopped by driver via
		 * ieee80211_stop_queues() below. This means, there is
		 * no need to stop it again and hence continue
		 */
		if (ieee80211_queue_stopped(ah->hw, 0)) {
			wiphy_unlock(ah->hw->wiphy);
			continue;
		}

		ieee80211_stop_queues(ah->hw);

		for (j = 0; j < ah->num_radio; j++) {
			ar = &ah->radio[j];

			ath12k_mac_drain_tx(ar);
			ar->state_11d = ATH12K_11D_IDLE;
			/* Release every possible waiter; firmware will not
			 * answer these requests anymore.
			 */
			complete(&ar->completed_11d_scan);
			complete(&ar->scan.started);
			complete_all(&ar->scan.completed);
			complete(&ar->scan.on_channel);
			complete(&ar->peer_assoc_done);
			complete(&ar->peer_delete_done);
			complete(&ar->install_key_done);
			complete(&ar->vdev_setup_done);
			complete(&ar->vdev_delete_done);
			complete(&ar->bss_survey_done);
			complete_all(&ar->regd_update_completed);

			wake_up(&ar->dp.tx_empty_waitq);
			/* Free any pending tx mgmt frames and their IDR slots */
			idr_for_each(&ar->txmgmt_idr,
				     ath12k_mac_tx_mgmt_pending_free, ar);
			idr_destroy(&ar->txmgmt_idr);
			wake_up(&ar->txmgmt_empty_waitq);

			ar->monitor_vdev_id = -1;
			ar->monitor_vdev_created = false;
			ar->monitor_started = false;
		}

		wiphy_unlock(ah->hw->wiphy);
	}

	wake_up(&ab->wmi_ab.tx_credits_wq);
	wake_up(&ab->peer_mapping_wq);
}
1514
/* Worker that pushes a newly learned 802.11d country code to firmware:
 * snapshot ab->new_alpha2 under base_lock, then send a set-current-
 * country command to every radio and re-arm its regd-update completion.
 */
static void ath12k_update_11d(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
	struct ath12k *ar;
	struct ath12k_pdev *pdev;
	struct wmi_set_current_country_arg arg = {};
	int ret, i;

	spin_lock_bh(&ab->base_lock);
	memcpy(&arg.alpha2, &ab->new_alpha2, 2);
	spin_unlock_bh(&ab->base_lock);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
		   arg.alpha2[0], arg.alpha2[1]);

	for (i = 0; i < ab->num_radios; i++) {
		pdev = &ab->pdevs[i];
		ar = pdev->ar;

		memcpy(&ar->alpha2, &arg.alpha2, 2);

		/* The country change will trigger a regd update reply;
		 * re-arm the completion waiters look at.
		 */
		reinit_completion(&ar->regd_update_completed);

		ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
		if (ret)
			ath12k_warn(ar->ab,
				    "pdev id %d failed set current country code: %d\n",
				    i, ret);
	}
}
1545
/* Post-quiesce recovery step: walk every hw in the group and move its
 * state machine toward restart.  An ON hw is halted and marked
 * RESTARTING; a hw already RESTARTED when another crash hits is wedged
 * and will not be restarted again.  Completes driver_recovery last.
 */
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	struct ath12k *ar;
	int i, j;

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
			continue;

		wiphy_lock(ah->hw->wiphy);
		mutex_lock(&ah->hw_mutex);

		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;

			for (j = 0; j < ah->num_radio; j++) {
				ar = &ah->radio[j];
				ath12k_core_halt(ar);
			}

			break;
		case ATH12K_HW_STATE_OFF:
			/* Defensive: OFF hw are filtered out above, so this
			 * branch should be unreachable.
			 */
			ath12k_warn(ab,
				    "cannot restart hw %d that hasn't been started\n",
				    i);
			break;
		case ATH12K_HW_STATE_RESTARTING:
			break;
		case ATH12K_HW_STATE_RESTARTED:
			/* Crashed again before the previous restart finished */
			ah->state = ATH12K_HW_STATE_WEDGED;
			fallthrough;
		case ATH12K_HW_STATE_WEDGED:
			ath12k_warn(ab,
				    "device is wedged, will not restart hw %d\n", i);
			break;
		case ATH12K_HW_STATE_TM:
			ath12k_warn(ab, "fw mode reset done radio %d\n", i);
			break;
		}

		mutex_unlock(&ah->hw_mutex);
		wiphy_unlock(ah->hw->wiphy);
	}

	complete(&ab->driver_recovery);
}
1596
/* Restart worker: reconfigure the driver after a crash and, once every
 * device in the group has come back (group start ready), ask mac80211
 * to restart each hw.  For a device that was never registered, the
 * reset bookkeeping is completed here instead.
 */
static void ath12k_core_restart(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	int ret, i;

	ret = ath12k_core_reconfigure_on_crash(ab);
	if (ret) {
		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
		return;
	}

	if (ab->is_reset) {
		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
			/* Never registered with mac80211: finish the reset
			 * accounting directly, nothing to restart.
			 */
			atomic_dec(&ab->reset_count);
			complete(&ab->reset_complete);
			ab->is_reset = false;
			atomic_set(&ab->fail_cont_count, 0);
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
		}

		mutex_lock(&ag->mutex);

		/* Wait until all partner devices have restarted too */
		if (!ath12k_core_hw_group_start_ready(ag)) {
			mutex_unlock(&ag->mutex);
			goto exit_restart;
		}

		for (i = 0; i < ag->num_hw; i++) {
			ah = ath12k_ag_to_ah(ag, i);
			ieee80211_restart_hw(ah->hw);
		}

		mutex_unlock(&ag->mutex);
	}

exit_restart:
	complete(&ab->restart_completed);
}
1637
/* Full device-reset worker.  Guards against resets before firmware was
 * ever ready, rate-limits repeatedly failing recoveries, and serializes
 * concurrent reset workers via reset_count/reset_complete.  It then
 * collects a coredump, quiesces the stack, powers the device down, and
 * powers the whole group back up once the last partner device has
 * dropped its group reference.
 */
static void ath12k_core_reset(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
	struct ath12k_hw_group *ag = ab->ag;
	int reset_count, fail_cont_count, i;
	long time_left;

	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
		return;
	}

	/* Sometimes the recovery will fail and then the next all recovery fail,
	 * this is to avoid infinite recovery since it can not recovery success
	 */
	fail_cont_count = atomic_read(&ab->fail_cont_count);

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
		return;

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
	    time_before(jiffies, ab->reset_fail_timeout))
		return;

	reset_count = atomic_inc_return(&ab->reset_count);

	if (reset_count > 1) {
		/* Sometimes it happened another reset worker before the previous one
		 * completed, then the second reset worker will destroy the previous one,
		 * thus below is to avoid that.
		 */
		ath12k_warn(ab, "already resetting count %d\n", reset_count);

		reinit_completion(&ab->reset_complete);
		time_left = wait_for_completion_timeout(&ab->reset_complete,
							ATH12K_RESET_TIMEOUT_HZ);
		if (time_left) {
			/* Previous reset finished in time: this one is redundant */
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
			atomic_dec(&ab->reset_count);
			return;
		}

		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
		/* Record the continuous recovery fail count when recovery failed*/
		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");

	ab->is_reset = true;
	atomic_set(&ab->recovery_count, 0);

	ath12k_coredump_collect(ab);
	ath12k_core_pre_reconfigure_recovery(ab);

	ath12k_core_post_reconfigure_recovery(ab);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");

	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	ath12k_hif_power_down(ab, false);

	/* prepare for power up */
	ab->qmi.num_radios = U8_MAX;

	mutex_lock(&ag->mutex);
	ath12k_core_to_group_ref_put(ab);

	/* Last device to drop its group ref powers everything back up */
	if (ag->num_started > 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "waiting for %d partner device(s) to reset\n",
			   ag->num_started);
		mutex_unlock(&ag->mutex);
		return;
	}

	/* Prepare MLO global memory region for power up */
	ath12k_qmi_reset_mlo_mem(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		ath12k_hif_power_up(ab);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
	}

	mutex_unlock(&ag->mutex);
}
1730
ath12k_core_get_memory_mode(struct ath12k_base * ab)1731 enum ath12k_qmi_mem_mode ath12k_core_get_memory_mode(struct ath12k_base *ab)
1732 {
1733 unsigned long total_ram;
1734 struct sysinfo si;
1735
1736 si_meminfo(&si);
1737 total_ram = si.totalram * si.mem_unit;
1738
1739 if (total_ram < SZ_512M)
1740 return ATH12K_QMI_MEMORY_MODE_LOW_512_M;
1741
1742 return ATH12K_QMI_MEMORY_MODE_DEFAULT;
1743 }
1744
ath12k_core_pre_init(struct ath12k_base * ab)1745 int ath12k_core_pre_init(struct ath12k_base *ab)
1746 {
1747 const struct ath12k_mem_profile_based_param *param;
1748 int ret;
1749
1750 ret = ath12k_hw_init(ab);
1751 if (ret) {
1752 ath12k_err(ab, "failed to init hw params: %d\n", ret);
1753 return ret;
1754 }
1755
1756 param = &ath12k_mem_profile_based_param[ab->target_mem_mode];
1757 ab->profile_param = param;
1758 ath12k_fw_map(ab);
1759
1760 return 0;
1761 }
1762
/* Panic-notifier callback: recover the owning ath12k_base from the
 * embedded notifier block and delegate to the HIF panic handler.
 */
static int ath12k_core_panic_handler(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
					      panic_nb);

	return ath12k_hif_panic_handler(ab);
}
1771
/* Hook this device's panic handler into the kernel panic notifier
 * chain so device state can be handled on a system panic.
 */
static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
{
	ab->panic_nb.notifier_call = ath12k_core_panic_handler;

	return atomic_notifier_chain_register(&panic_notifier_list,
					      &ab->panic_nb);
}
1779
/* Remove this device's handler from the kernel panic notifier chain. */
static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &ab->panic_nb);
}
1785
1786 static inline
ath12k_core_hw_group_create_ready(struct ath12k_hw_group * ag)1787 bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1788 {
1789 lockdep_assert_held(&ag->mutex);
1790
1791 return (ag->num_probed == ag->num_devices);
1792 }
1793
ath12k_core_hw_group_alloc(struct ath12k_base * ab)1794 static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
1795 {
1796 struct ath12k_hw_group *ag;
1797 int count = 0;
1798
1799 lockdep_assert_held(&ath12k_hw_group_mutex);
1800
1801 list_for_each_entry(ag, &ath12k_hw_group_list, list)
1802 count++;
1803
1804 ag = kzalloc(sizeof(*ag), GFP_KERNEL);
1805 if (!ag)
1806 return NULL;
1807
1808 ag->id = count;
1809 list_add(&ag->list, &ath12k_hw_group_list);
1810 mutex_init(&ag->mutex);
1811 ag->mlo_capable = false;
1812
1813 return ag;
1814 }
1815
/* Remove a group from the global group list and free it, serialized
 * against concurrent group lookup/creation via ath12k_hw_group_mutex.
 */
static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
{
	mutex_lock(&ath12k_hw_group_mutex);

	list_del(&ag->list);
	kfree(ag);

	mutex_unlock(&ath12k_hw_group_mutex);
}
1825
ath12k_core_hw_group_find_by_dt(struct ath12k_base * ab)1826 static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
1827 {
1828 struct ath12k_hw_group *ag;
1829 int i;
1830
1831 if (!ab->dev->of_node)
1832 return NULL;
1833
1834 list_for_each_entry(ag, &ath12k_hw_group_list, list)
1835 for (i = 0; i < ag->num_devices; i++)
1836 if (ag->wsi_node[i] == ab->dev->of_node)
1837 return ag;
1838
1839 return NULL;
1840 }
1841
ath12k_core_get_wsi_info(struct ath12k_hw_group * ag,struct ath12k_base * ab)1842 static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
1843 struct ath12k_base *ab)
1844 {
1845 struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
1846 struct device_node *tx_endpoint, *next_rx_endpoint;
1847 int device_count = 0;
1848
1849 next_wsi_dev = wsi_dev;
1850
1851 if (!next_wsi_dev)
1852 return -ENODEV;
1853
1854 do {
1855 ag->wsi_node[device_count] = next_wsi_dev;
1856
1857 tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
1858 if (!tx_endpoint) {
1859 of_node_put(next_wsi_dev);
1860 return -ENODEV;
1861 }
1862
1863 next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
1864 if (!next_rx_endpoint) {
1865 of_node_put(next_wsi_dev);
1866 of_node_put(tx_endpoint);
1867 return -ENODEV;
1868 }
1869
1870 of_node_put(tx_endpoint);
1871 of_node_put(next_wsi_dev);
1872
1873 next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
1874 if (!next_wsi_dev) {
1875 of_node_put(next_rx_endpoint);
1876 return -ENODEV;
1877 }
1878
1879 of_node_put(next_rx_endpoint);
1880
1881 device_count++;
1882 if (device_count > ATH12K_MAX_DEVICES) {
1883 ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
1884 device_count, ATH12K_MAX_DEVICES);
1885 of_node_put(next_wsi_dev);
1886 return -EINVAL;
1887 }
1888 } while (wsi_dev != next_wsi_dev);
1889
1890 of_node_put(next_wsi_dev);
1891 ag->num_devices = device_count;
1892
1893 return 0;
1894 }
1895
ath12k_core_get_wsi_index(struct ath12k_hw_group * ag,struct ath12k_base * ab)1896 static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
1897 struct ath12k_base *ab)
1898 {
1899 int i, wsi_controller_index = -1, node_index = -1;
1900 bool control;
1901
1902 for (i = 0; i < ag->num_devices; i++) {
1903 control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
1904 if (control)
1905 wsi_controller_index = i;
1906
1907 if (ag->wsi_node[i] == ab->dev->of_node)
1908 node_index = i;
1909 }
1910
1911 if (wsi_controller_index == -1) {
1912 ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
1913 return -EINVAL;
1914 }
1915
1916 if (node_index == -1) {
1917 ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
1918 return -EINVAL;
1919 }
1920
1921 ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
1922 ag->num_devices;
1923
1924 return 0;
1925 }
1926
/* Assign a probing device to a hardware group.  A matching group is
 * looked up by device tree; if none exists a new one is created and its
 * WSI topology is read from DT.  Devices without usable DT grouping (or
 * in FTM mode, or joining a full/unregistering group) are placed in a
 * fresh single-device group with id ATH12K_INVALID_GROUP_ID.
 *
 * Caller must hold ath12k_hw_group_mutex.  Returns the group, or NULL
 * when a group could not be allocated.
 */
static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
{
	struct ath12k_wsi_info *wsi = &ab->wsi_info;
	struct ath12k_hw_group *ag;

	lockdep_assert_held(&ath12k_hw_group_mutex);

	if (ath12k_ftm_mode)
		goto invalid_group;

	/* The grouping of multiple devices will be done based on device tree file.
	 * The platforms that do not have any valid group information would have
	 * each device to be part of its own invalid group.
	 *
	 * We use group id ATH12K_INVALID_GROUP_ID for single device group
	 * which didn't have dt entry or wrong dt entry, there could be many
	 * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So
	 * default group id of ATH12K_INVALID_GROUP_ID combined with
	 * num devices in ath12k_hw_group determines if the group is
	 * multi device or single device group
	 */

	ag = ath12k_core_hw_group_find_by_dt(ab);
	if (!ag) {
		ag = ath12k_core_hw_group_alloc(ab);
		if (!ag) {
			ath12k_warn(ab, "unable to create new hw group\n");
			return NULL;
		}

		/* No usable WSI data in DT: demote to a single-device group */
		if (ath12k_core_get_wsi_info(ag, ab) ||
		    ath12k_core_get_wsi_index(ag, ab)) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "unable to get wsi info from dt, grouping single device");
			ag->id = ATH12K_INVALID_GROUP_ID;
			ag->num_devices = 1;
			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
			wsi->index = 0;
		}

		goto exit;
	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
			   ag->id);
		goto invalid_group;
	} else {
		if (ath12k_core_get_wsi_index(ag, ab))
			goto invalid_group;
		goto exit;
	}

invalid_group:
	ag = ath12k_core_hw_group_alloc(ab);
	if (!ag) {
		ath12k_warn(ab, "unable to create new hw group\n");
		return NULL;
	}

	ag->id = ATH12K_INVALID_GROUP_ID;
	ag->num_devices = 1;
	wsi->index = 0;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");

exit:
	/* A full group cannot take another device; fall back to a fresh
	 * single-device group.
	 */
	if (ag->num_probed >= ag->num_devices) {
		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
		goto invalid_group;
	}

	ab->device_id = ag->num_probed++;
	ag->ab[ab->device_id] = ab;
	ab->ag = ag;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
		   ag->id, ag->num_devices, wsi->index);

	return ag;
}
2006
/* Detach a device from its hardware group, clearing both directions of
 * the association.  When the last probed device leaves, the group
 * itself is freed (outside ag->mutex, since the free takes the global
 * group mutex).
 */
void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	u8 device_id = ab->device_id;
	int num_probed;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	/* Sanity: the recorded id must index this very device */
	if (WARN_ON(device_id >= ag->num_devices)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	if (WARN_ON(ag->ab[device_id] != ab)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	ag->ab[device_id] = NULL;
	ab->ag = NULL;
	ab->device_id = ATH12K_INVALID_DEVICE_ID;

	if (ag->num_probed)
		ag->num_probed--;

	num_probed = ag->num_probed;

	mutex_unlock(&ag->mutex);

	if (!num_probed)
		ath12k_core_hw_group_free(ag);
}
2042
ath12k_core_hw_group_destroy(struct ath12k_hw_group * ag)2043 static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
2044 {
2045 struct ath12k_base *ab;
2046 int i;
2047
2048 if (WARN_ON(!ag))
2049 return;
2050
2051 for (i = 0; i < ag->num_devices; i++) {
2052 ab = ag->ab[i];
2053 if (!ab)
2054 continue;
2055
2056 ath12k_core_soc_destroy(ab);
2057 }
2058 }
2059
/* Stop and clean up a hardware group: mark it unregistering (idempotent
 * via the UNREGISTER flag), stop the group as a whole, then stop each
 * member device under its own core_lock.
 */
void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	/* Another caller already ran the cleanup */
	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);

	ath12k_core_hw_group_stop(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}

	mutex_unlock(&ag->mutex);
}
2091
/* Create the SoC-level core for every device in the group.  Caller must
 * hold ag->mutex.  Returns the first failure without unwinding devices
 * that were already created (cleanup happens on the error path of the
 * caller).
 */
static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i, ret;

	lockdep_assert_held(&ag->mutex);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		ret = ath12k_core_soc_create(ab);
		if (ret) {
			mutex_unlock(&ab->core_lock);
			ath12k_err(ab, "failed to create soc core: %d\n", ret);
			return ret;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;
}
2118
/* Decide whether the group supports MLO.  A single-device group follows
 * its device's advertisement (firmware IE if present, else the QMI
 * single-chip flag); a multi-device group is MLO capable only if every
 * member's firmware supports MLO.  FTM mode disables MLO entirely.
 * Caller must hold ag->mutex.
 */
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (ath12k_ftm_mode)
		return;

	lockdep_assert_held(&ag->mutex);

	if (ag->num_devices == 1) {
		ab = ag->ab[0];
		/* QCN9274 firmware uses firmware IE for MLO advertisement */
		if (ab->fw.fw_features_valid) {
			ag->mlo_capable =
				ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
			return;
		}

		/* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
		ag->mlo_capable = ab->single_chip_mlo_support;
		return;
	}

	ag->mlo_capable = true;

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		/* even if 1 device's firmware feature indicates MLO
		 * unsupported, make MLO unsupported for the whole group
		 */
		if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
			ag->mlo_capable = false;
			return;
		}
	}
}
2159
/* Per-device core initialization: register the panic notifier, assign
 * the device to a hardware group, and — once the last expected device
 * of the group has probed — create the group's SoC cores.
 *
 * Returns 0 on success; on failure the group assignment and panic
 * notifier registration are rolled back.
 */
int ath12k_core_init(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag;
	int ret;

	/* A failed panic-notifier registration is logged but not fatal */
	ret = ath12k_core_panic_notifier_register(ab);
	if (ret)
		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);

	mutex_lock(&ath12k_hw_group_mutex);

	ag = ath12k_core_hw_group_assign(ab);
	if (!ag) {
		mutex_unlock(&ath12k_hw_group_mutex);
		ath12k_warn(ab, "unable to get hw group\n");
		ret = -ENODEV;
		goto err_unregister_notifier;
	}

	mutex_unlock(&ath12k_hw_group_mutex);

	mutex_lock(&ag->mutex);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
		   ag->num_devices, ag->num_probed);

	/* Only the last probing device actually creates the group */
	if (ath12k_core_hw_group_create_ready(ag)) {
		ret = ath12k_core_hw_group_create(ag);
		if (ret) {
			mutex_unlock(&ag->mutex);
			ath12k_warn(ab, "unable to create hw group\n");
			goto err_destroy_hw_group;
		}
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_destroy_hw_group:
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
err_unregister_notifier:
	ath12k_core_panic_notifier_unregister(ab);

	return ret;
}
2207
/* Per-device core teardown: mirror of ath12k_core_init() — destroy the
 * group's SoC cores, leave the group, and drop the panic notifier.
 */
void ath12k_core_deinit(struct ath12k_base *ab)
{
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
	ath12k_core_panic_notifier_unregister(ab);
}
2214
/* Final release of an ath12k_base allocated by ath12k_core_alloc():
 * stop the replenish timer, destroy both workqueues, and free the
 * structure itself.
 */
void ath12k_core_free(struct ath12k_base *ab)
{
	timer_delete_sync(&ab->rx_replenish_retry);
	destroy_workqueue(ab->workqueue_aux);
	destroy_workqueue(ab->workqueue);
	kfree(ab);
}
2222
/* Allocate and initialize an ath12k_base plus @priv_size bytes of
 * bus-private data.  Sets up workqueues, locks, completions, work items
 * and timers used for the device's lifetime.  Freed with
 * ath12k_core_free().  Returns NULL on allocation failure.
 */
struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
				      enum ath12k_bus bus)
{
	struct ath12k_base *ab;

	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
	if (!ab)
		return NULL;

	init_completion(&ab->driver_recovery);

	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
	if (!ab->workqueue)
		goto err_sc_free;

	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
	if (!ab->workqueue_aux)
		goto err_free_wq;

	mutex_init(&ab->core_lock);
	spin_lock_init(&ab->base_lock);
	init_completion(&ab->reset_complete);

	INIT_LIST_HEAD(&ab->peers);
	init_waitqueue_head(&ab->peer_mapping_wq);
	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
	INIT_WORK(&ab->restart_work, ath12k_core_restart);
	INIT_WORK(&ab->reset_work, ath12k_core_reset);
	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
	INIT_WORK(&ab->update_11d_work, ath12k_update_11d);

	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
	init_completion(&ab->htc_suspend);
	init_completion(&ab->restart_completed);
	init_completion(&ab->wow.wakeup_completed);

	ab->dev = dev;
	ab->hif.bus = bus;
	/* U8_MAX marks the radio count as not yet reported by firmware */
	ab->qmi.num_radios = U8_MAX;
	ab->single_chip_mlo_support = false;

	/* Device index used to identify the devices in a group.
	 *
	 * In Intra-device MLO, only one device present in a group,
	 * so it is always zero.
	 *
	 * In Inter-device MLO, Multiple device present in a group,
	 * expect non-zero value.
	 */
	ab->device_id = 0;

	return ab;

err_free_wq:
	destroy_workqueue(ab->workqueue);
err_sc_free:
	kfree(ab);
	return NULL;
}
2283
/* Module init: attempt to register both the AHB and PCI bus drivers.
 * The module loads if at least one bus registers; only if BOTH fail is
 * an error returned (the AHB error, arbitrarily).  The per-bus results
 * are recorded in ahb_err/pci_err for ath12k_exit().
 */
static int ath12k_init(void)
{
	ahb_err = ath12k_ahb_init();
	if (ahb_err)
		pr_warn("Failed to initialize ath12k AHB device: %d\n", ahb_err);

	pci_err = ath12k_pci_init();
	if (pci_err)
		pr_warn("Failed to initialize ath12k PCI device: %d\n", pci_err);

	/* If both failed, return one of the failures (arbitrary) */
	return ahb_err && pci_err ? ahb_err : 0;
}
2297
/* Module exit: unregister only the bus drivers whose init succeeded,
 * as recorded in pci_err/ahb_err by ath12k_init().
 */
static void ath12k_exit(void)
{
	if (!pci_err)
		ath12k_pci_exit();

	if (!ahb_err)
		ath12k_ahb_exit();
}
2306
/* Module entry/exit points; see ath12k_init()/ath12k_exit() for the
 * per-bus partial-failure handling.
 */
module_init(ath12k_init);
module_exit(ath12k_exit);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");
2312