xref: /linux/drivers/net/wireless/ath/ath12k/core.c (revision defae535dd63b1eb78ba87d5b8c0b4fb5418fe0c)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
6  */
7 
8 #include <linux/export.h>
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/remoteproc.h>
12 #include <linux/firmware.h>
13 #include <linux/of.h>
14 #include <linux/of_graph.h>
15 #include "ahb.h"
16 #include "core.h"
17 #include "dp_tx.h"
18 #include "dp_rx.h"
19 #include "debug.h"
20 #include "debugfs.h"
21 #include "fw.h"
22 #include "hif.h"
23 #include "pci.h"
24 #include "wow.h"
25 
/* NOTE(review): presumably hold the AHB/PCI bus registration results from
 * module init — the module_init handler is not in this chunk, confirm there.
 */
static int ahb_err, pci_err;

/* Runtime-tunable bitmask of enabled debug log categories. */
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");

/* When set at load time, firmware is booted in factory test mode
 * (see ath12k_core_soc_create()); read-only once the module is loaded.
 */
bool ath12k_ftm_mode;
module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");

/* protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);

static DEFINE_MUTEX(ath12k_hw_group_mutex);
39 
/* Target and datapath sizing per QMI memory profile. The 512 MB low-memory
 * profile shrinks vdev/client limits and DP ring/descriptor counts to cut
 * host memory usage.
 */
static const struct
ath12k_mem_profile_based_param ath12k_mem_profile_based_param[] = {
[ATH12K_QMI_MEMORY_MODE_DEFAULT] = {
		.num_vdevs = 17,
		.max_client_single = 512,
		.max_client_dbs = 128,
		.max_client_dbs_sbs = 128,
		.dp_params = {
			.tx_comp_ring_size = 32768,
			.rxdma_monitor_buf_ring_size = 4096,
			/* NOTE(review): 8092 is not a power of two unlike the
			 * other ring sizes — possibly intended 8192; confirm.
			 */
			.rxdma_monitor_dst_ring_size = 8092,
			.num_pool_tx_desc = 32768,
			.rx_desc_count = 12288,
		},
	},
[ATH12K_QMI_MEMORY_MODE_LOW_512_M] = {
		.num_vdevs = 9,
		.max_client_single = 128,
		.max_client_dbs = 64,
		.max_client_dbs_sbs = 64,
		.dp_params = {
			.tx_comp_ring_size = 16384,
			.rxdma_monitor_buf_ring_size = 256,
			.rxdma_monitor_dst_ring_size = 512,
			.num_pool_tx_desc = 16384,
			.rx_desc_count = 6144,
		},
	},
};
69 
70 static int ath12k_core_rfkill_config(struct ath12k_base *ab)
71 {
72 	struct ath12k *ar;
73 	int ret = 0, i;
74 
75 	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
76 		return 0;
77 
78 	if (ath12k_acpi_get_disable_rfkill(ab))
79 		return 0;
80 
81 	for (i = 0; i < ab->num_radios; i++) {
82 		ar = ab->pdevs[i].ar;
83 
84 		ret = ath12k_mac_rfkill_config(ar);
85 		if (ret && ret != -EOPNOTSUPP) {
86 			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
87 			return ret;
88 		}
89 	}
90 
91 	return ret;
92 }
93 
94 /* Check if we need to continue with suspend/resume operation.
95  * Return:
96  *	a negative value: error happens and don't continue.
97  *	0:  no error but don't continue.
98  *	positive value: no error and do continue.
99  */
100 static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
101 {
102 	struct ath12k *ar;
103 
104 	if (!ab->hw_params->supports_suspend)
105 		return -EOPNOTSUPP;
106 
107 	/* so far single_pdev_only chips have supports_suspend as true
108 	 * so pass 0 as a dummy pdev_id here.
109 	 */
110 	ar = ab->pdevs[0].ar;
111 	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
112 		return 0;
113 
114 	return 1;
115 }
116 
/* Bus-suspend hook: flush pending tx on every radio before the device
 * is powered down in ath12k_core_suspend_late(). Returns 0 (or a
 * "don't continue" code from ath12k_core_continue_suspend_resume()).
 */
int ath12k_core_suspend(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret, i;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		if (!ar)
			continue;

		/* The tx-complete wait runs under the radio's wiphy lock. */
		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);

		ret = ath12k_mac_wait_tx_complete(ar);
		if (ret) {
			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
			return ret;
		}

		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
	}

	/* PM framework skips suspend_late/resume_early callbacks
	 * if other devices report errors in their suspend callbacks.
	 * However ath12k_core_resume() would still be called because
	 * here we return success thus kernel put us on dpm_suspended_list.
	 * Since we won't go through a power down/up cycle, there is
	 * no chance to call complete(&ab->restart_completed) in
	 * ath12k_core_restart(), making ath12k_core_resume() timeout.
	 * So call it here to avoid this issue. This also works in case
	 * no error happens thus suspend_late/resume_early get called,
	 * because it will be reinitialized in ath12k_core_resume_early().
	 */
	complete(&ab->restart_completed);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend);
159 
160 int ath12k_core_suspend_late(struct ath12k_base *ab)
161 {
162 	int ret;
163 
164 	ret = ath12k_core_continue_suspend_resume(ab);
165 	if (ret <= 0)
166 		return ret;
167 
168 	ath12k_acpi_stop(ab);
169 
170 	ath12k_hif_irq_disable(ab);
171 	ath12k_hif_ce_irq_disable(ab);
172 
173 	ath12k_hif_power_down(ab, true);
174 
175 	return 0;
176 }
177 EXPORT_SYMBOL(ath12k_core_suspend_late);
178 
179 int ath12k_core_resume_early(struct ath12k_base *ab)
180 {
181 	int ret;
182 
183 	ret = ath12k_core_continue_suspend_resume(ab);
184 	if (ret <= 0)
185 		return ret;
186 
187 	reinit_completion(&ab->restart_completed);
188 	ret = ath12k_hif_power_up(ab);
189 	if (ret)
190 		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
191 
192 	return ret;
193 }
194 EXPORT_SYMBOL(ath12k_core_resume_early);
195 
196 int ath12k_core_resume(struct ath12k_base *ab)
197 {
198 	long time_left;
199 	int ret;
200 
201 	ret = ath12k_core_continue_suspend_resume(ab);
202 	if (ret <= 0)
203 		return ret;
204 
205 	time_left = wait_for_completion_timeout(&ab->restart_completed,
206 						ATH12K_RESET_TIMEOUT_HZ);
207 	if (time_left == 0) {
208 		ath12k_warn(ab, "timeout while waiting for restart complete");
209 		return -ETIMEDOUT;
210 	}
211 
212 	return 0;
213 }
214 EXPORT_SYMBOL(ath12k_core_resume);
215 
/* Compose the board-file lookup key from bus/chip identifiers.
 *
 * @with_variant: append ",variant=<bdf_ext>" when a variant string is set
 * @bus_type_mode: emit only "bus=<bus>" (BUS_AND_BOARD search only)
 * @with_default: substitute ATH12K_BOARD_ID_DEFAULT for the real board id
 *                (only honoured in the default search branch)
 *
 * Always returns 0.
 */
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					   size_t name_len, bool with_variant,
					   bool bus_type_mode, bool with_default)
{
	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };

	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
		scnprintf(variant, sizeof(variant), ",variant=%s",
			  ab->qmi.target.bdf_ext);

	switch (ab->id.bdf_search) {
	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
		if (bus_type_mode)
			scnprintf(name, name_len,
				  "bus=%s",
				  ath12k_bus_str(ab->hif.bus));
		else
			scnprintf(name, name_len,
				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
				  ath12k_bus_str(ab->hif.bus),
				  ab->id.vendor, ab->id.device,
				  ab->id.subsystem_vendor,
				  ab->id.subsystem_device,
				  ab->qmi.target.chip_id,
				  ab->qmi.target.board_id,
				  variant);
		break;
	default:
		scnprintf(name, name_len,
			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
			  ath12k_bus_str(ab->hif.bus),
			  ab->qmi.target.chip_id,
			  with_default ?
			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
			  variant);
		break;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);

	return 0;
}
259 
/* Primary board name: variant-qualified, real board id. */
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					 size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
}
265 
/* Fallback board name: no variant, default board id. */
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
}
271 
/* Bus-only name ("bus=<bus>"), used as a generic regdb lookup key. */
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
}
277 
278 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
279 						    const char *file)
280 {
281 	const struct firmware *fw;
282 	char path[100];
283 	int ret;
284 
285 	if (!file)
286 		return ERR_PTR(-ENOENT);
287 
288 	ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
289 
290 	ret = firmware_request_nowarn(&fw, path, ab->dev);
291 	if (ret)
292 		return ERR_PTR(ret);
293 
294 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
295 		   path, fw->size);
296 
297 	return fw;
298 }
299 
300 void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
301 {
302 	if (!IS_ERR(bd->fw))
303 		release_firmware(bd->fw);
304 
305 	memset(bd, 0, sizeof(*bd));
306 }
307 
/* Walk the IEs inside one board-data container element looking for a
 * name IE (@name_id) equal to @boardname immediately establishing a
 * match, then the following data IE (@data_id) supplies the payload.
 * On success bd->data/bd->len point into @buf (no copy). Returns
 * -ENOENT when nothing matches, -EINVAL on a malformed IE.
 */
static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
					 struct ath12k_board_data *bd,
					 const void *buf, size_t buf_len,
					 const char *boardname,
					 int ie_id,
					 int name_id,
					 int data_id)
{
	const struct ath12k_fw_ie *hdr;
	bool name_match_found;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	name_match_found = false;

	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath12k_fw_ie)) {
		hdr = buf;
		board_ie_id = le32_to_cpu(hdr->id);
		board_ie_len = le32_to_cpu(hdr->len);
		board_ie_data = hdr->data;

		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		/* IE payloads are padded to 4-byte alignment; the remaining
		 * buffer must hold the padded length.
		 */
		if (buf_len < ALIGN(board_ie_len, 4)) {
			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
				   ath12k_bd_ie_type_str(ie_id),
				   buf_len, ALIGN(board_ie_len, 4));
			ret = -EINVAL;
			goto out;
		}

		if (board_ie_id == name_id) {
			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
					board_ie_data, board_ie_len);

			/* exact-length, exact-content comparison */
			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, strlen(boardname));
			if (ret)
				goto next;

			name_match_found = true;
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found match %s for name '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);
		} else if (board_ie_id == data_id) {
			if (!name_match_found)
				/* no match found */
				goto next;

			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found %s for '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);

			bd->data = board_ie_data;
			bd->len = board_ie_len;

			ret = 0;
			goto out;
		} else {
			ath12k_warn(ab, "unknown %s id found: %d\n",
				    ath12k_bd_ie_type_str(ie_id),
				    board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = ALIGN(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	ret = -ENOENT;

out:
	return ret;
}
392 
/* Fetch board/regdb data from the API v2 container file
 * (ATH12K_BOARD_API2_FILE). Layout: a NUL-terminated magic string,
 * padded to 4 bytes, followed by TLV-style ath12k_fw_ie elements.
 * The element matching @ie_id_match is searched (via
 * ath12k_core_parse_bd_ie_board()) for the entry named @boardname.
 *
 * On success bd->data/bd->len point into bd->fw's buffer; the caller
 * releases it with ath12k_core_free_bdf(). On failure bd is freed here.
 */
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
					      struct ath12k_board_data *bd,
					      const char *boardname,
					      int ie_id_match,
					      int name_id,
					      int data_id)
{
	size_t len, magic_len;
	const u8 *data;
	char *filename, filepath[100];
	size_t ie_len;
	struct ath12k_fw_ie *hdr;
	int ret, ie_id;

	filename = ATH12K_BOARD_API2_FILE;

	/* bd->fw may already be populated by an earlier attempt with a
	 * different boardname; only request the file once.
	 */
	if (!bd->fw)
		bd->fw = ath12k_core_firmware_request(ab, filename);

	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	data = bd->fw->data;
	len = bd->fw->size;

	ath12k_core_create_firmware_path(ab, filename,
					 filepath, sizeof(filepath));

	/* magic has extra null byte padded */
	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
		ath12k_err(ab, "found invalid board magic\n");
		ret = -EINVAL;
		goto err;
	}

	/* magic is padded to 4 bytes */
	magic_len = ALIGN(magic_len, 4);
	if (len < magic_len) {
		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	data += magic_len;
	len -= magic_len;

	/* walk the top-level IEs */
	while (len > sizeof(struct ath12k_fw_ie)) {
		hdr = (struct ath12k_fw_ie *)data;
		ie_id = le32_to_cpu(hdr->id);
		ie_len = le32_to_cpu(hdr->len);

		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < ALIGN(ie_len, 4)) {
			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
				   ie_id, ie_len, len);
			ret = -EINVAL;
			goto err;
		}

		if (ie_id == ie_id_match) {
			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
							    ie_len,
							    boardname,
							    ie_id_match,
							    name_id,
							    data_id);
			if (ret == -ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				goto err;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = ALIGN(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	if (!bd->data || !bd->len) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to fetch %s for %s from %s\n",
			   ath12k_bd_ie_type_str(ie_id_match),
			   boardname, filepath);
		ret = -ENODATA;
		goto err;
	}

	return 0;

err:
	ath12k_core_free_bdf(ab, bd);
	return ret;
}
503 
504 int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
505 				       struct ath12k_board_data *bd,
506 				       char *filename)
507 {
508 	bd->fw = ath12k_core_firmware_request(ab, filename);
509 	if (IS_ERR(bd->fw))
510 		return PTR_ERR(bd->fw);
511 
512 	bd->data = bd->fw->data;
513 	bd->len = bd->fw->size;
514 
515 	return 0;
516 }
517 
#define BOARD_NAME_SIZE 200

/* Locate board data for this device, trying in order:
 *  1. API v2 container, full (variant-qualified) board name
 *  2. API v2 container, fallback name (default board id, no variant)
 *  3. API v1 monolithic file (ATH12K_DEFAULT_BOARD_FILE)
 */
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
	char *filename, filepath[100];
	int bd_api;
	int ret;

	filename = ATH12K_BOARD_API2_FILE;

	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
	if (ret) {
		ath12k_err(ab, "failed to create board name: %d", ret);
		return ret;
	}

	bd_api = 2;
	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
						     sizeof(fallback_boardname));
	if (ret) {
		ath12k_err(ab, "failed to create fallback board name: %d", ret);
		return ret;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	bd_api = 1;
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
	if (ret) {
		ath12k_core_create_firmware_path(ab, filename,
						 filepath, sizeof(filepath));
		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
			   boardname, filepath);
		/* report the fallback name too, unless identical */
		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
				   fallback_boardname, filepath);

		ath12k_err(ab, "failed to fetch board.bin from %s\n",
			   ab->hw_params->fw.dir);
		return ret;
	}

success:
	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
	return 0;
}
576 
/* Fetch the regulatory database, trying in order:
 *  1. API v2 container, full board name
 *  2. API v2 container, bus-type-only board name
 *  3. standalone ATH12K_REGDB_FILE_NAME (API v1)
 * Failures are logged at debug level only — regdb is optional.
 */
int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
	int ret;

	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
						     BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create default board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
	if (ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);

exit:
	if (!ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");

	return ret;
}
622 
623 u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
624 {
625 	if (ab->num_radios == 2)
626 		return TARGET_NUM_STATIONS_DBS;
627 	else if (ab->num_radios == 3)
628 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
629 	return TARGET_NUM_STATIONS_SINGLE;
630 }
631 
632 u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
633 {
634 	if (ab->num_radios == 2)
635 		return TARGET_NUM_PEERS_PDEV_DBS;
636 	else if (ab->num_radios == 3)
637 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
638 	return TARGET_NUM_PEERS_PDEV_SINGLE;
639 }
640 
641 u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
642 {
643 	if (ab->num_radios == 2)
644 		return TARGET_NUM_TIDS(DBS);
645 	else if (ab->num_radios == 3)
646 		return TARGET_NUM_TIDS(DBS_SBS);
647 	return TARGET_NUM_TIDS(SINGLE);
648 }
649 
650 struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
651 						  int index)
652 {
653 	struct device *dev = ab->dev;
654 	struct reserved_mem *rmem;
655 	struct device_node *node;
656 
657 	node = of_parse_phandle(dev->of_node, "memory-region", index);
658 	if (!node) {
659 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
660 			   "failed to parse memory-region for index %d\n", index);
661 		return NULL;
662 	}
663 
664 	rmem = of_reserved_mem_lookup(node);
665 	of_node_put(node);
666 	if (!rmem) {
667 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
668 			   "unable to get memory-region for index %d\n", index);
669 		return NULL;
670 	}
671 
672 	return rmem;
673 }
674 
675 static inline
676 void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
677 {
678 	struct ath12k_hw_group *ag = ab->ag;
679 
680 	lockdep_assert_held(&ag->mutex);
681 
682 	if (ab->hw_group_ref) {
683 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
684 			   ag->id);
685 		return;
686 	}
687 
688 	ab->hw_group_ref = true;
689 	ag->num_started++;
690 
691 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
692 		   ag->id, ag->num_started);
693 }
694 
695 static inline
696 void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
697 {
698 	struct ath12k_hw_group *ag = ab->ag;
699 
700 	lockdep_assert_held(&ag->mutex);
701 
702 	if (!ab->hw_group_ref) {
703 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
704 			   ag->id);
705 		return;
706 	}
707 
708 	ab->hw_group_ref = false;
709 	ag->num_started--;
710 
711 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
712 		   ag->id, ag->num_started);
713 }
714 
/* Tear down a started core: drop the group reference, stop firmware and
 * unwind what ath12k_core_start() set up (REO, HIF, WMI, DP).
 */
static void ath12k_core_stop(struct ath12k_base *ab)
{
	ath12k_core_to_group_ref_put(ab);

	/* After a crash the firmware is already gone; skip the QMI stop. */
	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_qmi_firmware_stop(ab);

	ath12k_acpi_stop(ab);

	ath12k_dp_rx_pdev_reo_cleanup(ab);
	ath12k_hif_stop(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_free(ab);

	/* De-Init of components as needed */
}
731 
/* dmi_walk() callback: extract the country code and the board-file
 * variant string from the vendor-specific SMBIOS BDF entry (if present)
 * into ab->new_alpha2 and ab->qmi.target.bdf_ext respectively.
 */
static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
	struct ath12k_base *ab = data;
	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
	ssize_t copied;
	size_t len;
	int i;

	/* keep the first variant found; dmi_walk visits every table */
	if (ab->qmi.target.bdf_ext[0] != '\0')
		return;

	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
		return;

	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "wrong smbios bdf ext type length (%d).\n",
			   hdr->length);
		return;
	}

	spin_lock_bh(&ab->base_lock);

	switch (smbios->country_code_flag) {
	case ATH12K_SMBIOS_CC_ISO:
		/* cc_code: high byte = first ISO char, low byte = second */
		ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
		ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
			   ab->new_alpha2[0], ab->new_alpha2[1]);
		break;
	case ATH12K_SMBIOS_CC_WW:
		ab->new_alpha2[0] = '0';
		ab->new_alpha2[1] = '0';
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
			   smbios->country_code_flag);
		break;
	}

	spin_unlock_bh(&ab->base_lock);

	if (!smbios->bdf_enabled) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
		return;
	}

	/* Only one string exists (per spec) */
	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant magic does not match.\n");
		return;
	}

	/* reject variants containing non-printable/non-ASCII characters */
	len = min_t(size_t,
		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
	for (i = 0; i < len; i++) {
		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "bdf variant name contains non ascii chars.\n");
			return;
		}
	}

	/* Copy extension name without magic prefix */
	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
			 sizeof(ab->qmi.target.bdf_ext));
	if (copied < 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant string is longer than the buffer can accommodate\n");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT,
		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
}
811 
812 int ath12k_core_check_smbios(struct ath12k_base *ab)
813 {
814 	ab->qmi.target.bdf_ext[0] = '\0';
815 	dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
816 
817 	if (ab->qmi.target.bdf_ext[0] == '\0')
818 		return -ENODATA;
819 
820 	return 0;
821 }
822 
823 static int ath12k_core_soc_create(struct ath12k_base *ab)
824 {
825 	int ret;
826 
827 	if (ath12k_ftm_mode) {
828 		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
829 		ath12k_info(ab, "Booting in ftm mode\n");
830 	}
831 
832 	ret = ath12k_qmi_init_service(ab);
833 	if (ret) {
834 		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
835 		return ret;
836 	}
837 
838 	ath12k_debugfs_soc_create(ab);
839 
840 	ret = ath12k_hif_power_up(ab);
841 	if (ret) {
842 		ath12k_err(ab, "failed to power up :%d\n", ret);
843 		goto err_qmi_deinit;
844 	}
845 
846 	ath12k_debugfs_pdev_create(ab);
847 
848 	return 0;
849 
850 err_qmi_deinit:
851 	ath12k_debugfs_soc_destroy(ab);
852 	ath12k_qmi_deinit_service(ab);
853 	return ret;
854 }
855 
/* Inverse of ath12k_core_soc_create(); also frees regulatory state. */
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
	ath12k_hif_power_down(ab, false);
	ath12k_reg_free(ab);
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
}
863 
/* Allocate the per-device datapath pdev state. */
static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
	int ret = ath12k_dp_pdev_alloc(ab);

	if (ret)
		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);

	return ret;
}
876 
/* Inverse of ath12k_core_pdev_create(). */
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
	ath12k_dp_pdev_free(ab);
}
881 
/* Bring the core up to the point where WMI/HTT services are running:
 * WMI attach -> HTC init -> HIF start -> HTC/HTT/WMI connect ->
 * service ready -> REO setup -> WMI init cmd -> unified ready.
 * On success the device takes a started-reference on its hw group.
 * Errors unwind via the labels at the bottom in reverse order.
 */
static int ath12k_core_start(struct ath12k_base *ab)
{
	int ret;

	lockdep_assert_held(&ab->core_lock);

	ret = ath12k_wmi_attach(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
		return ret;
	}

	ret = ath12k_htc_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init htc: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_hif_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start HIF: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_htc_wait_target(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_dp_htt_connect(&ab->dp);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_connect(ab);
	if (ret) {
		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_htc_start(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to start HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_wait_for_service_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
			   ret);
		goto err_hif_stop;
	}

	/* cookie-conversion config must precede REO/descriptor setup */
	ath12k_dp_cc_config(ab);

	ret = ath12k_dp_rx_pdev_reo_setup(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
		goto err_hif_stop;
	}

	ath12k_dp_hal_rx_desc_init(ab);

	ret = ath12k_wmi_cmd_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
		goto err_reo_cleanup;
	}

	ret = ath12k_wmi_wait_for_unified_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	/* put hardware to DBS mode */
	if (ab->hw_params->single_pdev_only) {
		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
		if (ret) {
			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
			goto err_reo_cleanup;
		}
	}

	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
	if (ret) {
		ath12k_err(ab, "failed to send htt version request message: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	ath12k_acpi_set_dsm_func(ab);

	/* Indicate the core start in the appropriate group */
	ath12k_core_to_group_ref_get(ab);

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);
err_hif_stop:
	ath12k_hif_stop(ab);
err_wmi_detach:
	ath12k_wmi_detach(ab);
	return ret;
}
991 
/* Disable IRQs and destroy the DP pdev of one device, under core_lock. */
static void ath12k_core_device_cleanup(struct ath12k_base *ab)
{
	mutex_lock(&ab->core_lock);

	ath12k_hif_irq_disable(ab);
	ath12k_core_pdev_destroy(ab);

	mutex_unlock(&ab->core_lock);
}
1001 
/* Inverse of ath12k_core_hw_group_start(): unregister mac80211, clean
 * up every member device in reverse order, then free mac allocations.
 */
static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	lockdep_assert_held(&ag->mutex);

	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

	ath12k_mac_unregister(ag);

	/* tear devices down in the reverse of start order */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ath12k_core_device_cleanup(ab);
	}

	ath12k_mac_destroy(ag);
}
1025 
1026 u8 ath12k_get_num_partner_link(struct ath12k *ar)
1027 {
1028 	struct ath12k_base *partner_ab, *ab = ar->ab;
1029 	struct ath12k_hw_group *ag = ab->ag;
1030 	struct ath12k_pdev *pdev;
1031 	u8 num_link = 0;
1032 	int i, j;
1033 
1034 	lockdep_assert_held(&ag->mutex);
1035 
1036 	for (i = 0; i < ag->num_devices; i++) {
1037 		partner_ab = ag->ab[i];
1038 
1039 		for (j = 0; j < partner_ab->num_radios; j++) {
1040 			pdev = &partner_ab->pdevs[j];
1041 
1042 			/* Avoid the self link */
1043 			if (ar == pdev->ar)
1044 				continue;
1045 
1046 			num_link++;
1047 		}
1048 	}
1049 
1050 	return num_link;
1051 }
1052 
1053 static int __ath12k_mac_mlo_ready(struct ath12k *ar)
1054 {
1055 	u8 num_link = ath12k_get_num_partner_link(ar);
1056 	int ret;
1057 
1058 	if (num_link == 0)
1059 		return 0;
1060 
1061 	ret = ath12k_wmi_mlo_ready(ar);
1062 	if (ret) {
1063 		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
1064 			   ar->pdev_idx, ret);
1065 		return ret;
1066 	}
1067 
1068 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
1069 		   ar->pdev_idx);
1070 
1071 	return 0;
1072 }
1073 
1074 int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
1075 {
1076 	struct ath12k_hw *ah;
1077 	struct ath12k *ar;
1078 	int ret;
1079 	int i, j;
1080 
1081 	for (i = 0; i < ag->num_hw; i++) {
1082 		ah = ag->ah[i];
1083 		if (!ah)
1084 			continue;
1085 
1086 		for_each_ar(ah, ar, j) {
1087 			ar = &ah->radio[j];
1088 			ret = __ath12k_mac_mlo_ready(ar);
1089 			if (ret)
1090 				return ret;
1091 		}
1092 	}
1093 
1094 	return 0;
1095 }
1096 
1097 static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
1098 {
1099 	int ret, i;
1100 
1101 	if (!ag->mlo_capable)
1102 		return 0;
1103 
1104 	ret = ath12k_mac_mlo_setup(ag);
1105 	if (ret)
1106 		return ret;
1107 
1108 	for (i = 0; i < ag->num_devices; i++)
1109 		ath12k_dp_partner_cc_init(ag->ab[i]);
1110 
1111 	ret = ath12k_mac_mlo_ready(ag);
1112 	if (ret)
1113 		goto err_mlo_teardown;
1114 
1115 	return 0;
1116 
1117 err_mlo_teardown:
1118 	ath12k_mac_mlo_teardown(ag);
1119 
1120 	return ret;
1121 }
1122 
/* Start every device of a hardware group: on the first start, allocate
 * and register the mac80211 hw instances and do MLO setup; on every
 * start, create per-device pdevs, enable IRQs and apply rfkill config.
 *
 * Two distinct error paths: before ATH12K_GROUP_FLAG_REGISTERED is set,
 * failures unwind only the mac/MLO setup (labels after the final
 * return); afterwards failures tear the whole group down via
 * ath12k_core_hw_group_stop().
 */
static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int ret, i;

	lockdep_assert_held(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
		goto core_pdev_create;

	ret = ath12k_mac_allocate(ag);
	if (WARN_ON(ret))
		return ret;

	ret = ath12k_core_mlo_setup(ag);
	if (WARN_ON(ret))
		goto err_mac_destroy;

	ret = ath12k_mac_register(ag);
	if (WARN_ON(ret))
		goto err_mlo_teardown;

	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

core_pdev_create:
	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ret = ath12k_core_pdev_create(ab);
		if (ret) {
			ath12k_err(ab, "failed to create pdev core %d\n", ret);
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		ath12k_hif_irq_enable(ab);

		ret = ath12k_core_rfkill_config(ab);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

err:
	ath12k_core_hw_group_stop(ag);
	return ret;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

err_mac_destroy:
	ath12k_mac_destroy(ag);

	return ret;
}
1189 
1190 static int ath12k_core_start_firmware(struct ath12k_base *ab,
1191 				      enum ath12k_firmware_mode mode)
1192 {
1193 	int ret;
1194 
1195 	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
1196 				    &ab->qmi.ce_cfg.shadow_reg_v3_len);
1197 
1198 	ret = ath12k_qmi_firmware_start(ab, mode);
1199 	if (ret) {
1200 		ath12k_err(ab, "failed to send firmware start: %d\n", ret);
1201 		return ret;
1202 	}
1203 
1204 	return ret;
1205 }
1206 
1207 static inline
1208 bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
1209 {
1210 	lockdep_assert_held(&ag->mutex);
1211 
1212 	return (ag->num_started == ag->num_devices);
1213 }
1214 
1215 static void ath12k_fw_stats_pdevs_free(struct list_head *head)
1216 {
1217 	struct ath12k_fw_stats_pdev *i, *tmp;
1218 
1219 	list_for_each_entry_safe(i, tmp, head, list) {
1220 		list_del(&i->list);
1221 		kfree(i);
1222 	}
1223 }
1224 
1225 void ath12k_fw_stats_bcn_free(struct list_head *head)
1226 {
1227 	struct ath12k_fw_stats_bcn *i, *tmp;
1228 
1229 	list_for_each_entry_safe(i, tmp, head, list) {
1230 		list_del(&i->list);
1231 		kfree(i);
1232 	}
1233 }
1234 
1235 static void ath12k_fw_stats_vdevs_free(struct list_head *head)
1236 {
1237 	struct ath12k_fw_stats_vdev *i, *tmp;
1238 
1239 	list_for_each_entry_safe(i, tmp, head, list) {
1240 		list_del(&i->list);
1241 		kfree(i);
1242 	}
1243 }
1244 
/* Initialize the per-radio firmware stats lists and the completions
 * used to wait for stats events from firmware.
 */
void ath12k_fw_stats_init(struct ath12k *ar)
{
	INIT_LIST_HEAD(&ar->fw_stats.vdevs);
	INIT_LIST_HEAD(&ar->fw_stats.pdevs);
	INIT_LIST_HEAD(&ar->fw_stats.bcn);
	init_completion(&ar->fw_stats_complete);
	init_completion(&ar->fw_stats_done);
}
1253 
/* Release all accumulated pdev/vdev/beacon stats entries.
 * Caller is responsible for any locking (see ath12k_fw_stats_reset()).
 */
void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
{
	ath12k_fw_stats_pdevs_free(&stats->pdevs);
	ath12k_fw_stats_vdevs_free(&stats->vdevs);
	ath12k_fw_stats_bcn_free(&stats->bcn);
}
1260 
/* Drop all collected firmware stats and reset the receive counters,
 * under ar->data_lock so it is safe against concurrent stats updates.
 */
void ath12k_fw_stats_reset(struct ath12k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ath12k_fw_stats_free(&ar->fw_stats);
	ar->fw_stats.num_vdev_recvd = 0;
	ar->fw_stats.num_bcn_recvd = 0;
	spin_unlock_bh(&ar->data_lock);
}
1269 
/* Trigger the QMI host-cap exchange on the device that immediately
 * follows @ab in the group's device array (NULL slots are skipped
 * without clearing "found"). "found" becomes true only in the iteration
 * after @ab is seen and is cleared again right after the trigger, so
 * exactly one successor is kicked — each device daisy-chains bring-up
 * to the next one.
 */
static void ath12k_core_trigger_partner(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_base *partner_ab;
	bool found = false;
	int i;

	for (i = 0; i < ag->num_devices; i++) {
		partner_ab = ag->ab[i];
		if (!partner_ab)
			continue;

		if (found)
			ath12k_qmi_trigger_host_cap(partner_ab);

		found = (partner_ab == ab);
	}
}
1288 
/* Called once QMI reports firmware ready for @ab: start firmware, set up
 * CE pipes and DP, then start the core. If this was the last device of
 * the group to come up, the whole group is started; otherwise the next
 * partner device is triggered to continue its own bring-up.
 *
 * Returns 0 on success or a negative error value. On group-start failure
 * every already-started device in the group is stopped again.
 */
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	int ret, i;

	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
	if (ret) {
		ath12k_err(ab, "failed to start firmware: %d\n", ret);
		return ret;
	}

	ret = ath12k_ce_init_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
		goto err_firmware_stop;
	}

	ret = ath12k_dp_alloc(ab);
	if (ret) {
		ath12k_err(ab, "failed to init DP: %d\n", ret);
		goto err_firmware_stop;
	}

	/* group mutex taken outside core_lock; error paths must unwind
	 * both in the reverse order
	 */
	mutex_lock(&ag->mutex);
	mutex_lock(&ab->core_lock);

	ret = ath12k_core_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start core: %d\n", ret);
		goto err_dp_free;
	}

	mutex_unlock(&ab->core_lock);

	if (ath12k_core_hw_group_start_ready(ag)) {
		ret = ath12k_core_hw_group_start(ag);
		if (ret) {
			ath12k_warn(ab, "unable to start hw group\n");
			goto err_core_stop;
		}
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
	} else {
		/* not everyone is up yet — kick the next partner device */
		ath12k_core_trigger_partner(ab);
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_core_stop:
	/* stop every started device of the group, in reverse probe order;
	 * note that "ab" is reused as the loop cursor here
	 */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}
	mutex_unlock(&ag->mutex);
	goto exit;

err_dp_free:
	ath12k_dp_free(ab);
	mutex_unlock(&ab->core_lock);
	mutex_unlock(&ag->mutex);

err_firmware_stop:
	ath12k_qmi_firmware_stop(ab);

exit:
	return ret;
}
1362 
/* Rebuild the driver state after a firmware crash: tear down pdev DP,
 * CE pipes, WMI and REO state, release DP and SRNG, then reinitialize
 * SRNG and walk the normal firmware-ready bring-up path again.
 *
 * Returns 0 on success or a negative error value.
 */
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
	int ret;

	mutex_lock(&ab->core_lock);
	ath12k_dp_pdev_free(ab);
	ath12k_ce_cleanup_pipes(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_rx_pdev_reo_cleanup(ab);
	mutex_unlock(&ab->core_lock);

	ath12k_dp_free(ab);
	ath12k_hal_srng_deinit(ab);

	/* all vdev ids across all radios become available again */
	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		return ret;

	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	ret = ath12k_core_qmi_firmware_ready(ab);
	if (ret)
		goto err_hal_srng_deinit;

	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);

	return 0;

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);
	return ret;
}
1397 
1398 static void ath12k_rfkill_work(struct work_struct *work)
1399 {
1400 	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1401 	struct ath12k_hw_group *ag = ab->ag;
1402 	struct ath12k *ar;
1403 	struct ath12k_hw *ah;
1404 	struct ieee80211_hw *hw;
1405 	bool rfkill_radio_on;
1406 	int i, j;
1407 
1408 	spin_lock_bh(&ab->base_lock);
1409 	rfkill_radio_on = ab->rfkill_radio_on;
1410 	spin_unlock_bh(&ab->base_lock);
1411 
1412 	for (i = 0; i < ag->num_hw; i++) {
1413 		ah = ath12k_ag_to_ah(ag, i);
1414 		if (!ah)
1415 			continue;
1416 
1417 		for (j = 0; j < ah->num_radio; j++) {
1418 			ar = &ah->radio[j];
1419 			if (!ar)
1420 				continue;
1421 
1422 			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1423 		}
1424 
1425 		hw = ah->hw;
1426 		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1427 	}
1428 }
1429 
/* Quiesce a single radio during recovery: reset vdev accounting, finish
 * any scan, drop all peers, cancel deferred work, unpublish the pdev
 * from the RCU-protected active array and empty the arvif list.
 * Caller must hold the wiphy mutex (asserted below).
 */
void ath12k_core_halt(struct ath12k *ar)
{
	struct list_head *pos, *n;
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ar->num_created_vdevs = 0;
	ar->allocated_vdev_map = 0;

	ath12k_mac_scan_finish(ar);
	ath12k_mac_peer_cleanup_all(ar);
	cancel_delayed_work_sync(&ar->scan.timeout);
	cancel_work_sync(&ar->regd_update_work);
	cancel_work_sync(&ar->regd_channel_update_work);
	cancel_work_sync(&ab->rfkill_work);
	cancel_work_sync(&ab->update_11d_work);

	/* wait until no RCU reader can still see this pdev as active */
	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
	synchronize_rcu();

	spin_lock_bh(&ar->data_lock);
	list_for_each_safe(pos, n, &ar->arvifs)
		list_del_init(pos);
	spin_unlock_bh(&ar->data_lock);

	/* fresh tx mgmt idr for the restart; entries were released by the
	 * pre-reconfigure recovery path
	 */
	idr_init(&ar->txmgmt_idr);
}
1458 
/* First phase of crash recovery: bump the crash counter, stop mac80211
 * queues, drain tx, complete every outstanding wait so no caller stays
 * blocked on dead firmware, and flush the pending tx mgmt idr entries.
 */
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_hw *ah;
	int i, j;

	spin_lock_bh(&ab->base_lock);
	ab->stats.fw_crash_counter++;
	spin_unlock_bh(&ab->base_lock);

	if (ab->is_reset)
		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
		    ah->state == ATH12K_HW_STATE_TM)
			continue;

		wiphy_lock(ah->hw->wiphy);

		/* If queue 0 is stopped, it is safe to assume that all
		 * other queues are stopped by driver via
		 * ieee80211_stop_queues() below. This means, there is
		 * no need to stop it again and hence continue
		 */
		if (ieee80211_queue_stopped(ah->hw, 0)) {
			wiphy_unlock(ah->hw->wiphy);
			continue;
		}

		ieee80211_stop_queues(ah->hw);

		for (j = 0; j < ah->num_radio; j++) {
			ar = &ah->radio[j];

			ath12k_mac_drain_tx(ar);
			ar->state_11d = ATH12K_11D_IDLE;
			/* wake anyone waiting on firmware events that will
			 * never arrive now
			 */
			complete(&ar->completed_11d_scan);
			complete(&ar->scan.started);
			complete_all(&ar->scan.completed);
			complete(&ar->scan.on_channel);
			complete(&ar->peer_assoc_done);
			complete(&ar->peer_delete_done);
			complete(&ar->install_key_done);
			complete(&ar->vdev_setup_done);
			complete(&ar->vdev_delete_done);
			complete(&ar->bss_survey_done);
			complete_all(&ar->regd_update_completed);

			wake_up(&ar->dp.tx_empty_waitq);
			idr_for_each(&ar->txmgmt_idr,
				     ath12k_mac_tx_mgmt_pending_free, ar);
			idr_destroy(&ar->txmgmt_idr);
			wake_up(&ar->txmgmt_empty_waitq);

			ar->monitor_vdev_id = -1;
			ar->monitor_vdev_created = false;
			ar->monitor_started = false;
		}

		wiphy_unlock(ah->hw->wiphy);
	}

	wake_up(&ab->wmi_ab.tx_credits_wq);
	wake_up(&ab->peer_mapping_wq);
}
1527 
/* Deferred 11d worker: pick up the new alpha2 country code published
 * under ab->base_lock and push it to firmware for every radio.
 *
 * NOTE(review): pdev->ar is dereferenced without a NULL check here,
 * unlike some other loops in this file — confirm all pdevs have a valid
 * ar by the time this work can run.
 */
static void ath12k_update_11d(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
	struct ath12k *ar;
	struct ath12k_pdev *pdev;
	struct wmi_set_current_country_arg arg = {};
	int ret, i;

	spin_lock_bh(&ab->base_lock);
	memcpy(&arg.alpha2, &ab->new_alpha2, 2);
	spin_unlock_bh(&ab->base_lock);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
		   arg.alpha2[0], arg.alpha2[1]);

	for (i = 0; i < ab->num_radios; i++) {
		pdev = &ab->pdevs[i];
		ar = pdev->ar;

		memcpy(&ar->alpha2, &arg.alpha2, 2);

		/* the country change will trigger a fresh regd update event */
		reinit_completion(&ar->regd_update_completed);

		ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
		if (ret)
			ath12k_warn(ar->ab,
				    "pdev id %d failed set current country code: %d\n",
				    i, ret);
	}
}
1558 
/* Second phase of crash recovery: move each hw of the group into
 * RESTARTING (halting its radios) or mark it WEDGED if a previous
 * restart never completed, then signal driver recovery completion.
 */
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	struct ath12k *ar;
	int i, j;

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
			continue;

		wiphy_lock(ah->hw->wiphy);
		mutex_lock(&ah->hw_mutex);

		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;

			for (j = 0; j < ah->num_radio; j++) {
				ar = &ah->radio[j];
				ath12k_core_halt(ar);
			}

			break;
		case ATH12K_HW_STATE_OFF:
			/* unreachable: OFF hws are skipped by the continue
			 * above; kept as a defensive case
			 */
			ath12k_warn(ab,
				    "cannot restart hw %d that hasn't been started\n",
				    i);
			break;
		case ATH12K_HW_STATE_RESTARTING:
			break;
		case ATH12K_HW_STATE_RESTARTED:
			/* restarted but crashed again before going ON */
			ah->state = ATH12K_HW_STATE_WEDGED;
			fallthrough;
		case ATH12K_HW_STATE_WEDGED:
			ath12k_warn(ab,
				    "device is wedged, will not restart hw %d\n", i);
			break;
		case ATH12K_HW_STATE_TM:
			ath12k_warn(ab, "fw mode reset done radio %d\n", i);
			break;
		}

		mutex_unlock(&ah->hw_mutex);
		wiphy_unlock(ah->hw->wiphy);
	}

	complete(&ab->driver_recovery);
}
1609 
/* Restart worker: reconfigure the crashed device and, once the whole
 * group is started again, ask mac80211 to restart every hw.
 */
static void ath12k_core_restart(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	int ret, i;

	ret = ath12k_core_reconfigure_on_crash(ab);
	if (ret) {
		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
		return;
	}

	if (ab->is_reset) {
		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
			/* device never made it to registration: declare the
			 * reset done here instead of via mac80211 restart
			 */
			atomic_dec(&ab->reset_count);
			complete(&ab->reset_complete);
			ab->is_reset = false;
			atomic_set(&ab->fail_cont_count, 0);
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
		}

		mutex_lock(&ag->mutex);

		/* wait for the remaining partner devices to restart too */
		if (!ath12k_core_hw_group_start_ready(ag)) {
			mutex_unlock(&ag->mutex);
			goto exit_restart;
		}

		for (i = 0; i < ag->num_hw; i++) {
			ah = ath12k_ag_to_ah(ag, i);
			ieee80211_restart_hw(ah->hw);
		}

		mutex_unlock(&ag->mutex);
	}

exit_restart:
	complete(&ab->restart_completed);
}
1650 
/* Reset worker: full firmware reset of a device and, once every device
 * of the group has dropped its started reference, power-up of the whole
 * group again. Guards against resets before firmware was ever ready,
 * against endless failing recovery loops, and against two reset workers
 * racing each other.
 */
static void ath12k_core_reset(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
	struct ath12k_hw_group *ag = ab->ag;
	int reset_count, fail_cont_count, i;
	long time_left;

	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
		return;
	}

	/* Sometimes recovery fails and then every following attempt fails
	 * too; bail out to avoid an infinite recovery loop that can never
	 * succeed.
	 */
	fail_cont_count = atomic_read(&ab->fail_cont_count);

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
		return;

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
	    time_before(jiffies, ab->reset_fail_timeout))
		return;

	reset_count = atomic_inc_return(&ab->reset_count);

	if (reset_count > 1) {
		/* Sometimes it happened another reset worker before the previous one
		 * completed, then the second reset worker will destroy the previous one,
		 * thus below is to avoid that.
		 */
		ath12k_warn(ab, "already resetting count %d\n", reset_count);

		reinit_completion(&ab->reset_complete);
		time_left = wait_for_completion_timeout(&ab->reset_complete,
							ATH12K_RESET_TIMEOUT_HZ);
		if (time_left) {
			/* the previous reset finished in time; skip this one */
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
			atomic_dec(&ab->reset_count);
			return;
		}

		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
		/* record the continuous recovery fail count when recovery failed */
		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");

	ab->is_reset = true;
	atomic_set(&ab->recovery_count, 0);

	ath12k_coredump_collect(ab);
	ath12k_core_pre_reconfigure_recovery(ab);

	ath12k_core_post_reconfigure_recovery(ab);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");

	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	ath12k_hif_power_down(ab, false);

	/* prepare for power up */
	ab->qmi.num_radios = U8_MAX;

	/* drop this device's started reference; the last device to get
	 * here powers the whole group back up
	 */
	mutex_lock(&ag->mutex);
	ath12k_core_to_group_ref_put(ab);

	if (ag->num_started > 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "waiting for %d partner device(s) to reset\n",
			   ag->num_started);
		mutex_unlock(&ag->mutex);
		return;
	}

	/* Prepare MLO global memory region for power up */
	ath12k_qmi_reset_mlo_mem(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		ath12k_hif_power_up(ab);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
	}

	mutex_unlock(&ag->mutex);
}
1743 
1744 int ath12k_core_pre_init(struct ath12k_base *ab)
1745 {
1746 	const struct ath12k_mem_profile_based_param *param;
1747 	int ret;
1748 
1749 	ret = ath12k_hw_init(ab);
1750 	if (ret) {
1751 		ath12k_err(ab, "failed to init hw params: %d\n", ret);
1752 		return ret;
1753 	}
1754 
1755 	param = &ath12k_mem_profile_based_param[ATH12K_QMI_MEMORY_MODE_DEFAULT];
1756 	ab->profile_param = param;
1757 	ath12k_fw_map(ab);
1758 
1759 	return 0;
1760 }
1761 
1762 static int ath12k_core_panic_handler(struct notifier_block *nb,
1763 				     unsigned long action, void *data)
1764 {
1765 	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
1766 					      panic_nb);
1767 
1768 	return ath12k_hif_panic_handler(ab);
1769 }
1770 
1771 static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
1772 {
1773 	ab->panic_nb.notifier_call = ath12k_core_panic_handler;
1774 
1775 	return atomic_notifier_chain_register(&panic_notifier_list,
1776 					      &ab->panic_nb);
1777 }
1778 
1779 static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
1780 {
1781 	atomic_notifier_chain_unregister(&panic_notifier_list,
1782 					 &ab->panic_nb);
1783 }
1784 
1785 static inline
1786 bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1787 {
1788 	lockdep_assert_held(&ag->mutex);
1789 
1790 	return (ag->num_probed == ag->num_devices);
1791 }
1792 
1793 static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
1794 {
1795 	struct ath12k_hw_group *ag;
1796 	int count = 0;
1797 
1798 	lockdep_assert_held(&ath12k_hw_group_mutex);
1799 
1800 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1801 		count++;
1802 
1803 	ag = kzalloc(sizeof(*ag), GFP_KERNEL);
1804 	if (!ag)
1805 		return NULL;
1806 
1807 	ag->id = count;
1808 	list_add(&ag->list, &ath12k_hw_group_list);
1809 	mutex_init(&ag->mutex);
1810 	ag->mlo_capable = false;
1811 
1812 	return ag;
1813 }
1814 
1815 static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
1816 {
1817 	mutex_lock(&ath12k_hw_group_mutex);
1818 
1819 	list_del(&ag->list);
1820 	kfree(ag);
1821 
1822 	mutex_unlock(&ath12k_hw_group_mutex);
1823 }
1824 
1825 static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
1826 {
1827 	struct ath12k_hw_group *ag;
1828 	int i;
1829 
1830 	if (!ab->dev->of_node)
1831 		return NULL;
1832 
1833 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1834 		for (i = 0; i < ag->num_devices; i++)
1835 			if (ag->wsi_node[i] == ab->dev->of_node)
1836 				return ag;
1837 
1838 	return NULL;
1839 }
1840 
1841 static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
1842 				    struct ath12k_base *ab)
1843 {
1844 	struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
1845 	struct device_node *tx_endpoint, *next_rx_endpoint;
1846 	int device_count = 0;
1847 
1848 	next_wsi_dev = wsi_dev;
1849 
1850 	if (!next_wsi_dev)
1851 		return -ENODEV;
1852 
1853 	do {
1854 		ag->wsi_node[device_count] = next_wsi_dev;
1855 
1856 		tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
1857 		if (!tx_endpoint) {
1858 			of_node_put(next_wsi_dev);
1859 			return -ENODEV;
1860 		}
1861 
1862 		next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
1863 		if (!next_rx_endpoint) {
1864 			of_node_put(next_wsi_dev);
1865 			of_node_put(tx_endpoint);
1866 			return -ENODEV;
1867 		}
1868 
1869 		of_node_put(tx_endpoint);
1870 		of_node_put(next_wsi_dev);
1871 
1872 		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
1873 		if (!next_wsi_dev) {
1874 			of_node_put(next_rx_endpoint);
1875 			return -ENODEV;
1876 		}
1877 
1878 		of_node_put(next_rx_endpoint);
1879 
1880 		device_count++;
1881 		if (device_count > ATH12K_MAX_DEVICES) {
1882 			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
1883 				    device_count, ATH12K_MAX_DEVICES);
1884 			of_node_put(next_wsi_dev);
1885 			return -EINVAL;
1886 		}
1887 	} while (wsi_dev != next_wsi_dev);
1888 
1889 	of_node_put(next_wsi_dev);
1890 	ag->num_devices = device_count;
1891 
1892 	return 0;
1893 }
1894 
1895 static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
1896 				     struct ath12k_base *ab)
1897 {
1898 	int i, wsi_controller_index = -1, node_index = -1;
1899 	bool control;
1900 
1901 	for (i = 0; i < ag->num_devices; i++) {
1902 		control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
1903 		if (control)
1904 			wsi_controller_index = i;
1905 
1906 		if (ag->wsi_node[i] == ab->dev->of_node)
1907 			node_index = i;
1908 	}
1909 
1910 	if (wsi_controller_index == -1) {
1911 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
1912 		return -EINVAL;
1913 	}
1914 
1915 	if (node_index == -1) {
1916 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
1917 		return -EINVAL;
1918 	}
1919 
1920 	ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
1921 		ag->num_devices;
1922 
1923 	return 0;
1924 }
1925 
/* Place a newly probed device into a hardware group: reuse the DT-matched
 * group when one exists and is usable, otherwise create a new one (a
 * single-device "invalid" group when DT gives no usable WSI info).
 * Records the device in the group and sets ab->device_id / ab->ag.
 *
 * Caller must hold ath12k_hw_group_mutex. Returns the group or NULL on
 * allocation failure.
 */
static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
{
	struct ath12k_wsi_info *wsi = &ab->wsi_info;
	struct ath12k_hw_group *ag;

	lockdep_assert_held(&ath12k_hw_group_mutex);

	if (ath12k_ftm_mode)
		goto invalid_group;

	/* The grouping of multiple devices will be done based on device tree file.
	 * The platforms that do not have any valid group information would have
	 * each device to be part of its own invalid group.
	 *
	 * We use group id ATH12K_INVALID_GROUP_ID for single device group
	 * which didn't have dt entry or wrong dt entry, there could be many
	 * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So
	 * default group id of ATH12K_INVALID_GROUP_ID combined with
	 * num devices in ath12k_hw_group determines if the group is
	 * multi device or single device group
	 */

	ag = ath12k_core_hw_group_find_by_dt(ab);
	if (!ag) {
		ag = ath12k_core_hw_group_alloc(ab);
		if (!ag) {
			ath12k_warn(ab, "unable to create new hw group\n");
			return NULL;
		}

		if (ath12k_core_get_wsi_info(ag, ab) ||
		    ath12k_core_get_wsi_index(ag, ab)) {
			/* fall back to a single-device group on bad/missing DT */
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "unable to get wsi info from dt, grouping single device");
			ag->id = ATH12K_INVALID_GROUP_ID;
			ag->num_devices = 1;
			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
			wsi->index = 0;
		}

		goto exit;
	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
			   ag->id);
		goto invalid_group;
	} else {
		if (ath12k_core_get_wsi_index(ag, ab))
			goto invalid_group;
		goto exit;
	}

invalid_group:
	ag = ath12k_core_hw_group_alloc(ab);
	if (!ag) {
		ath12k_warn(ab, "unable to create new hw group\n");
		return NULL;
	}

	ag->id = ATH12K_INVALID_GROUP_ID;
	ag->num_devices = 1;
	wsi->index = 0;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");

exit:
	/* a full group sends the device to a fresh single-device group
	 * instead (the invalid_group path above)
	 */
	if (ag->num_probed >= ag->num_devices) {
		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
		goto invalid_group;
	}

	ab->device_id = ag->num_probed++;
	ag->ab[ab->device_id] = ab;
	ab->ag = ag;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
		   ag->id, ag->num_devices, wsi->index);

	return ag;
}
2005 
/* Remove a device from its hardware group (inverse of group assign).
 * When the last probed device leaves, the group itself is freed.
 */
void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	u8 device_id = ab->device_id;
	int num_probed;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (WARN_ON(device_id >= ag->num_devices)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	if (WARN_ON(ag->ab[device_id] != ab)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	ag->ab[device_id] = NULL;
	ab->ag = NULL;
	ab->device_id = ATH12K_INVALID_DEVICE_ID;

	if (ag->num_probed)
		ag->num_probed--;

	/* snapshot under the mutex; the group free below must not hold it */
	num_probed = ag->num_probed;

	mutex_unlock(&ag->mutex);

	if (!num_probed)
		ath12k_core_hw_group_free(ag);
}
2041 
2042 static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
2043 {
2044 	struct ath12k_base *ab;
2045 	int i;
2046 
2047 	if (WARN_ON(!ag))
2048 		return;
2049 
2050 	for (i = 0; i < ag->num_devices; i++) {
2051 		ab = ag->ab[i];
2052 		if (!ab)
2053 			continue;
2054 
2055 		ath12k_core_soc_destroy(ab);
2056 	}
2057 }
2058 
/* Take the whole group out of service exactly once (guarded by
 * ATH12K_GROUP_FLAG_UNREGISTER): stop the group and then stop the core
 * of every attached device.
 */
void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);

	ath12k_core_hw_group_stop(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}

	mutex_unlock(&ag->mutex);
}
2090 
2091 static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
2092 {
2093 	struct ath12k_base *ab;
2094 	int i, ret;
2095 
2096 	lockdep_assert_held(&ag->mutex);
2097 
2098 	for (i = 0; i < ag->num_devices; i++) {
2099 		ab = ag->ab[i];
2100 		if (!ab)
2101 			continue;
2102 
2103 		mutex_lock(&ab->core_lock);
2104 
2105 		ret = ath12k_core_soc_create(ab);
2106 		if (ret) {
2107 			mutex_unlock(&ab->core_lock);
2108 			ath12k_err(ab, "failed to create soc core: %d\n", ret);
2109 			return ret;
2110 		}
2111 
2112 		mutex_unlock(&ab->core_lock);
2113 	}
2114 
2115 	return 0;
2116 }
2117 
/* Decide whether the group supports MLO. Single-device groups take the
 * answer from firmware (firmware IE feature bit, or the QMI
 * single_chip_mlo_support flag as fallback); multi-device groups are
 * MLO capable only if every device's firmware advertises MLO.
 * Never enabled in factory test mode. Caller must hold ag->mutex.
 */
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (ath12k_ftm_mode)
		return;

	lockdep_assert_held(&ag->mutex);

	if (ag->num_devices == 1) {
		ab = ag->ab[0];
		/* QCN9274 firmware uses firmware IE for MLO advertisement */
		if (ab->fw.fw_features_valid) {
			ag->mlo_capable =
				ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
			return;
		}

		/* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
		ag->mlo_capable = ab->single_chip_mlo_support;
		return;
	}

	ag->mlo_capable = true;

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		/* even if 1 device's firmware feature indicates MLO
		 * unsupported, make MLO unsupported for the whole group
		 */
		if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
			ag->mlo_capable = false;
			return;
		}
	}
}
2158 
/* Per-device core init: register the panic notifier, assign the device
 * to a hardware group and, if this was the last device expected in the
 * group, create the soc state for the whole group.
 *
 * Returns 0 on success or a negative error value; on failure any group
 * membership and the panic notifier are rolled back.
 */
int ath12k_core_init(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag;
	int ret;

	/* registration failure is non-fatal; only warn */
	ret = ath12k_core_panic_notifier_register(ab);
	if (ret)
		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);

	mutex_lock(&ath12k_hw_group_mutex);

	ag = ath12k_core_hw_group_assign(ab);
	if (!ag) {
		mutex_unlock(&ath12k_hw_group_mutex);
		ath12k_warn(ab, "unable to get hw group\n");
		ret = -ENODEV;
		goto err_unregister_notifier;
	}

	mutex_unlock(&ath12k_hw_group_mutex);

	mutex_lock(&ag->mutex);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
		   ag->num_devices, ag->num_probed);

	if (ath12k_core_hw_group_create_ready(ag)) {
		ret = ath12k_core_hw_group_create(ag);
		if (ret) {
			mutex_unlock(&ag->mutex);
			ath12k_warn(ab, "unable to create hw group\n");
			goto err_destroy_hw_group;
		}
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_destroy_hw_group:
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
err_unregister_notifier:
	ath12k_core_panic_notifier_unregister(ab);

	return ret;
}
2206 
/* Inverse of ath12k_core_init(): destroy the group soc state, leave the
 * hardware group and drop the panic notifier.
 */
void ath12k_core_deinit(struct ath12k_base *ab)
{
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
	ath12k_core_panic_notifier_unregister(ab);
}
2213 
/* Final teardown of an ath12k_base from ath12k_core_alloc(): stop the
 * replenish retry timer before destroying the workqueues (which drain
 * any remaining queued work), then free the structure.
 */
void ath12k_core_free(struct ath12k_base *ab)
{
	timer_delete_sync(&ab->rx_replenish_retry);
	destroy_workqueue(ab->workqueue_aux);
	destroy_workqueue(ab->workqueue);
	kfree(ab);
}
2221 
/* Allocate and initialize an ath12k_base (plus @priv_size bytes of
 * bus-private data): workqueues, locks, deferred work, completions and
 * timers. Returns NULL on failure; free with ath12k_core_free().
 */
struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
				      enum ath12k_bus bus)
{
	struct ath12k_base *ab;

	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
	if (!ab)
		return NULL;

	init_completion(&ab->driver_recovery);

	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
	if (!ab->workqueue)
		goto err_sc_free;

	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
	if (!ab->workqueue_aux)
		goto err_free_wq;

	mutex_init(&ab->core_lock);
	spin_lock_init(&ab->base_lock);
	init_completion(&ab->reset_complete);

	INIT_LIST_HEAD(&ab->peers);
	init_waitqueue_head(&ab->peer_mapping_wq);
	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
	INIT_WORK(&ab->restart_work, ath12k_core_restart);
	INIT_WORK(&ab->reset_work, ath12k_core_reset);
	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
	INIT_WORK(&ab->update_11d_work, ath12k_update_11d);

	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
	init_completion(&ab->htc_suspend);
	init_completion(&ab->restart_completed);
	init_completion(&ab->wow.wakeup_completed);

	ab->dev = dev;
	ab->hif.bus = bus;
	/* radio count unknown until QMI reports it */
	ab->qmi.num_radios = U8_MAX;
	ab->single_chip_mlo_support = false;

	/* Device index used to identify the devices in a group.
	 *
	 * In Intra-device MLO, only one device present in a group,
	 * so it is always zero.
	 *
	 * In Inter-device MLO, Multiple device present in a group,
	 * expect non-zero value.
	 */
	ab->device_id = 0;

	return ab;

err_free_wq:
	destroy_workqueue(ab->workqueue);
err_sc_free:
	kfree(ab);
	return NULL;
}
2282 
2283 static int ath12k_init(void)
2284 {
2285 	ahb_err = ath12k_ahb_init();
2286 	if (ahb_err)
2287 		pr_warn("Failed to initialize ath12k AHB device: %d\n", ahb_err);
2288 
2289 	pci_err = ath12k_pci_init();
2290 	if (pci_err)
2291 		pr_warn("Failed to initialize ath12k PCI device: %d\n", pci_err);
2292 
2293 	/* If both failed, return one of the failures (arbitrary) */
2294 	return ahb_err && pci_err ? ahb_err : 0;
2295 }
2296 
2297 static void ath12k_exit(void)
2298 {
2299 	if (!pci_err)
2300 		ath12k_pci_exit();
2301 
2302 	if (!ahb_err)
2303 		ath12k_ahb_exit();
2304 }
2305 
/* Module entry/exit points and metadata. */
module_init(ath12k_init);
module_exit(ath12k_exit);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");
2311