xref: /linux/drivers/net/wireless/ath/ath12k/core.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5  */
6 
7 #include <linux/export.h>
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/remoteproc.h>
11 #include <linux/firmware.h>
12 #include <linux/of.h>
13 #include <linux/of_graph.h>
14 #include "ahb.h"
15 #include "core.h"
16 #include "dp_tx.h"
17 #include "dp_rx.h"
18 #include "debug.h"
19 #include "debugfs.h"
20 #include "fw.h"
21 #include "hif.h"
22 #include "pci.h"
23 #include "wow.h"
24 
25 static int ahb_err, pci_err;
26 unsigned int ath12k_debug_mask;
27 module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
28 MODULE_PARM_DESC(debug_mask, "Debugging mask");
29 
30 bool ath12k_ftm_mode;
31 module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
32 MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
33 
34 /* protected with ath12k_hw_group_mutex */
35 static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
36 
37 static DEFINE_MUTEX(ath12k_hw_group_mutex);
38 
39 static const struct
40 ath12k_mem_profile_based_param ath12k_mem_profile_based_param[] = {
41 	[ATH12K_QMI_MEMORY_MODE_DEFAULT] = {
42 		.num_vdevs = 17,
43 		.max_client_single = 512,
44 		.max_client_dbs = 128,
45 		.max_client_dbs_sbs = 128,
46 		.dp_params = {
47 			.tx_comp_ring_size = 32768,
48 			.rxdma_monitor_buf_ring_size = 4096,
49 			.rxdma_monitor_dst_ring_size = 8092,
50 			.num_pool_tx_desc = 32768,
51 			.rx_desc_count = 12288,
52 		},
53 	},
54 	[ATH12K_QMI_MEMORY_MODE_LOW_512_M] = {
55 		.num_vdevs = 9,
56 		.max_client_single = 128,
57 		.max_client_dbs = 64,
58 		.max_client_dbs_sbs = 64,
59 		.dp_params = {
60 			.tx_comp_ring_size = 16384,
61 			.rxdma_monitor_buf_ring_size = 256,
62 			.rxdma_monitor_dst_ring_size = 512,
63 			.num_pool_tx_desc = 16384,
64 			.rx_desc_count = 6144,
65 		},
66 	},
67 };
68 
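/* Push the rfkill configuration to firmware for every radio on this device.
 * This is a no-op when the firmware does not advertise WMI_SYS_CAP_INFO_RFKILL
 * or when ACPI asks for rfkill to remain disabled.
 */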
69 static int ath12k_core_rfkill_config(struct ath12k_base *ab)
70 {
71 	struct ath12k *ar;
72 	int ret = 0, i;
73 
74 	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
75 		return 0;
76 
77 	if (ath12k_acpi_get_disable_rfkill(ab))
78 		return 0;
79 
80 	for (i = 0; i < ab->num_radios; i++) {
81 		ar = ab->pdevs[i].ar;
82 
83 		ret = ath12k_mac_rfkill_config(ar);
84 		if (ret && ret != -EOPNOTSUPP) {
85 			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
86 			return ret;
87 		}
88 	}
89 
90 	return ret;
91 }
92 
93 /* Check if we need to continue with the suspend/resume operation.
94  * Return:
95  *	a negative value: an error occurred, don't continue.
96  *	0: no error, but don't continue.
97  *	positive value: no error, do continue.
98  */
99 static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
100 {
101 	struct ath12k *ar;
102 
103 	if (!ab->hw_params->supports_suspend)
104 		return -EOPNOTSUPP;
105 
106 	/* So far only single_pdev_only chips have supports_suspend set to
107 	 * true, so pass 0 as a dummy pdev_id here.
108 	 */
109 	ar = ab->pdevs[0].ar;
110 	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
111 		return 0;
112 
113 	return 1;
114 }
115 
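/* System suspend entry point: for each radio, wait under the wiphy lock for
 * pending tx to complete before the device is suspended.
 */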
116 int ath12k_core_suspend(struct ath12k_base *ab)
117 {
118 	struct ath12k *ar;
119 	int ret, i;
120 
121 	ret = ath12k_core_continue_suspend_resume(ab);
122 	if (ret <= 0)
123 		return ret;
124 
125 	for (i = 0; i < ab->num_radios; i++) {
126 		ar = ab->pdevs[i].ar;
127 		if (!ar)
128 			continue;
129 
130 		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
131 
132 		ret = ath12k_mac_wait_tx_complete(ar);
133 		if (ret) {
134 			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
135 			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
136 			return ret;
137 		}
138 
139 		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
140 	}
141 
142 	/* The PM framework skips the suspend_late/resume_early callbacks
143 	 * if other devices report errors in their suspend callbacks.
144 	 * However, ath12k_core_resume() would still be called because
145 	 * we return success here and the kernel therefore puts us on the
146 	 * dpm_suspended_list. Since we won't go through a power down/up
147 	 * cycle, there is no chance to call complete(&ab->restart_completed)
148 	 * in ath12k_core_restart(), making ath12k_core_resume() time out.
149 	 * So call it here to avoid this issue. This also works when no
150 	 * error happens and suspend_late/resume_early do get called,
151 	 * because the completion is reinitialized in ath12k_core_resume_early().
152 	 */
153 	complete(&ab->restart_completed);
154 
155 	return 0;
156 }
157 EXPORT_SYMBOL(ath12k_core_suspend);
158 
159 int ath12k_core_suspend_late(struct ath12k_base *ab)
160 {
161 	int ret;
162 
163 	ret = ath12k_core_continue_suspend_resume(ab);
164 	if (ret <= 0)
165 		return ret;
166 
167 	ath12k_acpi_stop(ab);
168 
169 	ath12k_hif_irq_disable(ab);
170 	ath12k_hif_ce_irq_disable(ab);
171 
172 	ath12k_hif_power_down(ab, true);
173 
174 	return 0;
175 }
176 EXPORT_SYMBOL(ath12k_core_suspend_late);
177 
178 int ath12k_core_resume_early(struct ath12k_base *ab)
179 {
180 	int ret;
181 
182 	ret = ath12k_core_continue_suspend_resume(ab);
183 	if (ret <= 0)
184 		return ret;
185 
186 	reinit_completion(&ab->restart_completed);
187 	ret = ath12k_hif_power_up(ab);
188 	if (ret)
189 		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
190 
191 	return ret;
192 }
193 EXPORT_SYMBOL(ath12k_core_resume_early);
194 
195 int ath12k_core_resume(struct ath12k_base *ab)
196 {
197 	long time_left;
198 	int ret;
199 
200 	ret = ath12k_core_continue_suspend_resume(ab);
201 	if (ret <= 0)
202 		return ret;
203 
204 	time_left = wait_for_completion_timeout(&ab->restart_completed,
205 						ATH12K_RESET_TIMEOUT_HZ);
206 	if (time_left == 0) {
207 		ath12k_warn(ab, "timeout while waiting for restart complete");
208 		return -ETIMEDOUT;
209 	}
210 
211 	return 0;
212 }
213 EXPORT_SYMBOL(ath12k_core_resume);
214 
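/* Compose the board name used to look up entries in the board data file. The
 * name is built from the bus type, the QMI chip/board ids, optionally the PCI
 * subsystem ids and an optional ",variant=" suffix taken from
 * ab->qmi.target.bdf_ext; the exact format depends on ab->id.bdf_search and
 * the with_* arguments.
 */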
215 static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
216 					   size_t name_len, bool with_variant,
217 					   bool bus_type_mode, bool with_default)
218 {
219 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
220 	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = {};
221 
222 	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
223 		scnprintf(variant, sizeof(variant), ",variant=%s",
224 			  ab->qmi.target.bdf_ext);
225 
226 	switch (ab->id.bdf_search) {
227 	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
228 		if (bus_type_mode)
229 			scnprintf(name, name_len,
230 				  "bus=%s",
231 				  ath12k_bus_str(ab->hif.bus));
232 		else
233 			scnprintf(name, name_len,
234 				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
235 				  ath12k_bus_str(ab->hif.bus),
236 				  ab->id.vendor, ab->id.device,
237 				  ab->id.subsystem_vendor,
238 				  ab->id.subsystem_device,
239 				  ab->qmi.target.chip_id,
240 				  ab->qmi.target.board_id,
241 				  variant);
242 		break;
243 	default:
244 		scnprintf(name, name_len,
245 			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
246 			  ath12k_bus_str(ab->hif.bus),
247 			  ab->qmi.target.chip_id,
248 			  with_default ?
249 			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
250 			  variant);
251 		break;
252 	}
253 
254 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
255 
256 	return 0;
257 }
258 
259 static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
260 					 size_t name_len)
261 {
262 	return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
263 }
264 
265 static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
266 						  size_t name_len)
267 {
268 	return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
269 }
270 
271 static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
272 						  size_t name_len)
273 {
274 	return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
275 }
276 
277 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
278 						    const char *file)
279 {
280 	const struct firmware *fw;
281 	char path[100];
282 	int ret;
283 
284 	if (!file)
285 		return ERR_PTR(-ENOENT);
286 
287 	ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
288 
289 	ret = firmware_request_nowarn(&fw, path, ab->dev);
290 	if (ret)
291 		return ERR_PTR(ret);
292 
293 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
294 		   path, fw->size);
295 
296 	return fw;
297 }
298 
299 void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
300 {
301 	if (!IS_ERR(bd->fw))
302 		release_firmware(bd->fw);
303 
304 	memset(bd, 0, sizeof(*bd));
305 }
306 
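/* Walk the TLV elements inside a board/regdb container IE: a name element
 * (name_id) marks the entry matching 'boardname' and the subsequent data
 * element (data_id) supplies the payload returned via bd->data/bd->len.
 * Returns -ENOENT when no matching entry is found.
 */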
307 static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
308 					 struct ath12k_board_data *bd,
309 					 const void *buf, size_t buf_len,
310 					 const char *boardname,
311 					 int ie_id,
312 					 int name_id,
313 					 int data_id)
314 {
315 	const struct ath12k_fw_ie *hdr;
316 	bool name_match_found;
317 	int ret, board_ie_id;
318 	size_t board_ie_len;
319 	const void *board_ie_data;
320 
321 	name_match_found = false;
322 
323 	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
324 	while (buf_len > sizeof(struct ath12k_fw_ie)) {
325 		hdr = buf;
326 		board_ie_id = le32_to_cpu(hdr->id);
327 		board_ie_len = le32_to_cpu(hdr->len);
328 		board_ie_data = hdr->data;
329 
330 		buf_len -= sizeof(*hdr);
331 		buf += sizeof(*hdr);
332 
333 		if (buf_len < ALIGN(board_ie_len, 4)) {
334 			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
335 				   ath12k_bd_ie_type_str(ie_id),
336 				   buf_len, ALIGN(board_ie_len, 4));
337 			ret = -EINVAL;
338 			goto out;
339 		}
340 
341 		if (board_ie_id == name_id) {
342 			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
343 					board_ie_data, board_ie_len);
344 
345 			if (board_ie_len != strlen(boardname))
346 				goto next;
347 
348 			ret = memcmp(board_ie_data, boardname, strlen(boardname));
349 			if (ret)
350 				goto next;
351 
352 			name_match_found = true;
353 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
354 				   "boot found match %s for name '%s'",
355 				   ath12k_bd_ie_type_str(ie_id),
356 				   boardname);
357 		} else if (board_ie_id == data_id) {
358 			if (!name_match_found)
359 				/* no match found */
360 				goto next;
361 
362 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
363 				   "boot found %s for '%s'",
364 				   ath12k_bd_ie_type_str(ie_id),
365 				   boardname);
366 
367 			bd->data = board_ie_data;
368 			bd->len = board_ie_len;
369 
370 			ret = 0;
371 			goto out;
372 		} else {
373 			ath12k_warn(ab, "unknown %s id found: %d\n",
374 				    ath12k_bd_ie_type_str(ie_id),
375 				    board_ie_id);
376 		}
377 next:
378 		/* jump over the padding */
379 		board_ie_len = ALIGN(board_ie_len, 4);
380 
381 		buf_len -= board_ie_len;
382 		buf += board_ie_len;
383 	}
384 
385 	/* no match found */
386 	ret = -ENOENT;
387 
388 out:
389 	return ret;
390 }
391 
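/* Fetch board data from the API 2 container file (ATH12K_BOARD_API2_FILE):
 * validate the ATH12K_BOARD_MAGIC header, then iterate the top-level IEs and
 * hand the one matching ie_id_match to ath12k_core_parse_bd_ie_board().
 */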
392 static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
393 					      struct ath12k_board_data *bd,
394 					      const char *boardname,
395 					      int ie_id_match,
396 					      int name_id,
397 					      int data_id)
398 {
399 	size_t len, magic_len;
400 	const u8 *data;
401 	char *filename, filepath[100];
402 	size_t ie_len;
403 	struct ath12k_fw_ie *hdr;
404 	int ret, ie_id;
405 
406 	filename = ATH12K_BOARD_API2_FILE;
407 
408 	if (!bd->fw)
409 		bd->fw = ath12k_core_firmware_request(ab, filename);
410 
411 	if (IS_ERR(bd->fw))
412 		return PTR_ERR(bd->fw);
413 
414 	data = bd->fw->data;
415 	len = bd->fw->size;
416 
417 	ath12k_core_create_firmware_path(ab, filename,
418 					 filepath, sizeof(filepath));
419 
420 	/* the magic string is padded with an extra null byte */
421 	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
422 	if (len < magic_len) {
423 		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
424 			   filepath, len);
425 		ret = -EINVAL;
426 		goto err;
427 	}
428 
429 	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
430 		ath12k_err(ab, "found invalid board magic\n");
431 		ret = -EINVAL;
432 		goto err;
433 	}
434 
435 	/* magic is padded to 4 bytes */
436 	magic_len = ALIGN(magic_len, 4);
437 	if (len < magic_len) {
438 		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
439 			   filepath, len);
440 		ret = -EINVAL;
441 		goto err;
442 	}
443 
444 	data += magic_len;
445 	len -= magic_len;
446 
447 	while (len > sizeof(struct ath12k_fw_ie)) {
448 		hdr = (struct ath12k_fw_ie *)data;
449 		ie_id = le32_to_cpu(hdr->id);
450 		ie_len = le32_to_cpu(hdr->len);
451 
452 		len -= sizeof(*hdr);
453 		data = hdr->data;
454 
455 		if (len < ALIGN(ie_len, 4)) {
456 			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
457 				   ie_id, ie_len, len);
458 			ret = -EINVAL;
459 			goto err;
460 		}
461 
462 		if (ie_id == ie_id_match) {
463 			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
464 							    ie_len,
465 							    boardname,
466 							    ie_id_match,
467 							    name_id,
468 							    data_id);
469 			if (ret == -ENOENT)
470 				/* no match found, continue */
471 				goto next;
472 			else if (ret)
473 				/* there was an error, bail out */
474 				goto err;
475 			/* either found or error, so stop searching */
476 			goto out;
477 		}
478 next:
479 		/* jump over the padding */
480 		ie_len = ALIGN(ie_len, 4);
481 
482 		len -= ie_len;
483 		data += ie_len;
484 	}
485 
486 out:
487 	if (!bd->data || !bd->len) {
488 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
489 			   "failed to fetch %s for %s from %s\n",
490 			   ath12k_bd_ie_type_str(ie_id_match),
491 			   boardname, filepath);
492 		ret = -ENODATA;
493 		goto err;
494 	}
495 
496 	return 0;
497 
498 err:
499 	ath12k_core_free_bdf(ab, bd);
500 	return ret;
501 }
502 
503 int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
504 				       struct ath12k_board_data *bd,
505 				       char *filename)
506 {
507 	bd->fw = ath12k_core_firmware_request(ab, filename);
508 	if (IS_ERR(bd->fw))
509 		return PTR_ERR(bd->fw);
510 
511 	bd->data = bd->fw->data;
512 	bd->len = bd->fw->size;
513 
514 	return 0;
515 }
516 
517 #define BOARD_NAME_SIZE 200
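/* Fetch the board data blob, trying the most specific match first: the full
 * board name (including variant), then the fallback name with the default
 * board id, and finally the legacy API 1 file ATH12K_DEFAULT_BOARD_FILE.
 */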
518 int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
519 {
520 	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
521 	char *filename, filepath[100];
522 	int bd_api;
523 	int ret;
524 
525 	filename = ATH12K_BOARD_API2_FILE;
526 
527 	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
528 	if (ret) {
529 		ath12k_err(ab, "failed to create board name: %d", ret);
530 		return ret;
531 	}
532 
533 	bd_api = 2;
534 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
535 						 ATH12K_BD_IE_BOARD,
536 						 ATH12K_BD_IE_BOARD_NAME,
537 						 ATH12K_BD_IE_BOARD_DATA);
538 	if (!ret)
539 		goto success;
540 
541 	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
542 						     sizeof(fallback_boardname));
543 	if (ret) {
544 		ath12k_err(ab, "failed to create fallback board name: %d", ret);
545 		return ret;
546 	}
547 
548 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
549 						 ATH12K_BD_IE_BOARD,
550 						 ATH12K_BD_IE_BOARD_NAME,
551 						 ATH12K_BD_IE_BOARD_DATA);
552 	if (!ret)
553 		goto success;
554 
555 	bd_api = 1;
556 	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
557 	if (ret) {
558 		ath12k_core_create_firmware_path(ab, filename,
559 						 filepath, sizeof(filepath));
560 		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
561 			   boardname, filepath);
562 		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
563 			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
564 				   fallback_boardname, filepath);
565 
566 		ath12k_err(ab, "failed to fetch board.bin from %s\n",
567 			   ab->hw_params->fw.dir);
568 		return ret;
569 	}
570 
571 success:
572 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
573 	return 0;
574 }
575 
576 int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
577 {
578 	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
579 	int ret;
580 
581 	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
582 	if (ret) {
583 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
584 			   "failed to create board name for regdb: %d", ret);
585 		goto exit;
586 	}
587 
588 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
589 						 ATH12K_BD_IE_REGDB,
590 						 ATH12K_BD_IE_REGDB_NAME,
591 						 ATH12K_BD_IE_REGDB_DATA);
592 	if (!ret)
593 		goto exit;
594 
595 	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
596 						     BOARD_NAME_SIZE);
597 	if (ret) {
598 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
599 			   "failed to create default board name for regdb: %d", ret);
600 		goto exit;
601 	}
602 
603 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
604 						 ATH12K_BD_IE_REGDB,
605 						 ATH12K_BD_IE_REGDB_NAME,
606 						 ATH12K_BD_IE_REGDB_DATA);
607 	if (!ret)
608 		goto exit;
609 
610 	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
611 	if (ret)
612 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
613 			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);
614 
615 exit:
616 	if (!ret)
617 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");
618 
619 	return ret;
620 }
621 
622 u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
623 {
624 	if (ab->num_radios == 2)
625 		return TARGET_NUM_STATIONS(ab, DBS);
626 	if (ab->num_radios == 3)
627 		return TARGET_NUM_STATIONS(ab, DBS_SBS);
628 	return TARGET_NUM_STATIONS(ab, SINGLE);
629 }
630 
631 u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
632 {
633 	return ath12k_core_get_max_station_per_radio(ab) + TARGET_NUM_VDEVS(ab);
634 }
635 
636 struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
637 						  int index)
638 {
639 	struct device *dev = ab->dev;
640 	struct reserved_mem *rmem;
641 	struct device_node *node;
642 
643 	node = of_parse_phandle(dev->of_node, "memory-region", index);
644 	if (!node) {
645 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
646 			   "failed to parse memory-region for index %d\n", index);
647 		return NULL;
648 	}
649 
650 	rmem = of_reserved_mem_lookup(node);
651 	of_node_put(node);
652 	if (!rmem) {
653 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
654 			   "unable to get memory-region for index %d\n", index);
655 		return NULL;
656 	}
657 
658 	return rmem;
659 }
660 
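/* Track how many devices in a hardware group have been started: each device
 * takes a single reference on its group via the ref_get() helper in
 * ath12k_core_start() and drops it via ref_put() on stop/reset, so
 * ag->num_started reflects the number of currently active devices.
 */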
661 static inline
662 void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
663 {
664 	struct ath12k_hw_group *ag = ab->ag;
665 
666 	lockdep_assert_held(&ag->mutex);
667 
668 	if (ab->hw_group_ref) {
669 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
670 			   ag->id);
671 		return;
672 	}
673 
674 	ab->hw_group_ref = true;
675 	ag->num_started++;
676 
677 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
678 		   ag->id, ag->num_started);
679 }
680 
681 static inline
682 void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
683 {
684 	struct ath12k_hw_group *ag = ab->ag;
685 
686 	lockdep_assert_held(&ag->mutex);
687 
688 	if (!ab->hw_group_ref) {
689 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
690 			   ag->id);
691 		return;
692 	}
693 
694 	ab->hw_group_ref = false;
695 	ag->num_started--;
696 
697 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
698 		   ag->id, ag->num_started);
699 }
700 
701 static void ath12k_core_stop(struct ath12k_base *ab)
702 {
703 	ath12k_core_to_group_ref_put(ab);
704 
705 	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
706 		ath12k_qmi_firmware_stop(ab);
707 
708 	ath12k_acpi_stop(ab);
709 
710 	ath12k_dp_rx_pdev_reo_cleanup(ab);
711 	ath12k_hif_stop(ab);
712 	ath12k_wmi_detach(ab);
713 	ath12k_dp_free(ab);
714 
715 	/* De-Init of components as needed */
716 }
717 
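/* dmi_walk() callback: read the ath12k SMBIOS extension entry to pick up the
 * regulatory country code (stored in ab->new_alpha2) and the BDF variant name
 * (stored in ab->qmi.target.bdf_ext) when present and valid.
 */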
718 static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
719 {
720 	struct ath12k_base *ab = data;
721 	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
722 	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
723 	ssize_t copied;
724 	size_t len;
725 	int i;
726 
727 	if (ab->qmi.target.bdf_ext[0] != '\0')
728 		return;
729 
730 	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
731 		return;
732 
733 	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
734 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
735 			   "wrong smbios bdf ext type length (%d).\n",
736 			   hdr->length);
737 		return;
738 	}
739 
740 	spin_lock_bh(&ab->base_lock);
741 
742 	switch (smbios->country_code_flag) {
743 	case ATH12K_SMBIOS_CC_ISO:
744 		ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
745 		ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
746 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
747 			   ab->new_alpha2[0], ab->new_alpha2[1]);
748 		break;
749 	case ATH12K_SMBIOS_CC_WW:
750 		ab->new_alpha2[0] = '0';
751 		ab->new_alpha2[1] = '0';
752 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
753 		break;
754 	default:
755 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
756 			   smbios->country_code_flag);
757 		break;
758 	}
759 
760 	spin_unlock_bh(&ab->base_lock);
761 
762 	if (!smbios->bdf_enabled) {
763 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
764 		return;
765 	}
766 
767 	/* Only one string exists (per spec) */
768 	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
769 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
770 			   "bdf variant magic does not match.\n");
771 		return;
772 	}
773 
774 	len = min_t(size_t,
775 		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
776 	for (i = 0; i < len; i++) {
777 		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
778 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
779 				   "bdf variant name contains non ascii chars.\n");
780 			return;
781 		}
782 	}
783 
784 	/* Copy extension name without magic prefix */
785 	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
786 			 sizeof(ab->qmi.target.bdf_ext));
787 	if (copied < 0) {
788 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
789 			   "bdf variant string is longer than the buffer can accommodate\n");
790 		return;
791 	}
792 
793 	ath12k_dbg(ab, ATH12K_DBG_BOOT,
794 		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
795 		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
796 }
797 
798 int ath12k_core_check_smbios(struct ath12k_base *ab)
799 {
800 	ab->qmi.target.bdf_ext[0] = '\0';
801 	dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
802 
803 	if (ab->qmi.target.bdf_ext[0] == '\0')
804 		return -ENODATA;
805 
806 	return 0;
807 }
808 
809 static int ath12k_core_soc_create(struct ath12k_base *ab)
810 {
811 	int ret;
812 
813 	if (ath12k_ftm_mode) {
814 		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
815 		ath12k_info(ab, "Booting in ftm mode\n");
816 	}
817 
818 	ret = ath12k_qmi_init_service(ab);
819 	if (ret) {
820 		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
821 		return ret;
822 	}
823 
824 	ath12k_debugfs_soc_create(ab);
825 
826 	ret = ath12k_hif_power_up(ab);
827 	if (ret) {
828 		ath12k_err(ab, "failed to power up :%d\n", ret);
829 		goto err_qmi_deinit;
830 	}
831 
832 	ath12k_debugfs_pdev_create(ab);
833 
834 	return 0;
835 
836 err_qmi_deinit:
837 	ath12k_debugfs_soc_destroy(ab);
838 	ath12k_qmi_deinit_service(ab);
839 	return ret;
840 }
841 
842 static void ath12k_core_soc_destroy(struct ath12k_base *ab)
843 {
844 	ath12k_hif_power_down(ab, false);
845 	ath12k_reg_free(ab);
846 	ath12k_debugfs_soc_destroy(ab);
847 	ath12k_qmi_deinit_service(ab);
848 }
849 
850 static int ath12k_core_pdev_create(struct ath12k_base *ab)
851 {
852 	int ret;
853 
854 	ret = ath12k_dp_pdev_alloc(ab);
855 	if (ret) {
856 		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
857 		return ret;
858 	}
859 
860 	return 0;
861 }
862 
863 static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
864 {
865 	ath12k_dp_pdev_free(ab);
866 }
867 
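/* Per-device start sequence run once firmware is up: attach WMI, start
 * HTC/HIF, connect the HTT and WMI services, wait for the firmware ready
 * events, set up the REO destination rings and finally take a reference on
 * the hardware group.
 */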
868 static int ath12k_core_start(struct ath12k_base *ab)
869 {
870 	int ret;
871 
872 	lockdep_assert_held(&ab->core_lock);
873 
874 	ret = ath12k_wmi_attach(ab);
875 	if (ret) {
876 		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
877 		return ret;
878 	}
879 
880 	ret = ath12k_htc_init(ab);
881 	if (ret) {
882 		ath12k_err(ab, "failed to init htc: %d\n", ret);
883 		goto err_wmi_detach;
884 	}
885 
886 	ret = ath12k_hif_start(ab);
887 	if (ret) {
888 		ath12k_err(ab, "failed to start HIF: %d\n", ret);
889 		goto err_wmi_detach;
890 	}
891 
892 	ret = ath12k_htc_wait_target(&ab->htc);
893 	if (ret) {
894 		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
895 		goto err_hif_stop;
896 	}
897 
898 	ret = ath12k_dp_htt_connect(&ab->dp);
899 	if (ret) {
900 		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
901 		goto err_hif_stop;
902 	}
903 
904 	ret = ath12k_wmi_connect(ab);
905 	if (ret) {
906 		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
907 		goto err_hif_stop;
908 	}
909 
910 	ret = ath12k_htc_start(&ab->htc);
911 	if (ret) {
912 		ath12k_err(ab, "failed to start HTC: %d\n", ret);
913 		goto err_hif_stop;
914 	}
915 
916 	ret = ath12k_wmi_wait_for_service_ready(ab);
917 	if (ret) {
918 		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
919 			   ret);
920 		goto err_hif_stop;
921 	}
922 
923 	ath12k_dp_cc_config(ab);
924 
925 	ret = ath12k_dp_rx_pdev_reo_setup(ab);
926 	if (ret) {
927 		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
928 		goto err_hif_stop;
929 	}
930 
931 	ath12k_dp_hal_rx_desc_init(ab);
932 
933 	ret = ath12k_wmi_cmd_init(ab);
934 	if (ret) {
935 		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
936 		goto err_reo_cleanup;
937 	}
938 
939 	ret = ath12k_wmi_wait_for_unified_ready(ab);
940 	if (ret) {
941 		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
942 			   ret);
943 		goto err_reo_cleanup;
944 	}
945 
946 	/* put hardware to DBS mode */
947 	if (ab->hw_params->single_pdev_only) {
948 		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
949 		if (ret) {
950 			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
951 			goto err_reo_cleanup;
952 		}
953 	}
954 
955 	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
956 	if (ret) {
957 		ath12k_err(ab, "failed to send htt version request message: %d\n",
958 			   ret);
959 		goto err_reo_cleanup;
960 	}
961 
962 	ath12k_acpi_set_dsm_func(ab);
963 
964 	/* Indicate the core start in the appropriate group */
965 	ath12k_core_to_group_ref_get(ab);
966 
967 	return 0;
968 
969 err_reo_cleanup:
970 	ath12k_dp_rx_pdev_reo_cleanup(ab);
971 err_hif_stop:
972 	ath12k_hif_stop(ab);
973 err_wmi_detach:
974 	ath12k_wmi_detach(ab);
975 	return ret;
976 }
977 
978 static void ath12k_core_device_cleanup(struct ath12k_base *ab)
979 {
980 	mutex_lock(&ab->core_lock);
981 
982 	ath12k_hif_irq_disable(ab);
983 	ath12k_core_pdev_destroy(ab);
984 
985 	mutex_unlock(&ab->core_lock);
986 }
987 
988 static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
989 {
990 	struct ath12k_base *ab;
991 	int i;
992 
993 	lockdep_assert_held(&ag->mutex);
994 
995 	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
996 
997 	ath12k_mac_unregister(ag);
998 
999 	for (i = ag->num_devices - 1; i >= 0; i--) {
1000 		ab = ag->ab[i];
1001 		if (!ab)
1002 			continue;
1003 
1004 		clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
1005 
1006 		ath12k_core_device_cleanup(ab);
1007 	}
1008 
1009 	ath12k_mac_destroy(ag);
1010 }
1011 
1012 u8 ath12k_get_num_partner_link(struct ath12k *ar)
1013 {
1014 	struct ath12k_base *partner_ab, *ab = ar->ab;
1015 	struct ath12k_hw_group *ag = ab->ag;
1016 	struct ath12k_pdev *pdev;
1017 	u8 num_link = 0;
1018 	int i, j;
1019 
1020 	lockdep_assert_held(&ag->mutex);
1021 
1022 	for (i = 0; i < ag->num_devices; i++) {
1023 		partner_ab = ag->ab[i];
1024 
1025 		for (j = 0; j < partner_ab->num_radios; j++) {
1026 			pdev = &partner_ab->pdevs[j];
1027 
1028 			/* Avoid the self link */
1029 			if (ar == pdev->ar)
1030 				continue;
1031 
1032 			num_link++;
1033 		}
1034 	}
1035 
1036 	return num_link;
1037 }
1038 
1039 static int __ath12k_mac_mlo_ready(struct ath12k *ar)
1040 {
1041 	u8 num_link = ath12k_get_num_partner_link(ar);
1042 	int ret;
1043 
1044 	if (num_link == 0)
1045 		return 0;
1046 
1047 	ret = ath12k_wmi_mlo_ready(ar);
1048 	if (ret) {
1049 		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
1050 			   ar->pdev_idx, ret);
1051 		return ret;
1052 	}
1053 
1054 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
1055 		   ar->pdev_idx);
1056 
1057 	return 0;
1058 }
1059 
1060 int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
1061 {
1062 	struct ath12k_hw *ah;
1063 	struct ath12k *ar;
1064 	int ret;
1065 	int i, j;
1066 
1067 	for (i = 0; i < ag->num_hw; i++) {
1068 		ah = ag->ah[i];
1069 		if (!ah)
1070 			continue;
1071 
1072 		for_each_ar(ah, ar, j) {
1073 			ar = &ah->radio[j];
1074 			ret = __ath12k_mac_mlo_ready(ar);
1075 			if (ret)
1076 				return ret;
1077 		}
1078 	}
1079 
1080 	return 0;
1081 }
1082 
1083 static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
1084 {
1085 	int ret, i;
1086 
1087 	if (!ag->mlo_capable)
1088 		return 0;
1089 
1090 	ret = ath12k_mac_mlo_setup(ag);
1091 	if (ret)
1092 		return ret;
1093 
1094 	for (i = 0; i < ag->num_devices; i++)
1095 		ath12k_dp_partner_cc_init(ag->ab[i]);
1096 
1097 	ret = ath12k_mac_mlo_ready(ag);
1098 	if (ret)
1099 		goto err_mlo_teardown;
1100 
1101 	return 0;
1102 
1103 err_mlo_teardown:
1104 	ath12k_mac_mlo_teardown(ag);
1105 
1106 	return ret;
1107 }
1108 
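/* Bring up the hardware group: allocate and register the mac80211 hw
 * instances and perform MLO setup across member devices (done once per
 * group), then create the DP pdevs, enable IRQs and apply the rfkill
 * configuration for every device.
 */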
1109 static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
1110 {
1111 	struct ath12k_base *ab;
1112 	int ret, i;
1113 
1114 	lockdep_assert_held(&ag->mutex);
1115 
1116 	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
1117 		goto core_pdev_create;
1118 
1119 	ret = ath12k_mac_allocate(ag);
1120 	if (WARN_ON(ret))
1121 		return ret;
1122 
1123 	ret = ath12k_core_mlo_setup(ag);
1124 	if (WARN_ON(ret))
1125 		goto err_mac_destroy;
1126 
1127 	ret = ath12k_mac_register(ag);
1128 	if (WARN_ON(ret))
1129 		goto err_mlo_teardown;
1130 
1131 	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
1132 
1133 core_pdev_create:
1134 	for (i = 0; i < ag->num_devices; i++) {
1135 		ab = ag->ab[i];
1136 		if (!ab)
1137 			continue;
1138 
1139 		mutex_lock(&ab->core_lock);
1140 
1141 		set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
1142 
1143 		ret = ath12k_core_pdev_create(ab);
1144 		if (ret) {
1145 			ath12k_err(ab, "failed to create pdev core %d\n", ret);
1146 			mutex_unlock(&ab->core_lock);
1147 			goto err;
1148 		}
1149 
1150 		ath12k_hif_irq_enable(ab);
1151 
1152 		ret = ath12k_core_rfkill_config(ab);
1153 		if (ret && ret != -EOPNOTSUPP) {
1154 			mutex_unlock(&ab->core_lock);
1155 			goto err;
1156 		}
1157 
1158 		mutex_unlock(&ab->core_lock);
1159 	}
1160 
1161 	return 0;
1162 
1163 err:
1164 	ath12k_core_hw_group_stop(ag);
1165 	return ret;
1166 
1167 err_mlo_teardown:
1168 	ath12k_mac_mlo_teardown(ag);
1169 
1170 err_mac_destroy:
1171 	ath12k_mac_destroy(ag);
1172 
1173 	return ret;
1174 }
1175 
1176 static int ath12k_core_start_firmware(struct ath12k_base *ab,
1177 				      enum ath12k_firmware_mode mode)
1178 {
1179 	int ret;
1180 
1181 	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
1182 				    &ab->qmi.ce_cfg.shadow_reg_v3_len);
1183 
1184 	ret = ath12k_qmi_firmware_start(ab, mode);
1185 	if (ret) {
1186 		ath12k_err(ab, "failed to send firmware start: %d\n", ret);
1187 		return ret;
1188 	}
1189 
1190 	return ret;
1191 }
1192 
1193 static inline
1194 bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
1195 {
1196 	lockdep_assert_held(&ag->mutex);
1197 
1198 	return (ag->num_started == ag->num_devices);
1199 }
1200 
1201 static void ath12k_fw_stats_pdevs_free(struct list_head *head)
1202 {
1203 	struct ath12k_fw_stats_pdev *i, *tmp;
1204 
1205 	list_for_each_entry_safe(i, tmp, head, list) {
1206 		list_del(&i->list);
1207 		kfree(i);
1208 	}
1209 }
1210 
1211 void ath12k_fw_stats_bcn_free(struct list_head *head)
1212 {
1213 	struct ath12k_fw_stats_bcn *i, *tmp;
1214 
1215 	list_for_each_entry_safe(i, tmp, head, list) {
1216 		list_del(&i->list);
1217 		kfree(i);
1218 	}
1219 }
1220 
1221 static void ath12k_fw_stats_vdevs_free(struct list_head *head)
1222 {
1223 	struct ath12k_fw_stats_vdev *i, *tmp;
1224 
1225 	list_for_each_entry_safe(i, tmp, head, list) {
1226 		list_del(&i->list);
1227 		kfree(i);
1228 	}
1229 }
1230 
1231 void ath12k_fw_stats_init(struct ath12k *ar)
1232 {
1233 	INIT_LIST_HEAD(&ar->fw_stats.vdevs);
1234 	INIT_LIST_HEAD(&ar->fw_stats.pdevs);
1235 	INIT_LIST_HEAD(&ar->fw_stats.bcn);
1236 	init_completion(&ar->fw_stats_complete);
1237 	init_completion(&ar->fw_stats_done);
1238 }
1239 
1240 void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
1241 {
1242 	ath12k_fw_stats_pdevs_free(&stats->pdevs);
1243 	ath12k_fw_stats_vdevs_free(&stats->vdevs);
1244 	ath12k_fw_stats_bcn_free(&stats->bcn);
1245 }
1246 
1247 void ath12k_fw_stats_reset(struct ath12k *ar)
1248 {
1249 	spin_lock_bh(&ar->data_lock);
1250 	ath12k_fw_stats_free(&ar->fw_stats);
1251 	ar->fw_stats.num_vdev_recvd = 0;
1252 	spin_unlock_bh(&ar->data_lock);
1253 }
1254 
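/* Kick the QMI host capability exchange on partner devices that were probed
 * after this device in the group, so that they can continue their own
 * firmware bring-up.
 */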
1255 static void ath12k_core_trigger_partner(struct ath12k_base *ab)
1256 {
1257 	struct ath12k_hw_group *ag = ab->ag;
1258 	struct ath12k_base *partner_ab;
1259 	bool found = false;
1260 	int i;
1261 
1262 	for (i = 0; i < ag->num_devices; i++) {
1263 		partner_ab = ag->ab[i];
1264 		if (!partner_ab)
1265 			continue;
1266 
1267 		if (found)
1268 			ath12k_qmi_trigger_host_cap(partner_ab);
1269 
1270 		found = (partner_ab == ab);
1271 	}
1272 }
1273 
1274 int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
1275 {
1276 	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
1277 	int ret, i;
1278 
1279 	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
1280 	if (ret) {
1281 		ath12k_err(ab, "failed to start firmware: %d\n", ret);
1282 		return ret;
1283 	}
1284 
1285 	ret = ath12k_ce_init_pipes(ab);
1286 	if (ret) {
1287 		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
1288 		goto err_firmware_stop;
1289 	}
1290 
1291 	ret = ath12k_dp_alloc(ab);
1292 	if (ret) {
1293 		ath12k_err(ab, "failed to init DP: %d\n", ret);
1294 		goto err_firmware_stop;
1295 	}
1296 
1297 	mutex_lock(&ag->mutex);
1298 	mutex_lock(&ab->core_lock);
1299 
1300 	ret = ath12k_core_start(ab);
1301 	if (ret) {
1302 		ath12k_err(ab, "failed to start core: %d\n", ret);
1303 		goto err_dp_free;
1304 	}
1305 
1306 	mutex_unlock(&ab->core_lock);
1307 
1308 	if (ath12k_core_hw_group_start_ready(ag)) {
1309 		ret = ath12k_core_hw_group_start(ag);
1310 		if (ret) {
1311 			ath12k_warn(ab, "unable to start hw group\n");
1312 			goto err_core_stop;
1313 		}
1314 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
1315 	} else {
1316 		ath12k_core_trigger_partner(ab);
1317 	}
1318 
1319 	mutex_unlock(&ag->mutex);
1320 
1321 	return 0;
1322 
1323 err_core_stop:
1324 	for (i = ag->num_devices - 1; i >= 0; i--) {
1325 		ab = ag->ab[i];
1326 		if (!ab)
1327 			continue;
1328 
1329 		mutex_lock(&ab->core_lock);
1330 		ath12k_core_stop(ab);
1331 		mutex_unlock(&ab->core_lock);
1332 	}
1333 	mutex_unlock(&ag->mutex);
1334 	goto exit;
1335 
1336 err_dp_free:
1337 	ath12k_dp_free(ab);
1338 	mutex_unlock(&ab->core_lock);
1339 	mutex_unlock(&ag->mutex);
1340 
1341 err_firmware_stop:
1342 	ath12k_qmi_firmware_stop(ab);
1343 
1344 exit:
1345 	return ret;
1346 }
1347 
1348 static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
1349 {
1350 	int ret, total_vdev;
1351 
1352 	mutex_lock(&ab->core_lock);
1353 	ath12k_dp_pdev_free(ab);
1354 	ath12k_ce_cleanup_pipes(ab);
1355 	ath12k_wmi_detach(ab);
1356 	ath12k_dp_rx_pdev_reo_cleanup(ab);
1357 	mutex_unlock(&ab->core_lock);
1358 
1359 	ath12k_dp_free(ab);
1360 	ath12k_hal_srng_deinit(ab);
1361 	total_vdev = ab->num_radios * TARGET_NUM_VDEVS(ab);
1362 	ab->free_vdev_map = (1LL << total_vdev) - 1;
1363 
1364 	ret = ath12k_hal_srng_init(ab);
1365 	if (ret)
1366 		return ret;
1367 
1368 	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
1369 
1370 	ret = ath12k_core_qmi_firmware_ready(ab);
1371 	if (ret)
1372 		goto err_hal_srng_deinit;
1373 
1374 	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
1375 
1376 	return 0;
1377 
1378 err_hal_srng_deinit:
1379 	ath12k_hal_srng_deinit(ab);
1380 	return ret;
1381 }
1382 
1383 static void ath12k_rfkill_work(struct work_struct *work)
1384 {
1385 	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1386 	struct ath12k_hw_group *ag = ab->ag;
1387 	struct ath12k *ar;
1388 	struct ath12k_hw *ah;
1389 	struct ieee80211_hw *hw;
1390 	bool rfkill_radio_on;
1391 	int i, j;
1392 
1393 	spin_lock_bh(&ab->base_lock);
1394 	rfkill_radio_on = ab->rfkill_radio_on;
1395 	spin_unlock_bh(&ab->base_lock);
1396 
1397 	for (i = 0; i < ag->num_hw; i++) {
1398 		ah = ath12k_ag_to_ah(ag, i);
1399 		if (!ah)
1400 			continue;
1401 
1402 		for (j = 0; j < ah->num_radio; j++) {
1403 			ar = &ah->radio[j];
1404 			if (!ar)
1405 				continue;
1406 
1407 			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1408 		}
1409 
1410 		hw = ah->hw;
1411 		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1412 	}
1413 }
1414 
1415 void ath12k_core_halt(struct ath12k *ar)
1416 {
1417 	struct list_head *pos, *n;
1418 	struct ath12k_base *ab = ar->ab;
1419 
1420 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
1421 
1422 	ar->num_created_vdevs = 0;
1423 	ar->allocated_vdev_map = 0;
1424 
1425 	ath12k_mac_scan_finish(ar);
1426 	ath12k_mac_peer_cleanup_all(ar);
1427 	cancel_delayed_work_sync(&ar->scan.timeout);
1428 	cancel_work_sync(&ar->regd_update_work);
1429 	cancel_work_sync(&ar->regd_channel_update_work);
1430 	cancel_work_sync(&ab->rfkill_work);
1431 	cancel_work_sync(&ab->update_11d_work);
1432 
1433 	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
1434 	synchronize_rcu();
1435 
1436 	spin_lock_bh(&ar->data_lock);
1437 	list_for_each_safe(pos, n, &ar->arvifs)
1438 		list_del_init(pos);
1439 	spin_unlock_bh(&ar->data_lock);
1440 
1441 	idr_init(&ar->txmgmt_idr);
1442 }
1443 
1444 static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
1445 {
1446 	struct ath12k_hw_group *ag = ab->ag;
1447 	struct ath12k *ar;
1448 	struct ath12k_hw *ah;
1449 	int i, j;
1450 
1451 	spin_lock_bh(&ab->base_lock);
1452 	ab->stats.fw_crash_counter++;
1453 	spin_unlock_bh(&ab->base_lock);
1454 
1455 	if (ab->is_reset)
1456 		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
1457 
1458 	for (i = 0; i < ag->num_hw; i++) {
1459 		ah = ath12k_ag_to_ah(ag, i);
1460 		if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
1461 		    ah->state == ATH12K_HW_STATE_TM)
1462 			continue;
1463 
1464 		wiphy_lock(ah->hw->wiphy);
1465 
1466 		/* If queue 0 is stopped, it is safe to assume that all
1467 		 * other queues have already been stopped by the driver via
1468 		 * ieee80211_stop_queues() below. This means there is no
1469 		 * need to stop them again, hence continue.
1470 		 */
1471 		if (ieee80211_queue_stopped(ah->hw, 0)) {
1472 			wiphy_unlock(ah->hw->wiphy);
1473 			continue;
1474 		}
1475 
1476 		ieee80211_stop_queues(ah->hw);
1477 
1478 		for (j = 0; j < ah->num_radio; j++) {
1479 			ar = &ah->radio[j];
1480 
1481 			ath12k_mac_drain_tx(ar);
1482 			ar->state_11d = ATH12K_11D_IDLE;
1483 			complete(&ar->completed_11d_scan);
1484 			complete(&ar->scan.started);
1485 			complete_all(&ar->scan.completed);
1486 			complete(&ar->scan.on_channel);
1487 			complete(&ar->peer_assoc_done);
1488 			complete(&ar->peer_delete_done);
1489 			complete(&ar->install_key_done);
1490 			complete(&ar->vdev_setup_done);
1491 			complete(&ar->vdev_delete_done);
1492 			complete(&ar->bss_survey_done);
1493 			complete_all(&ar->regd_update_completed);
1494 
1495 			wake_up(&ar->dp.tx_empty_waitq);
1496 			idr_for_each(&ar->txmgmt_idr,
1497 				     ath12k_mac_tx_mgmt_pending_free, ar);
1498 			idr_destroy(&ar->txmgmt_idr);
1499 			wake_up(&ar->txmgmt_empty_waitq);
1500 
1501 			ar->monitor_vdev_id = -1;
1502 			ar->monitor_vdev_created = false;
1503 			ar->monitor_started = false;
1504 		}
1505 
1506 		wiphy_unlock(ah->hw->wiphy);
1507 	}
1508 
1509 	wake_up(&ab->wmi_ab.tx_credits_wq);
1510 	wake_up(&ab->peer_mapping_wq);
1511 }
1512 
1513 static void ath12k_update_11d(struct work_struct *work)
1514 {
1515 	struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
1516 	struct ath12k *ar;
1517 	struct ath12k_pdev *pdev;
1518 	struct wmi_set_current_country_arg arg = {};
1519 	int ret, i;
1520 
1521 	spin_lock_bh(&ab->base_lock);
1522 	memcpy(&arg.alpha2, &ab->new_alpha2, 2);
1523 	spin_unlock_bh(&ab->base_lock);
1524 
1525 	ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
1526 		   arg.alpha2[0], arg.alpha2[1]);
1527 
1528 	for (i = 0; i < ab->num_radios; i++) {
1529 		pdev = &ab->pdevs[i];
1530 		ar = pdev->ar;
1531 
1532 		memcpy(&ar->alpha2, &arg.alpha2, 2);
1533 
1534 		reinit_completion(&ar->regd_update_completed);
1535 
1536 		ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
1537 		if (ret)
1538 			ath12k_warn(ar->ab,
1539 				    "pdev id %d failed set current country code: %d\n",
1540 				    i, ret);
1541 	}
1542 }
1543 
1544 static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
1545 {
1546 	struct ath12k_hw_group *ag = ab->ag;
1547 	struct ath12k_hw *ah;
1548 	struct ath12k *ar;
1549 	int i, j;
1550 
1551 	for (i = 0; i < ag->num_hw; i++) {
1552 		ah = ath12k_ag_to_ah(ag, i);
1553 		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
1554 			continue;
1555 
1556 		wiphy_lock(ah->hw->wiphy);
1557 		mutex_lock(&ah->hw_mutex);
1558 
1559 		switch (ah->state) {
1560 		case ATH12K_HW_STATE_ON:
1561 			ah->state = ATH12K_HW_STATE_RESTARTING;
1562 
1563 			for (j = 0; j < ah->num_radio; j++) {
1564 				ar = &ah->radio[j];
1565 				ath12k_core_halt(ar);
1566 			}
1567 
1568 			break;
1569 		case ATH12K_HW_STATE_OFF:
1570 			ath12k_warn(ab,
1571 				    "cannot restart hw %d that hasn't been started\n",
1572 				    i);
1573 			break;
1574 		case ATH12K_HW_STATE_RESTARTING:
1575 			break;
1576 		case ATH12K_HW_STATE_RESTARTED:
1577 			ah->state = ATH12K_HW_STATE_WEDGED;
1578 			fallthrough;
1579 		case ATH12K_HW_STATE_WEDGED:
1580 			ath12k_warn(ab,
1581 				    "device is wedged, will not restart hw %d\n", i);
1582 			break;
1583 		case ATH12K_HW_STATE_TM:
1584 			ath12k_warn(ab, "fw mode reset done radio %d\n", i);
1585 			break;
1586 		}
1587 
1588 		mutex_unlock(&ah->hw_mutex);
1589 		wiphy_unlock(ah->hw->wiphy);
1590 	}
1591 
1592 	complete(&ab->driver_recovery);
1593 }
1594 
1595 static void ath12k_core_restart(struct work_struct *work)
1596 {
1597 	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
1598 	struct ath12k_hw_group *ag = ab->ag;
1599 	struct ath12k_hw *ah;
1600 	int ret, i;
1601 
1602 	ret = ath12k_core_reconfigure_on_crash(ab);
1603 	if (ret) {
1604 		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
1605 		return;
1606 	}
1607 
1608 	if (ab->is_reset) {
1609 		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
1610 			atomic_dec(&ab->reset_count);
1611 			complete(&ab->reset_complete);
1612 			ab->is_reset = false;
1613 			atomic_set(&ab->fail_cont_count, 0);
1614 			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
1615 		}
1616 
1617 		mutex_lock(&ag->mutex);
1618 
1619 		if (!ath12k_core_hw_group_start_ready(ag)) {
1620 			mutex_unlock(&ag->mutex);
1621 			goto exit_restart;
1622 		}
1623 
1624 		for (i = 0; i < ag->num_hw; i++) {
1625 			ah = ath12k_ag_to_ah(ag, i);
1626 			ieee80211_restart_hw(ah->hw);
1627 		}
1628 
1629 		mutex_unlock(&ag->mutex);
1630 	}
1631 
1632 exit_restart:
1633 	complete(&ab->restart_completed);
1634 }
1635 
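/* Full device reset worker: bail out if recovery keeps failing, serialize
 * with any reset already in flight, collect a coredump, halt the radios and
 * power the device down. The last device in the group to reach this point
 * powers all group members back up again.
 */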
1636 static void ath12k_core_reset(struct work_struct *work)
1637 {
1638 	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
1639 	struct ath12k_hw_group *ag = ab->ag;
1640 	int reset_count, fail_cont_count, i;
1641 	long time_left;
1642 
1643 	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
1644 		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
1645 		return;
1646 	}
1647 
1648 	/* Sometimes recovery fails, and then every subsequent recovery attempt
1649 	 * fails as well; bail out here to avoid an endless recovery loop.
1650 	 */
1651 	fail_cont_count = atomic_read(&ab->fail_cont_count);
1652 
1653 	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
1654 		return;
1655 
1656 	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
1657 	    time_before(jiffies, ab->reset_fail_timeout))
1658 		return;
1659 
1660 	reset_count = atomic_inc_return(&ab->reset_count);
1661 
1662 	if (reset_count > 1) {
1663 		/* Sometimes another reset worker is scheduled before the previous
1664 		 * one has completed; the second worker would then destroy the state
1665 		 * of the first one. The wait below avoids that.
1666 		 */
1667 		ath12k_warn(ab, "already resetting count %d\n", reset_count);
1668 
1669 		reinit_completion(&ab->reset_complete);
1670 		time_left = wait_for_completion_timeout(&ab->reset_complete,
1671 							ATH12K_RESET_TIMEOUT_HZ);
1672 		if (time_left) {
1673 			ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
1674 			atomic_dec(&ab->reset_count);
1675 			return;
1676 		}
1677 
1678 		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
1679 		/* Record the continuous recovery failure count when recovery fails */
1680 		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
1681 	}
1682 
1683 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");
1684 
1685 	ab->is_reset = true;
1686 	atomic_set(&ab->recovery_count, 0);
1687 
1688 	ath12k_coredump_collect(ab);
1689 	ath12k_core_pre_reconfigure_recovery(ab);
1690 
1691 	ath12k_core_post_reconfigure_recovery(ab);
1692 
1693 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");
1694 
1695 	ath12k_hif_irq_disable(ab);
1696 	ath12k_hif_ce_irq_disable(ab);
1697 
1698 	ath12k_hif_power_down(ab, false);
1699 
1700 	/* prepare for power up */
1701 	ab->qmi.num_radios = U8_MAX;
1702 
1703 	mutex_lock(&ag->mutex);
1704 	ath12k_core_to_group_ref_put(ab);
1705 
1706 	if (ag->num_started > 0) {
1707 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
1708 			   "waiting for %d partner device(s) to reset\n",
1709 			   ag->num_started);
1710 		mutex_unlock(&ag->mutex);
1711 		return;
1712 	}
1713 
1714 	/* Prepare MLO global memory region for power up */
1715 	ath12k_qmi_reset_mlo_mem(ag);
1716 
1717 	for (i = 0; i < ag->num_devices; i++) {
1718 		ab = ag->ab[i];
1719 		if (!ab)
1720 			continue;
1721 
1722 		ath12k_hif_power_up(ab);
1723 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
1724 	}
1725 
1726 	mutex_unlock(&ag->mutex);
1727 }
1728 
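/* Select the QMI memory profile based on total system RAM: use the reduced
 * 512 MB profile on systems with less than 512 MB, otherwise the default one.
 */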
1729 enum ath12k_qmi_mem_mode ath12k_core_get_memory_mode(struct ath12k_base *ab)
1730 {
1731 	unsigned long total_ram;
1732 	struct sysinfo si;
1733 
1734 	si_meminfo(&si);
1735 	total_ram = si.totalram * si.mem_unit;
1736 
1737 	if (total_ram < SZ_512M)
1738 		return ATH12K_QMI_MEMORY_MODE_LOW_512_M;
1739 
1740 	return ATH12K_QMI_MEMORY_MODE_DEFAULT;
1741 }
1742 
1743 int ath12k_core_pre_init(struct ath12k_base *ab)
1744 {
1745 	const struct ath12k_mem_profile_based_param *param;
1746 	int ret;
1747 
1748 	ret = ath12k_hw_init(ab);
1749 	if (ret) {
1750 		ath12k_err(ab, "failed to init hw params: %d\n", ret);
1751 		return ret;
1752 	}
1753 
1754 	param = &ath12k_mem_profile_based_param[ab->target_mem_mode];
1755 	ab->profile_param = param;
1756 	ath12k_fw_map(ab);
1757 
1758 	return 0;
1759 }
1760 
1761 static int ath12k_core_panic_handler(struct notifier_block *nb,
1762 				     unsigned long action, void *data)
1763 {
1764 	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
1765 					      panic_nb);
1766 
1767 	return ath12k_hif_panic_handler(ab);
1768 }
1769 
1770 static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
1771 {
1772 	ab->panic_nb.notifier_call = ath12k_core_panic_handler;
1773 
1774 	return atomic_notifier_chain_register(&panic_notifier_list,
1775 					      &ab->panic_nb);
1776 }
1777 
1778 static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
1779 {
1780 	atomic_notifier_chain_unregister(&panic_notifier_list,
1781 					 &ab->panic_nb);
1782 }
1783 
1784 static inline
1785 bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1786 {
1787 	lockdep_assert_held(&ag->mutex);
1788 
1789 	return (ag->num_probed == ag->num_devices);
1790 }
1791 
1792 static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
1793 {
1794 	struct ath12k_hw_group *ag;
1795 	int count = 0;
1796 
1797 	lockdep_assert_held(&ath12k_hw_group_mutex);
1798 
1799 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1800 		count++;
1801 
1802 	ag = kzalloc(sizeof(*ag), GFP_KERNEL);
1803 	if (!ag)
1804 		return NULL;
1805 
1806 	ag->id = count;
1807 	list_add(&ag->list, &ath12k_hw_group_list);
1808 	mutex_init(&ag->mutex);
1809 	ag->mlo_capable = false;
1810 
1811 	return ag;
1812 }
1813 
1814 static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
1815 {
1816 	mutex_lock(&ath12k_hw_group_mutex);
1817 
1818 	list_del(&ag->list);
1819 	kfree(ag);
1820 
1821 	mutex_unlock(&ath12k_hw_group_mutex);
1822 }
1823 
1824 static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
1825 {
1826 	struct ath12k_hw_group *ag;
1827 	int i;
1828 
1829 	if (!ab->dev->of_node)
1830 		return NULL;
1831 
1832 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1833 		for (i = 0; i < ag->num_devices; i++)
1834 			if (ag->wsi_node[i] == ab->dev->of_node)
1835 				return ag;
1836 
1837 	return NULL;
1838 }
1839 
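/* Enumerate the devices connected over WSI by following the OF graph:
 * starting from this device's node, repeatedly hop from the local tx endpoint
 * to the remote rx endpoint until the walk returns to the starting node,
 * recording each node in ag->wsi_node[] and the count in ag->num_devices.
 */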
1840 static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
1841 				    struct ath12k_base *ab)
1842 {
1843 	struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
1844 	struct device_node *tx_endpoint, *next_rx_endpoint;
1845 	int device_count = 0;
1846 
1847 	next_wsi_dev = wsi_dev;
1848 
1849 	if (!next_wsi_dev)
1850 		return -ENODEV;
1851 
1852 	do {
1853 		ag->wsi_node[device_count] = next_wsi_dev;
1854 
1855 		tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
1856 		if (!tx_endpoint) {
1857 			of_node_put(next_wsi_dev);
1858 			return -ENODEV;
1859 		}
1860 
1861 		next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
1862 		if (!next_rx_endpoint) {
1863 			of_node_put(next_wsi_dev);
1864 			of_node_put(tx_endpoint);
1865 			return -ENODEV;
1866 		}
1867 
1868 		of_node_put(tx_endpoint);
1869 		of_node_put(next_wsi_dev);
1870 
1871 		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
1872 		if (!next_wsi_dev) {
1873 			of_node_put(next_rx_endpoint);
1874 			return -ENODEV;
1875 		}
1876 
1877 		of_node_put(next_rx_endpoint);
1878 
1879 		device_count++;
1880 		if (device_count > ATH12K_MAX_DEVICES) {
1881 			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
1882 				    device_count, ATH12K_MAX_DEVICES);
1883 			of_node_put(next_wsi_dev);
1884 			return -EINVAL;
1885 		}
1886 	} while (wsi_dev != next_wsi_dev);
1887 
1888 	of_node_put(next_wsi_dev);
1889 	ag->num_devices = device_count;
1890 
1891 	return 0;
1892 }
1893 
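/* Derive this device's logical WSI index as its distance (in walk order) from
 * the node flagged "qcom,wsi-controller" in the device tree, which gets
 * index 0.
 */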
1894 static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
1895 				     struct ath12k_base *ab)
1896 {
1897 	int i, wsi_controller_index = -1, node_index = -1;
1898 	bool control;
1899 
1900 	for (i = 0; i < ag->num_devices; i++) {
1901 		control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
1902 		if (control)
1903 			wsi_controller_index = i;
1904 
1905 		if (ag->wsi_node[i] == ab->dev->of_node)
1906 			node_index = i;
1907 	}
1908 
1909 	if (wsi_controller_index == -1) {
1910 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
1911 		return -EINVAL;
1912 	}
1913 
1914 	if (node_index == -1) {
1915 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
1916 		return -EINVAL;
1917 	}
1918 
1919 	ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
1920 		ag->num_devices;
1921 
1922 	return 0;
1923 }
1924 
1925 static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
1926 {
1927 	struct ath12k_wsi_info *wsi = &ab->wsi_info;
1928 	struct ath12k_hw_group *ag;
1929 
1930 	lockdep_assert_held(&ath12k_hw_group_mutex);
1931 
1932 	if (ath12k_ftm_mode)
1933 		goto invalid_group;
1934 
1935 	/* Grouping of multiple devices is done based on the device tree.
1936 	 * Platforms that do not have any valid group information end up with
1937 	 * each device being part of its own invalid group.
1938 	 *
1939 	 * Group id ATH12K_INVALID_GROUP_ID is used for a single device group
1940 	 * whose dt entry is missing or wrong, so there can be many groups
1941 	 * with the same group id, i.e. ATH12K_INVALID_GROUP_ID. The default
1942 	 * group id of ATH12K_INVALID_GROUP_ID combined with the number of
1943 	 * devices in ath12k_hw_group therefore determines whether the group
1944 	 * is a multi device or a single device group.
1945 	 */
1946 
1947 	ag = ath12k_core_hw_group_find_by_dt(ab);
1948 	if (!ag) {
1949 		ag = ath12k_core_hw_group_alloc(ab);
1950 		if (!ag) {
1951 			ath12k_warn(ab, "unable to create new hw group\n");
1952 			return NULL;
1953 		}
1954 
1955 		if (ath12k_core_get_wsi_info(ag, ab) ||
1956 		    ath12k_core_get_wsi_index(ag, ab)) {
1957 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
1958 				   "unable to get wsi info from dt, grouping single device");
1959 			ag->id = ATH12K_INVALID_GROUP_ID;
1960 			ag->num_devices = 1;
1961 			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
1962 			wsi->index = 0;
1963 		}
1964 
1965 		goto exit;
1966 	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
1967 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
1968 			   ag->id);
1969 		goto invalid_group;
1970 	} else {
1971 		if (ath12k_core_get_wsi_index(ag, ab))
1972 			goto invalid_group;
1973 		goto exit;
1974 	}
1975 
1976 invalid_group:
1977 	ag = ath12k_core_hw_group_alloc(ab);
1978 	if (!ag) {
1979 		ath12k_warn(ab, "unable to create new hw group\n");
1980 		return NULL;
1981 	}
1982 
1983 	ag->id = ATH12K_INVALID_GROUP_ID;
1984 	ag->num_devices = 1;
1985 	wsi->index = 0;
1986 
1987 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");
1988 
1989 exit:
1990 	if (ag->num_probed >= ag->num_devices) {
1991 		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
1992 		goto invalid_group;
1993 	}
1994 
1995 	ab->device_id = ag->num_probed++;
1996 	ag->ab[ab->device_id] = ab;
1997 	ab->ag = ag;
1998 
1999 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
2000 		   ag->id, ag->num_devices, wsi->index);
2001 
2002 	return ag;
2003 }
2004 
2005 void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
2006 {
2007 	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
2008 	u8 device_id = ab->device_id;
2009 	int num_probed;
2010 
2011 	if (!ag)
2012 		return;
2013 
2014 	mutex_lock(&ag->mutex);
2015 
2016 	if (WARN_ON(device_id >= ag->num_devices)) {
2017 		mutex_unlock(&ag->mutex);
2018 		return;
2019 	}
2020 
2021 	if (WARN_ON(ag->ab[device_id] != ab)) {
2022 		mutex_unlock(&ag->mutex);
2023 		return;
2024 	}
2025 
2026 	ag->ab[device_id] = NULL;
2027 	ab->ag = NULL;
2028 	ab->device_id = ATH12K_INVALID_DEVICE_ID;
2029 
2030 	if (ag->num_probed)
2031 		ag->num_probed--;
2032 
2033 	num_probed = ag->num_probed;
2034 
2035 	mutex_unlock(&ag->mutex);
2036 
2037 	if (!num_probed)
2038 		ath12k_core_hw_group_free(ag);
2039 }
2040 
2041 static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
2042 {
2043 	struct ath12k_base *ab;
2044 	int i;
2045 
2046 	if (WARN_ON(!ag))
2047 		return;
2048 
2049 	for (i = 0; i < ag->num_devices; i++) {
2050 		ab = ag->ab[i];
2051 		if (!ab)
2052 			continue;
2053 
2054 		ath12k_core_soc_destroy(ab);
2055 	}
2056 }
2057 
2058 void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
2059 {
2060 	struct ath12k_base *ab;
2061 	int i;
2062 
2063 	if (!ag)
2064 		return;
2065 
2066 	mutex_lock(&ag->mutex);
2067 
2068 	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
2069 		mutex_unlock(&ag->mutex);
2070 		return;
2071 	}
2072 
2073 	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);
2074 
2075 	ath12k_core_hw_group_stop(ag);
2076 
2077 	for (i = 0; i < ag->num_devices; i++) {
2078 		ab = ag->ab[i];
2079 		if (!ab)
2080 			continue;
2081 
2082 		mutex_lock(&ab->core_lock);
2083 		ath12k_core_stop(ab);
2084 		mutex_unlock(&ab->core_lock);
2085 	}
2086 
2087 	mutex_unlock(&ag->mutex);
2088 }
2089 
2090 static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
2091 {
2092 	struct ath12k_base *ab;
2093 	int i, ret;
2094 
2095 	lockdep_assert_held(&ag->mutex);
2096 
2097 	for (i = 0; i < ag->num_devices; i++) {
2098 		ab = ag->ab[i];
2099 		if (!ab)
2100 			continue;
2101 
2102 		mutex_lock(&ab->core_lock);
2103 
2104 		ret = ath12k_core_soc_create(ab);
2105 		if (ret) {
2106 			mutex_unlock(&ab->core_lock);
2107 			ath12k_err(ab, "failed to create soc %d core: %d\n", i, ret);
2108 			goto destroy;
2109 		}
2110 
2111 		mutex_unlock(&ab->core_lock);
2112 	}
2113 
2114 	return 0;
2115 
2116 destroy:
2117 	for (i--; i >= 0; i--) {
2118 		ab = ag->ab[i];
2119 		if (!ab)
2120 			continue;
2121 
2122 		mutex_lock(&ab->core_lock);
2123 		ath12k_core_soc_destroy(ab);
2124 		mutex_unlock(&ab->core_lock);
2125 	}
2126 
2127 	return ret;
2128 }
2129 
2130 void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
2131 {
2132 	struct ath12k_base *ab;
2133 	int i;
2134 
2135 	if (ath12k_ftm_mode)
2136 		return;
2137 
2138 	lockdep_assert_held(&ag->mutex);
2139 
2140 	if (ag->num_devices == 1) {
2141 		ab = ag->ab[0];
2142 		/* QCN9274 firmware uses firmware IE for MLO advertisement */
2143 		if (ab->fw.fw_features_valid) {
2144 			ag->mlo_capable =
2145 				ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
2146 			return;
2147 		}
2148 
2149 		/* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
2150 		ag->mlo_capable = ab->single_chip_mlo_support;
2151 		return;
2152 	}
2153 
2154 	ag->mlo_capable = true;
2155 
2156 	for (i = 0; i < ag->num_devices; i++) {
2157 		ab = ag->ab[i];
2158 		if (!ab)
2159 			continue;
2160 
2161 		/* Even if a single device's firmware indicates that MLO is
2162 		 * unsupported, mark MLO as unsupported for the whole group.
2163 		 */
2164 		if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
2165 			ag->mlo_capable = false;
2166 			return;
2167 		}
2168 	}
2169 }
2170 
2171 int ath12k_core_init(struct ath12k_base *ab)
2172 {
2173 	struct ath12k_hw_group *ag;
2174 	int ret;
2175 
2176 	ret = ath12k_core_panic_notifier_register(ab);
2177 	if (ret)
2178 		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
2179 
2180 	mutex_lock(&ath12k_hw_group_mutex);
2181 
2182 	ag = ath12k_core_hw_group_assign(ab);
2183 	if (!ag) {
2184 		mutex_unlock(&ath12k_hw_group_mutex);
2185 		ath12k_warn(ab, "unable to get hw group\n");
2186 		ret = -ENODEV;
2187 		goto err_unregister_notifier;
2188 	}
2189 
2190 	mutex_unlock(&ath12k_hw_group_mutex);
2191 
2192 	mutex_lock(&ag->mutex);
2193 
2194 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
2195 		   ag->num_devices, ag->num_probed);
2196 
2197 	if (ath12k_core_hw_group_create_ready(ag)) {
2198 		ret = ath12k_core_hw_group_create(ag);
2199 		if (ret) {
2200 			mutex_unlock(&ag->mutex);
2201 			ath12k_warn(ab, "unable to create hw group\n");
2202 			goto err_unassign_hw_group;
2203 		}
2204 	}
2205 
2206 	mutex_unlock(&ag->mutex);
2207 
2208 	return 0;
2209 
2210 err_unassign_hw_group:
2211 	ath12k_core_hw_group_unassign(ab);
2212 err_unregister_notifier:
2213 	ath12k_core_panic_notifier_unregister(ab);
2214 
2215 	return ret;
2216 }
2217 
2218 void ath12k_core_deinit(struct ath12k_base *ab)
2219 {
2220 	ath12k_core_hw_group_destroy(ab->ag);
2221 	ath12k_core_hw_group_unassign(ab);
2222 	ath12k_core_panic_notifier_unregister(ab);
2223 }
2224 
2225 void ath12k_core_free(struct ath12k_base *ab)
2226 {
2227 	timer_delete_sync(&ab->rx_replenish_retry);
2228 	destroy_workqueue(ab->workqueue_aux);
2229 	destroy_workqueue(ab->workqueue);
2230 	kfree(ab);
2231 }
2232 
2233 struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
2234 				      enum ath12k_bus bus)
2235 {
2236 	struct ath12k_base *ab;
2237 
2238 	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
2239 	if (!ab)
2240 		return NULL;
2241 
2242 	init_completion(&ab->driver_recovery);
2243 
2244 	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
2245 	if (!ab->workqueue)
2246 		goto err_sc_free;
2247 
2248 	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
2249 	if (!ab->workqueue_aux)
2250 		goto err_free_wq;
2251 
2252 	mutex_init(&ab->core_lock);
2253 	spin_lock_init(&ab->base_lock);
2254 	init_completion(&ab->reset_complete);
2255 
2256 	INIT_LIST_HEAD(&ab->peers);
2257 	init_waitqueue_head(&ab->peer_mapping_wq);
2258 	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
2259 	INIT_WORK(&ab->restart_work, ath12k_core_restart);
2260 	INIT_WORK(&ab->reset_work, ath12k_core_reset);
2261 	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
2262 	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
2263 	INIT_WORK(&ab->update_11d_work, ath12k_update_11d);
2264 
2265 	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
2266 	init_completion(&ab->htc_suspend);
2267 	init_completion(&ab->restart_completed);
2268 	init_completion(&ab->wow.wakeup_completed);
2269 
2270 	ab->dev = dev;
2271 	ab->hif.bus = bus;
2272 	ab->qmi.num_radios = U8_MAX;
2273 	ab->single_chip_mlo_support = false;
2274 
2275 	/* Device index used to identify the devices in a group.
2276 	 *
2277 	 * In intra-device MLO only one device is present in a group,
2278 	 * so it is always zero.
2279 	 *
2280 	 * In inter-device MLO multiple devices are present in a group,
2281 	 * so expect non-zero values.
2282 	 */
2283 	ab->device_id = 0;
2284 
2285 	return ab;
2286 
2287 err_free_wq:
2288 	destroy_workqueue(ab->workqueue);
2289 err_sc_free:
2290 	kfree(ab);
2291 	return NULL;
2292 }
2293 
2294 static int ath12k_init(void)
2295 {
2296 	ahb_err = ath12k_ahb_init();
2297 	if (ahb_err)
2298 		pr_warn("Failed to initialize ath12k AHB device: %d\n", ahb_err);
2299 
2300 	pci_err = ath12k_pci_init();
2301 	if (pci_err)
2302 		pr_warn("Failed to initialize ath12k PCI device: %d\n", pci_err);
2303 
2304 	/* If both failed, return one of the failures (arbitrary) */
2305 	return ahb_err && pci_err ? ahb_err : 0;
2306 }
2307 
2308 static void ath12k_exit(void)
2309 {
2310 	if (!pci_err)
2311 		ath12k_pci_exit();
2312 
2313 	if (!ahb_err)
2314 		ath12k_ahb_exit();
2315 }
2316 
2317 module_init(ath12k_init);
2318 module_exit(ath12k_exit);
2319 
2320 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN devices");
2321 MODULE_LICENSE("Dual BSD/GPL");
2322