xref: /linux/drivers/net/wireless/ath/ath12k/core.c (revision bca5cfbb694d66a1c482d0c347eee80f6afbc870)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/slab.h>
9 #include <linux/remoteproc.h>
10 #include <linux/firmware.h>
11 #include <linux/of.h>
12 #include <linux/of_graph.h>
13 #include "ahb.h"
14 #include "core.h"
15 #include "dp_tx.h"
16 #include "dp_rx.h"
17 #include "debug.h"
18 #include "debugfs.h"
19 #include "fw.h"
20 #include "hif.h"
21 #include "pci.h"
22 #include "wow.h"
23 
/* Module load status of the AHB and PCI bus glue.
 * NOTE(review): neither is referenced in this chunk - presumably set by the
 * module init path further down; confirm before removing.
 */
static int ahb_err, pci_err;

/* Bitmask of enabled debug message categories, writable at runtime. */
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");

/* When set, firmware boots in factory test mode instead of mission mode
 * (see ath12k_core_soc_create()).
 */
bool ath12k_ftm_mode;
module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");

/* protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);

static DEFINE_MUTEX(ath12k_hw_group_mutex);
37 
38 static int ath12k_core_rfkill_config(struct ath12k_base *ab)
39 {
40 	struct ath12k *ar;
41 	int ret = 0, i;
42 
43 	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
44 		return 0;
45 
46 	if (ath12k_acpi_get_disable_rfkill(ab))
47 		return 0;
48 
49 	for (i = 0; i < ab->num_radios; i++) {
50 		ar = ab->pdevs[i].ar;
51 
52 		ret = ath12k_mac_rfkill_config(ar);
53 		if (ret && ret != -EOPNOTSUPP) {
54 			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
55 			return ret;
56 		}
57 	}
58 
59 	return ret;
60 }
61 
62 /* Check if we need to continue with suspend/resume operation.
63  * Return:
64  *	a negative value: error happens and don't continue.
65  *	0:  no error but don't continue.
66  *	positive value: no error and do continue.
67  */
68 static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
69 {
70 	struct ath12k *ar;
71 
72 	if (!ab->hw_params->supports_suspend)
73 		return -EOPNOTSUPP;
74 
75 	/* so far single_pdev_only chips have supports_suspend as true
76 	 * so pass 0 as a dummy pdev_id here.
77 	 */
78 	ar = ab->pdevs[0].ar;
79 	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
80 		return 0;
81 
82 	return 1;
83 }
84 
85 int ath12k_core_suspend(struct ath12k_base *ab)
86 {
87 	struct ath12k *ar;
88 	int ret, i;
89 
90 	ret = ath12k_core_continue_suspend_resume(ab);
91 	if (ret <= 0)
92 		return ret;
93 
94 	for (i = 0; i < ab->num_radios; i++) {
95 		ar = ab->pdevs[i].ar;
96 		if (!ar)
97 			continue;
98 
99 		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
100 
101 		ret = ath12k_mac_wait_tx_complete(ar);
102 		if (ret) {
103 			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
104 			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
105 			return ret;
106 		}
107 
108 		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
109 	}
110 
111 	/* PM framework skips suspend_late/resume_early callbacks
112 	 * if other devices report errors in their suspend callbacks.
113 	 * However ath12k_core_resume() would still be called because
114 	 * here we return success thus kernel put us on dpm_suspended_list.
115 	 * Since we won't go through a power down/up cycle, there is
116 	 * no chance to call complete(&ab->restart_completed) in
117 	 * ath12k_core_restart(), making ath12k_core_resume() timeout.
118 	 * So call it here to avoid this issue. This also works in case
119 	 * no error happens thus suspend_late/resume_early get called,
120 	 * because it will be reinitialized in ath12k_core_resume_early().
121 	 */
122 	complete(&ab->restart_completed);
123 
124 	return 0;
125 }
126 EXPORT_SYMBOL(ath12k_core_suspend);
127 
128 int ath12k_core_suspend_late(struct ath12k_base *ab)
129 {
130 	int ret;
131 
132 	ret = ath12k_core_continue_suspend_resume(ab);
133 	if (ret <= 0)
134 		return ret;
135 
136 	ath12k_acpi_stop(ab);
137 
138 	ath12k_hif_irq_disable(ab);
139 	ath12k_hif_ce_irq_disable(ab);
140 
141 	ath12k_hif_power_down(ab, true);
142 
143 	return 0;
144 }
145 EXPORT_SYMBOL(ath12k_core_suspend_late);
146 
147 int ath12k_core_resume_early(struct ath12k_base *ab)
148 {
149 	int ret;
150 
151 	ret = ath12k_core_continue_suspend_resume(ab);
152 	if (ret <= 0)
153 		return ret;
154 
155 	reinit_completion(&ab->restart_completed);
156 	ret = ath12k_hif_power_up(ab);
157 	if (ret)
158 		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
159 
160 	return ret;
161 }
162 EXPORT_SYMBOL(ath12k_core_resume_early);
163 
164 int ath12k_core_resume(struct ath12k_base *ab)
165 {
166 	long time_left;
167 	int ret;
168 
169 	ret = ath12k_core_continue_suspend_resume(ab);
170 	if (ret <= 0)
171 		return ret;
172 
173 	time_left = wait_for_completion_timeout(&ab->restart_completed,
174 						ATH12K_RESET_TIMEOUT_HZ);
175 	if (time_left == 0) {
176 		ath12k_warn(ab, "timeout while waiting for restart complete");
177 		return -ETIMEDOUT;
178 	}
179 
180 	return 0;
181 }
182 EXPORT_SYMBOL(ath12k_core_resume);
183 
/* Build the lookup name used to select a board data blob.
 *
 * @with_variant: append ",variant=<bdf_ext>" when a variant string was
 *	discovered (e.g. via SMBIOS, see ath12k_core_check_smbios()).
 * @bus_type_mode: emit only "bus=<bus>" (bus-wide fallback lookup).
 * @with_default: substitute ATH12K_BOARD_ID_DEFAULT for the QMI board id.
 *
 * Always returns 0; the result is written into @name (truncated to
 * @name_len by scnprintf()).
 */
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					   size_t name_len, bool with_variant,
					   bool bus_type_mode, bool with_default)
{
	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };

	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
		scnprintf(variant, sizeof(variant), ",variant=%s",
			  ab->qmi.target.bdf_ext);

	switch (ab->id.bdf_search) {
	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
		/* PCI-style search: include the full PCI identity. */
		if (bus_type_mode)
			scnprintf(name, name_len,
				  "bus=%s",
				  ath12k_bus_str(ab->hif.bus));
		else
			scnprintf(name, name_len,
				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
				  ath12k_bus_str(ab->hif.bus),
				  ab->id.vendor, ab->id.device,
				  ab->id.subsystem_vendor,
				  ab->id.subsystem_device,
				  ab->qmi.target.chip_id,
				  ab->qmi.target.board_id,
				  variant);
		break;
	default:
		/* Chip/board ids as reported by firmware over QMI. */
		scnprintf(name, name_len,
			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
			  ath12k_bus_str(ab->hif.bus),
			  ab->qmi.target.chip_id,
			  with_default ?
			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
			  variant);
		break;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);

	return 0;
}
227 
228 static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
229 					 size_t name_len)
230 {
231 	return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
232 }
233 
234 static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
235 						  size_t name_len)
236 {
237 	return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
238 }
239 
240 static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
241 						  size_t name_len)
242 {
243 	return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
244 }
245 
246 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
247 						    const char *file)
248 {
249 	const struct firmware *fw;
250 	char path[100];
251 	int ret;
252 
253 	if (!file)
254 		return ERR_PTR(-ENOENT);
255 
256 	ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
257 
258 	ret = firmware_request_nowarn(&fw, path, ab->dev);
259 	if (ret)
260 		return ERR_PTR(ret);
261 
262 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
263 		   path, fw->size);
264 
265 	return fw;
266 }
267 
/* Release board data fetched earlier and zero @bd for reuse.  bd->fw may
 * legitimately hold an ERR_PTR() left by a failed fetch, hence the
 * IS_ERR() guard before release_firmware().
 */
void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	if (!IS_ERR(bd->fw))
		release_firmware(bd->fw);

	memset(bd, 0, sizeof(*bd));
}
275 
/* Scan one ATH12K_BD_IE_BOARD/ATH12K_BD_IE_REGDB container for the entry
 * matching @boardname.
 *
 * The container is a sequence of TLVs (struct ath12k_fw_ie): a name TLV
 * (@name_id) announces which board the next data TLV (@data_id) belongs
 * to.  On a match bd->data/bd->len point INTO @buf (no copy), so @bd
 * borrows the caller's firmware buffer.
 *
 * Return: 0 on match, -ENOENT when nothing matches, -EINVAL on a
 * malformed container.
 */
static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
					 struct ath12k_board_data *bd,
					 const void *buf, size_t buf_len,
					 const char *boardname,
					 int ie_id,
					 int name_id,
					 int data_id)
{
	const struct ath12k_fw_ie *hdr;
	bool name_match_found;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	name_match_found = false;

	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath12k_fw_ie)) {
		hdr = buf;
		board_ie_id = le32_to_cpu(hdr->id);
		board_ie_len = le32_to_cpu(hdr->len);
		board_ie_data = hdr->data;

		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		/* TLV payloads are padded to 4 bytes; the remaining buffer
		 * must cover the padded length or the file is truncated.
		 */
		if (buf_len < ALIGN(board_ie_len, 4)) {
			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
				   ath12k_bd_ie_type_str(ie_id),
				   buf_len, ALIGN(board_ie_len, 4));
			ret = -EINVAL;
			goto out;
		}

		if (board_ie_id == name_id) {
			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
					board_ie_data, board_ie_len);

			/* Exact (length + content) name comparison. */
			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, strlen(boardname));
			if (ret)
				goto next;

			name_match_found = true;
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found match %s for name '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);
		} else if (board_ie_id == data_id) {
			if (!name_match_found)
				/* no match found */
				goto next;

			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found %s for '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);

			/* Point at the blob in-place; caller owns bd->fw. */
			bd->data = board_ie_data;
			bd->len = board_ie_len;

			ret = 0;
			goto out;
		} else {
			ath12k_warn(ab, "unknown %s id found: %d\n",
				    ath12k_bd_ie_type_str(ie_id),
				    board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = ALIGN(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	ret = -ENOENT;

out:
	return ret;
}
360 
/* Fetch the API v2 board file and locate the blob for @boardname.
 *
 * File layout: NUL-terminated magic string padded to 4 bytes, then a TLV
 * stream; each @ie_id_match TLV is itself a container handed to
 * ath12k_core_parse_bd_ie_board().  On success bd->data points into
 * bd->fw->data; on any failure the firmware is released via
 * ath12k_core_free_bdf() and an error is returned.
 */
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
					      struct ath12k_board_data *bd,
					      const char *boardname,
					      int ie_id_match,
					      int name_id,
					      int data_id)
{
	size_t len, magic_len;
	const u8 *data;
	char *filename, filepath[100];
	size_t ie_len;
	struct ath12k_fw_ie *hdr;
	int ret, ie_id;

	filename = ATH12K_BOARD_API2_FILE;

	/* Reuse an already-requested firmware if the caller retries with a
	 * different board name.
	 */
	if (!bd->fw)
		bd->fw = ath12k_core_firmware_request(ab, filename);

	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	data = bd->fw->data;
	len = bd->fw->size;

	/* filepath is only used for log messages below. */
	ath12k_core_create_firmware_path(ab, filename,
					 filepath, sizeof(filepath));

	/* magic has extra null byte padded */
	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
		ath12k_err(ab, "found invalid board magic\n");
		ret = -EINVAL;
		goto err;
	}

	/* magic is padded to 4 bytes */
	magic_len = ALIGN(magic_len, 4);
	if (len < magic_len) {
		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	data += magic_len;
	len -= magic_len;

	/* Walk the top-level TLV stream. */
	while (len > sizeof(struct ath12k_fw_ie)) {
		hdr = (struct ath12k_fw_ie *)data;
		ie_id = le32_to_cpu(hdr->id);
		ie_len = le32_to_cpu(hdr->len);

		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < ALIGN(ie_len, 4)) {
			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
				   ie_id, ie_len, len);
			ret = -EINVAL;
			goto err;
		}

		if (ie_id == ie_id_match) {
			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
							    ie_len,
							    boardname,
							    ie_id_match,
							    name_id,
							    data_id);
			if (ret == -ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				goto err;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = ALIGN(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	/* Reached either via the goto above or by exhausting the stream;
	 * only a populated bd->data/bd->len counts as success.
	 */
	if (!bd->data || !bd->len) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to fetch %s for %s from %s\n",
			   ath12k_bd_ie_type_str(ie_id_match),
			   boardname, filepath);
		ret = -ENODATA;
		goto err;
	}

	return 0;

err:
	ath12k_core_free_bdf(ab, bd);
	return ret;
}
471 
/* Fetch a legacy (API v1) board file: the whole file is the board data.
 *
 * On failure bd->fw deliberately keeps the ERR_PTR() so that a later
 * ath12k_core_free_bdf() can tell it apart from a real firmware pointer.
 */
int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
				       struct ath12k_board_data *bd,
				       char *filename)
{
	bd->fw = ath12k_core_firmware_request(ab, filename);
	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	bd->data = bd->fw->data;
	bd->len = bd->fw->size;

	return 0;
}
485 
/* Upper bound for generated board-name strings. */
#define BOARD_NAME_SIZE 200
/* Resolve board data for this device, trying in order:
 *  1. board-2.bin keyed by the full board name (with variant),
 *  2. board-2.bin keyed by the fallback name (default board id),
 *  3. the plain API v1 board file.
 * Logs which names/files failed before giving up.
 */
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
	char *filename, filepath[100];
	int bd_api;
	int ret;

	filename = ATH12K_BOARD_API2_FILE;

	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
	if (ret) {
		ath12k_err(ab, "failed to create board name: %d", ret);
		return ret;
	}

	bd_api = 2;
	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
						     sizeof(fallback_boardname));
	if (ret) {
		ath12k_err(ab, "failed to create fallback board name: %d", ret);
		return ret;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	bd_api = 1;
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
	if (ret) {
		ath12k_core_create_firmware_path(ab, filename,
						 filepath, sizeof(filepath));
		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
			   boardname, filepath);
		/* Only log the fallback name when it differs from the
		 * primary one, to avoid a duplicate message.
		 */
		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
				   fallback_boardname, filepath);

		ath12k_err(ab, "failed to fetch board.bin from %s\n",
			   ab->hw_params->fw.dir);
		return ret;
	}

success:
	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
	return 0;
}
544 
/* Fetch the regulatory database, trying in order: board-2.bin keyed by
 * the full board name, board-2.bin keyed by the bus-only default name,
 * then the standalone ATH12K_REGDB_FILE_NAME.  Failures here are logged
 * at debug level only.
 */
int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
	int ret;

	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
						     BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create default board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
	if (ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);

exit:
	if (!ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");

	return ret;
}
590 
/* Max stations supported per radio for the current radio topology.
 *
 * NOTE(review): the 3-radio case returns TARGET_NUM_PEERS_PDEV_DBS_SBS -
 * a *peers* constant, unlike the 2-radio/single cases which use
 * TARGET_NUM_STATIONS_*.  This mirrors ath11k, but confirm against hw.h
 * that no TARGET_NUM_STATIONS_DBS_SBS was intended here.
 */
u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
{
	if (ab->num_radios == 2)
		return TARGET_NUM_STATIONS_DBS;
	else if (ab->num_radios == 3)
		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
	return TARGET_NUM_STATIONS_SINGLE;
}
599 
600 u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
601 {
602 	if (ab->num_radios == 2)
603 		return TARGET_NUM_PEERS_PDEV_DBS;
604 	else if (ab->num_radios == 3)
605 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
606 	return TARGET_NUM_PEERS_PDEV_SINGLE;
607 }
608 
609 u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
610 {
611 	if (ab->num_radios == 2)
612 		return TARGET_NUM_TIDS(DBS);
613 	else if (ab->num_radios == 3)
614 		return TARGET_NUM_TIDS(DBS_SBS);
615 	return TARGET_NUM_TIDS(SINGLE);
616 }
617 
618 struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
619 						  int index)
620 {
621 	struct device *dev = ab->dev;
622 	struct reserved_mem *rmem;
623 	struct device_node *node;
624 
625 	node = of_parse_phandle(dev->of_node, "memory-region", index);
626 	if (!node) {
627 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
628 			   "failed to parse memory-region for index %d\n", index);
629 		return NULL;
630 	}
631 
632 	rmem = of_reserved_mem_lookup(node);
633 	of_node_put(node);
634 	if (!rmem) {
635 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
636 			   "unable to get memory-region for index %d\n", index);
637 		return NULL;
638 	}
639 
640 	return rmem;
641 }
642 
643 static inline
644 void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
645 {
646 	struct ath12k_hw_group *ag = ab->ag;
647 
648 	lockdep_assert_held(&ag->mutex);
649 
650 	if (ab->hw_group_ref) {
651 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
652 			   ag->id);
653 		return;
654 	}
655 
656 	ab->hw_group_ref = true;
657 	ag->num_started++;
658 
659 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
660 		   ag->id, ag->num_started);
661 }
662 
663 static inline
664 void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
665 {
666 	struct ath12k_hw_group *ag = ab->ag;
667 
668 	lockdep_assert_held(&ag->mutex);
669 
670 	if (!ab->hw_group_ref) {
671 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
672 			   ag->id);
673 		return;
674 	}
675 
676 	ab->hw_group_ref = false;
677 	ag->num_started--;
678 
679 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
680 		   ag->id, ag->num_started);
681 }
682 
/* Stop a single device: drop its group reference, stop firmware (skipped
 * during crash flush, when firmware is already gone), then tear down
 * ACPI, REO, HIF, WMI and DP state in that order.
 */
static void ath12k_core_stop(struct ath12k_base *ab)
{
	ath12k_core_to_group_ref_put(ab);

	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_qmi_firmware_stop(ab);

	ath12k_acpi_stop(ab);

	ath12k_dp_rx_pdev_reo_cleanup(ab);
	ath12k_hif_stop(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_free(ab);

	/* De-Init of components as needed */
}
699 
/* dmi_walk() callback: extract the regulatory country code and the board
 * data file (BDF) variant string from the vendor-specific SMBIOS table
 * (type ATH12K_SMBIOS_BDF_EXT_TYPE).
 *
 * Writes ab->new_alpha2 under base_lock and ab->qmi.target.bdf_ext
 * (without the magic prefix).  Returns silently if a variant was already
 * found, the table doesn't match, or the string fails validation.
 */
static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
	struct ath12k_base *ab = data;
	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
	ssize_t copied;
	size_t len;
	int i;

	/* First matching table wins; don't overwrite an earlier result. */
	if (ab->qmi.target.bdf_ext[0] != '\0')
		return;

	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
		return;

	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "wrong smbios bdf ext type length (%d).\n",
			   hdr->length);
		return;
	}

	spin_lock_bh(&ab->base_lock);

	switch (smbios->country_code_flag) {
	case ATH12K_SMBIOS_CC_ISO:
		/* Two-letter ISO alpha2 packed into a u16. */
		ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
		ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
			   ab->new_alpha2[0], ab->new_alpha2[1]);
		break;
	case ATH12K_SMBIOS_CC_WW:
		/* "00" requests the worldwide regulatory domain. */
		ab->new_alpha2[0] = '0';
		ab->new_alpha2[1] = '0';
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
			   smbios->country_code_flag);
		break;
	}

	spin_unlock_bh(&ab->base_lock);

	if (!smbios->bdf_enabled) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
		return;
	}

	/* Only one string exists (per spec) */
	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant magic does not match.\n");
		return;
	}

	/* Reject non-printable/non-ASCII characters before copying. */
	len = min_t(size_t,
		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
	for (i = 0; i < len; i++) {
		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "bdf variant name contains non ascii chars.\n");
			return;
		}
	}

	/* Copy extension name without magic prefix */
	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
			 sizeof(ab->qmi.target.bdf_ext));
	if (copied < 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant string is longer than the buffer can accommodate\n");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT,
		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
}
779 
780 int ath12k_core_check_smbios(struct ath12k_base *ab)
781 {
782 	ab->qmi.target.bdf_ext[0] = '\0';
783 	dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
784 
785 	if (ab->qmi.target.bdf_ext[0] == '\0')
786 		return -ENODATA;
787 
788 	return 0;
789 }
790 
791 static int ath12k_core_soc_create(struct ath12k_base *ab)
792 {
793 	int ret;
794 
795 	if (ath12k_ftm_mode) {
796 		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
797 		ath12k_info(ab, "Booting in ftm mode\n");
798 	}
799 
800 	ret = ath12k_qmi_init_service(ab);
801 	if (ret) {
802 		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
803 		return ret;
804 	}
805 
806 	ath12k_debugfs_soc_create(ab);
807 
808 	ret = ath12k_hif_power_up(ab);
809 	if (ret) {
810 		ath12k_err(ab, "failed to power up :%d\n", ret);
811 		goto err_qmi_deinit;
812 	}
813 
814 	ath12k_debugfs_pdev_create(ab);
815 
816 	return 0;
817 
818 err_qmi_deinit:
819 	ath12k_debugfs_soc_destroy(ab);
820 	ath12k_qmi_deinit_service(ab);
821 	return ret;
822 }
823 
/* Tear down what ath12k_core_soc_create() set up (plus accumulated
 * regulatory state), power first, QMI last.
 */
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
	ath12k_hif_power_down(ab, false);
	ath12k_reg_free(ab);
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
}
831 
/* Per-device pdev setup: currently just the DP pdev allocation. */
static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
	int ret = ath12k_dp_pdev_alloc(ab);

	if (ret)
		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);

	return ret;
}
844 
/* Counterpart of ath12k_core_pdev_create(). */
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
	ath12k_dp_pdev_free(ab);
}
849 
/* Bring one device's core online after firmware boot: attach WMI/HTC,
 * start HIF, connect HTT and WMI services, set up REO and send the WMI
 * init command.  On success the device holds a started reference on its
 * hardware group.
 *
 * Caller must hold ab->core_lock.  Errors unwind through the goto chain
 * in reverse acquisition order.
 */
static int ath12k_core_start(struct ath12k_base *ab)
{
	int ret;

	lockdep_assert_held(&ab->core_lock);

	ret = ath12k_wmi_attach(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
		return ret;
	}

	ret = ath12k_htc_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init htc: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_hif_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start HIF: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_htc_wait_target(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_dp_htt_connect(&ab->dp);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_connect(ab);
	if (ret) {
		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_htc_start(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to start HTC: %d\n", ret);
		goto err_hif_stop;
	}

	/* Firmware announces its capabilities via the service ready event. */
	ret = ath12k_wmi_wait_for_service_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
			   ret);
		goto err_hif_stop;
	}

	ath12k_dp_cc_config(ab);

	ret = ath12k_dp_rx_pdev_reo_setup(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
		goto err_hif_stop;
	}

	ath12k_dp_hal_rx_desc_init(ab);

	ret = ath12k_wmi_cmd_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
		goto err_reo_cleanup;
	}

	ret = ath12k_wmi_wait_for_unified_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	/* put hardware to DBS mode */
	if (ab->hw_params->single_pdev_only) {
		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
		if (ret) {
			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
			goto err_reo_cleanup;
		}
	}

	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
	if (ret) {
		ath12k_err(ab, "failed to send htt version request message: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	ath12k_acpi_set_dsm_func(ab);

	/* Indicate the core start in the appropriate group */
	ath12k_core_to_group_ref_get(ab);

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);
err_hif_stop:
	ath12k_hif_stop(ab);
err_wmi_detach:
	ath12k_wmi_detach(ab);
	return ret;
}
959 
/* Disable interrupts and destroy the pdev state of one device, under its
 * core_lock.
 */
static void ath12k_core_device_cleanup(struct ath12k_base *ab)
{
	mutex_lock(&ab->core_lock);

	ath12k_hif_irq_disable(ab);
	ath12k_core_pdev_destroy(ab);

	mutex_unlock(&ab->core_lock);
}
969 
/* Stop an entire hardware group: unregister the macs, then clean up each
 * member device in reverse order, and finally destroy the mac state.
 * Caller must hold ag->mutex.
 */
static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	lockdep_assert_held(&ag->mutex);

	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

	ath12k_mac_unregister(ag);

	/* Reverse of the bring-up order in ath12k_core_hw_group_start(). */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ath12k_core_device_cleanup(ab);
	}

	ath12k_mac_destroy(ag);
}
993 
994 u8 ath12k_get_num_partner_link(struct ath12k *ar)
995 {
996 	struct ath12k_base *partner_ab, *ab = ar->ab;
997 	struct ath12k_hw_group *ag = ab->ag;
998 	struct ath12k_pdev *pdev;
999 	u8 num_link = 0;
1000 	int i, j;
1001 
1002 	lockdep_assert_held(&ag->mutex);
1003 
1004 	for (i = 0; i < ag->num_devices; i++) {
1005 		partner_ab = ag->ab[i];
1006 
1007 		for (j = 0; j < partner_ab->num_radios; j++) {
1008 			pdev = &partner_ab->pdevs[j];
1009 
1010 			/* Avoid the self link */
1011 			if (ar == pdev->ar)
1012 				continue;
1013 
1014 			num_link++;
1015 		}
1016 	}
1017 
1018 	return num_link;
1019 }
1020 
1021 static int __ath12k_mac_mlo_ready(struct ath12k *ar)
1022 {
1023 	u8 num_link = ath12k_get_num_partner_link(ar);
1024 	int ret;
1025 
1026 	if (num_link == 0)
1027 		return 0;
1028 
1029 	ret = ath12k_wmi_mlo_ready(ar);
1030 	if (ret) {
1031 		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
1032 			   ar->pdev_idx, ret);
1033 		return ret;
1034 	}
1035 
1036 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
1037 		   ar->pdev_idx);
1038 
1039 	return 0;
1040 }
1041 
1042 int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
1043 {
1044 	struct ath12k_hw *ah;
1045 	struct ath12k *ar;
1046 	int ret;
1047 	int i, j;
1048 
1049 	for (i = 0; i < ag->num_hw; i++) {
1050 		ah = ag->ah[i];
1051 		if (!ah)
1052 			continue;
1053 
1054 		for_each_ar(ah, ar, j) {
1055 			ar = &ah->radio[j];
1056 			ret = __ath12k_mac_mlo_ready(ar);
1057 			if (ret)
1058 				return ret;
1059 		}
1060 	}
1061 
1062 	return 0;
1063 }
1064 
1065 static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
1066 {
1067 	int ret, i;
1068 
1069 	if (!ag->mlo_capable)
1070 		return 0;
1071 
1072 	ret = ath12k_mac_mlo_setup(ag);
1073 	if (ret)
1074 		return ret;
1075 
1076 	for (i = 0; i < ag->num_devices; i++)
1077 		ath12k_dp_partner_cc_init(ag->ab[i]);
1078 
1079 	ret = ath12k_mac_mlo_ready(ag);
1080 	if (ret)
1081 		goto err_mlo_teardown;
1082 
1083 	return 0;
1084 
1085 err_mlo_teardown:
1086 	ath12k_mac_mlo_teardown(ag);
1087 
1088 	return ret;
1089 }
1090 
/* Start a hardware group: allocate and register the macs and perform MLO
 * setup (once per group, guarded by ATH12K_GROUP_FLAG_REGISTERED), then
 * create the pdev state and enable interrupts on every member device.
 *
 * Caller must hold ag->mutex.  Note the two distinct unwind paths: "err"
 * (after registration) stops the whole group, while the labels below the
 * final return unwind the partial pre-registration setup.
 */
static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int ret, i;

	lockdep_assert_held(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
		goto core_pdev_create;

	ret = ath12k_mac_allocate(ag);
	if (WARN_ON(ret))
		return ret;

	ret = ath12k_core_mlo_setup(ag);
	if (WARN_ON(ret))
		goto err_mac_destroy;

	ret = ath12k_mac_register(ag);
	if (WARN_ON(ret))
		goto err_mlo_teardown;

	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

core_pdev_create:
	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ret = ath12k_core_pdev_create(ab);
		if (ret) {
			ath12k_err(ab, "failed to create pdev core %d\n", ret);
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		ath12k_hif_irq_enable(ab);

		/* -EOPNOTSUPP from rfkill config is not fatal. */
		ret = ath12k_core_rfkill_config(ab);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

err:
	ath12k_core_hw_group_stop(ag);
	return ret;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

err_mac_destroy:
	ath12k_mac_destroy(ag);

	return ret;
}
1157 
1158 static int ath12k_core_start_firmware(struct ath12k_base *ab,
1159 				      enum ath12k_firmware_mode mode)
1160 {
1161 	int ret;
1162 
1163 	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
1164 				    &ab->qmi.ce_cfg.shadow_reg_v3_len);
1165 
1166 	ret = ath12k_qmi_firmware_start(ab, mode);
1167 	if (ret) {
1168 		ath12k_err(ab, "failed to send firmware start: %d\n", ret);
1169 		return ret;
1170 	}
1171 
1172 	return ret;
1173 }
1174 
1175 static inline
1176 bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
1177 {
1178 	lockdep_assert_held(&ag->mutex);
1179 
1180 	return (ag->num_started == ag->num_devices);
1181 }
1182 
1183 static void ath12k_fw_stats_pdevs_free(struct list_head *head)
1184 {
1185 	struct ath12k_fw_stats_pdev *i, *tmp;
1186 
1187 	list_for_each_entry_safe(i, tmp, head, list) {
1188 		list_del(&i->list);
1189 		kfree(i);
1190 	}
1191 }
1192 
1193 void ath12k_fw_stats_bcn_free(struct list_head *head)
1194 {
1195 	struct ath12k_fw_stats_bcn *i, *tmp;
1196 
1197 	list_for_each_entry_safe(i, tmp, head, list) {
1198 		list_del(&i->list);
1199 		kfree(i);
1200 	}
1201 }
1202 
1203 static void ath12k_fw_stats_vdevs_free(struct list_head *head)
1204 {
1205 	struct ath12k_fw_stats_vdev *i, *tmp;
1206 
1207 	list_for_each_entry_safe(i, tmp, head, list) {
1208 		list_del(&i->list);
1209 		kfree(i);
1210 	}
1211 }
1212 
1213 void ath12k_fw_stats_init(struct ath12k *ar)
1214 {
1215 	INIT_LIST_HEAD(&ar->fw_stats.vdevs);
1216 	INIT_LIST_HEAD(&ar->fw_stats.pdevs);
1217 	INIT_LIST_HEAD(&ar->fw_stats.bcn);
1218 	init_completion(&ar->fw_stats_complete);
1219 }
1220 
1221 void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
1222 {
1223 	ath12k_fw_stats_pdevs_free(&stats->pdevs);
1224 	ath12k_fw_stats_vdevs_free(&stats->vdevs);
1225 	ath12k_fw_stats_bcn_free(&stats->bcn);
1226 }
1227 
1228 void ath12k_fw_stats_reset(struct ath12k *ar)
1229 {
1230 	spin_lock_bh(&ar->data_lock);
1231 	ar->fw_stats.fw_stats_done = false;
1232 	ath12k_fw_stats_free(&ar->fw_stats);
1233 	spin_unlock_bh(&ar->data_lock);
1234 }
1235 
1236 static void ath12k_core_trigger_partner(struct ath12k_base *ab)
1237 {
1238 	struct ath12k_hw_group *ag = ab->ag;
1239 	struct ath12k_base *partner_ab;
1240 	bool found = false;
1241 	int i;
1242 
1243 	for (i = 0; i < ag->num_devices; i++) {
1244 		partner_ab = ag->ab[i];
1245 		if (!partner_ab)
1246 			continue;
1247 
1248 		if (found)
1249 			ath12k_qmi_trigger_host_cap(partner_ab);
1250 
1251 		found = (partner_ab == ab);
1252 	}
1253 }
1254 
/* Called when QMI reports firmware-ready for @ab: start the firmware,
 * initialize CE pipes and DP, then start this device's core. If this
 * was the last device of the hardware group to start, bring up the
 * whole group; otherwise trigger the next partner device to proceed.
 *
 * Returns 0 on success or a negative error code; on failure all state
 * created here is unwound again.
 */
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	int ret, i;

	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
	if (ret) {
		ath12k_err(ab, "failed to start firmware: %d\n", ret);
		return ret;
	}

	ret = ath12k_ce_init_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
		goto err_firmware_stop;
	}

	ret = ath12k_dp_alloc(ab);
	if (ret) {
		ath12k_err(ab, "failed to init DP: %d\n", ret);
		goto err_firmware_stop;
	}

	/* Group-wide start decisions are serialized on ag->mutex; the
	 * per-device core start additionally holds core_lock.
	 */
	mutex_lock(&ag->mutex);
	mutex_lock(&ab->core_lock);

	ret = ath12k_core_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start core: %d\n", ret);
		goto err_dp_free;
	}

	mutex_unlock(&ab->core_lock);

	if (ath12k_core_hw_group_start_ready(ag)) {
		ret = ath12k_core_hw_group_start(ag);
		if (ret) {
			ath12k_warn(ab, "unable to start hw group\n");
			goto err_core_stop;
		}
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
	} else {
		ath12k_core_trigger_partner(ab);
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_core_stop:
	/* Stop every member of the group in reverse order. Note that
	 * 'ab' is reused as the loop cursor here, so the original
	 * device pointer is no longer available past this point.
	 */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}
	mutex_unlock(&ag->mutex);
	goto exit;

err_dp_free:
	ath12k_dp_free(ab);
	mutex_unlock(&ab->core_lock);
	mutex_unlock(&ag->mutex);

err_firmware_stop:
	ath12k_qmi_firmware_stop(ab);

exit:
	return ret;
}
1328 
/* Rebuild driver state after a firmware crash: tear down the DP, CE,
 * WMI and REO state of the crashed instance, re-initialize the SRNG
 * rings and run the normal firmware bring-up path again via
 * ath12k_core_qmi_firmware_ready().
 *
 * Returns 0 on success or a negative error code.
 */
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
	int ret;

	mutex_lock(&ab->core_lock);
	ath12k_dp_pdev_free(ab);
	ath12k_ce_cleanup_pipes(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_rx_pdev_reo_cleanup(ab);
	mutex_unlock(&ab->core_lock);

	ath12k_dp_free(ab);
	ath12k_hal_srng_deinit(ab);

	/* After the restart every vdev id is available again */
	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		return ret;

	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	ret = ath12k_core_qmi_firmware_ready(ab);
	if (ret)
		goto err_hal_srng_deinit;

	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);

	return 0;

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);
	return ret;
}
1363 
1364 static void ath12k_rfkill_work(struct work_struct *work)
1365 {
1366 	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1367 	struct ath12k_hw_group *ag = ab->ag;
1368 	struct ath12k *ar;
1369 	struct ath12k_hw *ah;
1370 	struct ieee80211_hw *hw;
1371 	bool rfkill_radio_on;
1372 	int i, j;
1373 
1374 	spin_lock_bh(&ab->base_lock);
1375 	rfkill_radio_on = ab->rfkill_radio_on;
1376 	spin_unlock_bh(&ab->base_lock);
1377 
1378 	for (i = 0; i < ag->num_hw; i++) {
1379 		ah = ath12k_ag_to_ah(ag, i);
1380 		if (!ah)
1381 			continue;
1382 
1383 		for (j = 0; j < ah->num_radio; j++) {
1384 			ar = &ah->radio[j];
1385 			if (!ar)
1386 				continue;
1387 
1388 			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1389 		}
1390 
1391 		hw = ah->hw;
1392 		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1393 	}
1394 }
1395 
/* Quiesce a single radio: reset vdev bookkeeping, finish any scan,
 * drop all peers, cancel pending works, detach the pdev from the
 * active RCU table and empty the arvif list.
 *
 * Caller must hold the wiphy mutex (asserted below).
 */
void ath12k_core_halt(struct ath12k *ar)
{
	struct list_head *pos, *n;
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ar->num_created_vdevs = 0;
	ar->allocated_vdev_map = 0;

	ath12k_mac_scan_finish(ar);
	ath12k_mac_peer_cleanup_all(ar);
	cancel_delayed_work_sync(&ar->scan.timeout);
	cancel_work_sync(&ar->regd_update_work);
	cancel_work_sync(&ab->rfkill_work);
	cancel_work_sync(&ab->update_11d_work);

	/* Hide the pdev from RCU readers, then wait for all in-flight
	 * readers to finish before the caller tears state down.
	 */
	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
	synchronize_rcu();

	spin_lock_bh(&ar->data_lock);
	list_for_each_safe(pos, n, &ar->arvifs)
		list_del_init(pos);
	spin_unlock_bh(&ar->data_lock);

	/* Fresh IDR for pending tx mgmt frames after the halt */
	idr_init(&ar->txmgmt_idr);
}
1423 
/* First half of crash recovery: account the crash, stop the mac80211
 * queues of every active hardware and complete every outstanding
 * completion/waitqueue so no caller stays blocked across the restart.
 */
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_hw *ah;
	int i, j;

	spin_lock_bh(&ab->base_lock);
	ab->stats.fw_crash_counter++;
	spin_unlock_bh(&ab->base_lock);

	if (ab->is_reset)
		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		/* Skip hardware that never started or is in test mode */
		if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
		    ah->state == ATH12K_HW_STATE_TM)
			continue;

		wiphy_lock(ah->hw->wiphy);

		/* If queue 0 is stopped, it is safe to assume that all
		 * other queues are stopped by driver via
		 * ieee80211_stop_queues() below. This means, there is
		 * no need to stop it again and hence continue
		 */
		if (ieee80211_queue_stopped(ah->hw, 0)) {
			wiphy_unlock(ah->hw->wiphy);
			continue;
		}

		ieee80211_stop_queues(ah->hw);

		for (j = 0; j < ah->num_radio; j++) {
			ar = &ah->radio[j];

			ath12k_mac_drain_tx(ar);
			ar->state_11d = ATH12K_11D_IDLE;
			/* Wake every completion a caller may be
			 * blocked on; the firmware will never answer.
			 */
			complete(&ar->completed_11d_scan);
			complete(&ar->scan.started);
			complete_all(&ar->scan.completed);
			complete(&ar->scan.on_channel);
			complete(&ar->peer_assoc_done);
			complete(&ar->peer_delete_done);
			complete(&ar->install_key_done);
			complete(&ar->vdev_setup_done);
			complete(&ar->vdev_delete_done);
			complete(&ar->bss_survey_done);

			wake_up(&ar->dp.tx_empty_waitq);
			idr_for_each(&ar->txmgmt_idr,
				     ath12k_mac_tx_mgmt_pending_free, ar);
			idr_destroy(&ar->txmgmt_idr);
			wake_up(&ar->txmgmt_empty_waitq);

			/* Monitor vdev state is rebuilt after restart */
			ar->monitor_vdev_id = -1;
			ar->monitor_vdev_created = false;
			ar->monitor_started = false;
		}

		wiphy_unlock(ah->hw->wiphy);
	}

	wake_up(&ab->wmi_ab.tx_credits_wq);
	wake_up(&ab->peer_mapping_wq);
}
1491 
1492 static void ath12k_update_11d(struct work_struct *work)
1493 {
1494 	struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
1495 	struct ath12k *ar;
1496 	struct ath12k_pdev *pdev;
1497 	struct wmi_set_current_country_arg arg = {};
1498 	int ret, i;
1499 
1500 	spin_lock_bh(&ab->base_lock);
1501 	memcpy(&arg.alpha2, &ab->new_alpha2, 2);
1502 	spin_unlock_bh(&ab->base_lock);
1503 
1504 	ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
1505 		   arg.alpha2[0], arg.alpha2[1]);
1506 
1507 	for (i = 0; i < ab->num_radios; i++) {
1508 		pdev = &ab->pdevs[i];
1509 		ar = pdev->ar;
1510 
1511 		memcpy(&ar->alpha2, &arg.alpha2, 2);
1512 		ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
1513 		if (ret)
1514 			ath12k_warn(ar->ab,
1515 				    "pdev id %d failed set current country code: %d\n",
1516 				    i, ret);
1517 	}
1518 }
1519 
/* Second half of crash recovery: move every started hardware to the
 * RESTARTING state (halting each of its radios) and flag hardware that
 * cannot be restarted as wedged, then signal driver_recovery.
 */
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	struct ath12k *ar;
	int i, j;

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
			continue;

		wiphy_lock(ah->hw->wiphy);
		mutex_lock(&ah->hw_mutex);

		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;

			for (j = 0; j < ah->num_radio; j++) {
				ar = &ah->radio[j];
				ath12k_core_halt(ar);
			}

			break;
		case ATH12K_HW_STATE_OFF:
			/* Not reachable in practice: OFF hardware is
			 * filtered out above; kept for completeness.
			 */
			ath12k_warn(ab,
				    "cannot restart hw %d that hasn't been started\n",
				    i);
			break;
		case ATH12K_HW_STATE_RESTARTING:
			break;
		case ATH12K_HW_STATE_RESTARTED:
			/* Mark wedged and warn; no further restart is
			 * attempted for this hardware.
			 */
			ah->state = ATH12K_HW_STATE_WEDGED;
			fallthrough;
		case ATH12K_HW_STATE_WEDGED:
			ath12k_warn(ab,
				    "device is wedged, will not restart hw %d\n", i);
			break;
		case ATH12K_HW_STATE_TM:
			ath12k_warn(ab, "fw mode reset done radio %d\n", i);
			break;
		}

		mutex_unlock(&ah->hw_mutex);
		wiphy_unlock(ah->hw->wiphy);
	}

	complete(&ab->driver_recovery);
}
1570 
/* Restart worker scheduled after a firmware crash: reconfigure the
 * driver and, once every device of the group is ready again, ask
 * mac80211 to restart each hardware in the group.
 */
static void ath12k_core_restart(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	int ret, i;

	ret = ath12k_core_reconfigure_on_crash(ab);
	if (ret) {
		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
		return;
	}

	if (ab->is_reset) {
		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
			/* Device never registered with mac80211, so
			 * there is nothing to restart; just account
			 * the completed reset.
			 */
			atomic_dec(&ab->reset_count);
			complete(&ab->reset_complete);
			ab->is_reset = false;
			atomic_set(&ab->fail_cont_count, 0);
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
		}

		mutex_lock(&ag->mutex);

		/* Wait until the last partner device reaches this point */
		if (!ath12k_core_hw_group_start_ready(ag)) {
			mutex_unlock(&ag->mutex);
			goto exit_restart;
		}

		for (i = 0; i < ag->num_hw; i++) {
			ah = ath12k_ag_to_ah(ag, i);
			ieee80211_restart_hw(ah->hw);
		}

		mutex_unlock(&ag->mutex);
	}

exit_restart:
	complete(&ab->restart_completed);
}
1611 
/* Full reset worker. Serializes concurrent reset requests, rate-limits
 * repeatedly failing recoveries, quiesces the device, powers it down
 * and — once the last member of the hardware group got here — powers
 * every member back up.
 */
static void ath12k_core_reset(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
	struct ath12k_hw_group *ag = ab->ag;
	int reset_count, fail_cont_count, i;
	long time_left;

	/* Firmware never became ready; there is nothing to reset */
	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
		return;
	}

	/* Sometimes the recovery will fail and then the next all recovery fail,
	 * this is to avoid infinite recovery since it can not recovery success
	 */
	fail_cont_count = atomic_read(&ab->fail_cont_count);

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
		return;

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
	    time_before(jiffies, ab->reset_fail_timeout))
		return;

	reset_count = atomic_inc_return(&ab->reset_count);

	if (reset_count > 1) {
		/* Sometimes it happened another reset worker before the previous one
		 * completed, then the second reset worker will destroy the previous one,
		 * thus below is to avoid that.
		 */
		ath12k_warn(ab, "already resetting count %d\n", reset_count);

		reinit_completion(&ab->reset_complete);
		time_left = wait_for_completion_timeout(&ab->reset_complete,
							ATH12K_RESET_TIMEOUT_HZ);
		if (time_left) {
			/* Previous reset finished in time; skip this one */
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
			atomic_dec(&ab->reset_count);
			return;
		}

		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
		/* Record the continuous recovery fail count when recovery failed */
		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");

	ab->is_reset = true;
	atomic_set(&ab->recovery_count, 0);

	ath12k_coredump_collect(ab);
	ath12k_core_pre_reconfigure_recovery(ab);

	ath12k_core_post_reconfigure_recovery(ab);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");

	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	ath12k_hif_power_down(ab, false);

	/* prepare for power up */
	ab->qmi.num_radios = U8_MAX;

	mutex_lock(&ag->mutex);
	ath12k_core_to_group_ref_put(ab);

	/* Only the last device of the group to drop its reference
	 * performs the group-wide power up below.
	 */
	if (ag->num_started > 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "waiting for %d partner device(s) to reset\n",
			   ag->num_started);
		mutex_unlock(&ag->mutex);
		return;
	}

	/* Prepare MLO global memory region for power up */
	ath12k_qmi_reset_mlo_mem(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		ath12k_hif_power_up(ab);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
	}

	mutex_unlock(&ag->mutex);
}
1704 
/* Early one-time initialization before core bring-up: resolve the
 * hw_params for this chip and map the firmware for later queries.
 *
 * Returns 0 on success or a negative error code.
 */
int ath12k_core_pre_init(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_hw_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init hw params: %d\n", ret);
		return ret;
	}

	ath12k_fw_map(ab);

	return 0;
}
1719 
1720 static int ath12k_core_panic_handler(struct notifier_block *nb,
1721 				     unsigned long action, void *data)
1722 {
1723 	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
1724 					      panic_nb);
1725 
1726 	return ath12k_hif_panic_handler(ab);
1727 }
1728 
1729 static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
1730 {
1731 	ab->panic_nb.notifier_call = ath12k_core_panic_handler;
1732 
1733 	return atomic_notifier_chain_register(&panic_notifier_list,
1734 					      &ab->panic_nb);
1735 }
1736 
1737 static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
1738 {
1739 	atomic_notifier_chain_unregister(&panic_notifier_list,
1740 					 &ab->panic_nb);
1741 }
1742 
1743 static inline
1744 bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1745 {
1746 	lockdep_assert_held(&ag->mutex);
1747 
1748 	return (ag->num_probed == ag->num_devices);
1749 }
1750 
1751 static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
1752 {
1753 	struct ath12k_hw_group *ag;
1754 	int count = 0;
1755 
1756 	lockdep_assert_held(&ath12k_hw_group_mutex);
1757 
1758 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1759 		count++;
1760 
1761 	ag = kzalloc(sizeof(*ag), GFP_KERNEL);
1762 	if (!ag)
1763 		return NULL;
1764 
1765 	ag->id = count;
1766 	list_add(&ag->list, &ath12k_hw_group_list);
1767 	mutex_init(&ag->mutex);
1768 	ag->mlo_capable = false;
1769 
1770 	return ag;
1771 }
1772 
1773 static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
1774 {
1775 	mutex_lock(&ath12k_hw_group_mutex);
1776 
1777 	list_del(&ag->list);
1778 	kfree(ag);
1779 
1780 	mutex_unlock(&ath12k_hw_group_mutex);
1781 }
1782 
1783 static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
1784 {
1785 	struct ath12k_hw_group *ag;
1786 	int i;
1787 
1788 	if (!ab->dev->of_node)
1789 		return NULL;
1790 
1791 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1792 		for (i = 0; i < ag->num_devices; i++)
1793 			if (ag->wsi_node[i] == ab->dev->of_node)
1794 				return ag;
1795 
1796 	return NULL;
1797 }
1798 
/* Walk the WSI ring described in the device tree starting from @ab's
 * node: follow each node's TX endpoint (port 0) to its remote RX
 * endpoint until the walk returns to the starting node. Every node
 * visited is recorded in ag->wsi_node[] and the ring length ends up in
 * ag->num_devices.
 *
 * Returns 0 on success, -ENODEV when the OF graph is broken, -EINVAL
 * when the ring exceeds ATH12K_MAX_DEVICES.
 */
static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
				    struct ath12k_base *ab)
{
	struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
	struct device_node *tx_endpoint, *next_rx_endpoint;
	int device_count = 0;

	next_wsi_dev = wsi_dev;

	if (!next_wsi_dev)
		return -ENODEV;

	do {
		/* NOTE(review): the raw node pointer is stored without
		 * taking an extra OF reference; the references obtained
		 * during the walk are all dropped below.
		 */
		ag->wsi_node[device_count] = next_wsi_dev;

		tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
		if (!tx_endpoint) {
			of_node_put(next_wsi_dev);
			return -ENODEV;
		}

		next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
		if (!next_rx_endpoint) {
			of_node_put(next_wsi_dev);
			of_node_put(tx_endpoint);
			return -ENODEV;
		}

		of_node_put(tx_endpoint);
		of_node_put(next_wsi_dev);

		/* Advance to the device owning the remote RX endpoint */
		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
		if (!next_wsi_dev) {
			of_node_put(next_rx_endpoint);
			return -ENODEV;
		}

		of_node_put(next_rx_endpoint);

		device_count++;
		if (device_count > ATH12K_MAX_DEVICES) {
			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
				    device_count, ATH12K_MAX_DEVICES);
			of_node_put(next_wsi_dev);
			return -EINVAL;
		}
	} while (wsi_dev != next_wsi_dev);

	of_node_put(next_wsi_dev);
	ag->num_devices = device_count;

	return 0;
}
1852 
1853 static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
1854 				     struct ath12k_base *ab)
1855 {
1856 	int i, wsi_controller_index = -1, node_index = -1;
1857 	bool control;
1858 
1859 	for (i = 0; i < ag->num_devices; i++) {
1860 		control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
1861 		if (control)
1862 			wsi_controller_index = i;
1863 
1864 		if (ag->wsi_node[i] == ab->dev->of_node)
1865 			node_index = i;
1866 	}
1867 
1868 	if (wsi_controller_index == -1) {
1869 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
1870 		return -EINVAL;
1871 	}
1872 
1873 	if (node_index == -1) {
1874 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
1875 		return -EINVAL;
1876 	}
1877 
1878 	ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
1879 		ag->num_devices;
1880 
1881 	return 0;
1882 }
1883 
/* Assign @ab to a hardware group: either the DT-described multi-device
 * group it belongs to, or a fresh single-device group with the invalid
 * group id when no (usable) DT grouping exists or FTM mode is active.
 *
 * Returns the group on success (with ab->ag, ab->device_id and the
 * group's ab[] slot filled in) or NULL on allocation failure.
 */
static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
{
	struct ath12k_wsi_info *wsi = &ab->wsi_info;
	struct ath12k_hw_group *ag;

	lockdep_assert_held(&ath12k_hw_group_mutex);

	if (ath12k_ftm_mode)
		goto invalid_group;

	/* The grouping of multiple devices will be done based on device tree file.
	 * The platforms that do not have any valid group information would have
	 * each device to be part of its own invalid group.
	 *
	 * We use group id ATH12K_INVALID_GROUP_ID for single device group
	 * which didn't have dt entry or wrong dt entry, there could be many
	 * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So
	 * default group id of ATH12K_INVALID_GROUP_ID combined with
	 * num devices in ath12k_hw_group determines if the group is
	 * multi device or single device group
	 */

	ag = ath12k_core_hw_group_find_by_dt(ab);
	if (!ag) {
		ag = ath12k_core_hw_group_alloc(ab);
		if (!ag) {
			ath12k_warn(ab, "unable to create new hw group\n");
			return NULL;
		}

		/* On any DT parsing failure fall back to treating this
		 * device as a standalone single-device group.
		 */
		if (ath12k_core_get_wsi_info(ag, ab) ||
		    ath12k_core_get_wsi_index(ag, ab)) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "unable to get wsi info from dt, grouping single device");
			ag->id = ATH12K_INVALID_GROUP_ID;
			ag->num_devices = 1;
			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
			wsi->index = 0;
		}

		goto exit;
	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
			   ag->id);
		goto invalid_group;
	} else {
		if (ath12k_core_get_wsi_index(ag, ab))
			goto invalid_group;
		goto exit;
	}

invalid_group:
	/* Standalone fallback: a fresh single-device group */
	ag = ath12k_core_hw_group_alloc(ab);
	if (!ag) {
		ath12k_warn(ab, "unable to create new hw group\n");
		return NULL;
	}

	ag->id = ATH12K_INVALID_GROUP_ID;
	ag->num_devices = 1;
	wsi->index = 0;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");

exit:
	/* A full group cannot take another device; fall back to a
	 * standalone group for this one.
	 */
	if (ag->num_probed >= ag->num_devices) {
		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
		goto invalid_group;
	}

	ab->device_id = ag->num_probed++;
	ag->ab[ab->device_id] = ab;
	ab->ag = ag;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
		   ag->id, ag->num_devices, wsi->index);

	return ag;
}
1963 
/* Remove @ab from its hardware group. When the last device leaves, the
 * group itself is freed (outside the group mutex, since the group is
 * about to be destroyed).
 */
void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	u8 device_id = ab->device_id;
	int num_probed;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	/* Sanity: the slot must exist and actually hold this device */
	if (WARN_ON(device_id >= ag->num_devices)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	if (WARN_ON(ag->ab[device_id] != ab)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	ag->ab[device_id] = NULL;
	ab->ag = NULL;
	ab->device_id = ATH12K_INVALID_DEVICE_ID;

	if (ag->num_probed)
		ag->num_probed--;

	num_probed = ag->num_probed;

	mutex_unlock(&ag->mutex);

	if (!num_probed)
		ath12k_core_hw_group_free(ag);
}
1999 
2000 static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
2001 {
2002 	struct ath12k_base *ab;
2003 	int i;
2004 
2005 	if (WARN_ON(!ag))
2006 		return;
2007 
2008 	for (i = 0; i < ag->num_devices; i++) {
2009 		ab = ag->ab[i];
2010 		if (!ab)
2011 			continue;
2012 
2013 		ath12k_core_soc_destroy(ab);
2014 	}
2015 }
2016 
/* Stop a hardware group: stop the group-level state, then stop each
 * member device. Idempotent — the UNREGISTER flag ensures only the
 * first caller performs the teardown.
 */
void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);

	ath12k_core_hw_group_stop(ag);

	/* Stop each member device under its own core_lock */
	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}

	mutex_unlock(&ag->mutex);
}
2048 
2049 static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
2050 {
2051 	struct ath12k_base *ab;
2052 	int i, ret;
2053 
2054 	lockdep_assert_held(&ag->mutex);
2055 
2056 	for (i = 0; i < ag->num_devices; i++) {
2057 		ab = ag->ab[i];
2058 		if (!ab)
2059 			continue;
2060 
2061 		mutex_lock(&ab->core_lock);
2062 
2063 		ret = ath12k_core_soc_create(ab);
2064 		if (ret) {
2065 			mutex_unlock(&ab->core_lock);
2066 			ath12k_err(ab, "failed to create soc core: %d\n", ret);
2067 			return ret;
2068 		}
2069 
2070 		mutex_unlock(&ab->core_lock);
2071 	}
2072 
2073 	return 0;
2074 }
2075 
2076 void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
2077 {
2078 	struct ath12k_base *ab;
2079 	int i;
2080 
2081 	if (ath12k_ftm_mode)
2082 		return;
2083 
2084 	lockdep_assert_held(&ag->mutex);
2085 
2086 	if (ag->num_devices == 1) {
2087 		ab = ag->ab[0];
2088 		/* QCN9274 firmware uses firmware IE for MLO advertisement */
2089 		if (ab->fw.fw_features_valid) {
2090 			ag->mlo_capable =
2091 				ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
2092 			return;
2093 		}
2094 
2095 		/* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
2096 		ag->mlo_capable = ab->single_chip_mlo_support;
2097 		return;
2098 	}
2099 
2100 	ag->mlo_capable = true;
2101 
2102 	for (i = 0; i < ag->num_devices; i++) {
2103 		ab = ag->ab[i];
2104 		if (!ab)
2105 			continue;
2106 
2107 		/* even if 1 device's firmware feature indicates MLO
2108 		 * unsupported, make MLO unsupported for the whole group
2109 		 */
2110 		if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
2111 			ag->mlo_capable = false;
2112 			return;
2113 		}
2114 	}
2115 }
2116 
/* Per-device entry point called at probe time: register the panic
 * notifier, assign the device to a hardware group and, once the last
 * member of the group has probed, create the group.
 *
 * Returns 0 on success or a negative error code.
 */
int ath12k_core_init(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag;
	int ret;

	/* Panic notifier failure is non-fatal; only warn */
	ret = ath12k_core_panic_notifier_register(ab);
	if (ret)
		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);

	mutex_lock(&ath12k_hw_group_mutex);

	ag = ath12k_core_hw_group_assign(ab);
	if (!ag) {
		mutex_unlock(&ath12k_hw_group_mutex);
		ath12k_warn(ab, "unable to get hw group\n");
		return -ENODEV;
	}

	mutex_unlock(&ath12k_hw_group_mutex);

	mutex_lock(&ag->mutex);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
		   ag->num_devices, ag->num_probed);

	if (ath12k_core_hw_group_create_ready(ag)) {
		ret = ath12k_core_hw_group_create(ag);
		if (ret) {
			mutex_unlock(&ag->mutex);
			ath12k_warn(ab, "unable to create hw group\n");
			goto err;
		}
	}

	mutex_unlock(&ag->mutex);

	return 0;

err:
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
	return ret;
}
2160 
/* Undo ath12k_core_init(): destroy group SoC state, detach this device
 * from its group, then drop the panic notifier.
 */
void ath12k_core_deinit(struct ath12k_base *ab)
{
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
	ath12k_core_panic_notifier_unregister(ab);
}
2167 
/* Final release of an ath12k_base allocated by ath12k_core_alloc():
 * stop the replenish timer and workqueues before freeing the object.
 */
void ath12k_core_free(struct ath12k_base *ab)
{
	timer_delete_sync(&ab->rx_replenish_retry);
	destroy_workqueue(ab->workqueue_aux);
	destroy_workqueue(ab->workqueue);
	kfree(ab);
}
2175 
/* Allocate and initialize an ath12k_base (plus @priv_size of bus
 * private data) for a device on @bus: workqueues, locks, work items,
 * timers and completions. Returns NULL on allocation failure.
 *
 * Counterpart is ath12k_core_free().
 */
struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
				      enum ath12k_bus bus)
{
	struct ath12k_base *ab;

	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
	if (!ab)
		return NULL;

	init_completion(&ab->driver_recovery);

	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
	if (!ab->workqueue)
		goto err_sc_free;

	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
	if (!ab->workqueue_aux)
		goto err_free_wq;

	mutex_init(&ab->core_lock);
	spin_lock_init(&ab->base_lock);
	init_completion(&ab->reset_complete);

	INIT_LIST_HEAD(&ab->peers);
	init_waitqueue_head(&ab->peer_mapping_wq);
	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
	INIT_WORK(&ab->restart_work, ath12k_core_restart);
	INIT_WORK(&ab->reset_work, ath12k_core_reset);
	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
	INIT_WORK(&ab->update_11d_work, ath12k_update_11d);

	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
	init_completion(&ab->htc_suspend);
	init_completion(&ab->restart_completed);
	init_completion(&ab->wow.wakeup_completed);

	ab->dev = dev;
	ab->hif.bus = bus;
	/* U8_MAX marks "radio count unknown" until QMI reports it */
	ab->qmi.num_radios = U8_MAX;
	ab->single_chip_mlo_support = false;

	/* Device index used to identify the devices in a group.
	 *
	 * In Intra-device MLO, only one device present in a group,
	 * so it is always zero.
	 *
	 * In Inter-device MLO, Multiple device present in a group,
	 * expect non-zero value.
	 */
	ab->device_id = 0;

	return ab;

err_free_wq:
	destroy_workqueue(ab->workqueue);
err_sc_free:
	kfree(ab);
	return NULL;
}
2236 
2237 static int ath12k_init(void)
2238 {
2239 	ahb_err = ath12k_ahb_init();
2240 	if (ahb_err)
2241 		pr_warn("Failed to initialize ath12k AHB device: %d\n", ahb_err);
2242 
2243 	pci_err = ath12k_pci_init();
2244 	if (pci_err)
2245 		pr_warn("Failed to initialize ath12k PCI device: %d\n", pci_err);
2246 
2247 	/* If both failed, return one of the failures (arbitrary) */
2248 	return ahb_err && pci_err ? ahb_err : 0;
2249 }
2250 
2251 static void ath12k_exit(void)
2252 {
2253 	if (!pci_err)
2254 		ath12k_pci_exit();
2255 
2256 	if (!ahb_err)
2257 		ath12k_ahb_exit();
2258 }
2259 
/* Module entry/exit points and metadata */
module_init(ath12k_init);
module_exit(ath12k_exit);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");
2265