xref: /linux/drivers/net/wireless/ath/ath12k/core.c (revision 0ce92d548b44649a8de706f9bb9e74a4ed2f18a7)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
6  */
7 
8 #include <linux/export.h>
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/remoteproc.h>
12 #include <linux/firmware.h>
13 #include <linux/of.h>
14 #include <linux/of_graph.h>
15 #include "ahb.h"
16 #include "core.h"
17 #include "dp_tx.h"
18 #include "dp_rx.h"
19 #include "debug.h"
20 #include "debugfs.h"
21 #include "fw.h"
22 #include "hif.h"
23 #include "pci.h"
24 #include "wow.h"
25 
/* Init results of the AHB and PCI bus glue; presumably consumed by the
 * module init/exit paths of this file (not visible in this chunk) —
 * TODO(review): confirm against ath12k_init()/ath12k_exit().
 */
static int ahb_err, pci_err;

/* Bitmask selecting which ath12k_dbg() categories are printed; writable
 * at runtime via /sys/module/ath12k/parameters/debug_mask (mode 0644).
 */
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");

/* Load-time only flag (mode 0444): boot firmware in factory test mode. */
bool ath12k_ftm_mode;
module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");

/* protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);

static DEFINE_MUTEX(ath12k_hw_group_mutex);
39 
40 static int ath12k_core_rfkill_config(struct ath12k_base *ab)
41 {
42 	struct ath12k *ar;
43 	int ret = 0, i;
44 
45 	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
46 		return 0;
47 
48 	if (ath12k_acpi_get_disable_rfkill(ab))
49 		return 0;
50 
51 	for (i = 0; i < ab->num_radios; i++) {
52 		ar = ab->pdevs[i].ar;
53 
54 		ret = ath12k_mac_rfkill_config(ar);
55 		if (ret && ret != -EOPNOTSUPP) {
56 			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
57 			return ret;
58 		}
59 	}
60 
61 	return ret;
62 }
63 
64 /* Check if we need to continue with suspend/resume operation.
65  * Return:
66  *	a negative value: error happens and don't continue.
67  *	0:  no error but don't continue.
68  *	positive value: no error and do continue.
69  */
70 static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
71 {
72 	struct ath12k *ar;
73 
74 	if (!ab->hw_params->supports_suspend)
75 		return -EOPNOTSUPP;
76 
77 	/* so far single_pdev_only chips have supports_suspend as true
78 	 * so pass 0 as a dummy pdev_id here.
79 	 */
80 	ar = ab->pdevs[0].ar;
81 	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
82 		return 0;
83 
84 	return 1;
85 }
86 
/* Driver suspend hook: drain pending tx on every radio, then
 * pre-complete restart_completed so a skipped suspend_late phase cannot
 * make ath12k_core_resume() time out (see comment below).
 */
int ath12k_core_suspend(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret, i;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	/* Wait for tx completion on each radio under its wiphy lock;
	 * abort the whole suspend on the first radio that fails to drain.
	 */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		if (!ar)
			continue;

		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);

		ret = ath12k_mac_wait_tx_complete(ar);
		if (ret) {
			/* unlock before warning/returning on the error path */
			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
			return ret;
		}

		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
	}

	/* PM framework skips suspend_late/resume_early callbacks
	 * if other devices report errors in their suspend callbacks.
	 * However ath12k_core_resume() would still be called because
	 * here we return success thus kernel put us on dpm_suspended_list.
	 * Since we won't go through a power down/up cycle, there is
	 * no chance to call complete(&ab->restart_completed) in
	 * ath12k_core_restart(), making ath12k_core_resume() timeout.
	 * So call it here to avoid this issue. This also works in case
	 * no error happens thus suspend_late/resume_early get called,
	 * because it will be reinitialized in ath12k_core_resume_early().
	 */
	complete(&ab->restart_completed);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend);
129 
130 int ath12k_core_suspend_late(struct ath12k_base *ab)
131 {
132 	int ret;
133 
134 	ret = ath12k_core_continue_suspend_resume(ab);
135 	if (ret <= 0)
136 		return ret;
137 
138 	ath12k_acpi_stop(ab);
139 
140 	ath12k_hif_irq_disable(ab);
141 	ath12k_hif_ce_irq_disable(ab);
142 
143 	ath12k_hif_power_down(ab, true);
144 
145 	return 0;
146 }
147 EXPORT_SYMBOL(ath12k_core_suspend_late);
148 
149 int ath12k_core_resume_early(struct ath12k_base *ab)
150 {
151 	int ret;
152 
153 	ret = ath12k_core_continue_suspend_resume(ab);
154 	if (ret <= 0)
155 		return ret;
156 
157 	reinit_completion(&ab->restart_completed);
158 	ret = ath12k_hif_power_up(ab);
159 	if (ret)
160 		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
161 
162 	return ret;
163 }
164 EXPORT_SYMBOL(ath12k_core_resume_early);
165 
166 int ath12k_core_resume(struct ath12k_base *ab)
167 {
168 	long time_left;
169 	int ret;
170 
171 	ret = ath12k_core_continue_suspend_resume(ab);
172 	if (ret <= 0)
173 		return ret;
174 
175 	time_left = wait_for_completion_timeout(&ab->restart_completed,
176 						ATH12K_RESET_TIMEOUT_HZ);
177 	if (time_left == 0) {
178 		ath12k_warn(ab, "timeout while waiting for restart complete");
179 		return -ETIMEDOUT;
180 	}
181 
182 	return 0;
183 }
184 EXPORT_SYMBOL(ath12k_core_resume);
185 
/* Build the board-file lookup name into @name.
 *
 * @with_variant: append ",variant=<bdf_ext>" when a variant string was
 *	found (via SMBIOS or DT).
 * @bus_type_mode: emit only "bus=<bus>" (used as the most generic
 *	regdb lookup key).
 * @with_default: substitute ATH12K_BOARD_ID_DEFAULT for the firmware
 *	reported board id (fallback name).
 *
 * Always returns 0.
 */
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					   size_t name_len, bool with_variant,
					   bool bus_type_mode, bool with_default)
{
	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };

	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
		scnprintf(variant, sizeof(variant), ",variant=%s",
			  ab->qmi.target.bdf_ext);

	switch (ab->id.bdf_search) {
	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
		/* PCI-style search: include vendor/device/subsystem ids */
		if (bus_type_mode)
			scnprintf(name, name_len,
				  "bus=%s",
				  ath12k_bus_str(ab->hif.bus));
		else
			scnprintf(name, name_len,
				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
				  ath12k_bus_str(ab->hif.bus),
				  ab->id.vendor, ab->id.device,
				  ab->id.subsystem_vendor,
				  ab->id.subsystem_device,
				  ab->qmi.target.chip_id,
				  ab->qmi.target.board_id,
				  variant);
		break;
	default:
		/* QMI-only search: chip id plus (possibly default) board id */
		scnprintf(name, name_len,
			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
			  ath12k_bus_str(ab->hif.bus),
			  ab->qmi.target.chip_id,
			  with_default ?
			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
			  variant);
		break;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);

	return 0;
}
229 
/* Primary board name: full id set plus the variant suffix. */
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					 size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
}
235 
/* Fallback board name: no variant suffix, default board id. */
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
}
241 
/* Most generic name ("bus=<bus>"), used as the last regdb lookup key. */
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
}
247 
248 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
249 						    const char *file)
250 {
251 	const struct firmware *fw;
252 	char path[100];
253 	int ret;
254 
255 	if (!file)
256 		return ERR_PTR(-ENOENT);
257 
258 	ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
259 
260 	ret = firmware_request_nowarn(&fw, path, ab->dev);
261 	if (ret)
262 		return ERR_PTR(ret);
263 
264 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
265 		   path, fw->size);
266 
267 	return fw;
268 }
269 
/* Release board data fetched by the fetch_* helpers and reset @bd.
 * bd->fw may hold an ERR_PTR left by a failed firmware request, hence
 * the IS_ERR() guard (release_firmware() itself tolerates NULL).
 */
void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	if (!IS_ERR(bd->fw))
		release_firmware(bd->fw);

	memset(bd, 0, sizeof(*bd));
}
277 
/* Walk the IEs inside one ATH12K_BD_IE_BOARD/ATH12K_BD_IE_REGDB
 * container looking for a name IE matching @boardname followed by its
 * data IE.
 *
 * On success bd->data/bd->len point INTO @buf (no copy is made), so
 * the underlying firmware blob must stay alive while bd is in use.
 * Returns 0 on match, -ENOENT when no matching entry exists, -EINVAL
 * on a malformed IE stream.
 */
static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
					 struct ath12k_board_data *bd,
					 const void *buf, size_t buf_len,
					 const char *boardname,
					 int ie_id,
					 int name_id,
					 int data_id)
{
	const struct ath12k_fw_ie *hdr;
	bool name_match_found;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	name_match_found = false;

	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath12k_fw_ie)) {
		hdr = buf;
		board_ie_id = le32_to_cpu(hdr->id);
		board_ie_len = le32_to_cpu(hdr->len);
		board_ie_data = hdr->data;

		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		/* payload is 4-byte aligned; a declared length larger than
		 * what remains means the file is truncated/corrupt
		 */
		if (buf_len < ALIGN(board_ie_len, 4)) {
			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
				   ath12k_bd_ie_type_str(ie_id),
				   buf_len, ALIGN(board_ie_len, 4));
			ret = -EINVAL;
			goto out;
		}

		if (board_ie_id == name_id) {
			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
					board_ie_data, board_ie_len);

			/* name IEs are not NUL terminated: compare length
			 * first, then the raw bytes
			 */
			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, strlen(boardname));
			if (ret)
				goto next;

			name_match_found = true;
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found match %s for name '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);
		} else if (board_ie_id == data_id) {
			/* data IE only counts if the preceding name matched */
			if (!name_match_found)
				/* no match found */
				goto next;

			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found %s for '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);

			bd->data = board_ie_data;
			bd->len = board_ie_len;

			ret = 0;
			goto out;
		} else {
			ath12k_warn(ab, "unknown %s id found: %d\n",
				    ath12k_bd_ie_type_str(ie_id),
				    board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = ALIGN(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	ret = -ENOENT;

out:
	return ret;
}
362 
/* Fetch board/regdb data for @boardname from the API-2 container file
 * (ATH12K_BOARD_API2_FILE): verify the magic, then scan the top-level
 * IE stream for @ie_id_match and let ath12k_core_parse_bd_ie_board()
 * match name/data inside it.
 *
 * Reuses an already-requested bd->fw when the caller retries with a
 * different board name. On any failure the firmware is released via
 * ath12k_core_free_bdf(); on success bd->data points into bd->fw.
 */
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
					      struct ath12k_board_data *bd,
					      const char *boardname,
					      int ie_id_match,
					      int name_id,
					      int data_id)
{
	size_t len, magic_len;
	const u8 *data;
	char *filename, filepath[100];
	size_t ie_len;
	struct ath12k_fw_ie *hdr;
	int ret, ie_id;

	filename = ATH12K_BOARD_API2_FILE;

	/* only request the file once across repeated lookups */
	if (!bd->fw)
		bd->fw = ath12k_core_firmware_request(ab, filename);

	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	data = bd->fw->data;
	len = bd->fw->size;

	/* filepath is only used for error/debug messages below */
	ath12k_core_create_firmware_path(ab, filename,
					 filepath, sizeof(filepath));

	/* magic has extra null byte padded */
	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
		ath12k_err(ab, "found invalid board magic\n");
		ret = -EINVAL;
		goto err;
	}

	/* magic is padded to 4 bytes */
	magic_len = ALIGN(magic_len, 4);
	if (len < magic_len) {
		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	data += magic_len;
	len -= magic_len;

	/* walk the top-level IE stream */
	while (len > sizeof(struct ath12k_fw_ie)) {
		hdr = (struct ath12k_fw_ie *)data;
		ie_id = le32_to_cpu(hdr->id);
		ie_len = le32_to_cpu(hdr->len);

		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < ALIGN(ie_len, 4)) {
			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
				   ie_id, ie_len, len);
			ret = -EINVAL;
			goto err;
		}

		if (ie_id == ie_id_match) {
			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
							    ie_len,
							    boardname,
							    ie_id_match,
							    name_id,
							    data_id);
			if (ret == -ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				goto err;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = ALIGN(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	/* reached either on a successful match or after scanning every IE;
	 * in the latter case bd->data/bd->len are still unset
	 */
	if (!bd->data || !bd->len) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to fetch %s for %s from %s\n",
			   ath12k_bd_ie_type_str(ie_id_match),
			   boardname, filepath);
		ret = -ENODATA;
		goto err;
	}

	return 0;

err:
	ath12k_core_free_bdf(ab, bd);
	return ret;
}
473 
/* Fetch an API-1 (legacy) board file: the whole blob is the board data.
 * Note bd->fw deliberately keeps the ERR_PTR on failure; callers and
 * ath12k_core_free_bdf() check it with IS_ERR().
 */
int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
				       struct ath12k_board_data *bd,
				       char *filename)
{
	bd->fw = ath12k_core_firmware_request(ab, filename);
	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	bd->data = bd->fw->data;
	bd->len = bd->fw->size;

	return 0;
}
487 
#define BOARD_NAME_SIZE 200
/* Fetch the board data file, trying in order:
 *   1. API-2 container, exact board name (with variant),
 *   2. API-2 container, fallback name (default board id, no variant),
 *   3. API-1 raw board.bin (ATH12K_DEFAULT_BOARD_FILE).
 * Logs every attempted name on total failure.
 */
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
	char *filename, filepath[100];
	int bd_api;
	int ret;

	filename = ATH12K_BOARD_API2_FILE;

	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
	if (ret) {
		ath12k_err(ab, "failed to create board name: %d", ret);
		return ret;
	}

	bd_api = 2;
	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
						     sizeof(fallback_boardname));
	if (ret) {
		ath12k_err(ab, "failed to create fallback board name: %d", ret);
		return ret;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	bd_api = 1;
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
	if (ret) {
		ath12k_core_create_firmware_path(ab, filename,
						 filepath, sizeof(filepath));
		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
			   boardname, filepath);
		/* only log the fallback name if it differs from the primary */
		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
				   fallback_boardname, filepath);

		ath12k_err(ab, "failed to fetch board.bin from %s\n",
			   ab->hw_params->fw.dir);
		return ret;
	}

success:
	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
	return 0;
}
546 
/* Fetch the regulatory database, trying in order:
 *   1. API-2 container, full board name,
 *   2. API-2 container, generic "bus=<bus>" name,
 *   3. standalone ATH12K_REGDB_FILE_NAME.
 * All failures are debug-level only: regdb is optional.
 */
int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
	int ret;

	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
						     BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create default board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
	if (ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);

exit:
	if (!ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");

	return ret;
}
592 
593 u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
594 {
595 	if (ab->num_radios == 2)
596 		return TARGET_NUM_STATIONS_DBS;
597 	else if (ab->num_radios == 3)
598 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
599 	return TARGET_NUM_STATIONS_SINGLE;
600 }
601 
602 u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
603 {
604 	if (ab->num_radios == 2)
605 		return TARGET_NUM_PEERS_PDEV_DBS;
606 	else if (ab->num_radios == 3)
607 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
608 	return TARGET_NUM_PEERS_PDEV_SINGLE;
609 }
610 
611 u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
612 {
613 	if (ab->num_radios == 2)
614 		return TARGET_NUM_TIDS(DBS);
615 	else if (ab->num_radios == 3)
616 		return TARGET_NUM_TIDS(DBS_SBS);
617 	return TARGET_NUM_TIDS(SINGLE);
618 }
619 
620 struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
621 						  int index)
622 {
623 	struct device *dev = ab->dev;
624 	struct reserved_mem *rmem;
625 	struct device_node *node;
626 
627 	node = of_parse_phandle(dev->of_node, "memory-region", index);
628 	if (!node) {
629 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
630 			   "failed to parse memory-region for index %d\n", index);
631 		return NULL;
632 	}
633 
634 	rmem = of_reserved_mem_lookup(node);
635 	of_node_put(node);
636 	if (!rmem) {
637 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
638 			   "unable to get memory-region for index %d\n", index);
639 		return NULL;
640 	}
641 
642 	return rmem;
643 }
644 
645 static inline
646 void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
647 {
648 	struct ath12k_hw_group *ag = ab->ag;
649 
650 	lockdep_assert_held(&ag->mutex);
651 
652 	if (ab->hw_group_ref) {
653 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
654 			   ag->id);
655 		return;
656 	}
657 
658 	ab->hw_group_ref = true;
659 	ag->num_started++;
660 
661 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
662 		   ag->id, ag->num_started);
663 }
664 
665 static inline
666 void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
667 {
668 	struct ath12k_hw_group *ag = ab->ag;
669 
670 	lockdep_assert_held(&ag->mutex);
671 
672 	if (!ab->hw_group_ref) {
673 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
674 			   ag->id);
675 		return;
676 	}
677 
678 	ab->hw_group_ref = false;
679 	ag->num_started--;
680 
681 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
682 		   ag->id, ag->num_started);
683 }
684 
/* Stop one device: drop its group reference, stop firmware (unless we
 * are in crash-flush, i.e. firmware is already gone), then tear down
 * ACPI, REO rings, HIF, WMI and DP — in that order.
 */
static void ath12k_core_stop(struct ath12k_base *ab)
{
	ath12k_core_to_group_ref_put(ab);

	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_qmi_firmware_stop(ab);

	ath12k_acpi_stop(ab);

	ath12k_dp_rx_pdev_reo_cleanup(ab);
	ath12k_hif_stop(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_free(ab);

	/* De-Init of components as needed */
}
701 
/* dmi_walk() callback: parse the vendor SMBIOS record carrying the
 * regulatory country code and the BDF variant string, storing results
 * in ab->new_alpha2 and ab->qmi.target.bdf_ext respectively.
 */
static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
	struct ath12k_base *ab = data;
	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
	ssize_t copied;
	size_t len;
	int i;

	/* a variant was already found in an earlier record: keep it */
	if (ab->qmi.target.bdf_ext[0] != '\0')
		return;

	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
		return;

	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "wrong smbios bdf ext type length (%d).\n",
			   hdr->length);
		return;
	}

	/* new_alpha2 is shared state, update it under base_lock */
	spin_lock_bh(&ab->base_lock);

	switch (smbios->country_code_flag) {
	case ATH12K_SMBIOS_CC_ISO:
		/* two ISO alpha-2 characters packed into a u16 */
		ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
		ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
			   ab->new_alpha2[0], ab->new_alpha2[1]);
		break;
	case ATH12K_SMBIOS_CC_WW:
		/* "00" selects the worldwide regulatory domain */
		ab->new_alpha2[0] = '0';
		ab->new_alpha2[1] = '0';
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
			   smbios->country_code_flag);
		break;
	}

	spin_unlock_bh(&ab->base_lock);

	if (!smbios->bdf_enabled) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
		return;
	}

	/* Only one string exists (per spec) */
	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant magic does not match.\n");
		return;
	}

	/* reject variants containing non-printable/non-ASCII characters */
	len = min_t(size_t,
		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
	for (i = 0; i < len; i++) {
		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "bdf variant name contains non ascii chars.\n");
			return;
		}
	}

	/* Copy extension name without magic prefix */
	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
			 sizeof(ab->qmi.target.bdf_ext));
	if (copied < 0) {
		/* -E2BIG: source would not fit, buffer left truncated */
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant string is longer than the buffer can accommodate\n");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT,
		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
}
781 
782 int ath12k_core_check_smbios(struct ath12k_base *ab)
783 {
784 	ab->qmi.target.bdf_ext[0] = '\0';
785 	dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
786 
787 	if (ab->qmi.target.bdf_ext[0] == '\0')
788 		return -ENODATA;
789 
790 	return 0;
791 }
792 
/* Per-SoC bring-up: start the QMI service, create debugfs, power up
 * the hif. On power-up failure both debugfs and QMI are unwound.
 */
static int ath12k_core_soc_create(struct ath12k_base *ab)
{
	int ret;

	/* module-level ftm_mode parameter overrides the firmware mode */
	if (ath12k_ftm_mode) {
		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
		ath12k_info(ab, "Booting in ftm mode\n");
	}

	ret = ath12k_qmi_init_service(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
		return ret;
	}

	ath12k_debugfs_soc_create(ab);

	ret = ath12k_hif_power_up(ab);
	if (ret) {
		ath12k_err(ab, "failed to power up :%d\n", ret);
		goto err_qmi_deinit;
	}

	ath12k_debugfs_pdev_create(ab);

	return 0;

err_qmi_deinit:
	/* debugfs was created after qmi, destroy it before qmi deinit */
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
	return ret;
}
825 
/* Per-SoC teardown, reverse of ath12k_core_soc_create() (plus freeing
 * regulatory state): power down first, then reg/debugfs/QMI.
 */
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
	ath12k_hif_power_down(ab, false);
	ath12k_reg_free(ab);
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
}
833 
/* Allocate the per-pdev datapath state for every radio of @ab. */
static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
	int ret = ath12k_dp_pdev_alloc(ab);

	if (ret)
		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);

	return ret;
}
846 
/* Inverse of ath12k_core_pdev_create(): free per-pdev datapath state. */
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
	ath12k_dp_pdev_free(ab);
}
851 
/* Bring one device to a running state: attach WMI/HTC, start the HIF,
 * connect the HTT/WMI services, wait for firmware readiness events,
 * set up REO and the firmware operating mode, and finally take a group
 * started-reference. The goto ladder unwinds in strict reverse order.
 * Caller must hold ab->core_lock.
 */
static int ath12k_core_start(struct ath12k_base *ab)
{
	int ret;

	lockdep_assert_held(&ab->core_lock);

	ret = ath12k_wmi_attach(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
		return ret;
	}

	ret = ath12k_htc_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init htc: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_hif_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start HIF: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_htc_wait_target(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_dp_htt_connect(&ab->dp);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_connect(ab);
	if (ret) {
		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_htc_start(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to start HTC: %d\n", ret);
		goto err_hif_stop;
	}

	/* firmware announces its capabilities via the service ready event */
	ret = ath12k_wmi_wait_for_service_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
			   ret);
		goto err_hif_stop;
	}

	ath12k_dp_cc_config(ab);

	ret = ath12k_dp_rx_pdev_reo_setup(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
		goto err_hif_stop;
	}

	ath12k_dp_hal_rx_desc_init(ab);

	ret = ath12k_wmi_cmd_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
		goto err_reo_cleanup;
	}

	ret = ath12k_wmi_wait_for_unified_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	/* put hardware to DBS mode */
	if (ab->hw_params->single_pdev_only) {
		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
		if (ret) {
			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
			goto err_reo_cleanup;
		}
	}

	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
	if (ret) {
		ath12k_err(ab, "failed to send htt version request message: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	ath12k_acpi_set_dsm_func(ab);

	/* Indicate the core start in the appropriate group */
	ath12k_core_to_group_ref_get(ab);

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);
err_hif_stop:
	ath12k_hif_stop(ab);
err_wmi_detach:
	ath12k_wmi_detach(ab);
	return ret;
}
961 
/* Per-device unwind of ath12k_core_hw_group_start()'s pdev stage:
 * disable interrupts before destroying pdev state, under core_lock.
 */
static void ath12k_core_device_cleanup(struct ath12k_base *ab)
{
	mutex_lock(&ab->core_lock);

	ath12k_hif_irq_disable(ab);
	ath12k_core_pdev_destroy(ab);

	mutex_unlock(&ab->core_lock);
}
971 
/* Stop a whole hardware group: unregister from mac80211 first, then
 * clean each device up in reverse order, finally destroy the mac
 * allocations. Caller must hold ag->mutex.
 */
static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	lockdep_assert_held(&ag->mutex);

	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

	ath12k_mac_unregister(ag);

	/* tear devices down in reverse of start order */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ath12k_core_device_cleanup(ab);
	}

	ath12k_mac_destroy(ag);
}
995 
996 u8 ath12k_get_num_partner_link(struct ath12k *ar)
997 {
998 	struct ath12k_base *partner_ab, *ab = ar->ab;
999 	struct ath12k_hw_group *ag = ab->ag;
1000 	struct ath12k_pdev *pdev;
1001 	u8 num_link = 0;
1002 	int i, j;
1003 
1004 	lockdep_assert_held(&ag->mutex);
1005 
1006 	for (i = 0; i < ag->num_devices; i++) {
1007 		partner_ab = ag->ab[i];
1008 
1009 		for (j = 0; j < partner_ab->num_radios; j++) {
1010 			pdev = &partner_ab->pdevs[j];
1011 
1012 			/* Avoid the self link */
1013 			if (ar == pdev->ar)
1014 				continue;
1015 
1016 			num_link++;
1017 		}
1018 	}
1019 
1020 	return num_link;
1021 }
1022 
1023 static int __ath12k_mac_mlo_ready(struct ath12k *ar)
1024 {
1025 	u8 num_link = ath12k_get_num_partner_link(ar);
1026 	int ret;
1027 
1028 	if (num_link == 0)
1029 		return 0;
1030 
1031 	ret = ath12k_wmi_mlo_ready(ar);
1032 	if (ret) {
1033 		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
1034 			   ar->pdev_idx, ret);
1035 		return ret;
1036 	}
1037 
1038 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
1039 		   ar->pdev_idx);
1040 
1041 	return 0;
1042 }
1043 
1044 int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
1045 {
1046 	struct ath12k_hw *ah;
1047 	struct ath12k *ar;
1048 	int ret;
1049 	int i, j;
1050 
1051 	for (i = 0; i < ag->num_hw; i++) {
1052 		ah = ag->ah[i];
1053 		if (!ah)
1054 			continue;
1055 
1056 		for_each_ar(ah, ar, j) {
1057 			ar = &ah->radio[j];
1058 			ret = __ath12k_mac_mlo_ready(ar);
1059 			if (ret)
1060 				return ret;
1061 		}
1062 	}
1063 
1064 	return 0;
1065 }
1066 
1067 static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
1068 {
1069 	int ret, i;
1070 
1071 	if (!ag->mlo_capable)
1072 		return 0;
1073 
1074 	ret = ath12k_mac_mlo_setup(ag);
1075 	if (ret)
1076 		return ret;
1077 
1078 	for (i = 0; i < ag->num_devices; i++)
1079 		ath12k_dp_partner_cc_init(ag->ab[i]);
1080 
1081 	ret = ath12k_mac_mlo_ready(ag);
1082 	if (ret)
1083 		goto err_mlo_teardown;
1084 
1085 	return 0;
1086 
1087 err_mlo_teardown:
1088 	ath12k_mac_mlo_teardown(ag);
1089 
1090 	return ret;
1091 }
1092 
/* Start a hardware group: on first start allocate mac state, set up
 * MLO and register with mac80211; on every start create pdevs and
 * enable interrupts per device. Before ATH12K_GROUP_FLAG_REGISTERED is
 * set, failures unwind through the mlo/mac labels; afterwards the full
 * ath12k_core_hw_group_stop() is used. Caller must hold ag->mutex.
 */
static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int ret, i;

	lockdep_assert_held(&ag->mutex);

	/* restart case: mac is already registered, only redo pdevs */
	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
		goto core_pdev_create;

	ret = ath12k_mac_allocate(ag);
	if (WARN_ON(ret))
		return ret;

	ret = ath12k_core_mlo_setup(ag);
	if (WARN_ON(ret))
		goto err_mac_destroy;

	ret = ath12k_mac_register(ag);
	if (WARN_ON(ret))
		goto err_mlo_teardown;

	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

core_pdev_create:
	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ret = ath12k_core_pdev_create(ab);
		if (ret) {
			ath12k_err(ab, "failed to create pdev core %d\n", ret);
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		ath12k_hif_irq_enable(ab);

		/* -EOPNOTSUPP from rfkill config is not fatal */
		ret = ath12k_core_rfkill_config(ab);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

err:
	ath12k_core_hw_group_stop(ag);
	return ret;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

err_mac_destroy:
	ath12k_mac_destroy(ag);

	return ret;
}
1159 
1160 static int ath12k_core_start_firmware(struct ath12k_base *ab,
1161 				      enum ath12k_firmware_mode mode)
1162 {
1163 	int ret;
1164 
1165 	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
1166 				    &ab->qmi.ce_cfg.shadow_reg_v3_len);
1167 
1168 	ret = ath12k_qmi_firmware_start(ab, mode);
1169 	if (ret) {
1170 		ath12k_err(ab, "failed to send firmware start: %d\n", ret);
1171 		return ret;
1172 	}
1173 
1174 	return ret;
1175 }
1176 
1177 static inline
1178 bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
1179 {
1180 	lockdep_assert_held(&ag->mutex);
1181 
1182 	return (ag->num_started == ag->num_devices);
1183 }
1184 
1185 static void ath12k_fw_stats_pdevs_free(struct list_head *head)
1186 {
1187 	struct ath12k_fw_stats_pdev *i, *tmp;
1188 
1189 	list_for_each_entry_safe(i, tmp, head, list) {
1190 		list_del(&i->list);
1191 		kfree(i);
1192 	}
1193 }
1194 
1195 void ath12k_fw_stats_bcn_free(struct list_head *head)
1196 {
1197 	struct ath12k_fw_stats_bcn *i, *tmp;
1198 
1199 	list_for_each_entry_safe(i, tmp, head, list) {
1200 		list_del(&i->list);
1201 		kfree(i);
1202 	}
1203 }
1204 
1205 static void ath12k_fw_stats_vdevs_free(struct list_head *head)
1206 {
1207 	struct ath12k_fw_stats_vdev *i, *tmp;
1208 
1209 	list_for_each_entry_safe(i, tmp, head, list) {
1210 		list_del(&i->list);
1211 		kfree(i);
1212 	}
1213 }
1214 
1215 void ath12k_fw_stats_init(struct ath12k *ar)
1216 {
1217 	INIT_LIST_HEAD(&ar->fw_stats.vdevs);
1218 	INIT_LIST_HEAD(&ar->fw_stats.pdevs);
1219 	INIT_LIST_HEAD(&ar->fw_stats.bcn);
1220 	init_completion(&ar->fw_stats_complete);
1221 	init_completion(&ar->fw_stats_done);
1222 }
1223 
1224 void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
1225 {
1226 	ath12k_fw_stats_pdevs_free(&stats->pdevs);
1227 	ath12k_fw_stats_vdevs_free(&stats->vdevs);
1228 	ath12k_fw_stats_bcn_free(&stats->bcn);
1229 }
1230 
/* Drop all accumulated firmware stats and clear the per-request receive
 * counters, under data_lock, so a fresh stats request starts clean.
 */
void ath12k_fw_stats_reset(struct ath12k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ath12k_fw_stats_free(&ar->fw_stats);
	ar->fw_stats.num_vdev_recvd = 0;
	ar->fw_stats.num_bcn_recvd = 0;
	spin_unlock_bh(&ar->data_lock);
}
1239 
/* Trigger QMI host-cap processing on the partner device that follows @ab
 * in the group's device array.
 *
 * Note: 'found' is reassigned on every non-NULL iteration, so only the
 * first non-NULL device positioned after @ab is triggered — not every
 * remaining device in the group.
 */
static void ath12k_core_trigger_partner(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_base *partner_ab;
	bool found = false;
	int i;

	for (i = 0; i < ag->num_devices; i++) {
		partner_ab = ag->ab[i];
		if (!partner_ab)
			continue;

		if (found)
			ath12k_qmi_trigger_host_cap(partner_ab);

		found = (partner_ab == ab);
	}
}
1258 
/* Called once QMI reports the firmware is ready: start firmware, bring up
 * CE and DP, start this device's core and — when every device in the group
 * has started — perform the group-level start. If partner devices are
 * still pending, nudge the next one instead.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * set up here is unwound.
 */
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	int ret, i;

	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
	if (ret) {
		ath12k_err(ab, "failed to start firmware: %d\n", ret);
		return ret;
	}

	ret = ath12k_ce_init_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
		goto err_firmware_stop;
	}

	ret = ath12k_dp_alloc(ab);
	if (ret) {
		ath12k_err(ab, "failed to init DP: %d\n", ret);
		goto err_firmware_stop;
	}

	/* Lock order: group mutex first, then the device's core_lock; the
	 * error paths below unlock in the reverse order.
	 */
	mutex_lock(&ag->mutex);
	mutex_lock(&ab->core_lock);

	ret = ath12k_core_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start core: %d\n", ret);
		goto err_dp_free;
	}

	mutex_unlock(&ab->core_lock);

	if (ath12k_core_hw_group_start_ready(ag)) {
		ret = ath12k_core_hw_group_start(ag);
		if (ret) {
			ath12k_warn(ab, "unable to start hw group\n");
			goto err_core_stop;
		}
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
	} else {
		/* Group not complete yet; let the next partner proceed */
		ath12k_core_trigger_partner(ab);
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_core_stop:
	/* Stop every device of the group, highest index first */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}
	mutex_unlock(&ag->mutex);
	goto exit;

err_dp_free:
	ath12k_dp_free(ab);
	mutex_unlock(&ab->core_lock);
	mutex_unlock(&ag->mutex);

err_firmware_stop:
	ath12k_qmi_firmware_stop(ab);

exit:
	return ret;
}
1332 
/* Rebuild driver state after a firmware crash: release DP/CE/WMI state
 * tied to the dead firmware instance, re-initialize SRNG and run the
 * normal firmware-ready bring-up again.
 *
 * Returns 0 on success or a negative error code.
 */
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
	int ret;

	mutex_lock(&ab->core_lock);
	ath12k_dp_pdev_free(ab);
	ath12k_ce_cleanup_pipes(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_rx_pdev_reo_cleanup(ab);
	mutex_unlock(&ab->core_lock);

	ath12k_dp_free(ab);
	ath12k_hal_srng_deinit(ab);

	/* All vdev ids become free again for the restarted firmware */
	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		return ret;

	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	ret = ath12k_core_qmi_firmware_ready(ab);
	if (ret)
		goto err_hal_srng_deinit;

	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);

	return 0;

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);
	return ret;
}
1367 
1368 static void ath12k_rfkill_work(struct work_struct *work)
1369 {
1370 	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1371 	struct ath12k_hw_group *ag = ab->ag;
1372 	struct ath12k *ar;
1373 	struct ath12k_hw *ah;
1374 	struct ieee80211_hw *hw;
1375 	bool rfkill_radio_on;
1376 	int i, j;
1377 
1378 	spin_lock_bh(&ab->base_lock);
1379 	rfkill_radio_on = ab->rfkill_radio_on;
1380 	spin_unlock_bh(&ab->base_lock);
1381 
1382 	for (i = 0; i < ag->num_hw; i++) {
1383 		ah = ath12k_ag_to_ah(ag, i);
1384 		if (!ah)
1385 			continue;
1386 
1387 		for (j = 0; j < ah->num_radio; j++) {
1388 			ar = &ah->radio[j];
1389 			if (!ar)
1390 				continue;
1391 
1392 			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1393 		}
1394 
1395 		hw = ah->hw;
1396 		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1397 	}
1398 }
1399 
/* Halt a radio during recovery: finish scans, drop all peers, cancel
 * deferred work and detach the pdev from the RCU-protected active table.
 * Caller must hold the wiphy mutex.
 */
void ath12k_core_halt(struct ath12k *ar)
{
	struct list_head *pos, *n;
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ar->num_created_vdevs = 0;
	ar->allocated_vdev_map = 0;

	ath12k_mac_scan_finish(ar);
	ath12k_mac_peer_cleanup_all(ar);
	cancel_delayed_work_sync(&ar->scan.timeout);
	cancel_work_sync(&ar->regd_update_work);
	cancel_work_sync(&ar->regd_channel_update_work);
	cancel_work_sync(&ab->rfkill_work);
	cancel_work_sync(&ab->update_11d_work);

	/* Make the pdev invisible to RCU readers before dropping the vifs */
	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
	synchronize_rcu();

	spin_lock_bh(&ar->data_lock);
	list_for_each_safe(pos, n, &ar->arvifs)
		list_del_init(pos);
	spin_unlock_bh(&ar->data_lock);

	/* Fresh idr for the next start; pending tx mgmt entries were freed
	 * and the idr destroyed in the pre-reconfigure phase.
	 */
	idr_init(&ar->txmgmt_idr);
}
1428 
/* First phase of crash recovery: bump the crash counter, stop mac80211
 * queues, drain pending tx and complete every waitable the dead firmware
 * can no longer signal, so nothing sleeps forever during recovery.
 */
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_hw *ah;
	int i, j;

	spin_lock_bh(&ab->base_lock);
	ab->stats.fw_crash_counter++;
	spin_unlock_bh(&ab->base_lock);

	if (ab->is_reset)
		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
		    ah->state == ATH12K_HW_STATE_TM)
			continue;

		wiphy_lock(ah->hw->wiphy);

		/* If queue 0 is stopped, it is safe to assume that all
		 * other queues are stopped by driver via
		 * ieee80211_stop_queues() below. This means, there is
		 * no need to stop it again and hence continue
		 */
		if (ieee80211_queue_stopped(ah->hw, 0)) {
			wiphy_unlock(ah->hw->wiphy);
			continue;
		}

		ieee80211_stop_queues(ah->hw);

		for (j = 0; j < ah->num_radio; j++) {
			ar = &ah->radio[j];

			ath12k_mac_drain_tx(ar);
			ar->state_11d = ATH12K_11D_IDLE;
			/* Complete everything a waiter could be blocked on;
			 * the firmware that would have signalled these is gone.
			 */
			complete(&ar->completed_11d_scan);
			complete(&ar->scan.started);
			complete_all(&ar->scan.completed);
			complete(&ar->scan.on_channel);
			complete(&ar->peer_assoc_done);
			complete(&ar->peer_delete_done);
			complete(&ar->install_key_done);
			complete(&ar->vdev_setup_done);
			complete(&ar->vdev_delete_done);
			complete(&ar->bss_survey_done);
			complete(&ar->regd_update_completed);

			wake_up(&ar->dp.tx_empty_waitq);
			idr_for_each(&ar->txmgmt_idr,
				     ath12k_mac_tx_mgmt_pending_free, ar);
			idr_destroy(&ar->txmgmt_idr);
			wake_up(&ar->txmgmt_empty_waitq);

			/* Monitor vdev state is rebuilt from scratch on restart */
			ar->monitor_vdev_id = -1;
			ar->monitor_vdev_created = false;
			ar->monitor_started = false;
		}

		wiphy_unlock(ah->hw->wiphy);
	}

	wake_up(&ab->wmi_ab.tx_credits_wq);
	wake_up(&ab->peer_mapping_wq);
}
1497 
1498 static void ath12k_update_11d(struct work_struct *work)
1499 {
1500 	struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
1501 	struct ath12k *ar;
1502 	struct ath12k_pdev *pdev;
1503 	struct wmi_set_current_country_arg arg = {};
1504 	int ret, i;
1505 
1506 	spin_lock_bh(&ab->base_lock);
1507 	memcpy(&arg.alpha2, &ab->new_alpha2, 2);
1508 	spin_unlock_bh(&ab->base_lock);
1509 
1510 	ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
1511 		   arg.alpha2[0], arg.alpha2[1]);
1512 
1513 	for (i = 0; i < ab->num_radios; i++) {
1514 		pdev = &ab->pdevs[i];
1515 		ar = pdev->ar;
1516 
1517 		memcpy(&ar->alpha2, &arg.alpha2, 2);
1518 
1519 		reinit_completion(&ar->regd_update_completed);
1520 
1521 		ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
1522 		if (ret)
1523 			ath12k_warn(ar->ab,
1524 				    "pdev id %d failed set current country code: %d\n",
1525 				    i, ret);
1526 	}
1527 }
1528 
/* Second phase of crash recovery: walk every hardware abstraction's state
 * machine. Running instances have their radios halted and move to
 * RESTARTING; an instance that crashes again before a restart completes is
 * declared WEDGED and will not be restarted.
 */
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	struct ath12k *ar;
	int i, j;

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
			continue;

		wiphy_lock(ah->hw->wiphy);
		mutex_lock(&ah->hw_mutex);

		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;

			for (j = 0; j < ah->num_radio; j++) {
				ar = &ah->radio[j];
				ath12k_core_halt(ar);
			}

			break;
		case ATH12K_HW_STATE_OFF:
			/* Filtered out above; kept for an exhaustive switch */
			ath12k_warn(ab,
				    "cannot restart hw %d that hasn't been started\n",
				    i);
			break;
		case ATH12K_HW_STATE_RESTARTING:
			break;
		case ATH12K_HW_STATE_RESTARTED:
			/* Crashed again mid-restart: give up on this hw */
			ah->state = ATH12K_HW_STATE_WEDGED;
			fallthrough;
		case ATH12K_HW_STATE_WEDGED:
			ath12k_warn(ab,
				    "device is wedged, will not restart hw %d\n", i);
			break;
		case ATH12K_HW_STATE_TM:
			ath12k_warn(ab, "fw mode reset done radio %d\n", i);
			break;
		}

		mutex_unlock(&ah->hw_mutex);
		wiphy_unlock(ah->hw->wiphy);
	}

	complete(&ab->driver_recovery);
}
1579 
/* Restart worker scheduled after a firmware crash: reconfigure the driver
 * and, once every device of the group is ready again, ask mac80211 to
 * restart each hardware instance.
 */
static void ath12k_core_restart(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	int ret, i;

	ret = ath12k_core_reconfigure_on_crash(ab);
	if (ret) {
		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
		return;
	}

	if (ab->is_reset) {
		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
			/* Nothing registered with mac80211; the reset is
			 * complete for this device.
			 */
			atomic_dec(&ab->reset_count);
			complete(&ab->reset_complete);
			ab->is_reset = false;
			atomic_set(&ab->fail_cont_count, 0);
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
		}

		mutex_lock(&ag->mutex);

		/* Partner devices still restarting; last one in will do it */
		if (!ath12k_core_hw_group_start_ready(ag)) {
			mutex_unlock(&ag->mutex);
			goto exit_restart;
		}

		for (i = 0; i < ag->num_hw; i++) {
			ah = ath12k_ag_to_ah(ag, i);
			ieee80211_restart_hw(ah->hw);
		}

		mutex_unlock(&ag->mutex);
	}

exit_restart:
	complete(&ab->restart_completed);
}
1620 
/* Full device reset worker: quiesce mac80211 state, collect a coredump,
 * power the device down and — once every partner device in the group has
 * also dropped its started reference — power the whole group back up.
 * Gives up permanently after too many consecutive failed recoveries.
 */
static void ath12k_core_reset(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
	struct ath12k_hw_group *ag = ab->ag;
	int reset_count, fail_cont_count, i;
	long time_left;

	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
		return;
	}

	/* Sometimes the recovery will fail and then the next all recovery fail,
	 * this is to avoid infinite recovery since it can not recovery success
	 */
	fail_cont_count = atomic_read(&ab->fail_cont_count);

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
		return;

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
	    time_before(jiffies, ab->reset_fail_timeout))
		return;

	reset_count = atomic_inc_return(&ab->reset_count);

	if (reset_count > 1) {
		/* Sometimes it happened another reset worker before the previous one
		 * completed, then the second reset worker will destroy the previous one,
		 * thus below is to avoid that.
		 */
		ath12k_warn(ab, "already resetting count %d\n", reset_count);

		reinit_completion(&ab->reset_complete);
		time_left = wait_for_completion_timeout(&ab->reset_complete,
							ATH12K_RESET_TIMEOUT_HZ);
		if (time_left) {
			/* Previous reset finished in time; skip this one */
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
			atomic_dec(&ab->reset_count);
			return;
		}

		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
		/* Record the continuous recovery fail count when recovery failed*/
		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");

	ab->is_reset = true;
	atomic_set(&ab->recovery_count, 0);

	ath12k_coredump_collect(ab);
	ath12k_core_pre_reconfigure_recovery(ab);

	ath12k_core_post_reconfigure_recovery(ab);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");

	/* Quiesce all interrupt sources before powering the device down */
	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	ath12k_hif_power_down(ab, false);

	/* prepare for power up */
	ab->qmi.num_radios = U8_MAX;

	mutex_lock(&ag->mutex);
	ath12k_core_to_group_ref_put(ab);

	/* Only the last device to drop its group reference powers the
	 * whole group back up.
	 */
	if (ag->num_started > 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "waiting for %d partner device(s) to reset\n",
			   ag->num_started);
		mutex_unlock(&ag->mutex);
		return;
	}

	/* Prepare MLO global memory region for power up */
	ath12k_qmi_reset_mlo_mem(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		ath12k_hif_power_up(ab);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
	}

	mutex_unlock(&ag->mutex);
}
1713 
/* Early, pre-firmware initialization: resolve the hw params for the
 * detected chip and map its firmware assets.
 *
 * Returns 0 on success or the error from ath12k_hw_init().
 */
int ath12k_core_pre_init(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_hw_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init hw params: %d\n", ret);
		return ret;
	}

	ath12k_fw_map(ab);

	return 0;
}
1728 
1729 static int ath12k_core_panic_handler(struct notifier_block *nb,
1730 				     unsigned long action, void *data)
1731 {
1732 	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
1733 					      panic_nb);
1734 
1735 	return ath12k_hif_panic_handler(ab);
1736 }
1737 
1738 static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
1739 {
1740 	ab->panic_nb.notifier_call = ath12k_core_panic_handler;
1741 
1742 	return atomic_notifier_chain_register(&panic_notifier_list,
1743 					      &ab->panic_nb);
1744 }
1745 
/* Remove the notifier installed by ath12k_core_panic_notifier_register() */
static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &ab->panic_nb);
}
1751 
1752 static inline
1753 bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1754 {
1755 	lockdep_assert_held(&ag->mutex);
1756 
1757 	return (ag->num_probed == ag->num_devices);
1758 }
1759 
1760 static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
1761 {
1762 	struct ath12k_hw_group *ag;
1763 	int count = 0;
1764 
1765 	lockdep_assert_held(&ath12k_hw_group_mutex);
1766 
1767 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1768 		count++;
1769 
1770 	ag = kzalloc(sizeof(*ag), GFP_KERNEL);
1771 	if (!ag)
1772 		return NULL;
1773 
1774 	ag->id = count;
1775 	list_add(&ag->list, &ath12k_hw_group_list);
1776 	mutex_init(&ag->mutex);
1777 	ag->mlo_capable = false;
1778 
1779 	return ag;
1780 }
1781 
/* Unlink a group from the global list and release it; called once the last
 * probed device has left the group.
 */
static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
{
	mutex_lock(&ath12k_hw_group_mutex);

	list_del(&ag->list);
	kfree(ag);

	mutex_unlock(&ath12k_hw_group_mutex);
}
1791 
1792 static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
1793 {
1794 	struct ath12k_hw_group *ag;
1795 	int i;
1796 
1797 	if (!ab->dev->of_node)
1798 		return NULL;
1799 
1800 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1801 		for (i = 0; i < ag->num_devices; i++)
1802 			if (ag->wsi_node[i] == ab->dev->of_node)
1803 				return ag;
1804 
1805 	return NULL;
1806 }
1807 
1808 static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
1809 				    struct ath12k_base *ab)
1810 {
1811 	struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
1812 	struct device_node *tx_endpoint, *next_rx_endpoint;
1813 	int device_count = 0;
1814 
1815 	next_wsi_dev = wsi_dev;
1816 
1817 	if (!next_wsi_dev)
1818 		return -ENODEV;
1819 
1820 	do {
1821 		ag->wsi_node[device_count] = next_wsi_dev;
1822 
1823 		tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
1824 		if (!tx_endpoint) {
1825 			of_node_put(next_wsi_dev);
1826 			return -ENODEV;
1827 		}
1828 
1829 		next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
1830 		if (!next_rx_endpoint) {
1831 			of_node_put(next_wsi_dev);
1832 			of_node_put(tx_endpoint);
1833 			return -ENODEV;
1834 		}
1835 
1836 		of_node_put(tx_endpoint);
1837 		of_node_put(next_wsi_dev);
1838 
1839 		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
1840 		if (!next_wsi_dev) {
1841 			of_node_put(next_rx_endpoint);
1842 			return -ENODEV;
1843 		}
1844 
1845 		of_node_put(next_rx_endpoint);
1846 
1847 		device_count++;
1848 		if (device_count > ATH12K_MAX_DEVICES) {
1849 			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
1850 				    device_count, ATH12K_MAX_DEVICES);
1851 			of_node_put(next_wsi_dev);
1852 			return -EINVAL;
1853 		}
1854 	} while (wsi_dev != next_wsi_dev);
1855 
1856 	of_node_put(next_wsi_dev);
1857 	ag->num_devices = device_count;
1858 
1859 	return 0;
1860 }
1861 
1862 static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
1863 				     struct ath12k_base *ab)
1864 {
1865 	int i, wsi_controller_index = -1, node_index = -1;
1866 	bool control;
1867 
1868 	for (i = 0; i < ag->num_devices; i++) {
1869 		control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
1870 		if (control)
1871 			wsi_controller_index = i;
1872 
1873 		if (ag->wsi_node[i] == ab->dev->of_node)
1874 			node_index = i;
1875 	}
1876 
1877 	if (wsi_controller_index == -1) {
1878 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
1879 		return -EINVAL;
1880 	}
1881 
1882 	if (node_index == -1) {
1883 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
1884 		return -EINVAL;
1885 	}
1886 
1887 	ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
1888 		ag->num_devices;
1889 
1890 	return 0;
1891 }
1892 
/* Find or create the hardware group for a newly probed device and add the
 * device to it. Grouping comes from the WSI chain in the device tree;
 * devices without valid DT grouping information each get a private
 * single-device group with id ATH12K_INVALID_GROUP_ID.
 *
 * Returns the group on success, NULL on allocation failure.
 */
static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
{
	struct ath12k_wsi_info *wsi = &ab->wsi_info;
	struct ath12k_hw_group *ag;

	lockdep_assert_held(&ath12k_hw_group_mutex);

	/* No multi-device grouping in factory test mode */
	if (ath12k_ftm_mode)
		goto invalid_group;

	/* The grouping of multiple devices will be done based on device tree file.
	 * The platforms that do not have any valid group information would have
	 * each device to be part of its own invalid group.
	 *
	 * We use group id ATH12K_INVALID_GROUP_ID for single device group
	 * which didn't have dt entry or wrong dt entry, there could be many
	 * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So
	 * default group id of ATH12K_INVALID_GROUP_ID combined with
	 * num devices in ath12k_hw_group determines if the group is
	 * multi device or single device group
	 */

	ag = ath12k_core_hw_group_find_by_dt(ab);
	if (!ag) {
		ag = ath12k_core_hw_group_alloc(ab);
		if (!ag) {
			ath12k_warn(ab, "unable to create new hw group\n");
			return NULL;
		}

		if (ath12k_core_get_wsi_info(ag, ab) ||
		    ath12k_core_get_wsi_index(ag, ab)) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "unable to get wsi info from dt, grouping single device");
			ag->id = ATH12K_INVALID_GROUP_ID;
			ag->num_devices = 1;
			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
			wsi->index = 0;
		}

		goto exit;
	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
			   ag->id);
		goto invalid_group;
	} else {
		if (ath12k_core_get_wsi_index(ag, ab))
			goto invalid_group;
		goto exit;
	}

invalid_group:
	ag = ath12k_core_hw_group_alloc(ab);
	if (!ag) {
		ath12k_warn(ab, "unable to create new hw group\n");
		return NULL;
	}

	ag->id = ATH12K_INVALID_GROUP_ID;
	ag->num_devices = 1;
	wsi->index = 0;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");

exit:
	/* NOTE(review): group already holds all expected devices —
	 * presumably a stale or duplicate probe; fall back to a fresh
	 * single-device group rather than overflowing ag->ab[].
	 */
	if (ag->num_probed >= ag->num_devices) {
		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
		goto invalid_group;
	}

	ab->device_id = ag->num_probed++;
	ag->ab[ab->device_id] = ab;
	ab->ag = ag;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
		   ag->id, ag->num_devices, wsi->index);

	return ag;
}
1972 
/* Detach @ab from its hardware group; the group itself is freed when the
 * last probed device leaves.
 */
void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	u8 device_id = ab->device_id;
	int num_probed;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (WARN_ON(device_id >= ag->num_devices)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	if (WARN_ON(ag->ab[device_id] != ab)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	ag->ab[device_id] = NULL;
	ab->ag = NULL;
	ab->device_id = ATH12K_INVALID_DEVICE_ID;

	if (ag->num_probed)
		ag->num_probed--;

	/* Sample under the lock; the free below must run after unlock
	 * because ath12k_core_hw_group_free() takes the global group mutex.
	 */
	num_probed = ag->num_probed;

	mutex_unlock(&ag->mutex);

	if (!num_probed)
		ath12k_core_hw_group_free(ag);
}
2008 
2009 static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
2010 {
2011 	struct ath12k_base *ab;
2012 	int i;
2013 
2014 	if (WARN_ON(!ag))
2015 		return;
2016 
2017 	for (i = 0; i < ag->num_devices; i++) {
2018 		ab = ag->ab[i];
2019 		if (!ab)
2020 			continue;
2021 
2022 		ath12k_core_soc_destroy(ab);
2023 	}
2024 }
2025 
/* Unregister and stop the whole group exactly once; subsequent calls
 * (one may arrive per device) are no-ops thanks to the UNREGISTER flag.
 */
void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);

	ath12k_core_hw_group_stop(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}

	mutex_unlock(&ag->mutex);
}
2057 
2058 static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
2059 {
2060 	struct ath12k_base *ab;
2061 	int i, ret;
2062 
2063 	lockdep_assert_held(&ag->mutex);
2064 
2065 	for (i = 0; i < ag->num_devices; i++) {
2066 		ab = ag->ab[i];
2067 		if (!ab)
2068 			continue;
2069 
2070 		mutex_lock(&ab->core_lock);
2071 
2072 		ret = ath12k_core_soc_create(ab);
2073 		if (ret) {
2074 			mutex_unlock(&ab->core_lock);
2075 			ath12k_err(ab, "failed to create soc core: %d\n", ret);
2076 			return ret;
2077 		}
2078 
2079 		mutex_unlock(&ab->core_lock);
2080 	}
2081 
2082 	return 0;
2083 }
2084 
2085 void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
2086 {
2087 	struct ath12k_base *ab;
2088 	int i;
2089 
2090 	if (ath12k_ftm_mode)
2091 		return;
2092 
2093 	lockdep_assert_held(&ag->mutex);
2094 
2095 	if (ag->num_devices == 1) {
2096 		ab = ag->ab[0];
2097 		/* QCN9274 firmware uses firmware IE for MLO advertisement */
2098 		if (ab->fw.fw_features_valid) {
2099 			ag->mlo_capable =
2100 				ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
2101 			return;
2102 		}
2103 
2104 		/* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
2105 		ag->mlo_capable = ab->single_chip_mlo_support;
2106 		return;
2107 	}
2108 
2109 	ag->mlo_capable = true;
2110 
2111 	for (i = 0; i < ag->num_devices; i++) {
2112 		ab = ag->ab[i];
2113 		if (!ab)
2114 			continue;
2115 
2116 		/* even if 1 device's firmware feature indicates MLO
2117 		 * unsupported, make MLO unsupported for the whole group
2118 		 */
2119 		if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
2120 			ag->mlo_capable = false;
2121 			return;
2122 		}
2123 	}
2124 }
2125 
/* Per-device entry point from the bus probe path: register a panic
 * notifier, attach the device to its hardware group and, once the last
 * expected device of the group has arrived, create the group's SoC state.
 *
 * Returns 0 on success or a negative error code.
 */
int ath12k_core_init(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag;
	int ret;

	/* A panic-notifier registration failure is not fatal */
	ret = ath12k_core_panic_notifier_register(ab);
	if (ret)
		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);

	mutex_lock(&ath12k_hw_group_mutex);

	ag = ath12k_core_hw_group_assign(ab);
	if (!ag) {
		mutex_unlock(&ath12k_hw_group_mutex);
		ath12k_warn(ab, "unable to get hw group\n");
		ret = -ENODEV;
		goto err_unregister_notifier;
	}

	mutex_unlock(&ath12k_hw_group_mutex);

	mutex_lock(&ag->mutex);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
		   ag->num_devices, ag->num_probed);

	/* Only the last probed device of the group performs the creation */
	if (ath12k_core_hw_group_create_ready(ag)) {
		ret = ath12k_core_hw_group_create(ag);
		if (ret) {
			mutex_unlock(&ag->mutex);
			ath12k_warn(ab, "unable to create hw group\n");
			goto err_destroy_hw_group;
		}
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_destroy_hw_group:
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
err_unregister_notifier:
	ath12k_core_panic_notifier_unregister(ab);

	return ret;
}
2173 
/* Per-device teardown mirror of ath12k_core_init(); order matters: group
 * SoC state first, then leave the group, then drop the panic notifier.
 */
void ath12k_core_deinit(struct ath12k_base *ab)
{
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
	ath12k_core_panic_notifier_unregister(ab);
}
2180 
/* Final release of an ath12k_base from ath12k_core_alloc(); the retry
 * timer and both workqueues must be shut down before the memory goes.
 */
void ath12k_core_free(struct ath12k_base *ab)
{
	timer_delete_sync(&ab->rx_replenish_retry);
	destroy_workqueue(ab->workqueue_aux);
	destroy_workqueue(ab->workqueue);
	kfree(ab);
}
2188 
/* Allocate and minimally initialize an ath12k_base plus @priv_size bytes
 * of bus-private data: locks, completions, workqueues and work items.
 * No hardware access happens here.
 *
 * Returns the new device structure or NULL on allocation failure.
 */
struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
				      enum ath12k_bus bus)
{
	struct ath12k_base *ab;

	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
	if (!ab)
		return NULL;

	init_completion(&ab->driver_recovery);

	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
	if (!ab->workqueue)
		goto err_sc_free;

	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
	if (!ab->workqueue_aux)
		goto err_free_wq;

	mutex_init(&ab->core_lock);
	spin_lock_init(&ab->base_lock);
	init_completion(&ab->reset_complete);

	INIT_LIST_HEAD(&ab->peers);
	init_waitqueue_head(&ab->peer_mapping_wq);
	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
	INIT_WORK(&ab->restart_work, ath12k_core_restart);
	INIT_WORK(&ab->reset_work, ath12k_core_reset);
	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
	INIT_WORK(&ab->update_11d_work, ath12k_update_11d);

	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
	init_completion(&ab->htc_suspend);
	init_completion(&ab->restart_completed);
	init_completion(&ab->wow.wakeup_completed);

	ab->dev = dev;
	ab->hif.bus = bus;
	/* U8_MAX: radio count not known yet; ath12k_core_reset() resets it
	 * the same way before powering back up.
	 */
	ab->qmi.num_radios = U8_MAX;
	ab->single_chip_mlo_support = false;

	/* Device index used to identify the devices in a group.
	 *
	 * In Intra-device MLO, only one device present in a group,
	 * so it is always zero.
	 *
	 * In Inter-device MLO, Multiple device present in a group,
	 * expect non-zero value.
	 */
	ab->device_id = 0;

	return ab;

err_free_wq:
	destroy_workqueue(ab->workqueue);
err_sc_free:
	kfree(ab);
	return NULL;
}
2249 
2250 static int ath12k_init(void)
2251 {
2252 	ahb_err = ath12k_ahb_init();
2253 	if (ahb_err)
2254 		pr_warn("Failed to initialize ath12k AHB device: %d\n", ahb_err);
2255 
2256 	pci_err = ath12k_pci_init();
2257 	if (pci_err)
2258 		pr_warn("Failed to initialize ath12k PCI device: %d\n", pci_err);
2259 
2260 	/* If both failed, return one of the failures (arbitrary) */
2261 	return ahb_err && pci_err ? ahb_err : 0;
2262 }
2263 
/* Module exit: tear down only the buses whose init succeeded */
static void ath12k_exit(void)
{
	if (!pci_err)
		ath12k_pci_exit();

	if (!ahb_err)
		ath12k_ahb_exit();
}
2272 
2273 module_init(ath12k_init);
2274 module_exit(ath12k_exit);
2275 
2276 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN devices");
2277 MODULE_LICENSE("Dual BSD/GPL");
2278