xref: /linux/drivers/net/wireless/ath/ath12k/core.c (revision 0ad9617c78acbc71373fb341a6f75d4012b01d69)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/slab.h>
9 #include <linux/remoteproc.h>
10 #include <linux/firmware.h>
11 #include <linux/of.h>
12 #include <linux/of_graph.h>
13 #include "core.h"
14 #include "dp_tx.h"
15 #include "dp_rx.h"
16 #include "debug.h"
17 #include "hif.h"
18 #include "fw.h"
19 #include "debugfs.h"
20 #include "wow.h"
21 
22 unsigned int ath12k_debug_mask;
23 module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
24 MODULE_PARM_DESC(debug_mask, "Debugging mask");
25 
26 /* protected with ath12k_hw_group_mutex */
27 static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
28 
29 static DEFINE_MUTEX(ath12k_hw_group_mutex);
30 
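/* Apply the rfkill configuration on every radio once the firmware has
 * advertised WMI_SYS_CAP_INFO_RFKILL. Errors other than -EOPNOTSUPP abort
 * the loop and are returned to the caller.
 */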
31 static int ath12k_core_rfkill_config(struct ath12k_base *ab)
32 {
33 	struct ath12k *ar;
34 	int ret = 0, i;
35 
36 	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
37 		return 0;
38 
39 	for (i = 0; i < ab->num_radios; i++) {
40 		ar = ab->pdevs[i].ar;
41 
42 		ret = ath12k_mac_rfkill_config(ar);
43 		if (ret && ret != -EOPNOTSUPP) {
44 			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
45 			return ret;
46 		}
47 	}
48 
49 	return ret;
50 }
51 
52 /* Check whether we need to continue with the suspend/resume operation.
53  * Return:
54  *	a negative value: an error occurred, do not continue.
55  *	0: no error, but do not continue.
56  *	a positive value: no error, do continue.
57  */
58 static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
59 {
60 	struct ath12k *ar;
61 
62 	if (!ab->hw_params->supports_suspend)
63 		return -EOPNOTSUPP;
64 
65 	/* so far only single_pdev_only chips have supports_suspend set to true,
66 	 * so it is safe to use pdev index 0 here.
67 	 */
68 	ar = ab->pdevs[0].ar;
69 	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
70 		return 0;
71 
72 	return 1;
73 }
74 
75 int ath12k_core_suspend(struct ath12k_base *ab)
76 {
77 	struct ath12k *ar;
78 	int ret, i;
79 
80 	ret = ath12k_core_continue_suspend_resume(ab);
81 	if (ret <= 0)
82 		return ret;
83 
84 	for (i = 0; i < ab->num_radios; i++) {
85 		ar = ab->pdevs[i].ar;
86 		if (!ar)
87 			continue;
88 
89 		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
90 
91 		ret = ath12k_mac_wait_tx_complete(ar);
92 		if (ret) {
93 			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
94 			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
95 			return ret;
96 		}
97 
98 		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
99 	}
100 
101 	/* The PM framework skips the suspend_late/resume_early callbacks
102 	 * if another device reports an error in its suspend callback.
103 	 * However, ath12k_core_resume() is still called because we return
104 	 * success here, so the kernel puts us on the dpm_suspended_list.
105 	 * Since we do not go through a power down/up cycle in that case,
106 	 * complete(&ab->restart_completed) is never called from
107 	 * ath12k_core_restart(), and ath12k_core_resume() would time out.
108 	 * Complete it here to avoid that issue. This is also safe when no
109 	 * error occurs and suspend_late/resume_early do get called, because
110 	 * the completion is reinitialized in ath12k_core_resume_early().
111 	 */
112 	complete(&ab->restart_completed);
113 
114 	return 0;
115 }
116 EXPORT_SYMBOL(ath12k_core_suspend);
117 
118 int ath12k_core_suspend_late(struct ath12k_base *ab)
119 {
120 	int ret;
121 
122 	ret = ath12k_core_continue_suspend_resume(ab);
123 	if (ret <= 0)
124 		return ret;
125 
126 	ath12k_acpi_stop(ab);
127 
128 	ath12k_hif_irq_disable(ab);
129 	ath12k_hif_ce_irq_disable(ab);
130 
131 	ath12k_hif_power_down(ab, true);
132 
133 	return 0;
134 }
135 EXPORT_SYMBOL(ath12k_core_suspend_late);
136 
137 int ath12k_core_resume_early(struct ath12k_base *ab)
138 {
139 	int ret;
140 
141 	ret = ath12k_core_continue_suspend_resume(ab);
142 	if (ret <= 0)
143 		return ret;
144 
145 	reinit_completion(&ab->restart_completed);
146 	ret = ath12k_hif_power_up(ab);
147 	if (ret)
148 		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
149 
150 	return ret;
151 }
152 EXPORT_SYMBOL(ath12k_core_resume_early);
153 
154 int ath12k_core_resume(struct ath12k_base *ab)
155 {
156 	long time_left;
157 	int ret;
158 
159 	ret = ath12k_core_continue_suspend_resume(ab);
160 	if (ret <= 0)
161 		return ret;
162 
163 	time_left = wait_for_completion_timeout(&ab->restart_completed,
164 						ATH12K_RESET_TIMEOUT_HZ);
165 	if (time_left == 0) {
166 		ath12k_warn(ab, "timeout while waiting for restart complete");
167 		return -ETIMEDOUT;
168 	}
169 
170 	return 0;
171 }
172 EXPORT_SYMBOL(ath12k_core_resume);
173 
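/* Build the lookup string used to select board data, combining the bus,
 * PCI ids and QMI chip/board ids, optionally followed by the SMBIOS/DT
 * variant. Illustrative output for the bus-and-board search (values are
 * made up): "bus=pci,vendor=17cb,device=1107,subsystem-vendor=17cb,
 * subsystem-device=0108,qmi-chip-id=2,qmi-board-id=255,variant=XYZ"
 */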
174 static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
175 					   size_t name_len, bool with_variant,
176 					   bool bus_type_mode)
177 {
178 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
179 	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
180 
181 	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
182 		scnprintf(variant, sizeof(variant), ",variant=%s",
183 			  ab->qmi.target.bdf_ext);
184 
185 	switch (ab->id.bdf_search) {
186 	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
187 		if (bus_type_mode)
188 			scnprintf(name, name_len,
189 				  "bus=%s",
190 				  ath12k_bus_str(ab->hif.bus));
191 		else
192 			scnprintf(name, name_len,
193 				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
194 				  ath12k_bus_str(ab->hif.bus),
195 				  ab->id.vendor, ab->id.device,
196 				  ab->id.subsystem_vendor,
197 				  ab->id.subsystem_device,
198 				  ab->qmi.target.chip_id,
199 				  ab->qmi.target.board_id,
200 				  variant);
201 		break;
202 	default:
203 		scnprintf(name, name_len,
204 			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
205 			  ath12k_bus_str(ab->hif.bus),
206 			  ab->qmi.target.chip_id,
207 			  ab->qmi.target.board_id, variant);
208 		break;
209 	}
210 
211 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
212 
213 	return 0;
214 }
215 
216 static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
217 					 size_t name_len)
218 {
219 	return __ath12k_core_create_board_name(ab, name, name_len, true, false);
220 }
221 
222 static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
223 						  size_t name_len)
224 {
225 	return __ath12k_core_create_board_name(ab, name, name_len, false, false);
226 }
227 
228 static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
229 						  size_t name_len)
230 {
231 	return __ath12k_core_create_board_name(ab, name, name_len, false, true);
232 }
233 
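/* Request a firmware file relative to the hw-specific firmware directory
 * (the full path is built by ath12k_core_create_firmware_path()). Returns
 * an ERR_PTR() on failure, so callers must check with IS_ERR().
 */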
234 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
235 						    const char *file)
236 {
237 	const struct firmware *fw;
238 	char path[100];
239 	int ret;
240 
241 	if (!file)
242 		return ERR_PTR(-ENOENT);
243 
244 	ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
245 
246 	ret = firmware_request_nowarn(&fw, path, ab->dev);
247 	if (ret)
248 		return ERR_PTR(ret);
249 
250 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
251 		   path, fw->size);
252 
253 	return fw;
254 }
255 
256 void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
257 {
258 	if (!IS_ERR(bd->fw))
259 		release_firmware(bd->fw);
260 
261 	memset(bd, 0, sizeof(*bd));
262 }
263 
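/* Walk the name/data IE pairs inside a board or regdb container element:
 * remember when a name IE matches @boardname and, once matched, point
 * bd->data/bd->len at the following data IE. Returns -ENOENT if nothing
 * matches.
 */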
264 static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
265 					 struct ath12k_board_data *bd,
266 					 const void *buf, size_t buf_len,
267 					 const char *boardname,
268 					 int ie_id,
269 					 int name_id,
270 					 int data_id)
271 {
272 	const struct ath12k_fw_ie *hdr;
273 	bool name_match_found;
274 	int ret, board_ie_id;
275 	size_t board_ie_len;
276 	const void *board_ie_data;
277 
278 	name_match_found = false;
279 
280 	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
281 	while (buf_len > sizeof(struct ath12k_fw_ie)) {
282 		hdr = buf;
283 		board_ie_id = le32_to_cpu(hdr->id);
284 		board_ie_len = le32_to_cpu(hdr->len);
285 		board_ie_data = hdr->data;
286 
287 		buf_len -= sizeof(*hdr);
288 		buf += sizeof(*hdr);
289 
290 		if (buf_len < ALIGN(board_ie_len, 4)) {
291 			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
292 				   ath12k_bd_ie_type_str(ie_id),
293 				   buf_len, ALIGN(board_ie_len, 4));
294 			ret = -EINVAL;
295 			goto out;
296 		}
297 
298 		if (board_ie_id == name_id) {
299 			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
300 					board_ie_data, board_ie_len);
301 
302 			if (board_ie_len != strlen(boardname))
303 				goto next;
304 
305 			ret = memcmp(board_ie_data, boardname, strlen(boardname));
306 			if (ret)
307 				goto next;
308 
309 			name_match_found = true;
310 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
311 				   "boot found match %s for name '%s'",
312 				   ath12k_bd_ie_type_str(ie_id),
313 				   boardname);
314 		} else if (board_ie_id == data_id) {
315 			if (!name_match_found)
316 				/* no match found */
317 				goto next;
318 
319 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
320 				   "boot found %s for '%s'",
321 				   ath12k_bd_ie_type_str(ie_id),
322 				   boardname);
323 
324 			bd->data = board_ie_data;
325 			bd->len = board_ie_len;
326 
327 			ret = 0;
328 			goto out;
329 		} else {
330 			ath12k_warn(ab, "unknown %s id found: %d\n",
331 				    ath12k_bd_ie_type_str(ie_id),
332 				    board_ie_id);
333 		}
334 next:
335 		/* jump over the padding */
336 		board_ie_len = ALIGN(board_ie_len, 4);
337 
338 		buf_len -= board_ie_len;
339 		buf += board_ie_len;
340 	}
341 
342 	/* no match found */
343 	ret = -ENOENT;
344 
345 out:
346 	return ret;
347 }
348 
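/* Parse the API 2 board file: verify the ATH12K_BOARD_MAGIC header, then
 * iterate the top-level IEs looking for the container matching @ie_id_match
 * and hand it to ath12k_core_parse_bd_ie_board(). On failure the firmware
 * reference is released via ath12k_core_free_bdf().
 */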
349 static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
350 					      struct ath12k_board_data *bd,
351 					      const char *boardname,
352 					      int ie_id_match,
353 					      int name_id,
354 					      int data_id)
355 {
356 	size_t len, magic_len;
357 	const u8 *data;
358 	char *filename, filepath[100];
359 	size_t ie_len;
360 	struct ath12k_fw_ie *hdr;
361 	int ret, ie_id;
362 
363 	filename = ATH12K_BOARD_API2_FILE;
364 
365 	if (!bd->fw)
366 		bd->fw = ath12k_core_firmware_request(ab, filename);
367 
368 	if (IS_ERR(bd->fw))
369 		return PTR_ERR(bd->fw);
370 
371 	data = bd->fw->data;
372 	len = bd->fw->size;
373 
374 	ath12k_core_create_firmware_path(ab, filename,
375 					 filepath, sizeof(filepath));
376 
377 	/* the magic string is padded with an extra null byte */
378 	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
379 	if (len < magic_len) {
380 		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
381 			   filepath, len);
382 		ret = -EINVAL;
383 		goto err;
384 	}
385 
386 	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
387 		ath12k_err(ab, "found invalid board magic\n");
388 		ret = -EINVAL;
389 		goto err;
390 	}
391 
392 	/* magic is padded to 4 bytes */
393 	magic_len = ALIGN(magic_len, 4);
394 	if (len < magic_len) {
395 		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
396 			   filepath, len);
397 		ret = -EINVAL;
398 		goto err;
399 	}
400 
401 	data += magic_len;
402 	len -= magic_len;
403 
404 	while (len > sizeof(struct ath12k_fw_ie)) {
405 		hdr = (struct ath12k_fw_ie *)data;
406 		ie_id = le32_to_cpu(hdr->id);
407 		ie_len = le32_to_cpu(hdr->len);
408 
409 		len -= sizeof(*hdr);
410 		data = hdr->data;
411 
412 		if (len < ALIGN(ie_len, 4)) {
413 			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
414 				   ie_id, ie_len, len);
415 			ret = -EINVAL;
416 			goto err;
417 		}
418 
419 		if (ie_id == ie_id_match) {
420 			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
421 							    ie_len,
422 							    boardname,
423 							    ie_id_match,
424 							    name_id,
425 							    data_id);
426 			if (ret == -ENOENT)
427 				/* no match found, continue */
428 				goto next;
429 			else if (ret)
430 				/* there was an error, bail out */
431 				goto err;
432 			/* either found or error, so stop searching */
433 			goto out;
434 		}
435 next:
436 		/* jump over the padding */
437 		ie_len = ALIGN(ie_len, 4);
438 
439 		len -= ie_len;
440 		data += ie_len;
441 	}
442 
443 out:
444 	if (!bd->data || !bd->len) {
445 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
446 			   "failed to fetch %s for %s from %s\n",
447 			   ath12k_bd_ie_type_str(ie_id_match),
448 			   boardname, filepath);
449 		ret = -ENODATA;
450 		goto err;
451 	}
452 
453 	return 0;
454 
455 err:
456 	ath12k_core_free_bdf(ab, bd);
457 	return ret;
458 }
459 
460 int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
461 				       struct ath12k_board_data *bd,
462 				       char *filename)
463 {
464 	bd->fw = ath12k_core_firmware_request(ab, filename);
465 	if (IS_ERR(bd->fw))
466 		return PTR_ERR(bd->fw);
467 
468 	bd->data = bd->fw->data;
469 	bd->len = bd->fw->size;
470 
471 	return 0;
472 }
473 
474 #define BOARD_NAME_SIZE 200
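/* Board data is looked up in this order: board-2.bin with the full board
 * name, board-2.bin with the fallback (variant-less) name, and finally the
 * plain API 1 board.bin file.
 */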
475 int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
476 {
477 	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
478 	char *filename, filepath[100];
479 	int bd_api;
480 	int ret;
481 
482 	filename = ATH12K_BOARD_API2_FILE;
483 
484 	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
485 	if (ret) {
486 		ath12k_err(ab, "failed to create board name: %d", ret);
487 		return ret;
488 	}
489 
490 	bd_api = 2;
491 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
492 						 ATH12K_BD_IE_BOARD,
493 						 ATH12K_BD_IE_BOARD_NAME,
494 						 ATH12K_BD_IE_BOARD_DATA);
495 	if (!ret)
496 		goto success;
497 
498 	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
499 						     sizeof(fallback_boardname));
500 	if (ret) {
501 		ath12k_err(ab, "failed to create fallback board name: %d", ret);
502 		return ret;
503 	}
504 
505 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
506 						 ATH12K_BD_IE_BOARD,
507 						 ATH12K_BD_IE_BOARD_NAME,
508 						 ATH12K_BD_IE_BOARD_DATA);
509 	if (!ret)
510 		goto success;
511 
512 	bd_api = 1;
513 	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
514 	if (ret) {
515 		ath12k_core_create_firmware_path(ab, filename,
516 						 filepath, sizeof(filepath));
517 		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
518 			   boardname, filepath);
519 		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
520 			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
521 				   fallback_boardname, filepath);
522 
523 		ath12k_err(ab, "failed to fetch board.bin from %s\n",
524 			   ab->hw_params->fw.dir);
525 		return ret;
526 	}
527 
528 success:
529 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
530 	return 0;
531 }
532 
533 int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
534 {
535 	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
536 	int ret;
537 
538 	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
539 	if (ret) {
540 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
541 			   "failed to create board name for regdb: %d", ret);
542 		goto exit;
543 	}
544 
545 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
546 						 ATH12K_BD_IE_REGDB,
547 						 ATH12K_BD_IE_REGDB_NAME,
548 						 ATH12K_BD_IE_REGDB_DATA);
549 	if (!ret)
550 		goto exit;
551 
552 	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
553 						     BOARD_NAME_SIZE);
554 	if (ret) {
555 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
556 			   "failed to create default board name for regdb: %d", ret);
557 		goto exit;
558 	}
559 
560 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
561 						 ATH12K_BD_IE_REGDB,
562 						 ATH12K_BD_IE_REGDB_NAME,
563 						 ATH12K_BD_IE_REGDB_DATA);
564 	if (!ret)
565 		goto exit;
566 
567 	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
568 	if (ret)
569 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
570 			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);
571 
572 exit:
573 	if (!ret)
574 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");
575 
576 	return ret;
577 }
578 
579 u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
580 {
581 	if (ab->num_radios == 2)
582 		return TARGET_NUM_STATIONS_DBS;
583 	else if (ab->num_radios == 3)
584 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
585 	return TARGET_NUM_STATIONS_SINGLE;
586 }
587 
588 u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
589 {
590 	if (ab->num_radios == 2)
591 		return TARGET_NUM_PEERS_PDEV_DBS;
592 	else if (ab->num_radios == 3)
593 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
594 	return TARGET_NUM_PEERS_PDEV_SINGLE;
595 }
596 
597 u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
598 {
599 	if (ab->num_radios == 2)
600 		return TARGET_NUM_TIDS(DBS);
601 	else if (ab->num_radios == 3)
602 		return TARGET_NUM_TIDS(DBS_SBS);
603 	return TARGET_NUM_TIDS(SINGLE);
604 }
605 
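/* Counterpart of ath12k_core_start(): mark the device stopped in its group,
 * stop the firmware via QMI (unless a crash flush is in progress) and tear
 * down ACPI, REO, HIF, WMI and DP state.
 */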
606 static void ath12k_core_stop(struct ath12k_base *ab)
607 {
608 	ath12k_core_stopped(ab);
609 
610 	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
611 		ath12k_qmi_firmware_stop(ab);
612 
613 	ath12k_acpi_stop(ab);
614 
615 	ath12k_dp_rx_pdev_reo_cleanup(ab);
616 	ath12k_hif_stop(ab);
617 	ath12k_wmi_detach(ab);
618 	ath12k_dp_free(ab);
619 
620 	/* De-Init of components as needed */
621 }
622 
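/* dmi_walk() callback: look for the ath12k SMBIOS BDF extension entry and,
 * after validating its type, length and magic prefix, copy the variant
 * string into ab->qmi.target.bdf_ext.
 */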
623 static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
624 {
625 	struct ath12k_base *ab = data;
626 	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
627 	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
628 	ssize_t copied;
629 	size_t len;
630 	int i;
631 
632 	if (ab->qmi.target.bdf_ext[0] != '\0')
633 		return;
634 
635 	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
636 		return;
637 
638 	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
639 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
640 			   "wrong smbios bdf ext type length (%d).\n",
641 			   hdr->length);
642 		return;
643 	}
644 
645 	if (!smbios->bdf_enabled) {
646 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
647 		return;
648 	}
649 
650 	/* Only one string exists (per spec) */
651 	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
652 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
653 			   "bdf variant magic does not match.\n");
654 		return;
655 	}
656 
657 	len = min_t(size_t,
658 		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
659 	for (i = 0; i < len; i++) {
660 		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
661 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
662 				   "bdf variant name contains non ascii chars.\n");
663 			return;
664 		}
665 	}
666 
667 	/* Copy extension name without magic prefix */
668 	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
669 			 sizeof(ab->qmi.target.bdf_ext));
670 	if (copied < 0) {
671 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
672 			   "bdf variant string is longer than the buffer can accommodate\n");
673 		return;
674 	}
675 
676 	ath12k_dbg(ab, ATH12K_DBG_BOOT,
677 		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
678 		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
679 }
680 
681 int ath12k_core_check_smbios(struct ath12k_base *ab)
682 {
683 	ab->qmi.target.bdf_ext[0] = '\0';
684 	dmi_walk(ath12k_core_check_bdfext, ab);
685 
686 	if (ab->qmi.target.bdf_ext[0] == '\0')
687 		return -ENODATA;
688 
689 	return 0;
690 }
691 
692 static int ath12k_core_soc_create(struct ath12k_base *ab)
693 {
694 	int ret;
695 
696 	ret = ath12k_qmi_init_service(ab);
697 	if (ret) {
698 		ath12k_err(ab, "failed to initialize qmi: %d\n", ret);
699 		return ret;
700 	}
701 
702 	ath12k_debugfs_soc_create(ab);
703 
704 	ret = ath12k_hif_power_up(ab);
705 	if (ret) {
706 		ath12k_err(ab, "failed to power up: %d\n", ret);
707 		goto err_qmi_deinit;
708 	}
709 
710 	return 0;
711 
712 err_qmi_deinit:
713 	ath12k_debugfs_soc_destroy(ab);
714 	ath12k_qmi_deinit_service(ab);
715 	return ret;
716 }
717 
718 static void ath12k_core_soc_destroy(struct ath12k_base *ab)
719 {
720 	ath12k_hif_power_down(ab, false);
721 	ath12k_reg_free(ab);
722 	ath12k_debugfs_soc_destroy(ab);
723 	ath12k_qmi_deinit_service(ab);
724 }
725 
726 static int ath12k_core_pdev_create(struct ath12k_base *ab)
727 {
728 	int ret;
729 
730 	ret = ath12k_dp_pdev_alloc(ab);
731 	if (ret) {
732 		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
733 		return ret;
734 	}
735 
736 	return 0;
737 }
738 
739 static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
740 {
741 	ath12k_dp_pdev_free(ab);
742 }
743 
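/* Bring the device up once firmware is running: attach WMI, initialize HTC,
 * start HIF, connect the HTT and WMI services, wait for WMI service ready,
 * set up the REO rings, send the WMI init command and wait for unified
 * ready, switch single-pdev devices to DBS mode and negotiate the HTT
 * version. Must be called with ab->core_lock held.
 */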
744 static int ath12k_core_start(struct ath12k_base *ab,
745 			     enum ath12k_firmware_mode mode)
746 {
747 	int ret;
748 
749 	lockdep_assert_held(&ab->core_lock);
750 
751 	ret = ath12k_wmi_attach(ab);
752 	if (ret) {
753 		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
754 		return ret;
755 	}
756 
757 	ret = ath12k_htc_init(ab);
758 	if (ret) {
759 		ath12k_err(ab, "failed to init htc: %d\n", ret);
760 		goto err_wmi_detach;
761 	}
762 
763 	ret = ath12k_hif_start(ab);
764 	if (ret) {
765 		ath12k_err(ab, "failed to start HIF: %d\n", ret);
766 		goto err_wmi_detach;
767 	}
768 
769 	ret = ath12k_htc_wait_target(&ab->htc);
770 	if (ret) {
771 		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
772 		goto err_hif_stop;
773 	}
774 
775 	ret = ath12k_dp_htt_connect(&ab->dp);
776 	if (ret) {
777 		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
778 		goto err_hif_stop;
779 	}
780 
781 	ret = ath12k_wmi_connect(ab);
782 	if (ret) {
783 		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
784 		goto err_hif_stop;
785 	}
786 
787 	ret = ath12k_htc_start(&ab->htc);
788 	if (ret) {
789 		ath12k_err(ab, "failed to start HTC: %d\n", ret);
790 		goto err_hif_stop;
791 	}
792 
793 	ret = ath12k_wmi_wait_for_service_ready(ab);
794 	if (ret) {
795 		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
796 			   ret);
797 		goto err_hif_stop;
798 	}
799 
800 	ath12k_dp_cc_config(ab);
801 
802 	ret = ath12k_dp_rx_pdev_reo_setup(ab);
803 	if (ret) {
804 		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
805 		goto err_hif_stop;
806 	}
807 
808 	ath12k_dp_hal_rx_desc_init(ab);
809 
810 	ret = ath12k_wmi_cmd_init(ab);
811 	if (ret) {
812 		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
813 		goto err_reo_cleanup;
814 	}
815 
816 	ret = ath12k_wmi_wait_for_unified_ready(ab);
817 	if (ret) {
818 		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
819 			   ret);
820 		goto err_reo_cleanup;
821 	}
822 
823 	/* put hardware to DBS mode */
824 	if (ab->hw_params->single_pdev_only) {
825 		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
826 		if (ret) {
827 			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
828 			goto err_reo_cleanup;
829 		}
830 	}
831 
832 	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
833 	if (ret) {
834 		ath12k_err(ab, "failed to send htt version request message: %d\n",
835 			   ret);
836 		goto err_reo_cleanup;
837 	}
838 
839 	ret = ath12k_acpi_start(ab);
840 	if (ret)
841 		/* ACPI is optional so continue in case of an error */
842 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
843 
844 	if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
845 		/* Indicate the core start in the appropriate group */
846 		ath12k_core_started(ab);
847 
848 	return 0;
849 
850 err_reo_cleanup:
851 	ath12k_dp_rx_pdev_reo_cleanup(ab);
852 err_hif_stop:
853 	ath12k_hif_stop(ab);
854 err_wmi_detach:
855 	ath12k_wmi_detach(ab);
856 	return ret;
857 }
858 
859 static void ath12k_core_device_cleanup(struct ath12k_base *ab)
860 {
861 	mutex_lock(&ab->core_lock);
862 
863 	ath12k_hif_irq_disable(ab);
864 	ath12k_core_pdev_destroy(ab);
865 
866 	mutex_unlock(&ab->core_lock);
867 }
868 
869 static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
870 {
871 	struct ath12k_base *ab;
872 	int i;
873 
874 	lockdep_assert_held(&ag->mutex);
875 
876 	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
877 
878 	ath12k_mac_unregister(ag);
879 
880 	for (i = ag->num_devices - 1; i >= 0; i--) {
881 		ab = ag->ab[i];
882 		if (!ab)
883 			continue;
884 		ath12k_core_device_cleanup(ab);
885 	}
886 
887 	ath12k_mac_destroy(ag);
888 }
889 
890 static int __ath12k_mac_mlo_ready(struct ath12k *ar)
891 {
892 	int ret;
893 
894 	ret = ath12k_wmi_mlo_ready(ar);
895 	if (ret) {
896 		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
897 			   ar->pdev_idx, ret);
898 		return ret;
899 	}
900 
901 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
902 		   ar->pdev_idx);
903 
904 	return 0;
905 }
906 
907 int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
908 {
909 	struct ath12k_hw *ah;
910 	struct ath12k *ar;
911 	int ret = 0;
912 	int i, j;
913 
914 	for (i = 0; i < ag->num_hw; i++) {
915 		ah = ag->ah[i];
916 		if (!ah)
917 			continue;
918 
919 		for_each_ar(ah, ar, j) {
920 			ar = &ah->radio[j];
921 			ret = __ath12k_mac_mlo_ready(ar);
922 			if (ret)
923 				goto out;
924 		}
925 	}
926 
927 out:
928 	return ret;
929 }
930 
931 static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
932 {
933 	int ret, i;
934 
935 	if (!ag->mlo_capable || ag->num_devices == 1)
936 		return 0;
937 
938 	ret = ath12k_mac_mlo_setup(ag);
939 	if (ret)
940 		return ret;
941 
942 	for (i = 0; i < ag->num_devices; i++)
943 		ath12k_dp_partner_cc_init(ag->ab[i]);
944 
945 	ret = ath12k_mac_mlo_ready(ag);
946 	if (ret)
947 		goto err_mlo_teardown;
948 
949 	return 0;
950 
951 err_mlo_teardown:
952 	ath12k_mac_mlo_teardown(ag);
953 
954 	return ret;
955 }
956 
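/* Start the whole hardware group: on first start allocate the MAC state,
 * perform MLO setup and register with mac80211, then create the DP pdevs,
 * enable IRQs and apply the rfkill configuration on every device.
 */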
957 static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
958 {
959 	struct ath12k_base *ab;
960 	int ret, i;
961 
962 	lockdep_assert_held(&ag->mutex);
963 
964 	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
965 		goto core_pdev_create;
966 
967 	ret = ath12k_mac_allocate(ag);
968 	if (WARN_ON(ret))
969 		return ret;
970 
971 	ret = ath12k_core_mlo_setup(ag);
972 	if (WARN_ON(ret))
973 		goto err_mac_destroy;
974 
975 	ret = ath12k_mac_register(ag);
976 	if (WARN_ON(ret))
977 		goto err_mlo_teardown;
978 
979 	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
980 
981 core_pdev_create:
982 	for (i = 0; i < ag->num_devices; i++) {
983 		ab = ag->ab[i];
984 		if (!ab)
985 			continue;
986 
987 		mutex_lock(&ab->core_lock);
988 
989 		ret = ath12k_core_pdev_create(ab);
990 		if (ret) {
991 			ath12k_err(ab, "failed to create pdev core %d\n", ret);
992 			mutex_unlock(&ab->core_lock);
993 			goto err;
994 		}
995 
996 		ath12k_hif_irq_enable(ab);
997 
998 		ret = ath12k_core_rfkill_config(ab);
999 		if (ret && ret != -EOPNOTSUPP) {
1000 			mutex_unlock(&ab->core_lock);
1001 			goto err;
1002 		}
1003 
1004 		mutex_unlock(&ab->core_lock);
1005 	}
1006 
1007 	return 0;
1008 
1009 err:
1010 	ath12k_core_hw_group_stop(ag);
1011 	return ret;
1012 
1013 err_mlo_teardown:
1014 	ath12k_mac_mlo_teardown(ag);
1015 
1016 err_mac_destroy:
1017 	ath12k_mac_destroy(ag);
1018 
1019 	return ret;
1020 }
1021 
1022 static int ath12k_core_start_firmware(struct ath12k_base *ab,
1023 				      enum ath12k_firmware_mode mode)
1024 {
1025 	int ret;
1026 
1027 	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
1028 				    &ab->qmi.ce_cfg.shadow_reg_v3_len);
1029 
1030 	ret = ath12k_qmi_firmware_start(ab, mode);
1031 	if (ret) {
1032 		ath12k_err(ab, "failed to send firmware start: %d\n", ret);
1033 		return ret;
1034 	}
1035 
1036 	return ret;
1037 }
1038 
1039 static inline
1040 bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
1041 {
1042 	lockdep_assert_held(&ag->mutex);
1043 
1044 	return (ag->num_started == ag->num_devices);
1045 }
1046 
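/* Kick the QMI host-cap exchange on the next device in the group after
 * @ab, chaining the bring-up of partner devices one after another.
 */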
1047 static void ath12k_core_trigger_partner(struct ath12k_base *ab)
1048 {
1049 	struct ath12k_hw_group *ag = ab->ag;
1050 	struct ath12k_base *partner_ab;
1051 	bool found = false;
1052 	int i;
1053 
1054 	for (i = 0; i < ag->num_devices; i++) {
1055 		partner_ab = ag->ab[i];
1056 		if (!partner_ab)
1057 			continue;
1058 
1059 		if (found)
1060 			ath12k_qmi_trigger_host_cap(partner_ab);
1061 
1062 		found = (partner_ab == ab);
1063 	}
1064 }
1065 
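/* Called when QMI reports the firmware is ready: start the firmware in
 * normal mode, initialize the CE pipes and DP, and start the core. If this
 * is the last device of the group to start, bring the whole hardware group
 * up; otherwise trigger the next partner device.
 */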
1066 int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
1067 {
1068 	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
1069 	int ret, i;
1070 
1071 	ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
1072 	if (ret) {
1073 		ath12k_err(ab, "failed to start firmware: %d\n", ret);
1074 		return ret;
1075 	}
1076 
1077 	ret = ath12k_ce_init_pipes(ab);
1078 	if (ret) {
1079 		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
1080 		goto err_firmware_stop;
1081 	}
1082 
1083 	ret = ath12k_dp_alloc(ab);
1084 	if (ret) {
1085 		ath12k_err(ab, "failed to init DP: %d\n", ret);
1086 		goto err_firmware_stop;
1087 	}
1088 
1089 	mutex_lock(&ag->mutex);
1090 	mutex_lock(&ab->core_lock);
1091 
1092 	ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
1093 	if (ret) {
1094 		ath12k_err(ab, "failed to start core: %d\n", ret);
1095 		goto err_dp_free;
1096 	}
1097 
1098 	mutex_unlock(&ab->core_lock);
1099 
1100 	if (ath12k_core_hw_group_start_ready(ag)) {
1101 		ret = ath12k_core_hw_group_start(ag);
1102 		if (ret) {
1103 			ath12k_warn(ab, "unable to start hw group\n");
1104 			goto err_core_stop;
1105 		}
1106 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
1107 	} else {
1108 		ath12k_core_trigger_partner(ab);
1109 	}
1110 
1111 	mutex_unlock(&ag->mutex);
1112 
1113 	return 0;
1114 
1115 err_core_stop:
1116 	for (i = ag->num_devices - 1; i >= 0; i--) {
1117 		ab = ag->ab[i];
1118 		if (!ab)
1119 			continue;
1120 
1121 		mutex_lock(&ab->core_lock);
1122 		ath12k_core_stop(ab);
1123 		mutex_unlock(&ab->core_lock);
1124 	}
1125 	goto exit;
1126 
1127 err_dp_free:
1128 	ath12k_dp_free(ab);
1129 	mutex_unlock(&ab->core_lock);
1130 err_firmware_stop:
1131 	ath12k_qmi_firmware_stop(ab);
1132 
1133 exit:
1134 	mutex_unlock(&ag->mutex);
1135 	return ret;
1136 }
1137 
1138 static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
1139 {
1140 	int ret;
1141 
1142 	mutex_lock(&ab->core_lock);
1143 	ath12k_dp_pdev_free(ab);
1144 	ath12k_ce_cleanup_pipes(ab);
1145 	ath12k_wmi_detach(ab);
1146 	ath12k_dp_rx_pdev_reo_cleanup(ab);
1147 	mutex_unlock(&ab->core_lock);
1148 
1149 	ath12k_dp_free(ab);
1150 	ath12k_hal_srng_deinit(ab);
1151 
1152 	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
1153 
1154 	ret = ath12k_hal_srng_init(ab);
1155 	if (ret)
1156 		return ret;
1157 
1158 	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
1159 
1160 	ret = ath12k_core_qmi_firmware_ready(ab);
1161 	if (ret)
1162 		goto err_hal_srng_deinit;
1163 
1164 	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
1165 
1166 	return 0;
1167 
1168 err_hal_srng_deinit:
1169 	ath12k_hal_srng_deinit(ab);
1170 	return ret;
1171 }
1172 
1173 static void ath12k_rfkill_work(struct work_struct *work)
1174 {
1175 	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1176 	struct ath12k_hw_group *ag = ab->ag;
1177 	struct ath12k *ar;
1178 	struct ath12k_hw *ah;
1179 	struct ieee80211_hw *hw;
1180 	bool rfkill_radio_on;
1181 	int i, j;
1182 
1183 	spin_lock_bh(&ab->base_lock);
1184 	rfkill_radio_on = ab->rfkill_radio_on;
1185 	spin_unlock_bh(&ab->base_lock);
1186 
1187 	for (i = 0; i < ag->num_hw; i++) {
1188 		ah = ath12k_ag_to_ah(ag, i);
1189 		if (!ah)
1190 			continue;
1191 
1192 		for (j = 0; j < ah->num_radio; j++) {
1193 			ar = &ah->radio[j];
1194 			if (!ar)
1195 				continue;
1196 
1197 			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1198 		}
1199 
1200 		hw = ah->hw;
1201 		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1202 	}
1203 }
1204 
1205 void ath12k_core_halt(struct ath12k *ar)
1206 {
1207 	struct ath12k_base *ab = ar->ab;
1208 
1209 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
1210 
1211 	ar->num_created_vdevs = 0;
1212 	ar->allocated_vdev_map = 0;
1213 
1214 	ath12k_mac_scan_finish(ar);
1215 	ath12k_mac_peer_cleanup_all(ar);
1216 	cancel_delayed_work_sync(&ar->scan.timeout);
1217 	cancel_work_sync(&ar->regd_update_work);
1218 	cancel_work_sync(&ab->rfkill_work);
1219 
1220 	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
1221 	synchronize_rcu();
1222 	INIT_LIST_HEAD(&ar->arvifs);
1223 	idr_init(&ar->txmgmt_idr);
1224 }
1225 
1226 static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
1227 {
1228 	struct ath12k_hw_group *ag = ab->ag;
1229 	struct ath12k *ar;
1230 	struct ath12k_hw *ah;
1231 	int i, j;
1232 
1233 	spin_lock_bh(&ab->base_lock);
1234 	ab->stats.fw_crash_counter++;
1235 	spin_unlock_bh(&ab->base_lock);
1236 
1237 	if (ab->is_reset)
1238 		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
1239 
1240 	for (i = 0; i < ag->num_hw; i++) {
1241 		ah = ath12k_ag_to_ah(ag, i);
1242 		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
1243 			continue;
1244 
1245 		ieee80211_stop_queues(ah->hw);
1246 
1247 		for (j = 0; j < ah->num_radio; j++) {
1248 			ar = &ah->radio[j];
1249 
1250 			ath12k_mac_drain_tx(ar);
1251 			complete(&ar->scan.started);
1252 			complete(&ar->scan.completed);
1253 			complete(&ar->scan.on_channel);
1254 			complete(&ar->peer_assoc_done);
1255 			complete(&ar->peer_delete_done);
1256 			complete(&ar->install_key_done);
1257 			complete(&ar->vdev_setup_done);
1258 			complete(&ar->vdev_delete_done);
1259 			complete(&ar->bss_survey_done);
1260 
1261 			wake_up(&ar->dp.tx_empty_waitq);
1262 			idr_for_each(&ar->txmgmt_idr,
1263 				     ath12k_mac_tx_mgmt_pending_free, ar);
1264 			idr_destroy(&ar->txmgmt_idr);
1265 			wake_up(&ar->txmgmt_empty_waitq);
1266 		}
1267 	}
1268 
1269 	wake_up(&ab->wmi_ab.tx_credits_wq);
1270 	wake_up(&ab->peer_mapping_wq);
1271 }
1272 
1273 static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
1274 {
1275 	struct ath12k_hw_group *ag = ab->ag;
1276 	struct ath12k_hw *ah;
1277 	struct ath12k *ar;
1278 	int i, j;
1279 
1280 	for (i = 0; i < ag->num_hw; i++) {
1281 		ah = ath12k_ag_to_ah(ag, i);
1282 		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
1283 			continue;
1284 
1285 		wiphy_lock(ah->hw->wiphy);
1286 		mutex_lock(&ah->hw_mutex);
1287 
1288 		switch (ah->state) {
1289 		case ATH12K_HW_STATE_ON:
1290 			ah->state = ATH12K_HW_STATE_RESTARTING;
1291 
1292 			for (j = 0; j < ah->num_radio; j++) {
1293 				ar = &ah->radio[j];
1294 				ath12k_core_halt(ar);
1295 			}
1296 
1297 			break;
1298 		case ATH12K_HW_STATE_OFF:
1299 			ath12k_warn(ab,
1300 				    "cannot restart hw %d that hasn't been started\n",
1301 				    i);
1302 			break;
1303 		case ATH12K_HW_STATE_RESTARTING:
1304 			break;
1305 		case ATH12K_HW_STATE_RESTARTED:
1306 			ah->state = ATH12K_HW_STATE_WEDGED;
1307 			fallthrough;
1308 		case ATH12K_HW_STATE_WEDGED:
1309 			ath12k_warn(ab,
1310 				    "device is wedged, will not restart hw %d\n", i);
1311 			break;
1312 		}
1313 
1314 		mutex_unlock(&ah->hw_mutex);
1315 		wiphy_unlock(ah->hw->wiphy);
1316 	}
1317 
1318 	complete(&ab->driver_recovery);
1319 }
1320 
1321 static void ath12k_core_restart(struct work_struct *work)
1322 {
1323 	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
1324 	struct ath12k_hw_group *ag = ab->ag;
1325 	struct ath12k_hw *ah;
1326 	int ret, i;
1327 
1328 	ret = ath12k_core_reconfigure_on_crash(ab);
1329 	if (ret) {
1330 		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
1331 		return;
1332 	}
1333 
1334 	if (ab->is_reset) {
1335 		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
1336 			atomic_dec(&ab->reset_count);
1337 			complete(&ab->reset_complete);
1338 			ab->is_reset = false;
1339 			atomic_set(&ab->fail_cont_count, 0);
1340 			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
1341 		}
1342 
1343 		for (i = 0; i < ag->num_hw; i++) {
1344 			ah = ath12k_ag_to_ah(ab->ag, i);
1345 			ieee80211_restart_hw(ah->hw);
1346 		}
1347 	}
1348 
1349 	complete(&ab->restart_completed);
1350 }
1351 
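/* Reset worker: bail out if firmware never became ready or too many
 * consecutive recoveries have failed, serialize with an already running
 * reset, then flush pending state, collect a coredump and power cycle the
 * device to kick off recovery.
 */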
1352 static void ath12k_core_reset(struct work_struct *work)
1353 {
1354 	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
1355 	int reset_count, fail_cont_count;
1356 	long time_left;
1357 
1358 	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
1359 		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
1360 		return;
1361 	}
1362 
1363 	/* Sometimes recovery fails and every subsequent attempt fails as well;
1364 	 * bail out here to avoid looping on a recovery that can never succeed.
1365 	 */
1366 	fail_cont_count = atomic_read(&ab->fail_cont_count);
1367 
1368 	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
1369 		return;
1370 
1371 	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
1372 	    time_before(jiffies, ab->reset_fail_timeout))
1373 		return;
1374 
1375 	reset_count = atomic_inc_return(&ab->reset_count);
1376 
1377 	if (reset_count > 1) {
1378 		/* Sometimes another reset worker is scheduled before the previous
1379 		 * one has completed; the second worker would then destroy the state
1380 		 * of the first one. The wait below avoids that.
1381 		 */
1382 		ath12k_warn(ab, "already resetting count %d\n", reset_count);
1383 
1384 		reinit_completion(&ab->reset_complete);
1385 		time_left = wait_for_completion_timeout(&ab->reset_complete,
1386 							ATH12K_RESET_TIMEOUT_HZ);
1387 		if (time_left) {
1388 			ath12k_dbg(ab, ATH12K_DBG_BOOT, "previous reset completed, skipping reset\n");
1389 			atomic_dec(&ab->reset_count);
1390 			return;
1391 		}
1392 
1393 		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
1394 		/* Record the continuous recovery failure count when recovery fails */
1395 		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
1396 	}
1397 
1398 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");
1399 
1400 	ab->is_reset = true;
1401 	atomic_set(&ab->recovery_count, 0);
1402 
1403 	ath12k_coredump_collect(ab);
1404 	ath12k_core_pre_reconfigure_recovery(ab);
1405 
1406 	ath12k_core_post_reconfigure_recovery(ab);
1407 
1408 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");
1409 
1410 	ath12k_hif_irq_disable(ab);
1411 	ath12k_hif_ce_irq_disable(ab);
1412 
1413 	ath12k_hif_power_down(ab, false);
1414 	ath12k_hif_power_up(ab);
1415 
1416 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
1417 }
1418 
1419 int ath12k_core_pre_init(struct ath12k_base *ab)
1420 {
1421 	int ret;
1422 
1423 	ret = ath12k_hw_init(ab);
1424 	if (ret) {
1425 		ath12k_err(ab, "failed to init hw params: %d\n", ret);
1426 		return ret;
1427 	}
1428 
1429 	ath12k_fw_map(ab);
1430 
1431 	return 0;
1432 }
1433 
1434 static int ath12k_core_panic_handler(struct notifier_block *nb,
1435 				     unsigned long action, void *data)
1436 {
1437 	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
1438 					      panic_nb);
1439 
1440 	return ath12k_hif_panic_handler(ab);
1441 }
1442 
1443 static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
1444 {
1445 	ab->panic_nb.notifier_call = ath12k_core_panic_handler;
1446 
1447 	return atomic_notifier_chain_register(&panic_notifier_list,
1448 					      &ab->panic_nb);
1449 }
1450 
1451 static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
1452 {
1453 	atomic_notifier_chain_unregister(&panic_notifier_list,
1454 					 &ab->panic_nb);
1455 }
1456 
1457 static inline
1458 bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1459 {
1460 	lockdep_assert_held(&ag->mutex);
1461 
1462 	return (ag->num_probed == ag->num_devices);
1463 }
1464 
1465 static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
1466 {
1467 	struct ath12k_hw_group *ag;
1468 	int count = 0;
1469 
1470 	lockdep_assert_held(&ath12k_hw_group_mutex);
1471 
1472 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1473 		count++;
1474 
1475 	ag = kzalloc(sizeof(*ag), GFP_KERNEL);
1476 	if (!ag)
1477 		return NULL;
1478 
1479 	ag->id = count;
1480 	list_add(&ag->list, &ath12k_hw_group_list);
1481 	mutex_init(&ag->mutex);
1482 	ag->mlo_capable = false;
1483 
1484 	return ag;
1485 }
1486 
1487 static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
1488 {
1489 	mutex_lock(&ath12k_hw_group_mutex);
1490 
1491 	list_del(&ag->list);
1492 	kfree(ag);
1493 
1494 	mutex_unlock(&ath12k_hw_group_mutex);
1495 }
1496 
1497 static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
1498 {
1499 	struct ath12k_hw_group *ag;
1500 	int i;
1501 
1502 	if (!ab->dev->of_node)
1503 		return NULL;
1504 
1505 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1506 		for (i = 0; i < ag->num_devices; i++)
1507 			if (ag->wsi_node[i] == ab->dev->of_node)
1508 				return ag;
1509 
1510 	return NULL;
1511 }
1512 
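/* Follow the WSI graph described in the device tree, starting at this
 * device: each node's tx endpoint (port 0) points at the rx endpoint of the
 * next device. Record every node in ag->wsi_node[] until the walk loops
 * back to the starting node and set ag->num_devices accordingly.
 */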
1513 static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
1514 				    struct ath12k_base *ab)
1515 {
1516 	struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
1517 	struct device_node *tx_endpoint, *next_rx_endpoint;
1518 	int device_count = 0;
1519 
1520 	next_wsi_dev = wsi_dev;
1521 
1522 	if (!next_wsi_dev)
1523 		return -ENODEV;
1524 
1525 	do {
1526 		ag->wsi_node[device_count] = next_wsi_dev;
1527 
1528 		tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
1529 		if (!tx_endpoint) {
1530 			of_node_put(next_wsi_dev);
1531 			return -ENODEV;
1532 		}
1533 
1534 		next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
1535 		if (!next_rx_endpoint) {
1536 			of_node_put(next_wsi_dev);
1537 			of_node_put(tx_endpoint);
1538 			return -ENODEV;
1539 		}
1540 
1541 		of_node_put(tx_endpoint);
1542 		of_node_put(next_wsi_dev);
1543 
1544 		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
1545 		if (!next_wsi_dev) {
1546 			of_node_put(next_rx_endpoint);
1547 			return -ENODEV;
1548 		}
1549 
1550 		of_node_put(next_rx_endpoint);
1551 
1552 		device_count++;
1553 		if (device_count > ATH12K_MAX_SOCS) {
1554 			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
1555 				    device_count, ATH12K_MAX_SOCS);
1556 			of_node_put(next_wsi_dev);
1557 			return -EINVAL;
1558 		}
1559 	} while (wsi_dev != next_wsi_dev);
1560 
1561 	of_node_put(next_wsi_dev);
1562 	ag->num_devices = device_count;
1563 
1564 	return 0;
1565 }
1566 
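/* Compute this device's position in the WSI ring relative to the node
 * marked with "qcom,wsi-controller" in the device tree:
 *   index = (num_devices + node_index - controller_index) % num_devices
 * so the controller itself always ends up with index 0.
 */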
1567 static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
1568 				     struct ath12k_base *ab)
1569 {
1570 	int i, wsi_controller_index = -1, node_index = -1;
1571 	bool control;
1572 
1573 	for (i = 0; i < ag->num_devices; i++) {
1574 		control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
1575 		if (control)
1576 			wsi_controller_index = i;
1577 
1578 		if (ag->wsi_node[i] == ab->dev->of_node)
1579 			node_index = i;
1580 	}
1581 
1582 	if (wsi_controller_index == -1) {
1583 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
1584 		return -EINVAL;
1585 	}
1586 
1587 	if (node_index == -1) {
1588 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
1589 		return -EINVAL;
1590 	}
1591 
1592 	ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
1593 		ag->num_devices;
1594 
1595 	return 0;
1596 }
1597 
1598 static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
1599 {
1600 	struct ath12k_wsi_info *wsi = &ab->wsi_info;
1601 	struct ath12k_hw_group *ag;
1602 
1603 	lockdep_assert_held(&ath12k_hw_group_mutex);
1604 
1605 	/* Devices are grouped based on the device tree. On platforms without
1606 	 * any valid group information, each device becomes part of its own
1607 	 * invalid group.
1608 	 *
1609 	 * Group id ATH12K_INVALID_GROUP_ID is used for single device groups
1610 	 * whose dt entry is missing or wrong, so there can be many groups
1611 	 * sharing that same id. Hence the default group id of
1612 	 * ATH12K_INVALID_GROUP_ID combined with the number of devices in the
1613 	 * ath12k_hw_group determines whether the group is a multi device or
1614 	 * a single device group.
1615 	 */
1616 
1617 	ag = ath12k_core_hw_group_find_by_dt(ab);
1618 	if (!ag) {
1619 		ag = ath12k_core_hw_group_alloc(ab);
1620 		if (!ag) {
1621 			ath12k_warn(ab, "unable to create new hw group\n");
1622 			return NULL;
1623 		}
1624 
1625 		if (ath12k_core_get_wsi_info(ag, ab) ||
1626 		    ath12k_core_get_wsi_index(ag, ab)) {
1627 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
1628 				   "unable to get wsi info from dt, grouping single device");
1629 			ag->id = ATH12K_INVALID_GROUP_ID;
1630 			ag->num_devices = 1;
1631 			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
1632 			wsi->index = 0;
1633 		}
1634 
1635 		goto exit;
1636 	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
1637 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
1638 			   ag->id);
1639 		goto invalid_group;
1640 	} else {
1641 		if (ath12k_core_get_wsi_index(ag, ab))
1642 			goto invalid_group;
1643 		goto exit;
1644 	}
1645 
1646 invalid_group:
1647 	ag = ath12k_core_hw_group_alloc(ab);
1648 	if (!ag) {
1649 		ath12k_warn(ab, "unable to create new hw group\n");
1650 		return NULL;
1651 	}
1652 
1653 	ag->id = ATH12K_INVALID_GROUP_ID;
1654 	ag->num_devices = 1;
1655 	wsi->index = 0;
1656 
1657 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");
1658 
1659 exit:
1660 	if (ag->num_probed >= ag->num_devices) {
1661 		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
1662 		goto invalid_group;
1663 	}
1664 
1665 	ab->device_id = ag->num_probed++;
1666 	ag->ab[ab->device_id] = ab;
1667 	ab->ag = ag;
1668 
1669 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
1670 		   ag->id, ag->num_devices, wsi->index);
1671 
1672 	return ag;
1673 }
1674 
1675 void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
1676 {
1677 	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
1678 	u8 device_id = ab->device_id;
1679 	int num_probed;
1680 
1681 	if (!ag)
1682 		return;
1683 
1684 	mutex_lock(&ag->mutex);
1685 
1686 	if (WARN_ON(device_id >= ag->num_devices)) {
1687 		mutex_unlock(&ag->mutex);
1688 		return;
1689 	}
1690 
1691 	if (WARN_ON(ag->ab[device_id] != ab)) {
1692 		mutex_unlock(&ag->mutex);
1693 		return;
1694 	}
1695 
1696 	ag->ab[device_id] = NULL;
1697 	ab->ag = NULL;
1698 	ab->device_id = ATH12K_INVALID_DEVICE_ID;
1699 
1700 	if (ag->num_probed)
1701 		ag->num_probed--;
1702 
1703 	num_probed = ag->num_probed;
1704 
1705 	mutex_unlock(&ag->mutex);
1706 
1707 	if (!num_probed)
1708 		ath12k_core_hw_group_free(ag);
1709 }
1710 
1711 static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
1712 {
1713 	struct ath12k_base *ab;
1714 	int i;
1715 
1716 	if (WARN_ON(!ag))
1717 		return;
1718 
1719 	for (i = 0; i < ag->num_devices; i++) {
1720 		ab = ag->ab[i];
1721 		if (!ab)
1722 			continue;
1723 
1724 		ath12k_core_soc_destroy(ab);
1725 	}
1726 }
1727 
1728 static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
1729 {
1730 	struct ath12k_base *ab;
1731 	int i;
1732 
1733 	if (!ag)
1734 		return;
1735 
1736 	mutex_lock(&ag->mutex);
1737 
1738 	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
1739 		mutex_unlock(&ag->mutex);
1740 		return;
1741 	}
1742 
1743 	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);
1744 
1745 	ath12k_core_hw_group_stop(ag);
1746 
1747 	for (i = 0; i < ag->num_devices; i++) {
1748 		ab = ag->ab[i];
1749 		if (!ab)
1750 			continue;
1751 
1752 		mutex_lock(&ab->core_lock);
1753 		ath12k_core_stop(ab);
1754 		mutex_unlock(&ab->core_lock);
1755 	}
1756 
1757 	mutex_unlock(&ag->mutex);
1758 }
1759 
1760 static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
1761 {
1762 	struct ath12k_base *ab;
1763 	int i, ret;
1764 
1765 	lockdep_assert_held(&ag->mutex);
1766 
1767 	for (i = 0; i < ag->num_devices; i++) {
1768 		ab = ag->ab[i];
1769 		if (!ab)
1770 			continue;
1771 
1772 		mutex_lock(&ab->core_lock);
1773 
1774 		ret = ath12k_core_soc_create(ab);
1775 		if (ret) {
1776 			mutex_unlock(&ab->core_lock);
1777 			ath12k_err(ab, "failed to create soc core: %d\n", ret);
1778 			return ret;
1779 		}
1780 
1781 		mutex_unlock(&ab->core_lock);
1782 	}
1783 
1784 	return 0;
1785 }
1786 
1787 void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
1788 {
1789 	struct ath12k_base *ab;
1790 	int i;
1791 
1792 	lockdep_assert_held(&ag->mutex);
1793 
1794 	/* If more than one device is grouped, inter-device MLO can work
1795 	 * regardless of whether each device internally supports
1796 	 * single_chip_mlo or not.
1797 	 * Only when there is a single device does MLO capability depend on
1798 	 * whether that device supports intra-chip MLO.
1799 	 */
1800 	if (ag->num_devices > 1) {
1801 		ag->mlo_capable = true;
1802 	} else {
1803 		ab = ag->ab[0];
1804 		ag->mlo_capable = ab->single_chip_mlo_supp;
1805 
1806 		/* WCN chipsets do not advertise this in firmware features,
1807 		 * hence skip the check.
1808 		 */
1809 		if (ab->hw_params->def_num_link)
1810 			return;
1811 	}
1812 
1813 	if (!ag->mlo_capable)
1814 		return;
1815 
1816 	for (i = 0; i < ag->num_devices; i++) {
1817 		ab = ag->ab[i];
1818 		if (!ab)
1819 			continue;
1820 
1821 		/* if even one device's firmware reports MLO as unsupported,
1822 		 * mark MLO unsupported for the whole group
1823 		 */
1824 		if (!test_bit(ATH12K_FW_FEATURE_MLO, ab->fw.fw_features)) {
1825 			ag->mlo_capable = false;
1826 			return;
1827 		}
1828 	}
1829 }
1830 
1831 int ath12k_core_init(struct ath12k_base *ab)
1832 {
1833 	struct ath12k_hw_group *ag;
1834 	int ret;
1835 
1836 	ret = ath12k_core_panic_notifier_register(ab);
1837 	if (ret)
1838 		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
1839 
1840 	mutex_lock(&ath12k_hw_group_mutex);
1841 
1842 	ag = ath12k_core_hw_group_assign(ab);
1843 	if (!ag) {
1844 		mutex_unlock(&ath12k_hw_group_mutex);
1845 		ath12k_warn(ab, "unable to get hw group\n");
1846 		return -ENODEV;
1847 	}
1848 
1849 	mutex_unlock(&ath12k_hw_group_mutex);
1850 
1851 	mutex_lock(&ag->mutex);
1852 
1853 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
1854 		   ag->num_devices, ag->num_probed);
1855 
1856 	if (ath12k_core_hw_group_create_ready(ag)) {
1857 		ret = ath12k_core_hw_group_create(ag);
1858 		if (ret) {
1859 			mutex_unlock(&ag->mutex);
1860 			ath12k_warn(ab, "unable to create hw group\n");
1861 			goto err;
1862 		}
1863 	}
1864 
1865 	mutex_unlock(&ag->mutex);
1866 
1867 	return 0;
1868 
1869 err:
1870 	ath12k_core_hw_group_destroy(ab->ag);
1871 	ath12k_core_hw_group_unassign(ab);
1872 	return ret;
1873 }
1874 
1875 void ath12k_core_deinit(struct ath12k_base *ab)
1876 {
1877 	ath12k_core_panic_notifier_unregister(ab);
1878 	ath12k_core_hw_group_cleanup(ab->ag);
1879 	ath12k_core_hw_group_destroy(ab->ag);
1880 	ath12k_core_hw_group_unassign(ab);
1881 }
1882 
1883 void ath12k_core_free(struct ath12k_base *ab)
1884 {
1885 	timer_delete_sync(&ab->rx_replenish_retry);
1886 	destroy_workqueue(ab->workqueue_aux);
1887 	destroy_workqueue(ab->workqueue);
1888 	kfree(ab);
1889 }
1890 
1891 struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
1892 				      enum ath12k_bus bus)
1893 {
1894 	struct ath12k_base *ab;
1895 
1896 	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
1897 	if (!ab)
1898 		return NULL;
1899 
1900 	init_completion(&ab->driver_recovery);
1901 
1902 	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
1903 	if (!ab->workqueue)
1904 		goto err_sc_free;
1905 
1906 	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
1907 	if (!ab->workqueue_aux)
1908 		goto err_free_wq;
1909 
1910 	mutex_init(&ab->core_lock);
1911 	spin_lock_init(&ab->base_lock);
1912 	init_completion(&ab->reset_complete);
1913 
1914 	INIT_LIST_HEAD(&ab->peers);
1915 	init_waitqueue_head(&ab->peer_mapping_wq);
1916 	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
1917 	INIT_WORK(&ab->restart_work, ath12k_core_restart);
1918 	INIT_WORK(&ab->reset_work, ath12k_core_reset);
1919 	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
1920 	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
1921 
1922 	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
1923 	init_completion(&ab->htc_suspend);
1924 	init_completion(&ab->restart_completed);
1925 	init_completion(&ab->wow.wakeup_completed);
1926 
1927 	ab->dev = dev;
1928 	ab->hif.bus = bus;
1929 	ab->qmi.num_radios = U8_MAX;
1930 	ab->single_chip_mlo_supp = false;
1931 
1932 	/* Device index used to identify the devices within a group.
1933 	 *
1934 	 * In intra-device MLO only one device is present in a group,
1935 	 * so the index is always zero.
1936 	 *
1937 	 * In inter-device MLO multiple devices are present in a group,
1938 	 * so non-zero values are expected.
1939 	 */
1940 	ab->device_id = 0;
1941 
1942 	return ab;
1943 
1944 err_free_wq:
1945 	destroy_workqueue(ab->workqueue);
1946 err_sc_free:
1947 	kfree(ab);
1948 	return NULL;
1949 }
1950 
1951 MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11be wireless LAN cards.");
1952 MODULE_LICENSE("Dual BSD/GPL");
1953