xref: /linux/drivers/net/wireless/ath/ath12k/core.c (revision 816b02e63a759c4458edee142b721ab09c918b3d)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/slab.h>
9 #include <linux/remoteproc.h>
10 #include <linux/firmware.h>
11 #include <linux/of.h>
12 #include "core.h"
13 #include "dp_tx.h"
14 #include "dp_rx.h"
15 #include "debug.h"
16 #include "hif.h"
17 #include "fw.h"
18 #include "debugfs.h"
19 #include "wow.h"
20 
21 unsigned int ath12k_debug_mask;
22 module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
23 MODULE_PARM_DESC(debug_mask, "Debugging mask");
24 
25 /* protected with ath12k_hw_group_mutex */
26 static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
27 
28 static DEFINE_MUTEX(ath12k_hw_group_mutex);
29 
30 static int ath12k_core_rfkill_config(struct ath12k_base *ab)
31 {
32 	struct ath12k *ar;
33 	int ret = 0, i;
34 
35 	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
36 		return 0;
37 
38 	for (i = 0; i < ab->num_radios; i++) {
39 		ar = ab->pdevs[i].ar;
40 
41 		ret = ath12k_mac_rfkill_config(ar);
42 		if (ret && ret != -EOPNOTSUPP) {
43 			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
44 			return ret;
45 		}
46 	}
47 
48 	return ret;
49 }
50 
51 /* Check if we need to continue with the suspend/resume operation.
52  * Return:
53  *	a negative value: an error occurred, do not continue.
54  *	0: no error, but do not continue.
55  *	a positive value: no error, continue.
56  */
57 static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
58 {
59 	struct ath12k *ar;
60 
61 	if (!ab->hw_params->supports_suspend)
62 		return -EOPNOTSUPP;
63 
64 	/* So far only single_pdev_only chips have supports_suspend set to true,
65 	 * so use index 0 as a dummy pdev here.
66 	 */
67 	ar = ab->pdevs[0].ar;
68 	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
69 		return 0;
70 
71 	return 1;
72 }
73 
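/* System PM entry points, exported for the bus layer's PM hooks. The
 * intended ordering, as implemented below, is roughly:
 *
 *	ath12k_core_suspend()       - wait for pending TX to complete
 *	ath12k_core_suspend_late()  - stop ACPI, disable IRQs, power down HIF
 *	ath12k_core_resume_early()  - power the HIF back up
 *	ath12k_core_resume()        - wait for the firmware restart to complete
 *
 * Each of them first checks ath12k_core_continue_suspend_resume() and bails
 * out if suspend is unsupported or the hw state is not ATH12K_HW_STATE_OFF.
 */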
74 int ath12k_core_suspend(struct ath12k_base *ab)
75 {
76 	struct ath12k *ar;
77 	int ret, i;
78 
79 	ret = ath12k_core_continue_suspend_resume(ab);
80 	if (ret <= 0)
81 		return ret;
82 
83 	for (i = 0; i < ab->num_radios; i++) {
84 		ar = ab->pdevs[i].ar;
85 		if (!ar)
86 			continue;
87 
88 		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
89 
90 		ret = ath12k_mac_wait_tx_complete(ar);
91 		if (ret) {
92 			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
93 			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
94 			return ret;
95 		}
96 
97 		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
98 	}
99 
100 	/* The PM framework skips the suspend_late/resume_early callbacks
101 	 * if other devices report errors in their suspend callbacks.
102 	 * However, ath12k_core_resume() would still be called, because we
103 	 * return success here and the kernel then puts us on dpm_suspended_list.
104 	 * Since we won't go through a power down/up cycle in that case, there
105 	 * is no chance to call complete(&ab->restart_completed) in
106 	 * ath12k_core_restart(), making ath12k_core_resume() time out.
107 	 * So call it here to avoid that issue. This also works when no error
108 	 * happens and suspend_late/resume_early do get called, because the
109 	 * completion is reinitialized in ath12k_core_resume_early().
110 	 */
111 	complete(&ab->restart_completed);
112 
113 	return 0;
114 }
115 EXPORT_SYMBOL(ath12k_core_suspend);
116 
117 int ath12k_core_suspend_late(struct ath12k_base *ab)
118 {
119 	int ret;
120 
121 	ret = ath12k_core_continue_suspend_resume(ab);
122 	if (ret <= 0)
123 		return ret;
124 
125 	ath12k_acpi_stop(ab);
126 
127 	ath12k_hif_irq_disable(ab);
128 	ath12k_hif_ce_irq_disable(ab);
129 
130 	ath12k_hif_power_down(ab, true);
131 
132 	return 0;
133 }
134 EXPORT_SYMBOL(ath12k_core_suspend_late);
135 
136 int ath12k_core_resume_early(struct ath12k_base *ab)
137 {
138 	int ret;
139 
140 	ret = ath12k_core_continue_suspend_resume(ab);
141 	if (ret <= 0)
142 		return ret;
143 
144 	reinit_completion(&ab->restart_completed);
145 	ret = ath12k_hif_power_up(ab);
146 	if (ret)
147 		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
148 
149 	return ret;
150 }
151 EXPORT_SYMBOL(ath12k_core_resume_early);
152 
153 int ath12k_core_resume(struct ath12k_base *ab)
154 {
155 	long time_left;
156 	int ret;
157 
158 	ret = ath12k_core_continue_suspend_resume(ab);
159 	if (ret <= 0)
160 		return ret;
161 
162 	time_left = wait_for_completion_timeout(&ab->restart_completed,
163 						ATH12K_RESET_TIMEOUT_HZ);
164 	if (time_left == 0) {
165 		ath12k_warn(ab, "timeout while waiting for restart complete");
166 		return -ETIMEDOUT;
167 	}
168 
169 	return 0;
170 }
171 EXPORT_SYMBOL(ath12k_core_resume);
172 
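/* Build the board name used to match entries in board-2.bin. Depending on
 * the search mode the result looks roughly like one of the following (all
 * values below are purely illustrative):
 *
 *	bus=pci,vendor=17cb,device=1107,subsystem-vendor=17cb,subsystem-device=0108,qmi-chip-id=2,qmi-board-id=255
 *	bus=pci,qmi-chip-id=2,qmi-board-id=255
 *	bus=pci
 *
 * with an optional ",variant=<bdf_ext>" suffix when with_variant is set and
 * an SMBIOS BDF extension string was found.
 */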
173 static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
174 					   size_t name_len, bool with_variant,
175 					   bool bus_type_mode)
176 {
177 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
178 	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
179 
180 	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
181 		scnprintf(variant, sizeof(variant), ",variant=%s",
182 			  ab->qmi.target.bdf_ext);
183 
184 	switch (ab->id.bdf_search) {
185 	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
186 		if (bus_type_mode)
187 			scnprintf(name, name_len,
188 				  "bus=%s",
189 				  ath12k_bus_str(ab->hif.bus));
190 		else
191 			scnprintf(name, name_len,
192 				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
193 				  ath12k_bus_str(ab->hif.bus),
194 				  ab->id.vendor, ab->id.device,
195 				  ab->id.subsystem_vendor,
196 				  ab->id.subsystem_device,
197 				  ab->qmi.target.chip_id,
198 				  ab->qmi.target.board_id,
199 				  variant);
200 		break;
201 	default:
202 		scnprintf(name, name_len,
203 			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
204 			  ath12k_bus_str(ab->hif.bus),
205 			  ab->qmi.target.chip_id,
206 			  ab->qmi.target.board_id, variant);
207 		break;
208 	}
209 
210 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
211 
212 	return 0;
213 }
214 
215 static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
216 					 size_t name_len)
217 {
218 	return __ath12k_core_create_board_name(ab, name, name_len, true, false);
219 }
220 
221 static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
222 						  size_t name_len)
223 {
224 	return __ath12k_core_create_board_name(ab, name, name_len, false, false);
225 }
226 
227 static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
228 						  size_t name_len)
229 {
230 	return __ath12k_core_create_board_name(ab, name, name_len, false, true);
231 }
232 
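/* Request a firmware file relative to this device's firmware directory; the
 * exact path (something like ATH12K_FW_DIR/<hw_params->fw.dir>/<file>) is
 * built by ath12k_core_create_firmware_path(). Returns an ERR_PTR() on
 * failure, so callers must check with IS_ERR() and eventually drop the
 * reference with release_firmware() (or ath12k_core_free_bdf() for board
 * files).
 */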
233 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
234 						    const char *file)
235 {
236 	const struct firmware *fw;
237 	char path[100];
238 	int ret;
239 
240 	if (!file)
241 		return ERR_PTR(-ENOENT);
242 
243 	ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
244 
245 	ret = firmware_request_nowarn(&fw, path, ab->dev);
246 	if (ret)
247 		return ERR_PTR(ret);
248 
249 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
250 		   path, fw->size);
251 
252 	return fw;
253 }
254 
255 void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
256 {
257 	if (!IS_ERR(bd->fw))
258 		release_firmware(bd->fw);
259 
260 	memset(bd, 0, sizeof(*bd));
261 }
262 
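/* Walk the nested IEs inside a board/regdb container IE: a name IE
 * (name_id) must match @boardname before the next data IE (data_id) is
 * accepted. Returns 0 with bd->data/bd->len set on a match, -ENOENT when
 * nothing matches, or a negative error on malformed content.
 */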
263 static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
264 					 struct ath12k_board_data *bd,
265 					 const void *buf, size_t buf_len,
266 					 const char *boardname,
267 					 int ie_id,
268 					 int name_id,
269 					 int data_id)
270 {
271 	const struct ath12k_fw_ie *hdr;
272 	bool name_match_found;
273 	int ret, board_ie_id;
274 	size_t board_ie_len;
275 	const void *board_ie_data;
276 
277 	name_match_found = false;
278 
279 	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
280 	while (buf_len > sizeof(struct ath12k_fw_ie)) {
281 		hdr = buf;
282 		board_ie_id = le32_to_cpu(hdr->id);
283 		board_ie_len = le32_to_cpu(hdr->len);
284 		board_ie_data = hdr->data;
285 
286 		buf_len -= sizeof(*hdr);
287 		buf += sizeof(*hdr);
288 
289 		if (buf_len < ALIGN(board_ie_len, 4)) {
290 			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
291 				   ath12k_bd_ie_type_str(ie_id),
292 				   buf_len, ALIGN(board_ie_len, 4));
293 			ret = -EINVAL;
294 			goto out;
295 		}
296 
297 		if (board_ie_id == name_id) {
298 			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
299 					board_ie_data, board_ie_len);
300 
301 			if (board_ie_len != strlen(boardname))
302 				goto next;
303 
304 			ret = memcmp(board_ie_data, boardname, strlen(boardname));
305 			if (ret)
306 				goto next;
307 
308 			name_match_found = true;
309 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
310 				   "boot found match %s for name '%s'",
311 				   ath12k_bd_ie_type_str(ie_id),
312 				   boardname);
313 		} else if (board_ie_id == data_id) {
314 			if (!name_match_found)
315 				/* no match found */
316 				goto next;
317 
318 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
319 				   "boot found %s for '%s'",
320 				   ath12k_bd_ie_type_str(ie_id),
321 				   boardname);
322 
323 			bd->data = board_ie_data;
324 			bd->len = board_ie_len;
325 
326 			ret = 0;
327 			goto out;
328 		} else {
329 			ath12k_warn(ab, "unknown %s id found: %d\n",
330 				    ath12k_bd_ie_type_str(ie_id),
331 				    board_ie_id);
332 		}
333 next:
334 		/* jump over the padding */
335 		board_ie_len = ALIGN(board_ie_len, 4);
336 
337 		buf_len -= board_ie_len;
338 		buf += board_ie_len;
339 	}
340 
341 	/* no match found */
342 	ret = -ENOENT;
343 
344 out:
345 	return ret;
346 }
347 
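/* Fetch board data from the API 2 container file (board-2.bin). The layout
 * parsed below is roughly:
 *
 *	ATH12K_BOARD_MAGIC string, NUL terminated, padded to 4 bytes
 *	struct ath12k_fw_ie (e.g. ATH12K_BD_IE_BOARD)
 *		struct ath12k_fw_ie - board name
 *		struct ath12k_fw_ie - board data
 *	...
 *
 * where every IE is padded to a 4-byte boundary.
 */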
348 static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
349 					      struct ath12k_board_data *bd,
350 					      const char *boardname,
351 					      int ie_id_match,
352 					      int name_id,
353 					      int data_id)
354 {
355 	size_t len, magic_len;
356 	const u8 *data;
357 	char *filename, filepath[100];
358 	size_t ie_len;
359 	struct ath12k_fw_ie *hdr;
360 	int ret, ie_id;
361 
362 	filename = ATH12K_BOARD_API2_FILE;
363 
364 	if (!bd->fw)
365 		bd->fw = ath12k_core_firmware_request(ab, filename);
366 
367 	if (IS_ERR(bd->fw))
368 		return PTR_ERR(bd->fw);
369 
370 	data = bd->fw->data;
371 	len = bd->fw->size;
372 
373 	ath12k_core_create_firmware_path(ab, filename,
374 					 filepath, sizeof(filepath));
375 
376 	/* the magic string is padded with an extra NUL byte */
377 	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
378 	if (len < magic_len) {
379 		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
380 			   filepath, len);
381 		ret = -EINVAL;
382 		goto err;
383 	}
384 
385 	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
386 		ath12k_err(ab, "found invalid board magic\n");
387 		ret = -EINVAL;
388 		goto err;
389 	}
390 
391 	/* magic is padded to 4 bytes */
392 	magic_len = ALIGN(magic_len, 4);
393 	if (len < magic_len) {
394 		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
395 			   filepath, len);
396 		ret = -EINVAL;
397 		goto err;
398 	}
399 
400 	data += magic_len;
401 	len -= magic_len;
402 
403 	while (len > sizeof(struct ath12k_fw_ie)) {
404 		hdr = (struct ath12k_fw_ie *)data;
405 		ie_id = le32_to_cpu(hdr->id);
406 		ie_len = le32_to_cpu(hdr->len);
407 
408 		len -= sizeof(*hdr);
409 		data = hdr->data;
410 
411 		if (len < ALIGN(ie_len, 4)) {
412 			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
413 				   ie_id, ie_len, len);
414 			ret = -EINVAL;
415 			goto err;
416 		}
417 
418 		if (ie_id == ie_id_match) {
419 			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
420 							    ie_len,
421 							    boardname,
422 							    ie_id_match,
423 							    name_id,
424 							    data_id);
425 			if (ret == -ENOENT)
426 				/* no match found, continue */
427 				goto next;
428 			else if (ret)
429 				/* there was an error, bail out */
430 				goto err;
431 			/* either found or error, so stop searching */
432 			goto out;
433 		}
434 next:
435 		/* jump over the padding */
436 		ie_len = ALIGN(ie_len, 4);
437 
438 		len -= ie_len;
439 		data += ie_len;
440 	}
441 
442 out:
443 	if (!bd->data || !bd->len) {
444 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
445 			   "failed to fetch %s for %s from %s\n",
446 			   ath12k_bd_ie_type_str(ie_id_match),
447 			   boardname, filepath);
448 		ret = -ENODATA;
449 		goto err;
450 	}
451 
452 	return 0;
453 
454 err:
455 	ath12k_core_free_bdf(ab, bd);
456 	return ret;
457 }
458 
459 int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
460 				       struct ath12k_board_data *bd,
461 				       char *filename)
462 {
463 	bd->fw = ath12k_core_firmware_request(ab, filename);
464 	if (IS_ERR(bd->fw))
465 		return PTR_ERR(bd->fw);
466 
467 	bd->data = bd->fw->data;
468 	bd->len = bd->fw->size;
469 
470 	return 0;
471 }
472 
473 #define BOARD_NAME_SIZE 200
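/* Board data lookup order, matching the code below:
 *
 *	1. board-2.bin using the full board name (including the variant)
 *	2. board-2.bin using the fallback board name (without the variant)
 *	3. the plain API 1 board file (ATH12K_DEFAULT_BOARD_FILE)
 *
 * An error is returned only if all three attempts fail.
 */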
474 int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
475 {
476 	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
477 	char *filename, filepath[100];
478 	int bd_api;
479 	int ret;
480 
481 	filename = ATH12K_BOARD_API2_FILE;
482 
483 	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
484 	if (ret) {
485 		ath12k_err(ab, "failed to create board name: %d", ret);
486 		return ret;
487 	}
488 
489 	bd_api = 2;
490 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
491 						 ATH12K_BD_IE_BOARD,
492 						 ATH12K_BD_IE_BOARD_NAME,
493 						 ATH12K_BD_IE_BOARD_DATA);
494 	if (!ret)
495 		goto success;
496 
497 	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
498 						     sizeof(fallback_boardname));
499 	if (ret) {
500 		ath12k_err(ab, "failed to create fallback board name: %d", ret);
501 		return ret;
502 	}
503 
504 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
505 						 ATH12K_BD_IE_BOARD,
506 						 ATH12K_BD_IE_BOARD_NAME,
507 						 ATH12K_BD_IE_BOARD_DATA);
508 	if (!ret)
509 		goto success;
510 
511 	bd_api = 1;
512 	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
513 	if (ret) {
514 		ath12k_core_create_firmware_path(ab, filename,
515 						 filepath, sizeof(filepath));
516 		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
517 			   boardname, filepath);
518 		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
519 			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
520 				   fallback_boardname, filepath);
521 
522 		ath12k_err(ab, "failed to fetch board.bin from %s\n",
523 			   ab->hw_params->fw.dir);
524 		return ret;
525 	}
526 
527 success:
528 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
529 	return 0;
530 }
531 
532 int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
533 {
534 	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
535 	int ret;
536 
537 	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
538 	if (ret) {
539 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
540 			   "failed to create board name for regdb: %d", ret);
541 		goto exit;
542 	}
543 
544 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
545 						 ATH12K_BD_IE_REGDB,
546 						 ATH12K_BD_IE_REGDB_NAME,
547 						 ATH12K_BD_IE_REGDB_DATA);
548 	if (!ret)
549 		goto exit;
550 
551 	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
552 						     BOARD_NAME_SIZE);
553 	if (ret) {
554 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
555 			   "failed to create default board name for regdb: %d", ret);
556 		goto exit;
557 	}
558 
559 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
560 						 ATH12K_BD_IE_REGDB,
561 						 ATH12K_BD_IE_REGDB_NAME,
562 						 ATH12K_BD_IE_REGDB_DATA);
563 	if (!ret)
564 		goto exit;
565 
566 	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
567 	if (ret)
568 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
569 			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);
570 
571 exit:
572 	if (!ret)
573 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");
574 
575 	return ret;
576 }
577 
578 u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
579 {
580 	if (ab->num_radios == 2)
581 		return TARGET_NUM_STATIONS_DBS;
582 	else if (ab->num_radios == 3)
583 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
584 	return TARGET_NUM_STATIONS_SINGLE;
585 }
586 
587 u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
588 {
589 	if (ab->num_radios == 2)
590 		return TARGET_NUM_PEERS_PDEV_DBS;
591 	else if (ab->num_radios == 3)
592 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
593 	return TARGET_NUM_PEERS_PDEV_SINGLE;
594 }
595 
596 u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
597 {
598 	if (ab->num_radios == 2)
599 		return TARGET_NUM_TIDS(DBS);
600 	else if (ab->num_radios == 3)
601 		return TARGET_NUM_TIDS(DBS_SBS);
602 	return TARGET_NUM_TIDS(SINGLE);
603 }
604 
605 static void ath12k_core_stop(struct ath12k_base *ab)
606 {
607 	ath12k_core_stopped(ab);
608 
609 	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
610 		ath12k_qmi_firmware_stop(ab);
611 
612 	ath12k_acpi_stop(ab);
613 
614 	ath12k_dp_rx_pdev_reo_cleanup(ab);
615 	ath12k_hif_stop(ab);
616 	ath12k_wmi_detach(ab);
617 	ath12k_dp_free(ab);
618 
619 	/* De-Init of components as needed */
620 }
621 
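/* dmi_walk() callback: look for the vendor specific SMBIOS entry carrying
 * the board data file extension. The string must start with
 * ATH12K_SMBIOS_BDF_EXT_MAGIC; the remainder (without the magic prefix) is
 * copied into ab->qmi.target.bdf_ext and later appended to the board name
 * as ",variant=<bdf_ext>".
 */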
622 static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
623 {
624 	struct ath12k_base *ab = data;
625 	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
626 	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
627 	ssize_t copied;
628 	size_t len;
629 	int i;
630 
631 	if (ab->qmi.target.bdf_ext[0] != '\0')
632 		return;
633 
634 	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
635 		return;
636 
637 	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
638 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
639 			   "wrong smbios bdf ext type length (%d).\n",
640 			   hdr->length);
641 		return;
642 	}
643 
644 	if (!smbios->bdf_enabled) {
645 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
646 		return;
647 	}
648 
649 	/* Only one string exists (per spec) */
650 	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
651 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
652 			   "bdf variant magic does not match.\n");
653 		return;
654 	}
655 
656 	len = min_t(size_t,
657 		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
658 	for (i = 0; i < len; i++) {
659 		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
660 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
661 				   "bdf variant name contains non ascii chars.\n");
662 			return;
663 		}
664 	}
665 
666 	/* Copy extension name without magic prefix */
667 	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
668 			 sizeof(ab->qmi.target.bdf_ext));
669 	if (copied < 0) {
670 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
671 			   "bdf variant string is longer than the buffer can accommodate\n");
672 		return;
673 	}
674 
675 	ath12k_dbg(ab, ATH12K_DBG_BOOT,
676 		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
677 		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
678 }
679 
680 int ath12k_core_check_smbios(struct ath12k_base *ab)
681 {
682 	ab->qmi.target.bdf_ext[0] = '\0';
683 	dmi_walk(ath12k_core_check_bdfext, ab);
684 
685 	if (ab->qmi.target.bdf_ext[0] == '\0')
686 		return -ENODATA;
687 
688 	return 0;
689 }
690 
691 static int ath12k_core_soc_create(struct ath12k_base *ab)
692 {
693 	int ret;
694 
695 	ret = ath12k_qmi_init_service(ab);
696 	if (ret) {
697 		ath12k_err(ab, "failed to initialize qmi: %d\n", ret);
698 		return ret;
699 	}
700 
701 	ath12k_debugfs_soc_create(ab);
702 
703 	ret = ath12k_hif_power_up(ab);
704 	if (ret) {
705 		ath12k_err(ab, "failed to power up: %d\n", ret);
706 		goto err_qmi_deinit;
707 	}
708 
709 	return 0;
710 
711 err_qmi_deinit:
712 	ath12k_debugfs_soc_destroy(ab);
713 	ath12k_qmi_deinit_service(ab);
714 	return ret;
715 }
716 
717 static void ath12k_core_soc_destroy(struct ath12k_base *ab)
718 {
719 	ath12k_hif_power_down(ab, false);
720 	ath12k_reg_free(ab);
721 	ath12k_debugfs_soc_destroy(ab);
722 	ath12k_qmi_deinit_service(ab);
723 }
724 
725 static int ath12k_core_pdev_create(struct ath12k_base *ab)
726 {
727 	int ret;
728 
729 	ret = ath12k_dp_pdev_alloc(ab);
730 	if (ret) {
731 		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
732 		return ret;
733 	}
734 
735 	return 0;
736 }
737 
738 static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
739 {
740 	ath12k_dp_pdev_free(ab);
741 }
742 
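/* Bring up the host to firmware control path. The sequence below is
 * roughly: WMI attach -> HTC init -> HIF start -> HTC/HTT/WMI service
 * connect -> wait for WMI service ready -> REO setup -> WMI init command ->
 * wait for unified ready -> optional DBS hw mode -> HTT version request.
 * Failures unwind in reverse through the err_* labels.
 */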
743 static int ath12k_core_start(struct ath12k_base *ab,
744 			     enum ath12k_firmware_mode mode)
745 {
746 	int ret;
747 
748 	lockdep_assert_held(&ab->core_lock);
749 
750 	ret = ath12k_wmi_attach(ab);
751 	if (ret) {
752 		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
753 		return ret;
754 	}
755 
756 	ret = ath12k_htc_init(ab);
757 	if (ret) {
758 		ath12k_err(ab, "failed to init htc: %d\n", ret);
759 		goto err_wmi_detach;
760 	}
761 
762 	ret = ath12k_hif_start(ab);
763 	if (ret) {
764 		ath12k_err(ab, "failed to start HIF: %d\n", ret);
765 		goto err_wmi_detach;
766 	}
767 
768 	ret = ath12k_htc_wait_target(&ab->htc);
769 	if (ret) {
770 		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
771 		goto err_hif_stop;
772 	}
773 
774 	ret = ath12k_dp_htt_connect(&ab->dp);
775 	if (ret) {
776 		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
777 		goto err_hif_stop;
778 	}
779 
780 	ret = ath12k_wmi_connect(ab);
781 	if (ret) {
782 		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
783 		goto err_hif_stop;
784 	}
785 
786 	ret = ath12k_htc_start(&ab->htc);
787 	if (ret) {
788 		ath12k_err(ab, "failed to start HTC: %d\n", ret);
789 		goto err_hif_stop;
790 	}
791 
792 	ret = ath12k_wmi_wait_for_service_ready(ab);
793 	if (ret) {
794 		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
795 			   ret);
796 		goto err_hif_stop;
797 	}
798 
799 	ath12k_dp_cc_config(ab);
800 
801 	ret = ath12k_dp_rx_pdev_reo_setup(ab);
802 	if (ret) {
803 		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
804 		goto err_hif_stop;
805 	}
806 
807 	ath12k_dp_hal_rx_desc_init(ab);
808 
809 	ret = ath12k_wmi_cmd_init(ab);
810 	if (ret) {
811 		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
812 		goto err_reo_cleanup;
813 	}
814 
815 	ret = ath12k_wmi_wait_for_unified_ready(ab);
816 	if (ret) {
817 		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
818 			   ret);
819 		goto err_reo_cleanup;
820 	}
821 
822 	/* put the hardware into DBS mode */
823 	if (ab->hw_params->single_pdev_only) {
824 		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
825 		if (ret) {
826 			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
827 			goto err_reo_cleanup;
828 		}
829 	}
830 
831 	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
832 	if (ret) {
833 		ath12k_err(ab, "failed to send htt version request message: %d\n",
834 			   ret);
835 		goto err_reo_cleanup;
836 	}
837 
838 	ret = ath12k_acpi_start(ab);
839 	if (ret)
840 		/* ACPI is optional so continue in case of an error */
841 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
842 
843 	if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
844 		/* Indicate the core start in the appropriate group */
845 		ath12k_core_started(ab);
846 
847 	return 0;
848 
849 err_reo_cleanup:
850 	ath12k_dp_rx_pdev_reo_cleanup(ab);
851 err_hif_stop:
852 	ath12k_hif_stop(ab);
853 err_wmi_detach:
854 	ath12k_wmi_detach(ab);
855 	return ret;
856 }
857 
858 static void ath12k_core_device_cleanup(struct ath12k_base *ab)
859 {
860 	mutex_lock(&ab->core_lock);
861 
862 	ath12k_hif_irq_disable(ab);
863 	ath12k_core_pdev_destroy(ab);
864 
865 	mutex_unlock(&ab->core_lock);
866 }
867 
868 static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
869 {
870 	struct ath12k_base *ab;
871 	int i;
872 
873 	lockdep_assert_held(&ag->mutex);
874 
875 	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
876 
877 	ath12k_mac_unregister(ag);
878 
879 	for (i = ag->num_devices - 1; i >= 0; i--) {
880 		ab = ag->ab[i];
881 		if (!ab)
882 			continue;
883 		ath12k_core_device_cleanup(ab);
884 	}
885 
886 	ath12k_mac_destroy(ag);
887 }
888 
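/* Start all devices in a hardware group: mac allocation and registration
 * happen once per group, while pdev creation, IRQ enablement and rfkill
 * configuration run per device. When called again after recovery, the
 * REGISTERED flag skips straight to the per-device part.
 */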
889 static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
890 {
891 	struct ath12k_base *ab;
892 	int ret, i;
893 
894 	lockdep_assert_held(&ag->mutex);
895 
896 	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
897 		goto core_pdev_create;
898 
899 	ret = ath12k_mac_allocate(ag);
900 	if (WARN_ON(ret))
901 		return ret;
902 
903 	ret = ath12k_mac_register(ag);
904 	if (WARN_ON(ret))
905 		goto err_mac_destroy;
906 
907 	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
908 
909 core_pdev_create:
910 	for (i = 0; i < ag->num_devices; i++) {
911 		ab = ag->ab[i];
912 		if (!ab)
913 			continue;
914 
915 		mutex_lock(&ab->core_lock);
916 
917 		ret = ath12k_core_pdev_create(ab);
918 		if (ret) {
919 			ath12k_err(ab, "failed to create pdev core: %d\n", ret);
920 			mutex_unlock(&ab->core_lock);
921 			goto err;
922 		}
923 
924 		ath12k_hif_irq_enable(ab);
925 
926 		ret = ath12k_core_rfkill_config(ab);
927 		if (ret && ret != -EOPNOTSUPP) {
928 			mutex_unlock(&ab->core_lock);
929 			goto err;
930 		}
931 
932 		mutex_unlock(&ab->core_lock);
933 	}
934 
935 	return 0;
936 
937 err:
938 	ath12k_core_hw_group_stop(ag);
939 	return ret;
940 
941 err_mac_destroy:
942 	ath12k_mac_destroy(ag);
943 
944 	return ret;
945 }
946 
947 static int ath12k_core_start_firmware(struct ath12k_base *ab,
948 				      enum ath12k_firmware_mode mode)
949 {
950 	int ret;
951 
952 	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
953 				    &ab->qmi.ce_cfg.shadow_reg_v3_len);
954 
955 	ret = ath12k_qmi_firmware_start(ab, mode);
956 	if (ret) {
957 		ath12k_err(ab, "failed to send firmware start: %d\n", ret);
958 		return ret;
959 	}
960 
961 	return ret;
962 }
963 
964 static inline
965 bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
966 {
967 	lockdep_assert_held(&ag->mutex);
968 
969 	return (ag->num_started == ag->num_devices);
970 }
971 
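/* Trigger the QMI host capability exchange on the device that follows @ab
 * in the group, so partner devices are brought up one after another in
 * probe order.
 */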
972 static void ath12k_core_trigger_partner(struct ath12k_base *ab)
973 {
974 	struct ath12k_hw_group *ag = ab->ag;
975 	struct ath12k_base *partner_ab;
976 	bool found = false;
977 	int i;
978 
979 	for (i = 0; i < ag->num_devices; i++) {
980 		partner_ab = ag->ab[i];
981 		if (!partner_ab)
982 			continue;
983 
984 		if (found)
985 			ath12k_qmi_trigger_host_cap(partner_ab);
986 
987 		found = (partner_ab == ab);
988 	}
989 }
990 
991 int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
992 {
993 	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
994 	int ret, i;
995 
996 	ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
997 	if (ret) {
998 		ath12k_err(ab, "failed to start firmware: %d\n", ret);
999 		return ret;
1000 	}
1001 
1002 	ret = ath12k_ce_init_pipes(ab);
1003 	if (ret) {
1004 		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
1005 		goto err_firmware_stop;
1006 	}
1007 
1008 	ret = ath12k_dp_alloc(ab);
1009 	if (ret) {
1010 		ath12k_err(ab, "failed to init DP: %d\n", ret);
1011 		goto err_firmware_stop;
1012 	}
1013 
1014 	mutex_lock(&ag->mutex);
1015 	mutex_lock(&ab->core_lock);
1016 
1017 	ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
1018 	if (ret) {
1019 		ath12k_err(ab, "failed to start core: %d\n", ret);
1020 		goto err_dp_free;
1021 	}
1022 
1023 	mutex_unlock(&ab->core_lock);
1024 
1025 	if (ath12k_core_hw_group_start_ready(ag)) {
1026 		ret = ath12k_core_hw_group_start(ag);
1027 		if (ret) {
1028 			ath12k_warn(ab, "unable to start hw group\n");
1029 			goto err_core_stop;
1030 		}
1031 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
1032 	} else {
1033 		ath12k_core_trigger_partner(ab);
1034 	}
1035 
1036 	mutex_unlock(&ag->mutex);
1037 
1038 	return 0;
1039 
1040 err_core_stop:
1041 	for (i = ag->num_devices - 1; i >= 0; i--) {
1042 		ab = ag->ab[i];
1043 		if (!ab)
1044 			continue;
1045 
1046 		mutex_lock(&ab->core_lock);
1047 		ath12k_core_stop(ab);
1048 		mutex_unlock(&ab->core_lock);
1049 	}
1050 	goto exit;
1051 
1052 err_dp_free:
1053 	ath12k_dp_free(ab);
1054 	mutex_unlock(&ab->core_lock);
1055 err_firmware_stop:
1056 	ath12k_qmi_firmware_stop(ab);
1057 
1058 exit:
1059 	mutex_unlock(&ag->mutex);
1060 	return ret;
1061 }
1062 
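/* Rebuild the host side state after a firmware crash: free the DP and SRNG
 * state, reset the vdev map and restart the firmware through
 * ath12k_core_qmi_firmware_ready().
 */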
1063 static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
1064 {
1065 	int ret;
1066 
1067 	mutex_lock(&ab->core_lock);
1068 	ath12k_dp_pdev_free(ab);
1069 	ath12k_ce_cleanup_pipes(ab);
1070 	ath12k_wmi_detach(ab);
1071 	ath12k_dp_rx_pdev_reo_cleanup(ab);
1072 	mutex_unlock(&ab->core_lock);
1073 
1074 	ath12k_dp_free(ab);
1075 	ath12k_hal_srng_deinit(ab);
1076 
1077 	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
1078 
1079 	ret = ath12k_hal_srng_init(ab);
1080 	if (ret)
1081 		return ret;
1082 
1083 	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
1084 
1085 	ret = ath12k_core_qmi_firmware_ready(ab);
1086 	if (ret)
1087 		goto err_hal_srng_deinit;
1088 
1089 	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
1090 
1091 	return 0;
1092 
1093 err_hal_srng_deinit:
1094 	ath12k_hal_srng_deinit(ab);
1095 	return ret;
1096 }
1097 
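/* Propagate the cached rfkill state (ab->rfkill_radio_on) to every radio
 * and report it to mac80211 via wiphy_rfkill_set_hw_state().
 */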
1098 static void ath12k_rfkill_work(struct work_struct *work)
1099 {
1100 	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1101 	struct ath12k *ar;
1102 	struct ath12k_hw *ah;
1103 	struct ieee80211_hw *hw;
1104 	bool rfkill_radio_on;
1105 	int i, j;
1106 
1107 	spin_lock_bh(&ab->base_lock);
1108 	rfkill_radio_on = ab->rfkill_radio_on;
1109 	spin_unlock_bh(&ab->base_lock);
1110 
1111 	for (i = 0; i < ath12k_get_num_hw(ab); i++) {
1112 		ah = ath12k_ab_to_ah(ab, i);
1113 		if (!ah)
1114 			continue;
1115 
1116 		for (j = 0; j < ah->num_radio; j++) {
1117 			ar = &ah->radio[j];
1118 			if (!ar)
1119 				continue;
1120 
1121 			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1122 		}
1123 
1124 		hw = ah->hw;
1125 		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1126 	}
1127 }
1128 
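/* Stop all activity on a single radio as part of recovery: finish scans,
 * clean up peers, cancel pending work and detach the pdev from the active
 * RCU list. The wiphy mutex must be held (see the lockdep assertion below).
 */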
1129 void ath12k_core_halt(struct ath12k *ar)
1130 {
1131 	struct ath12k_base *ab = ar->ab;
1132 
1133 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
1134 
1135 	ar->num_created_vdevs = 0;
1136 	ar->allocated_vdev_map = 0;
1137 
1138 	ath12k_mac_scan_finish(ar);
1139 	ath12k_mac_peer_cleanup_all(ar);
1140 	cancel_delayed_work_sync(&ar->scan.timeout);
1141 	cancel_work_sync(&ar->regd_update_work);
1142 	cancel_work_sync(&ab->rfkill_work);
1143 
1144 	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
1145 	synchronize_rcu();
1146 	INIT_LIST_HEAD(&ar->arvifs);
1147 	idr_init(&ar->txmgmt_idr);
1148 }
1149 
1150 static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
1151 {
1152 	struct ath12k *ar;
1153 	struct ath12k_hw *ah;
1154 	int i, j;
1155 
1156 	spin_lock_bh(&ab->base_lock);
1157 	ab->stats.fw_crash_counter++;
1158 	spin_unlock_bh(&ab->base_lock);
1159 
1160 	if (ab->is_reset)
1161 		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
1162 
1163 	for (i = 0; i < ath12k_get_num_hw(ab); i++) {
1164 		ah = ath12k_ab_to_ah(ab, i);
1165 		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
1166 			continue;
1167 
1168 		ieee80211_stop_queues(ah->hw);
1169 
1170 		for (j = 0; j < ah->num_radio; j++) {
1171 			ar = &ah->radio[j];
1172 
1173 			ath12k_mac_drain_tx(ar);
1174 			complete(&ar->scan.started);
1175 			complete(&ar->scan.completed);
1176 			complete(&ar->scan.on_channel);
1177 			complete(&ar->peer_assoc_done);
1178 			complete(&ar->peer_delete_done);
1179 			complete(&ar->install_key_done);
1180 			complete(&ar->vdev_setup_done);
1181 			complete(&ar->vdev_delete_done);
1182 			complete(&ar->bss_survey_done);
1183 
1184 			wake_up(&ar->dp.tx_empty_waitq);
1185 			idr_for_each(&ar->txmgmt_idr,
1186 				     ath12k_mac_tx_mgmt_pending_free, ar);
1187 			idr_destroy(&ar->txmgmt_idr);
1188 			wake_up(&ar->txmgmt_empty_waitq);
1189 		}
1190 	}
1191 
1192 	wake_up(&ab->wmi_ab.tx_credits_wq);
1193 	wake_up(&ab->peer_mapping_wq);
1194 }
1195 
1196 static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
1197 {
1198 	struct ath12k_hw *ah;
1199 	struct ath12k *ar;
1200 	int i, j;
1201 
1202 	for (i = 0; i < ath12k_get_num_hw(ab); i++) {
1203 		ah = ath12k_ab_to_ah(ab, i);
1204 		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
1205 			continue;
1206 
1207 		wiphy_lock(ah->hw->wiphy);
1208 		mutex_lock(&ah->hw_mutex);
1209 
1210 		switch (ah->state) {
1211 		case ATH12K_HW_STATE_ON:
1212 			ah->state = ATH12K_HW_STATE_RESTARTING;
1213 
1214 			for (j = 0; j < ah->num_radio; j++) {
1215 				ar = &ah->radio[j];
1216 				ath12k_core_halt(ar);
1217 			}
1218 
1219 			break;
1220 		case ATH12K_HW_STATE_OFF:
1221 			ath12k_warn(ab,
1222 				    "cannot restart hw %d that hasn't been started\n",
1223 				    i);
1224 			break;
1225 		case ATH12K_HW_STATE_RESTARTING:
1226 			break;
1227 		case ATH12K_HW_STATE_RESTARTED:
1228 			ah->state = ATH12K_HW_STATE_WEDGED;
1229 			fallthrough;
1230 		case ATH12K_HW_STATE_WEDGED:
1231 			ath12k_warn(ab,
1232 				    "device is wedged, will not restart hw %d\n", i);
1233 			break;
1234 		}
1235 
1236 		mutex_unlock(&ah->hw_mutex);
1237 		wiphy_unlock(ah->hw->wiphy);
1238 	}
1239 
1240 	complete(&ab->driver_recovery);
1241 }
1242 
1243 static void ath12k_core_restart(struct work_struct *work)
1244 {
1245 	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
1246 	struct ath12k_hw *ah;
1247 	int ret, i;
1248 
1249 	ret = ath12k_core_reconfigure_on_crash(ab);
1250 	if (ret) {
1251 		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
1252 		return;
1253 	}
1254 
1255 	if (ab->is_reset) {
1256 		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
1257 			atomic_dec(&ab->reset_count);
1258 			complete(&ab->reset_complete);
1259 			ab->is_reset = false;
1260 			atomic_set(&ab->fail_cont_count, 0);
1261 			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
1262 		}
1263 
1264 		for (i = 0; i < ath12k_get_num_hw(ab); i++) {
1265 			ah = ath12k_ab_to_ah(ab, i);
1266 			ieee80211_restart_hw(ah->hw);
1267 		}
1268 	}
1269 
1270 	complete(&ab->restart_completed);
1271 }
1272 
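/* Top level reset worker. Recovery attempts are rate limited: after
 * ATH12K_RESET_MAX_FAIL_COUNT_FIRST consecutive failures further resets are
 * deferred until reset_fail_timeout expires, and after
 * ATH12K_RESET_MAX_FAIL_COUNT_FINAL failures no more resets are attempted.
 * Overlapping reset requests are coalesced via ab->reset_complete.
 */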
1273 static void ath12k_core_reset(struct work_struct *work)
1274 {
1275 	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
1276 	int reset_count, fail_cont_count;
1277 	long time_left;
1278 
1279 	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
1280 		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
1281 		return;
1282 	}
1283 
1284 	/* Sometimes recovery fails and then every subsequent attempt fails as
1285 	 * well; bail out to avoid retrying forever when recovery cannot succeed.
1286 	 */
1287 	fail_cont_count = atomic_read(&ab->fail_cont_count);
1288 
1289 	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
1290 		return;
1291 
1292 	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
1293 	    time_before(jiffies, ab->reset_fail_timeout))
1294 		return;
1295 
1296 	reset_count = atomic_inc_return(&ab->reset_count);
1297 
1298 	if (reset_count > 1) {
1299 		/* Sometimes another reset worker is scheduled before the previous
1300 		 * one has completed; the second worker would then destroy the state
1301 		 * of the first. The wait below avoids that.
1302 		 */
1303 		ath12k_warn(ab, "already resetting count %d\n", reset_count);
1304 
1305 		reinit_completion(&ab->reset_complete);
1306 		time_left = wait_for_completion_timeout(&ab->reset_complete,
1307 							ATH12K_RESET_TIMEOUT_HZ);
1308 		if (time_left) {
1309 			ath12k_dbg(ab, ATH12K_DBG_BOOT, "previous reset completed, skipping this reset\n");
1310 			atomic_dec(&ab->reset_count);
1311 			return;
1312 		}
1313 
1314 		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
1315 		/* Record the continuous recovery failure count when recovery fails */
1316 		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
1317 	}
1318 
1319 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");
1320 
1321 	ab->is_reset = true;
1322 	atomic_set(&ab->recovery_count, 0);
1323 
1324 	ath12k_coredump_collect(ab);
1325 	ath12k_core_pre_reconfigure_recovery(ab);
1326 
1327 	ath12k_core_post_reconfigure_recovery(ab);
1328 
1329 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting for recovery to start...\n");
1330 
1331 	ath12k_hif_irq_disable(ab);
1332 	ath12k_hif_ce_irq_disable(ab);
1333 
1334 	ath12k_hif_power_down(ab, false);
1335 	ath12k_hif_power_up(ab);
1336 
1337 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
1338 }
1339 
1340 int ath12k_core_pre_init(struct ath12k_base *ab)
1341 {
1342 	int ret;
1343 
1344 	ret = ath12k_hw_init(ab);
1345 	if (ret) {
1346 		ath12k_err(ab, "failed to init hw params: %d\n", ret);
1347 		return ret;
1348 	}
1349 
1350 	ath12k_fw_map(ab);
1351 
1352 	return 0;
1353 }
1354 
1355 static int ath12k_core_panic_handler(struct notifier_block *nb,
1356 				     unsigned long action, void *data)
1357 {
1358 	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
1359 					      panic_nb);
1360 
1361 	return ath12k_hif_panic_handler(ab);
1362 }
1363 
1364 static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
1365 {
1366 	ab->panic_nb.notifier_call = ath12k_core_panic_handler;
1367 
1368 	return atomic_notifier_chain_register(&panic_notifier_list,
1369 					      &ab->panic_nb);
1370 }
1371 
1372 static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
1373 {
1374 	atomic_notifier_chain_unregister(&panic_notifier_list,
1375 					 &ab->panic_nb);
1376 }
1377 
1378 static inline
1379 bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1380 {
1381 	lockdep_assert_held(&ag->mutex);
1382 
1383 	return (ag->num_probed == ag->num_devices);
1384 }
1385 
1386 static struct ath12k_hw_group *ath12k_core_hw_group_alloc(u8 id, u8 max_devices)
1387 {
1388 	struct ath12k_hw_group *ag;
1389 
1390 	lockdep_assert_held(&ath12k_hw_group_mutex);
1391 
1392 	ag = kzalloc(sizeof(*ag), GFP_KERNEL);
1393 	if (!ag)
1394 		return NULL;
1395 
1396 	ag->id = id;
1397 	ag->num_devices = max_devices;
1398 	list_add(&ag->list, &ath12k_hw_group_list);
1399 	mutex_init(&ag->mutex);
1400 
1401 	return ag;
1402 }
1403 
1404 static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
1405 {
1406 	mutex_lock(&ath12k_hw_group_mutex);
1407 
1408 	list_del(&ag->list);
1409 	kfree(ag);
1410 
1411 	mutex_unlock(&ath12k_hw_group_mutex);
1412 }
1413 
1414 static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
1415 {
1416 	u32 group_id = ATH12K_INVALID_GROUP_ID;
1417 	struct ath12k_hw_group *ag;
1418 
1419 	lockdep_assert_held(&ath12k_hw_group_mutex);
1420 
1421 	/* The grouping of multiple devices is meant to be derived from the
1422 	 * device tree. TODO: parse the device tree to learn which devices
1423 	 * belong to a group.
1424 	 *
1425 	 * Platforms without any valid group information end up with each
1426 	 * device being part of its own invalid group.
1427 	 *
1428 	 * Currently no device tree information is parsed, so grouping of
1429 	 * multiple devices is not involved; a single device is added to the group.
1430 	 */
1431 	ag = ath12k_core_hw_group_alloc(group_id, 1);
1432 	if (!ag) {
1433 		ath12k_warn(ab, "unable to create new hw group\n");
1434 		return NULL;
1435 	}
1436 
1437 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");
1438 
1439 	ab->device_id = ag->num_probed++;
1440 	ag->ab[ab->device_id] = ab;
1441 	ab->ag = ag;
1442 	ag->mlo_capable = false;
1443 
1444 	return ag;
1445 }
1446 
1447 void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
1448 {
1449 	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
1450 	u8 device_id = ab->device_id;
1451 	int num_probed;
1452 
1453 	if (!ag)
1454 		return;
1455 
1456 	mutex_lock(&ag->mutex);
1457 
1458 	if (WARN_ON(device_id >= ag->num_devices)) {
1459 		mutex_unlock(&ag->mutex);
1460 		return;
1461 	}
1462 
1463 	if (WARN_ON(ag->ab[device_id] != ab)) {
1464 		mutex_unlock(&ag->mutex);
1465 		return;
1466 	}
1467 
1468 	ag->ab[device_id] = NULL;
1469 	ab->ag = NULL;
1470 	ab->device_id = ATH12K_INVALID_DEVICE_ID;
1471 
1472 	if (ag->num_probed)
1473 		ag->num_probed--;
1474 
1475 	num_probed = ag->num_probed;
1476 
1477 	mutex_unlock(&ag->mutex);
1478 
1479 	if (!num_probed)
1480 		ath12k_core_hw_group_free(ag);
1481 }
1482 
1483 static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
1484 {
1485 	struct ath12k_base *ab;
1486 	int i;
1487 
1488 	if (WARN_ON(!ag))
1489 		return;
1490 
1491 	for (i = 0; i < ag->num_devices; i++) {
1492 		ab = ag->ab[i];
1493 		if (!ab)
1494 			continue;
1495 
1496 		ath12k_core_soc_destroy(ab);
1497 	}
1498 }
1499 
1500 static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
1501 {
1502 	struct ath12k_base *ab;
1503 	int i;
1504 
1505 	if (!ag)
1506 		return;
1507 
1508 	mutex_lock(&ag->mutex);
1509 
1510 	ath12k_core_hw_group_stop(ag);
1511 
1512 	for (i = 0; i < ag->num_devices; i++) {
1513 		ab = ag->ab[i];
1514 		if (!ab)
1515 			continue;
1516 
1517 		mutex_lock(&ab->core_lock);
1518 		ath12k_core_stop(ab);
1519 		mutex_unlock(&ab->core_lock);
1520 	}
1521 
1522 	mutex_unlock(&ag->mutex);
1523 }
1524 
1525 static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
1526 {
1527 	struct ath12k_base *ab;
1528 	int i, ret;
1529 
1530 	lockdep_assert_held(&ag->mutex);
1531 
1532 	for (i = 0; i < ag->num_devices; i++) {
1533 		ab = ag->ab[i];
1534 		if (!ab)
1535 			continue;
1536 
1537 		mutex_lock(&ab->core_lock);
1538 
1539 		ret = ath12k_core_soc_create(ab);
1540 		if (ret) {
1541 			mutex_unlock(&ab->core_lock);
1542 			ath12k_err(ab, "failed to create soc core: %d\n", ret);
1543 			return ret;
1544 		}
1545 
1546 		mutex_unlock(&ab->core_lock);
1547 	}
1548 
1549 	return 0;
1550 }
1551 
1552 void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
1553 {
1554 	lockdep_assert_held(&ag->mutex);
1555 
1556 	/* If more than one device is grouped, inter-device MLO can work
1557 	 * regardless of whether each device internally supports
1558 	 * single_chip_mlo or not.
1559 	 * Only when there is a single device does it depend on whether that
1560 	 * device supports intra-chip MLO.
1561 	 */
1562 	if (ag->num_devices > 1)
1563 		ag->mlo_capable = true;
1564 	else
1565 		ag->mlo_capable = ag->ab[0]->single_chip_mlo_supp;
1566 }
1567 
1568 int ath12k_core_init(struct ath12k_base *ab)
1569 {
1570 	struct ath12k_hw_group *ag;
1571 	int ret;
1572 
1573 	ret = ath12k_core_panic_notifier_register(ab);
1574 	if (ret)
1575 		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
1576 
1577 	mutex_lock(&ath12k_hw_group_mutex);
1578 
1579 	ag = ath12k_core_hw_group_assign(ab);
1580 	if (!ag) {
1581 		mutex_unlock(&ath12k_hw_group_mutex);
1582 		ath12k_warn(ab, "unable to get hw group\n");
1583 		return -ENODEV;
1584 	}
1585 
1586 	mutex_unlock(&ath12k_hw_group_mutex);
1587 
1588 	mutex_lock(&ag->mutex);
1589 
1590 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
1591 		   ag->num_devices, ag->num_probed);
1592 
1593 	if (ath12k_core_hw_group_create_ready(ag)) {
1594 		ret = ath12k_core_hw_group_create(ag);
1595 		if (ret) {
1596 			mutex_unlock(&ag->mutex);
1597 			ath12k_warn(ab, "unable to create hw group\n");
1598 			goto err;
1599 		}
1600 	}
1601 
1602 	mutex_unlock(&ag->mutex);
1603 
1604 	return 0;
1605 
1606 err:
1607 	ath12k_core_hw_group_destroy(ab->ag);
1608 	ath12k_core_hw_group_unassign(ab);
1609 	return ret;
1610 }
1611 
1612 void ath12k_core_deinit(struct ath12k_base *ab)
1613 {
1614 	ath12k_core_panic_notifier_unregister(ab);
1615 	ath12k_core_hw_group_cleanup(ab->ag);
1616 	ath12k_core_hw_group_destroy(ab->ag);
1617 	ath12k_core_hw_group_unassign(ab);
1618 }
1619 
1620 void ath12k_core_free(struct ath12k_base *ab)
1621 {
1622 	timer_delete_sync(&ab->rx_replenish_retry);
1623 	destroy_workqueue(ab->workqueue_aux);
1624 	destroy_workqueue(ab->workqueue);
1625 	kfree(ab);
1626 }
1627 
1628 struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
1629 				      enum ath12k_bus bus)
1630 {
1631 	struct ath12k_base *ab;
1632 
1633 	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
1634 	if (!ab)
1635 		return NULL;
1636 
1637 	init_completion(&ab->driver_recovery);
1638 
1639 	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
1640 	if (!ab->workqueue)
1641 		goto err_sc_free;
1642 
1643 	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
1644 	if (!ab->workqueue_aux)
1645 		goto err_free_wq;
1646 
1647 	mutex_init(&ab->core_lock);
1648 	spin_lock_init(&ab->base_lock);
1649 	init_completion(&ab->reset_complete);
1650 
1651 	INIT_LIST_HEAD(&ab->peers);
1652 	init_waitqueue_head(&ab->peer_mapping_wq);
1653 	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
1654 	INIT_WORK(&ab->restart_work, ath12k_core_restart);
1655 	INIT_WORK(&ab->reset_work, ath12k_core_reset);
1656 	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
1657 	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
1658 
1659 	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
1660 	init_completion(&ab->htc_suspend);
1661 	init_completion(&ab->restart_completed);
1662 	init_completion(&ab->wow.wakeup_completed);
1663 
1664 	ab->dev = dev;
1665 	ab->hif.bus = bus;
1666 	ab->qmi.num_radios = U8_MAX;
1667 	ab->single_chip_mlo_supp = false;
1668 
1669 	/* Device index used to identify the devices in a group.
1670 	 *
1671 	 * In intra-device MLO only one device is present in a group,
1672 	 * so the index is always zero.
1673 	 *
1674 	 * In inter-device MLO multiple devices are present in a group,
1675 	 * so non-zero values are expected.
1676 	 */
1677 	ab->device_id = 0;
1678 
1679 	return ab;
1680 
1681 err_free_wq:
1682 	destroy_workqueue(ab->workqueue);
1683 err_sc_free:
1684 	kfree(ab);
1685 	return NULL;
1686 }
1687 
1688 MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11be wireless LAN cards.");
1689 MODULE_LICENSE("Dual BSD/GPL");
1690