xref: /linux/drivers/net/wireless/ath/ath12k/core.c (revision 0ec4b904be72f78ba6ce6bb9a8aaf2eb6b9b1004)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5  */
6 
7 #include <linux/export.h>
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/remoteproc.h>
11 #include <linux/firmware.h>
12 #include <linux/of.h>
13 #include <linux/of_graph.h>
14 #include "ahb.h"
15 #include "core.h"
16 #include "dp_tx.h"
17 #include "dp_rx.h"
18 #include "debug.h"
19 #include "debugfs.h"
20 #include "fw.h"
21 #include "hif.h"
22 #include "pci.h"
23 #include "wow.h"
24 #include "dp_cmn.h"
25 #include "peer.h"
26 
/* Bitmask selecting which ath12k debug message classes are emitted;
 * writable at runtime via /sys/module/ath12k/parameters/debug_mask.
 */
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
EXPORT_SYMBOL(ath12k_debug_mask);

/* When set, firmware is booted in factory test mode (read-only, 0444). */
bool ath12k_ftm_mode;
module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
EXPORT_SYMBOL(ath12k_ftm_mode);

/* protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);

static DEFINE_MUTEX(ath12k_hw_group_mutex);
41 
/* Per-memory-profile tuning table, indexed by the QMI memory mode.
 * The low-memory (512 MB) profile scales down vdev/client limits and
 * DP ring/descriptor sizes relative to the default profile.
 */
static const struct
ath12k_mem_profile_based_param ath12k_mem_profile_based_param[] = {
[ATH12K_QMI_MEMORY_MODE_DEFAULT] = {
		.num_vdevs = 17,
		.max_client_single = 512,
		.max_client_dbs = 128,
		.max_client_dbs_sbs = 128,
		.dp_params = {
			.tx_comp_ring_size = 32768,
			.rxdma_monitor_buf_ring_size = 4096,
			.rxdma_monitor_dst_ring_size = 8092,
			.num_pool_tx_desc = 32768,
			.rx_desc_count = 12288,
		},
	},
[ATH12K_QMI_MEMORY_MODE_LOW_512_M] = {
		.num_vdevs = 9,
		.max_client_single = 128,
		.max_client_dbs = 64,
		.max_client_dbs_sbs = 64,
		.dp_params = {
			.tx_comp_ring_size = 16384,
			.rxdma_monitor_buf_ring_size = 256,
			.rxdma_monitor_dst_ring_size = 512,
			.num_pool_tx_desc = 16384,
			.rx_desc_count = 6144,
		},
	},
};
71 
72 static int ath12k_core_rfkill_config(struct ath12k_base *ab)
73 {
74 	struct ath12k *ar;
75 	int ret = 0, i;
76 
77 	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
78 		return 0;
79 
80 	if (ath12k_acpi_get_disable_rfkill(ab))
81 		return 0;
82 
83 	for (i = 0; i < ab->num_radios; i++) {
84 		ar = ab->pdevs[i].ar;
85 
86 		ret = ath12k_mac_rfkill_config(ar);
87 		if (ret && ret != -EOPNOTSUPP) {
88 			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
89 			return ret;
90 		}
91 	}
92 
93 	return ret;
94 }
95 
96 /* Check if we need to continue with suspend/resume operation.
97  * Return:
98  *	a negative value: error happens and don't continue.
99  *	0:  no error but don't continue.
100  *	positive value: no error and do continue.
101  */
102 static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
103 {
104 	struct ath12k *ar;
105 
106 	if (!ab->hw_params->supports_suspend)
107 		return -EOPNOTSUPP;
108 
109 	/* so far single_pdev_only chips have supports_suspend as true
110 	 * so pass 0 as a dummy pdev_id here.
111 	 */
112 	ar = ab->pdevs[0].ar;
113 	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
114 		return 0;
115 
116 	return 1;
117 }
118 
119 int ath12k_core_suspend(struct ath12k_base *ab)
120 {
121 	struct ath12k *ar;
122 	int ret, i;
123 
124 	ret = ath12k_core_continue_suspend_resume(ab);
125 	if (ret <= 0)
126 		return ret;
127 
128 	for (i = 0; i < ab->num_radios; i++) {
129 		ar = ab->pdevs[i].ar;
130 		if (!ar)
131 			continue;
132 
133 		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
134 
135 		ret = ath12k_mac_wait_tx_complete(ar);
136 		if (ret) {
137 			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
138 			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
139 			return ret;
140 		}
141 
142 		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
143 	}
144 
145 	/* PM framework skips suspend_late/resume_early callbacks
146 	 * if other devices report errors in their suspend callbacks.
147 	 * However ath12k_core_resume() would still be called because
148 	 * here we return success thus kernel put us on dpm_suspended_list.
149 	 * Since we won't go through a power down/up cycle, there is
150 	 * no chance to call complete(&ab->restart_completed) in
151 	 * ath12k_core_restart(), making ath12k_core_resume() timeout.
152 	 * So call it here to avoid this issue. This also works in case
153 	 * no error happens thus suspend_late/resume_early get called,
154 	 * because it will be reinitialized in ath12k_core_resume_early().
155 	 */
156 	complete(&ab->restart_completed);
157 
158 	return 0;
159 }
160 EXPORT_SYMBOL(ath12k_core_suspend);
161 
162 int ath12k_core_suspend_late(struct ath12k_base *ab)
163 {
164 	int ret;
165 
166 	ret = ath12k_core_continue_suspend_resume(ab);
167 	if (ret <= 0)
168 		return ret;
169 
170 	ath12k_acpi_stop(ab);
171 
172 	ath12k_hif_irq_disable(ab);
173 	ath12k_hif_ce_irq_disable(ab);
174 
175 	ath12k_hif_power_down(ab, true);
176 
177 	return 0;
178 }
179 EXPORT_SYMBOL(ath12k_core_suspend_late);
180 
181 int ath12k_core_resume_early(struct ath12k_base *ab)
182 {
183 	int ret;
184 
185 	ret = ath12k_core_continue_suspend_resume(ab);
186 	if (ret <= 0)
187 		return ret;
188 
189 	reinit_completion(&ab->restart_completed);
190 	ret = ath12k_hif_power_up(ab);
191 	if (ret)
192 		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
193 
194 	return ret;
195 }
196 EXPORT_SYMBOL(ath12k_core_resume_early);
197 
198 int ath12k_core_resume(struct ath12k_base *ab)
199 {
200 	long time_left;
201 	int ret;
202 
203 	ret = ath12k_core_continue_suspend_resume(ab);
204 	if (ret <= 0)
205 		return ret;
206 
207 	time_left = wait_for_completion_timeout(&ab->restart_completed,
208 						ATH12K_RESET_TIMEOUT_HZ);
209 	if (time_left == 0) {
210 		ath12k_warn(ab, "timeout while waiting for restart complete");
211 		return -ETIMEDOUT;
212 	}
213 
214 	return 0;
215 }
216 EXPORT_SYMBOL(ath12k_core_resume);
217 
/* Build the board-file lookup name used to match entries in board-2.bin.
 *
 * @with_variant: append ",variant=<bdf_ext>" when the target reported a
 *	BDF extension string (e.g. from SMBIOS/DT).
 * @bus_type_mode: emit only "bus=<bus>" (used for the regdb default name).
 * @with_default: substitute ATH12K_BOARD_ID_DEFAULT for the target's
 *	board id (fallback name).
 *
 * Always returns 0; the composed name is written into @name.
 */
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					   size_t name_len, bool with_variant,
					   bool bus_type_mode, bool with_default)
{
	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = {};

	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
		scnprintf(variant, sizeof(variant), ",variant=%s",
			  ab->qmi.target.bdf_ext);

	switch (ab->id.bdf_search) {
	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
		/* PCI-style search: include the full vendor/device and
		 * subsystem ids so OEM-specific boards can be matched.
		 */
		if (bus_type_mode)
			scnprintf(name, name_len,
				  "bus=%s",
				  ath12k_bus_str(ab->hif.bus));
		else
			scnprintf(name, name_len,
				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
				  ath12k_bus_str(ab->hif.bus),
				  ab->id.vendor, ab->id.device,
				  ab->id.subsystem_vendor,
				  ab->id.subsystem_device,
				  ab->qmi.target.chip_id,
				  ab->qmi.target.board_id,
				  variant);
		break;
	default:
		/* Generic search keyed only on QMI chip/board ids */
		scnprintf(name, name_len,
			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
			  ath12k_bus_str(ab->hif.bus),
			  ab->qmi.target.chip_id,
			  with_default ?
			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
			  variant);
		break;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);

	return 0;
}
261 
/* Primary board name: full match including the BDF variant string. */
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					 size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
}
267 
/* Fallback board name: no variant, default board id substituted. */
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
}
273 
/* Bus-only board name ("bus=<bus>"), used as the regdb default key. */
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
}
279 
/* Request a firmware file relative to this device's firmware directory.
 *
 * Returns the loaded firmware on success or an ERR_PTR on failure
 * (-ENOENT for a NULL @file, otherwise the request_firmware error).
 * The caller owns the firmware and must release_firmware() it.
 */
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
						    const char *file)
{
	const struct firmware *fw;
	char path[100];
	int ret;

	if (!file)
		return ERR_PTR(-ENOENT);

	ath12k_core_create_firmware_path(ab, file, path, sizeof(path));

	/* _nowarn variant: a missing file is a normal fallback case, so
	 * don't spam the log from the firmware loader.
	 */
	ret = firmware_request_nowarn(&fw, path, ab->dev);
	if (ret)
		return ERR_PTR(ret);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
		   path, fw->size);

	return fw;
}
301 
/* Release board data: drop the firmware reference (bd->fw may hold an
 * ERR_PTR from a failed request, hence the IS_ERR check) and reset the
 * descriptor for reuse.
 */
void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	if (!IS_ERR(bd->fw))
		release_firmware(bd->fw);

	memset(bd, 0, sizeof(*bd));
}
309 
/* Walk the TLV elements inside one BOARD/REGDB container IE and extract
 * the data blob whose preceding name element matches @boardname.
 *
 * @ie_id:   container IE id (for log messages only).
 * @name_id: TLV id carrying a board name string.
 * @data_id: TLV id carrying the board data payload.
 *
 * On success bd->data/bd->len point into @buf (no copy; the caller must
 * keep the underlying firmware alive). Returns 0 on match, -ENOENT when
 * no entry matches, -EINVAL on a malformed TLV stream.
 */
static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
					 struct ath12k_board_data *bd,
					 const void *buf, size_t buf_len,
					 const char *boardname,
					 int ie_id,
					 int name_id,
					 int data_id)
{
	const struct ath12k_fw_ie *hdr;
	bool name_match_found;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	name_match_found = false;

	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath12k_fw_ie)) {
		hdr = buf;
		board_ie_id = le32_to_cpu(hdr->id);
		board_ie_len = le32_to_cpu(hdr->len);
		board_ie_data = hdr->data;

		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		/* Payloads are 4-byte aligned; validate before touching */
		if (buf_len < ALIGN(board_ie_len, 4)) {
			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
				   ath12k_bd_ie_type_str(ie_id),
				   buf_len, ALIGN(board_ie_len, 4));
			ret = -EINVAL;
			goto out;
		}

		if (board_ie_id == name_id) {
			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
					board_ie_data, board_ie_len);

			/* Name strings are stored without terminator, so
			 * require an exact length match before memcmp.
			 */
			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, strlen(boardname));
			if (ret)
				goto next;

			/* Remember the match; the data element follows */
			name_match_found = true;
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found match %s for name '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);
		} else if (board_ie_id == data_id) {
			if (!name_match_found)
				/* no match found */
				goto next;

			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found %s for '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);

			bd->data = board_ie_data;
			bd->len = board_ie_len;

			ret = 0;
			goto out;
		} else {
			ath12k_warn(ab, "unknown %s id found: %d\n",
				    ath12k_bd_ie_type_str(ie_id),
				    board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = ALIGN(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	ret = -ENOENT;

out:
	return ret;
}
394 
/* Fetch board data for @boardname from the API-2 container file
 * (board-2.bin): verify the magic header, then scan top-level IEs for
 * @ie_id_match and delegate matching to ath12k_core_parse_bd_ie_board().
 *
 * bd->fw is reused if already loaded (e.g. a previous lookup with a
 * different name). On any failure the bdf is freed via
 * ath12k_core_free_bdf() before returning.
 *
 * Returns 0 on success, -EINVAL on a malformed file, -ENODATA when no
 * matching entry exists, or a firmware-request error.
 */
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
					      struct ath12k_board_data *bd,
					      const char *boardname,
					      int ie_id_match,
					      int name_id,
					      int data_id)
{
	size_t len, magic_len;
	const u8 *data;
	char *filename, filepath[100];
	size_t ie_len;
	struct ath12k_fw_ie *hdr;
	int ret, ie_id;

	filename = ATH12K_BOARD_API2_FILE;

	if (!bd->fw)
		bd->fw = ath12k_core_firmware_request(ab, filename);

	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	data = bd->fw->data;
	len = bd->fw->size;

	/* filepath is only used for error/debug messages */
	ath12k_core_create_firmware_path(ab, filename,
					 filepath, sizeof(filepath));

	/* magic has extra null byte padded */
	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
		ath12k_err(ab, "found invalid board magic\n");
		ret = -EINVAL;
		goto err;
	}

	/* magic is padded to 4 bytes */
	magic_len = ALIGN(magic_len, 4);
	if (len < magic_len) {
		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	data += magic_len;
	len -= magic_len;

	/* Top-level TLV walk: each IE is a header plus 4-byte-aligned
	 * payload.
	 */
	while (len > sizeof(struct ath12k_fw_ie)) {
		hdr = (struct ath12k_fw_ie *)data;
		ie_id = le32_to_cpu(hdr->id);
		ie_len = le32_to_cpu(hdr->len);

		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < ALIGN(ie_len, 4)) {
			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
				   ie_id, ie_len, len);
			ret = -EINVAL;
			goto err;
		}

		if (ie_id == ie_id_match) {
			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
							    ie_len,
							    boardname,
							    ie_id_match,
							    name_id,
							    data_id);
			if (ret == -ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				goto err;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = ALIGN(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	/* Reaching the end of the file without a match also lands here */
	if (!bd->data || !bd->len) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to fetch %s for %s from %s\n",
			   ath12k_bd_ie_type_str(ie_id_match),
			   boardname, filepath);
		ret = -ENODATA;
		goto err;
	}

	return 0;

err:
	ath12k_core_free_bdf(ab, bd);
	return ret;
}
505 
506 int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
507 				       struct ath12k_board_data *bd,
508 				       char *filename)
509 {
510 	bd->fw = ath12k_core_firmware_request(ab, filename);
511 	if (IS_ERR(bd->fw))
512 		return PTR_ERR(bd->fw);
513 
514 	bd->data = bd->fw->data;
515 	bd->len = bd->fw->size;
516 
517 	return 0;
518 }
519 
#define BOARD_NAME_SIZE 200
/* Fetch the board data file, trying in order:
 *   1. board-2.bin with the full (variant-qualified) board name,
 *   2. board-2.bin with the fallback (default board id) name,
 *   3. the plain API-1 board.bin file.
 * Logs which names/files were tried on total failure.
 */
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
	char *filename, filepath[100];
	int bd_api;
	int ret;

	filename = ATH12K_BOARD_API2_FILE;

	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
	if (ret) {
		ath12k_err(ab, "failed to create board name: %d", ret);
		return ret;
	}

	bd_api = 2;
	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	/* Retry with the default-board-id name before falling back to API 1 */
	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
						     sizeof(fallback_boardname));
	if (ret) {
		ath12k_err(ab, "failed to create fallback board name: %d", ret);
		return ret;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	bd_api = 1;
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
	if (ret) {
		/* Everything failed: report each name that was attempted */
		ath12k_core_create_firmware_path(ab, filename,
						 filepath, sizeof(filepath));
		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
			   boardname, filepath);
		/* only mention the fallback name if it actually differed */
		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
				   fallback_boardname, filepath);

		ath12k_err(ab, "failed to fetch board.bin from %s\n",
			   ab->hw_params->fw.dir);
		return ret;
	}

success:
	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
	return 0;
}
578 
/* Fetch the regulatory database, trying in order:
 *   1. board-2.bin REGDB entry under the full board name,
 *   2. board-2.bin REGDB entry under the bus-only default name,
 *   3. the standalone regdb file.
 * Failures are only logged at debug level since regdb is optional.
 */
int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
	int ret;

	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	/* Fall back to the generic "bus=<bus>" key */
	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
						     BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create default board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	/* Last resort: standalone regdb file in the firmware directory */
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
	if (ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);

exit:
	if (!ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");

	return ret;
}
624 
625 u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
626 {
627 	if (ab->num_radios == 2)
628 		return TARGET_NUM_STATIONS(ab, DBS);
629 	if (ab->num_radios == 3)
630 		return TARGET_NUM_STATIONS(ab, DBS_SBS);
631 	return TARGET_NUM_STATIONS(ab, SINGLE);
632 }
633 
/* Peer budget per radio: one peer per station plus one self-peer per
 * vdev.
 */
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
{
	return ath12k_core_get_max_station_per_radio(ab) + TARGET_NUM_VDEVS(ab);
}
EXPORT_SYMBOL(ath12k_core_get_max_peers_per_radio);
639 
640 struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
641 						  int index)
642 {
643 	struct device *dev = ab->dev;
644 	struct reserved_mem *rmem;
645 	struct device_node *node;
646 
647 	node = of_parse_phandle(dev->of_node, "memory-region", index);
648 	if (!node) {
649 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
650 			   "failed to parse memory-region for index %d\n", index);
651 		return NULL;
652 	}
653 
654 	rmem = of_reserved_mem_lookup(node);
655 	of_node_put(node);
656 	if (!rmem) {
657 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
658 			   "unable to get memory-region for index %d\n", index);
659 		return NULL;
660 	}
661 
662 	return rmem;
663 }
664 
665 static inline
666 void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
667 {
668 	struct ath12k_hw_group *ag = ab->ag;
669 
670 	lockdep_assert_held(&ag->mutex);
671 
672 	if (ab->hw_group_ref) {
673 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
674 			   ag->id);
675 		return;
676 	}
677 
678 	ab->hw_group_ref = true;
679 	ag->num_started++;
680 
681 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
682 		   ag->id, ag->num_started);
683 }
684 
685 static inline
686 void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
687 {
688 	struct ath12k_hw_group *ag = ab->ag;
689 
690 	lockdep_assert_held(&ag->mutex);
691 
692 	if (!ab->hw_group_ref) {
693 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
694 			   ag->id);
695 		return;
696 	}
697 
698 	ab->hw_group_ref = false;
699 	ag->num_started--;
700 
701 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
702 		   ag->id, ag->num_started);
703 }
704 
/* Tear down everything ath12k_core_start() brought up, in reverse
 * order. The sequence matters: sta table first, then drop the group
 * reference, stop firmware/ACPI, and finally dismantle DP/HIF/WMI.
 */
static void ath12k_core_stop(struct ath12k_base *ab)
{
	ath12k_link_sta_rhash_tbl_destroy(ab);

	ath12k_core_to_group_ref_put(ab);

	/* After a crash the firmware is already gone; don't try to stop it */
	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_qmi_firmware_stop(ab);

	ath12k_acpi_stop(ab);

	ath12k_dp_rx_pdev_reo_cleanup(ab);
	ath12k_hif_stop(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));

	/* De-Init of components as needed */
}
723 
/* dmi_walk() callback: parse the vendor SMBIOS table that carries the
 * regulatory country code and the BDF variant string. Fills
 * ab->new_alpha2 and ab->qmi.target.bdf_ext on success; bails silently
 * (debug log only) on any mismatch since the table is optional.
 */
static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
	struct ath12k_base *ab = data;
	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
	ssize_t copied;
	size_t len;
	int i;

	/* A variant was already found in an earlier table; keep it */
	if (ab->qmi.target.bdf_ext[0] != '\0')
		return;

	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
		return;

	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "wrong smbios bdf ext type length (%d).\n",
			   hdr->length);
		return;
	}

	/* new_alpha2 is read by regulatory code under base_lock */
	spin_lock_bh(&ab->base_lock);

	switch (smbios->country_code_flag) {
	case ATH12K_SMBIOS_CC_ISO:
		/* cc_code packs the two ISO alpha-2 chars into a u16 */
		ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
		ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
			   ab->new_alpha2[0], ab->new_alpha2[1]);
		break;
	case ATH12K_SMBIOS_CC_WW:
		/* "00" requests the worldwide regulatory domain */
		ab->new_alpha2[0] = '0';
		ab->new_alpha2[1] = '0';
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
			   smbios->country_code_flag);
		break;
	}

	spin_unlock_bh(&ab->base_lock);

	if (!smbios->bdf_enabled) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
		return;
	}

	/* Only one string exists (per spec) */
	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant magic does not match.\n");
		return;
	}

	/* Reject variant names with non-printable/non-ASCII characters,
	 * since the name ends up inside the board-file lookup string.
	 */
	len = min_t(size_t,
		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
	for (i = 0; i < len; i++) {
		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "bdf variant name contains non ascii chars.\n");
			return;
		}
	}

	/* Copy extension name without magic prefix */
	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
			 sizeof(ab->qmi.target.bdf_ext));
	if (copied < 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant string is longer than the buffer can accommodate\n");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT,
		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
}
803 
804 int ath12k_core_check_smbios(struct ath12k_base *ab)
805 {
806 	ab->qmi.target.bdf_ext[0] = '\0';
807 	dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
808 
809 	if (ab->qmi.target.bdf_ext[0] == '\0')
810 		return -ENODATA;
811 
812 	return 0;
813 }
814 
815 static int ath12k_core_soc_create(struct ath12k_base *ab)
816 {
817 	int ret;
818 
819 	if (ath12k_ftm_mode) {
820 		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
821 		ath12k_info(ab, "Booting in ftm mode\n");
822 	}
823 
824 	ret = ath12k_qmi_init_service(ab);
825 	if (ret) {
826 		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
827 		return ret;
828 	}
829 
830 	ath12k_debugfs_soc_create(ab);
831 
832 	ret = ath12k_hif_power_up(ab);
833 	if (ret) {
834 		ath12k_err(ab, "failed to power up :%d\n", ret);
835 		goto err_qmi_deinit;
836 	}
837 
838 	return 0;
839 
840 err_qmi_deinit:
841 	ath12k_debugfs_soc_destroy(ab);
842 	ath12k_qmi_deinit_service(ab);
843 	return ret;
844 }
845 
/* Inverse of ath12k_core_soc_create() plus regulatory cleanup; call
 * order is power-down first, QMI teardown last.
 */
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
	ath12k_hif_power_down(ab, false);
	ath12k_reg_free(ab);
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
}
853 
/* Per-device pdev setup: DP pdev, thermal zone and pdev debugfs.
 * The DP allocation is released if thermal registration fails.
 */
static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_dp_pdev_alloc(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
		return ret;
	}

	ret = ath12k_thermal_register(ab);
	if (!ret) {
		ath12k_debugfs_pdev_create(ab);
		return 0;
	}

	ath12k_err(ab, "could not register thermal device: %d\n", ret);
	ath12k_dp_pdev_free(ab);
	return ret;
}
878 
/* Inverse of ath12k_core_pdev_create(): thermal first, then DP pdev. */
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
	ath12k_thermal_unregister(ab);
	ath12k_dp_pdev_free(ab);
}
884 
/* Bring the device core up: WMI/HTC/HIF transport, firmware handshake,
 * REO rings and the per-device data structures. Must be called with
 * ab->core_lock held. On failure everything set up so far is unwound
 * through the goto ladder; on success the device has taken a reference
 * in its hw group.
 */
static int ath12k_core_start(struct ath12k_base *ab)
{
	int ret;

	lockdep_assert_held(&ab->core_lock);

	ret = ath12k_wmi_attach(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
		return ret;
	}

	ret = ath12k_htc_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init htc: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_hif_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start HIF: %d\n", ret);
		goto err_wmi_detach;
	}

	/* Wait for the target's HTC ready indication before connecting
	 * any service endpoints.
	 */
	ret = ath12k_htc_wait_target(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_dp_htt_connect(ath12k_ab_to_dp(ab));
	if (ret) {
		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_connect(ab);
	if (ret) {
		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_htc_start(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to start HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_wait_for_service_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
			   ret);
		goto err_hif_stop;
	}

	ath12k_hal_cc_config(ab);

	ret = ath12k_dp_rx_pdev_reo_setup(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_cmd_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
		goto err_reo_cleanup;
	}

	ret = ath12k_wmi_wait_for_unified_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	/* put hardware to DBS mode */
	if (ab->hw_params->single_pdev_only) {
		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
		if (ret) {
			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
			goto err_reo_cleanup;
		}
	}

	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
	if (ret) {
		ath12k_err(ab, "failed to send htt version request message: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	ath12k_acpi_set_dsm_func(ab);

	/* Indicate the core start in the appropriate group */
	ath12k_core_to_group_ref_get(ab);

	ret = ath12k_link_sta_rhash_tbl_init(ab);
	if (ret) {
		ath12k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
		goto err_reo_cleanup;
	}

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);
err_hif_stop:
	ath12k_hif_stop(ab);
err_wmi_detach:
	ath12k_wmi_detach(ab);
	return ret;
}
998 
/* Per-device teardown used when the hw group stops: disable interrupts
 * and destroy the pdev, serialized against other core operations via
 * core_lock.
 */
static void ath12k_core_device_cleanup(struct ath12k_base *ab)
{
	mutex_lock(&ab->core_lock);

	ath12k_hif_irq_disable(ab);
	ath12k_core_pdev_destroy(ab);

	mutex_unlock(&ab->core_lock);
}
1008 
/* Stop the whole hw group: unregister mac, tear down MLO, clean each
 * member device in reverse order and finally free the mac state.
 * Must be called with the group mutex held.
 */
static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	lockdep_assert_held(&ag->mutex);

	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

	ath12k_mac_unregister(ag);

	ath12k_mac_mlo_teardown(ag);

	/* Reverse of the start-up order in ath12k_core_hw_group_start() */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ath12k_core_device_cleanup(ab);
	}

	ath12k_mac_destroy(ag);
}
1034 
1035 u8 ath12k_get_num_partner_link(struct ath12k *ar)
1036 {
1037 	struct ath12k_base *partner_ab, *ab = ar->ab;
1038 	struct ath12k_hw_group *ag = ab->ag;
1039 	struct ath12k_pdev *pdev;
1040 	u8 num_link = 0;
1041 	int i, j;
1042 
1043 	lockdep_assert_held(&ag->mutex);
1044 
1045 	for (i = 0; i < ag->num_devices; i++) {
1046 		partner_ab = ag->ab[i];
1047 
1048 		for (j = 0; j < partner_ab->num_radios; j++) {
1049 			pdev = &partner_ab->pdevs[j];
1050 
1051 			/* Avoid the self link */
1052 			if (ar == pdev->ar)
1053 				continue;
1054 
1055 			num_link++;
1056 		}
1057 	}
1058 
1059 	return num_link;
1060 }
1061 
1062 static int __ath12k_mac_mlo_ready(struct ath12k *ar)
1063 {
1064 	u8 num_link = ath12k_get_num_partner_link(ar);
1065 	int ret;
1066 
1067 	if (num_link == 0)
1068 		return 0;
1069 
1070 	ret = ath12k_wmi_mlo_ready(ar);
1071 	if (ret) {
1072 		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
1073 			   ar->pdev_idx, ret);
1074 		return ret;
1075 	}
1076 
1077 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
1078 		   ar->pdev_idx);
1079 
1080 	return 0;
1081 }
1082 
1083 int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
1084 {
1085 	struct ath12k_hw *ah;
1086 	struct ath12k *ar;
1087 	int ret;
1088 	int i, j;
1089 
1090 	for (i = 0; i < ag->num_hw; i++) {
1091 		ah = ag->ah[i];
1092 		if (!ah)
1093 			continue;
1094 
1095 		for_each_ar(ah, ar, j) {
1096 			ar = &ah->radio[j];
1097 			ret = __ath12k_mac_mlo_ready(ar);
1098 			if (ret)
1099 				return ret;
1100 		}
1101 	}
1102 
1103 	return 0;
1104 }
1105 
1106 static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
1107 {
1108 	int ret, i;
1109 
1110 	if (!ag->mlo_capable)
1111 		return 0;
1112 
1113 	ret = ath12k_mac_mlo_setup(ag);
1114 	if (ret)
1115 		return ret;
1116 
1117 	for (i = 0; i < ag->num_devices; i++)
1118 		ath12k_dp_partner_cc_init(ag->ab[i]);
1119 
1120 	ret = ath12k_mac_mlo_ready(ag);
1121 	if (ret)
1122 		goto err_mlo_teardown;
1123 
1124 	return 0;
1125 
1126 err_mlo_teardown:
1127 	ath12k_mac_mlo_teardown(ag);
1128 
1129 	return ret;
1130 }
1131 
/* Start the hw group: allocate and register mac (first start only),
 * run MLO setup, then create pdevs and enable interrupts on every
 * member device. On a restart (group already REGISTERED) only the MLO
 * setup and pdev creation are redone. Group mutex must be held.
 *
 * Error handling uses two ladders: before registration failures unwind
 * mac/MLO state directly; after the per-device loop has begun, the
 * whole group is stopped via ath12k_core_hw_group_stop().
 */
static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int ret, i;

	lockdep_assert_held(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags)) {
		/* Restart path: mac is still registered, redo MLO only */
		ret = ath12k_core_mlo_setup(ag);
		if (WARN_ON(ret)) {
			ath12k_mac_unregister(ag);
			goto err_mac_destroy;
		}
		goto core_pdev_create;
	}

	ret = ath12k_mac_allocate(ag);
	if (WARN_ON(ret))
		return ret;

	ret = ath12k_core_mlo_setup(ag);
	if (WARN_ON(ret))
		goto err_mac_destroy;

	ret = ath12k_mac_register(ag);
	if (WARN_ON(ret))
		goto err_mlo_teardown;

	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

core_pdev_create:
	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ret = ath12k_core_pdev_create(ab);
		if (ret) {
			ath12k_err(ab, "failed to create pdev core %d\n", ret);
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		ath12k_hif_irq_enable(ab);

		/* -EOPNOTSUPP just means this target has no rfkill */
		ret = ath12k_core_rfkill_config(ab);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

err:
	ath12k_core_hw_group_stop(ag);
	return ret;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

err_mac_destroy:
	ath12k_mac_destroy(ag);

	return ret;
}
1204 
1205 static int ath12k_core_start_firmware(struct ath12k_base *ab,
1206 				      enum ath12k_firmware_mode mode)
1207 {
1208 	int ret;
1209 
1210 	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
1211 				    &ab->qmi.ce_cfg.shadow_reg_v3_len);
1212 
1213 	ret = ath12k_qmi_firmware_start(ab, mode);
1214 	if (ret) {
1215 		ath12k_err(ab, "failed to send firmware start: %d\n", ret);
1216 		return ret;
1217 	}
1218 
1219 	return ret;
1220 }
1221 
1222 static inline
1223 bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
1224 {
1225 	lockdep_assert_held(&ag->mutex);
1226 
1227 	return (ag->num_started == ag->num_devices);
1228 }
1229 
1230 static void ath12k_fw_stats_pdevs_free(struct list_head *head)
1231 {
1232 	struct ath12k_fw_stats_pdev *i, *tmp;
1233 
1234 	list_for_each_entry_safe(i, tmp, head, list) {
1235 		list_del(&i->list);
1236 		kfree(i);
1237 	}
1238 }
1239 
1240 void ath12k_fw_stats_bcn_free(struct list_head *head)
1241 {
1242 	struct ath12k_fw_stats_bcn *i, *tmp;
1243 
1244 	list_for_each_entry_safe(i, tmp, head, list) {
1245 		list_del(&i->list);
1246 		kfree(i);
1247 	}
1248 }
1249 
1250 static void ath12k_fw_stats_vdevs_free(struct list_head *head)
1251 {
1252 	struct ath12k_fw_stats_vdev *i, *tmp;
1253 
1254 	list_for_each_entry_safe(i, tmp, head, list) {
1255 		list_del(&i->list);
1256 		kfree(i);
1257 	}
1258 }
1259 
1260 void ath12k_fw_stats_init(struct ath12k *ar)
1261 {
1262 	INIT_LIST_HEAD(&ar->fw_stats.vdevs);
1263 	INIT_LIST_HEAD(&ar->fw_stats.pdevs);
1264 	INIT_LIST_HEAD(&ar->fw_stats.bcn);
1265 	init_completion(&ar->fw_stats_complete);
1266 	init_completion(&ar->fw_stats_done);
1267 }
1268 
1269 void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
1270 {
1271 	ath12k_fw_stats_pdevs_free(&stats->pdevs);
1272 	ath12k_fw_stats_vdevs_free(&stats->vdevs);
1273 	ath12k_fw_stats_bcn_free(&stats->bcn);
1274 }
1275 
/* Flush all collected firmware stats and restart vdev accounting.
 * data_lock protects the stats lists against the WMI event path.
 */
void ath12k_fw_stats_reset(struct ath12k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ath12k_fw_stats_free(&ar->fw_stats);
	ar->fw_stats.num_vdev_recvd = 0;
	spin_unlock_bh(&ar->data_lock);
}
1283 
1284 static void ath12k_core_trigger_partner(struct ath12k_base *ab)
1285 {
1286 	struct ath12k_hw_group *ag = ab->ag;
1287 	struct ath12k_base *partner_ab;
1288 	bool found = false;
1289 	int i;
1290 
1291 	for (i = 0; i < ag->num_devices; i++) {
1292 		partner_ab = ag->ab[i];
1293 		if (!partner_ab)
1294 			continue;
1295 
1296 		if (found)
1297 			ath12k_qmi_trigger_host_cap(partner_ab);
1298 
1299 		found = (partner_ab == ab);
1300 	}
1301 }
1302 
/* Called once QMI reports firmware ready: start firmware, bring up
 * CE/DP and the core, and - when this is the last device of the group
 * to arrive - start the whole hardware group.  Otherwise trigger the
 * partner devices so they can continue their own bring-up.
 *
 * Returns 0 on success or a negative errno with everything unwound.
 */
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	int ret, i;

	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
	if (ret) {
		ath12k_err(ab, "failed to start firmware: %d\n", ret);
		return ret;
	}

	ret = ath12k_ce_init_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
		goto err_firmware_stop;
	}

	ret = ath12k_dp_cmn_device_init(ath12k_ab_to_dp(ab));
	if (ret) {
		ath12k_err(ab, "failed to init DP: %d\n", ret);
		goto err_firmware_stop;
	}

	/* Lock order: group mutex first, then the device core lock. */
	mutex_lock(&ag->mutex);
	mutex_lock(&ab->core_lock);

	ret = ath12k_core_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start core: %d\n", ret);
		goto err_deinit;
	}

	mutex_unlock(&ab->core_lock);

	/* Only the last device reaching this point starts the group. */
	if (ath12k_core_hw_group_start_ready(ag)) {
		ret = ath12k_core_hw_group_start(ag);
		if (ret) {
			ath12k_warn(ab, "unable to start hw group\n");
			goto err_core_stop;
		}
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
	} else {
		ath12k_core_trigger_partner(ab);
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_core_stop:
	/* Group start failed: stop the core of every member device in
	 * reverse order.  NOTE: 'ab' is reused as the loop iterator.
	 */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}
	mutex_unlock(&ag->mutex);
	goto exit;

err_deinit:
	ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));
	mutex_unlock(&ab->core_lock);
	mutex_unlock(&ag->mutex);

err_firmware_stop:
	ath12k_qmi_firmware_stop(ab);

exit:
	return ret;
}
1376 
/* Rebuild driver state after a firmware crash: free everything tied to
 * the dead firmware instance, reinitialize HAL SRNG and run the normal
 * QMI firmware-ready bring-up again.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
	int ret, total_vdev;

	mutex_lock(&ab->core_lock);
	ath12k_link_sta_rhash_tbl_destroy(ab);
	ath12k_thermal_unregister(ab);
	ath12k_dp_pdev_free(ab);
	ath12k_ce_cleanup_pipes(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_rx_pdev_reo_cleanup(ab);
	mutex_unlock(&ab->core_lock);

	ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));
	ath12k_hal_srng_deinit(ab);
	/* All vdev ids become available again after the restart. */
	total_vdev = ab->num_radios * TARGET_NUM_VDEVS(ab);
	ab->free_vdev_map = (1LL << total_vdev) - 1;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		return ret;

	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	ret = ath12k_core_qmi_firmware_ready(ab);
	if (ret)
		goto err_hal_srng_deinit;

	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);

	return 0;

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);
	return ret;
}
1413 
1414 static void ath12k_rfkill_work(struct work_struct *work)
1415 {
1416 	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1417 	struct ath12k_hw_group *ag = ab->ag;
1418 	struct ath12k *ar;
1419 	struct ath12k_hw *ah;
1420 	struct ieee80211_hw *hw;
1421 	bool rfkill_radio_on;
1422 	int i, j;
1423 
1424 	spin_lock_bh(&ab->base_lock);
1425 	rfkill_radio_on = ab->rfkill_radio_on;
1426 	spin_unlock_bh(&ab->base_lock);
1427 
1428 	for (i = 0; i < ag->num_hw; i++) {
1429 		ah = ath12k_ag_to_ah(ag, i);
1430 		if (!ah)
1431 			continue;
1432 
1433 		for (j = 0; j < ah->num_radio; j++) {
1434 			ar = &ah->radio[j];
1435 			if (!ar)
1436 				continue;
1437 
1438 			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1439 		}
1440 
1441 		hw = ah->hw;
1442 		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1443 	}
1444 }
1445 
/* Stop all activity on a single radio during recovery or teardown.
 * Must be called with the wiphy mutex held; leaves the radio with no
 * active vdevs, peers or pending work.
 */
void ath12k_core_halt(struct ath12k *ar)
{
	struct list_head *pos, *n;
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ar->num_created_vdevs = 0;
	ar->allocated_vdev_map = 0;

	ath12k_mac_scan_finish(ar);
	ath12k_mac_peer_cleanup_all(ar);
	cancel_delayed_work_sync(&ar->scan.timeout);
	cancel_work_sync(&ar->regd_update_work);
	cancel_work_sync(&ar->regd_channel_update_work);
	cancel_work_sync(&ab->rfkill_work);
	cancel_work_sync(&ab->update_11d_work);

	/* Unpublish the pdev and wait for in-flight RCU readers before
	 * tearing down the vif list below.
	 */
	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
	synchronize_rcu();

	spin_lock_bh(&ar->data_lock);
	list_for_each_safe(pos, n, &ar->arvifs)
		list_del_init(pos);
	spin_unlock_bh(&ar->data_lock);

	/* Fresh IDR for the next life of this radio; pending mgmt tx
	 * entries were freed in the pre-reconfigure path.
	 */
	idr_init(&ar->txmgmt_idr);
}
1474 
/* First half of crash recovery: account the crash, stop the mac80211
 * queues, drain TX and complete every blocking waiter so that no
 * thread stays stuck waiting on the dead firmware.
 */
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_hw *ah;
	int i, j;

	spin_lock_bh(&ab->base_lock);
	ab->stats.fw_crash_counter++;
	spin_unlock_bh(&ab->base_lock);

	if (ab->is_reset)
		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
		    ah->state == ATH12K_HW_STATE_TM)
			continue;

		wiphy_lock(ah->hw->wiphy);

		/* If queue 0 is stopped, it is safe to assume that all
		 * other queues are stopped by driver via
		 * ieee80211_stop_queues() below. This means, there is
		 * no need to stop it again and hence continue
		 */
		if (ieee80211_queue_stopped(ah->hw, 0)) {
			wiphy_unlock(ah->hw->wiphy);
			continue;
		}

		ieee80211_stop_queues(ah->hw);

		for (j = 0; j < ah->num_radio; j++) {
			ar = &ah->radio[j];

			ath12k_mac_drain_tx(ar);
			ar->state_11d = ATH12K_11D_IDLE;
			/* Release every potential waiter - firmware will
			 * never deliver these completions anymore.
			 */
			complete(&ar->completed_11d_scan);
			complete(&ar->scan.started);
			complete_all(&ar->scan.completed);
			complete(&ar->scan.on_channel);
			complete(&ar->peer_assoc_done);
			complete(&ar->peer_delete_done);
			complete(&ar->install_key_done);
			complete(&ar->vdev_setup_done);
			complete(&ar->vdev_delete_done);
			complete(&ar->bss_survey_done);
			complete_all(&ar->regd_update_completed);
			complete_all(&ar->thermal.wmi_sync);

			wake_up(&ar->dp.tx_empty_waitq);
			/* Free all pending mgmt tx frames and wake any
			 * waiter blocked on the IDR becoming empty.
			 */
			idr_for_each(&ar->txmgmt_idr,
				     ath12k_mac_tx_mgmt_pending_free, ar);
			idr_destroy(&ar->txmgmt_idr);
			wake_up(&ar->txmgmt_empty_waitq);

			ar->monitor_vdev_id = -1;
			ar->monitor_vdev_created = false;
			ar->monitor_started = false;
		}

		wiphy_unlock(ah->hw->wiphy);
	}

	wake_up(&ab->wmi_ab.tx_credits_wq);
	wake_up(&ab->peer_mapping_wq);
}
1544 
/* Worker that pushes a new 11d country code (stored in ab->new_alpha2
 * by the WMI event path) down to every pdev of the device.
 */
static void ath12k_update_11d(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
	struct ath12k *ar;
	struct ath12k_pdev *pdev;
	struct wmi_set_current_country_arg arg = {};
	int ret, i;

	/* base_lock protects new_alpha2 against the WMI event writer. */
	spin_lock_bh(&ab->base_lock);
	memcpy(&arg.alpha2, &ab->new_alpha2, 2);
	spin_unlock_bh(&ab->base_lock);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
		   arg.alpha2[0], arg.alpha2[1]);

	for (i = 0; i < ab->num_radios; i++) {
		pdev = &ab->pdevs[i];
		ar = pdev->ar;

		memcpy(&ar->alpha2, &arg.alpha2, 2);

		/* A regd update event follows the country change; rearm
		 * the completion so waiters see the new event.
		 */
		reinit_completion(&ar->regd_update_completed);

		ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
		if (ret)
			ath12k_warn(ar->ab,
				    "pdev id %d failed set current country code: %d\n",
				    i, ret);
	}
}
1575 
/* Second half of crash recovery: move each hardware into the
 * RESTARTING state (halting its radios) or mark it WEDGED when a
 * previous restart never completed.
 */
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	struct ath12k *ar;
	int i, j;

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
			continue;

		wiphy_lock(ah->hw->wiphy);
		mutex_lock(&ah->hw_mutex);

		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;

			for (j = 0; j < ah->num_radio; j++) {
				ar = &ah->radio[j];
				ath12k_core_halt(ar);
			}

			ath12k_mac_dp_peer_cleanup(ah);
			break;
		case ATH12K_HW_STATE_OFF:
			/* Unreachable: OFF is filtered above.  Kept so the
			 * switch covers the full state enum.
			 */
			ath12k_warn(ab,
				    "cannot restart hw %d that hasn't been started\n",
				    i);
			break;
		case ATH12K_HW_STATE_RESTARTING:
			break;
		case ATH12K_HW_STATE_RESTARTED:
			/* A restart was still pending when we crashed
			 * again: consider the hardware wedged.
			 */
			ah->state = ATH12K_HW_STATE_WEDGED;
			fallthrough;
		case ATH12K_HW_STATE_WEDGED:
			ath12k_warn(ab,
				    "device is wedged, will not restart hw %d\n", i);
			break;
		case ATH12K_HW_STATE_TM:
			ath12k_warn(ab, "fw mode reset done radio %d\n", i);
			break;
		}

		mutex_unlock(&ah->hw_mutex);
		wiphy_unlock(ah->hw->wiphy);
	}

	complete(&ab->driver_recovery);
}
1627 
/* restart_work handler: reconfigure the driver after a crash and, once
 * every device of the group is ready again, ask mac80211 to restart
 * each hardware.
 */
static void ath12k_core_restart(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	int ret, i;

	ret = ath12k_core_reconfigure_on_crash(ab);
	if (ret) {
		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
		return;
	}

	if (ab->is_reset) {
		/* Not registered with mac80211: nothing to restart, only
		 * account the completed reset.
		 */
		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
			atomic_dec(&ab->reset_count);
			complete(&ab->reset_complete);
			ab->is_reset = false;
			atomic_set(&ab->fail_cont_count, 0);
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
		}

		mutex_lock(&ag->mutex);

		/* Wait until every partner device has recovered too. */
		if (!ath12k_core_hw_group_start_ready(ag)) {
			mutex_unlock(&ag->mutex);
			goto exit_restart;
		}

		for (i = 0; i < ag->num_hw; i++) {
			ah = ath12k_ag_to_ah(ag, i);
			ieee80211_restart_hw(ah->hw);
		}

		mutex_unlock(&ag->mutex);
	}

exit_restart:
	complete(&ab->restart_completed);
}
1668 
/* reset_work handler: full device reset after a fatal firmware error.
 * Coalesces duplicate reset requests, caps the number of back-to-back
 * failed recoveries and powers the whole group down and back up once
 * every member device has gone through its reset.
 */
static void ath12k_core_reset(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
	struct ath12k_hw_group *ag = ab->ag;
	int reset_count, fail_cont_count, i;
	long time_left;

	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
		return;
	}

	/* Sometimes the recovery will fail and then the next all recovery fail,
	 * this is to avoid infinite recovery since it can not recovery success
	 */
	fail_cont_count = atomic_read(&ab->fail_cont_count);

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
		return;

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
	    time_before(jiffies, ab->reset_fail_timeout))
		return;

	reset_count = atomic_inc_return(&ab->reset_count);

	if (reset_count > 1) {
		/* Sometimes it happened another reset worker before the previous one
		 * completed, then the second reset worker will destroy the previous one,
		 * thus below is to avoid that.
		 */
		ath12k_warn(ab, "already resetting count %d\n", reset_count);

		reinit_completion(&ab->reset_complete);
		time_left = wait_for_completion_timeout(&ab->reset_complete,
							ATH12K_RESET_TIMEOUT_HZ);
		if (time_left) {
			/* The previous reset finished in time; this
			 * request is redundant and can be dropped.
			 */
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
			atomic_dec(&ab->reset_count);
			return;
		}

		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
		/* Record the continuous recovery fail count when recovery failed*/
		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");

	ab->is_reset = true;
	atomic_set(&ab->recovery_count, 0);

	ath12k_coredump_collect(ab);
	ath12k_core_pre_reconfigure_recovery(ab);

	ath12k_core_post_reconfigure_recovery(ab);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");

	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	ath12k_hif_power_down(ab, false);

	/* prepare for power up */
	ab->qmi.num_radios = U8_MAX;

	mutex_lock(&ag->mutex);
	ath12k_core_to_group_ref_put(ab);

	/* Only the last device to reset powers the group back up. */
	if (ag->num_started > 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "waiting for %d partner device(s) to reset\n",
			   ag->num_started);
		mutex_unlock(&ag->mutex);
		return;
	}

	/* Prepare MLO global memory region for power up */
	ath12k_qmi_reset_mlo_mem(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		ath12k_hif_power_up(ab);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
	}

	mutex_unlock(&ag->mutex);
}
1761 
1762 enum ath12k_qmi_mem_mode ath12k_core_get_memory_mode(struct ath12k_base *ab)
1763 {
1764 	unsigned long total_ram;
1765 	struct sysinfo si;
1766 
1767 	si_meminfo(&si);
1768 	total_ram = si.totalram * si.mem_unit;
1769 
1770 	if (total_ram < SZ_512M)
1771 		return ATH12K_QMI_MEMORY_MODE_LOW_512_M;
1772 
1773 	return ATH12K_QMI_MEMORY_MODE_DEFAULT;
1774 }
1775 EXPORT_SYMBOL(ath12k_core_get_memory_mode);
1776 
1777 int ath12k_core_pre_init(struct ath12k_base *ab)
1778 {
1779 	const struct ath12k_mem_profile_based_param *param;
1780 
1781 	param = &ath12k_mem_profile_based_param[ab->target_mem_mode];
1782 	ab->profile_param = param;
1783 	ath12k_fw_map(ab);
1784 
1785 	return 0;
1786 }
1787 
1788 static int ath12k_core_panic_handler(struct notifier_block *nb,
1789 				     unsigned long action, void *data)
1790 {
1791 	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
1792 					      panic_nb);
1793 
1794 	return ath12k_hif_panic_handler(ab);
1795 }
1796 
1797 static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
1798 {
1799 	ab->panic_nb.notifier_call = ath12k_core_panic_handler;
1800 
1801 	return atomic_notifier_chain_register(&panic_notifier_list,
1802 					      &ab->panic_nb);
1803 }
1804 
1805 static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
1806 {
1807 	atomic_notifier_chain_unregister(&panic_notifier_list,
1808 					 &ab->panic_nb);
1809 }
1810 
1811 static inline
1812 bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1813 {
1814 	lockdep_assert_held(&ag->mutex);
1815 
1816 	return (ag->num_probed == ag->num_devices);
1817 }
1818 
1819 static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
1820 {
1821 	struct ath12k_hw_group *ag;
1822 	int count = 0;
1823 
1824 	lockdep_assert_held(&ath12k_hw_group_mutex);
1825 
1826 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1827 		count++;
1828 
1829 	ag = kzalloc_obj(*ag);
1830 	if (!ag)
1831 		return NULL;
1832 
1833 	ag->id = count;
1834 	list_add(&ag->list, &ath12k_hw_group_list);
1835 	mutex_init(&ag->mutex);
1836 	ag->mlo_capable = false;
1837 
1838 	return ag;
1839 }
1840 
/* Remove a group from the global list and free it.  The global mutex
 * serializes against group lookup/alloc in the probe path.
 */
static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
{
	mutex_lock(&ath12k_hw_group_mutex);

	list_del(&ag->list);
	kfree(ag);

	mutex_unlock(&ath12k_hw_group_mutex);
}
1850 
1851 static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
1852 {
1853 	struct ath12k_hw_group *ag;
1854 	int i;
1855 
1856 	if (!ab->dev->of_node)
1857 		return NULL;
1858 
1859 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1860 		for (i = 0; i < ag->num_devices; i++)
1861 			if (ag->wsi_node[i] == ab->dev->of_node)
1862 				return ag;
1863 
1864 	return NULL;
1865 }
1866 
1867 static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
1868 				    struct ath12k_base *ab)
1869 {
1870 	struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
1871 	struct device_node *tx_endpoint, *next_rx_endpoint;
1872 	int device_count = 0;
1873 
1874 	next_wsi_dev = wsi_dev;
1875 
1876 	if (!next_wsi_dev)
1877 		return -ENODEV;
1878 
1879 	do {
1880 		ag->wsi_node[device_count] = next_wsi_dev;
1881 
1882 		tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
1883 		if (!tx_endpoint) {
1884 			of_node_put(next_wsi_dev);
1885 			return -ENODEV;
1886 		}
1887 
1888 		next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
1889 		if (!next_rx_endpoint) {
1890 			of_node_put(next_wsi_dev);
1891 			of_node_put(tx_endpoint);
1892 			return -ENODEV;
1893 		}
1894 
1895 		of_node_put(tx_endpoint);
1896 		of_node_put(next_wsi_dev);
1897 
1898 		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
1899 		if (!next_wsi_dev) {
1900 			of_node_put(next_rx_endpoint);
1901 			return -ENODEV;
1902 		}
1903 
1904 		of_node_put(next_rx_endpoint);
1905 
1906 		device_count++;
1907 		if (device_count > ATH12K_MAX_DEVICES) {
1908 			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
1909 				    device_count, ATH12K_MAX_DEVICES);
1910 			of_node_put(next_wsi_dev);
1911 			return -EINVAL;
1912 		}
1913 	} while (wsi_dev != next_wsi_dev);
1914 
1915 	of_node_put(next_wsi_dev);
1916 	ag->num_devices = device_count;
1917 
1918 	return 0;
1919 }
1920 
1921 static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
1922 				     struct ath12k_base *ab)
1923 {
1924 	int i, wsi_controller_index = -1, node_index = -1;
1925 	bool control;
1926 
1927 	for (i = 0; i < ag->num_devices; i++) {
1928 		control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
1929 		if (control)
1930 			wsi_controller_index = i;
1931 
1932 		if (ag->wsi_node[i] == ab->dev->of_node)
1933 			node_index = i;
1934 	}
1935 
1936 	if (wsi_controller_index == -1) {
1937 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
1938 		return -EINVAL;
1939 	}
1940 
1941 	if (node_index == -1) {
1942 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
1943 		return -EINVAL;
1944 	}
1945 
1946 	ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
1947 		ag->num_devices;
1948 
1949 	return 0;
1950 }
1951 
/* Attach a newly probed device to a hardware group: either an existing
 * DT-described group, a new group built from DT WSI info, or a
 * single-device "invalid" group when no usable DT data exists.
 *
 * Called with ath12k_hw_group_mutex held.  Returns the group or NULL.
 */
static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
{
	struct ath12k_wsi_info *wsi = &ab->wsi_info;
	struct ath12k_hw_group *ag;

	lockdep_assert_held(&ath12k_hw_group_mutex);

	if (ath12k_ftm_mode)
		goto invalid_group;

	/* The grouping of multiple devices will be done based on device tree file.
	 * The platforms that do not have any valid group information would have
	 * each device to be part of its own invalid group.
	 *
	 * We use group id ATH12K_INVALID_GROUP_ID for single device group
	 * which didn't have dt entry or wrong dt entry, there could be many
	 * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So
	 * default group id of ATH12K_INVALID_GROUP_ID combined with
	 * num devices in ath12k_hw_group determines if the group is
	 * multi device or single device group
	 */

	ag = ath12k_core_hw_group_find_by_dt(ab);
	if (!ag) {
		ag = ath12k_core_hw_group_alloc(ab);
		if (!ag) {
			ath12k_warn(ab, "unable to create new hw group\n");
			return NULL;
		}

		/* No usable WSI description: downgrade the fresh group to
		 * a single-device group.
		 */
		if (ath12k_core_get_wsi_info(ag, ab) ||
		    ath12k_core_get_wsi_index(ag, ab)) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "unable to get wsi info from dt, grouping single device");
			ag->id = ATH12K_INVALID_GROUP_ID;
			ag->num_devices = 1;
			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
			wsi->index = 0;
		}

		goto exit;
	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
			   ag->id);
		goto invalid_group;
	} else {
		if (ath12k_core_get_wsi_index(ag, ab))
			goto invalid_group;
		goto exit;
	}

invalid_group:
	/* Fallback: a dedicated single-device group for this device. */
	ag = ath12k_core_hw_group_alloc(ab);
	if (!ag) {
		ath12k_warn(ab, "unable to create new hw group\n");
		return NULL;
	}

	ag->id = ATH12K_INVALID_GROUP_ID;
	ag->num_devices = 1;
	wsi->index = 0;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");

exit:
	/* A full DT group cannot take another device; fall back to a
	 * fresh single-device group instead.
	 */
	if (ag->num_probed >= ag->num_devices) {
		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
		goto invalid_group;
	}

	ab->device_id = ag->num_probed++;
	ag->ab[ab->device_id] = ab;
	ab->ag = ag;

	ath12k_dp_cmn_hw_group_assign(ath12k_ab_to_dp(ab), ag);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
		   ag->id, ag->num_devices, wsi->index);

	return ag;
}
2033 
/* Detach a device from its hardware group.  Frees the group itself
 * once the last probed device leaves.
 */
void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	u8 device_id = ab->device_id;
	int num_probed;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (WARN_ON(device_id >= ag->num_devices)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	if (WARN_ON(ag->ab[device_id] != ab)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	ath12k_dp_cmn_hw_group_unassign(ath12k_ab_to_dp(ab), ag);

	ag->ab[device_id] = NULL;
	ab->ag = NULL;
	ab->device_id = ATH12K_INVALID_DEVICE_ID;

	if (ag->num_probed)
		ag->num_probed--;

	/* Sample under the lock; the free below must happen only after
	 * the mutex is dropped since it destroys the group (and thus
	 * the mutex itself).
	 */
	num_probed = ag->num_probed;

	mutex_unlock(&ag->mutex);

	if (!num_probed)
		ath12k_core_hw_group_free(ag);
}
2071 
2072 static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
2073 {
2074 	struct ath12k_base *ab;
2075 	int i;
2076 
2077 	if (WARN_ON(!ag))
2078 		return;
2079 
2080 	for (i = 0; i < ag->num_devices; i++) {
2081 		ab = ag->ab[i];
2082 		if (!ab)
2083 			continue;
2084 
2085 		ath12k_core_soc_destroy(ab);
2086 	}
2087 }
2088 
/* Unregister and stop a hardware group.  Safe against repeated calls
 * thanks to the UNREGISTER flag.
 */
void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	/* Already cleaned up by a previous caller. */
	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);

	ath12k_core_hw_group_stop(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}

	mutex_unlock(&ag->mutex);
}
2120 
/* Create the SoC state of every device in the group.  On failure the
 * devices created so far are destroyed again in reverse order.
 *
 * Called with ag->mutex held.  Returns 0 or a negative errno.
 */
static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i, ret;

	lockdep_assert_held(&ag->mutex);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		ret = ath12k_core_soc_create(ab);
		if (ret) {
			mutex_unlock(&ab->core_lock);
			ath12k_err(ab, "failed to create soc %d core: %d\n", i, ret);
			goto destroy;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

destroy:
	/* Unwind: destroy only the devices already created (index < i). */
	for (i--; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_soc_destroy(ab);
		mutex_unlock(&ab->core_lock);
	}

	return ret;
}
2160 
/* Decide whether the group as a whole may use MLO.  A single device
 * relies on its firmware advertisement; in a multi-device group every
 * member must support MLO.  Called with ag->mutex held.
 */
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	/* FTM mode never uses MLO; mlo_capable stays false. */
	if (ath12k_ftm_mode)
		return;

	lockdep_assert_held(&ag->mutex);

	if (ag->num_devices == 1) {
		ab = ag->ab[0];
		/* QCN9274 firmware uses firmware IE for MLO advertisement */
		if (ab->fw.fw_features_valid) {
			ag->mlo_capable =
				ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
			return;
		}

		/* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
		ag->mlo_capable = ab->single_chip_mlo_support;
		return;
	}

	ag->mlo_capable = true;

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		/* even if 1 device's firmware feature indicates MLO
		 * unsupported, make MLO unsupported for the whole group
		 */
		if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
			ag->mlo_capable = false;
			return;
		}
	}
}
2201 
/* Per-device driver init: register the panic hook, attach the device
 * to a hardware group and - when this device completes the group -
 * create the SoC state of every member.
 *
 * Returns 0 on success or a negative errno.
 */
int ath12k_core_init(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag;
	int ret;

	/* A failing panic-notifier registration is only warned about;
	 * the driver can operate without it.
	 */
	ret = ath12k_core_panic_notifier_register(ab);
	if (ret)
		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);

	mutex_lock(&ath12k_hw_group_mutex);

	ag = ath12k_core_hw_group_assign(ab);
	if (!ag) {
		mutex_unlock(&ath12k_hw_group_mutex);
		ath12k_warn(ab, "unable to get hw group\n");
		ret = -ENODEV;
		goto err_unregister_notifier;
	}

	mutex_unlock(&ath12k_hw_group_mutex);

	mutex_lock(&ag->mutex);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
		   ag->num_devices, ag->num_probed);

	/* Only the last device of the group triggers group creation. */
	if (ath12k_core_hw_group_create_ready(ag)) {
		ret = ath12k_core_hw_group_create(ag);
		if (ret) {
			mutex_unlock(&ag->mutex);
			ath12k_warn(ab, "unable to create hw group\n");
			goto err_unassign_hw_group;
		}
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_unassign_hw_group:
	ath12k_core_hw_group_unassign(ab);
err_unregister_notifier:
	ath12k_core_panic_notifier_unregister(ab);

	return ret;
}
2248 
/* Per-device driver teardown; counterpart of ath12k_core_init().
 * Order matters: destroy the group state first, then drop this device
 * out of the group, finally remove the panic hook.
 */
void ath12k_core_deinit(struct ath12k_base *ab)
{
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
	ath12k_core_panic_notifier_unregister(ab);
}
2255 
/* Release an ath12k_base allocated by ath12k_core_alloc().  The timer
 * and both workqueues must be shut down before the memory that embeds
 * them is freed.
 */
void ath12k_core_free(struct ath12k_base *ab)
{
	timer_delete_sync(&ab->rx_replenish_retry);
	destroy_workqueue(ab->workqueue_aux);
	destroy_workqueue(ab->workqueue);
	kfree(ab);
}
2263 
/* Allocate and minimally initialize an ath12k_base plus @priv_size
 * bytes of bus-private data.  Returns NULL on failure; pairs with
 * ath12k_core_free().
 */
struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
				      enum ath12k_bus bus)
{
	struct ath12k_base *ab;

	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
	if (!ab)
		return NULL;

	init_completion(&ab->driver_recovery);

	/* Ordered (single-threaded) workqueues: main driver work plus
	 * an auxiliary queue.
	 */
	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
	if (!ab->workqueue)
		goto err_sc_free;

	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
	if (!ab->workqueue_aux)
		goto err_free_wq;

	mutex_init(&ab->core_lock);
	spin_lock_init(&ab->base_lock);
	init_completion(&ab->reset_complete);

	init_waitqueue_head(&ab->peer_mapping_wq);
	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
	INIT_WORK(&ab->restart_work, ath12k_core_restart);
	INIT_WORK(&ab->reset_work, ath12k_core_reset);
	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
	INIT_WORK(&ab->update_11d_work, ath12k_update_11d);

	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
	init_completion(&ab->htc_suspend);
	init_completion(&ab->restart_completed);
	init_completion(&ab->wow.wakeup_completed);

	ab->dev = dev;
	ab->hif.bus = bus;
	/* U8_MAX marks "radio count not yet reported by QMI". */
	ab->qmi.num_radios = U8_MAX;
	ab->single_chip_mlo_support = false;

	/* Device index used to identify the devices in a group.
	 *
	 * In Intra-device MLO, only one device present in a group,
	 * so it is always zero.
	 *
	 * In Inter-device MLO, Multiple device present in a group,
	 * expect non-zero value.
	 */
	ab->device_id = 0;

	return ab;

err_free_wq:
	destroy_workqueue(ab->workqueue);
err_sc_free:
	kfree(ab);
	return NULL;
}
2323 
2324 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies WLAN devices");
2325 MODULE_LICENSE("Dual BSD/GPL");
2326