xref: /freebsd/sys/contrib/dev/athk/ath12k/core.c (revision 60bac4d6438b6bcb3d7b439684211d05396d90ce)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5  */
6 
7 #if defined(__FreeBSD__)
8 #define	LINUXKPI_PARAM_PREFIX	ath12k_core_
9 #endif
10 
11 #include <linux/export.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/remoteproc.h>
15 #include <linux/firmware.h>
16 #include <linux/of.h>
17 #if defined(__FreeBSD__)
18 #include <linux/delay.h>
19 #endif
20 #include <linux/of_graph.h>
21 #include "ahb.h"
22 #include "core.h"
23 #include "dp_tx.h"
24 #include "dp_rx.h"
25 #include "debug.h"
26 #include "debugfs.h"
27 #include "fw.h"
28 #include "hif.h"
29 #include "pci.h"
30 #include "wow.h"
31 #include "dp_cmn.h"
32 #include "peer.h"
33 
/* Bitmask of enabled ATH12K_DBG_* debug categories; runtime-writable module
 * parameter (mode 0644) and exported for the other ath12k modules.
 */
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
EXPORT_SYMBOL(ath12k_debug_mask);

/* When set at load time (read-only param, 0444), the firmware is booted in
 * factory test mode instead of the normal mission mode.
 */
bool ath12k_ftm_mode;
module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
EXPORT_SYMBOL(ath12k_ftm_mode);

/* protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);

static DEFINE_MUTEX(ath12k_hw_group_mutex);
48 
/* Per-memory-profile tuning table, indexed by ATH12K_QMI_MEMORY_MODE_*.
 * The LOW_512_M profile shrinks vdev/client limits and DP ring/descriptor
 * counts to fit targets with constrained host memory.
 * NOTE(review): values are firmware/DP tuning constants carried over from the
 * hw params; do not round them (e.g. 8092 is intentional here, not 8192) —
 * confirm against the DP ring-size definitions before changing.
 */
static const struct
ath12k_mem_profile_based_param ath12k_mem_profile_based_param[] = {
[ATH12K_QMI_MEMORY_MODE_DEFAULT] = {
		.num_vdevs = 17,
		.max_client_single = 512,
		.max_client_dbs = 128,
		.max_client_dbs_sbs = 128,
		.dp_params = {
			.tx_comp_ring_size = 32768,
			.rxdma_monitor_buf_ring_size = 4096,
			.rxdma_monitor_dst_ring_size = 8092,
			.num_pool_tx_desc = 32768,
			.rx_desc_count = 12288,
		},
	},
[ATH12K_QMI_MEMORY_MODE_LOW_512_M] = {
		.num_vdevs = 9,
		.max_client_single = 128,
		.max_client_dbs = 64,
		.max_client_dbs_sbs = 64,
		.dp_params = {
			.tx_comp_ring_size = 16384,
			.rxdma_monitor_buf_ring_size = 256,
			.rxdma_monitor_dst_ring_size = 512,
			.num_pool_tx_desc = 16384,
			.rx_desc_count = 6144,
		},
	},
};
78 
ath12k_core_rfkill_config(struct ath12k_base * ab)79 static int ath12k_core_rfkill_config(struct ath12k_base *ab)
80 {
81 	struct ath12k *ar;
82 	int ret = 0, i;
83 
84 	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
85 		return 0;
86 
87 	if (ath12k_acpi_get_disable_rfkill(ab))
88 		return 0;
89 
90 	for (i = 0; i < ab->num_radios; i++) {
91 		ar = ab->pdevs[i].ar;
92 
93 		ret = ath12k_mac_rfkill_config(ar);
94 		if (ret && ret != -EOPNOTSUPP) {
95 			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
96 			return ret;
97 		}
98 	}
99 
100 	return ret;
101 }
102 
103 /* Check if we need to continue with suspend/resume operation.
104  * Return:
105  *	a negative value: error happens and don't continue.
106  *	0:  no error but don't continue.
107  *	positive value: no error and do continue.
108  */
ath12k_core_continue_suspend_resume(struct ath12k_base * ab)109 static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
110 {
111 	struct ath12k *ar;
112 
113 	if (!ab->hw_params->supports_suspend)
114 		return -EOPNOTSUPP;
115 
116 	/* so far single_pdev_only chips have supports_suspend as true
117 	 * so pass 0 as a dummy pdev_id here.
118 	 */
119 	ar = ab->pdevs[0].ar;
120 	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
121 		return 0;
122 
123 	return 1;
124 }
125 
/* Driver suspend hook: wait for in-flight TX to drain on every radio before
 * the bus layer suspends the device.
 *
 * Returns 0 on success or when suspend does not apply to this target,
 * negative errno on failure.
 */
int ath12k_core_suspend(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret, i;

	/* <= 0 means either an error or "nothing to do"; propagate as-is. */
	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		if (!ar)
			continue;

		/* TX-complete wait runs under the wiphy lock; drop it on the
		 * error path before returning.
		 */
		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);

		ret = ath12k_mac_wait_tx_complete(ar);
		if (ret) {
			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
			return ret;
		}

		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
	}

	/* PM framework skips suspend_late/resume_early callbacks
	 * if other devices report errors in their suspend callbacks.
	 * However ath12k_core_resume() would still be called because
	 * here we return success thus kernel put us on dpm_suspended_list.
	 * Since we won't go through a power down/up cycle, there is
	 * no chance to call complete(&ab->restart_completed) in
	 * ath12k_core_restart(), making ath12k_core_resume() timeout.
	 * So call it here to avoid this issue. This also works in case
	 * no error happens thus suspend_late/resume_early get called,
	 * because it will be reinitialized in ath12k_core_resume_early().
	 */
	complete(&ab->restart_completed);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend);
168 
/* Late-stage suspend: quiesce interrupts and power the target down.
 * Runs after ath12k_core_suspend() has drained TX.  Ordering matters:
 * ACPI notifications are stopped first, then both interrupt sources are
 * masked, and only then is the HIF powered off.
 */
int ath12k_core_suspend_late(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	ath12k_acpi_stop(ab);

	/* Mask data-path and copy-engine interrupts before power-down. */
	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	/* NOTE(review): second argument presumably flags the suspend path of
	 * the power-down -- confirm against ath12k_hif_power_down().
	 */
	ath12k_hif_power_down(ab, true);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend_late);
187 
/* Early-stage resume: re-arm the restart completion and power the target
 * back up.  The completion must be reinitialized *before* power-up so that
 * ath12k_core_resume() can later wait on the restart triggered here.
 */
int ath12k_core_resume_early(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	reinit_completion(&ab->restart_completed);
	ret = ath12k_hif_power_up(ab);
	if (ret)
		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);

	return ret;
}
EXPORT_SYMBOL(ath12k_core_resume_early);
204 
ath12k_core_resume(struct ath12k_base * ab)205 int ath12k_core_resume(struct ath12k_base *ab)
206 {
207 	long time_left;
208 	int ret;
209 
210 	ret = ath12k_core_continue_suspend_resume(ab);
211 	if (ret <= 0)
212 		return ret;
213 
214 	time_left = wait_for_completion_timeout(&ab->restart_completed,
215 						ATH12K_RESET_TIMEOUT_HZ);
216 	if (time_left == 0) {
217 		ath12k_warn(ab, "timeout while waiting for restart complete");
218 		return -ETIMEDOUT;
219 	}
220 
221 	return 0;
222 }
223 EXPORT_SYMBOL(ath12k_core_resume);
224 
/* Build the board-file lookup name for this device.
 *
 * @with_variant:  append ",variant=<bdf_ext>" when a variant string exists.
 * @bus_type_mode: emit only "bus=<bus>" (used for the generic regdb lookup).
 * @with_default:  substitute ATH12K_BOARD_ID_DEFAULT for the QMI board id.
 *
 * Always returns 0; the name is written into @name (bounded by @name_len).
 */
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					   size_t name_len, bool with_variant,
					   bool bus_type_mode, bool with_default)
{
	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = {};

	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
		scnprintf(variant, sizeof(variant), ",variant=%s",
			  ab->qmi.target.bdf_ext);

	switch (ab->id.bdf_search) {
	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
		/* PCI-style search: include vendor/device/subsystem ids. */
		if (bus_type_mode)
			scnprintf(name, name_len,
				  "bus=%s",
				  ath12k_bus_str(ab->hif.bus));
		else
			scnprintf(name, name_len,
				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
				  ath12k_bus_str(ab->hif.bus),
				  ab->id.vendor, ab->id.device,
				  ab->id.subsystem_vendor,
				  ab->id.subsystem_device,
				  ab->qmi.target.chip_id,
				  ab->qmi.target.board_id,
				  variant);
		break;
	default:
		/* Fallback search keyed only on QMI chip/board ids. */
		scnprintf(name, name_len,
			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
			  ath12k_bus_str(ab->hif.bus),
			  ab->qmi.target.chip_id,
			  with_default ?
			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
			  variant);
		break;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);

	return 0;
}
268 
/* Primary board name: full identification including the bdf variant. */
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					 size_t name_len)
{
	const bool with_variant = true;
	const bool bus_type_mode = false;
	const bool with_default = false;

	return __ath12k_core_create_board_name(ab, name, name_len, with_variant,
					       bus_type_mode, with_default);
}
274 
/* Fallback board name: no variant suffix, default board id. */
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	const bool with_variant = false;
	const bool bus_type_mode = false;
	const bool with_default = true;

	return __ath12k_core_create_board_name(ab, name, name_len, with_variant,
					       bus_type_mode, with_default);
}
280 
/* Bus-only board name ("bus=<bus>"), used as the last-resort regdb key. */
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	const bool with_variant = false;
	const bool bus_type_mode = true;
	const bool with_default = true;

	return __ath12k_core_create_board_name(ab, name, name_len, with_variant,
					       bus_type_mode, with_default);
}
286 
ath12k_core_firmware_request(struct ath12k_base * ab,const char * file)287 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
288 						    const char *file)
289 {
290 	const struct firmware *fw;
291 	char path[100];
292 	int ret;
293 
294 	if (!file)
295 		return ERR_PTR(-ENOENT);
296 
297 	ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
298 
299 	ret = firmware_request_nowarn(&fw, path, ab->dev);
300 	if (ret)
301 		return ERR_PTR(ret);
302 
303 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
304 		   path, fw->size);
305 
306 	return fw;
307 }
308 
ath12k_core_free_bdf(struct ath12k_base * ab,struct ath12k_board_data * bd)309 void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
310 {
311 	if (!IS_ERR(bd->fw))
312 		release_firmware(bd->fw);
313 
314 	memset(bd, 0, sizeof(*bd));
315 }
316 
/* Walk the TLV elements inside one board/regdb container IE and locate the
 * data blob whose preceding name element matches @boardname.
 *
 * @ie_id:   container IE type (for log strings only).
 * @name_id: TLV id that carries a board name.
 * @data_id: TLV id that carries the payload for the last matched name.
 *
 * On success bd->data/bd->len point INTO the firmware buffer (no copy).
 * Returns 0 on match, -ENOENT when no element matches, -EINVAL on a
 * malformed TLV stream.
 */
static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
					 struct ath12k_board_data *bd,
#if defined(__linux__)
					 const void *buf, size_t buf_len,
#elif defined(__FreeBSD__)
					 const u8 *buf, size_t buf_len,
#endif
					 const char *boardname,
					 int ie_id,
					 int name_id,
					 int data_id)
{
	const struct ath12k_fw_ie *hdr;
	bool name_match_found;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	name_match_found = false;

	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath12k_fw_ie)) {
#if defined(__linux__)
		hdr = buf;
#elif defined(__FreeBSD__)
		hdr = (const struct ath12k_fw_ie *)buf;
#endif
		board_ie_id = le32_to_cpu(hdr->id);
		board_ie_len = le32_to_cpu(hdr->len);
		board_ie_data = hdr->data;

		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		/* Payloads are 4-byte aligned; a declared length that exceeds
		 * the remaining buffer means the stream is corrupt.
		 */
		if (buf_len < ALIGN(board_ie_len, 4)) {
			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
				   ath12k_bd_ie_type_str(ie_id),
				   buf_len, ALIGN(board_ie_len, 4));
			ret = -EINVAL;
			goto out;
		}

		if (board_ie_id == name_id) {
			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
					board_ie_data, board_ie_len);

			/* Exact-length, exact-content comparison (names are
			 * not NUL-terminated in the TLV).
			 */
			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, strlen(boardname));
			if (ret)
				goto next;

			name_match_found = true;
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found match %s for name '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);
		} else if (board_ie_id == data_id) {
			if (!name_match_found)
				/* no match found */
				goto next;

			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found %s for '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);

			/* Hand out a view into the firmware image; the caller
			 * keeps bd->fw alive for as long as bd->data is used.
			 */
			bd->data = board_ie_data;
			bd->len = board_ie_len;

			ret = 0;
			goto out;
		} else {
			ath12k_warn(ab, "unknown %s id found: %d\n",
				    ath12k_bd_ie_type_str(ie_id),
				    board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = ALIGN(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	ret = -ENOENT;

out:
	return ret;
}
409 
/* Fetch board (or regdb) data from the API-2 container file (board-2.bin).
 *
 * Validates the file magic, then walks the top-level IEs looking for
 * @ie_id_match and delegates the inner name/data TLV search to
 * ath12k_core_parse_bd_ie_board().
 *
 * Returns 0 with bd->data/bd->len set on success; on any failure the
 * board-data descriptor is freed and a negative errno is returned.
 */
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
					      struct ath12k_board_data *bd,
					      const char *boardname,
					      int ie_id_match,
					      int name_id,
					      int data_id)
{
	size_t len, magic_len;
	const u8 *data;
	char *filename, filepath[100];
	size_t ie_len;
#if defined(__linux__)
	struct ath12k_fw_ie *hdr;
#elif defined(__FreeBSD__)
	const struct ath12k_fw_ie *hdr;
#endif
	int ret, ie_id;

	filename = ATH12K_BOARD_API2_FILE;

	/* The caller may pass an already-loaded container to avoid
	 * re-requesting the same file for multiple lookups.
	 */
	if (!bd->fw)
		bd->fw = ath12k_core_firmware_request(ab, filename);

	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	data = bd->fw->data;
	len = bd->fw->size;

	ath12k_core_create_firmware_path(ab, filename,
					 filepath, sizeof(filepath));

	/* magic has extra null byte padded */
	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
		ath12k_err(ab, "found invalid board magic\n");
		ret = -EINVAL;
		goto err;
	}

	/* magic is padded to 4 bytes */
	magic_len = ALIGN(magic_len, 4);
	if (len < magic_len) {
		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	data += magic_len;
	len -= magic_len;

	/* Top-level IE walk: each IE is a header followed by a 4-byte
	 * aligned payload.
	 */
	while (len > sizeof(struct ath12k_fw_ie)) {
#if defined(__linux__)
		hdr = (struct ath12k_fw_ie *)data;
#elif defined(__FreeBSD__)
		hdr = (const struct ath12k_fw_ie *)data;
#endif
		ie_id = le32_to_cpu(hdr->id);
		ie_len = le32_to_cpu(hdr->len);

		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < ALIGN(ie_len, 4)) {
			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
				   ie_id, ie_len, len);
			ret = -EINVAL;
			goto err;
		}

		if (ie_id == ie_id_match) {
			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
							    ie_len,
							    boardname,
							    ie_id_match,
							    name_id,
							    data_id);
			if (ret == -ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				goto err;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = ALIGN(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	/* Falling out of the loop without a populated bd means the container
	 * did not carry an entry for this board name.
	 */
	if (!bd->data || !bd->len) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to fetch %s for %s from %s\n",
			   ath12k_bd_ie_type_str(ie_id_match),
			   boardname, filepath);
		ret = -ENODATA;
		goto err;
	}

	return 0;

err:
	ath12k_core_free_bdf(ab, bd);
	return ret;
}
528 
/* Fetch a legacy (API-1) board file: the whole file is the board data.
 *
 * On failure bd->fw deliberately retains the ERR_PTR so that a later
 * ath12k_core_free_bdf() (which checks IS_ERR) stays safe.
 */
int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
				       struct ath12k_board_data *bd,
				       char *filename)
{
	bd->fw = ath12k_core_firmware_request(ab, filename);
	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	/* No container format here: expose the raw file contents. */
	bd->data = bd->fw->data;
	bd->len = bd->fw->size;

	return 0;
}
542 
#define BOARD_NAME_SIZE 200
/* Locate board data for this device, trying progressively broader names:
 *   1. API-2 container with the fully-qualified board name (incl. variant),
 *   2. API-2 container with the fallback name (default board id),
 *   3. legacy API-1 board.bin.
 * Returns 0 on success, negative errno when no source matched.
 */
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
	char *filename, filepath[100];
	int bd_api;
	int ret;

	filename = ATH12K_BOARD_API2_FILE;

	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
	if (ret) {
		ath12k_err(ab, "failed to create board name: %d", ret);
		return ret;
	}

	bd_api = 2;
	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
						     sizeof(fallback_boardname));
	if (ret) {
		ath12k_err(ab, "failed to create fallback board name: %d", ret);
		return ret;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	/* Last resort: legacy single-board file. */
	bd_api = 1;
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
	if (ret) {
		ath12k_core_create_firmware_path(ab, filename,
						 filepath, sizeof(filepath));
		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
			   boardname, filepath);
		/* Only mention the fallback name when it differs from the
		 * primary one, to avoid a duplicate log line.
		 */
		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
				   fallback_boardname, filepath);

		ath12k_err(ab, "failed to fetch board.bin from %s\n",
			   ab->hw_params->fw.dir);
		return ret;
	}

success:
	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
	return 0;
}
601 
/* Locate regulatory database data, with the same narrowing fallback chain as
 * ath12k_core_fetch_bdf(): exact board name, then bus-only name, then the
 * standalone regdb file.  Failures are logged at debug level only since a
 * missing regdb is non-fatal.
 */
int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
	int ret;

	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
						     BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create default board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	/* Final fallback: dedicated regdb file outside the board container. */
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
	if (ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);

exit:
	if (!ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");

	return ret;
}
647 
ath12k_core_get_max_station_per_radio(struct ath12k_base * ab)648 u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
649 {
650 	if (ab->num_radios == 2)
651 		return TARGET_NUM_STATIONS(ab, DBS);
652 	if (ab->num_radios == 3)
653 		return TARGET_NUM_STATIONS(ab, DBS_SBS);
654 	return TARGET_NUM_STATIONS(ab, SINGLE);
655 }
656 
ath12k_core_get_max_peers_per_radio(struct ath12k_base * ab)657 u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
658 {
659 	return ath12k_core_get_max_station_per_radio(ab) + TARGET_NUM_VDEVS(ab);
660 }
661 EXPORT_SYMBOL(ath12k_core_get_max_peers_per_radio);
662 
ath12k_core_get_reserved_mem(struct ath12k_base * ab,int index)663 struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
664 						  int index)
665 {
666 	struct device *dev = ab->dev;
667 	struct reserved_mem *rmem;
668 	struct device_node *node;
669 
670 	node = of_parse_phandle(dev->of_node, "memory-region", index);
671 	if (!node) {
672 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
673 			   "failed to parse memory-region for index %d\n", index);
674 		return NULL;
675 	}
676 
677 	rmem = of_reserved_mem_lookup(node);
678 	of_node_put(node);
679 	if (!rmem) {
680 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
681 			   "unable to get memory-region for index %d\n", index);
682 		return NULL;
683 	}
684 
685 	return rmem;
686 }
687 
688 static inline
ath12k_core_to_group_ref_get(struct ath12k_base * ab)689 void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
690 {
691 	struct ath12k_hw_group *ag = ab->ag;
692 
693 	lockdep_assert_held(&ag->mutex);
694 
695 	if (ab->hw_group_ref) {
696 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
697 			   ag->id);
698 		return;
699 	}
700 
701 	ab->hw_group_ref = true;
702 	ag->num_started++;
703 
704 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
705 		   ag->id, ag->num_started);
706 }
707 
708 static inline
ath12k_core_to_group_ref_put(struct ath12k_base * ab)709 void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
710 {
711 	struct ath12k_hw_group *ag = ab->ag;
712 
713 	lockdep_assert_held(&ag->mutex);
714 
715 	if (!ab->hw_group_ref) {
716 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
717 			   ag->id);
718 		return;
719 	}
720 
721 	ab->hw_group_ref = false;
722 	ag->num_started--;
723 
724 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
725 		   ag->id, ag->num_started);
726 }
727 
/* Tear down everything ath12k_core_start() brought up, in reverse order.
 * The firmware-stop QMI exchange is skipped after a crash since the target
 * can no longer answer.
 */
static void ath12k_core_stop(struct ath12k_base *ab)
{
	ath12k_link_sta_rhash_tbl_destroy(ab);

	/* Drop this device's started-reference on the hw group. */
	ath12k_core_to_group_ref_put(ab);

	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_qmi_firmware_stop(ab);

	ath12k_acpi_stop(ab);

	ath12k_dp_rx_pdev_reo_cleanup(ab);
	ath12k_hif_stop(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));

	/* De-Init of components as needed */
}
746 
/* dmi_walk() callback: inspect one SMBIOS table entry for the vendor-specific
 * ath12k record that carries a country code and an optional BDF variant name.
 * Fills ab->new_alpha2 (under base_lock) and ab->qmi.target.bdf_ext.
 */
static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
	struct ath12k_base *ab = data;
	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
	ssize_t copied;
	size_t len;
	int i;

	/* A variant was already found in an earlier table entry; keep it. */
	if (ab->qmi.target.bdf_ext[0] != '\0')
		return;

	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
		return;

	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "wrong smbios bdf ext type length (%d).\n",
			   hdr->length);
		return;
	}

	/* new_alpha2 is read elsewhere; update it under base_lock. */
	spin_lock_bh(&ab->base_lock);

	switch (smbios->country_code_flag) {
	case ATH12K_SMBIOS_CC_ISO:
		/* cc_code packs two ISO alpha-2 characters. */
		ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
		ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
			   ab->new_alpha2[0], ab->new_alpha2[1]);
		break;
	case ATH12K_SMBIOS_CC_WW:
		/* "00" denotes the worldwide regulatory domain. */
		ab->new_alpha2[0] = '0';
		ab->new_alpha2[1] = '0';
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
			   smbios->country_code_flag);
		break;
	}

	spin_unlock_bh(&ab->base_lock);

	if (!smbios->bdf_enabled) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
		return;
	}

	/* Only one string exists (per spec) */
	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant magic does not match.\n");
		return;
	}

	/* Reject non-printable variant names before copying them. */
	len = min_t(size_t,
		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
	for (i = 0; i < len; i++) {
		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "bdf variant name contains non ascii chars.\n");
			return;
		}
	}

	/* Copy extension name without magic prefix */
	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
			 sizeof(ab->qmi.target.bdf_ext));
	if (copied < 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant string is longer than the buffer can accommodate\n");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT,
		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
}
826 
ath12k_core_check_smbios(struct ath12k_base * ab)827 int ath12k_core_check_smbios(struct ath12k_base *ab)
828 {
829 	ab->qmi.target.bdf_ext[0] = '\0';
830 	dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
831 
832 	if (ab->qmi.target.bdf_ext[0] == '\0')
833 		return -ENODATA;
834 
835 	return 0;
836 }
837 
/* SoC-level bring-up: start the QMI service, create debugfs entries and
 * power up the HIF.  On power-up failure both debugfs and QMI are unwound.
 */
static int ath12k_core_soc_create(struct ath12k_base *ab)
{
	int ret;

	if (ath12k_ftm_mode) {
		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
		ath12k_info(ab, "Booting in ftm mode\n");
	}

	ret = ath12k_qmi_init_service(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
		return ret;
	}

	ath12k_debugfs_soc_create(ab);

	ret = ath12k_hif_power_up(ab);
	if (ret) {
		ath12k_err(ab, "failed to power up :%d\n", ret);
		goto err_qmi_deinit;
	}

	ath12k_debugfs_pdev_create(ab);

	return 0;

err_qmi_deinit:
	/* Unwind in reverse creation order. */
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
	return ret;
}
870 
/* Reverse of ath12k_core_soc_create(): power down, free regulatory data and
 * tear down debugfs/QMI.  The 'false' argument marks a non-suspend power-down.
 */
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
	ath12k_hif_power_down(ab, false);
	ath12k_reg_free(ab);
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
}
878 
/* Per-pdev bring-up: currently only allocates the data-path pdev state. */
static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
	int ret = ath12k_dp_pdev_alloc(ab);

	if (ret)
		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);

	return ret;
}
891 
/* Reverse of ath12k_core_pdev_create(): release the data-path pdev state. */
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
	ath12k_dp_pdev_free(ab);
}
896 
/* Bring the core online: attach WMI/HTC, start the HIF, complete the
 * WMI/HTT handshakes, set up REO rings and initialize firmware.  Must be
 * called with ab->core_lock held.  On failure everything brought up so far
 * is unwound via the cascading error labels.
 */
static int ath12k_core_start(struct ath12k_base *ab)
{
	int ret;

	lockdep_assert_held(&ab->core_lock);

	ret = ath12k_wmi_attach(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
		return ret;
	}

	ret = ath12k_htc_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init htc: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_hif_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start HIF: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_htc_wait_target(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
		goto err_hif_stop;
	}

	/* Connect the HTT and WMI service endpoints over HTC. */
	ret = ath12k_dp_htt_connect(ath12k_ab_to_dp(ab));
	if (ret) {
		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_connect(ab);
	if (ret) {
		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_htc_start(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to start HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_wait_for_service_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
			   ret);
		goto err_hif_stop;
	}

	ath12k_hal_cc_config(ab);

	ret = ath12k_dp_rx_pdev_reo_setup(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_cmd_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
		goto err_reo_cleanup;
	}

	ret = ath12k_wmi_wait_for_unified_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	/* put hardware to DBS mode */
	if (ab->hw_params->single_pdev_only) {
		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
		if (ret) {
			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
			goto err_reo_cleanup;
		}
	}

	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
	if (ret) {
		ath12k_err(ab, "failed to send htt version request message: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	ath12k_acpi_set_dsm_func(ab);

	/* Indicate the core start in the appropriate group */
	ath12k_core_to_group_ref_get(ab);

	ret = ath12k_link_sta_rhash_tbl_init(ab);
	if (ret) {
		ath12k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
		goto err_reo_cleanup;
	}

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);
err_hif_stop:
	ath12k_hif_stop(ab);
err_wmi_detach:
	ath12k_wmi_detach(ab);
	return ret;
}
1010 
/* Per-device cleanup used during group stop: disable HIF interrupts and
 * destroy the pdev state, serialized against other core operations by
 * core_lock.
 */
static void ath12k_core_device_cleanup(struct ath12k_base *ab)
{
	mutex_lock(&ab->core_lock);

	ath12k_hif_irq_disable(ab);
	ath12k_core_pdev_destroy(ab);

	mutex_unlock(&ab->core_lock);
}
1020 
/* Stop an entire hw group: unregister mac80211 state, clean up each member
 * device in reverse registration order, then destroy the group's mac state.
 * Must be called with ag->mutex held.
 */
static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	lockdep_assert_held(&ag->mutex);

	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

	ath12k_mac_unregister(ag);

	/* Reverse order: last-attached device is torn down first. */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ath12k_core_device_cleanup(ab);
	}

	ath12k_mac_destroy(ag);
}
1044 
ath12k_get_num_partner_link(struct ath12k * ar)1045 u8 ath12k_get_num_partner_link(struct ath12k *ar)
1046 {
1047 	struct ath12k_base *partner_ab, *ab = ar->ab;
1048 	struct ath12k_hw_group *ag = ab->ag;
1049 	struct ath12k_pdev *pdev;
1050 	u8 num_link = 0;
1051 	int i, j;
1052 
1053 	lockdep_assert_held(&ag->mutex);
1054 
1055 	for (i = 0; i < ag->num_devices; i++) {
1056 		partner_ab = ag->ab[i];
1057 
1058 		for (j = 0; j < partner_ab->num_radios; j++) {
1059 			pdev = &partner_ab->pdevs[j];
1060 
1061 			/* Avoid the self link */
1062 			if (ar == pdev->ar)
1063 				continue;
1064 
1065 			num_link++;
1066 		}
1067 	}
1068 
1069 	return num_link;
1070 }
1071 
__ath12k_mac_mlo_ready(struct ath12k * ar)1072 static int __ath12k_mac_mlo_ready(struct ath12k *ar)
1073 {
1074 	u8 num_link = ath12k_get_num_partner_link(ar);
1075 	int ret;
1076 
1077 	if (num_link == 0)
1078 		return 0;
1079 
1080 	ret = ath12k_wmi_mlo_ready(ar);
1081 	if (ret) {
1082 		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
1083 			   ar->pdev_idx, ret);
1084 		return ret;
1085 	}
1086 
1087 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
1088 		   ar->pdev_idx);
1089 
1090 	return 0;
1091 }
1092 
ath12k_mac_mlo_ready(struct ath12k_hw_group * ag)1093 int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
1094 {
1095 	struct ath12k_hw *ah;
1096 	struct ath12k *ar;
1097 	int ret;
1098 	int i, j;
1099 
1100 	for (i = 0; i < ag->num_hw; i++) {
1101 		ah = ag->ah[i];
1102 		if (!ah)
1103 			continue;
1104 
1105 		for_each_ar(ah, ar, j) {
1106 			ar = &ah->radio[j];
1107 			ret = __ath12k_mac_mlo_ready(ar);
1108 			if (ret)
1109 				return ret;
1110 		}
1111 	}
1112 
1113 	return 0;
1114 }
1115 
/* Perform group-level MLO bring-up: per-group MLO setup, cross-device
 * completion-counter init, then the MLO-ready handshake.
 *
 * No-op for groups that are not MLO capable.
 *
 * Return: 0 on success, negative errno otherwise (MLO is torn down
 * again if the ready step fails).
 */
static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
{
	int ret, i;

	if (!ag->mlo_capable)
		return 0;

	ret = ath12k_mac_mlo_setup(ag);
	if (ret)
		return ret;

	/* NOTE(review): unlike most ag->ab[] walks this loop does not
	 * NULL-check the slots; presumably all are populated once group
	 * start is reached — confirm.
	 */
	for (i = 0; i < ag->num_devices; i++)
		ath12k_dp_partner_cc_init(ag->ab[i]);

	ret = ath12k_mac_mlo_ready(ag);
	if (ret)
		goto err_mlo_teardown;

	return 0;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

	return ret;
}
1141 
/* Bring a hardware group into operation.
 *
 * On the first start the group's mac80211 hardware is allocated, MLO is
 * set up and the group is registered; on later calls (REGISTERED flag
 * already set) only the per-device pdev bring-up is redone.
 *
 * Must be called with ag->mutex held.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int ret, i;

	lockdep_assert_held(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
		goto core_pdev_create;

	ret = ath12k_mac_allocate(ag);
	if (WARN_ON(ret))
		return ret;

	ret = ath12k_core_mlo_setup(ag);
	if (WARN_ON(ret))
		goto err_mac_destroy;

	ret = ath12k_mac_register(ag);
	if (WARN_ON(ret))
		goto err_mlo_teardown;

	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

core_pdev_create:
	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ret = ath12k_core_pdev_create(ab);
		if (ret) {
			ath12k_err(ab, "failed to create pdev core %d\n", ret);
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		ath12k_hif_irq_enable(ab);

		/* -EOPNOTSUPP only means the target has no rfkill support;
		 * any other error aborts the group start.
		 */
		ret = ath12k_core_rfkill_config(ab);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

err:
	/* Past registration: stopping the whole group also cleans up any
	 * devices already brought up by the loop above.
	 */
	ath12k_core_hw_group_stop(ag);
	return ret;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

err_mac_destroy:
	ath12k_mac_destroy(ag);

	return ret;
}
1208 
ath12k_core_start_firmware(struct ath12k_base * ab,enum ath12k_firmware_mode mode)1209 static int ath12k_core_start_firmware(struct ath12k_base *ab,
1210 				      enum ath12k_firmware_mode mode)
1211 {
1212 	int ret;
1213 
1214 	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
1215 				    &ab->qmi.ce_cfg.shadow_reg_v3_len);
1216 
1217 	ret = ath12k_qmi_firmware_start(ab, mode);
1218 	if (ret) {
1219 		ath12k_err(ab, "failed to send firmware start: %d\n", ret);
1220 		return ret;
1221 	}
1222 
1223 	return ret;
1224 }
1225 
1226 static inline
ath12k_core_hw_group_start_ready(struct ath12k_hw_group * ag)1227 bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
1228 {
1229 	lockdep_assert_held(&ag->mutex);
1230 
1231 	return (ag->num_started == ag->num_devices);
1232 }
1233 
ath12k_fw_stats_pdevs_free(struct list_head * head)1234 static void ath12k_fw_stats_pdevs_free(struct list_head *head)
1235 {
1236 	struct ath12k_fw_stats_pdev *i, *tmp;
1237 
1238 	list_for_each_entry_safe(i, tmp, head, list) {
1239 		list_del(&i->list);
1240 		kfree(i);
1241 	}
1242 }
1243 
ath12k_fw_stats_bcn_free(struct list_head * head)1244 void ath12k_fw_stats_bcn_free(struct list_head *head)
1245 {
1246 	struct ath12k_fw_stats_bcn *i, *tmp;
1247 
1248 	list_for_each_entry_safe(i, tmp, head, list) {
1249 		list_del(&i->list);
1250 		kfree(i);
1251 	}
1252 }
1253 
ath12k_fw_stats_vdevs_free(struct list_head * head)1254 static void ath12k_fw_stats_vdevs_free(struct list_head *head)
1255 {
1256 	struct ath12k_fw_stats_vdev *i, *tmp;
1257 
1258 	list_for_each_entry_safe(i, tmp, head, list) {
1259 		list_del(&i->list);
1260 		kfree(i);
1261 	}
1262 }
1263 
ath12k_fw_stats_init(struct ath12k * ar)1264 void ath12k_fw_stats_init(struct ath12k *ar)
1265 {
1266 	INIT_LIST_HEAD(&ar->fw_stats.vdevs);
1267 	INIT_LIST_HEAD(&ar->fw_stats.pdevs);
1268 	INIT_LIST_HEAD(&ar->fw_stats.bcn);
1269 	init_completion(&ar->fw_stats_complete);
1270 	init_completion(&ar->fw_stats_done);
1271 }
1272 
ath12k_fw_stats_free(struct ath12k_fw_stats * stats)1273 void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
1274 {
1275 	ath12k_fw_stats_pdevs_free(&stats->pdevs);
1276 	ath12k_fw_stats_vdevs_free(&stats->vdevs);
1277 	ath12k_fw_stats_bcn_free(&stats->bcn);
1278 }
1279 
/* Drop all collected firmware stats of @ar and reset the vdev-received
 * counter, under data_lock so this cannot race with stats updates.
 */
void ath12k_fw_stats_reset(struct ath12k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ath12k_fw_stats_free(&ar->fw_stats);
	ar->fw_stats.num_vdev_recvd = 0;
	spin_unlock_bh(&ar->data_lock);
}
1287 
ath12k_core_trigger_partner(struct ath12k_base * ab)1288 static void ath12k_core_trigger_partner(struct ath12k_base *ab)
1289 {
1290 	struct ath12k_hw_group *ag = ab->ag;
1291 	struct ath12k_base *partner_ab;
1292 	bool found = false;
1293 	int i;
1294 
1295 	for (i = 0; i < ag->num_devices; i++) {
1296 		partner_ab = ag->ab[i];
1297 		if (!partner_ab)
1298 			continue;
1299 
1300 		if (found)
1301 			ath12k_qmi_trigger_host_cap(partner_ab);
1302 
1303 		found = (partner_ab == ab);
1304 	}
1305 }
1306 
/* QMI "firmware ready" handler: start the firmware, initialize CE and DP
 * for this device, run core start and — once the last device of the
 * group has started — start the whole hardware group (otherwise trigger
 * the next partner device).
 *
 * Return: 0 on success, negative errno otherwise.
 */
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	int ret, i;

	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
	if (ret) {
		ath12k_err(ab, "failed to start firmware: %d\n", ret);
		return ret;
	}

	ret = ath12k_ce_init_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
		goto err_firmware_stop;
	}

	ret = ath12k_dp_cmn_device_init(ath12k_ab_to_dp(ab));
	if (ret) {
		ath12k_err(ab, "failed to init DP: %d\n", ret);
		goto err_firmware_stop;
	}

	/* Lock order: group mutex first, then the device core_lock. */
	mutex_lock(&ag->mutex);
	mutex_lock(&ab->core_lock);

	ret = ath12k_core_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start core: %d\n", ret);
		goto err_deinit;
	}

	mutex_unlock(&ab->core_lock);

	if (ath12k_core_hw_group_start_ready(ag)) {
		ret = ath12k_core_hw_group_start(ag);
		if (ret) {
			ath12k_warn(ab, "unable to start hw group\n");
			goto err_core_stop;
		}
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
	} else {
		/* Not all partners are up yet; kick the next one. */
		ath12k_core_trigger_partner(ab);
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_core_stop:
	/* Group start failed: stop every started device of the group in
	 * reverse order. Note @ab is reused as the loop cursor here.
	 */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}
	mutex_unlock(&ag->mutex);
	goto exit;

err_deinit:
	ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));
	mutex_unlock(&ab->core_lock);
	mutex_unlock(&ag->mutex);

err_firmware_stop:
	ath12k_qmi_firmware_stop(ab);

exit:
	return ret;
}
1380 
/* Reinitialize the device after a firmware crash: tear down the old
 * DP/CE/WMI state, rebuild the SRNG rings and re-run the firmware-ready
 * sequence.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
	int ret, total_vdev;

	mutex_lock(&ab->core_lock);
	ath12k_link_sta_rhash_tbl_destroy(ab);
	ath12k_dp_pdev_free(ab);
	ath12k_ce_cleanup_pipes(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_rx_pdev_reo_cleanup(ab);
	mutex_unlock(&ab->core_lock);

	ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));
	ath12k_hal_srng_deinit(ab);
	/* Mark every vdev id free again. NOTE(review): assumes
	 * num_radios * TARGET_NUM_VDEVS() stays below 64, otherwise this
	 * shift is undefined — confirm against the target config.
	 */
	total_vdev = ab->num_radios * TARGET_NUM_VDEVS(ab);
	ab->free_vdev_map = (1LL << total_vdev) - 1;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		return ret;

	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	ret = ath12k_core_qmi_firmware_ready(ab);
	if (ret)
		goto err_hal_srng_deinit;

	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);

	return 0;

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);
	return ret;
}
1416 
ath12k_rfkill_work(struct work_struct * work)1417 static void ath12k_rfkill_work(struct work_struct *work)
1418 {
1419 	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1420 	struct ath12k_hw_group *ag = ab->ag;
1421 	struct ath12k *ar;
1422 	struct ath12k_hw *ah;
1423 	struct ieee80211_hw *hw;
1424 	bool rfkill_radio_on;
1425 	int i, j;
1426 
1427 	spin_lock_bh(&ab->base_lock);
1428 	rfkill_radio_on = ab->rfkill_radio_on;
1429 	spin_unlock_bh(&ab->base_lock);
1430 
1431 	for (i = 0; i < ag->num_hw; i++) {
1432 		ah = ath12k_ag_to_ah(ag, i);
1433 		if (!ah)
1434 			continue;
1435 
1436 		for (j = 0; j < ah->num_radio; j++) {
1437 			ar = &ah->radio[j];
1438 			if (!ar)
1439 				continue;
1440 
1441 			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1442 		}
1443 
1444 		hw = ah->hw;
1445 		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1446 	}
1447 }
1448 
/* Halt one radio during recovery: reset vdev accounting, finish/cancel
 * all pending work and detach the radio from the active pdev table.
 *
 * Must be called with the radio's wiphy mutex held.
 */
void ath12k_core_halt(struct ath12k *ar)
{
	struct list_head *pos, *n;
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ar->num_created_vdevs = 0;
	ar->allocated_vdev_map = 0;

	ath12k_mac_scan_finish(ar);
	ath12k_mac_peer_cleanup_all(ar);
	cancel_delayed_work_sync(&ar->scan.timeout);
	cancel_work_sync(&ar->regd_update_work);
	cancel_work_sync(&ar->regd_channel_update_work);
	cancel_work_sync(&ab->rfkill_work);
	cancel_work_sync(&ab->update_11d_work);

	/* Unpublish the pdev and wait for all RCU readers to finish with
	 * it before tearing down the vif list.
	 */
	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
	synchronize_rcu();

	spin_lock_bh(&ar->data_lock);
	list_for_each_safe(pos, n, &ar->arvifs)
		list_del_init(pos);
	spin_unlock_bh(&ar->data_lock);

	/* Fresh idr for tx-mgmt tracking after restart. */
	idr_init(&ar->txmgmt_idr);
}
1477 
/* First phase of crash recovery, run before the device reset: count the
 * crash, stop mac80211 queues, drain tx and complete every waitable
 * object a caller might otherwise sleep on forever, since the dead
 * firmware will never signal them.
 */
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_hw *ah;
	int i, j;

	spin_lock_bh(&ab->base_lock);
	ab->stats.fw_crash_counter++;
	spin_unlock_bh(&ab->base_lock);

	if (ab->is_reset)
		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		/* Skip hardware that was never started or is in test mode. */
		if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
		    ah->state == ATH12K_HW_STATE_TM)
			continue;

		wiphy_lock(ah->hw->wiphy);

		/* If queue 0 is stopped, it is safe to assume that all
		 * other queues are stopped by driver via
		 * ieee80211_stop_queues() below. This means, there is
		 * no need to stop it again and hence continue
		 */
		if (ieee80211_queue_stopped(ah->hw, 0)) {
			wiphy_unlock(ah->hw->wiphy);
			continue;
		}

		ieee80211_stop_queues(ah->hw);

		for (j = 0; j < ah->num_radio; j++) {
			ar = &ah->radio[j];

			ath12k_mac_drain_tx(ar);
			ar->state_11d = ATH12K_11D_IDLE;
			/* Complete everything anyone could be waiting on so
			 * recovery is not stalled by a sleeping caller.
			 */
			complete(&ar->completed_11d_scan);
			complete(&ar->scan.started);
			complete_all(&ar->scan.completed);
			complete(&ar->scan.on_channel);
			complete(&ar->peer_assoc_done);
			complete(&ar->peer_delete_done);
			complete(&ar->install_key_done);
			complete(&ar->vdev_setup_done);
			complete(&ar->vdev_delete_done);
			complete(&ar->bss_survey_done);
			complete_all(&ar->regd_update_completed);

			wake_up(&ar->dp.tx_empty_waitq);
			/* Flush pending tx-mgmt frames and drop the idr. */
			idr_for_each(&ar->txmgmt_idr,
				     ath12k_mac_tx_mgmt_pending_free, ar);
			idr_destroy(&ar->txmgmt_idr);
			wake_up(&ar->txmgmt_empty_waitq);

			ar->monitor_vdev_id = -1;
			ar->monitor_vdev_created = false;
			ar->monitor_started = false;
		}

		wiphy_unlock(ah->hw->wiphy);
	}

	wake_up(&ab->wmi_ab.tx_credits_wq);
	wake_up(&ab->peer_mapping_wq);
}
1546 
/* Worker that pushes the new 802.11d country code (staged in
 * ab->new_alpha2) to every radio of the device via WMI.
 */
static void ath12k_update_11d(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
	struct ath12k *ar;
	struct ath12k_pdev *pdev;
	struct wmi_set_current_country_arg arg = {};
	int ret, i;

	/* Snapshot the two-letter country code under base_lock. */
	spin_lock_bh(&ab->base_lock);
	memcpy(&arg.alpha2, &ab->new_alpha2, 2);
	spin_unlock_bh(&ab->base_lock);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
		   arg.alpha2[0], arg.alpha2[1]);

	for (i = 0; i < ab->num_radios; i++) {
		pdev = &ab->pdevs[i];
		ar = pdev->ar;

		memcpy(&ar->alpha2, &arg.alpha2, 2);

		/* A regd update completion follows the country change. */
		reinit_completion(&ar->regd_update_completed);

		ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
		if (ret)
			ath12k_warn(ar->ab,
				    "pdev id %d failed set current country code: %d\n",
				    i, ret);
	}
}
1577 
/* Second phase of crash recovery: move every hardware of the group into
 * the restarting state (or mark it wedged when a restart is impossible)
 * and signal driver recovery completion.
 */
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	struct ath12k *ar;
	int i, j;

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
			continue;

		wiphy_lock(ah->hw->wiphy);
		mutex_lock(&ah->hw_mutex);

		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;

			for (j = 0; j < ah->num_radio; j++) {
				ar = &ah->radio[j];
				ath12k_core_halt(ar);
			}

			ath12k_mac_dp_peer_cleanup(ah);
			break;
		case ATH12K_HW_STATE_OFF:
			/* Unreachable: OFF hardware is filtered out above;
			 * kept so the switch covers every state.
			 */
			ath12k_warn(ab,
				    "cannot restart hw %d that hasn't been started\n",
				    i);
			break;
		case ATH12K_HW_STATE_RESTARTING:
			break;
		case ATH12K_HW_STATE_RESTARTED:
			/* A second crash before the restart finished: the
			 * device is considered unrecoverable.
			 */
			ah->state = ATH12K_HW_STATE_WEDGED;
			fallthrough;
		case ATH12K_HW_STATE_WEDGED:
			ath12k_warn(ab,
				    "device is wedged, will not restart hw %d\n", i);
			break;
		case ATH12K_HW_STATE_TM:
			ath12k_warn(ab, "fw mode reset done radio %d\n", i);
			break;
		}

		mutex_unlock(&ah->hw_mutex);
		wiphy_unlock(ah->hw->wiphy);
	}

	complete(&ab->driver_recovery);
}
1629 
/* Restart worker: rebuild the device after a crash and, once every
 * device of the group has come back, ask mac80211 to restart each hw.
 */
static void ath12k_core_restart(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	int ret, i;

	ret = ath12k_core_reconfigure_on_crash(ab);
	if (ret) {
		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
		return;
	}

	if (ab->is_reset) {
		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
			/* No mac80211 registration to restart; the reset is
			 * fully handled here.
			 */
			atomic_dec(&ab->reset_count);
			complete(&ab->reset_complete);
			ab->is_reset = false;
			atomic_set(&ab->fail_cont_count, 0);
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
		}

		mutex_lock(&ag->mutex);

		/* Only the last device of the group to restart triggers the
		 * mac80211 hardware restarts.
		 */
		if (!ath12k_core_hw_group_start_ready(ag)) {
			mutex_unlock(&ag->mutex);
			goto exit_restart;
		}

		for (i = 0; i < ag->num_hw; i++) {
			ah = ath12k_ag_to_ah(ag, i);
			ieee80211_restart_hw(ah->hw);
		}

		mutex_unlock(&ag->mutex);
	}

exit_restart:
	complete(&ab->restart_completed);
}
1670 
/* Reset worker: serialize concurrent reset requests, quiesce and power
 * down this device, and — once every partner device of the group has
 * done the same — power the whole group back up.
 */
static void ath12k_core_reset(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
	struct ath12k_hw_group *ag = ab->ag;
	int reset_count, fail_cont_count, i;
	long time_left;

	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
		return;
	}

	/* Sometimes the recovery will fail and then the next all recovery fail,
	 * this is to avoid infinite recovery since it can not recovery success
	 */
	fail_cont_count = atomic_read(&ab->fail_cont_count);

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
		return;

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
	    time_before(jiffies, ab->reset_fail_timeout))
		return;

	reset_count = atomic_inc_return(&ab->reset_count);

	if (reset_count > 1) {
		/* Sometimes it happened another reset worker before the previous one
		 * completed, then the second reset worker will destroy the previous one,
		 * thus below is to avoid that.
		 */
		ath12k_warn(ab, "already resetting count %d\n", reset_count);

		reinit_completion(&ab->reset_complete);
		time_left = wait_for_completion_timeout(&ab->reset_complete,
							ATH12K_RESET_TIMEOUT_HZ);
		if (time_left) {
			/* The previous reset completed in time; this one is
			 * redundant and can be skipped.
			 */
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
			atomic_dec(&ab->reset_count);
			return;
		}

		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
		/* Record the continuous recovery fail count when recovery failed */
		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");

	ab->is_reset = true;
	atomic_set(&ab->recovery_count, 0);

	ath12k_coredump_collect(ab);
	ath12k_core_pre_reconfigure_recovery(ab);

	ath12k_core_post_reconfigure_recovery(ab);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");

	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	ath12k_hif_power_down(ab, false);

	/* prepare for power up */
	ab->qmi.num_radios = U8_MAX;

	mutex_lock(&ag->mutex);
	ath12k_core_to_group_ref_put(ab);

	if (ag->num_started > 0) {
		/* Partner devices are still up; the last one to drop its
		 * group reference performs the group-wide power up below.
		 */
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "waiting for %d partner device(s) to reset\n",
			   ag->num_started);
		mutex_unlock(&ag->mutex);
		return;
	}

	/* Prepare MLO global memory region for power up */
	ath12k_qmi_reset_mlo_mem(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		ath12k_hif_power_up(ab);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
	}

	mutex_unlock(&ag->mutex);
}
1763 
ath12k_core_get_memory_mode(struct ath12k_base * ab)1764 enum ath12k_qmi_mem_mode ath12k_core_get_memory_mode(struct ath12k_base *ab)
1765 {
1766 	unsigned long total_ram;
1767 	struct sysinfo si;
1768 
1769 	si_meminfo(&si);
1770 	total_ram = si.totalram * si.mem_unit;
1771 
1772 	if (total_ram < SZ_512M)
1773 		return ATH12K_QMI_MEMORY_MODE_LOW_512_M;
1774 
1775 	return ATH12K_QMI_MEMORY_MODE_DEFAULT;
1776 }
1777 EXPORT_SYMBOL(ath12k_core_get_memory_mode);
1778 
ath12k_core_pre_init(struct ath12k_base * ab)1779 int ath12k_core_pre_init(struct ath12k_base *ab)
1780 {
1781 	const struct ath12k_mem_profile_based_param *param;
1782 
1783 	param = &ath12k_mem_profile_based_param[ab->target_mem_mode];
1784 	ab->profile_param = param;
1785 	ath12k_fw_map(ab);
1786 
1787 	return 0;
1788 }
1789 
ath12k_core_panic_handler(struct notifier_block * nb,unsigned long action,void * data)1790 static int ath12k_core_panic_handler(struct notifier_block *nb,
1791 				     unsigned long action, void *data)
1792 {
1793 	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
1794 					      panic_nb);
1795 
1796 	return ath12k_hif_panic_handler(ab);
1797 }
1798 
ath12k_core_panic_notifier_register(struct ath12k_base * ab)1799 static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
1800 {
1801 	ab->panic_nb.notifier_call = ath12k_core_panic_handler;
1802 
1803 	return atomic_notifier_chain_register(&panic_notifier_list,
1804 					      &ab->panic_nb);
1805 }
1806 
/* Remove this device from the kernel panic notifier chain. */
static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &ab->panic_nb);
}
1812 
1813 static inline
ath12k_core_hw_group_create_ready(struct ath12k_hw_group * ag)1814 bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1815 {
1816 	lockdep_assert_held(&ag->mutex);
1817 
1818 	return (ag->num_probed == ag->num_devices);
1819 }
1820 
ath12k_core_hw_group_alloc(struct ath12k_base * ab)1821 static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
1822 {
1823 	struct ath12k_hw_group *ag;
1824 	int count = 0;
1825 
1826 	lockdep_assert_held(&ath12k_hw_group_mutex);
1827 
1828 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1829 		count++;
1830 
1831 	ag = kzalloc_obj(*ag);
1832 	if (!ag)
1833 		return NULL;
1834 
1835 	ag->id = count;
1836 	list_add(&ag->list, &ath12k_hw_group_list);
1837 	mutex_init(&ag->mutex);
1838 	ag->mlo_capable = false;
1839 
1840 	return ag;
1841 }
1842 
/* Unlink a hardware group from the global list and free it.
 * Takes ath12k_hw_group_mutex itself; callers must not hold it.
 */
static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
{
	mutex_lock(&ath12k_hw_group_mutex);

	list_del(&ag->list);
	kfree(ag);

	mutex_unlock(&ath12k_hw_group_mutex);
}
1852 
ath12k_core_hw_group_find_by_dt(struct ath12k_base * ab)1853 static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
1854 {
1855 	struct ath12k_hw_group *ag;
1856 	int i;
1857 
1858 	if (!ab->dev->of_node)
1859 		return NULL;
1860 
1861 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1862 		for (i = 0; i < ag->num_devices; i++)
1863 			if (ag->wsi_node[i] == ab->dev->of_node)
1864 				return ag;
1865 
1866 	return NULL;
1867 }
1868 
ath12k_core_get_wsi_info(struct ath12k_hw_group * ag,struct ath12k_base * ab)1869 static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
1870 				    struct ath12k_base *ab)
1871 {
1872 	struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
1873 	struct device_node *tx_endpoint, *next_rx_endpoint;
1874 	int device_count = 0;
1875 
1876 	next_wsi_dev = wsi_dev;
1877 
1878 	if (!next_wsi_dev)
1879 		return -ENODEV;
1880 
1881 	do {
1882 		ag->wsi_node[device_count] = next_wsi_dev;
1883 
1884 		tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
1885 		if (!tx_endpoint) {
1886 			of_node_put(next_wsi_dev);
1887 			return -ENODEV;
1888 		}
1889 
1890 		next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
1891 		if (!next_rx_endpoint) {
1892 			of_node_put(next_wsi_dev);
1893 			of_node_put(tx_endpoint);
1894 			return -ENODEV;
1895 		}
1896 
1897 		of_node_put(tx_endpoint);
1898 		of_node_put(next_wsi_dev);
1899 
1900 		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
1901 		if (!next_wsi_dev) {
1902 			of_node_put(next_rx_endpoint);
1903 			return -ENODEV;
1904 		}
1905 
1906 		of_node_put(next_rx_endpoint);
1907 
1908 		device_count++;
1909 		if (device_count > ATH12K_MAX_DEVICES) {
1910 			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
1911 				    device_count, ATH12K_MAX_DEVICES);
1912 			of_node_put(next_wsi_dev);
1913 			return -EINVAL;
1914 		}
1915 	} while (wsi_dev != next_wsi_dev);
1916 
1917 	of_node_put(next_wsi_dev);
1918 	ag->num_devices = device_count;
1919 
1920 	return 0;
1921 }
1922 
ath12k_core_get_wsi_index(struct ath12k_hw_group * ag,struct ath12k_base * ab)1923 static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
1924 				     struct ath12k_base *ab)
1925 {
1926 	int i, wsi_controller_index = -1, node_index = -1;
1927 	bool control;
1928 
1929 	for (i = 0; i < ag->num_devices; i++) {
1930 		control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
1931 		if (control)
1932 			wsi_controller_index = i;
1933 
1934 		if (ag->wsi_node[i] == ab->dev->of_node)
1935 			node_index = i;
1936 	}
1937 
1938 	if (wsi_controller_index == -1) {
1939 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
1940 		return -EINVAL;
1941 	}
1942 
1943 	if (node_index == -1) {
1944 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
1945 		return -EINVAL;
1946 	}
1947 
1948 	ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
1949 		ag->num_devices;
1950 
1951 	return 0;
1952 }
1953 
/* Assign @ab to a hardware group, allocating a fresh group when no
 * matching one exists.
 *
 * Must be called with ath12k_hw_group_mutex held.
 *
 * Return: the group the device joined, or NULL on allocation failure.
 */
static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
{
	struct ath12k_wsi_info *wsi = &ab->wsi_info;
	struct ath12k_hw_group *ag;

	lockdep_assert_held(&ath12k_hw_group_mutex);

	/* FTM mode keeps every device standalone. */
	if (ath12k_ftm_mode)
		goto invalid_group;

	/* The grouping of multiple devices will be done based on device tree file.
	 * The platforms that do not have any valid group information would have
	 * each device to be part of its own invalid group.
	 *
	 * We use group id ATH12K_INVALID_GROUP_ID for single device group
	 * which didn't have dt entry or wrong dt entry, there could be many
	 * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So
	 * default group id of ATH12K_INVALID_GROUP_ID combined with
	 * num devices in ath12k_hw_group determines if the group is
	 * multi device or single device group
	 */

	ag = ath12k_core_hw_group_find_by_dt(ab);
	if (!ag) {
		ag = ath12k_core_hw_group_alloc(ab);
		if (!ag) {
			ath12k_warn(ab, "unable to create new hw group\n");
			return NULL;
		}

		if (ath12k_core_get_wsi_info(ag, ab) ||
		    ath12k_core_get_wsi_index(ag, ab)) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "unable to get wsi info from dt, grouping single device");
			ag->id = ATH12K_INVALID_GROUP_ID;
			ag->num_devices = 1;
			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
			wsi->index = 0;
		}

		goto exit;
	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
			   ag->id);
		goto invalid_group;
	} else {
		if (ath12k_core_get_wsi_index(ag, ab))
			goto invalid_group;
		goto exit;
	}

invalid_group:
	ag = ath12k_core_hw_group_alloc(ab);
	if (!ag) {
		ath12k_warn(ab, "unable to create new hw group\n");
		return NULL;
	}

	ag->id = ATH12K_INVALID_GROUP_ID;
	ag->num_devices = 1;
	wsi->index = 0;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");

exit:
	/* A full group cannot take another device; fall back to a fresh
	 * single-device group for this one.
	 */
	if (ag->num_probed >= ag->num_devices) {
		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
		goto invalid_group;
	}

	ab->device_id = ag->num_probed++;
	ag->ab[ab->device_id] = ab;
	ab->ag = ag;

	ath12k_dp_cmn_hw_group_assign(ath12k_ab_to_dp(ab), ag);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
		   ag->id, ag->num_devices, wsi->index);

	return ag;
}
2035 
/* Detach @ab from its hardware group; the group itself is freed when
 * the last probed device leaves.
 */
void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	u8 device_id = ab->device_id;
	int num_probed;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (WARN_ON(device_id >= ag->num_devices)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	if (WARN_ON(ag->ab[device_id] != ab)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	ath12k_dp_cmn_hw_group_unassign(ath12k_ab_to_dp(ab), ag);

	ag->ab[device_id] = NULL;
	ab->ag = NULL;
	ab->device_id = ATH12K_INVALID_DEVICE_ID;

	if (ag->num_probed)
		ag->num_probed--;

	/* Sample under the lock; the group is freed outside it (the free
	 * path takes the global group-list mutex).
	 */
	num_probed = ag->num_probed;

	mutex_unlock(&ag->mutex);

	if (!num_probed)
		ath12k_core_hw_group_free(ag);
}
2073 
ath12k_core_hw_group_destroy(struct ath12k_hw_group * ag)2074 static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
2075 {
2076 	struct ath12k_base *ab;
2077 	int i;
2078 
2079 	if (WARN_ON(!ag))
2080 		return;
2081 
2082 	for (i = 0; i < ag->num_devices; i++) {
2083 		ab = ag->ab[i];
2084 		if (!ab)
2085 			continue;
2086 
2087 		ath12k_core_soc_destroy(ab);
2088 	}
2089 }
2090 
/* Unregister and stop a hardware group exactly once: the UNREGISTER
 * flag guards against a second cleanup racing in.
 */
void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);

	ath12k_core_hw_group_stop(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}

	mutex_unlock(&ag->mutex);
}
2122 
/* Create the SoC-level state of every device in the group, unwinding
 * the already-created ones on failure.
 *
 * Must be called with ag->mutex held.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i, ret;

	lockdep_assert_held(&ag->mutex);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		ret = ath12k_core_soc_create(ab);
		if (ret) {
			mutex_unlock(&ab->core_lock);
			ath12k_err(ab, "failed to create soc %d core: %d\n", i, ret);
			goto destroy;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

destroy:
	/* Unwind only the devices created by the loop above. */
	for (i--; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_soc_destroy(ab);
		mutex_unlock(&ab->core_lock);
	}

	return ret;
}
2162 
/* Decide whether the group as a whole may use MLO.
 *
 * Single-device groups rely on the device's own firmware advertisement;
 * multi-device groups are MLO capable only when every member's firmware
 * supports it. FTM mode never enables MLO.
 *
 * Must be called with ag->mutex held.
 */
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (ath12k_ftm_mode)
		return;

	lockdep_assert_held(&ag->mutex);

	if (ag->num_devices == 1) {
		ab = ag->ab[0];
		/* QCN9274 firmware uses firmware IE for MLO advertisement */
		if (ab->fw.fw_features_valid) {
			ag->mlo_capable =
				ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
			return;
		}

		/* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
		ag->mlo_capable = ab->single_chip_mlo_support;
		return;
	}

	ag->mlo_capable = true;

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		/* even if 1 device's firmware feature indicates MLO
		 * unsupported, make MLO unsupported for the whole group
		 */
		if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
			ag->mlo_capable = false;
			return;
		}
	}
}
2203 
/* Attach @ab to its hw group and, once the group has all its expected
 * devices, create the group.
 *
 * Returns 0 on success or a negative error code. Panic-notifier
 * registration failure is deliberately non-fatal (warn only).
 */
int ath12k_core_init(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag;
	int ret;

	ret = ath12k_core_panic_notifier_register(ab);
	if (ret)
		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);

	/* Group assignment is serialized by the global group mutex; the
	 * per-group mutex is taken only after the global one is dropped,
	 * so the two locks are never held simultaneously here.
	 */
	mutex_lock(&ath12k_hw_group_mutex);

	ag = ath12k_core_hw_group_assign(ab);
	if (!ag) {
		mutex_unlock(&ath12k_hw_group_mutex);
		ath12k_warn(ab, "unable to get hw group\n");
		ret = -ENODEV;
		goto err_unregister_notifier;
	}

	mutex_unlock(&ath12k_hw_group_mutex);

	mutex_lock(&ag->mutex);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
		   ag->num_devices, ag->num_probed);

	/* Only the last-probed device of a group triggers creation. */
	if (ath12k_core_hw_group_create_ready(ag)) {
		ret = ath12k_core_hw_group_create(ag);
		if (ret) {
			mutex_unlock(&ag->mutex);
			ath12k_warn(ab, "unable to create hw group\n");
			goto err_unassign_hw_group;
		}
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_unassign_hw_group:
	ath12k_core_hw_group_unassign(ab);
err_unregister_notifier:
	ath12k_core_panic_notifier_unregister(ab);

	return ret;
}
2250 
ath12k_core_deinit(struct ath12k_base * ab)2251 void ath12k_core_deinit(struct ath12k_base *ab)
2252 {
2253 	ath12k_core_hw_group_destroy(ab->ag);
2254 	ath12k_core_hw_group_unassign(ab);
2255 	ath12k_core_panic_notifier_unregister(ab);
2256 }
2257 
/* Final teardown counterpart of ath12k_core_alloc(): stop the RX
 * replenish timer, destroy both workqueues, then free the device.
 * Order matters — the timer and queued work may reference @ab.
 */
void ath12k_core_free(struct ath12k_base *ab)
{
	timer_delete_sync(&ab->rx_replenish_retry);
	destroy_workqueue(ab->workqueue_aux);
	destroy_workqueue(ab->workqueue);
	kfree(ab);
}
2265 
/* Allocate and minimally initialize an ath12k_base with @priv_size bytes
 * of bus-private data appended.
 *
 * Sets up workqueues, locks, completions, work items and the RX
 * replenish timer. Returns NULL on allocation failure with everything
 * already-allocated released.
 */
struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
				      enum ath12k_bus bus)
{
	struct ath12k_base *ab;

	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
	if (!ab)
		return NULL;

	init_completion(&ab->driver_recovery);

	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
	if (!ab->workqueue)
		goto err_free_ab;

	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
	if (!ab->workqueue_aux)
		goto err_destroy_wq;

	mutex_init(&ab->core_lock);
	spin_lock_init(&ab->base_lock);
	init_completion(&ab->reset_complete);

	init_waitqueue_head(&ab->peer_mapping_wq);
	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
	INIT_WORK(&ab->restart_work, ath12k_core_restart);
	INIT_WORK(&ab->reset_work, ath12k_core_reset);
	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
	INIT_WORK(&ab->update_11d_work, ath12k_update_11d);

	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
	init_completion(&ab->htc_suspend);
	init_completion(&ab->restart_completed);
	init_completion(&ab->wow.wakeup_completed);

	ab->dev = dev;
	ab->hif.bus = bus;
	ab->qmi.num_radios = U8_MAX;
	ab->single_chip_mlo_support = false;

	/* Index of this device within its hw group: always zero for
	 * intra-device MLO (single device per group); non-zero values
	 * are expected only with inter-device MLO.
	 */
	ab->device_id = 0;

	return ab;

err_destroy_wq:
	destroy_workqueue(ab->workqueue);
err_free_ab:
	kfree(ab);
	return NULL;
}
2325 
/* Module metadata. */
MODULE_DESCRIPTION("Driver support for Qualcomm Technologies WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");