xref: /linux/drivers/net/wireless/ath/ath12k/core.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5  */
6 
7 #include <linux/export.h>
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/remoteproc.h>
11 #include <linux/firmware.h>
12 #include <linux/of.h>
13 #include <linux/of_graph.h>
14 #include "ahb.h"
15 #include "core.h"
16 #include "dp_tx.h"
17 #include "dp_rx.h"
18 #include "debug.h"
19 #include "debugfs.h"
20 #include "fw.h"
21 #include "hif.h"
22 #include "pci.h"
23 #include "wow.h"
24 #include "dp_cmn.h"
25 #include "peer.h"
26 
/* Runtime-writable debug log mask, shared by the PCI/AHB sub-modules. */
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
EXPORT_SYMBOL(ath12k_debug_mask);

/* Load-time only flag: boot the firmware in factory test mode. */
bool ath12k_ftm_mode;
module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
EXPORT_SYMBOL(ath12k_ftm_mode);

/* protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);

static DEFINE_MUTEX(ath12k_hw_group_mutex);
41 
/* Per-memory-profile tunables: vdev/client limits and data-path ring and
 * descriptor counts, indexed by the QMI memory mode. The 512 MB profile
 * scales everything down for low-memory platforms.
 */
static const struct
ath12k_mem_profile_based_param ath12k_mem_profile_based_param[] = {
[ATH12K_QMI_MEMORY_MODE_DEFAULT] = {
		.num_vdevs = 17,
		.max_client_single = 512,
		.max_client_dbs = 128,
		.max_client_dbs_sbs = 128,
		.dp_params = {
			.tx_comp_ring_size = 32768,
			.rxdma_monitor_buf_ring_size = 4096,
			/* NOTE(review): 8092 (not 8192) matches the value used
			 * upstream; presumably intentional — confirm before
			 * "fixing".
			 */
			.rxdma_monitor_dst_ring_size = 8092,
			.num_pool_tx_desc = 32768,
			.rx_desc_count = 12288,
		},
	},
[ATH12K_QMI_MEMORY_MODE_LOW_512_M] = {
		.num_vdevs = 9,
		.max_client_single = 128,
		.max_client_dbs = 64,
		.max_client_dbs_sbs = 64,
		.dp_params = {
			.tx_comp_ring_size = 16384,
			.rxdma_monitor_buf_ring_size = 256,
			.rxdma_monitor_dst_ring_size = 512,
			.num_pool_tx_desc = 16384,
			.rx_desc_count = 6144,
		},
	},
};
71 
ath12k_core_rfkill_config(struct ath12k_base * ab)72 static int ath12k_core_rfkill_config(struct ath12k_base *ab)
73 {
74 	struct ath12k *ar;
75 	int ret = 0, i;
76 
77 	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
78 		return 0;
79 
80 	if (ath12k_acpi_get_disable_rfkill(ab))
81 		return 0;
82 
83 	for (i = 0; i < ab->num_radios; i++) {
84 		ar = ab->pdevs[i].ar;
85 
86 		ret = ath12k_mac_rfkill_config(ar);
87 		if (ret && ret != -EOPNOTSUPP) {
88 			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
89 			return ret;
90 		}
91 	}
92 
93 	return ret;
94 }
95 
96 /* Check if we need to continue with suspend/resume operation.
97  * Return:
98  *	a negative value: error happens and don't continue.
99  *	0:  no error but don't continue.
100  *	positive value: no error and do continue.
101  */
ath12k_core_continue_suspend_resume(struct ath12k_base * ab)102 static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
103 {
104 	struct ath12k *ar;
105 
106 	if (!ab->hw_params->supports_suspend)
107 		return -EOPNOTSUPP;
108 
109 	/* so far single_pdev_only chips have supports_suspend as true
110 	 * so pass 0 as a dummy pdev_id here.
111 	 */
112 	ar = ab->pdevs[0].ar;
113 	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
114 		return 0;
115 
116 	return 1;
117 }
118 
/* System suspend handler: flush pending tx on every radio, then signal
 * restart completion so a subsequent resume cannot block forever.
 * Returns 0 on success (or when suspend does not apply), negative errno
 * on error.
 */
int ath12k_core_suspend(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret, i;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	/* Wait for queued tx to drain on each radio, holding the wiphy
	 * lock across the wait.
	 */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		if (!ar)
			continue;

		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);

		ret = ath12k_mac_wait_tx_complete(ar);
		if (ret) {
			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
			return ret;
		}

		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
	}

	/* PM framework skips suspend_late/resume_early callbacks
	 * if other devices report errors in their suspend callbacks.
	 * However ath12k_core_resume() would still be called because
	 * here we return success thus kernel put us on dpm_suspended_list.
	 * Since we won't go through a power down/up cycle, there is
	 * no chance to call complete(&ab->restart_completed) in
	 * ath12k_core_restart(), making ath12k_core_resume() timeout.
	 * So call it here to avoid this issue. This also works in case
	 * no error happens thus suspend_late/resume_early get called,
	 * because it will be reinitialized in ath12k_core_resume_early().
	 */
	complete(&ab->restart_completed);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend);
161 
/* Late suspend stage: quiesce ACPI notifications, mask both data-path
 * and copy-engine interrupts, then power the hardware down.
 */
int ath12k_core_suspend_late(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	ath12k_acpi_stop(ab);

	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	/* 'true' selects the suspend flavour of power-down so resume can
	 * bring the device back — TODO confirm against hif op docs.
	 */
	ath12k_hif_power_down(ab, true);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend_late);
180 
/* Early resume stage: re-arm the restart completion and power the
 * hardware back up; the firmware restart completion is awaited later in
 * ath12k_core_resume().
 */
int ath12k_core_resume_early(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	/* Must precede power-up: ath12k_core_suspend() completed this
	 * object, see the comment there.
	 */
	reinit_completion(&ab->restart_completed);
	ret = ath12k_hif_power_up(ab);
	if (ret)
		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);

	return ret;
}
EXPORT_SYMBOL(ath12k_core_resume_early);
197 
/* Final resume stage: wait for the firmware restart triggered by
 * ath12k_core_resume_early() to complete, bounded by
 * ATH12K_RESET_TIMEOUT_HZ.
 */
int ath12k_core_resume(struct ath12k_base *ab)
{
	long time_left;
	int ret;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	time_left = wait_for_completion_timeout(&ab->restart_completed,
						ATH12K_RESET_TIMEOUT_HZ);
	if (time_left == 0) {
		ath12k_warn(ab, "timeout while waiting for restart complete");
		return -ETIMEDOUT;
	}

	return 0;
}
EXPORT_SYMBOL(ath12k_core_resume);
217 
/* Compose the board-file lookup name from bus/chip identifiers.
 *
 * @with_variant: append ",variant=<bdf_ext>" when a variant string exists
 * @bus_type_mode: emit only "bus=<bus>" (BUS_AND_BOARD search mode)
 * @with_default: use ATH12K_BOARD_ID_DEFAULT instead of the QMI board id
 */
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					   size_t name_len, bool with_variant,
					   bool bus_type_mode, bool with_default)
{
	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = {};

	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
		scnprintf(variant, sizeof(variant), ",variant=%s",
			  ab->qmi.target.bdf_ext);

	switch (ab->id.bdf_search) {
	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
		if (bus_type_mode)
			scnprintf(name, name_len,
				  "bus=%s",
				  ath12k_bus_str(ab->hif.bus));
		else
			scnprintf(name, name_len,
				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
				  ath12k_bus_str(ab->hif.bus),
				  ab->id.vendor, ab->id.device,
				  ab->id.subsystem_vendor,
				  ab->id.subsystem_device,
				  ab->qmi.target.chip_id,
				  ab->qmi.target.board_id,
				  variant);
		break;
	default:
		/* Default search mode: identify by bus + QMI chip/board id */
		scnprintf(name, name_len,
			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
			  ath12k_bus_str(ab->hif.bus),
			  ab->qmi.target.chip_id,
			  with_default ?
			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
			  variant);
		break;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);

	return 0;
}
261 
/* Primary board name: full identifiers plus the BDF variant string. */
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					 size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
}
267 
/* Fallback board name: no variant, default board id substituted. */
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
}
273 
/* Bus-only board name ("bus=<bus>"), used for the regdb default lookup. */
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
}
279 
/* Request a firmware file relative to this device's firmware directory.
 * Returns the firmware or an ERR_PTR; the caller owns the result and
 * must release it with release_firmware().
 */
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
						    const char *file)
{
	const struct firmware *fw;
	char path[100];
	int ret;

	if (!file)
		return ERR_PTR(-ENOENT);

	ath12k_core_create_firmware_path(ab, file, path, sizeof(path));

	/* _nowarn variant: a missing file is an expected fallback case,
	 * so don't spam the kernel log.
	 */
	ret = firmware_request_nowarn(&fw, path, ab->dev);
	if (ret)
		return ERR_PTR(ret);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
		   path, fw->size);

	return fw;
}
301 
ath12k_core_free_bdf(struct ath12k_base * ab,struct ath12k_board_data * bd)302 void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
303 {
304 	if (!IS_ERR(bd->fw))
305 		release_firmware(bd->fw);
306 
307 	memset(bd, 0, sizeof(*bd));
308 }
309 
/* Scan one ATH12K_BD_IE_BOARD/ATH12K_BD_IE_REGDB container for an entry
 * matching @boardname and, on a match, point @bd at the entry's payload
 * (borrowed from bd->fw, no copy made).
 *
 * Returns 0 on match, -ENOENT when no entry matches, -EINVAL on a
 * malformed container.
 */
static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
					 struct ath12k_board_data *bd,
					 const void *buf, size_t buf_len,
					 const char *boardname,
					 int ie_id,
					 int name_id,
					 int data_id)
{
	const struct ath12k_fw_ie *hdr;
	bool name_match_found;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	name_match_found = false;

	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath12k_fw_ie)) {
		hdr = buf;
		board_ie_id = le32_to_cpu(hdr->id);
		board_ie_len = le32_to_cpu(hdr->len);
		board_ie_data = hdr->data;

		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		/* Payloads are 4-byte padded; the padded length must fit
		 * in the remaining buffer.
		 */
		if (buf_len < ALIGN(board_ie_len, 4)) {
			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
				   ath12k_bd_ie_type_str(ie_id),
				   buf_len, ALIGN(board_ie_len, 4));
			ret = -EINVAL;
			goto out;
		}

		if (board_ie_id == name_id) {
			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
					board_ie_data, board_ie_len);

			/* Name IEs are not NUL terminated: compare length
			 * first, then the raw bytes.
			 */
			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, strlen(boardname));
			if (ret)
				goto next;

			/* The next data IE belongs to this matched name */
			name_match_found = true;
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found match %s for name '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);
		} else if (board_ie_id == data_id) {
			if (!name_match_found)
				/* no match found */
				goto next;

			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found %s for '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);

			bd->data = board_ie_data;
			bd->len = board_ie_len;

			ret = 0;
			goto out;
		} else {
			ath12k_warn(ab, "unknown %s id found: %d\n",
				    ath12k_bd_ie_type_str(ie_id),
				    board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = ALIGN(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	ret = -ENOENT;

out:
	return ret;
}
394 
/* Fetch board/regdb data from the API 2 container file
 * (ATH12K_BOARD_API2_FILE): verify the file magic, then walk the
 * top-level IEs looking for @ie_id_match and hand the inner container
 * to ath12k_core_parse_bd_ie_board(). On success bd->data/bd->len
 * borrow from bd->fw; on any failure bd is freed and reset.
 */
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
					      struct ath12k_board_data *bd,
					      const char *boardname,
					      int ie_id_match,
					      int name_id,
					      int data_id)
{
	size_t len, magic_len;
	const u8 *data;
	char *filename, filepath[100];
	size_t ie_len;
	struct ath12k_fw_ie *hdr;
	int ret, ie_id;

	filename = ATH12K_BOARD_API2_FILE;

	/* A previous attempt (e.g. with another board name) may already
	 * have loaded the file.
	 */
	if (!bd->fw)
		bd->fw = ath12k_core_firmware_request(ab, filename);

	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	data = bd->fw->data;
	len = bd->fw->size;

	/* Only used for log messages below */
	ath12k_core_create_firmware_path(ab, filename,
					 filepath, sizeof(filepath));

	/* magic has extra null byte padded */
	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
		ath12k_err(ab, "found invalid board magic\n");
		ret = -EINVAL;
		goto err;
	}

	/* magic is padded to 4 bytes */
	magic_len = ALIGN(magic_len, 4);
	if (len < magic_len) {
		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	data += magic_len;
	len -= magic_len;

	/* Walk top-level IEs until the matching container is found */
	while (len > sizeof(struct ath12k_fw_ie)) {
		hdr = (struct ath12k_fw_ie *)data;
		ie_id = le32_to_cpu(hdr->id);
		ie_len = le32_to_cpu(hdr->len);

		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < ALIGN(ie_len, 4)) {
			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
				   ie_id, ie_len, len);
			ret = -EINVAL;
			goto err;
		}

		if (ie_id == ie_id_match) {
			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
							    ie_len,
							    boardname,
							    ie_id_match,
							    name_id,
							    data_id);
			if (ret == -ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				goto err;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = ALIGN(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	if (!bd->data || !bd->len) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to fetch %s for %s from %s\n",
			   ath12k_bd_ie_type_str(ie_id_match),
			   boardname, filepath);
		ret = -ENODATA;
		goto err;
	}

	return 0;

err:
	ath12k_core_free_bdf(ab, bd);
	return ret;
}
505 
/* API 1 board fetch: the file is a raw board-data blob with no IE
 * container, so point @bd straight at the firmware contents.
 */
int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
				       struct ath12k_board_data *bd,
				       char *filename)
{
	bd->fw = ath12k_core_firmware_request(ab, filename);
	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	bd->data = bd->fw->data;
	bd->len = bd->fw->size;

	return 0;
}
519 
#define BOARD_NAME_SIZE 200
/* Locate board data for this device, trying in order:
 *   1. API 2 container with the full board name (with variant),
 *   2. API 2 container with the fallback (default board id) name,
 *   3. legacy API 1 board file.
 */
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
	char *filename, filepath[100];
	int bd_api;
	int ret;

	filename = ATH12K_BOARD_API2_FILE;

	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
	if (ret) {
		ath12k_err(ab, "failed to create board name: %d", ret);
		return ret;
	}

	bd_api = 2;
	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
						     sizeof(fallback_boardname));
	if (ret) {
		ath12k_err(ab, "failed to create fallback board name: %d", ret);
		return ret;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	/* Last resort: legacy single-blob board file */
	bd_api = 1;
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
	if (ret) {
		ath12k_core_create_firmware_path(ab, filename,
						 filepath, sizeof(filepath));
		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
			   boardname, filepath);
		/* Log the fallback name too, unless identical to the first */
		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
				   fallback_boardname, filepath);

		ath12k_err(ab, "failed to fetch board.bin from %s\n",
			   ab->hw_params->fw.dir);
		return ret;
	}

success:
	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
	return 0;
}
578 
/* Locate the regulatory database, trying in order: API 2 container with
 * the full board name, API 2 with the bus-only default name, then the
 * standalone regdb file. Failures are debug-logged only; regdb is
 * optional.
 */
int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
	int ret;

	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
						     BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create default board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	/* Final fallback: standalone regdb file in the firmware dir */
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
	if (ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);

exit:
	if (!ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");

	return ret;
}
624 
ath12k_core_get_max_station_per_radio(struct ath12k_base * ab)625 u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
626 {
627 	if (ab->num_radios == 2)
628 		return TARGET_NUM_STATIONS(ab, DBS);
629 	if (ab->num_radios == 3)
630 		return TARGET_NUM_STATIONS(ab, DBS_SBS);
631 	return TARGET_NUM_STATIONS(ab, SINGLE);
632 }
633 
/* Peer-table size per radio: one entry per station plus the per-vdev
 * self peers.
 */
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
{
	return ath12k_core_get_max_station_per_radio(ab) + TARGET_NUM_VDEVS(ab);
}
EXPORT_SYMBOL(ath12k_core_get_max_peers_per_radio);
639 
ath12k_core_get_reserved_mem(struct ath12k_base * ab,int index)640 struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
641 						  int index)
642 {
643 	struct device *dev = ab->dev;
644 	struct reserved_mem *rmem;
645 	struct device_node *node;
646 
647 	node = of_parse_phandle(dev->of_node, "memory-region", index);
648 	if (!node) {
649 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
650 			   "failed to parse memory-region for index %d\n", index);
651 		return NULL;
652 	}
653 
654 	rmem = of_reserved_mem_lookup(node);
655 	of_node_put(node);
656 	if (!rmem) {
657 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
658 			   "unable to get memory-region for index %d\n", index);
659 		return NULL;
660 	}
661 
662 	return rmem;
663 }
664 
/* Take this device's reference on its hw group, bumping the group's
 * started-device count. Idempotent: a repeat call on an attached device
 * only logs. Caller must hold ag->mutex.
 */
static inline
void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;

	lockdep_assert_held(&ag->mutex);

	if (ab->hw_group_ref) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
			   ag->id);
		return;
	}

	ab->hw_group_ref = true;
	ag->num_started++;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
		   ag->id, ag->num_started);
}
684 
/* Drop this device's reference on its hw group, decrementing the
 * group's started-device count. Idempotent: a repeat call on a detached
 * device only logs. Caller must hold ag->mutex.
 */
static inline
void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;

	lockdep_assert_held(&ag->mutex);

	if (!ab->hw_group_ref) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
			   ag->id);
		return;
	}

	ab->hw_group_ref = false;
	ag->num_started--;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
		   ag->id, ag->num_started);
}
704 
/* Tear down what ath12k_core_start() set up, in reverse order. */
static void ath12k_core_stop(struct ath12k_base *ab)
{
	ath12k_link_sta_rhash_tbl_destroy(ab);

	ath12k_core_to_group_ref_put(ab);

	/* After a crash the firmware is already gone; skip the QMI stop
	 * handshake in that case.
	 */
	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_qmi_firmware_stop(ab);

	ath12k_acpi_stop(ab);

	ath12k_dp_rx_pdev_reo_cleanup(ab);
	ath12k_hif_stop(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));

	/* De-Init of components as needed */
}
723 
/* dmi_walk() callback: extract the regulatory country code and the BDF
 * variant string from the vendor SMBIOS entry of type
 * ATH12K_SMBIOS_BDF_EXT_TYPE, storing them in ab->new_alpha2 and
 * ab->qmi.target.bdf_ext respectively.
 */
static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
	struct ath12k_base *ab = data;
	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
	ssize_t copied;
	size_t len;
	int i;

	/* A variant was already found in an earlier entry; keep it */
	if (ab->qmi.target.bdf_ext[0] != '\0')
		return;

	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
		return;

	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "wrong smbios bdf ext type length (%d).\n",
			   hdr->length);
		return;
	}

	/* new_alpha2 is read elsewhere under base_lock */
	spin_lock_bh(&ab->base_lock);

	switch (smbios->country_code_flag) {
	case ATH12K_SMBIOS_CC_ISO:
		/* Two ISO alpha2 characters packed in a 16-bit field */
		ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
		ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
			   ab->new_alpha2[0], ab->new_alpha2[1]);
		break;
	case ATH12K_SMBIOS_CC_WW:
		/* "00" selects the worldwide regulatory domain */
		ab->new_alpha2[0] = '0';
		ab->new_alpha2[1] = '0';
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
			   smbios->country_code_flag);
		break;
	}

	spin_unlock_bh(&ab->base_lock);

	if (!smbios->bdf_enabled) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
		return;
	}

	/* Only one string exists (per spec) */
	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant magic does not match.\n");
		return;
	}

	/* Reject variants containing non-printable or non-ASCII bytes */
	len = min_t(size_t,
		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
	for (i = 0; i < len; i++) {
		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "bdf variant name contains non ascii chars.\n");
			return;
		}
	}

	/* Copy extension name without magic prefix */
	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
			 sizeof(ab->qmi.target.bdf_ext));
	if (copied < 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant string is longer than the buffer can accommodate\n");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT,
		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
}
803 
ath12k_core_check_smbios(struct ath12k_base * ab)804 int ath12k_core_check_smbios(struct ath12k_base *ab)
805 {
806 	ab->qmi.target.bdf_ext[0] = '\0';
807 	dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
808 
809 	if (ab->qmi.target.bdf_ext[0] == '\0')
810 		return -ENODATA;
811 
812 	return 0;
813 }
814 
/* First-stage SoC bring-up: start the QMI service, create debugfs
 * entries and power the device up. The remaining start sequence runs
 * once QMI reports firmware ready.
 */
static int ath12k_core_soc_create(struct ath12k_base *ab)
{
	int ret;

	if (ath12k_ftm_mode) {
		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
		ath12k_info(ab, "Booting in ftm mode\n");
	}

	ret = ath12k_qmi_init_service(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
		return ret;
	}

	ath12k_debugfs_soc_create(ab);

	ret = ath12k_hif_power_up(ab);
	if (ret) {
		ath12k_err(ab, "failed to power up :%d\n", ret);
		goto err_qmi_deinit;
	}

	ath12k_debugfs_pdev_create(ab);

	return 0;

err_qmi_deinit:
	/* Unwind in reverse order of creation */
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
	return ret;
}
847 
/* Counterpart of ath12k_core_soc_create(): power down and release the
 * reg tables, debugfs entries and the QMI service.
 */
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
	ath12k_hif_power_down(ab, false);
	ath12k_reg_free(ab);
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
}
855 
/* Per-device pdev setup: currently only attaches the DP pdevs.
 * Returns 0 on success or the DP error code.
 */
static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
	int ret = ath12k_dp_pdev_alloc(ab);

	if (ret)
		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);

	return ret;
}
868 
/* Counterpart of ath12k_core_pdev_create(). */
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
	ath12k_dp_pdev_free(ab);
}
873 
/* Bring one device fully up after firmware is ready: WMI/HTC/HIF
 * attach, HTT/WMI service handshakes, REO setup, firmware init command
 * and the group attach. The call order is firmware-mandated; unwinding
 * on error happens via the goto ladder at the bottom. Caller must hold
 * ab->core_lock.
 */
static int ath12k_core_start(struct ath12k_base *ab)
{
	int ret;

	lockdep_assert_held(&ab->core_lock);

	ret = ath12k_wmi_attach(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
		return ret;
	}

	ret = ath12k_htc_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init htc: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_hif_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start HIF: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_htc_wait_target(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_dp_htt_connect(ath12k_ab_to_dp(ab));
	if (ret) {
		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_connect(ab);
	if (ret) {
		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_htc_start(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to start HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_wait_for_service_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
			   ret);
		goto err_hif_stop;
	}

	ath12k_hal_cc_config(ab);

	ret = ath12k_dp_rx_pdev_reo_setup(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_cmd_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
		goto err_reo_cleanup;
	}

	ret = ath12k_wmi_wait_for_unified_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	/* put hardware to DBS mode */
	if (ab->hw_params->single_pdev_only) {
		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
		if (ret) {
			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
			goto err_reo_cleanup;
		}
	}

	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
	if (ret) {
		ath12k_err(ab, "failed to send htt version request message: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	ath12k_acpi_set_dsm_func(ab);

	/* Indicate the core start in the appropriate group */
	ath12k_core_to_group_ref_get(ab);

	ret = ath12k_link_sta_rhash_tbl_init(ab);
	if (ret) {
		ath12k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
		goto err_reo_cleanup;
	}

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);
err_hif_stop:
	ath12k_hif_stop(ab);
err_wmi_detach:
	ath12k_wmi_detach(ab);
	return ret;
}
987 
/* Per-device part of group stop: disable interrupts and destroy the
 * pdevs, serialized under the core lock.
 */
static void ath12k_core_device_cleanup(struct ath12k_base *ab)
{
	mutex_lock(&ab->core_lock);

	ath12k_hif_irq_disable(ab);
	ath12k_core_pdev_destroy(ab);

	mutex_unlock(&ab->core_lock);
}
997 
/* Stop a whole hw group: unregister from mac80211, clean each member
 * device up in reverse order, then free the mac allocations. Caller
 * must hold ag->mutex.
 */
static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	lockdep_assert_held(&ag->mutex);

	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

	ath12k_mac_unregister(ag);

	/* Tear devices down in reverse of the bring-up order */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ath12k_core_device_cleanup(ab);
	}

	ath12k_mac_destroy(ag);
}
1021 
ath12k_get_num_partner_link(struct ath12k * ar)1022 u8 ath12k_get_num_partner_link(struct ath12k *ar)
1023 {
1024 	struct ath12k_base *partner_ab, *ab = ar->ab;
1025 	struct ath12k_hw_group *ag = ab->ag;
1026 	struct ath12k_pdev *pdev;
1027 	u8 num_link = 0;
1028 	int i, j;
1029 
1030 	lockdep_assert_held(&ag->mutex);
1031 
1032 	for (i = 0; i < ag->num_devices; i++) {
1033 		partner_ab = ag->ab[i];
1034 
1035 		for (j = 0; j < partner_ab->num_radios; j++) {
1036 			pdev = &partner_ab->pdevs[j];
1037 
1038 			/* Avoid the self link */
1039 			if (ar == pdev->ar)
1040 				continue;
1041 
1042 			num_link++;
1043 		}
1044 	}
1045 
1046 	return num_link;
1047 }
1048 
/* Send the WMI MLO-ready command for one radio; a no-op when the radio
 * has no partner links in its group.
 */
static int __ath12k_mac_mlo_ready(struct ath12k *ar)
{
	u8 num_link = ath12k_get_num_partner_link(ar);
	int ret;

	/* Standalone radio: MLO handshake not needed */
	if (num_link == 0)
		return 0;

	ret = ath12k_wmi_mlo_ready(ar);
	if (ret) {
		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
			   ar->pdev_idx, ret);
		return ret;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
		   ar->pdev_idx);

	return 0;
}
1069 
ath12k_mac_mlo_ready(struct ath12k_hw_group * ag)1070 int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
1071 {
1072 	struct ath12k_hw *ah;
1073 	struct ath12k *ar;
1074 	int ret;
1075 	int i, j;
1076 
1077 	for (i = 0; i < ag->num_hw; i++) {
1078 		ah = ag->ah[i];
1079 		if (!ah)
1080 			continue;
1081 
1082 		for_each_ar(ah, ar, j) {
1083 			ar = &ah->radio[j];
1084 			ret = __ath12k_mac_mlo_ready(ar);
1085 			if (ret)
1086 				return ret;
1087 		}
1088 	}
1089 
1090 	return 0;
1091 }
1092 
ath12k_core_mlo_setup(struct ath12k_hw_group * ag)1093 static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
1094 {
1095 	int ret, i;
1096 
1097 	if (!ag->mlo_capable)
1098 		return 0;
1099 
1100 	ret = ath12k_mac_mlo_setup(ag);
1101 	if (ret)
1102 		return ret;
1103 
1104 	for (i = 0; i < ag->num_devices; i++)
1105 		ath12k_dp_partner_cc_init(ag->ab[i]);
1106 
1107 	ret = ath12k_mac_mlo_ready(ag);
1108 	if (ret)
1109 		goto err_mlo_teardown;
1110 
1111 	return 0;
1112 
1113 err_mlo_teardown:
1114 	ath12k_mac_mlo_teardown(ag);
1115 
1116 	return ret;
1117 }
1118 
/* Bring the whole hardware group up. On the first start this allocates
 * MAC state, performs MLO setup and registers with mac80211; on a
 * re-start (group already registered) only the per-device pdev creation
 * part runs. Caller must hold ag->mutex.
 *
 * Returns 0 on success or a negative errno. A failure after the group
 * was registered stops the whole group again.
 */
static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int ret, i;

	lockdep_assert_held(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
		goto core_pdev_create;

	ret = ath12k_mac_allocate(ag);
	if (WARN_ON(ret))
		return ret;

	ret = ath12k_core_mlo_setup(ag);
	if (WARN_ON(ret))
		goto err_mac_destroy;

	ret = ath12k_mac_register(ag);
	if (WARN_ON(ret))
		goto err_mlo_teardown;

	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

core_pdev_create:
	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ret = ath12k_core_pdev_create(ab);
		if (ret) {
			ath12k_err(ab, "failed to create pdev core %d\n", ret);
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		ath12k_hif_irq_enable(ab);

		/* -EOPNOTSUPP only means the target has no rfkill support;
		 * that is not fatal.
		 */
		ret = ath12k_core_rfkill_config(ab);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

err:
	/* Group-level stop also cleans up the devices started above */
	ath12k_core_hw_group_stop(ag);
	return ret;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

err_mac_destroy:
	ath12k_mac_destroy(ag);

	return ret;
}
1185 
ath12k_core_start_firmware(struct ath12k_base * ab,enum ath12k_firmware_mode mode)1186 static int ath12k_core_start_firmware(struct ath12k_base *ab,
1187 				      enum ath12k_firmware_mode mode)
1188 {
1189 	int ret;
1190 
1191 	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
1192 				    &ab->qmi.ce_cfg.shadow_reg_v3_len);
1193 
1194 	ret = ath12k_qmi_firmware_start(ab, mode);
1195 	if (ret) {
1196 		ath12k_err(ab, "failed to send firmware start: %d\n", ret);
1197 		return ret;
1198 	}
1199 
1200 	return ret;
1201 }
1202 
1203 static inline
ath12k_core_hw_group_start_ready(struct ath12k_hw_group * ag)1204 bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
1205 {
1206 	lockdep_assert_held(&ag->mutex);
1207 
1208 	return (ag->num_started == ag->num_devices);
1209 }
1210 
ath12k_fw_stats_pdevs_free(struct list_head * head)1211 static void ath12k_fw_stats_pdevs_free(struct list_head *head)
1212 {
1213 	struct ath12k_fw_stats_pdev *i, *tmp;
1214 
1215 	list_for_each_entry_safe(i, tmp, head, list) {
1216 		list_del(&i->list);
1217 		kfree(i);
1218 	}
1219 }
1220 
ath12k_fw_stats_bcn_free(struct list_head * head)1221 void ath12k_fw_stats_bcn_free(struct list_head *head)
1222 {
1223 	struct ath12k_fw_stats_bcn *i, *tmp;
1224 
1225 	list_for_each_entry_safe(i, tmp, head, list) {
1226 		list_del(&i->list);
1227 		kfree(i);
1228 	}
1229 }
1230 
ath12k_fw_stats_vdevs_free(struct list_head * head)1231 static void ath12k_fw_stats_vdevs_free(struct list_head *head)
1232 {
1233 	struct ath12k_fw_stats_vdev *i, *tmp;
1234 
1235 	list_for_each_entry_safe(i, tmp, head, list) {
1236 		list_del(&i->list);
1237 		kfree(i);
1238 	}
1239 }
1240 
ath12k_fw_stats_init(struct ath12k * ar)1241 void ath12k_fw_stats_init(struct ath12k *ar)
1242 {
1243 	INIT_LIST_HEAD(&ar->fw_stats.vdevs);
1244 	INIT_LIST_HEAD(&ar->fw_stats.pdevs);
1245 	INIT_LIST_HEAD(&ar->fw_stats.bcn);
1246 	init_completion(&ar->fw_stats_complete);
1247 	init_completion(&ar->fw_stats_done);
1248 }
1249 
/* Release every firmware stats entry accumulated on the three lists.
 * Any locking around @stats is the caller's responsibility.
 */
void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
{
	ath12k_fw_stats_pdevs_free(&stats->pdevs);
	ath12k_fw_stats_vdevs_free(&stats->vdevs);
	ath12k_fw_stats_bcn_free(&stats->bcn);
}
1256 
/* Drop all collected firmware stats and reset the vdev-received counter
 * under the radio's data_lock.
 */
void ath12k_fw_stats_reset(struct ath12k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ath12k_fw_stats_free(&ar->fw_stats);
	ar->fw_stats.num_vdev_recvd = 0;
	spin_unlock_bh(&ar->data_lock);
}
1264 
ath12k_core_trigger_partner(struct ath12k_base * ab)1265 static void ath12k_core_trigger_partner(struct ath12k_base *ab)
1266 {
1267 	struct ath12k_hw_group *ag = ab->ag;
1268 	struct ath12k_base *partner_ab;
1269 	bool found = false;
1270 	int i;
1271 
1272 	for (i = 0; i < ag->num_devices; i++) {
1273 		partner_ab = ag->ab[i];
1274 		if (!partner_ab)
1275 			continue;
1276 
1277 		if (found)
1278 			ath12k_qmi_trigger_host_cap(partner_ab);
1279 
1280 		found = (partner_ab == ab);
1281 	}
1282 }
1283 
/* Called when QMI reports the firmware is ready: start the firmware,
 * initialize CE and DP, then start this device's core. If this device
 * is the last of its group to come up, the whole group is started;
 * otherwise the next partner device's host-cap handshake is triggered.
 */
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	int ret, i;

	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
	if (ret) {
		ath12k_err(ab, "failed to start firmware: %d\n", ret);
		return ret;
	}

	ret = ath12k_ce_init_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
		goto err_firmware_stop;
	}

	ret = ath12k_dp_cmn_device_init(ath12k_ab_to_dp(ab));
	if (ret) {
		ath12k_err(ab, "failed to init DP: %d\n", ret);
		goto err_firmware_stop;
	}

	/* Lock order: group mutex first, then the per-device core lock */
	mutex_lock(&ag->mutex);
	mutex_lock(&ab->core_lock);

	ret = ath12k_core_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start core: %d\n", ret);
		goto err_deinit;
	}

	mutex_unlock(&ab->core_lock);

	if (ath12k_core_hw_group_start_ready(ag)) {
		ret = ath12k_core_hw_group_start(ag);
		if (ret) {
			ath12k_warn(ab, "unable to start hw group\n");
			goto err_core_stop;
		}
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
	} else {
		ath12k_core_trigger_partner(ab);
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_core_stop:
	/* Stop every started device of the group, in reverse order.
	 * Note: @ab is reused as the loop cursor from here on.
	 */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}
	mutex_unlock(&ag->mutex);
	goto exit;

err_deinit:
	ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));
	mutex_unlock(&ab->core_lock);
	mutex_unlock(&ag->mutex);

err_firmware_stop:
	ath12k_qmi_firmware_stop(ab);

exit:
	return ret;
}
1357 
/* Reinitialize the driver after a firmware crash: tear down all
 * firmware-facing state (DP pdevs, CE pipes, WMI, REO, SRNG), then run
 * the normal firmware-ready bring-up path again.
 */
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
	int ret, total_vdev;

	mutex_lock(&ab->core_lock);
	ath12k_link_sta_rhash_tbl_destroy(ab);
	ath12k_dp_pdev_free(ab);
	ath12k_ce_cleanup_pipes(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_rx_pdev_reo_cleanup(ab);
	mutex_unlock(&ab->core_lock);

	ath12k_dp_cmn_device_deinit(ath12k_ab_to_dp(ab));
	ath12k_hal_srng_deinit(ab);
	/* Mark every vdev id free again. NOTE(review): assumes
	 * num_radios * TARGET_NUM_VDEVS() stays below 64 so the shift is
	 * well defined - confirm against the target configuration.
	 */
	total_vdev = ab->num_radios * TARGET_NUM_VDEVS(ab);
	ab->free_vdev_map = (1LL << total_vdev) - 1;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		return ret;

	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	ret = ath12k_core_qmi_firmware_ready(ab);
	if (ret)
		goto err_hal_srng_deinit;

	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);

	return 0;

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);
	return ret;
}
1393 
ath12k_rfkill_work(struct work_struct * work)1394 static void ath12k_rfkill_work(struct work_struct *work)
1395 {
1396 	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1397 	struct ath12k_hw_group *ag = ab->ag;
1398 	struct ath12k *ar;
1399 	struct ath12k_hw *ah;
1400 	struct ieee80211_hw *hw;
1401 	bool rfkill_radio_on;
1402 	int i, j;
1403 
1404 	spin_lock_bh(&ab->base_lock);
1405 	rfkill_radio_on = ab->rfkill_radio_on;
1406 	spin_unlock_bh(&ab->base_lock);
1407 
1408 	for (i = 0; i < ag->num_hw; i++) {
1409 		ah = ath12k_ag_to_ah(ag, i);
1410 		if (!ah)
1411 			continue;
1412 
1413 		for (j = 0; j < ah->num_radio; j++) {
1414 			ar = &ah->radio[j];
1415 			if (!ar)
1416 				continue;
1417 
1418 			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1419 		}
1420 
1421 		hw = ah->hw;
1422 		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1423 	}
1424 }
1425 
/* Quiesce one radio after a firmware crash: finish/cancel outstanding
 * work, remove the pdev from the active RCU array and forget all
 * vdev/vif bookkeeping. Must be called with the wiphy lock held.
 */
void ath12k_core_halt(struct ath12k *ar)
{
	struct list_head *pos, *n;
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ar->num_created_vdevs = 0;
	ar->allocated_vdev_map = 0;

	ath12k_mac_scan_finish(ar);
	ath12k_mac_peer_cleanup_all(ar);
	cancel_delayed_work_sync(&ar->scan.timeout);
	cancel_work_sync(&ar->regd_update_work);
	cancel_work_sync(&ar->regd_channel_update_work);
	cancel_work_sync(&ab->rfkill_work);
	cancel_work_sync(&ab->update_11d_work);

	/* Hide the pdev from the data path before tearing vifs down */
	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
	synchronize_rcu();

	spin_lock_bh(&ar->data_lock);
	list_for_each_safe(pos, n, &ar->arvifs)
		list_del_init(pos);
	spin_unlock_bh(&ar->data_lock);

	/* Fresh idr for the pending tx mgmt frames of the next life cycle */
	idr_init(&ar->txmgmt_idr);
}
1454 
/* First phase of crash recovery: bump the crash counter, stop the
 * mac80211 queues and complete every outstanding wait object so that no
 * caller blocks forever on firmware responses that will never arrive.
 */
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_hw *ah;
	int i, j;

	spin_lock_bh(&ab->base_lock);
	ab->stats.fw_crash_counter++;
	spin_unlock_bh(&ab->base_lock);

	if (ab->is_reset)
		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
		    ah->state == ATH12K_HW_STATE_TM)
			continue;

		wiphy_lock(ah->hw->wiphy);

		/* If queue 0 is stopped, it is safe to assume that all
		 * other queues are stopped by driver via
		 * ieee80211_stop_queues() below. This means, there is
		 * no need to stop it again and hence continue
		 */
		if (ieee80211_queue_stopped(ah->hw, 0)) {
			wiphy_unlock(ah->hw->wiphy);
			continue;
		}

		ieee80211_stop_queues(ah->hw);

		for (j = 0; j < ah->num_radio; j++) {
			ar = &ah->radio[j];

			ath12k_mac_drain_tx(ar);
			ar->state_11d = ATH12K_11D_IDLE;
			/* Release everything that may be waiting on firmware */
			complete(&ar->completed_11d_scan);
			complete(&ar->scan.started);
			complete_all(&ar->scan.completed);
			complete(&ar->scan.on_channel);
			complete(&ar->peer_assoc_done);
			complete(&ar->peer_delete_done);
			complete(&ar->install_key_done);
			complete(&ar->vdev_setup_done);
			complete(&ar->vdev_delete_done);
			complete(&ar->bss_survey_done);
			complete_all(&ar->regd_update_completed);

			wake_up(&ar->dp.tx_empty_waitq);
			/* Drop pending tx mgmt frames and wake their waiters */
			idr_for_each(&ar->txmgmt_idr,
				     ath12k_mac_tx_mgmt_pending_free, ar);
			idr_destroy(&ar->txmgmt_idr);
			wake_up(&ar->txmgmt_empty_waitq);

			ar->monitor_vdev_id = -1;
			ar->monitor_vdev_created = false;
			ar->monitor_started = false;
		}

		wiphy_unlock(ah->hw->wiphy);
	}

	wake_up(&ab->wmi_ab.tx_credits_wq);
	wake_up(&ab->peer_mapping_wq);
}
1523 
/* Worker that pushes a newly learned country code (ab->new_alpha2) to
 * the firmware of every radio on this device.
 */
static void ath12k_update_11d(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
	struct ath12k *ar;
	struct ath12k_pdev *pdev;
	struct wmi_set_current_country_arg arg = {};
	int ret, i;

	/* Snapshot the alpha2 under base_lock before using it unlocked */
	spin_lock_bh(&ab->base_lock);
	memcpy(&arg.alpha2, &ab->new_alpha2, 2);
	spin_unlock_bh(&ab->base_lock);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
		   arg.alpha2[0], arg.alpha2[1]);

	for (i = 0; i < ab->num_radios; i++) {
		pdev = &ab->pdevs[i];
		ar = pdev->ar;

		memcpy(&ar->alpha2, &arg.alpha2, 2);

		/* The regd update triggered by this command completes it */
		reinit_completion(&ar->regd_update_completed);

		ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
		if (ret)
			ath12k_warn(ar->ab,
				    "pdev id %d failed set current country code: %d\n",
				    i, ret);
	}
}
1554 
/* Second phase of crash recovery: move every hardware of the group into
 * the restarting state (or mark it wedged if a previous restart never
 * finished) and signal that driver recovery may proceed.
 */
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	struct ath12k *ar;
	int i, j;

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
			continue;

		wiphy_lock(ah->hw->wiphy);
		mutex_lock(&ah->hw_mutex);

		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;

			for (j = 0; j < ah->num_radio; j++) {
				ar = &ah->radio[j];
				ath12k_core_halt(ar);
			}

			ath12k_mac_dp_peer_cleanup(ah);
			break;
		case ATH12K_HW_STATE_OFF:
			/* Not reachable: filtered by the continue above; kept
			 * so the switch covers every state.
			 */
			ath12k_warn(ab,
				    "cannot restart hw %d that hasn't been started\n",
				    i);
			break;
		case ATH12K_HW_STATE_RESTARTING:
			break;
		case ATH12K_HW_STATE_RESTARTED:
			/* Crashed again before the restart finished: give up */
			ah->state = ATH12K_HW_STATE_WEDGED;
			fallthrough;
		case ATH12K_HW_STATE_WEDGED:
			ath12k_warn(ab,
				    "device is wedged, will not restart hw %d\n", i);
			break;
		case ATH12K_HW_STATE_TM:
			ath12k_warn(ab, "fw mode reset done radio %d\n", i);
			break;
		}

		mutex_unlock(&ah->hw_mutex);
		wiphy_unlock(ah->hw->wiphy);
	}

	complete(&ab->driver_recovery);
}
1606 
/* Restart worker: reconfigure the crashed device and, once every device
 * of the group is ready again, ask mac80211 to restart each hardware.
 */
static void ath12k_core_restart(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	int ret, i;

	ret = ath12k_core_reconfigure_on_crash(ab);
	if (ret) {
		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
		return;
	}

	if (ab->is_reset) {
		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
			/* Device never registered with mac80211, so there is
			 * nothing to restart; declare the reset finished.
			 */
			atomic_dec(&ab->reset_count);
			complete(&ab->reset_complete);
			ab->is_reset = false;
			atomic_set(&ab->fail_cont_count, 0);
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
		}

		mutex_lock(&ag->mutex);

		/* Wait for all partner devices to come through reset too */
		if (!ath12k_core_hw_group_start_ready(ag)) {
			mutex_unlock(&ag->mutex);
			goto exit_restart;
		}

		for (i = 0; i < ag->num_hw; i++) {
			ah = ath12k_ag_to_ah(ag, i);
			ieee80211_restart_hw(ah->hw);
		}

		mutex_unlock(&ag->mutex);
	}

exit_restart:
	complete(&ab->restart_completed);
}
1647 
/* Reset worker: rate-limit concurrent/failing resets, quiesce the
 * device, power it down and - once the last device of the group has
 * gone through this path - power the whole group back up.
 */
static void ath12k_core_reset(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
	struct ath12k_hw_group *ag = ab->ag;
	int reset_count, fail_cont_count, i;
	long time_left;

	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
		return;
	}

	/* Sometimes the recovery will fail and then the next all recovery fail,
	 * this is to avoid infinite recovery since it can not recovery success
	 */
	fail_cont_count = atomic_read(&ab->fail_cont_count);

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
		return;

	/* After the first failures, back off until the timeout expires */
	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
	    time_before(jiffies, ab->reset_fail_timeout))
		return;

	reset_count = atomic_inc_return(&ab->reset_count);

	if (reset_count > 1) {
		/* Sometimes it happened another reset worker before the previous one
		 * completed, then the second reset worker will destroy the previous one,
		 * thus below is to avoid that.
		 */
		ath12k_warn(ab, "already resetting count %d\n", reset_count);

		reinit_completion(&ab->reset_complete);
		time_left = wait_for_completion_timeout(&ab->reset_complete,
							ATH12K_RESET_TIMEOUT_HZ);
		if (time_left) {
			/* Previous reset finished in time; skip this one */
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
			atomic_dec(&ab->reset_count);
			return;
		}

		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
		/* Record the continuous recovery fail count when recovery failed */
		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");

	ab->is_reset = true;
	atomic_set(&ab->recovery_count, 0);

	ath12k_coredump_collect(ab);
	ath12k_core_pre_reconfigure_recovery(ab);

	ath12k_core_post_reconfigure_recovery(ab);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");

	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	ath12k_hif_power_down(ab, false);

	/* prepare for power up */
	ab->qmi.num_radios = U8_MAX;

	mutex_lock(&ag->mutex);
	ath12k_core_to_group_ref_put(ab);

	if (ag->num_started > 0) {
		/* Not the last device down; the final one powers the group up */
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "waiting for %d partner device(s) to reset\n",
			   ag->num_started);
		mutex_unlock(&ag->mutex);
		return;
	}

	/* Prepare MLO global memory region for power up */
	ath12k_qmi_reset_mlo_mem(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		ath12k_hif_power_up(ab);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
	}

	mutex_unlock(&ag->mutex);
}
1740 
ath12k_core_get_memory_mode(struct ath12k_base * ab)1741 enum ath12k_qmi_mem_mode ath12k_core_get_memory_mode(struct ath12k_base *ab)
1742 {
1743 	unsigned long total_ram;
1744 	struct sysinfo si;
1745 
1746 	si_meminfo(&si);
1747 	total_ram = si.totalram * si.mem_unit;
1748 
1749 	if (total_ram < SZ_512M)
1750 		return ATH12K_QMI_MEMORY_MODE_LOW_512_M;
1751 
1752 	return ATH12K_QMI_MEMORY_MODE_DEFAULT;
1753 }
1754 EXPORT_SYMBOL(ath12k_core_get_memory_mode);
1755 
ath12k_core_pre_init(struct ath12k_base * ab)1756 int ath12k_core_pre_init(struct ath12k_base *ab)
1757 {
1758 	const struct ath12k_mem_profile_based_param *param;
1759 
1760 	param = &ath12k_mem_profile_based_param[ab->target_mem_mode];
1761 	ab->profile_param = param;
1762 	ath12k_fw_map(ab);
1763 
1764 	return 0;
1765 }
1766 
/* Panic notifier callback: give the HIF layer a chance to dump state
 * before the system goes down.
 */
static int ath12k_core_panic_handler(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
					      panic_nb);

	return ath12k_hif_panic_handler(ab);
}
1775 
/* Hook this device into the kernel panic notifier chain */
static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
{
	ab->panic_nb.notifier_call = ath12k_core_panic_handler;

	return atomic_notifier_chain_register(&panic_notifier_list,
					      &ab->panic_nb);
}
1783 
/* Remove this device from the kernel panic notifier chain */
static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &ab->panic_nb);
}
1789 
1790 static inline
ath12k_core_hw_group_create_ready(struct ath12k_hw_group * ag)1791 bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1792 {
1793 	lockdep_assert_held(&ag->mutex);
1794 
1795 	return (ag->num_probed == ag->num_devices);
1796 }
1797 
ath12k_core_hw_group_alloc(struct ath12k_base * ab)1798 static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
1799 {
1800 	struct ath12k_hw_group *ag;
1801 	int count = 0;
1802 
1803 	lockdep_assert_held(&ath12k_hw_group_mutex);
1804 
1805 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1806 		count++;
1807 
1808 	ag = kzalloc_obj(*ag);
1809 	if (!ag)
1810 		return NULL;
1811 
1812 	ag->id = count;
1813 	list_add(&ag->list, &ath12k_hw_group_list);
1814 	mutex_init(&ag->mutex);
1815 	ag->mlo_capable = false;
1816 
1817 	return ag;
1818 }
1819 
/* Remove the group from the global list and free it. Takes the global
 * group mutex; @ag must no longer contain any device.
 */
static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
{
	mutex_lock(&ath12k_hw_group_mutex);

	list_del(&ag->list);
	kfree(ag);

	mutex_unlock(&ath12k_hw_group_mutex);
}
1829 
ath12k_core_hw_group_find_by_dt(struct ath12k_base * ab)1830 static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
1831 {
1832 	struct ath12k_hw_group *ag;
1833 	int i;
1834 
1835 	if (!ab->dev->of_node)
1836 		return NULL;
1837 
1838 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1839 		for (i = 0; i < ag->num_devices; i++)
1840 			if (ag->wsi_node[i] == ab->dev->of_node)
1841 				return ag;
1842 
1843 	return NULL;
1844 }
1845 
ath12k_core_get_wsi_info(struct ath12k_hw_group * ag,struct ath12k_base * ab)1846 static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
1847 				    struct ath12k_base *ab)
1848 {
1849 	struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
1850 	struct device_node *tx_endpoint, *next_rx_endpoint;
1851 	int device_count = 0;
1852 
1853 	next_wsi_dev = wsi_dev;
1854 
1855 	if (!next_wsi_dev)
1856 		return -ENODEV;
1857 
1858 	do {
1859 		ag->wsi_node[device_count] = next_wsi_dev;
1860 
1861 		tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
1862 		if (!tx_endpoint) {
1863 			of_node_put(next_wsi_dev);
1864 			return -ENODEV;
1865 		}
1866 
1867 		next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
1868 		if (!next_rx_endpoint) {
1869 			of_node_put(next_wsi_dev);
1870 			of_node_put(tx_endpoint);
1871 			return -ENODEV;
1872 		}
1873 
1874 		of_node_put(tx_endpoint);
1875 		of_node_put(next_wsi_dev);
1876 
1877 		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
1878 		if (!next_wsi_dev) {
1879 			of_node_put(next_rx_endpoint);
1880 			return -ENODEV;
1881 		}
1882 
1883 		of_node_put(next_rx_endpoint);
1884 
1885 		device_count++;
1886 		if (device_count > ATH12K_MAX_DEVICES) {
1887 			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
1888 				    device_count, ATH12K_MAX_DEVICES);
1889 			of_node_put(next_wsi_dev);
1890 			return -EINVAL;
1891 		}
1892 	} while (wsi_dev != next_wsi_dev);
1893 
1894 	of_node_put(next_wsi_dev);
1895 	ag->num_devices = device_count;
1896 
1897 	return 0;
1898 }
1899 
ath12k_core_get_wsi_index(struct ath12k_hw_group * ag,struct ath12k_base * ab)1900 static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
1901 				     struct ath12k_base *ab)
1902 {
1903 	int i, wsi_controller_index = -1, node_index = -1;
1904 	bool control;
1905 
1906 	for (i = 0; i < ag->num_devices; i++) {
1907 		control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
1908 		if (control)
1909 			wsi_controller_index = i;
1910 
1911 		if (ag->wsi_node[i] == ab->dev->of_node)
1912 			node_index = i;
1913 	}
1914 
1915 	if (wsi_controller_index == -1) {
1916 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
1917 		return -EINVAL;
1918 	}
1919 
1920 	if (node_index == -1) {
1921 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
1922 		return -EINVAL;
1923 	}
1924 
1925 	ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
1926 		ag->num_devices;
1927 
1928 	return 0;
1929 }
1930 
/* Find or create the hardware group this device belongs to, based on
 * the WSI description in the device tree, and register the device in
 * it. Devices without valid DT grouping information each get their own
 * single-device group with id ATH12K_INVALID_GROUP_ID.
 *
 * Returns the group, or NULL on allocation failure. Caller must hold
 * ath12k_hw_group_mutex.
 */
static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
{
	struct ath12k_wsi_info *wsi = &ab->wsi_info;
	struct ath12k_hw_group *ag;

	lockdep_assert_held(&ath12k_hw_group_mutex);

	/* In factory test mode every device runs standalone */
	if (ath12k_ftm_mode)
		goto invalid_group;

	/* The grouping of multiple devices will be done based on device tree file.
	 * The platforms that do not have any valid group information would have
	 * each device to be part of its own invalid group.
	 *
	 * We use group id ATH12K_INVALID_GROUP_ID for single device group
	 * which didn't have dt entry or wrong dt entry, there could be many
	 * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So
	 * default group id of ATH12K_INVALID_GROUP_ID combined with
	 * num devices in ath12k_hw_group determines if the group is
	 * multi device or single device group
	 */

	ag = ath12k_core_hw_group_find_by_dt(ab);
	if (!ag) {
		ag = ath12k_core_hw_group_alloc(ab);
		if (!ag) {
			ath12k_warn(ab, "unable to create new hw group\n");
			return NULL;
		}

		/* Bad/absent WSI data: degrade to a single-device group */
		if (ath12k_core_get_wsi_info(ag, ab) ||
		    ath12k_core_get_wsi_index(ag, ab)) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "unable to get wsi info from dt, grouping single device");
			ag->id = ATH12K_INVALID_GROUP_ID;
			ag->num_devices = 1;
			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
			wsi->index = 0;
		}

		goto exit;
	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
			   ag->id);
		goto invalid_group;
	} else {
		if (ath12k_core_get_wsi_index(ag, ab))
			goto invalid_group;
		goto exit;
	}

invalid_group:
	ag = ath12k_core_hw_group_alloc(ab);
	if (!ag) {
		ath12k_warn(ab, "unable to create new hw group\n");
		return NULL;
	}

	ag->id = ATH12K_INVALID_GROUP_ID;
	ag->num_devices = 1;
	wsi->index = 0;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");

exit:
	/* A fully populated DT group falls back to a fresh single-device
	 * group here; the new group always has a free slot, so this jump
	 * cannot be taken twice.
	 */
	if (ag->num_probed >= ag->num_devices) {
		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
		goto invalid_group;
	}

	ab->device_id = ag->num_probed++;
	ag->ab[ab->device_id] = ab;
	ab->ag = ag;

	ath12k_dp_cmn_hw_group_assign(ath12k_ab_to_dp(ab), ag);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
		   ag->id, ag->num_devices, wsi->index);

	return ag;
}
2012 
/* Remove @ab from its hardware group; the last device to leave frees
 * the group itself.
 */
void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	u8 device_id = ab->device_id;
	int num_probed;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (WARN_ON(device_id >= ag->num_devices)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	if (WARN_ON(ag->ab[device_id] != ab)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	ath12k_dp_cmn_hw_group_unassign(ath12k_ab_to_dp(ab), ag);

	ag->ab[device_id] = NULL;
	ab->ag = NULL;
	ab->device_id = ATH12K_INVALID_DEVICE_ID;

	if (ag->num_probed)
		ag->num_probed--;

	/* Snapshot the count so the group can be freed after unlocking */
	num_probed = ag->num_probed;

	mutex_unlock(&ag->mutex);

	if (!num_probed)
		ath12k_core_hw_group_free(ag);
}
2050 
ath12k_core_hw_group_destroy(struct ath12k_hw_group * ag)2051 static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
2052 {
2053 	struct ath12k_base *ab;
2054 	int i;
2055 
2056 	if (WARN_ON(!ag))
2057 		return;
2058 
2059 	for (i = 0; i < ag->num_devices; i++) {
2060 		ab = ag->ab[i];
2061 		if (!ab)
2062 			continue;
2063 
2064 		ath12k_core_soc_destroy(ab);
2065 	}
2066 }
2067 
/* Unregister and stop the whole group exactly once; repeated calls
 * (e.g. from other devices of the same group) are no-ops thanks to the
 * UNREGISTER flag.
 */
void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);

	ath12k_core_hw_group_stop(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}

	mutex_unlock(&ag->mutex);
}
2099 
/* Create SoC-level state for every device of the group, unwinding the
 * already-created ones on failure. Caller must hold ag->mutex.
 */
static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i, ret;

	lockdep_assert_held(&ag->mutex);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		ret = ath12k_core_soc_create(ab);
		if (ret) {
			mutex_unlock(&ab->core_lock);
			ath12k_err(ab, "failed to create soc %d core: %d\n", i, ret);
			goto destroy;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

destroy:
	/* Unwind only the devices created before the failure */
	for (i--; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_soc_destroy(ab);
		mutex_unlock(&ab->core_lock);
	}

	return ret;
}
2139 
ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group * ag)2140 void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
2141 {
2142 	struct ath12k_base *ab;
2143 	int i;
2144 
2145 	if (ath12k_ftm_mode)
2146 		return;
2147 
2148 	lockdep_assert_held(&ag->mutex);
2149 
2150 	if (ag->num_devices == 1) {
2151 		ab = ag->ab[0];
2152 		/* QCN9274 firmware uses firmware IE for MLO advertisement */
2153 		if (ab->fw.fw_features_valid) {
2154 			ag->mlo_capable =
2155 				ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
2156 			return;
2157 		}
2158 
2159 		/* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
2160 		ag->mlo_capable = ab->single_chip_mlo_support;
2161 		return;
2162 	}
2163 
2164 	ag->mlo_capable = true;
2165 
2166 	for (i = 0; i < ag->num_devices; i++) {
2167 		ab = ag->ab[i];
2168 		if (!ab)
2169 			continue;
2170 
2171 		/* even if 1 device's firmware feature indicates MLO
2172 		 * unsupported, make MLO unsupported for the whole group
2173 		 */
2174 		if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
2175 			ag->mlo_capable = false;
2176 			return;
2177 		}
2178 	}
2179 }
2180 
/* Register the device with the driver core: attach it to a hw group and,
 * once the group has all its expected members, create the group.
 *
 * Returns 0 on success or a negative error code; on failure the device is
 * detached from its group and the panic notifier is unregistered.
 */
int ath12k_core_init(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag;
	int ret;

	/* A failed panic-notifier registration is deliberately non-fatal:
	 * it only affects crash-time diagnostics, so just warn.
	 */
	ret = ath12k_core_panic_notifier_register(ab);
	if (ret)
		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);

	/* ath12k_hw_group_mutex guards the global group list while this
	 * device is matched to (or creates) its group.
	 */
	mutex_lock(&ath12k_hw_group_mutex);

	ag = ath12k_core_hw_group_assign(ab);
	if (!ag) {
		mutex_unlock(&ath12k_hw_group_mutex);
		ath12k_warn(ab, "unable to get hw group\n");
		ret = -ENODEV;
		goto err_unregister_notifier;
	}

	mutex_unlock(&ath12k_hw_group_mutex);

	mutex_lock(&ag->mutex);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
		   ag->num_devices, ag->num_probed);

	/* Only the last probed device of the group triggers creation;
	 * earlier devices return success here and wait.
	 */
	if (ath12k_core_hw_group_create_ready(ag)) {
		ret = ath12k_core_hw_group_create(ag);
		if (ret) {
			mutex_unlock(&ag->mutex);
			ath12k_warn(ab, "unable to create hw group\n");
			goto err_unassign_hw_group;
		}
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_unassign_hw_group:
	ath12k_core_hw_group_unassign(ab);
err_unregister_notifier:
	ath12k_core_panic_notifier_unregister(ab);

	return ret;
}
2227 
/* Inverse of ath12k_core_init(): destroy the hw group this device belongs
 * to, detach the device from it, and drop the panic notifier. Teardown
 * order mirrors init in reverse.
 */
void ath12k_core_deinit(struct ath12k_base *ab)
{
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
	ath12k_core_panic_notifier_unregister(ab);
}
2234 
/* Release resources allocated by ath12k_core_alloc(). The replenish timer
 * is stopped synchronously and both workqueues are drained/destroyed
 * before the ab structure itself is freed, so no deferred work can touch
 * freed memory.
 */
void ath12k_core_free(struct ath12k_base *ab)
{
	timer_delete_sync(&ab->rx_replenish_retry);
	destroy_workqueue(ab->workqueue_aux);
	destroy_workqueue(ab->workqueue);
	kfree(ab);
}
2242 
/* Allocate and minimally initialize an ath12k_base device structure.
 *
 * @dev: underlying bus device
 * @priv_size: extra bytes allocated past the structure for bus-private data
 * @bus: bus type the device sits on
 *
 * Sets up the locks, completions, work items, workqueues and timer the
 * core relies on, but performs no hardware access. Returns the new
 * structure or NULL on allocation failure; free with ath12k_core_free().
 */
struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
				      enum ath12k_bus bus)
{
	struct ath12k_base *ab;

	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
	if (!ab)
		return NULL;

	init_completion(&ab->driver_recovery);

	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
	if (!ab->workqueue)
		goto err_sc_free;

	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
	if (!ab->workqueue_aux)
		goto err_free_wq;

	mutex_init(&ab->core_lock);
	spin_lock_init(&ab->base_lock);
	init_completion(&ab->reset_complete);

	init_waitqueue_head(&ab->peer_mapping_wq);
	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
	INIT_WORK(&ab->restart_work, ath12k_core_restart);
	INIT_WORK(&ab->reset_work, ath12k_core_reset);
	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
	INIT_WORK(&ab->update_11d_work, ath12k_update_11d);

	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
	init_completion(&ab->htc_suspend);
	init_completion(&ab->restart_completed);
	init_completion(&ab->wow.wakeup_completed);

	ab->dev = dev;
	ab->hif.bus = bus;
	/* U8_MAX marks "radio count not yet reported by firmware/QMI" */
	ab->qmi.num_radios = U8_MAX;
	ab->single_chip_mlo_support = false;

	/* Device index used to identify the devices in a group.
	 *
	 * In intra-device MLO only one device is present in a group, so
	 * the index is always zero.
	 *
	 * In inter-device MLO multiple devices are present in a group, so
	 * a non-zero value is expected; the group assignment code updates
	 * it later.
	 */
	ab->device_id = 0;

	return ab;

err_free_wq:
	destroy_workqueue(ab->workqueue);
err_sc_free:
	kfree(ab);
	return NULL;
}
2302 
2303 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies WLAN devices");
2304 MODULE_LICENSE("Dual BSD/GPL");
2305