xref: /linux/drivers/net/wireless/ath/ath12k/core.c (revision 5c8013ae2e86ec36b07500ba4cacb14ab4d6f728)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/slab.h>
9 #include <linux/remoteproc.h>
10 #include <linux/firmware.h>
11 #include <linux/of.h>
12 #include <linux/of_graph.h>
13 #include "ahb.h"
14 #include "core.h"
15 #include "dp_tx.h"
16 #include "dp_rx.h"
17 #include "debug.h"
18 #include "debugfs.h"
19 #include "fw.h"
20 #include "hif.h"
21 #include "pci.h"
22 #include "wow.h"
23 
24 static int ahb_err, pci_err;
25 unsigned int ath12k_debug_mask;
26 module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
27 MODULE_PARM_DESC(debug_mask, "Debugging mask");
28 
29 bool ath12k_ftm_mode;
30 module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
31 MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
32 
33 /* protected with ath12k_hw_group_mutex */
34 static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
35 
36 static DEFINE_MUTEX(ath12k_hw_group_mutex);
37 
ath12k_core_rfkill_config(struct ath12k_base * ab)38 static int ath12k_core_rfkill_config(struct ath12k_base *ab)
39 {
40 	struct ath12k *ar;
41 	int ret = 0, i;
42 
43 	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
44 		return 0;
45 
46 	if (ath12k_acpi_get_disable_rfkill(ab))
47 		return 0;
48 
49 	for (i = 0; i < ab->num_radios; i++) {
50 		ar = ab->pdevs[i].ar;
51 
52 		ret = ath12k_mac_rfkill_config(ar);
53 		if (ret && ret != -EOPNOTSUPP) {
54 			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
55 			return ret;
56 		}
57 	}
58 
59 	return ret;
60 }
61 
62 /* Check if we need to continue with suspend/resume operation.
63  * Return:
64  *	a negative value: error happens and don't continue.
65  *	0:  no error but don't continue.
66  *	positive value: no error and do continue.
67  */
ath12k_core_continue_suspend_resume(struct ath12k_base * ab)68 static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
69 {
70 	struct ath12k *ar;
71 
72 	if (!ab->hw_params->supports_suspend)
73 		return -EOPNOTSUPP;
74 
75 	/* so far single_pdev_only chips have supports_suspend as true
76 	 * so pass 0 as a dummy pdev_id here.
77 	 */
78 	ar = ab->pdevs[0].ar;
79 	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
80 		return 0;
81 
82 	return 1;
83 }
84 
/* System suspend entry point for ath12k.
 *
 * Drains pending tx on every radio (under the wiphy lock) before the
 * device is quiesced by ath12k_core_suspend_late().
 *
 * Returns 0 when suspend may proceed or is not applicable, a negative
 * error code otherwise.
 */
int ath12k_core_suspend(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret, i;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	/* wait for all in-flight tx to complete on each radio */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		if (!ar)
			continue;

		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);

		ret = ath12k_mac_wait_tx_complete(ar);
		if (ret) {
			/* drop the lock before bailing out */
			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
			return ret;
		}

		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
	}

	/* PM framework skips suspend_late/resume_early callbacks
	 * if other devices report errors in their suspend callbacks.
	 * However ath12k_core_resume() would still be called because
	 * here we return success thus kernel put us on dpm_suspended_list.
	 * Since we won't go through a power down/up cycle, there is
	 * no chance to call complete(&ab->restart_completed) in
	 * ath12k_core_restart(), making ath12k_core_resume() timeout.
	 * So call it here to avoid this issue. This also works in case
	 * no error happens thus suspend_late/resume_early get called,
	 * because it will be reinitialized in ath12k_core_resume_early().
	 */
	complete(&ab->restart_completed);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend);
127 
/* Late-suspend stage: stop ACPI handling, mask all interrupt sources,
 * then power the target down. Counterpart of ath12k_core_resume_early().
 */
int ath12k_core_suspend_late(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;

	ath12k_acpi_stop(ab);

	/* disable interrupts before cutting power */
	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	/* NOTE(review): 'true' appears to mark the suspend-path variant of
	 * the power down — confirm against ath12k_hif_power_down().
	 */
	ath12k_hif_power_down(ab, true);

	return 0;
}
EXPORT_SYMBOL(ath12k_core_suspend_late);
146 
ath12k_core_resume_early(struct ath12k_base * ab)147 int ath12k_core_resume_early(struct ath12k_base *ab)
148 {
149 	int ret;
150 
151 	ret = ath12k_core_continue_suspend_resume(ab);
152 	if (ret <= 0)
153 		return ret;
154 
155 	reinit_completion(&ab->restart_completed);
156 	ret = ath12k_hif_power_up(ab);
157 	if (ret)
158 		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
159 
160 	return ret;
161 }
162 EXPORT_SYMBOL(ath12k_core_resume_early);
163 
ath12k_core_resume(struct ath12k_base * ab)164 int ath12k_core_resume(struct ath12k_base *ab)
165 {
166 	long time_left;
167 	int ret;
168 
169 	ret = ath12k_core_continue_suspend_resume(ab);
170 	if (ret <= 0)
171 		return ret;
172 
173 	time_left = wait_for_completion_timeout(&ab->restart_completed,
174 						ATH12K_RESET_TIMEOUT_HZ);
175 	if (time_left == 0) {
176 		ath12k_warn(ab, "timeout while waiting for restart complete");
177 		return -ETIMEDOUT;
178 	}
179 
180 	return 0;
181 }
182 EXPORT_SYMBOL(ath12k_core_resume);
183 
/* Compose the board-file lookup name into @name.
 *
 * @with_variant: append ",variant=<bdf_ext>" when the target reported one.
 * @bus_type_mode: emit only "bus=<bus>" (used for the regdb default name).
 * @with_default: substitute ATH12K_BOARD_ID_DEFAULT for the target's
 *                board id (fallback name).
 *
 * The exact string formats are an ABI shared with board-2.bin contents;
 * do not alter them. Always returns 0.
 */
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					   size_t name_len, bool with_variant,
					   bool bus_type_mode, bool with_default)
{
	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };

	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
		scnprintf(variant, sizeof(variant), ",variant=%s",
			  ab->qmi.target.bdf_ext);

	switch (ab->id.bdf_search) {
	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
		if (bus_type_mode)
			scnprintf(name, name_len,
				  "bus=%s",
				  ath12k_bus_str(ab->hif.bus));
		else
			scnprintf(name, name_len,
				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
				  ath12k_bus_str(ab->hif.bus),
				  ab->id.vendor, ab->id.device,
				  ab->id.subsystem_vendor,
				  ab->id.subsystem_device,
				  ab->qmi.target.chip_id,
				  ab->qmi.target.board_id,
				  variant);
		break;
	default:
		scnprintf(name, name_len,
			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
			  ath12k_bus_str(ab->hif.bus),
			  ab->qmi.target.chip_id,
			  with_default ?
			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
			  variant);
		break;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);

	return 0;
}
227 
/* Primary board name: full id fields plus the variant suffix. */
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					 size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
}
233 
/* Fallback board name: no variant, default board id. */
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
}
239 
/* Bus-only board name ("bus=<bus>"), used as the regdb default lookup. */
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
}
245 
ath12k_core_firmware_request(struct ath12k_base * ab,const char * file)246 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
247 						    const char *file)
248 {
249 	const struct firmware *fw;
250 	char path[100];
251 	int ret;
252 
253 	if (!file)
254 		return ERR_PTR(-ENOENT);
255 
256 	ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
257 
258 	ret = firmware_request_nowarn(&fw, path, ab->dev);
259 	if (ret)
260 		return ERR_PTR(ret);
261 
262 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
263 		   path, fw->size);
264 
265 	return fw;
266 }
267 
/* Release the firmware blob backing @bd (if any) and reset the
 * descriptor. bd->fw may hold an ERR_PTR from a failed request, which
 * must not be passed to release_firmware().
 */
void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	if (!IS_ERR(bd->fw))
		release_firmware(bd->fw);

	memset(bd, 0, sizeof(*bd));
}
275 
/* Walk the sub-IEs of a board/regdb container and find the data entry
 * for @boardname.
 *
 * @buf/@buf_len: container payload — an alternating sequence of name
 * (@name_id) and data (@data_id) sub-IEs; a data IE applies to the most
 * recently matched name IE.
 *
 * On success bd->data/bd->len point INTO the firmware blob (no copy);
 * the blob must outlive @bd.
 *
 * Returns 0 on match, -ENOENT when nothing matches, -EINVAL on a
 * truncated/malformed container.
 */
static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
					 struct ath12k_board_data *bd,
					 const void *buf, size_t buf_len,
					 const char *boardname,
					 int ie_id,
					 int name_id,
					 int data_id)
{
	const struct ath12k_fw_ie *hdr;
	bool name_match_found;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	name_match_found = false;

	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath12k_fw_ie)) {
		hdr = buf;
		board_ie_id = le32_to_cpu(hdr->id);
		board_ie_len = le32_to_cpu(hdr->len);
		board_ie_data = hdr->data;

		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		/* payloads are 4-byte aligned; reject IEs that claim more
		 * data than remains in the buffer
		 */
		if (buf_len < ALIGN(board_ie_len, 4)) {
			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
				   ath12k_bd_ie_type_str(ie_id),
				   buf_len, ALIGN(board_ie_len, 4));
			ret = -EINVAL;
			goto out;
		}

		if (board_ie_id == name_id) {
			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
					board_ie_data, board_ie_len);

			/* name IEs are not NUL terminated; compare by length
			 * first, then bytes
			 */
			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, strlen(boardname));
			if (ret)
				goto next;

			name_match_found = true;
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found match %s for name '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);
		} else if (board_ie_id == data_id) {
			if (!name_match_found)
				/* no match found */
				goto next;

			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found %s for '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);

			/* point at the in-blob payload; no copy is made */
			bd->data = board_ie_data;
			bd->len = board_ie_len;

			ret = 0;
			goto out;
		} else {
			ath12k_warn(ab, "unknown %s id found: %d\n",
				    ath12k_bd_ie_type_str(ie_id),
				    board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = ALIGN(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	ret = -ENOENT;

out:
	return ret;
}
360 
/* Fetch board (or regdb) data for @boardname from the board-2.bin
 * container (board API version 2).
 *
 * Loads ATH12K_BOARD_API2_FILE (cached in bd->fw across calls),
 * validates the magic header and walks the top-level IEs, descending
 * into containers whose id equals @ie_id_match via
 * ath12k_core_parse_bd_ie_board().
 *
 * Returns 0 with bd->data/bd->len set on success; -ENODATA when no
 * matching entry exists; a negative error on I/O or parse failure.
 * On any failure bd is freed.
 */
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
					      struct ath12k_board_data *bd,
					      const char *boardname,
					      int ie_id_match,
					      int name_id,
					      int data_id)
{
	size_t len, magic_len;
	const u8 *data;
	char *filename, filepath[100];
	size_t ie_len;
	struct ath12k_fw_ie *hdr;
	int ret, ie_id;

	filename = ATH12K_BOARD_API2_FILE;

	/* reuse a blob loaded by an earlier attempt (e.g. primary name
	 * before fallback name)
	 */
	if (!bd->fw)
		bd->fw = ath12k_core_firmware_request(ab, filename);

	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	data = bd->fw->data;
	len = bd->fw->size;

	ath12k_core_create_firmware_path(ab, filename,
					 filepath, sizeof(filepath));

	/* magic has extra null byte padded */
	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
		ath12k_err(ab, "found invalid board magic\n");
		ret = -EINVAL;
		goto err;
	}

	/* magic is padded to 4 bytes */
	magic_len = ALIGN(magic_len, 4);
	if (len < magic_len) {
		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	data += magic_len;
	len -= magic_len;

	/* top-level IE walk */
	while (len > sizeof(struct ath12k_fw_ie)) {
		hdr = (struct ath12k_fw_ie *)data;
		ie_id = le32_to_cpu(hdr->id);
		ie_len = le32_to_cpu(hdr->len);

		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < ALIGN(ie_len, 4)) {
			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
				   ie_id, ie_len, len);
			ret = -EINVAL;
			goto err;
		}

		if (ie_id == ie_id_match) {
			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
							    ie_len,
							    boardname,
							    ie_id_match,
							    name_id,
							    data_id);
			if (ret == -ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				goto err;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = ALIGN(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	if (!bd->data || !bd->len) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to fetch %s for %s from %s\n",
			   ath12k_bd_ie_type_str(ie_id_match),
			   boardname, filepath);
		ret = -ENODATA;
		goto err;
	}

	return 0;

err:
	ath12k_core_free_bdf(ab, bd);
	return ret;
}
471 
/* Board API version 1: @filename IS the board data, whole file.
 * Returns 0 with bd pointing at the loaded blob, or a negative error.
 */
int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
				       struct ath12k_board_data *bd,
				       char *filename)
{
	bd->fw = ath12k_core_firmware_request(ab, filename);
	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	bd->data = bd->fw->data;
	bd->len = bd->fw->size;

	return 0;
}
485 
#define BOARD_NAME_SIZE 200
/* Fetch board data, trying in order:
 *   1. board-2.bin with the full board name (API 2),
 *   2. board-2.bin with the fallback name (no variant, default id),
 *   3. the legacy single-board file (API 1).
 * Returns 0 on success, a negative error when all sources fail.
 */
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
	char *filename, filepath[100];
	int bd_api;
	int ret;

	filename = ATH12K_BOARD_API2_FILE;

	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
	if (ret) {
		ath12k_err(ab, "failed to create board name: %d", ret);
		return ret;
	}

	bd_api = 2;
	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
						     sizeof(fallback_boardname));
	if (ret) {
		ath12k_err(ab, "failed to create fallback board name: %d", ret);
		return ret;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	/* last resort: legacy board.bin */
	bd_api = 1;
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
	if (ret) {
		ath12k_core_create_firmware_path(ab, filename,
						 filepath, sizeof(filepath));
		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
			   boardname, filepath);
		/* print the fallback name too, but only when it differs */
		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
				   fallback_boardname, filepath);

		ath12k_err(ab, "failed to fetch board.bin from %s\n",
			   ab->hw_params->fw.dir);
		return ret;
	}

success:
	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
	return 0;
}
544 
/* Fetch the regulatory database, trying in order:
 *   1. board-2.bin regdb entry for the full board name,
 *   2. board-2.bin regdb entry for the bus-only default name,
 *   3. the standalone regdb file.
 * Failures are only logged at debug level — regdb is optional.
 */
int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
	int ret;

	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
						     BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create default board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
	if (ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);

exit:
	if (!ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");

	return ret;
}
590 
ath12k_core_get_max_station_per_radio(struct ath12k_base * ab)591 u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
592 {
593 	if (ab->num_radios == 2)
594 		return TARGET_NUM_STATIONS_DBS;
595 	else if (ab->num_radios == 3)
596 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
597 	return TARGET_NUM_STATIONS_SINGLE;
598 }
599 
ath12k_core_get_max_peers_per_radio(struct ath12k_base * ab)600 u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
601 {
602 	if (ab->num_radios == 2)
603 		return TARGET_NUM_PEERS_PDEV_DBS;
604 	else if (ab->num_radios == 3)
605 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
606 	return TARGET_NUM_PEERS_PDEV_SINGLE;
607 }
608 
ath12k_core_get_max_num_tids(struct ath12k_base * ab)609 u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
610 {
611 	if (ab->num_radios == 2)
612 		return TARGET_NUM_TIDS(DBS);
613 	else if (ab->num_radios == 3)
614 		return TARGET_NUM_TIDS(DBS_SBS);
615 	return TARGET_NUM_TIDS(SINGLE);
616 }
617 
ath12k_core_get_reserved_mem(struct ath12k_base * ab,int index)618 struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
619 						  int index)
620 {
621 	struct device *dev = ab->dev;
622 	struct reserved_mem *rmem;
623 	struct device_node *node;
624 
625 	node = of_parse_phandle(dev->of_node, "memory-region", index);
626 	if (!node) {
627 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
628 			   "failed to parse memory-region for index %d\n", index);
629 		return NULL;
630 	}
631 
632 	rmem = of_reserved_mem_lookup(node);
633 	of_node_put(node);
634 	if (!rmem) {
635 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
636 			   "unable to get memory-region for index %d\n", index);
637 		return NULL;
638 	}
639 
640 	return rmem;
641 }
642 
643 static inline
ath12k_core_to_group_ref_get(struct ath12k_base * ab)644 void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
645 {
646 	struct ath12k_hw_group *ag = ab->ag;
647 
648 	lockdep_assert_held(&ag->mutex);
649 
650 	if (ab->hw_group_ref) {
651 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
652 			   ag->id);
653 		return;
654 	}
655 
656 	ab->hw_group_ref = true;
657 	ag->num_started++;
658 
659 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
660 		   ag->id, ag->num_started);
661 }
662 
663 static inline
ath12k_core_to_group_ref_put(struct ath12k_base * ab)664 void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
665 {
666 	struct ath12k_hw_group *ag = ab->ag;
667 
668 	lockdep_assert_held(&ag->mutex);
669 
670 	if (!ab->hw_group_ref) {
671 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
672 			   ag->id);
673 		return;
674 	}
675 
676 	ab->hw_group_ref = false;
677 	ag->num_started--;
678 
679 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
680 		   ag->id, ag->num_started);
681 }
682 
/* Tear down what ath12k_core_start() brought up, in reverse order.
 * Firmware is only stopped gracefully when it has not crashed.
 */
static void ath12k_core_stop(struct ath12k_base *ab)
{
	ath12k_core_to_group_ref_put(ab);

	/* skip the QMI handshake if firmware already went down */
	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_qmi_firmware_stop(ab);

	ath12k_acpi_stop(ab);

	ath12k_dp_rx_pdev_reo_cleanup(ab);
	ath12k_hif_stop(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_free(ab);

	/* De-Init of components as needed */
}
699 
/* dmi_walk() callback: read country code and the board-file variant
 * string from the vendor SMBIOS table.
 *
 * Side effects on success: ab->new_alpha2 holds the regulatory country
 * code and ab->qmi.target.bdf_ext the variant (magic prefix stripped).
 * Returns early (leaving bdf_ext empty) on any validation failure.
 */
static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
	struct ath12k_base *ab = data;
	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
	ssize_t copied;
	size_t len;
	int i;

	/* a previous table entry already provided the variant */
	if (ab->qmi.target.bdf_ext[0] != '\0')
		return;

	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
		return;

	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "wrong smbios bdf ext type length (%d).\n",
			   hdr->length);
		return;
	}

	spin_lock_bh(&ab->base_lock);

	switch (smbios->country_code_flag) {
	case ATH12K_SMBIOS_CC_ISO:
		/* two ISO alpha-2 characters packed into a u16 */
		ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
		ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
			   ab->new_alpha2[0], ab->new_alpha2[1]);
		break;
	case ATH12K_SMBIOS_CC_WW:
		/* "00" == worldwide regulatory domain */
		ab->new_alpha2[0] = '0';
		ab->new_alpha2[1] = '0';
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
			   smbios->country_code_flag);
		break;
	}

	spin_unlock_bh(&ab->base_lock);

	if (!smbios->bdf_enabled) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
		return;
	}

	/* Only one string exists (per spec) */
	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant magic does not match.\n");
		return;
	}

	/* reject non-printable bytes before using the string */
	len = min_t(size_t,
		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
	for (i = 0; i < len; i++) {
		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "bdf variant name contains non ascii chars.\n");
			return;
		}
	}

	/* Copy extension name without magic prefix */
	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
			 sizeof(ab->qmi.target.bdf_ext));
	if (copied < 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant string is longer than the buffer can accommodate\n");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT,
		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
}
779 
ath12k_core_check_smbios(struct ath12k_base * ab)780 int ath12k_core_check_smbios(struct ath12k_base *ab)
781 {
782 	ab->qmi.target.bdf_ext[0] = '\0';
783 	dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
784 
785 	if (ab->qmi.target.bdf_ext[0] == '\0')
786 		return -ENODATA;
787 
788 	return 0;
789 }
790 
/* Bring up SoC-level services: QMI, debugfs and target power.
 * Undone by ath12k_core_soc_destroy().
 */
static int ath12k_core_soc_create(struct ath12k_base *ab)
{
	int ret;

	if (ath12k_ftm_mode) {
		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
		ath12k_info(ab, "Booting in ftm mode\n");
	}

	ret = ath12k_qmi_init_service(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
		return ret;
	}

	ath12k_debugfs_soc_create(ab);

	ret = ath12k_hif_power_up(ab);
	if (ret) {
		ath12k_err(ab, "failed to power up :%d\n", ret);
		goto err_qmi_deinit;
	}

	ath12k_debugfs_pdev_create(ab);

	return 0;

err_qmi_deinit:
	/* unwind in reverse creation order */
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
	return ret;
}
823 
/* Reverse of ath12k_core_soc_create(): power down and release
 * SoC-level services.
 */
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
	ath12k_hif_power_down(ab, false);
	ath12k_reg_free(ab);
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
}
831 
/* Attach per-pdev datapath state. Undone by ath12k_core_pdev_destroy(). */
static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
	int err;

	err = ath12k_dp_pdev_alloc(ab);
	if (!err)
		return 0;

	ath12k_err(ab, "failed to attach DP pdev: %d\n", err);
	return err;
}
844 
/* Reverse of ath12k_core_pdev_create(): release per-pdev datapath state. */
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
	ath12k_dp_pdev_free(ab);
}
849 
/* Bring the core online: WMI/HTC/HIF bring-up, HTT and WMI service
 * connection, REO setup and the firmware init handshake. Order is
 * significant; each error label unwinds exactly what succeeded before it.
 * Caller must hold ab->core_lock.
 */
static int ath12k_core_start(struct ath12k_base *ab)
{
	int ret;

	lockdep_assert_held(&ab->core_lock);

	ret = ath12k_wmi_attach(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
		return ret;
	}

	ret = ath12k_htc_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init htc: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_hif_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start HIF: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_htc_wait_target(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_dp_htt_connect(&ab->dp);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_connect(ab);
	if (ret) {
		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_htc_start(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to start HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_wait_for_service_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
			   ret);
		goto err_hif_stop;
	}

	ath12k_dp_cc_config(ab);

	ret = ath12k_dp_rx_pdev_reo_setup(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
		goto err_hif_stop;
	}

	ath12k_dp_hal_rx_desc_init(ab);

	ret = ath12k_wmi_cmd_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
		goto err_reo_cleanup;
	}

	ret = ath12k_wmi_wait_for_unified_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	/* put hardware to DBS mode */
	if (ab->hw_params->single_pdev_only) {
		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
		if (ret) {
			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
			goto err_reo_cleanup;
		}
	}

	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
	if (ret) {
		ath12k_err(ab, "failed to send htt version request message: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	ath12k_acpi_set_dsm_func(ab);

	/* Indicate the core start in the appropriate group */
	ath12k_core_to_group_ref_get(ab);

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);
err_hif_stop:
	ath12k_hif_stop(ab);
err_wmi_detach:
	ath12k_wmi_detach(ab);
	return ret;
}
959 
/* Per-device teardown used during group stop: quiesce interrupts and
 * free per-pdev state, serialized with ab->core_lock.
 */
static void ath12k_core_device_cleanup(struct ath12k_base *ab)
{
	mutex_lock(&ab->core_lock);

	ath12k_hif_irq_disable(ab);
	ath12k_core_pdev_destroy(ab);

	mutex_unlock(&ab->core_lock);
}
969 
/* Stop a hardware group: unregister from mac80211, clean up each member
 * device in reverse order, then destroy the mac layer.
 * Caller must hold ag->mutex.
 */
static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	lockdep_assert_held(&ag->mutex);

	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

	ath12k_mac_unregister(ag);

	/* tear down members in reverse of start order */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ath12k_core_device_cleanup(ab);
	}

	ath12k_mac_destroy(ag);
}
993 
ath12k_get_num_partner_link(struct ath12k * ar)994 u8 ath12k_get_num_partner_link(struct ath12k *ar)
995 {
996 	struct ath12k_base *partner_ab, *ab = ar->ab;
997 	struct ath12k_hw_group *ag = ab->ag;
998 	struct ath12k_pdev *pdev;
999 	u8 num_link = 0;
1000 	int i, j;
1001 
1002 	lockdep_assert_held(&ag->mutex);
1003 
1004 	for (i = 0; i < ag->num_devices; i++) {
1005 		partner_ab = ag->ab[i];
1006 
1007 		for (j = 0; j < partner_ab->num_radios; j++) {
1008 			pdev = &partner_ab->pdevs[j];
1009 
1010 			/* Avoid the self link */
1011 			if (ar == pdev->ar)
1012 				continue;
1013 
1014 			num_link++;
1015 		}
1016 	}
1017 
1018 	return num_link;
1019 }
1020 
__ath12k_mac_mlo_ready(struct ath12k * ar)1021 static int __ath12k_mac_mlo_ready(struct ath12k *ar)
1022 {
1023 	u8 num_link = ath12k_get_num_partner_link(ar);
1024 	int ret;
1025 
1026 	if (num_link == 0)
1027 		return 0;
1028 
1029 	ret = ath12k_wmi_mlo_ready(ar);
1030 	if (ret) {
1031 		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
1032 			   ar->pdev_idx, ret);
1033 		return ret;
1034 	}
1035 
1036 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
1037 		   ar->pdev_idx);
1038 
1039 	return 0;
1040 }
1041 
ath12k_mac_mlo_ready(struct ath12k_hw_group * ag)1042 int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
1043 {
1044 	struct ath12k_hw *ah;
1045 	struct ath12k *ar;
1046 	int ret;
1047 	int i, j;
1048 
1049 	for (i = 0; i < ag->num_hw; i++) {
1050 		ah = ag->ah[i];
1051 		if (!ah)
1052 			continue;
1053 
1054 		for_each_ar(ah, ar, j) {
1055 			ar = &ah->radio[j];
1056 			ret = __ath12k_mac_mlo_ready(ar);
1057 			if (ret)
1058 				return ret;
1059 		}
1060 	}
1061 
1062 	return 0;
1063 }
1064 
/* Set up MLO across the group: mac-level MLO setup, partner cookie
 * conversion init on every device, then the MLO-ready handshake.
 * No-op success when the group is not MLO capable. On a failed ready
 * handshake the mac-level setup is torn down again.
 */
static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
{
	int ret, i;

	if (!ag->mlo_capable)
		return 0;

	ret = ath12k_mac_mlo_setup(ag);
	if (ret)
		return ret;

	for (i = 0; i < ag->num_devices; i++)
		ath12k_dp_partner_cc_init(ag->ab[i]);

	ret = ath12k_mac_mlo_ready(ag);
	if (ret)
		goto err_mlo_teardown;

	return 0;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

	return ret;
}
1090 
/* Bring up a hardware group.
 *
 * First-time path: allocate mac80211 hw instances, run MLO setup and
 * register with mac80211, then mark the group registered.  Recovery
 * path (group already registered): skip straight to per-device pdev
 * creation and interrupt enabling.
 *
 * Called with ag->mutex held.  Failures during pdev creation stop the
 * whole group; earlier failures unwind setup in reverse order.
 */
static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int ret, i;

	lockdep_assert_held(&ag->mutex);

	/* Already registered (e.g. restart after recovery): only the
	 * per-device pdev state needs to be recreated.
	 */
	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
		goto core_pdev_create;

	ret = ath12k_mac_allocate(ag);
	if (WARN_ON(ret))
		return ret;

	ret = ath12k_core_mlo_setup(ag);
	if (WARN_ON(ret))
		goto err_mac_destroy;

	ret = ath12k_mac_register(ag);
	if (WARN_ON(ret))
		goto err_mlo_teardown;

	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

core_pdev_create:
	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ret = ath12k_core_pdev_create(ab);
		if (ret) {
			ath12k_err(ab, "failed to create pdev core %d\n", ret);
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		ath12k_hif_irq_enable(ab);

		/* -EOPNOTSUPP means rfkill is not supported on this
		 * target and is not treated as a failure.
		 */
		ret = ath12k_core_rfkill_config(ab);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

err:
	/* Partial pdev bring-up: stop the whole group */
	ath12k_core_hw_group_stop(ag);
	return ret;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

err_mac_destroy:
	ath12k_mac_destroy(ag);

	return ret;
}
1157 
ath12k_core_start_firmware(struct ath12k_base * ab,enum ath12k_firmware_mode mode)1158 static int ath12k_core_start_firmware(struct ath12k_base *ab,
1159 				      enum ath12k_firmware_mode mode)
1160 {
1161 	int ret;
1162 
1163 	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
1164 				    &ab->qmi.ce_cfg.shadow_reg_v3_len);
1165 
1166 	ret = ath12k_qmi_firmware_start(ab, mode);
1167 	if (ret) {
1168 		ath12k_err(ab, "failed to send firmware start: %d\n", ret);
1169 		return ret;
1170 	}
1171 
1172 	return ret;
1173 }
1174 
1175 static inline
ath12k_core_hw_group_start_ready(struct ath12k_hw_group * ag)1176 bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
1177 {
1178 	lockdep_assert_held(&ag->mutex);
1179 
1180 	return (ag->num_started == ag->num_devices);
1181 }
1182 
ath12k_fw_stats_pdevs_free(struct list_head * head)1183 static void ath12k_fw_stats_pdevs_free(struct list_head *head)
1184 {
1185 	struct ath12k_fw_stats_pdev *i, *tmp;
1186 
1187 	list_for_each_entry_safe(i, tmp, head, list) {
1188 		list_del(&i->list);
1189 		kfree(i);
1190 	}
1191 }
1192 
ath12k_fw_stats_bcn_free(struct list_head * head)1193 void ath12k_fw_stats_bcn_free(struct list_head *head)
1194 {
1195 	struct ath12k_fw_stats_bcn *i, *tmp;
1196 
1197 	list_for_each_entry_safe(i, tmp, head, list) {
1198 		list_del(&i->list);
1199 		kfree(i);
1200 	}
1201 }
1202 
ath12k_fw_stats_vdevs_free(struct list_head * head)1203 static void ath12k_fw_stats_vdevs_free(struct list_head *head)
1204 {
1205 	struct ath12k_fw_stats_vdev *i, *tmp;
1206 
1207 	list_for_each_entry_safe(i, tmp, head, list) {
1208 		list_del(&i->list);
1209 		kfree(i);
1210 	}
1211 }
1212 
/* Initialize the per-radio firmware stats lists and the completions
 * used to wait for stats events from firmware.
 */
void ath12k_fw_stats_init(struct ath12k *ar)
{
	INIT_LIST_HEAD(&ar->fw_stats.vdevs);
	INIT_LIST_HEAD(&ar->fw_stats.pdevs);
	INIT_LIST_HEAD(&ar->fw_stats.bcn);
	init_completion(&ar->fw_stats_complete);
	init_completion(&ar->fw_stats_done);
}
1221 
/* Free all accumulated pdev/vdev/beacon firmware stats entries. */
void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
{
	ath12k_fw_stats_pdevs_free(&stats->pdevs);
	ath12k_fw_stats_vdevs_free(&stats->vdevs);
	ath12k_fw_stats_bcn_free(&stats->bcn);
}
1228 
/* Drop all collected firmware stats and reset the receive counters,
 * under ar->data_lock so concurrent stats event processing is safe.
 */
void ath12k_fw_stats_reset(struct ath12k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ath12k_fw_stats_free(&ar->fw_stats);
	ar->fw_stats.num_vdev_recvd = 0;
	ar->fw_stats.num_bcn_recvd = 0;
	spin_unlock_bh(&ar->data_lock);
}
1237 
/* Kick QMI host-cap handling on the next partner device in the group.
 *
 * Because "found" is reassigned on every iteration, only the first
 * non-NULL device positioned after @ab in the group array receives
 * the trigger — this chains bring-up from one device to the next.
 */
static void ath12k_core_trigger_partner(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_base *partner_ab;
	bool found = false;
	int i;

	for (i = 0; i < ag->num_devices; i++) {
		partner_ab = ag->ab[i];
		if (!partner_ab)
			continue;

		if (found)
			ath12k_qmi_trigger_host_cap(partner_ab);

		found = (partner_ab == ab);
	}
}
1256 
/* Called when QMI reports firmware ready: start firmware, initialize
 * CE pipes and the datapath, start this device's core, and — once all
 * devices in the group have started — start the whole hardware group.
 * If partners are still pending, trigger the next one instead.
 *
 * Returns 0 on success or a negative error code, unwinding whatever
 * was brought up on failure.
 */
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	int ret, i;

	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
	if (ret) {
		ath12k_err(ab, "failed to start firmware: %d\n", ret);
		return ret;
	}

	ret = ath12k_ce_init_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
		goto err_firmware_stop;
	}

	ret = ath12k_dp_alloc(ab);
	if (ret) {
		ath12k_err(ab, "failed to init DP: %d\n", ret);
		goto err_firmware_stop;
	}

	/* Lock order: group mutex first, then this device's core_lock */
	mutex_lock(&ag->mutex);
	mutex_lock(&ab->core_lock);

	ret = ath12k_core_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start core: %d\n", ret);
		goto err_dp_free;
	}

	mutex_unlock(&ab->core_lock);

	if (ath12k_core_hw_group_start_ready(ag)) {
		ret = ath12k_core_hw_group_start(ag);
		if (ret) {
			ath12k_warn(ab, "unable to start hw group\n");
			goto err_core_stop;
		}
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
	} else {
		/* Not all partners are up yet: chain to the next device */
		ath12k_core_trigger_partner(ab);
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_core_stop:
	/* NOTE: @ab is intentionally reused as the loop variable here to
	 * stop every device in the group, in reverse order.
	 */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}
	mutex_unlock(&ag->mutex);
	goto exit;

err_dp_free:
	ath12k_dp_free(ab);
	mutex_unlock(&ab->core_lock);
	mutex_unlock(&ag->mutex);

err_firmware_stop:
	ath12k_qmi_firmware_stop(ab);

exit:
	return ret;
}
1330 
/* Tear down and rebuild driver state after a firmware crash: free
 * pdev/CE/WMI/DP state, reinitialize SRNG, then rerun the
 * firmware-ready bring-up sequence.
 */
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
	int ret;

	mutex_lock(&ab->core_lock);
	ath12k_dp_pdev_free(ab);
	ath12k_ce_cleanup_pipes(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_rx_pdev_reo_cleanup(ab);
	mutex_unlock(&ab->core_lock);

	ath12k_dp_free(ab);
	ath12k_hal_srng_deinit(ab);

	/* All vdev ids become available again for the restarted firmware */
	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		return ret;

	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	ret = ath12k_core_qmi_firmware_ready(ab);
	if (ret)
		goto err_hal_srng_deinit;

	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);

	return 0;

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);
	return ret;
}
1365 
ath12k_rfkill_work(struct work_struct * work)1366 static void ath12k_rfkill_work(struct work_struct *work)
1367 {
1368 	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1369 	struct ath12k_hw_group *ag = ab->ag;
1370 	struct ath12k *ar;
1371 	struct ath12k_hw *ah;
1372 	struct ieee80211_hw *hw;
1373 	bool rfkill_radio_on;
1374 	int i, j;
1375 
1376 	spin_lock_bh(&ab->base_lock);
1377 	rfkill_radio_on = ab->rfkill_radio_on;
1378 	spin_unlock_bh(&ab->base_lock);
1379 
1380 	for (i = 0; i < ag->num_hw; i++) {
1381 		ah = ath12k_ag_to_ah(ag, i);
1382 		if (!ah)
1383 			continue;
1384 
1385 		for (j = 0; j < ah->num_radio; j++) {
1386 			ar = &ah->radio[j];
1387 			if (!ar)
1388 				continue;
1389 
1390 			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1391 		}
1392 
1393 		hw = ah->hw;
1394 		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1395 	}
1396 }
1397 
/* Quiesce a single radio: reset vdev bookkeeping, cancel outstanding
 * work, detach the pdev from the RCU-visible table and drop all vifs.
 * Caller must hold the wiphy mutex.
 */
void ath12k_core_halt(struct ath12k *ar)
{
	struct list_head *pos, *n;
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ar->num_created_vdevs = 0;
	ar->allocated_vdev_map = 0;

	ath12k_mac_scan_finish(ar);
	ath12k_mac_peer_cleanup_all(ar);
	cancel_delayed_work_sync(&ar->scan.timeout);
	cancel_work_sync(&ar->regd_update_work);
	cancel_work_sync(&ab->rfkill_work);
	cancel_work_sync(&ab->update_11d_work);

	/* Make the pdev invisible to RCU readers before dropping vifs */
	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
	synchronize_rcu();

	spin_lock_bh(&ar->data_lock);
	list_for_each_safe(pos, n, &ar->arvifs)
		list_del_init(pos);
	spin_unlock_bh(&ar->data_lock);

	idr_init(&ar->txmgmt_idr);
}
1425 
/* Prepare all radios in the group for recovery after a firmware
 * crash: bump the crash counter, stop mac80211 queues, drain tx and
 * complete every completion a waiter could be blocked on so no thread
 * stays stuck across the restart.
 */
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_hw *ah;
	int i, j;

	spin_lock_bh(&ab->base_lock);
	ab->stats.fw_crash_counter++;
	spin_unlock_bh(&ab->base_lock);

	if (ab->is_reset)
		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
		    ah->state == ATH12K_HW_STATE_TM)
			continue;

		wiphy_lock(ah->hw->wiphy);

		/* If queue 0 is stopped, it is safe to assume that all
		 * other queues are stopped by driver via
		 * ieee80211_stop_queues() below. This means, there is
		 * no need to stop it again and hence continue
		 */
		if (ieee80211_queue_stopped(ah->hw, 0)) {
			wiphy_unlock(ah->hw->wiphy);
			continue;
		}

		ieee80211_stop_queues(ah->hw);

		for (j = 0; j < ah->num_radio; j++) {
			ar = &ah->radio[j];

			ath12k_mac_drain_tx(ar);
			ar->state_11d = ATH12K_11D_IDLE;
			/* Unblock anything waiting on scan/peer/vdev/key
			 * operations; firmware will not answer any more.
			 */
			complete(&ar->completed_11d_scan);
			complete(&ar->scan.started);
			complete_all(&ar->scan.completed);
			complete(&ar->scan.on_channel);
			complete(&ar->peer_assoc_done);
			complete(&ar->peer_delete_done);
			complete(&ar->install_key_done);
			complete(&ar->vdev_setup_done);
			complete(&ar->vdev_delete_done);
			complete(&ar->bss_survey_done);

			wake_up(&ar->dp.tx_empty_waitq);
			idr_for_each(&ar->txmgmt_idr,
				     ath12k_mac_tx_mgmt_pending_free, ar);
			idr_destroy(&ar->txmgmt_idr);
			wake_up(&ar->txmgmt_empty_waitq);

			ar->monitor_vdev_id = -1;
			ar->monitor_vdev_created = false;
			ar->monitor_started = false;
		}

		wiphy_unlock(ah->hw->wiphy);
	}

	wake_up(&ab->wmi_ab.tx_credits_wq);
	wake_up(&ab->peer_mapping_wq);
}
1493 
/* Worker that pushes a newly learned country code (ab->new_alpha2) to
 * firmware for every radio on this device.
 */
static void ath12k_update_11d(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
	struct ath12k *ar;
	struct ath12k_pdev *pdev;
	struct wmi_set_current_country_arg arg = {};
	int ret, i;

	/* Snapshot the alpha2 country code under base_lock */
	spin_lock_bh(&ab->base_lock);
	memcpy(&arg.alpha2, &ab->new_alpha2, 2);
	spin_unlock_bh(&ab->base_lock);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
		   arg.alpha2[0], arg.alpha2[1]);

	for (i = 0; i < ab->num_radios; i++) {
		pdev = &ab->pdevs[i];
		ar = pdev->ar;

		memcpy(&ar->alpha2, &arg.alpha2, 2);
		ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
		if (ret)
			ath12k_warn(ar->ab,
				    "pdev id %d failed set current country code: %d\n",
				    i, ret);
	}
}
1521 
/* After pre-recovery quiescing, transition each hw in the group
 * towards restart: halt radios of hws that were ON and mark them
 * RESTARTING.  A hw still in RESTARTED state here crashed again
 * before completing a restart and is declared WEDGED (no further
 * restart attempts).
 */
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	struct ath12k *ar;
	int i, j;

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
			continue;

		wiphy_lock(ah->hw->wiphy);
		mutex_lock(&ah->hw_mutex);

		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;

			for (j = 0; j < ah->num_radio; j++) {
				ar = &ah->radio[j];
				ath12k_core_halt(ar);
			}

			break;
		case ATH12K_HW_STATE_OFF:
			/* Unreachable: OFF hws are skipped above; the case
			 * is kept so the switch covers every state.
			 */
			ath12k_warn(ab,
				    "cannot restart hw %d that hasn't been started\n",
				    i);
			break;
		case ATH12K_HW_STATE_RESTARTING:
			break;
		case ATH12K_HW_STATE_RESTARTED:
			ah->state = ATH12K_HW_STATE_WEDGED;
			fallthrough;
		case ATH12K_HW_STATE_WEDGED:
			ath12k_warn(ab,
				    "device is wedged, will not restart hw %d\n", i);
			break;
		case ATH12K_HW_STATE_TM:
			ath12k_warn(ab, "fw mode reset done radio %d\n", i);
			break;
		}

		mutex_unlock(&ah->hw_mutex);
		wiphy_unlock(ah->hw->wiphy);
	}

	complete(&ab->driver_recovery);
}
1572 
/* Restart worker: reconfigure the crashed device and, once every
 * device in the group is started again, ask mac80211 to restart each
 * hw in the group.
 */
static void ath12k_core_restart(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	int ret, i;

	ret = ath12k_core_reconfigure_on_crash(ab);
	if (ret) {
		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
		return;
	}

	if (ab->is_reset) {
		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
			/* mac80211 never registered: finish the reset here
			 * instead of via ieee80211_reconfig().
			 */
			atomic_dec(&ab->reset_count);
			complete(&ab->reset_complete);
			ab->is_reset = false;
			atomic_set(&ab->fail_cont_count, 0);
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
		}

		mutex_lock(&ag->mutex);

		/* Wait until all partner devices have come back up */
		if (!ath12k_core_hw_group_start_ready(ag)) {
			mutex_unlock(&ag->mutex);
			goto exit_restart;
		}

		for (i = 0; i < ag->num_hw; i++) {
			ah = ath12k_ag_to_ah(ag, i);
			ieee80211_restart_hw(ah->hw);
		}

		mutex_unlock(&ag->mutex);
	}

exit_restart:
	complete(&ab->restart_completed);
}
1613 
/* Reset worker: throttle repeated recovery attempts, run the pre/post
 * recovery sequence, power this device down and — once the last
 * device in the group has reset — power every group member back up.
 */
static void ath12k_core_reset(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
	struct ath12k_hw_group *ag = ab->ag;
	int reset_count, fail_cont_count, i;
	long time_left;

	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
		return;
	}

	/* Sometimes the recovery will fail and then the next all recovery fail,
	 * this is to avoid infinite recovery since it can not recovery success
	 */
	fail_cont_count = atomic_read(&ab->fail_cont_count);

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
		return;

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
	    time_before(jiffies, ab->reset_fail_timeout))
		return;

	reset_count = atomic_inc_return(&ab->reset_count);

	if (reset_count > 1) {
		/* Sometimes it happened another reset worker before the previous one
		 * completed, then the second reset worker will destroy the previous one,
		 * thus below is to avoid that.
		 */
		ath12k_warn(ab, "already resetting count %d\n", reset_count);

		reinit_completion(&ab->reset_complete);
		time_left = wait_for_completion_timeout(&ab->reset_complete,
							ATH12K_RESET_TIMEOUT_HZ);
		if (time_left) {
			/* Previous reset finished in time: skip this one */
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
			atomic_dec(&ab->reset_count);
			return;
		}

		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
		/* Record the continuous recovery fail count when recovery failed*/
		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");

	ab->is_reset = true;
	atomic_set(&ab->recovery_count, 0);

	ath12k_coredump_collect(ab);
	ath12k_core_pre_reconfigure_recovery(ab);

	ath12k_core_post_reconfigure_recovery(ab);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");

	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	ath12k_hif_power_down(ab, false);

	/* prepare for power up */
	ab->qmi.num_radios = U8_MAX;

	mutex_lock(&ag->mutex);
	ath12k_core_to_group_ref_put(ab);

	/* Only the last device to drop its group reference powers the
	 * whole group back up; earlier devices just wait.
	 */
	if (ag->num_started > 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "waiting for %d partner device(s) to reset\n",
			   ag->num_started);
		mutex_unlock(&ag->mutex);
		return;
	}

	/* Prepare MLO global memory region for power up */
	ath12k_qmi_reset_mlo_mem(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		ath12k_hif_power_up(ab);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
	}

	mutex_unlock(&ag->mutex);
}
1706 
/* Early per-device init: resolve the hw params for this chip and map
 * the firmware memory segments.
 */
int ath12k_core_pre_init(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_hw_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init hw params: %d\n", ret);
		return ret;
	}

	ath12k_fw_map(ab);

	return 0;
}
1721 
/* Panic notifier callback: give the HIF layer a chance to do
 * last-gasp work (e.g. preserve crash state) on system panic.
 */
static int ath12k_core_panic_handler(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
					      panic_nb);

	return ath12k_hif_panic_handler(ab);
}
1730 
/* Hook this device into the global panic notifier chain. */
static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
{
	ab->panic_nb.notifier_call = ath12k_core_panic_handler;

	return atomic_notifier_chain_register(&panic_notifier_list,
					      &ab->panic_nb);
}
1738 
/* Remove this device from the global panic notifier chain. */
static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &ab->panic_nb);
}
1744 
1745 static inline
ath12k_core_hw_group_create_ready(struct ath12k_hw_group * ag)1746 bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1747 {
1748 	lockdep_assert_held(&ag->mutex);
1749 
1750 	return (ag->num_probed == ag->num_devices);
1751 }
1752 
ath12k_core_hw_group_alloc(struct ath12k_base * ab)1753 static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
1754 {
1755 	struct ath12k_hw_group *ag;
1756 	int count = 0;
1757 
1758 	lockdep_assert_held(&ath12k_hw_group_mutex);
1759 
1760 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1761 		count++;
1762 
1763 	ag = kzalloc(sizeof(*ag), GFP_KERNEL);
1764 	if (!ag)
1765 		return NULL;
1766 
1767 	ag->id = count;
1768 	list_add(&ag->list, &ath12k_hw_group_list);
1769 	mutex_init(&ag->mutex);
1770 	ag->mlo_capable = false;
1771 
1772 	return ag;
1773 }
1774 
/* Remove a group from the global group list and free it. */
static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
{
	mutex_lock(&ath12k_hw_group_mutex);

	list_del(&ag->list);
	kfree(ag);

	mutex_unlock(&ath12k_hw_group_mutex);
}
1784 
ath12k_core_hw_group_find_by_dt(struct ath12k_base * ab)1785 static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
1786 {
1787 	struct ath12k_hw_group *ag;
1788 	int i;
1789 
1790 	if (!ab->dev->of_node)
1791 		return NULL;
1792 
1793 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1794 		for (i = 0; i < ag->num_devices; i++)
1795 			if (ag->wsi_node[i] == ab->dev->of_node)
1796 				return ag;
1797 
1798 	return NULL;
1799 }
1800 
ath12k_core_get_wsi_info(struct ath12k_hw_group * ag,struct ath12k_base * ab)1801 static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
1802 				    struct ath12k_base *ab)
1803 {
1804 	struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
1805 	struct device_node *tx_endpoint, *next_rx_endpoint;
1806 	int device_count = 0;
1807 
1808 	next_wsi_dev = wsi_dev;
1809 
1810 	if (!next_wsi_dev)
1811 		return -ENODEV;
1812 
1813 	do {
1814 		ag->wsi_node[device_count] = next_wsi_dev;
1815 
1816 		tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
1817 		if (!tx_endpoint) {
1818 			of_node_put(next_wsi_dev);
1819 			return -ENODEV;
1820 		}
1821 
1822 		next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
1823 		if (!next_rx_endpoint) {
1824 			of_node_put(next_wsi_dev);
1825 			of_node_put(tx_endpoint);
1826 			return -ENODEV;
1827 		}
1828 
1829 		of_node_put(tx_endpoint);
1830 		of_node_put(next_wsi_dev);
1831 
1832 		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
1833 		if (!next_wsi_dev) {
1834 			of_node_put(next_rx_endpoint);
1835 			return -ENODEV;
1836 		}
1837 
1838 		of_node_put(next_rx_endpoint);
1839 
1840 		device_count++;
1841 		if (device_count > ATH12K_MAX_DEVICES) {
1842 			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
1843 				    device_count, ATH12K_MAX_DEVICES);
1844 			of_node_put(next_wsi_dev);
1845 			return -EINVAL;
1846 		}
1847 	} while (wsi_dev != next_wsi_dev);
1848 
1849 	of_node_put(next_wsi_dev);
1850 	ag->num_devices = device_count;
1851 
1852 	return 0;
1853 }
1854 
ath12k_core_get_wsi_index(struct ath12k_hw_group * ag,struct ath12k_base * ab)1855 static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
1856 				     struct ath12k_base *ab)
1857 {
1858 	int i, wsi_controller_index = -1, node_index = -1;
1859 	bool control;
1860 
1861 	for (i = 0; i < ag->num_devices; i++) {
1862 		control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
1863 		if (control)
1864 			wsi_controller_index = i;
1865 
1866 		if (ag->wsi_node[i] == ab->dev->of_node)
1867 			node_index = i;
1868 	}
1869 
1870 	if (wsi_controller_index == -1) {
1871 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
1872 		return -EINVAL;
1873 	}
1874 
1875 	if (node_index == -1) {
1876 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
1877 		return -EINVAL;
1878 	}
1879 
1880 	ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
1881 		ag->num_devices;
1882 
1883 	return 0;
1884 }
1885 
/* Assign @ab to a hardware group.
 *
 * Tries to find the device's group via device tree WSI information;
 * otherwise (FTM mode, missing/invalid DT data, or the matched group
 * being unregistered) the device is placed in its own single-device
 * group with id ATH12K_INVALID_GROUP_ID.
 *
 * Called with ath12k_hw_group_mutex held.  Returns the group, or
 * NULL on allocation failure.
 */
static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
{
	struct ath12k_wsi_info *wsi = &ab->wsi_info;
	struct ath12k_hw_group *ag;

	lockdep_assert_held(&ath12k_hw_group_mutex);

	if (ath12k_ftm_mode)
		goto invalid_group;

	/* The grouping of multiple devices will be done based on device tree file.
	 * The platforms that do not have any valid group information would have
	 * each device to be part of its own invalid group.
	 *
	 * We use group id ATH12K_INVALID_GROUP_ID for single device group
	 * which didn't have dt entry or wrong dt entry, there could be many
	 * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So
	 * default group id of ATH12K_INVALID_GROUP_ID combined with
	 * num devices in ath12k_hw_group determines if the group is
	 * multi device or single device group
	 */

	ag = ath12k_core_hw_group_find_by_dt(ab);
	if (!ag) {
		ag = ath12k_core_hw_group_alloc(ab);
		if (!ag) {
			ath12k_warn(ab, "unable to create new hw group\n");
			return NULL;
		}

		/* DT parsing failure: fall back to a single-device group */
		if (ath12k_core_get_wsi_info(ag, ab) ||
		    ath12k_core_get_wsi_index(ag, ab)) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "unable to get wsi info from dt, grouping single device");
			ag->id = ATH12K_INVALID_GROUP_ID;
			ag->num_devices = 1;
			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
			wsi->index = 0;
		}

		goto exit;
	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
			   ag->id);
		goto invalid_group;
	} else {
		if (ath12k_core_get_wsi_index(ag, ab))
			goto invalid_group;
		goto exit;
	}

invalid_group:
	ag = ath12k_core_hw_group_alloc(ab);
	if (!ag) {
		ath12k_warn(ab, "unable to create new hw group\n");
		return NULL;
	}

	ag->id = ATH12K_INVALID_GROUP_ID;
	ag->num_devices = 1;
	wsi->index = 0;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");

exit:
	if (ag->num_probed >= ag->num_devices) {
		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
		goto invalid_group;
	}

	/* Record the device in the group and link the group back */
	ab->device_id = ag->num_probed++;
	ag->ab[ab->device_id] = ab;
	ab->ag = ag;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
		   ag->id, ag->num_devices, wsi->index);

	return ag;
}
1965 
/* Detach @ab from its hardware group; the last device to leave frees
 * the group itself.
 */
void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	u8 device_id = ab->device_id;
	int num_probed;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (WARN_ON(device_id >= ag->num_devices)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	if (WARN_ON(ag->ab[device_id] != ab)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	ag->ab[device_id] = NULL;
	ab->ag = NULL;
	ab->device_id = ATH12K_INVALID_DEVICE_ID;

	if (ag->num_probed)
		ag->num_probed--;

	num_probed = ag->num_probed;

	mutex_unlock(&ag->mutex);

	/* Free the group only after dropping ag->mutex */
	if (!num_probed)
		ath12k_core_hw_group_free(ag);
}
2001 
ath12k_core_hw_group_destroy(struct ath12k_hw_group * ag)2002 static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
2003 {
2004 	struct ath12k_base *ab;
2005 	int i;
2006 
2007 	if (WARN_ON(!ag))
2008 		return;
2009 
2010 	for (i = 0; i < ag->num_devices; i++) {
2011 		ab = ag->ab[i];
2012 		if (!ab)
2013 			continue;
2014 
2015 		ath12k_core_soc_destroy(ab);
2016 	}
2017 }
2018 
/* Unregister and stop an entire hardware group, then stop each
 * member device's core.  Safe to call repeatedly: the UNREGISTER
 * flag makes subsequent calls no-ops.
 */
void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);

	ath12k_core_hw_group_stop(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}

	mutex_unlock(&ag->mutex);
}
2050 
ath12k_core_hw_group_create(struct ath12k_hw_group * ag)2051 static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
2052 {
2053 	struct ath12k_base *ab;
2054 	int i, ret;
2055 
2056 	lockdep_assert_held(&ag->mutex);
2057 
2058 	for (i = 0; i < ag->num_devices; i++) {
2059 		ab = ag->ab[i];
2060 		if (!ab)
2061 			continue;
2062 
2063 		mutex_lock(&ab->core_lock);
2064 
2065 		ret = ath12k_core_soc_create(ab);
2066 		if (ret) {
2067 			mutex_unlock(&ab->core_lock);
2068 			ath12k_err(ab, "failed to create soc core: %d\n", ret);
2069 			return ret;
2070 		}
2071 
2072 		mutex_unlock(&ab->core_lock);
2073 	}
2074 
2075 	return 0;
2076 }
2077 
/* Decide whether the hardware group supports MLO.
 *
 * Single-device groups consult the device's own advertisement
 * (firmware IE when available, otherwise the QMI
 * single_chip_mlo_support bit).  Multi-device groups are MLO capable
 * only if every member's firmware supports MLO.  Called with
 * ag->mutex held; skipped entirely in FTM mode.
 */
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (ath12k_ftm_mode)
		return;

	lockdep_assert_held(&ag->mutex);

	if (ag->num_devices == 1) {
		ab = ag->ab[0];
		/* QCN9274 firmware uses firmware IE for MLO advertisement */
		if (ab->fw.fw_features_valid) {
			ag->mlo_capable =
				ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
			return;
		}

		/* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
		ag->mlo_capable = ab->single_chip_mlo_support;
		return;
	}

	ag->mlo_capable = true;

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		/* even if 1 device's firmware feature indicates MLO
		 * unsupported, make MLO unsupported for the whole group
		 */
		if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
			ag->mlo_capable = false;
			return;
		}
	}
}
2118 
/* Per-device entry point at probe time: register the panic notifier,
 * assign the device to a hardware group and, once the group is fully
 * probed, create SoC state for all its members.
 */
int ath12k_core_init(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag;
	int ret;

	/* Registration failure is logged but not fatal */
	ret = ath12k_core_panic_notifier_register(ab);
	if (ret)
		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);

	mutex_lock(&ath12k_hw_group_mutex);

	ag = ath12k_core_hw_group_assign(ab);
	if (!ag) {
		mutex_unlock(&ath12k_hw_group_mutex);
		ath12k_warn(ab, "unable to get hw group\n");
		ret = -ENODEV;
		goto err_unregister_notifier;
	}

	mutex_unlock(&ath12k_hw_group_mutex);

	mutex_lock(&ag->mutex);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
		   ag->num_devices, ag->num_probed);

	/* Only the last device to probe triggers group creation */
	if (ath12k_core_hw_group_create_ready(ag)) {
		ret = ath12k_core_hw_group_create(ag);
		if (ret) {
			mutex_unlock(&ag->mutex);
			ath12k_warn(ab, "unable to create hw group\n");
			goto err_destroy_hw_group;
		}
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_destroy_hw_group:
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
err_unregister_notifier:
	ath12k_core_panic_notifier_unregister(ab);

	return ret;
}
2166 
/* Inverse of ath12k_core_init(): tear down the group state and drop
 * the panic notifier.
 */
void ath12k_core_deinit(struct ath12k_base *ab)
{
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
	ath12k_core_panic_notifier_unregister(ab);
}
2173 
/* Final release of an ath12k_base: stop the replenish timer, destroy
 * the workqueues created in ath12k_core_alloc() and free the object.
 */
void ath12k_core_free(struct ath12k_base *ab)
{
	timer_delete_sync(&ab->rx_replenish_retry);
	destroy_workqueue(ab->workqueue_aux);
	destroy_workqueue(ab->workqueue);
	kfree(ab);
}
2181 
/* ath12k_core_alloc - allocate and initialize an ath12k device structure
 * @dev: generic device backing this ath12k instance
 * @priv_size: extra bytes for the bus layer's private area, appended to *ab
 * @bus: bus type (AHB/PCI) the device sits on
 *
 * Allocates the base structure plus @priv_size of bus-private trailing
 * space and initializes all locks, completions, work items and the rx
 * replenish timer. The counterpart is ath12k_core_free().
 *
 * Return: new device structure, or NULL on allocation failure.
 */
struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
				      enum ath12k_bus bus)
{
	struct ath12k_base *ab;

	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
	if (!ab)
		return NULL;

	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
	if (!ab->workqueue)
		goto err_free_ab;

	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
	if (!ab->workqueue_aux)
		goto err_destroy_wq;

	/* Locks */
	mutex_init(&ab->core_lock);
	spin_lock_init(&ab->base_lock);

	/* Completions */
	init_completion(&ab->driver_recovery);
	init_completion(&ab->reset_complete);
	init_completion(&ab->htc_suspend);
	init_completion(&ab->restart_completed);
	init_completion(&ab->wow.wakeup_completed);

	/* Peer bookkeeping and WMI credit wait queues */
	INIT_LIST_HEAD(&ab->peers);
	init_waitqueue_head(&ab->peer_mapping_wq);
	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);

	/* Deferred work */
	INIT_WORK(&ab->restart_work, ath12k_core_restart);
	INIT_WORK(&ab->reset_work, ath12k_core_reset);
	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
	INIT_WORK(&ab->update_11d_work, ath12k_update_11d);

	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);

	ab->dev = dev;
	ab->hif.bus = bus;
	/* U8_MAX = "unknown until QMI reports the real radio count" */
	ab->qmi.num_radios = U8_MAX;
	ab->single_chip_mlo_support = false;

	/* Device index used to identify the devices in a group.
	 *
	 * In Intra-device MLO, only one device present in a group,
	 * so it is always zero.
	 *
	 * In Inter-device MLO, Multiple device present in a group,
	 * expect non-zero value.
	 */
	ab->device_id = 0;

	return ab;

err_destroy_wq:
	destroy_workqueue(ab->workqueue);
err_free_ab:
	kfree(ab);
	return NULL;
}
2242 
ath12k_init(void)2243 static int ath12k_init(void)
2244 {
2245 	ahb_err = ath12k_ahb_init();
2246 	if (ahb_err)
2247 		pr_warn("Failed to initialize ath12k AHB device: %d\n", ahb_err);
2248 
2249 	pci_err = ath12k_pci_init();
2250 	if (pci_err)
2251 		pr_warn("Failed to initialize ath12k PCI device: %d\n", pci_err);
2252 
2253 	/* If both failed, return one of the failures (arbitrary) */
2254 	return ahb_err && pci_err ? ahb_err : 0;
2255 }
2256 
ath12k_exit(void)2257 static void ath12k_exit(void)
2258 {
2259 	if (!pci_err)
2260 		ath12k_pci_exit();
2261 
2262 	if (!ahb_err)
2263 		ath12k_ahb_exit();
2264 }
2265 
2266 module_init(ath12k_init);
2267 module_exit(ath12k_exit);
2268 
2269 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN devices");
2270 MODULE_LICENSE("Dual BSD/GPL");
2271