// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem_state.h>
#include "ahb.h"
#include "debug.h"
#include "hif.h"

static const struct of_device_id ath12k_ahb_of_match[] = {
	{ .compatible = "qcom,ipq5332-wifi",
	  .data = (void *)ATH12K_HW_IPQ5332_HW10,
	},
	{ }
};

MODULE_DEVICE_TABLE(of, ath12k_ahb_of_match);

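/* CE interrupts start at index 4 in irq_name[] below, right after the four
 * misc/exception/watchdog entries.
 */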
#define ATH12K_IRQ_CE0_OFFSET 4
#define ATH12K_MAX_UPDS 1
#define ATH12K_UPD_IRQ_WRD_LEN  18
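/* Sized for "UserPD<n>-<irq name>", e.g. "UserPD1-stop-ack" (17 bytes
 * including the NUL for single-digit userPD ids); scnprintf() truncates
 * safely should an id ever grow larger.
 */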
static const char ath12k_userpd_irq[][9] = {"spawn",
				     "ready",
				     "stop-ack"};

static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
	"misc-pulse1",
	"misc-latch",
	"sw-exception",
	"watchdog",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
58 	"reo2ost-exception",
59 	"wbm2host-rx-release",
60 	"reo2host-status",
61 	"reo2host-destination-ring4",
62 	"reo2host-destination-ring3",
63 	"reo2host-destination-ring2",
64 	"reo2host-destination-ring1",
65 	"rxdma2host-monitor-destination-mac3",
66 	"rxdma2host-monitor-destination-mac2",
67 	"rxdma2host-monitor-destination-mac1",
68 	"ppdu-end-interrupts-mac3",
69 	"ppdu-end-interrupts-mac2",
70 	"ppdu-end-interrupts-mac1",
71 	"rxdma2host-monitor-status-ring-mac3",
72 	"rxdma2host-monitor-status-ring-mac2",
73 	"rxdma2host-monitor-status-ring-mac1",
74 	"host2rxdma-host-buf-ring-mac3",
75 	"host2rxdma-host-buf-ring-mac2",
76 	"host2rxdma-host-buf-ring-mac1",
77 	"rxdma2host-destination-ring-mac3",
78 	"rxdma2host-destination-ring-mac2",
79 	"rxdma2host-destination-ring-mac1",
80 	"host2tcl-input-ring4",
81 	"host2tcl-input-ring3",
82 	"host2tcl-input-ring2",
83 	"host2tcl-input-ring1",
84 	"wbm2host-tx-completions-ring4",
85 	"wbm2host-tx-completions-ring3",
86 	"wbm2host-tx-completions-ring2",
87 	"wbm2host-tx-completions-ring1",
88 	"tcl2host-status-ring",
89 };
90 
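/* Values mirror the positions of the corresponding names in irq_name[]:
 * host2wbm_desc_feed = 16 lines up with irq_name[16].
 */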
enum ext_irq_num {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};

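/* On hw with ce_remap set (e.g. IPQ5332), the CE registers sit outside the
 * WCSS block: offsets below HAL_SEQ_WCSS_CMEM_OFFSET are serviced through
 * the dedicated ab->mem_ce mapping instead of ab->mem.
 */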
static u32 ath12k_ahb_read32(struct ath12k_base *ab, u32 offset)
{
	if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
		return ioread32(ab->mem_ce + offset);
	return ioread32(ab->mem + offset);
}

static void ath12k_ahb_write32(struct ath12k_base *ab, u32 offset,
			       u32 value)
{
	if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
		iowrite32(value, ab->mem_ce + offset);
	else
		iowrite32(value, ab->mem + offset);
}

static void ath12k_ahb_cancel_workqueue(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		cancel_work_sync(&ce_pipe->intr_wq);
	}
}

static void ath12k_ahb_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void __ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath12k_ahb_ext_grp_disable(irq_grp);
		if (irq_grp->napi_enabled) {
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}

static void ath12k_ahb_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void ath12k_ahb_setbit32(struct ath12k_base *ab, u8 bit, u32 offset)
{
	u32 val;

	val = ath12k_ahb_read32(ab, offset);
	ath12k_ahb_write32(ab, offset, val | BIT(bit));
}

static void ath12k_ahb_clearbit32(struct ath12k_base *ab, u8 bit, u32 offset)
{
	u32 val;

	val = ath12k_ahb_read32(ab, offset);
	ath12k_ahb_write32(ab, offset, val & ~BIT(bit));
}

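/* IE1/IE2/IE3 appear to gate source-ring, destination-ring and
 * destination-status-ring interrupts respectively, one bit per CE
 * (register addresses as provided by hw_params->ce_ie_addr).
 */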
static void ath12k_ahb_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr;

	ce_attr = &ab->hw_params->host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath12k_ahb_setbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath12k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
		ath12k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				    ie3_reg_addr);
	}
}

static void ath12k_ahb_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr;

	ce_attr = &ab->hw_params->host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath12k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath12k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
		ath12k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				      ie3_reg_addr);
	}
}

static void ath12k_ahb_sync_ce_irqs(struct ath12k_base *ab)
{
	int i;
	int irq_idx;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
		synchronize_irq(ab->irq_num[irq_idx]);
	}
}

static void ath12k_ahb_sync_ext_irqs(struct ath12k_base *ab)
{
	int i, j;
	int irq_idx;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];
			synchronize_irq(ab->irq_num[irq_idx]);
		}
	}
}

static void ath12k_ahb_ce_irqs_enable(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath12k_ahb_ce_irq_enable(ab, i);
	}
}

static void ath12k_ahb_ce_irqs_disable(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath12k_ahb_ce_irq_disable(ab, i);
	}
}

static int ath12k_ahb_start(struct ath12k_base *ab)
{
	ath12k_ahb_ce_irqs_enable(ab);
	ath12k_ce_rx_post_buf(ab);

	return 0;
}

static void ath12k_ahb_ext_irq_enable(struct ath12k_base *ab)
{
	struct ath12k_ext_irq_grp *irq_grp;
	int i;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		if (!irq_grp->napi_enabled) {
			napi_enable(&irq_grp->napi);
			irq_grp->napi_enabled = true;
		}
		ath12k_ahb_ext_grp_enable(irq_grp);
	}
}

static void ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
{
	__ath12k_ahb_ext_irq_disable(ab);
	ath12k_ahb_sync_ext_irqs(ab);
}

static void ath12k_ahb_stop(struct ath12k_base *ab)
{
	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_ahb_ce_irqs_disable(ab);
	ath12k_ahb_sync_ce_irqs(ab);
	ath12k_ahb_cancel_workqueue(ab);
	timer_delete_sync(&ab->rx_replenish_retry);
	ath12k_ce_cleanup_pipes(ab);
}

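/* Bring up the userPD: map the reserved firmware region, load the two MDT
 * images, authenticate via SCM, then handshake the spawn/ready SMEM state
 * bits with the Q6 root PD.
 */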
static int ath12k_ahb_power_up(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	char fw_name[ATH12K_USERPD_FW_NAME_LEN];
	char fw2_name[ATH12K_USERPD_FW_NAME_LEN];
	struct device *dev = ab->dev;
	const struct firmware *fw, *fw2;
	struct reserved_mem *rmem = NULL;
	unsigned long time_left;
	phys_addr_t mem_phys;
	void *mem_region;
	size_t mem_size;
	u32 pasid;
	int ret;

	rmem = ath12k_core_get_reserved_mem(ab, 0);
	if (!rmem)
		return -ENODEV;

	mem_phys = rmem->base;
	mem_size = rmem->size;
	mem_region = devm_memremap(dev, mem_phys, mem_size, MEMREMAP_WC);
	if (IS_ERR(mem_region)) {
		ath12k_err(ab, "unable to map memory region: %pa+%pa\n",
			   &rmem->base, &rmem->size);
		return PTR_ERR(mem_region);
	}

	snprintf(fw_name, sizeof(fw_name), "%s/%s/%s%d%s", ATH12K_FW_DIR,
		 ab->hw_params->fw.dir, ATH12K_AHB_FW_PREFIX, ab_ahb->userpd_id,
		 ATH12K_AHB_FW_SUFFIX);

	ret = request_firmware(&fw, fw_name, dev);
	if (ret < 0) {
		ath12k_err(ab, "request_firmware failed\n");
		return ret;
	}

	ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw_name,
		   fw->size);

	if (!fw->size) {
		ath12k_err(ab, "Invalid firmware size\n");
		ret = -EINVAL;
		goto err_fw;
	}

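	/* PAS id: the userPD number encoded alongside the userPD software id */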
	pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
		ATH12K_AHB_UPD_SWID;

	/* Load FW image to a reserved memory location */
	ret = qcom_mdt_load(dev, fw, fw_name, pasid, mem_region, mem_phys, mem_size,
			    &mem_phys);
	if (ret) {
		ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
		goto err_fw;
	}

	snprintf(fw2_name, sizeof(fw2_name), "%s/%s/%s", ATH12K_FW_DIR,
		 ab->hw_params->fw.dir, ATH12K_AHB_FW2);

	ret = request_firmware(&fw2, fw2_name, dev);
	if (ret < 0) {
		ath12k_err(ab, "request_firmware failed\n");
		goto err_fw;
	}

	ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw2_name,
		   fw2->size);

	if (!fw2->size) {
		ath12k_err(ab, "Invalid firmware size\n");
		ret = -EINVAL;
		goto err_fw2;
	}

	ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, pasid, mem_region, mem_phys,
				    mem_size, &mem_phys);
	if (ret) {
		ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
		goto err_fw2;
	}

	/* Authenticate FW image using peripheral ID */
	ret = qcom_scm_pas_auth_and_reset(pasid);
	if (ret) {
		ath12k_err(ab, "failed to boot the remote processor %d\n", ret);
		goto err_fw2;
	}

	/* Instruct Q6 to spawn userPD thread */
	ret = qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit),
					  BIT(ab_ahb->spawn_bit));
	if (ret) {
		ath12k_err(ab, "Failed to update spawn state %d\n", ret);
		goto err_fw2;
	}

	time_left = wait_for_completion_timeout(&ab_ahb->userpd_spawned,
						ATH12K_USERPD_SPAWN_TIMEOUT);
	if (!time_left) {
		ath12k_err(ab, "UserPD spawn wait timed out\n");
		ret = -ETIMEDOUT;
		goto err_fw2;
	}

	time_left = wait_for_completion_timeout(&ab_ahb->userpd_ready,
						ATH12K_USERPD_READY_TIMEOUT);
	if (!time_left) {
		ath12k_err(ab, "UserPD ready wait timed out\n");
		ret = -ETIMEDOUT;
		goto err_fw2;
	}

	qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit), 0);

	ath12k_dbg(ab, ATH12K_DBG_AHB, "UserPD%d is now UP\n", ab_ahb->userpd_id);

err_fw2:
	release_firmware(fw2);
err_fw:
	release_firmware(fw);
	return ret;
}

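/* Stop the userPD via the stop SMEM state bit, then release the PAS
 * firmware region. Note: is_suspend is currently unused on AHB.
 */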
static void ath12k_ahb_power_down(struct ath12k_base *ab, bool is_suspend)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	unsigned long time_left;
	u32 pasid;
	int ret;

	qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit),
				    BIT(ab_ahb->stop_bit));

	time_left = wait_for_completion_timeout(&ab_ahb->userpd_stopped,
						ATH12K_USERPD_STOP_TIMEOUT);
	if (!time_left) {
		ath12k_err(ab, "UserPD stop wait timed out\n");
		return;
	}

	qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit), 0);

	pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
		ATH12K_AHB_UPD_SWID;
	/* Release the firmware */
	ret = qcom_scm_pas_shutdown(pasid);
	if (ret)
		ath12k_err(ab, "scm pas shutdown failed for userPD%d: %d\n",
			   ab_ahb->userpd_id, ret);
}

static void ath12k_ahb_init_qmi_ce_config(struct ath12k_base *ab)
{
	struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;

	cfg->tgt_ce_len = ab->hw_params->target_ce_count;
	cfg->tgt_ce = ab->hw_params->target_ce_config;
	cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
	cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
	ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
}

static void ath12k_ahb_ce_workqueue(struct work_struct *work)
{
	struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);

	ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	ath12k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
}

static irqreturn_t ath12k_ahb_ce_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ce_pipe *ce_pipe = arg;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	ath12k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);

	queue_work(system_bh_wq, &ce_pipe->intr_wq);

	return IRQ_HANDLED;
}

static int ath12k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
						struct ath12k_ext_irq_grp,
						napi);
	struct ath12k_base *ab = irq_grp->ab;
	int work_done;

	work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ath12k_ahb_ext_grp_enable(irq_grp);
	}

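	/* NAPI contract: the poll return value must never exceed the budget */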
	if (work_done > budget)
		work_done = budget;

	return work_done;
}

static irqreturn_t ath12k_ahb_ext_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ext_irq_grp *irq_grp = arg;

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	ath12k_ahb_ext_grp_disable(irq_grp);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}

static int ath12k_ahb_config_ext_irq(struct ath12k_base *ab)
{
	const struct ath12k_hw_ring_mask *ring_mask;
	struct ath12k_ext_irq_grp *irq_grp;
	const struct hal_ops *hal_ops;
	int i, j, irq, irq_idx, ret;
	u32 num_irq;

	ring_mask = ab->hw_params->ring_mask;
	hal_ops = ab->hw_params->hal_ops;
	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;

		irq_grp->napi_ndev = alloc_netdev_dummy(0);
		if (!irq_grp->napi_ndev)
			return -ENOMEM;

		netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
			       ath12k_ahb_ext_grp_napi_poll);

		for (j = 0; j < ATH12K_EXT_IRQ_NUM_MAX; j++) {
			/* For TX ring, ensure that the ring mask and the
			 * tcl_to_wbm_rbm_map point to the same ring number.
			 */
			if (ring_mask->tx[i] &
			    BIT(hal_ops->tcl_to_wbm_rbm_map[j].wbm_ring_num)) {
				irq_grp->irqs[num_irq++] =
					wbm2host_tx_completions_ring1 - j;
			}

			if (ring_mask->rx[i] & BIT(j)) {
				irq_grp->irqs[num_irq++] =
					reo2host_destination_ring1 - j;
			}

			if (ring_mask->rx_err[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_exception;

			if (ring_mask->rx_wbm_rel[i] & BIT(j))
				irq_grp->irqs[num_irq++] = wbm2host_rx_release;

			if (ring_mask->reo_status[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_status;

			if (ring_mask->rx_mon_dest[i] & BIT(j))
				irq_grp->irqs[num_irq++] =
					rxdma2host_monitor_destination_mac1;
		}

		irq_grp->num_irq = num_irq;

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];

			irq = platform_get_irq_byname(ab->pdev,
						      irq_name[irq_idx]);
			ab->irq_num[irq_idx] = irq;
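			/* NOAUTOEN keeps the line masked until
			 * ath12k_ahb_ext_irq_enable(); DISABLE_UNLAZY makes
			 * disable_irq_nosync() mask at the chip immediately
			 * instead of lazily.
			 */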
			irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
			ret = devm_request_irq(ab->dev, irq,
					       ath12k_ahb_ext_interrupt_handler,
					       IRQF_TRIGGER_RISING,
					       irq_name[irq_idx], irq_grp);
			if (ret)
				ath12k_warn(ab, "failed request_irq for %d\n", irq);
		}
	}

	return 0;
}

static int ath12k_ahb_config_irq(struct ath12k_base *ab)
{
	int irq, irq_idx, i;
	int ret;

	/* Configure CE irqs */
	for (i = 0; i < ab->hw_params->ce_count; i++) {
		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH12K_IRQ_CE0_OFFSET + i;

		INIT_WORK(&ce_pipe->intr_wq, ath12k_ahb_ce_workqueue);
		irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
		ret = devm_request_irq(ab->dev, irq, ath12k_ahb_ce_interrupt_handler,
				       IRQF_TRIGGER_RISING, irq_name[irq_idx],
				       ce_pipe);
		if (ret)
			return ret;

		ab->irq_num[irq_idx] = irq;
	}

	/* Configure external interrupts */
	ret = ath12k_ahb_config_ext_irq(ab);

	return ret;
}

static int ath12k_ahb_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
					  u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	u32 pipedir;
	int i;

	for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
		entry = &ab->hw_params->svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		pipedir = __le32_to_cpu(entry->pipedir);
		if (pipedir == PIPEDIR_IN || pipedir == PIPEDIR_INOUT) {
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
		}

		if (pipedir == PIPEDIR_OUT || pipedir == PIPEDIR_INOUT) {
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

static const struct ath12k_hif_ops ath12k_ahb_hif_ops_ipq5332 = {
	.start = ath12k_ahb_start,
	.stop = ath12k_ahb_stop,
	.read32 = ath12k_ahb_read32,
	.write32 = ath12k_ahb_write32,
	.irq_enable = ath12k_ahb_ext_irq_enable,
	.irq_disable = ath12k_ahb_ext_irq_disable,
	.map_service_to_pipe = ath12k_ahb_map_service_to_pipe,
	.power_up = ath12k_ahb_power_up,
	.power_down = ath12k_ahb_power_down,
};

static irqreturn_t ath12k_userpd_irq_handler(int irq, void *data)
{
	struct ath12k_base *ab = data;
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_SPAWN_IRQ]) {
		complete(&ab_ahb->userpd_spawned);
	} else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_READY_IRQ]) {
		complete(&ab_ahb->userpd_ready);
	} else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_STOP_ACK_IRQ]) {
		complete(&ab_ahb->userpd_stopped);
	} else {
		ath12k_err(ab, "Invalid userpd interrupt\n");
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static int ath12k_ahb_config_rproc_irq(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	int i, ret;
	char *upd_irq_name;

	for (i = 0; i < ATH12K_USERPD_MAX_IRQ; i++) {
		ab_ahb->userpd_irq_num[i] = platform_get_irq_byname(ab->pdev,
								    ath12k_userpd_irq[i]);
		if (ab_ahb->userpd_irq_num[i] < 0)
			return ab_ahb->userpd_irq_num[i];

		upd_irq_name = devm_kzalloc(&ab->pdev->dev, ATH12K_UPD_IRQ_WRD_LEN,
					    GFP_KERNEL);
		if (!upd_irq_name)
			return -ENOMEM;

		scnprintf(upd_irq_name, ATH12K_UPD_IRQ_WRD_LEN, "UserPD%u-%s",
			  ab_ahb->userpd_id, ath12k_userpd_irq[i]);
		ret = devm_request_threaded_irq(&ab->pdev->dev, ab_ahb->userpd_irq_num[i],
						NULL, ath12k_userpd_irq_handler,
						IRQF_TRIGGER_RISING | IRQF_ONESHOT,
						upd_irq_name, ab);
		if (ret)
			return dev_err_probe(&ab->pdev->dev, ret,
					     "Request %s irq failed: %d\n",
					     ath12k_userpd_irq[i], ret);
	}

	ab_ahb->spawn_state = devm_qcom_smem_state_get(&ab->pdev->dev, "spawn",
						       &ab_ahb->spawn_bit);
	if (IS_ERR(ab_ahb->spawn_state))
		return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->spawn_state),
				     "Failed to acquire spawn state\n");

	ab_ahb->stop_state = devm_qcom_smem_state_get(&ab->pdev->dev, "stop",
						      &ab_ahb->stop_bit);
	if (IS_ERR(ab_ahb->stop_state))
		return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->stop_state),
				     "Failed to acquire stop state\n");

	init_completion(&ab_ahb->userpd_spawned);
	init_completion(&ab_ahb->userpd_ready);
	init_completion(&ab_ahb->userpd_stopped);
	return 0;
}

static int ath12k_ahb_root_pd_state_notifier(struct notifier_block *nb,
					     const unsigned long event, void *data)
{
	struct ath12k_ahb *ab_ahb = container_of(nb, struct ath12k_ahb, root_pd_nb);
	struct ath12k_base *ab = ab_ahb->ab;

	if (event == ATH12K_RPROC_AFTER_POWERUP) {
		ath12k_dbg(ab, ATH12K_DBG_AHB, "Root PD is UP\n");
		complete(&ab_ahb->rootpd_ready);
	}

	return 0;
}

static int ath12k_ahb_register_rproc_notifier(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	ab_ahb->root_pd_nb.notifier_call = ath12k_ahb_root_pd_state_notifier;
	init_completion(&ab_ahb->rootpd_ready);

	ab_ahb->root_pd_notifier = qcom_register_ssr_notifier(ab_ahb->tgt_rproc->name,
							      &ab_ahb->root_pd_nb);
	if (IS_ERR(ab_ahb->root_pd_notifier))
		return PTR_ERR(ab_ahb->root_pd_notifier);

	return 0;
}

static void ath12k_ahb_unregister_rproc_notifier(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	if (!ab_ahb->root_pd_notifier) {
		ath12k_err(ab, "Rproc notifier not registered\n");
		return;
	}

	qcom_unregister_ssr_notifier(ab_ahb->root_pd_notifier,
				     &ab_ahb->root_pd_nb);
	ab_ahb->root_pd_notifier = NULL;
}

static int ath12k_ahb_get_rproc(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	struct device *dev = ab->dev;
	struct device_node *np;
	struct rproc *prproc;

	np = of_parse_phandle(dev->of_node, "qcom,rproc", 0);
	if (!np) {
		ath12k_err(ab, "failed to get q6_rproc handle\n");
		return -ENOENT;
	}

	prproc = rproc_get_by_phandle(np->phandle);
	of_node_put(np);
	if (!prproc)
		return dev_err_probe(&ab->pdev->dev, -EPROBE_DEFER,
				     "failed to get rproc\n");

	ab_ahb->tgt_rproc = prproc;

	return 0;
}

static int ath12k_ahb_boot_root_pd(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	unsigned long time_left;
	int ret;

	ret = rproc_boot(ab_ahb->tgt_rproc);
	if (ret < 0) {
		ath12k_err(ab, "RootPD boot failed\n");
		return ret;
	}

	time_left = wait_for_completion_timeout(&ab_ahb->rootpd_ready,
						ATH12K_ROOTPD_READY_TIMEOUT);
	if (!time_left) {
		ath12k_err(ab, "RootPD ready wait timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int ath12k_ahb_configure_rproc(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	int ret;

	ret = ath12k_ahb_get_rproc(ab);
	if (ret < 0)
		return ret;

	ret = ath12k_ahb_register_rproc_notifier(ab);
	if (ret < 0) {
		ret = dev_err_probe(&ab->pdev->dev, ret,
				    "failed to register rproc notifier\n");
		goto err_put_rproc;
	}

	if (ab_ahb->tgt_rproc->state != RPROC_RUNNING) {
		ret = ath12k_ahb_boot_root_pd(ab);
		if (ret < 0) {
			ath12k_err(ab, "failed to boot the remote processor Q6\n");
			goto err_unreg_notifier;
		}
	}

	return ath12k_ahb_config_rproc_irq(ab);

err_unreg_notifier:
	ath12k_ahb_unregister_rproc_notifier(ab);

err_put_rproc:
	rproc_put(ab_ahb->tgt_rproc);
	return ret;
}

static void ath12k_ahb_deconfigure_rproc(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	ath12k_ahb_unregister_rproc_notifier(ab);
	rproc_put(ab_ahb->tgt_rproc);
}

static int ath12k_ahb_resource_init(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	struct platform_device *pdev = ab->pdev;
	struct resource *mem_res;
	int ret;

	ab->mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
	if (IS_ERR(ab->mem)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(ab->mem), "ioremap error\n");
		goto out;
	}

	ab->mem_len = resource_size(mem_res);

	if (ab->hw_params->ce_remap) {
		const struct ce_remap *ce_remap = ab->hw_params->ce_remap;
		/* The CE register space is moved out of WCSS and is not
		 * contiguous with it, hence the CE registers are remapped
		 * to a separate space for access.
		 */
		ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
		if (!ab->mem_ce) {
			dev_err(&pdev->dev, "ce ioremap error\n");
			ret = -ENOMEM;
			goto err_mem_unmap;
		}
		ab->ce_remap = true;
		ab->ce_remap_base_addr = HAL_IPQ5332_CE_WFSS_REG_BASE;
	}

	ab_ahb->xo_clk = devm_clk_get(ab->dev, "xo");
	if (IS_ERR(ab_ahb->xo_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(ab_ahb->xo_clk),
				    "failed to get xo clock\n");
		goto err_mem_ce_unmap;
	}

	ret = clk_prepare_enable(ab_ahb->xo_clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable gcc_xo_clk: %d\n", ret);
		goto err_clock_deinit;
	}

	return 0;

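	/* The error labels fall through: each stage undoes its own setup and
	 * then runs the cleanup for every earlier stage. Pointer resets sit
	 * under the following label so that direct jumps clear them too.
	 */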
err_clock_deinit:
	devm_clk_put(ab->dev, ab_ahb->xo_clk);

err_mem_ce_unmap:
	ab_ahb->xo_clk = NULL;
	if (ab->hw_params->ce_remap)
		iounmap(ab->mem_ce);

err_mem_unmap:
	ab->mem_ce = NULL;
	devm_iounmap(ab->dev, ab->mem);

out:
	ab->mem = NULL;
	return ret;
}

static void ath12k_ahb_resource_deinit(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	if (ab->mem)
		devm_iounmap(ab->dev, ab->mem);

	if (ab->mem_ce)
		iounmap(ab->mem_ce);

	ab->mem = NULL;
	ab->mem_ce = NULL;

	clk_disable_unprepare(ab_ahb->xo_clk);
	devm_clk_put(ab->dev, ab_ahb->xo_clk);
	ab_ahb->xo_clk = NULL;
}

static int ath12k_ahb_probe(struct platform_device *pdev)
{
	struct ath12k_base *ab;
	const struct ath12k_hif_ops *hif_ops;
	struct ath12k_ahb *ab_ahb;
	enum ath12k_hw_rev hw_rev;
	u32 addr, userpd_id;
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "Failed to set 32-bit coherent dma\n");
		return ret;
	}

	ab = ath12k_core_alloc(&pdev->dev, sizeof(struct ath12k_ahb),
			       ATH12K_BUS_AHB);
	if (!ab)
		return -ENOMEM;

	hw_rev = (enum ath12k_hw_rev)(kernel_ulong_t)of_device_get_match_data(&pdev->dev);
	switch (hw_rev) {
	case ATH12K_HW_IPQ5332_HW10:
		hif_ops = &ath12k_ahb_hif_ops_ipq5332;
		userpd_id = ATH12K_IPQ5332_USERPD_ID;
		break;
	default:
		ret = -EOPNOTSUPP;
		goto err_core_free;
	}

	ab->hif.ops = hif_ops;
	ab->pdev = pdev;
	ab->hw_rev = hw_rev;
	ab->target_mem_mode = ATH12K_QMI_MEMORY_MODE_DEFAULT;
	platform_set_drvdata(pdev, ab);
	ab_ahb = ath12k_ab_to_ahb(ab);
	ab_ahb->ab = ab;
	ab_ahb->userpd_id = userpd_id;

	/* Set ATH12K_FLAG_FIXED_MEM_REGION for platforms that support fixed
	 * memory reservation from DT. If FW memory is reserved in DT, the
	 * ath12k driver need not allocate it.
	 */
	if (!of_property_read_u32(ab->dev->of_node, "memory-region", &addr))
		set_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);

	ret = ath12k_core_pre_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath12k_ahb_resource_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		goto err_resource_deinit;

	ret = ath12k_ce_alloc_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath12k_ahb_init_qmi_ce_config(ab);

	ret = ath12k_ahb_configure_rproc(ab);
	if (ret)
		goto err_ce_free;

	ret = ath12k_ahb_config_irq(ab);
	if (ret) {
		ath12k_err(ab, "failed to configure irq: %d\n", ret);
		goto err_rproc_deconfigure;
	}

	ret = ath12k_core_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init core: %d\n", ret);
		goto err_rproc_deconfigure;
	}

	return 0;

err_rproc_deconfigure:
	ath12k_ahb_deconfigure_rproc(ab);

err_ce_free:
	ath12k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);

err_resource_deinit:
	ath12k_ahb_resource_deinit(ab);

err_core_free:
	ath12k_core_free(ab);
	platform_set_drvdata(pdev, NULL);

	return ret;
}

static void ath12k_ahb_remove_prepare(struct ath12k_base *ab)
{
	unsigned long left;

	if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) {
		left = wait_for_completion_timeout(&ab->driver_recovery,
						   ATH12K_AHB_RECOVERY_TIMEOUT);
		if (!left)
			ath12k_warn(ab, "failed to receive recovery response completion\n");
	}

	set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
	cancel_work_sync(&ab->restart_work);
	cancel_work_sync(&ab->qmi.event_work);
}

static void ath12k_ahb_free_resources(struct ath12k_base *ab)
{
	struct platform_device *pdev = ab->pdev;

	ath12k_hal_srng_deinit(ab);
	ath12k_ce_free_pipes(ab);
	ath12k_ahb_resource_deinit(ab);
	ath12k_ahb_deconfigure_rproc(ab);
	ath12k_core_free(ab);
	platform_set_drvdata(pdev, NULL);
}

static void ath12k_ahb_remove(struct platform_device *pdev)
{
	struct ath12k_base *ab = platform_get_drvdata(pdev);

	if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath12k_ahb_power_down(ab, false);
		goto qmi_fail;
	}

	ath12k_ahb_remove_prepare(ab);
	ath12k_core_hw_group_cleanup(ab->ag);
qmi_fail:
	ath12k_core_deinit(ab);
	ath12k_ahb_free_resources(ab);
}

static struct platform_driver ath12k_ahb_driver = {
	.driver         = {
		.name   = "ath12k_ahb",
		.of_match_table = ath12k_ahb_of_match,
	},
	.probe  = ath12k_ahb_probe,
	.remove = ath12k_ahb_remove,
};

int ath12k_ahb_init(void)
{
	return platform_driver_register(&ath12k_ahb_driver);
}

void ath12k_ahb_exit(void)
{
	platform_driver_unregister(&ath12k_ahb_driver);
}
1157