xref: /linux/drivers/net/wireless/ath/ath11k/ahb.c (revision 3286f88f31da060ac2789cee247153961ba57e49)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/platform_device.h>
9 #include <linux/of_device.h>
10 #include <linux/of.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/of_address.h>
13 #include <linux/iommu.h>
14 #include "ahb.h"
15 #include "debug.h"
16 #include "hif.h"
17 #include <linux/remoteproc.h>
18 #include "pcic.h"
19 #include <linux/soc/qcom/smem.h>
20 #include <linux/soc/qcom/smem_state.h>
21 
22 static const struct of_device_id ath11k_ahb_of_match[] = {
23 	/* TODO: Should we change the compatible string to something similar
24 	 * to one that ath10k uses?
25 	 */
26 	{ .compatible = "qcom,ipq8074-wifi",
27 	  .data = (void *)ATH11K_HW_IPQ8074,
28 	},
29 	{ .compatible = "qcom,ipq6018-wifi",
30 	  .data = (void *)ATH11K_HW_IPQ6018_HW10,
31 	},
32 	{ .compatible = "qcom,wcn6750-wifi",
33 	  .data = (void *)ATH11K_HW_WCN6750_HW10,
34 	},
35 	{ .compatible = "qcom,ipq5018-wifi",
36 	  .data = (void *)ATH11K_HW_IPQ5018_HW10,
37 	},
38 	{ }
39 };
40 
41 MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
42 
43 #define ATH11K_IRQ_CE0_OFFSET 4
44 
45 static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
46 	"misc-pulse1",
47 	"misc-latch",
48 	"sw-exception",
49 	"watchdog",
50 	"ce0",
51 	"ce1",
52 	"ce2",
53 	"ce3",
54 	"ce4",
55 	"ce5",
56 	"ce6",
57 	"ce7",
58 	"ce8",
59 	"ce9",
60 	"ce10",
61 	"ce11",
62 	"host2wbm-desc-feed",
63 	"host2reo-re-injection",
64 	"host2reo-command",
65 	"host2rxdma-monitor-ring3",
66 	"host2rxdma-monitor-ring2",
67 	"host2rxdma-monitor-ring1",
68 	"reo2ost-exception",
69 	"wbm2host-rx-release",
70 	"reo2host-status",
71 	"reo2host-destination-ring4",
72 	"reo2host-destination-ring3",
73 	"reo2host-destination-ring2",
74 	"reo2host-destination-ring1",
75 	"rxdma2host-monitor-destination-mac3",
76 	"rxdma2host-monitor-destination-mac2",
77 	"rxdma2host-monitor-destination-mac1",
78 	"ppdu-end-interrupts-mac3",
79 	"ppdu-end-interrupts-mac2",
80 	"ppdu-end-interrupts-mac1",
81 	"rxdma2host-monitor-status-ring-mac3",
82 	"rxdma2host-monitor-status-ring-mac2",
83 	"rxdma2host-monitor-status-ring-mac1",
84 	"host2rxdma-host-buf-ring-mac3",
85 	"host2rxdma-host-buf-ring-mac2",
86 	"host2rxdma-host-buf-ring-mac1",
87 	"rxdma2host-destination-ring-mac3",
88 	"rxdma2host-destination-ring-mac2",
89 	"rxdma2host-destination-ring-mac1",
90 	"host2tcl-input-ring4",
91 	"host2tcl-input-ring3",
92 	"host2tcl-input-ring2",
93 	"host2tcl-input-ring1",
94 	"wbm2host-tx-completions-ring3",
95 	"wbm2host-tx-completions-ring2",
96 	"wbm2host-tx-completions-ring1",
97 	"tcl2host-status-ring",
98 };
99 
100 /* enum ext_irq_num - irq numbers that can be used by external modules
101  * like datapath
102  */
103 enum ext_irq_num {
104 	host2wbm_desc_feed = 16,
105 	host2reo_re_injection,
106 	host2reo_command,
107 	host2rxdma_monitor_ring3,
108 	host2rxdma_monitor_ring2,
109 	host2rxdma_monitor_ring1,
110 	reo2host_exception,
111 	wbm2host_rx_release,
112 	reo2host_status,
113 	reo2host_destination_ring4,
114 	reo2host_destination_ring3,
115 	reo2host_destination_ring2,
116 	reo2host_destination_ring1,
117 	rxdma2host_monitor_destination_mac3,
118 	rxdma2host_monitor_destination_mac2,
119 	rxdma2host_monitor_destination_mac1,
120 	ppdu_end_interrupts_mac3,
121 	ppdu_end_interrupts_mac2,
122 	ppdu_end_interrupts_mac1,
123 	rxdma2host_monitor_status_ring_mac3,
124 	rxdma2host_monitor_status_ring_mac2,
125 	rxdma2host_monitor_status_ring_mac1,
126 	host2rxdma_host_buf_ring_mac3,
127 	host2rxdma_host_buf_ring_mac2,
128 	host2rxdma_host_buf_ring_mac1,
129 	rxdma2host_destination_ring_mac3,
130 	rxdma2host_destination_ring_mac2,
131 	rxdma2host_destination_ring_mac1,
132 	host2tcl_input_ring4,
133 	host2tcl_input_ring3,
134 	host2tcl_input_ring2,
135 	host2tcl_input_ring1,
136 	wbm2host_tx_completions_ring3,
137 	wbm2host_tx_completions_ring2,
138 	wbm2host_tx_completions_ring1,
139 	tcl2host_status_ring,
140 };
141 
142 static int
143 ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
144 {
145 	return ab->pci.msi.irqs[vector];
146 }
147 
148 static inline u32
149 ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
150 {
151 	u32 window_start = 0;
152 
153 	/* If offset lies within DP register range, use 1st window */
154 	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
155 		window_start = ATH11K_PCI_WINDOW_START;
156 	/* If offset lies within CE register range, use 2nd window */
157 	else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
158 		 ATH11K_PCI_WINDOW_RANGE_MASK)
159 		window_start = 2 * ATH11K_PCI_WINDOW_START;
160 
161 	return window_start;
162 }
163 
164 static void
165 ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
166 {
167 	u32 window_start;
168 
169 	/* WCN6750 uses static window based register access*/
170 	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
171 
172 	iowrite32(value, ab->mem + window_start +
173 		  (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
174 }
175 
176 static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
177 {
178 	u32 window_start;
179 	u32 val;
180 
181 	/* WCN6750 uses static window based register access */
182 	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
183 
184 	val = ioread32(ab->mem + window_start +
185 		       (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
186 	return val;
187 }
188 
189 static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
190 	.wakeup = NULL,
191 	.release = NULL,
192 	.get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
193 	.window_write32 = ath11k_ahb_window_write32_wcn6750,
194 	.window_read32 = ath11k_ahb_window_read32_wcn6750,
195 };
196 
197 static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
198 {
199 	return ioread32(ab->mem + offset);
200 }
201 
202 static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
203 {
204 	iowrite32(value, ab->mem + offset);
205 }
206 
207 static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
208 {
209 	int i;
210 
211 	for (i = 0; i < ab->hw_params.ce_count; i++) {
212 		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
213 
214 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
215 			continue;
216 
217 		tasklet_kill(&ce_pipe->intr_tq);
218 	}
219 }
220 
221 static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
222 {
223 	int i;
224 
225 	for (i = 0; i < irq_grp->num_irq; i++)
226 		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
227 }
228 
229 static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
230 {
231 	int i;
232 
233 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
234 		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
235 
236 		ath11k_ahb_ext_grp_disable(irq_grp);
237 
238 		if (irq_grp->napi_enabled) {
239 			napi_synchronize(&irq_grp->napi);
240 			napi_disable(&irq_grp->napi);
241 			irq_grp->napi_enabled = false;
242 		}
243 	}
244 }
245 
246 static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
247 {
248 	int i;
249 
250 	for (i = 0; i < irq_grp->num_irq; i++)
251 		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
252 }
253 
254 static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
255 {
256 	u32 val;
257 
258 	val = ath11k_ahb_read32(ab, offset);
259 	ath11k_ahb_write32(ab, offset, val | BIT(bit));
260 }
261 
262 static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
263 {
264 	u32 val;
265 
266 	val = ath11k_ahb_read32(ab, offset);
267 	ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
268 }
269 
270 static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
271 {
272 	const struct ce_attr *ce_attr;
273 	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
274 	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
275 
276 	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
277 	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
278 	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
279 
280 	ce_attr = &ab->hw_params.host_ce_config[ce_id];
281 	if (ce_attr->src_nentries)
282 		ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
283 
284 	if (ce_attr->dest_nentries) {
285 		ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
286 		ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
287 				    ie3_reg_addr);
288 	}
289 }
290 
291 static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
292 {
293 	const struct ce_attr *ce_attr;
294 	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
295 	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
296 
297 	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
298 	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
299 	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
300 
301 	ce_attr = &ab->hw_params.host_ce_config[ce_id];
302 	if (ce_attr->src_nentries)
303 		ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
304 
305 	if (ce_attr->dest_nentries) {
306 		ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
307 		ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
308 				      ie3_reg_addr);
309 	}
310 }
311 
312 static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
313 {
314 	int i;
315 	int irq_idx;
316 
317 	for (i = 0; i < ab->hw_params.ce_count; i++) {
318 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
319 			continue;
320 
321 		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
322 		synchronize_irq(ab->irq_num[irq_idx]);
323 	}
324 }
325 
326 static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
327 {
328 	int i, j;
329 	int irq_idx;
330 
331 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
332 		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
333 
334 		for (j = 0; j < irq_grp->num_irq; j++) {
335 			irq_idx = irq_grp->irqs[j];
336 			synchronize_irq(ab->irq_num[irq_idx]);
337 		}
338 	}
339 }
340 
341 static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
342 {
343 	int i;
344 
345 	for (i = 0; i < ab->hw_params.ce_count; i++) {
346 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
347 			continue;
348 		ath11k_ahb_ce_irq_enable(ab, i);
349 	}
350 }
351 
352 static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
353 {
354 	int i;
355 
356 	for (i = 0; i < ab->hw_params.ce_count; i++) {
357 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
358 			continue;
359 		ath11k_ahb_ce_irq_disable(ab, i);
360 	}
361 }
362 
363 static int ath11k_ahb_start(struct ath11k_base *ab)
364 {
365 	ath11k_ahb_ce_irqs_enable(ab);
366 	ath11k_ce_rx_post_buf(ab);
367 
368 	return 0;
369 }
370 
371 static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
372 {
373 	int i;
374 
375 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
376 		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
377 
378 		if (!irq_grp->napi_enabled) {
379 			napi_enable(&irq_grp->napi);
380 			irq_grp->napi_enabled = true;
381 		}
382 		ath11k_ahb_ext_grp_enable(irq_grp);
383 	}
384 }
385 
386 static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
387 {
388 	__ath11k_ahb_ext_irq_disable(ab);
389 	ath11k_ahb_sync_ext_irqs(ab);
390 }
391 
392 static void ath11k_ahb_stop(struct ath11k_base *ab)
393 {
394 	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
395 		ath11k_ahb_ce_irqs_disable(ab);
396 	ath11k_ahb_sync_ce_irqs(ab);
397 	ath11k_ahb_kill_tasklets(ab);
398 	del_timer_sync(&ab->rx_replenish_retry);
399 	ath11k_ce_cleanup_pipes(ab);
400 }
401 
402 static int ath11k_ahb_power_up(struct ath11k_base *ab)
403 {
404 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
405 	int ret;
406 
407 	ret = rproc_boot(ab_ahb->tgt_rproc);
408 	if (ret)
409 		ath11k_err(ab, "failed to boot the remote processor Q6\n");
410 
411 	return ret;
412 }
413 
414 static void ath11k_ahb_power_down(struct ath11k_base *ab)
415 {
416 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
417 
418 	rproc_shutdown(ab_ahb->tgt_rproc);
419 }
420 
421 static int ath11k_ahb_fwreset_from_cold_boot(struct ath11k_base *ab)
422 {
423 	int timeout;
424 
425 	if (ath11k_cold_boot_cal == 0 || ab->qmi.cal_done ||
426 	    ab->hw_params.cold_boot_calib == 0 ||
427 	    ab->hw_params.cbcal_restart_fw == 0)
428 		return 0;
429 
430 	ath11k_dbg(ab, ATH11K_DBG_AHB, "wait for cold boot done\n");
431 	timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
432 				     (ab->qmi.cal_done  == 1),
433 				     ATH11K_COLD_BOOT_FW_RESET_DELAY);
434 	if (timeout <= 0) {
435 		ath11k_cold_boot_cal = 0;
436 		ath11k_warn(ab, "Coldboot Calibration failed timed out\n");
437 	}
438 
439 	/* reset the firmware */
440 	ath11k_ahb_power_down(ab);
441 	ath11k_ahb_power_up(ab);
442 
443 	ath11k_dbg(ab, ATH11K_DBG_AHB, "exited from cold boot mode\n");
444 	return 0;
445 }
446 
447 static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
448 {
449 	struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
450 
451 	cfg->tgt_ce_len = ab->hw_params.target_ce_count;
452 	cfg->tgt_ce = ab->hw_params.target_ce_config;
453 	cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
454 	cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
455 	ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
456 }
457 
458 static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
459 {
460 	int i, j;
461 
462 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
463 		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
464 
465 		for (j = 0; j < irq_grp->num_irq; j++)
466 			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
467 
468 		netif_napi_del(&irq_grp->napi);
469 	}
470 }
471 
472 static void ath11k_ahb_free_irq(struct ath11k_base *ab)
473 {
474 	int irq_idx;
475 	int i;
476 
477 	if (ab->hw_params.hybrid_bus_type)
478 		return ath11k_pcic_free_irq(ab);
479 
480 	for (i = 0; i < ab->hw_params.ce_count; i++) {
481 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
482 			continue;
483 		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
484 		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
485 	}
486 
487 	ath11k_ahb_free_ext_irq(ab);
488 }
489 
490 static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
491 {
492 	struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
493 
494 	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
495 
496 	ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
497 }
498 
499 static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
500 {
501 	struct ath11k_ce_pipe *ce_pipe = arg;
502 
503 	/* last interrupt received for this CE */
504 	ce_pipe->timestamp = jiffies;
505 
506 	ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
507 
508 	tasklet_schedule(&ce_pipe->intr_tq);
509 
510 	return IRQ_HANDLED;
511 }
512 
513 static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
514 {
515 	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
516 						struct ath11k_ext_irq_grp,
517 						napi);
518 	struct ath11k_base *ab = irq_grp->ab;
519 	int work_done;
520 
521 	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
522 	if (work_done < budget) {
523 		napi_complete_done(napi, work_done);
524 		ath11k_ahb_ext_grp_enable(irq_grp);
525 	}
526 
527 	if (work_done > budget)
528 		work_done = budget;
529 
530 	return work_done;
531 }
532 
533 static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
534 {
535 	struct ath11k_ext_irq_grp *irq_grp = arg;
536 
537 	/* last interrupt received for this group */
538 	irq_grp->timestamp = jiffies;
539 
540 	ath11k_ahb_ext_grp_disable(irq_grp);
541 
542 	napi_schedule(&irq_grp->napi);
543 
544 	return IRQ_HANDLED;
545 }
546 
547 static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
548 {
549 	struct ath11k_hw_params *hw = &ab->hw_params;
550 	int i, j;
551 	int irq;
552 	int ret;
553 
554 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
555 		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
556 		u32 num_irq = 0;
557 
558 		irq_grp->ab = ab;
559 		irq_grp->grp_id = i;
560 		init_dummy_netdev(&irq_grp->napi_ndev);
561 		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
562 			       ath11k_ahb_ext_grp_napi_poll);
563 
564 		for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
565 			if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
566 				irq_grp->irqs[num_irq++] =
567 					wbm2host_tx_completions_ring1 - j;
568 			}
569 
570 			if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
571 				irq_grp->irqs[num_irq++] =
572 					reo2host_destination_ring1 - j;
573 			}
574 
575 			if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
576 				irq_grp->irqs[num_irq++] = reo2host_exception;
577 
578 			if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
579 				irq_grp->irqs[num_irq++] = wbm2host_rx_release;
580 
581 			if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
582 				irq_grp->irqs[num_irq++] = reo2host_status;
583 
584 			if (j < ab->hw_params.max_radios) {
585 				if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
586 					irq_grp->irqs[num_irq++] =
587 						rxdma2host_destination_ring_mac1 -
588 						ath11k_hw_get_mac_from_pdev_id(hw, j);
589 				}
590 
591 				if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
592 					irq_grp->irqs[num_irq++] =
593 						host2rxdma_host_buf_ring_mac1 -
594 						ath11k_hw_get_mac_from_pdev_id(hw, j);
595 				}
596 
597 				if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
598 					irq_grp->irqs[num_irq++] =
599 						ppdu_end_interrupts_mac1 -
600 						ath11k_hw_get_mac_from_pdev_id(hw, j);
601 					irq_grp->irqs[num_irq++] =
602 						rxdma2host_monitor_status_ring_mac1 -
603 						ath11k_hw_get_mac_from_pdev_id(hw, j);
604 				}
605 			}
606 		}
607 		irq_grp->num_irq = num_irq;
608 
609 		for (j = 0; j < irq_grp->num_irq; j++) {
610 			int irq_idx = irq_grp->irqs[j];
611 
612 			irq = platform_get_irq_byname(ab->pdev,
613 						      irq_name[irq_idx]);
614 			ab->irq_num[irq_idx] = irq;
615 			irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
616 			ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
617 					  IRQF_TRIGGER_RISING,
618 					  irq_name[irq_idx], irq_grp);
619 			if (ret) {
620 				ath11k_err(ab, "failed request_irq for %d\n",
621 					   irq);
622 			}
623 		}
624 	}
625 
626 	return 0;
627 }
628 
629 static int ath11k_ahb_config_irq(struct ath11k_base *ab)
630 {
631 	int irq, irq_idx, i;
632 	int ret;
633 
634 	if (ab->hw_params.hybrid_bus_type)
635 		return ath11k_pcic_config_irq(ab);
636 
637 	/* Configure CE irqs */
638 	for (i = 0; i < ab->hw_params.ce_count; i++) {
639 		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
640 
641 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
642 			continue;
643 
644 		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
645 
646 		tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
647 		irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
648 		ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
649 				  IRQF_TRIGGER_RISING, irq_name[irq_idx],
650 				  ce_pipe);
651 		if (ret)
652 			return ret;
653 
654 		ab->irq_num[irq_idx] = irq;
655 	}
656 
657 	/* Configure external interrupts */
658 	ret = ath11k_ahb_config_ext_irq(ab);
659 
660 	return ret;
661 }
662 
663 static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
664 					  u8 *ul_pipe, u8 *dl_pipe)
665 {
666 	const struct service_to_pipe *entry;
667 	bool ul_set = false, dl_set = false;
668 	int i;
669 
670 	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
671 		entry = &ab->hw_params.svc_to_ce_map[i];
672 
673 		if (__le32_to_cpu(entry->service_id) != service_id)
674 			continue;
675 
676 		switch (__le32_to_cpu(entry->pipedir)) {
677 		case PIPEDIR_NONE:
678 			break;
679 		case PIPEDIR_IN:
680 			WARN_ON(dl_set);
681 			*dl_pipe = __le32_to_cpu(entry->pipenum);
682 			dl_set = true;
683 			break;
684 		case PIPEDIR_OUT:
685 			WARN_ON(ul_set);
686 			*ul_pipe = __le32_to_cpu(entry->pipenum);
687 			ul_set = true;
688 			break;
689 		case PIPEDIR_INOUT:
690 			WARN_ON(dl_set);
691 			WARN_ON(ul_set);
692 			*dl_pipe = __le32_to_cpu(entry->pipenum);
693 			*ul_pipe = __le32_to_cpu(entry->pipenum);
694 			dl_set = true;
695 			ul_set = true;
696 			break;
697 		}
698 	}
699 
700 	if (WARN_ON(!ul_set || !dl_set))
701 		return -ENOENT;
702 
703 	return 0;
704 }
705 
706 static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
707 {
708 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
709 	u32 wake_irq;
710 	u32 value = 0;
711 	int ret;
712 
713 	if (!device_may_wakeup(ab->dev))
714 		return -EPERM;
715 
716 	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
717 
718 	ret = enable_irq_wake(wake_irq);
719 	if (ret) {
720 		ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
721 		return ret;
722 	}
723 
724 	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
725 				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
726 	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
727 				 ATH11K_AHB_SMP2P_SMEM_MSG);
728 
729 	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
730 					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
731 	if (ret) {
732 		ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
733 		return ret;
734 	}
735 
736 	ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n");
737 
738 	return ret;
739 }
740 
741 static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
742 {
743 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
744 	u32 wake_irq;
745 	u32 value = 0;
746 	int ret;
747 
748 	if (!device_may_wakeup(ab->dev))
749 		return -EPERM;
750 
751 	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
752 
753 	ret = disable_irq_wake(wake_irq);
754 	if (ret) {
755 		ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
756 		return ret;
757 	}
758 
759 	reinit_completion(&ab->wow.wakeup_completed);
760 
761 	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
762 				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
763 	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
764 				 ATH11K_AHB_SMP2P_SMEM_MSG);
765 
766 	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
767 					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
768 	if (ret) {
769 		ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
770 		return ret;
771 	}
772 
773 	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
774 	if (ret == 0) {
775 		ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
776 		return -ETIMEDOUT;
777 	}
778 
779 	ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n");
780 
781 	return 0;
782 }
783 
784 static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
785 	.start = ath11k_ahb_start,
786 	.stop = ath11k_ahb_stop,
787 	.read32 = ath11k_ahb_read32,
788 	.write32 = ath11k_ahb_write32,
789 	.read = NULL,
790 	.irq_enable = ath11k_ahb_ext_irq_enable,
791 	.irq_disable = ath11k_ahb_ext_irq_disable,
792 	.map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
793 	.power_down = ath11k_ahb_power_down,
794 	.power_up = ath11k_ahb_power_up,
795 };
796 
797 static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
798 	.start = ath11k_pcic_start,
799 	.stop = ath11k_pcic_stop,
800 	.read32 = ath11k_pcic_read32,
801 	.write32 = ath11k_pcic_write32,
802 	.read = NULL,
803 	.irq_enable = ath11k_pcic_ext_irq_enable,
804 	.irq_disable = ath11k_pcic_ext_irq_disable,
805 	.get_msi_address =  ath11k_pcic_get_msi_address,
806 	.get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
807 	.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
808 	.power_down = ath11k_ahb_power_down,
809 	.power_up = ath11k_ahb_power_up,
810 	.suspend = ath11k_ahb_hif_suspend,
811 	.resume = ath11k_ahb_hif_resume,
812 	.ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
813 	.ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
814 };
815 
816 static int ath11k_core_get_rproc(struct ath11k_base *ab)
817 {
818 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
819 	struct device *dev = ab->dev;
820 	struct rproc *prproc;
821 	phandle rproc_phandle;
822 
823 	if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
824 		ath11k_err(ab, "failed to get q6_rproc handle\n");
825 		return -ENOENT;
826 	}
827 
828 	prproc = rproc_get_by_phandle(rproc_phandle);
829 	if (!prproc) {
830 		ath11k_err(ab, "failed to get rproc\n");
831 		return -EINVAL;
832 	}
833 	ab_ahb->tgt_rproc = prproc;
834 
835 	return 0;
836 }
837 
838 static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
839 {
840 	struct platform_device *pdev = ab->pdev;
841 	phys_addr_t msi_addr_pa;
842 	dma_addr_t msi_addr_iova;
843 	struct resource *res;
844 	int int_prop;
845 	int ret;
846 	int i;
847 
848 	ret = ath11k_pcic_init_msi_config(ab);
849 	if (ret) {
850 		ath11k_err(ab, "failed to init msi config: %d\n", ret);
851 		return ret;
852 	}
853 
854 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
855 	if (!res) {
856 		ath11k_err(ab, "failed to fetch msi_addr\n");
857 		return -ENOENT;
858 	}
859 
860 	msi_addr_pa = res->start;
861 	msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
862 					 DMA_FROM_DEVICE, 0);
863 	if (dma_mapping_error(ab->dev, msi_addr_iova))
864 		return -ENOMEM;
865 
866 	ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
867 	ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);
868 
869 	ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
870 	if (ret)
871 		return ret;
872 
873 	ab->pci.msi.ep_base_data = int_prop + 32;
874 
875 	for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
876 		ret = platform_get_irq(pdev, i);
877 		if (ret < 0)
878 			return ret;
879 
880 		ab->pci.msi.irqs[i] = ret;
881 	}
882 
883 	set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
884 
885 	return 0;
886 }
887 
888 static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
889 {
890 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
891 
892 	if (!ab->hw_params.smp2p_wow_exit)
893 		return 0;
894 
895 	ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
896 							    &ab_ahb->smp2p_info.smem_bit);
897 	if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
898 		ath11k_err(ab, "failed to fetch smem state: %ld\n",
899 			   PTR_ERR(ab_ahb->smp2p_info.smem_state));
900 		return PTR_ERR(ab_ahb->smp2p_info.smem_state);
901 	}
902 
903 	return 0;
904 }
905 
906 static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
907 {
908 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
909 
910 	if (!ab->hw_params.smp2p_wow_exit)
911 		return;
912 
913 	qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
914 }
915 
916 static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
917 {
918 	struct platform_device *pdev = ab->pdev;
919 	struct resource *mem_res;
920 	void __iomem *mem;
921 
922 	if (ab->hw_params.hybrid_bus_type)
923 		return ath11k_ahb_setup_msi_resources(ab);
924 
925 	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
926 	if (IS_ERR(mem)) {
927 		dev_err(&pdev->dev, "ioremap error\n");
928 		return PTR_ERR(mem);
929 	}
930 
931 	ab->mem = mem;
932 	ab->mem_len = resource_size(mem_res);
933 
934 	return 0;
935 }
936 
937 static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
938 {
939 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
940 	struct device *dev = ab->dev;
941 	struct device_node *node;
942 	struct resource r;
943 	int ret;
944 
945 	node = of_parse_phandle(dev->of_node, "memory-region", 0);
946 	if (!node)
947 		return -ENOENT;
948 
949 	ret = of_address_to_resource(node, 0, &r);
950 	of_node_put(node);
951 	if (ret) {
952 		dev_err(dev, "failed to resolve msa fixed region\n");
953 		return ret;
954 	}
955 
956 	ab_ahb->fw.msa_paddr = r.start;
957 	ab_ahb->fw.msa_size = resource_size(&r);
958 
959 	node = of_parse_phandle(dev->of_node, "memory-region", 1);
960 	if (!node)
961 		return -ENOENT;
962 
963 	ret = of_address_to_resource(node, 0, &r);
964 	of_node_put(node);
965 	if (ret) {
966 		dev_err(dev, "failed to resolve ce fixed region\n");
967 		return ret;
968 	}
969 
970 	ab_ahb->fw.ce_paddr = r.start;
971 	ab_ahb->fw.ce_size = resource_size(&r);
972 
973 	return 0;
974 }
975 
976 static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
977 {
978 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
979 	struct device *host_dev = ab->dev;
980 	struct platform_device_info info = {0};
981 	struct iommu_domain *iommu_dom;
982 	struct platform_device *pdev;
983 	struct device_node *node;
984 	int ret;
985 
986 	/* Chipsets not requiring MSA need not initialize
987 	 * MSA resources, return success in such cases.
988 	 */
989 	if (!ab->hw_params.fixed_fw_mem)
990 		return 0;
991 
992 	ret = ath11k_ahb_setup_msa_resources(ab);
993 	if (ret) {
994 		ath11k_err(ab, "failed to setup msa resources\n");
995 		return ret;
996 	}
997 
998 	node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
999 	if (!node) {
1000 		ab_ahb->fw.use_tz = true;
1001 		return 0;
1002 	}
1003 
1004 	info.fwnode = &node->fwnode;
1005 	info.parent = host_dev;
1006 	info.name = node->name;
1007 	info.dma_mask = DMA_BIT_MASK(32);
1008 
1009 	pdev = platform_device_register_full(&info);
1010 	if (IS_ERR(pdev)) {
1011 		of_node_put(node);
1012 		return PTR_ERR(pdev);
1013 	}
1014 
1015 	ret = of_dma_configure(&pdev->dev, node, true);
1016 	if (ret) {
1017 		ath11k_err(ab, "dma configure fail: %d\n", ret);
1018 		goto err_unregister;
1019 	}
1020 
1021 	ab_ahb->fw.dev = &pdev->dev;
1022 
1023 	iommu_dom = iommu_domain_alloc(&platform_bus_type);
1024 	if (!iommu_dom) {
1025 		ath11k_err(ab, "failed to allocate iommu domain\n");
1026 		ret = -ENOMEM;
1027 		goto err_unregister;
1028 	}
1029 
1030 	ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
1031 	if (ret) {
1032 		ath11k_err(ab, "could not attach device: %d\n", ret);
1033 		goto err_iommu_free;
1034 	}
1035 
1036 	ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
1037 			ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
1038 			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
1039 	if (ret) {
1040 		ath11k_err(ab, "failed to map firmware region: %d\n", ret);
1041 		goto err_iommu_detach;
1042 	}
1043 
1044 	ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
1045 			ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
1046 			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
1047 	if (ret) {
1048 		ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
1049 		goto err_iommu_unmap;
1050 	}
1051 
1052 	ab_ahb->fw.use_tz = false;
1053 	ab_ahb->fw.iommu_domain = iommu_dom;
1054 	of_node_put(node);
1055 
1056 	return 0;
1057 
1058 err_iommu_unmap:
1059 	iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
1060 
1061 err_iommu_detach:
1062 	iommu_detach_device(iommu_dom, ab_ahb->fw.dev);
1063 
1064 err_iommu_free:
1065 	iommu_domain_free(iommu_dom);
1066 
1067 err_unregister:
1068 	platform_device_unregister(pdev);
1069 	of_node_put(node);
1070 
1071 	return ret;
1072 }
1073 
1074 static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
1075 {
1076 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
1077 	struct iommu_domain *iommu;
1078 	size_t unmapped_size;
1079 
1080 	/* Chipsets not requiring MSA would have not initialized
1081 	 * MSA resources, return success in such cases.
1082 	 */
1083 	if (!ab->hw_params.fixed_fw_mem)
1084 		return 0;
1085 
1086 	if (ab_ahb->fw.use_tz)
1087 		return 0;
1088 
1089 	iommu = ab_ahb->fw.iommu_domain;
1090 
1091 	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
1092 	if (unmapped_size != ab_ahb->fw.msa_size)
1093 		ath11k_err(ab, "failed to unmap firmware: %zu\n",
1094 			   unmapped_size);
1095 
1096 	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
1097 	if (unmapped_size != ab_ahb->fw.ce_size)
1098 		ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
1099 			   unmapped_size);
1100 
1101 	iommu_detach_device(iommu, ab_ahb->fw.dev);
1102 	iommu_domain_free(iommu);
1103 
1104 	platform_device_unregister(to_platform_device(ab_ahb->fw.dev));
1105 
1106 	return 0;
1107 }
1108 
1109 static int ath11k_ahb_probe(struct platform_device *pdev)
1110 {
1111 	struct ath11k_base *ab;
1112 	const struct of_device_id *of_id;
1113 	const struct ath11k_hif_ops *hif_ops;
1114 	const struct ath11k_pci_ops *pci_ops;
1115 	enum ath11k_hw_rev hw_rev;
1116 	int ret;
1117 
1118 	of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
1119 	if (!of_id) {
1120 		dev_err(&pdev->dev, "failed to find matching device tree id\n");
1121 		return -EINVAL;
1122 	}
1123 
1124 	hw_rev = (enum ath11k_hw_rev)of_id->data;
1125 
1126 	switch (hw_rev) {
1127 	case ATH11K_HW_IPQ8074:
1128 	case ATH11K_HW_IPQ6018_HW10:
1129 	case ATH11K_HW_IPQ5018_HW10:
1130 		hif_ops = &ath11k_ahb_hif_ops_ipq8074;
1131 		pci_ops = NULL;
1132 		break;
1133 	case ATH11K_HW_WCN6750_HW10:
1134 		hif_ops = &ath11k_ahb_hif_ops_wcn6750;
1135 		pci_ops = &ath11k_ahb_pci_ops_wcn6750;
1136 		break;
1137 	default:
1138 		dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
1139 		return -EOPNOTSUPP;
1140 	}
1141 
1142 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1143 	if (ret) {
1144 		dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
1145 		return ret;
1146 	}
1147 
1148 	ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
1149 			       ATH11K_BUS_AHB);
1150 	if (!ab) {
1151 		dev_err(&pdev->dev, "failed to allocate ath11k base\n");
1152 		return -ENOMEM;
1153 	}
1154 
1155 	ab->hif.ops = hif_ops;
1156 	ab->pdev = pdev;
1157 	ab->hw_rev = hw_rev;
1158 	ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
1159 	platform_set_drvdata(pdev, ab);
1160 
1161 	ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
1162 	if (ret) {
1163 		ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
1164 		goto err_core_free;
1165 	}
1166 
1167 	ret = ath11k_core_pre_init(ab);
1168 	if (ret)
1169 		goto err_core_free;
1170 
1171 	ret = ath11k_ahb_setup_resources(ab);
1172 	if (ret)
1173 		goto err_core_free;
1174 
1175 	ab->mem_ce = ab->mem;
1176 
1177 	if (ab->hw_params.ce_remap) {
1178 		const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
1179 		/* ce register space is moved out of wcss unlike ipq8074 or ipq6018
1180 		 * and the space is not contiguous, hence remapping the CE registers
1181 		 * to a new space for accessing them.
1182 		 */
1183 		ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
1184 		if (!ab->mem_ce) {
1185 			dev_err(&pdev->dev, "ce ioremap error\n");
1186 			ret = -ENOMEM;
1187 			goto err_core_free;
1188 		}
1189 	}
1190 
1191 	ret = ath11k_ahb_fw_resources_init(ab);
1192 	if (ret)
1193 		goto err_core_free;
1194 
1195 	ret = ath11k_ahb_setup_smp2p_handle(ab);
1196 	if (ret)
1197 		goto err_fw_deinit;
1198 
1199 	ret = ath11k_hal_srng_init(ab);
1200 	if (ret)
1201 		goto err_release_smp2p_handle;
1202 
1203 	ret = ath11k_ce_alloc_pipes(ab);
1204 	if (ret) {
1205 		ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
1206 		goto err_hal_srng_deinit;
1207 	}
1208 
1209 	ath11k_ahb_init_qmi_ce_config(ab);
1210 
1211 	ret = ath11k_core_get_rproc(ab);
1212 	if (ret) {
1213 		ath11k_err(ab, "failed to get rproc: %d\n", ret);
1214 		goto err_ce_free;
1215 	}
1216 
1217 	ret = ath11k_core_init(ab);
1218 	if (ret) {
1219 		ath11k_err(ab, "failed to init core: %d\n", ret);
1220 		goto err_ce_free;
1221 	}
1222 
1223 	ret = ath11k_ahb_config_irq(ab);
1224 	if (ret) {
1225 		ath11k_err(ab, "failed to configure irq: %d\n", ret);
1226 		goto err_ce_free;
1227 	}
1228 
1229 	ath11k_ahb_fwreset_from_cold_boot(ab);
1230 
1231 	return 0;
1232 
1233 err_ce_free:
1234 	ath11k_ce_free_pipes(ab);
1235 
1236 err_hal_srng_deinit:
1237 	ath11k_hal_srng_deinit(ab);
1238 
1239 err_release_smp2p_handle:
1240 	ath11k_ahb_release_smp2p_handle(ab);
1241 
1242 err_fw_deinit:
1243 	ath11k_ahb_fw_resource_deinit(ab);
1244 
1245 err_core_free:
1246 	ath11k_core_free(ab);
1247 	platform_set_drvdata(pdev, NULL);
1248 
1249 	return ret;
1250 }
1251 
1252 static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
1253 {
1254 	unsigned long left;
1255 
1256 	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
1257 		left = wait_for_completion_timeout(&ab->driver_recovery,
1258 						   ATH11K_AHB_RECOVERY_TIMEOUT);
1259 		if (!left)
1260 			ath11k_warn(ab, "failed to receive recovery response completion\n");
1261 	}
1262 
1263 	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
1264 	cancel_work_sync(&ab->restart_work);
1265 	cancel_work_sync(&ab->qmi.event_work);
1266 }
1267 
1268 static void ath11k_ahb_free_resources(struct ath11k_base *ab)
1269 {
1270 	struct platform_device *pdev = ab->pdev;
1271 
1272 	ath11k_ahb_free_irq(ab);
1273 	ath11k_hal_srng_deinit(ab);
1274 	ath11k_ahb_release_smp2p_handle(ab);
1275 	ath11k_ahb_fw_resource_deinit(ab);
1276 	ath11k_ce_free_pipes(ab);
1277 
1278 	if (ab->hw_params.ce_remap)
1279 		iounmap(ab->mem_ce);
1280 
1281 	ath11k_core_free(ab);
1282 	platform_set_drvdata(pdev, NULL);
1283 }
1284 
1285 static int ath11k_ahb_remove(struct platform_device *pdev)
1286 {
1287 	struct ath11k_base *ab = platform_get_drvdata(pdev);
1288 
1289 	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1290 		ath11k_ahb_power_down(ab);
1291 		ath11k_debugfs_soc_destroy(ab);
1292 		ath11k_qmi_deinit_service(ab);
1293 		goto qmi_fail;
1294 	}
1295 
1296 	ath11k_ahb_remove_prepare(ab);
1297 	ath11k_core_deinit(ab);
1298 
1299 qmi_fail:
1300 	ath11k_ahb_free_resources(ab);
1301 
1302 	return 0;
1303 }
1304 
1305 static void ath11k_ahb_shutdown(struct platform_device *pdev)
1306 {
1307 	struct ath11k_base *ab = platform_get_drvdata(pdev);
1308 
1309 	/* platform shutdown() & remove() are mutually exclusive.
1310 	 * remove() is invoked during rmmod & shutdown() during
1311 	 * system reboot/shutdown.
1312 	 */
1313 	ath11k_ahb_remove_prepare(ab);
1314 
1315 	if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
1316 		goto free_resources;
1317 
1318 	ath11k_core_deinit(ab);
1319 
1320 free_resources:
1321 	ath11k_ahb_free_resources(ab);
1322 }
1323 
1324 static struct platform_driver ath11k_ahb_driver = {
1325 	.driver         = {
1326 		.name   = "ath11k",
1327 		.of_match_table = ath11k_ahb_of_match,
1328 	},
1329 	.probe  = ath11k_ahb_probe,
1330 	.remove = ath11k_ahb_remove,
1331 	.shutdown = ath11k_ahb_shutdown,
1332 };
1333 
1334 static int ath11k_ahb_init(void)
1335 {
1336 	return platform_driver_register(&ath11k_ahb_driver);
1337 }
1338 module_init(ath11k_ahb_init);
1339 
1340 static void ath11k_ahb_exit(void)
1341 {
1342 	platform_driver_unregister(&ath11k_ahb_driver);
1343 }
1344 module_exit(ath11k_ahb_exit);
1345 
1346 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
1347 MODULE_LICENSE("Dual BSD/GPL");
1348