xref: /linux/drivers/net/wireless/ath/ath11k/ahb.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/platform_device.h>
9 #include <linux/property.h>
10 #include <linux/of_device.h>
11 #include <linux/of.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/of_address.h>
14 #include <linux/iommu.h>
15 #include "ahb.h"
16 #include "debug.h"
17 #include "hif.h"
18 #include "qmi.h"
19 #include <linux/remoteproc.h>
20 #include "pcic.h"
21 #include <linux/soc/qcom/smem.h>
22 #include <linux/soc/qcom/smem_state.h>
23 
/* Device-tree match table: maps each supported compatible string to its
 * ath11k hardware revision, smuggled through .data as an integer cast.
 * Retrieved in probe via device_get_match_data().
 */
static const struct of_device_id ath11k_ahb_of_match[] = {
	/* TODO: Should we change the compatible string to something similar
	 * to one that ath10k uses?
	 */
	{ .compatible = "qcom,ipq8074-wifi",
	  .data = (void *)ATH11K_HW_IPQ8074,
	},
	{ .compatible = "qcom,ipq6018-wifi",
	  .data = (void *)ATH11K_HW_IPQ6018_HW10,
	},
	{ .compatible = "qcom,wcn6750-wifi",
	  .data = (void *)ATH11K_HW_WCN6750_HW10,
	},
	{ .compatible = "qcom,ipq5018-wifi",
	  .data = (void *)ATH11K_HW_IPQ5018_HW10,
	},
	{ }
};

MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
44 
/* Index of the first CE interrupt within irq_name[] below */
#define ATH11K_IRQ_CE0_OFFSET 4

/* Interrupt names looked up by platform_get_irq_byname(); they must match
 * the interrupt-names property of the DT node byte-for-byte. Entries 16+
 * line up with enum ext_irq_num, entries 4..15 with CE ids 0..11.
 * NOTE(review): "reo2ost-exception" looks like a typo for "reo2host-…"
 * but is kept as-is because the DT bindings use this exact string.
 */
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
	"misc-pulse1",
	"misc-latch",
	"sw-exception",
	"watchdog",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};
101 
/* enum ext_irq_num - irq numbers that can be used by external modules
 * like datapath
 *
 * Values start at 16 so that each enumerator doubles as an index into
 * irq_name[] (entries 0-3 are misc irqs, 4-15 are the CE irqs). The
 * per-mac and per-ring entries are laid out so that "ring1"/"mac1" has
 * the highest value of its run; ath11k_ahb_config_ext_irq() relies on
 * this by computing e.g. wbm2host_tx_completions_ring1 - j.
 */
enum ext_irq_num {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
143 
/* Return the Linux irq number for MSI @vector, as recorded by
 * ath11k_ahb_setup_msi_resources() from the platform resources.
 */
static int
ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
{
	return ab->pci.msi.irqs[vector];
}
149 
150 static inline u32
151 ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
152 {
153 	u32 window_start = 0;
154 
155 	/* If offset lies within DP register range, use 1st window */
156 	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
157 		window_start = ATH11K_PCI_WINDOW_START;
158 	/* If offset lies within CE register range, use 2nd window */
159 	else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
160 		 ATH11K_PCI_WINDOW_RANGE_MASK)
161 		window_start = 2 * ATH11K_PCI_WINDOW_START;
162 
163 	return window_start;
164 }
165 
166 static void
167 ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
168 {
169 	u32 window_start;
170 
171 	/* WCN6750 uses static window based register access*/
172 	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
173 
174 	iowrite32(value, ab->mem + window_start +
175 		  (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
176 }
177 
178 static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
179 {
180 	u32 window_start;
181 	u32 val;
182 
183 	/* WCN6750 uses static window based register access */
184 	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
185 
186 	val = ioread32(ab->mem + window_start +
187 		       (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
188 	return val;
189 }
190 
/* PCIC backend ops for WCN6750 (hybrid AHB/PCI chip). No wakeup/release
 * hooks are needed since register access does not require a wake handshake.
 */
static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
	.wakeup = NULL,
	.release = NULL,
	.get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
	.window_write32 = ath11k_ahb_window_write32_wcn6750,
	.window_read32 = ath11k_ahb_window_read32_wcn6750,
};
198 
/* Plain (non-windowed) register read relative to the ab->mem mapping */
static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
{
	return ioread32(ab->mem + offset);
}
203 
/* Plain (non-windowed) register write relative to the ab->mem mapping */
static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	iowrite32(value, ab->mem + offset);
}
208 
209 static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
210 {
211 	int i;
212 
213 	for (i = 0; i < ab->hw_params.ce_count; i++) {
214 		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
215 
216 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
217 			continue;
218 
219 		tasklet_kill(&ce_pipe->intr_tq);
220 	}
221 }
222 
223 static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
224 {
225 	int i;
226 
227 	for (i = 0; i < irq_grp->num_irq; i++)
228 		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
229 }
230 
231 static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
232 {
233 	int i;
234 
235 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
236 		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
237 
238 		ath11k_ahb_ext_grp_disable(irq_grp);
239 
240 		if (irq_grp->napi_enabled) {
241 			napi_synchronize(&irq_grp->napi);
242 			napi_disable(&irq_grp->napi);
243 			irq_grp->napi_enabled = false;
244 		}
245 	}
246 }
247 
248 static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
249 {
250 	int i;
251 
252 	for (i = 0; i < irq_grp->num_irq; i++)
253 		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
254 }
255 
256 static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
257 {
258 	u32 val;
259 
260 	val = ath11k_ahb_read32(ab, offset);
261 	ath11k_ahb_write32(ab, offset, val | BIT(bit));
262 }
263 
264 static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
265 {
266 	u32 val;
267 
268 	val = ath11k_ahb_read32(ab, offset);
269 	ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
270 }
271 
/* Enable the interrupt sources of copy engine @ce_id.
 *
 * IE1 gates source-ring (tx) interrupts, IE2 destination-ring (rx)
 * interrupts, and IE3 the per-CE rx high-watermark bits, which are
 * shifted up by CE_HOST_IE_3_SHIFT. Only the directions the pipe
 * actually uses (non-zero nentries) are enabled.
 */
static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);

	ce_attr = &ab->hw_params.host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
		ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				    ie3_reg_addr);
	}
}
292 
/* Disable the interrupt sources of copy engine @ce_id; exact mirror of
 * ath11k_ahb_ce_irq_enable() (IE1 = src ring, IE2 = dest ring, IE3 =
 * shifted rx high-watermark bits).
 */
static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);

	ce_attr = &ab->hw_params.host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
		ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				      ie3_reg_addr);
	}
}
313 
314 static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
315 {
316 	int i;
317 	int irq_idx;
318 
319 	for (i = 0; i < ab->hw_params.ce_count; i++) {
320 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
321 			continue;
322 
323 		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
324 		synchronize_irq(ab->irq_num[irq_idx]);
325 	}
326 }
327 
328 static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
329 {
330 	int i, j;
331 	int irq_idx;
332 
333 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
334 		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
335 
336 		for (j = 0; j < irq_grp->num_irq; j++) {
337 			irq_idx = irq_grp->irqs[j];
338 			synchronize_irq(ab->irq_num[irq_idx]);
339 		}
340 	}
341 }
342 
343 static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
344 {
345 	int i;
346 
347 	for (i = 0; i < ab->hw_params.ce_count; i++) {
348 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
349 			continue;
350 		ath11k_ahb_ce_irq_enable(ab, i);
351 	}
352 }
353 
354 static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
355 {
356 	int i;
357 
358 	for (i = 0; i < ab->hw_params.ce_count; i++) {
359 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
360 			continue;
361 		ath11k_ahb_ce_irq_disable(ab, i);
362 	}
363 }
364 
/* hif .start op: unmask the CE interrupts and prime the rx rings with
 * buffers. Always succeeds.
 */
static int ath11k_ahb_start(struct ath11k_base *ab)
{
	ath11k_ahb_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}
372 
373 static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
374 {
375 	int i;
376 
377 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
378 		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
379 
380 		if (!irq_grp->napi_enabled) {
381 			napi_enable(&irq_grp->napi);
382 			irq_grp->napi_enabled = true;
383 		}
384 		ath11k_ahb_ext_grp_enable(irq_grp);
385 	}
386 }
387 
/* hif .irq_disable op: mask all DP irqs/NAPI, then wait for any handler
 * still running to complete.
 */
static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_ahb_ext_irq_disable(ab);
	ath11k_ahb_sync_ext_irqs(ab);
}
393 
/* hif .stop op: quiesce the copy engines in order — mask CE irqs (unless
 * a crash flush is in progress and they are already down), wait for
 * running handlers, kill the tasklets, stop the rx-replenish timer, then
 * tear down the pipes.
 */
static void ath11k_ahb_stop(struct ath11k_base *ab)
{
	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath11k_ahb_ce_irqs_disable(ab);
	ath11k_ahb_sync_ce_irqs(ab);
	ath11k_ahb_kill_tasklets(ab);
	del_timer_sync(&ab->rx_replenish_retry);
	ath11k_ce_cleanup_pipes(ab);
}
403 
/* hif .power_up op: the WLAN firmware runs on the Q6 remote processor,
 * so powering up the target means booting that rproc.
 * Returns 0 or the rproc_boot() error.
 */
static int ath11k_ahb_power_up(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	int ret;

	ret = rproc_boot(ab_ahb->tgt_rproc);
	if (ret)
		ath11k_err(ab, "failed to boot the remote processor Q6\n");

	return ret;
}
415 
/* hif .power_down op: shut down the Q6 remote processor running the
 * WLAN firmware.
 */
static void ath11k_ahb_power_down(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);

	rproc_shutdown(ab_ahb->tgt_rproc);
}
422 
/* Populate the QMI CE configuration from hw_params: the target-side CE
 * table, the service-to-CE map, and the QMI service instance id that
 * identifies this device to the firmware.
 */
static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
{
	struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;

	cfg->tgt_ce_len = ab->hw_params.target_ce_count;
	cfg->tgt_ce = ab->hw_params.target_ce_config;
	cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
	cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
	ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
}
433 
434 static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
435 {
436 	int i, j;
437 
438 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
439 		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
440 
441 		for (j = 0; j < irq_grp->num_irq; j++)
442 			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
443 
444 		netif_napi_del(&irq_grp->napi);
445 		free_netdev(irq_grp->napi_ndev);
446 	}
447 }
448 
449 static void ath11k_ahb_free_irq(struct ath11k_base *ab)
450 {
451 	int irq_idx;
452 	int i;
453 
454 	if (ab->hw_params.hybrid_bus_type)
455 		return ath11k_pcic_free_irq(ab);
456 
457 	for (i = 0; i < ab->hw_params.ce_count; i++) {
458 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
459 			continue;
460 		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
461 		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
462 	}
463 
464 	ath11k_ahb_free_ext_irq(ab);
465 }
466 
/* Bottom half for a CE interrupt: service the pipe, then re-enable the
 * CE interrupts that the hard-irq handler masked before scheduling us.
 */
static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
{
	struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);

	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
}
475 
/* Hard-irq handler for a CE: record the time, mask this CE's interrupts
 * and defer the actual servicing to the tasklet (which re-enables them).
 */
static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ce_pipe *ce_pipe = arg;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);

	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}
489 
/* NAPI poll callback for a DP interrupt group.
 *
 * Services the group's SRNG rings up to @budget. If the rings drained
 * below budget, completes NAPI and re-arms the group's irq lines. The
 * returned value is clamped to @budget because ath11k_dp_service_srng()
 * may report more work than budget, which NAPI does not allow.
 */
static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
						struct ath11k_ext_irq_grp,
						napi);
	struct ath11k_base *ab = irq_grp->ab;
	int work_done;

	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ath11k_ahb_ext_grp_enable(irq_grp);
	}

	if (work_done > budget)
		work_done = budget;

	return work_done;
}
509 
/* Hard-irq handler for a DP group: record the time, mask the whole
 * group and hand processing to NAPI (which re-enables it when done).
 */
static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ext_irq_grp *irq_grp = arg;

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	ath11k_ahb_ext_grp_disable(irq_grp);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}
523 
/* Build and request the external (datapath) interrupt groups.
 *
 * For each group a dummy netdev + NAPI context is allocated, then the
 * group's irq list is derived from the per-group ring masks in
 * hw_params: each set bit j selects one enum ext_irq_num entry. Since
 * "ring1"/"mac1" carries the highest enum value of its run, the entry
 * for ring (j+1) is computed as <base_ring1> - j (same idea for macs).
 *
 * Returns 0, or -ENOMEM if a dummy netdev cannot be allocated.
 * NOTE(review): a request_irq() failure is only logged and the loop
 * continues; earlier allocations are not unwound here — presumably
 * cleanup is left to ath11k_ahb_free_ext_irq() — confirm.
 */
static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
{
	struct ath11k_hw_params *hw = &ab->hw_params;
	int i, j;
	int irq;
	int ret;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;

		/* NAPI needs a backing netdev; a dummy one suffices */
		irq_grp->napi_ndev = alloc_netdev_dummy(0);
		if (!irq_grp->napi_ndev)
			return -ENOMEM;

		netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
			       ath11k_ahb_ext_grp_napi_poll);

		/* Translate each ring-mask bit into its ext_irq_num entry */
		for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
			if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
				irq_grp->irqs[num_irq++] =
					wbm2host_tx_completions_ring1 - j;
			}

			if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
				irq_grp->irqs[num_irq++] =
					reo2host_destination_ring1 - j;
			}

			if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_exception;

			if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
				irq_grp->irqs[num_irq++] = wbm2host_rx_release;

			if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_status;

			/* Per-mac rings exist only for valid pdev indices */
			if (j < ab->hw_params.max_radios) {
				if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						rxdma2host_destination_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}

				if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						host2rxdma_host_buf_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}

				if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						ppdu_end_interrupts_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
					irq_grp->irqs[num_irq++] =
						rxdma2host_monitor_status_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}
			}
		}
		irq_grp->num_irq = num_irq;

		/* Look up each line by name and request it; IRQ_NOAUTOEN
		 * keeps it masked until ath11k_ahb_ext_irq_enable().
		 */
		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];

			irq = platform_get_irq_byname(ab->pdev,
						      irq_name[irq_idx]);
			ab->irq_num[irq_idx] = irq;
			irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
					  IRQF_TRIGGER_RISING,
					  irq_name[irq_idx], irq_grp);
			if (ret) {
				ath11k_err(ab, "failed request_irq for %d\n",
					   irq);
			}
		}
	}

	return 0;
}
609 
610 static int ath11k_ahb_config_irq(struct ath11k_base *ab)
611 {
612 	int irq, irq_idx, i;
613 	int ret;
614 
615 	if (ab->hw_params.hybrid_bus_type)
616 		return ath11k_pcic_config_irq(ab);
617 
618 	/* Configure CE irqs */
619 	for (i = 0; i < ab->hw_params.ce_count; i++) {
620 		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
621 
622 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
623 			continue;
624 
625 		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
626 
627 		tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
628 		irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
629 		ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
630 				  IRQF_TRIGGER_RISING, irq_name[irq_idx],
631 				  ce_pipe);
632 		if (ret)
633 			return ret;
634 
635 		ab->irq_num[irq_idx] = irq;
636 	}
637 
638 	/* Configure external interrupts */
639 	ret = ath11k_ahb_config_ext_irq(ab);
640 
641 	return ret;
642 }
643 
/* Resolve @service_id to its upload (host->fw) and download (fw->host)
 * CE pipe numbers using the hw_params service-to-CE map. A service may
 * appear as separate IN/OUT entries or as a single INOUT entry; the
 * WARN_ONs flag duplicate assignments in a malformed map.
 * Returns 0, or -ENOENT if either direction is left unresolved.
 */
static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
					  u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
		entry = &ab->hw_params.svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			/* firmware -> host */
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			/* host -> firmware */
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}
686 
687 static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
688 {
689 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
690 	u32 wake_irq;
691 	u32 value = 0;
692 	int ret;
693 
694 	if (!device_may_wakeup(ab->dev))
695 		return -EPERM;
696 
697 	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
698 
699 	ret = enable_irq_wake(wake_irq);
700 	if (ret) {
701 		ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
702 		return ret;
703 	}
704 
705 	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
706 				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
707 	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
708 				 ATH11K_AHB_SMP2P_SMEM_MSG);
709 
710 	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
711 					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
712 	if (ret) {
713 		ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
714 		return ret;
715 	}
716 
717 	ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n");
718 
719 	return ret;
720 }
721 
722 static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
723 {
724 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
725 	u32 wake_irq;
726 	u32 value = 0;
727 	int ret;
728 
729 	if (!device_may_wakeup(ab->dev))
730 		return -EPERM;
731 
732 	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
733 
734 	ret = disable_irq_wake(wake_irq);
735 	if (ret) {
736 		ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
737 		return ret;
738 	}
739 
740 	reinit_completion(&ab->wow.wakeup_completed);
741 
742 	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
743 				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
744 	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
745 				 ATH11K_AHB_SMP2P_SMEM_MSG);
746 
747 	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
748 					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
749 	if (ret) {
750 		ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
751 		return ret;
752 	}
753 
754 	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
755 	if (ret == 0) {
756 		ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
757 		return -ETIMEDOUT;
758 	}
759 
760 	ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n");
761 
762 	return 0;
763 }
764 
/* hif ops for the pure-AHB IPQ chips (ipq8074/ipq6018/ipq5018): direct
 * register access, AHB irq handling, rproc-based power control.
 */
static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
	.start = ath11k_ahb_start,
	.stop = ath11k_ahb_stop,
	.read32 = ath11k_ahb_read32,
	.write32 = ath11k_ahb_write32,
	.read = NULL,
	.irq_enable = ath11k_ahb_ext_irq_enable,
	.irq_disable = ath11k_ahb_ext_irq_disable,
	.map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
	.power_down = ath11k_ahb_power_down,
	.power_up = ath11k_ahb_power_up,
};
777 
/* hif ops for WCN6750 (hybrid bus): PCIC paths for register access and
 * irqs, AHB/rproc paths for power control, plus SMP2P-based suspend and
 * resume support.
 */
static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
	.start = ath11k_pcic_start,
	.stop = ath11k_pcic_stop,
	.read32 = ath11k_pcic_read32,
	.write32 = ath11k_pcic_write32,
	.read = NULL,
	.irq_enable = ath11k_pcic_ext_irq_enable,
	.irq_disable = ath11k_pcic_ext_irq_disable,
	.get_msi_address =  ath11k_pcic_get_msi_address,
	.get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
	.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
	.power_down = ath11k_ahb_power_down,
	.power_up = ath11k_ahb_power_up,
	.suspend = ath11k_ahb_hif_suspend,
	.resume = ath11k_ahb_hif_resume,
	.ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
	.ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
};
796 
797 static int ath11k_core_get_rproc(struct ath11k_base *ab)
798 {
799 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
800 	struct device *dev = ab->dev;
801 	struct rproc *prproc;
802 	phandle rproc_phandle;
803 
804 	if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
805 		ath11k_err(ab, "failed to get q6_rproc handle\n");
806 		return -ENOENT;
807 	}
808 
809 	prproc = rproc_get_by_phandle(rproc_phandle);
810 	if (!prproc) {
811 		ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n");
812 		return -EPROBE_DEFER;
813 	}
814 	ab_ahb->tgt_rproc = prproc;
815 
816 	return 0;
817 }
818 
/* Set up MSI emulation for the hybrid-bus WCN6750.
 *
 * The MSI target address is the first MEM resource, mapped through the
 * IOMMU so the device can DMA to it; lo/hi halves are cached for the
 * firmware. The base MSI data value comes from the second cell of the
 * DT "interrupts" property (+32 to convert the GIC SPI number to a
 * hwirq — presumably; TODO confirm against the DT binding), and one
 * Linux irq is fetched per configured MSI vector.
 * Returns 0 or a negative errno.
 */
static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;
	phys_addr_t msi_addr_pa;
	dma_addr_t msi_addr_iova;
	struct resource *res;
	int int_prop;
	int ret;
	int i;

	ret = ath11k_pcic_init_msi_config(ab);
	if (ret) {
		ath11k_err(ab, "failed to init msi config: %d\n", ret);
		return ret;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ath11k_err(ab, "failed to fetch msi_addr\n");
		return -ENOENT;
	}

	msi_addr_pa = res->start;
	msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
					 DMA_FROM_DEVICE, 0);
	if (dma_mapping_error(ab->dev, msi_addr_iova))
		return -ENOMEM;

	ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
	ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);

	ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
	if (ret)
		return ret;

	ab->pci.msi.ep_base_data = int_prop + 32;

	for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret < 0)
			return ret;

		ab->pci.msi.irqs[i] = ret;
	}

	set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);

	return 0;
}
868 
869 static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
870 {
871 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
872 
873 	if (!ab->hw_params.smp2p_wow_exit)
874 		return 0;
875 
876 	ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
877 							    &ab_ahb->smp2p_info.smem_bit);
878 	if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
879 		ath11k_err(ab, "failed to fetch smem state: %ld\n",
880 			   PTR_ERR(ab_ahb->smp2p_info.smem_state));
881 		return PTR_ERR(ab_ahb->smp2p_info.smem_state);
882 	}
883 
884 	return 0;
885 }
886 
887 static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
888 {
889 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
890 
891 	if (!ab->hw_params.smp2p_wow_exit)
892 		return;
893 
894 	qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
895 }
896 
897 static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
898 {
899 	struct platform_device *pdev = ab->pdev;
900 	struct resource *mem_res;
901 	void __iomem *mem;
902 
903 	if (ab->hw_params.hybrid_bus_type)
904 		return ath11k_ahb_setup_msi_resources(ab);
905 
906 	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
907 	if (IS_ERR(mem)) {
908 		dev_err(&pdev->dev, "ioremap error\n");
909 		return PTR_ERR(mem);
910 	}
911 
912 	ab->mem = mem;
913 	ab->mem_len = resource_size(mem_res);
914 
915 	return 0;
916 }
917 
918 static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
919 {
920 	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
921 	struct device *dev = ab->dev;
922 	struct device_node *node;
923 	struct resource r;
924 	int ret;
925 
926 	node = of_parse_phandle(dev->of_node, "memory-region", 0);
927 	if (!node)
928 		return -ENOENT;
929 
930 	ret = of_address_to_resource(node, 0, &r);
931 	of_node_put(node);
932 	if (ret) {
933 		dev_err(dev, "failed to resolve msa fixed region\n");
934 		return ret;
935 	}
936 
937 	ab_ahb->fw.msa_paddr = r.start;
938 	ab_ahb->fw.msa_size = resource_size(&r);
939 
940 	node = of_parse_phandle(dev->of_node, "memory-region", 1);
941 	if (!node)
942 		return -ENOENT;
943 
944 	ret = of_address_to_resource(node, 0, &r);
945 	of_node_put(node);
946 	if (ret) {
947 		dev_err(dev, "failed to resolve ce fixed region\n");
948 		return ret;
949 	}
950 
951 	ab_ahb->fw.ce_paddr = r.start;
952 	ab_ahb->fw.ce_size = resource_size(&r);
953 
954 	return 0;
955 }
956 
957 static int ath11k_ahb_ce_remap(struct ath11k_base *ab)
958 {
959 	const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
960 	struct platform_device *pdev = ab->pdev;
961 
962 	if (!ce_remap) {
963 		/* no separate CE register space */
964 		ab->mem_ce = ab->mem;
965 		return 0;
966 	}
967 
968 	/* ce register space is moved out of wcss unlike ipq8074 or ipq6018
969 	 * and the space is not contiguous, hence remapping the CE registers
970 	 * to a new space for accessing them.
971 	 */
972 	ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
973 	if (!ab->mem_ce) {
974 		dev_err(&pdev->dev, "ce ioremap error\n");
975 		return -ENOMEM;
976 	}
977 
978 	return 0;
979 }
980 
/* Undo ath11k_ahb_ce_remap(): only chips with a separate CE register
 * space own a private mapping; otherwise mem_ce aliases ab->mem and
 * must not be unmapped.
 */
static void ath11k_ahb_ce_unmap(struct ath11k_base *ab)
{
	if (ab->hw_params.ce_remap)
		iounmap(ab->mem_ce);
}
986 
/* Set up the firmware's MSA memory when the host (not TrustZone) manages
 * it.
 *
 * Chips without fixed firmware memory need nothing. If the DT has no
 * "wifi-firmware" child node, TrustZone owns the MSA and we only note
 * that (fw.use_tz). Otherwise: resolve the MSA/CE regions, register a
 * child platform device for the firmware, give it an IOMMU paging
 * domain, and identity-map both regions (IOVA == physical address) so
 * the firmware can access them. Cleanup on error unwinds in strict
 * reverse order via the goto ladder.
 * Returns 0 or a negative errno.
 */
static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct device *host_dev = ab->dev;
	struct platform_device_info info = {0};
	struct iommu_domain *iommu_dom;
	struct platform_device *pdev;
	struct device_node *node;
	int ret;

	/* Chipsets not requiring MSA need not initialize
	 * MSA resources, return success in such cases.
	 */
	if (!ab->hw_params.fixed_fw_mem)
		return 0;

	node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
	if (!node) {
		/* No child node: TrustZone manages the MSA for us */
		ab_ahb->fw.use_tz = true;
		return 0;
	}

	ret = ath11k_ahb_setup_msa_resources(ab);
	if (ret) {
		ath11k_err(ab, "failed to setup msa resources\n");
		return ret;
	}

	/* Register the firmware as a child platform device so it gets
	 * its own struct device (and IOMMU context) for DMA.
	 */
	info.fwnode = &node->fwnode;
	info.parent = host_dev;
	info.name = node->name;
	info.dma_mask = DMA_BIT_MASK(32);

	pdev = platform_device_register_full(&info);
	if (IS_ERR(pdev)) {
		of_node_put(node);
		return PTR_ERR(pdev);
	}

	ret = of_dma_configure(&pdev->dev, node, true);
	if (ret) {
		ath11k_err(ab, "dma configure fail: %d\n", ret);
		goto err_unregister;
	}

	ab_ahb->fw.dev = &pdev->dev;

	iommu_dom = iommu_paging_domain_alloc(ab_ahb->fw.dev);
	if (IS_ERR(iommu_dom)) {
		ath11k_err(ab, "failed to allocate iommu domain\n");
		ret = PTR_ERR(iommu_dom);
		goto err_unregister;
	}

	ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
	if (ret) {
		ath11k_err(ab, "could not attach device: %d\n", ret);
		goto err_iommu_free;
	}

	/* Identity-map the MSA region (IOVA == PA) for the firmware */
	ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
			ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret) {
		ath11k_err(ab, "failed to map firmware region: %d\n", ret);
		goto err_iommu_detach;
	}

	/* Identity-map the CE region likewise */
	ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
			ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret) {
		ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
		goto err_iommu_unmap;
	}

	ab_ahb->fw.use_tz = false;
	ab_ahb->fw.iommu_domain = iommu_dom;
	of_node_put(node);

	return 0;

err_iommu_unmap:
	iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);

err_iommu_detach:
	iommu_detach_device(iommu_dom, ab_ahb->fw.dev);

err_iommu_free:
	iommu_domain_free(iommu_dom);

err_unregister:
	platform_device_unregister(pdev);
	of_node_put(node);

	return ret;
}
1084 
/* Tear down what ath11k_ahb_fw_resources_init() set up: unmap the MSA
 * and CE identity mappings, detach and free the IOMMU domain, and
 * unregister the firmware child platform device. A short unmap is only
 * logged — there is nothing more to do at this point.
 * Always returns 0.
 */
static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct iommu_domain *iommu;
	size_t unmapped_size;

	/* Chipsets not requiring MSA would have not initialized
	 * MSA resources, return success in such cases.
	 */
	if (!ab->hw_params.fixed_fw_mem)
		return 0;

	/* TrustZone-managed MSA means we created none of the below */
	if (ab_ahb->fw.use_tz)
		return 0;

	iommu = ab_ahb->fw.iommu_domain;

	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
	if (unmapped_size != ab_ahb->fw.msa_size)
		ath11k_err(ab, "failed to unmap firmware: %zu\n",
			   unmapped_size);

	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
	if (unmapped_size != ab_ahb->fw.ce_size)
		ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
			   unmapped_size);

	iommu_detach_device(iommu, ab_ahb->fw.dev);
	iommu_domain_free(iommu);

	platform_device_unregister(to_platform_device(ab_ahb->fw.dev));

	return 0;
}
1119 
/* Probe an ath11k AHB device: pick per-chip ops, allocate the core,
 * map resources, set up firmware memory/SMP2P/SRNG/CE, and bring the
 * core up. On failure, unwinds in strict reverse order via the goto
 * chain below.
 */
static int ath11k_ahb_probe(struct platform_device *pdev)
{
	struct ath11k_base *ab;
	const struct ath11k_hif_ops *hif_ops;
	const struct ath11k_pci_ops *pci_ops;
	enum ath11k_hw_rev hw_rev;
	int ret;

	/* hw_rev was stashed in the OF match table's .data field */
	hw_rev = (uintptr_t)device_get_match_data(&pdev->dev);

	/* Only WCN6750 needs PCI-style (pcic) register ops on top of AHB */
	switch (hw_rev) {
	case ATH11K_HW_IPQ8074:
	case ATH11K_HW_IPQ6018_HW10:
	case ATH11K_HW_IPQ5018_HW10:
		hif_ops = &ath11k_ahb_hif_ops_ipq8074;
		pci_ops = NULL;
		break;
	case ATH11K_HW_WCN6750_HW10:
		hif_ops = &ath11k_ahb_hif_ops_wcn6750;
		pci_ops = &ath11k_ahb_pci_ops_wcn6750;
		break;
	default:
		dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
		return -EOPNOTSUPP;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
		return ret;
	}

	ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
			       ATH11K_BUS_AHB);
	if (!ab) {
		dev_err(&pdev->dev, "failed to allocate ath11k base\n");
		return -ENOMEM;
	}

	ab->hif.ops = hif_ops;
	ab->pdev = pdev;
	ab->hw_rev = hw_rev;
	ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
	platform_set_drvdata(pdev, ab);

	/* pci_ops may be NULL here (non-WCN6750 chips) */
	ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
	if (ret) {
		ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
		goto err_core_free;
	}

	ret = ath11k_core_pre_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath11k_ahb_setup_resources(ab);
	if (ret)
		goto err_core_free;

	ret = ath11k_ahb_ce_remap(ab);
	if (ret)
		goto err_core_free;

	ret = ath11k_ahb_fw_resources_init(ab);
	if (ret)
		goto err_ce_unmap;

	ret = ath11k_ahb_setup_smp2p_handle(ab);
	if (ret)
		goto err_fw_deinit;

	ret = ath11k_hal_srng_init(ab);
	if (ret)
		goto err_release_smp2p_handle;

	ret = ath11k_ce_alloc_pipes(ab);
	if (ret) {
		ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath11k_ahb_init_qmi_ce_config(ab);

	ret = ath11k_core_get_rproc(ab);
	if (ret) {
		ath11k_err(ab, "failed to get rproc: %d\n", ret);
		goto err_ce_free;
	}

	ret = ath11k_core_init(ab);
	if (ret) {
		ath11k_err(ab, "failed to init core: %d\n", ret);
		goto err_ce_free;
	}

	ret = ath11k_ahb_config_irq(ab);
	if (ret) {
		ath11k_err(ab, "failed to configure irq: %d\n", ret);
		goto err_ce_free;
	}

	/* Best-effort firmware reset after cold boot calibration */
	ath11k_qmi_fwreset_from_cold_boot(ab);

	return 0;

err_ce_free:
	ath11k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath11k_hal_srng_deinit(ab);

err_release_smp2p_handle:
	ath11k_ahb_release_smp2p_handle(ab);

err_fw_deinit:
	ath11k_ahb_fw_resource_deinit(ab);

err_ce_unmap:
	ath11k_ahb_ce_unmap(ab);

err_core_free:
	ath11k_core_free(ab);
	platform_set_drvdata(pdev, NULL);

	return ret;
}
1246 
1247 static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
1248 {
1249 	unsigned long left;
1250 
1251 	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
1252 		left = wait_for_completion_timeout(&ab->driver_recovery,
1253 						   ATH11K_AHB_RECOVERY_TIMEOUT);
1254 		if (!left)
1255 			ath11k_warn(ab, "failed to receive recovery response completion\n");
1256 	}
1257 
1258 	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
1259 	cancel_work_sync(&ab->restart_work);
1260 	cancel_work_sync(&ab->qmi.event_work);
1261 }
1262 
/* Release all resources acquired during probe, in reverse order of
 * acquisition (IRQs, SRNG, SMP2P, firmware memory, CE pipes/mapping,
 * and finally the core itself).
 */
static void ath11k_ahb_free_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;

	ath11k_ahb_free_irq(ab);
	ath11k_hal_srng_deinit(ab);
	ath11k_ahb_release_smp2p_handle(ab);
	ath11k_ahb_fw_resource_deinit(ab);
	ath11k_ce_free_pipes(ab);
	ath11k_ahb_ce_unmap(ab);

	ath11k_core_free(ab);
	platform_set_drvdata(pdev, NULL);
}
1277 
1278 static void ath11k_ahb_remove(struct platform_device *pdev)
1279 {
1280 	struct ath11k_base *ab = platform_get_drvdata(pdev);
1281 
1282 	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1283 		ath11k_ahb_power_down(ab);
1284 		ath11k_debugfs_soc_destroy(ab);
1285 		ath11k_qmi_deinit_service(ab);
1286 		goto qmi_fail;
1287 	}
1288 
1289 	ath11k_ahb_remove_prepare(ab);
1290 	ath11k_core_deinit(ab);
1291 
1292 qmi_fail:
1293 	ath11k_ahb_free_resources(ab);
1294 }
1295 
1296 static void ath11k_ahb_shutdown(struct platform_device *pdev)
1297 {
1298 	struct ath11k_base *ab = platform_get_drvdata(pdev);
1299 
1300 	/* platform shutdown() & remove() are mutually exclusive.
1301 	 * remove() is invoked during rmmod & shutdown() during
1302 	 * system reboot/shutdown.
1303 	 */
1304 	ath11k_ahb_remove_prepare(ab);
1305 
1306 	if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
1307 		goto free_resources;
1308 
1309 	ath11k_core_deinit(ab);
1310 
1311 free_resources:
1312 	ath11k_ahb_free_resources(ab);
1313 }
1314 
/* Platform driver glue: binds probe/remove/shutdown to the DT
 * compatible strings in ath11k_ahb_of_match (see top of file).
 */
static struct platform_driver ath11k_ahb_driver = {
	.driver = {
		.name = "ath11k",
		.of_match_table = ath11k_ahb_of_match,
	},
	.probe = ath11k_ahb_probe,
	.remove = ath11k_ahb_remove,
	.shutdown = ath11k_ahb_shutdown,
};

module_platform_driver(ath11k_ahb_driver);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
MODULE_LICENSE("Dual BSD/GPL");
1329