1*a9655020SBjoern A. Zeeb // SPDX-License-Identifier: BSD-3-Clause-Clear
2*a9655020SBjoern A. Zeeb /*
3*a9655020SBjoern A. Zeeb * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4*a9655020SBjoern A. Zeeb * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5*a9655020SBjoern A. Zeeb */
6*a9655020SBjoern A. Zeeb
7*a9655020SBjoern A. Zeeb #include <linux/dma-mapping.h>
8*a9655020SBjoern A. Zeeb #include <linux/firmware/qcom/qcom_scm.h>
9*a9655020SBjoern A. Zeeb #include <linux/of.h>
10*a9655020SBjoern A. Zeeb #include <linux/of_device.h>
11*a9655020SBjoern A. Zeeb #include <linux/platform_device.h>
12*a9655020SBjoern A. Zeeb #include <linux/remoteproc.h>
13*a9655020SBjoern A. Zeeb #include <linux/soc/qcom/mdt_loader.h>
14*a9655020SBjoern A. Zeeb #include <linux/soc/qcom/smem_state.h>
15*a9655020SBjoern A. Zeeb #include "ahb.h"
16*a9655020SBjoern A. Zeeb #include "debug.h"
17*a9655020SBjoern A. Zeeb #include "hif.h"
18*a9655020SBjoern A. Zeeb
/* Device-tree match table; .data carries the ath12k hardware revision
 * selected for the matched SoC.
 */
static const struct of_device_id ath12k_ahb_of_match[] = {
	{ .compatible = "qcom,ipq5332-wifi",
	  .data = (void *)ATH12K_HW_IPQ5332_HW10,
	},
	{ }
};

MODULE_DEVICE_TABLE(of, ath12k_ahb_of_match);
27*a9655020SBjoern A. Zeeb
#define ATH12K_IRQ_CE0_OFFSET 4
#define ATH12K_MAX_UPDS 1
#define ATH12K_UPD_IRQ_WRD_LEN 18
/* Per-userPD lifecycle interrupt names: spawn ack, ready notification and
 * stop ack. NOTE(review): presumably matched against the DT
 * "interrupt-names" entries by the probe path — confirm against the
 * userPD IRQ setup code (not visible in this chunk).
 */
static const char ath12k_userpd_irq[][9] = {"spawn",
					    "ready",
					    "stop-ack"};
34*a9655020SBjoern A. Zeeb
/* Platform IRQ resource names. Indexes line up with the interrupt
 * numbering used by this driver: CE IRQs start at ATH12K_IRQ_CE0_OFFSET
 * (slot 4 is "ce0") and the DP interrupts follow the ext_irq_num enum
 * below ("host2wbm-desc-feed" is slot 16).
 */
static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
	"misc-pulse1",
	"misc-latch",
	"sw-exception",
	"watchdog",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring4",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};
90*a9655020SBjoern A. Zeeb
/* Logical indexes of the DP (non-CE) interrupts; the values index into
 * irq_name[] ("host2wbm-desc-feed" is slot 16 there, matching the first
 * enumerator).
 */
enum ext_irq_num {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
130*a9655020SBjoern A. Zeeb
/* Read a 32-bit register; offsets below the CMEM boundary go through the
 * remapped CE window when CE remapping is enabled.
 */
static u32 ath12k_ahb_read32(struct ath12k_base *ab, u32 offset)
{
	if (!ab->ce_remap || offset >= HAL_SEQ_WCSS_CMEM_OFFSET)
		return ioread32(ab->mem + offset);

	return ioread32(ab->mem_ce + offset);
}
137*a9655020SBjoern A. Zeeb
/* Write a 32-bit register; offsets below the CMEM boundary go through the
 * remapped CE window when CE remapping is enabled.
 */
static void ath12k_ahb_write32(struct ath12k_base *ab, u32 offset,
			       u32 value)
{
	if (!ab->ce_remap || offset >= HAL_SEQ_WCSS_CMEM_OFFSET)
		iowrite32(value, ab->mem + offset);
	else
		iowrite32(value, ab->mem_ce + offset);
}
146*a9655020SBjoern A. Zeeb
ath12k_ahb_cancel_workqueue(struct ath12k_base * ab)147*a9655020SBjoern A. Zeeb static void ath12k_ahb_cancel_workqueue(struct ath12k_base *ab)
148*a9655020SBjoern A. Zeeb {
149*a9655020SBjoern A. Zeeb int i;
150*a9655020SBjoern A. Zeeb
151*a9655020SBjoern A. Zeeb for (i = 0; i < ab->hw_params->ce_count; i++) {
152*a9655020SBjoern A. Zeeb struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
153*a9655020SBjoern A. Zeeb
154*a9655020SBjoern A. Zeeb if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
155*a9655020SBjoern A. Zeeb continue;
156*a9655020SBjoern A. Zeeb
157*a9655020SBjoern A. Zeeb cancel_work_sync(&ce_pipe->intr_wq);
158*a9655020SBjoern A. Zeeb }
159*a9655020SBjoern A. Zeeb }
160*a9655020SBjoern A. Zeeb
ath12k_ahb_ext_grp_disable(struct ath12k_ext_irq_grp * irq_grp)161*a9655020SBjoern A. Zeeb static void ath12k_ahb_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
162*a9655020SBjoern A. Zeeb {
163*a9655020SBjoern A. Zeeb int i;
164*a9655020SBjoern A. Zeeb
165*a9655020SBjoern A. Zeeb for (i = 0; i < irq_grp->num_irq; i++)
166*a9655020SBjoern A. Zeeb disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
167*a9655020SBjoern A. Zeeb }
168*a9655020SBjoern A. Zeeb
__ath12k_ahb_ext_irq_disable(struct ath12k_base * ab)169*a9655020SBjoern A. Zeeb static void __ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
170*a9655020SBjoern A. Zeeb {
171*a9655020SBjoern A. Zeeb int i;
172*a9655020SBjoern A. Zeeb
173*a9655020SBjoern A. Zeeb for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
174*a9655020SBjoern A. Zeeb struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
175*a9655020SBjoern A. Zeeb
176*a9655020SBjoern A. Zeeb ath12k_ahb_ext_grp_disable(irq_grp);
177*a9655020SBjoern A. Zeeb if (irq_grp->napi_enabled) {
178*a9655020SBjoern A. Zeeb napi_synchronize(&irq_grp->napi);
179*a9655020SBjoern A. Zeeb napi_disable(&irq_grp->napi);
180*a9655020SBjoern A. Zeeb irq_grp->napi_enabled = false;
181*a9655020SBjoern A. Zeeb }
182*a9655020SBjoern A. Zeeb }
183*a9655020SBjoern A. Zeeb }
184*a9655020SBjoern A. Zeeb
ath12k_ahb_ext_grp_enable(struct ath12k_ext_irq_grp * irq_grp)185*a9655020SBjoern A. Zeeb static void ath12k_ahb_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
186*a9655020SBjoern A. Zeeb {
187*a9655020SBjoern A. Zeeb int i;
188*a9655020SBjoern A. Zeeb
189*a9655020SBjoern A. Zeeb for (i = 0; i < irq_grp->num_irq; i++)
190*a9655020SBjoern A. Zeeb enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
191*a9655020SBjoern A. Zeeb }
192*a9655020SBjoern A. Zeeb
/* Set a single bit in a register (non-atomic read-modify-write). */
static void ath12k_ahb_setbit32(struct ath12k_base *ab, u8 bit, u32 offset)
{
	ath12k_ahb_write32(ab, offset,
			   ath12k_ahb_read32(ab, offset) | BIT(bit));
}
200*a9655020SBjoern A. Zeeb
/* Clear a single bit in a register (non-atomic read-modify-write). */
static void ath12k_ahb_clearbit32(struct ath12k_base *ab, u8 bit, u32 offset)
{
	ath12k_ahb_write32(ab, offset,
			   ath12k_ahb_read32(ab, offset) & ~BIT(bit));
}
208*a9655020SBjoern A. Zeeb
/* Unmask the interrupt-enable bits for one CE pipe. IE1 covers the
 * source ring; IE2 and IE3 cover the destination side (IE3 uses a
 * shifted bit position for the same CE id).
 */
static void ath12k_ahb_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
{
	const struct ce_ie_addr *ie_addr = ab->hw_params->ce_ie_addr;
	const struct ce_attr *attr = &ab->hw_params->host_ce_config[ce_id];

	if (attr->src_nentries)
		ath12k_ahb_setbit32(ab, ce_id, ie_addr->ie1_reg_addr);

	if (attr->dest_nentries) {
		ath12k_ahb_setbit32(ab, ce_id, ie_addr->ie2_reg_addr);
		ath12k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				    ie_addr->ie3_reg_addr);
	}
}
229*a9655020SBjoern A. Zeeb
/* Mask the interrupt-enable bits for one CE pipe — inverse of
 * ath12k_ahb_ce_irq_enable().
 */
static void ath12k_ahb_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
{
	const struct ce_ie_addr *ie_addr = ab->hw_params->ce_ie_addr;
	const struct ce_attr *attr = &ab->hw_params->host_ce_config[ce_id];

	if (attr->src_nentries)
		ath12k_ahb_clearbit32(ab, ce_id, ie_addr->ie1_reg_addr);

	if (attr->dest_nentries) {
		ath12k_ahb_clearbit32(ab, ce_id, ie_addr->ie2_reg_addr);
		ath12k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				      ie_addr->ie3_reg_addr);
	}
}
250*a9655020SBjoern A. Zeeb
ath12k_ahb_sync_ce_irqs(struct ath12k_base * ab)251*a9655020SBjoern A. Zeeb static void ath12k_ahb_sync_ce_irqs(struct ath12k_base *ab)
252*a9655020SBjoern A. Zeeb {
253*a9655020SBjoern A. Zeeb int i;
254*a9655020SBjoern A. Zeeb int irq_idx;
255*a9655020SBjoern A. Zeeb
256*a9655020SBjoern A. Zeeb for (i = 0; i < ab->hw_params->ce_count; i++) {
257*a9655020SBjoern A. Zeeb if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
258*a9655020SBjoern A. Zeeb continue;
259*a9655020SBjoern A. Zeeb
260*a9655020SBjoern A. Zeeb irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
261*a9655020SBjoern A. Zeeb synchronize_irq(ab->irq_num[irq_idx]);
262*a9655020SBjoern A. Zeeb }
263*a9655020SBjoern A. Zeeb }
264*a9655020SBjoern A. Zeeb
ath12k_ahb_sync_ext_irqs(struct ath12k_base * ab)265*a9655020SBjoern A. Zeeb static void ath12k_ahb_sync_ext_irqs(struct ath12k_base *ab)
266*a9655020SBjoern A. Zeeb {
267*a9655020SBjoern A. Zeeb int i, j;
268*a9655020SBjoern A. Zeeb int irq_idx;
269*a9655020SBjoern A. Zeeb
270*a9655020SBjoern A. Zeeb for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
271*a9655020SBjoern A. Zeeb struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
272*a9655020SBjoern A. Zeeb
273*a9655020SBjoern A. Zeeb for (j = 0; j < irq_grp->num_irq; j++) {
274*a9655020SBjoern A. Zeeb irq_idx = irq_grp->irqs[j];
275*a9655020SBjoern A. Zeeb synchronize_irq(ab->irq_num[irq_idx]);
276*a9655020SBjoern A. Zeeb }
277*a9655020SBjoern A. Zeeb }
278*a9655020SBjoern A. Zeeb }
279*a9655020SBjoern A. Zeeb
ath12k_ahb_ce_irqs_enable(struct ath12k_base * ab)280*a9655020SBjoern A. Zeeb static void ath12k_ahb_ce_irqs_enable(struct ath12k_base *ab)
281*a9655020SBjoern A. Zeeb {
282*a9655020SBjoern A. Zeeb int i;
283*a9655020SBjoern A. Zeeb
284*a9655020SBjoern A. Zeeb for (i = 0; i < ab->hw_params->ce_count; i++) {
285*a9655020SBjoern A. Zeeb if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
286*a9655020SBjoern A. Zeeb continue;
287*a9655020SBjoern A. Zeeb ath12k_ahb_ce_irq_enable(ab, i);
288*a9655020SBjoern A. Zeeb }
289*a9655020SBjoern A. Zeeb }
290*a9655020SBjoern A. Zeeb
ath12k_ahb_ce_irqs_disable(struct ath12k_base * ab)291*a9655020SBjoern A. Zeeb static void ath12k_ahb_ce_irqs_disable(struct ath12k_base *ab)
292*a9655020SBjoern A. Zeeb {
293*a9655020SBjoern A. Zeeb int i;
294*a9655020SBjoern A. Zeeb
295*a9655020SBjoern A. Zeeb for (i = 0; i < ab->hw_params->ce_count; i++) {
296*a9655020SBjoern A. Zeeb if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
297*a9655020SBjoern A. Zeeb continue;
298*a9655020SBjoern A. Zeeb ath12k_ahb_ce_irq_disable(ab, i);
299*a9655020SBjoern A. Zeeb }
300*a9655020SBjoern A. Zeeb }
301*a9655020SBjoern A. Zeeb
/* hif start: unmask CE interrupts, then post RX buffers to the pipes.
 * Always returns 0.
 */
static int ath12k_ahb_start(struct ath12k_base *ab)
{
	ath12k_ahb_ce_irqs_enable(ab);
	ath12k_ce_rx_post_buf(ab);

	return 0;
}
309*a9655020SBjoern A. Zeeb
ath12k_ahb_ext_irq_enable(struct ath12k_base * ab)310*a9655020SBjoern A. Zeeb static void ath12k_ahb_ext_irq_enable(struct ath12k_base *ab)
311*a9655020SBjoern A. Zeeb {
312*a9655020SBjoern A. Zeeb struct ath12k_ext_irq_grp *irq_grp;
313*a9655020SBjoern A. Zeeb int i;
314*a9655020SBjoern A. Zeeb
315*a9655020SBjoern A. Zeeb for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
316*a9655020SBjoern A. Zeeb irq_grp = &ab->ext_irq_grp[i];
317*a9655020SBjoern A. Zeeb if (!irq_grp->napi_enabled) {
318*a9655020SBjoern A. Zeeb napi_enable(&irq_grp->napi);
319*a9655020SBjoern A. Zeeb irq_grp->napi_enabled = true;
320*a9655020SBjoern A. Zeeb }
321*a9655020SBjoern A. Zeeb ath12k_ahb_ext_grp_enable(irq_grp);
322*a9655020SBjoern A. Zeeb }
323*a9655020SBjoern A. Zeeb }
324*a9655020SBjoern A. Zeeb
/* Mask all DP group interrupts and wait for in-flight handlers to
 * finish before returning.
 */
static void ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
{
	__ath12k_ahb_ext_irq_disable(ab);
	ath12k_ahb_sync_ext_irqs(ab);
}
330*a9655020SBjoern A. Zeeb
/* hif stop: quiesce CE processing. CE IRQ masking is skipped during a
 * crash flush (NOTE(review): presumably already masked on that path —
 * confirm against the crash-handling code), then running handlers are
 * synchronized, pending bottom halves cancelled and pipes cleaned up.
 */
static void ath12k_ahb_stop(struct ath12k_base *ab)
{
	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_ahb_ce_irqs_disable(ab);
	ath12k_ahb_sync_ce_irqs(ab);
	ath12k_ahb_cancel_workqueue(ab);
	timer_delete_sync(&ab->rx_replenish_retry);
	ath12k_ce_cleanup_pipes(ab);
}
340*a9655020SBjoern A. Zeeb
/* Boot the wifi userPD: map the DT-reserved firmware region, load and
 * authenticate the firmware images, then perform the spawn handshake
 * with the remote processor over SMEM state bits.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath12k_ahb_power_up(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	char fw_name[ATH12K_USERPD_FW_NAME_LEN];
	char fw2_name[ATH12K_USERPD_FW_NAME_LEN];
	struct device *dev = ab->dev;
	const struct firmware *fw, *fw2;
	struct reserved_mem *rmem = NULL;
	unsigned long time_left;
	phys_addr_t mem_phys;
	void *mem_region;
	size_t mem_size;
	u32 pasid;
	int ret;

	rmem = ath12k_core_get_reserved_mem(ab, 0);
	if (!rmem)
		return -ENODEV;

	mem_phys = rmem->base;
	mem_size = rmem->size;
	mem_region = devm_memremap(dev, mem_phys, mem_size, MEMREMAP_WC);
	if (IS_ERR(mem_region)) {
		ath12k_err(ab, "unable to map memory region: %pa+%pa\n",
			   &rmem->base, &rmem->size);
		return PTR_ERR(mem_region);
	}

	/* First image name is derived from the userPD id */
	snprintf(fw_name, sizeof(fw_name), "%s/%s/%s%d%s", ATH12K_FW_DIR,
		 ab->hw_params->fw.dir, ATH12K_AHB_FW_PREFIX, ab_ahb->userpd_id,
		 ATH12K_AHB_FW_SUFFIX);

	ret = request_firmware(&fw, fw_name, dev);
	if (ret < 0) {
		ath12k_err(ab, "request_firmware failed\n");
		return ret;
	}

	ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw_name,
		   fw->size);

	if (!fw->size) {
		ath12k_err(ab, "Invalid firmware size\n");
		ret = -EINVAL;
		goto err_fw;
	}

	/* Peripheral authentication id: userPD id encoded alongside the
	 * fixed software id.
	 */
	pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
		ATH12K_AHB_UPD_SWID;

	/* Load FW image to a reserved memory location; mem_phys is updated
	 * by qcom_mdt_load() via its last argument.
	 */
	ret = qcom_mdt_load(dev, fw, fw_name, pasid, mem_region, mem_phys, mem_size,
			    &mem_phys);
	if (ret) {
		ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
		goto err_fw;
	}

	/* Second image is loaded into the same region without PAS init */
	snprintf(fw2_name, sizeof(fw2_name), "%s/%s/%s", ATH12K_FW_DIR,
		 ab->hw_params->fw.dir, ATH12K_AHB_FW2);

	ret = request_firmware(&fw2, fw2_name, dev);
	if (ret < 0) {
		ath12k_err(ab, "request_firmware failed\n");
		goto err_fw;
	}

	ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw2_name,
		   fw2->size);

	if (!fw2->size) {
		ath12k_err(ab, "Invalid firmware size\n");
		ret = -EINVAL;
		goto err_fw2;
	}

	ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, mem_region, mem_phys,
				    mem_size, &mem_phys);
	if (ret) {
		ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
		goto err_fw2;
	}

	/* Authenticate FW image using peripheral ID */
	ret = qcom_scm_pas_auth_and_reset(pasid);
	if (ret) {
		ath12k_err(ab, "failed to boot the remote processor %d\n", ret);
		goto err_fw2;
	}

	/* Instruct Q6 to spawn userPD thread */
	ret = qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit),
					  BIT(ab_ahb->spawn_bit));
	if (ret) {
		ath12k_err(ab, "Failed to update spawn state %d\n", ret);
		goto err_fw2;
	}

	time_left = wait_for_completion_timeout(&ab_ahb->userpd_spawned,
						ATH12K_USERPD_SPAWN_TIMEOUT);
	if (!time_left) {
		ath12k_err(ab, "UserPD spawn wait timed out\n");
		ret = -ETIMEDOUT;
		goto err_fw2;
	}

	time_left = wait_for_completion_timeout(&ab_ahb->userpd_ready,
						ATH12K_USERPD_READY_TIMEOUT);
	if (!time_left) {
		ath12k_err(ab, "UserPD ready wait timed out\n");
		ret = -ETIMEDOUT;
		goto err_fw2;
	}

	/* Handshake done: drop the spawn request bit again */
	qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit), 0);

	ath12k_dbg(ab, ATH12K_DBG_AHB, "UserPD%d is now UP\n", ab_ahb->userpd_id);

	/* The success path falls through the labels below: both firmware
	 * handles can be released once loaded; ret is 0 at this point.
	 */
err_fw2:
	release_firmware(fw2);
err_fw:
	release_firmware(fw);
	return ret;
}
465*a9655020SBjoern A. Zeeb
/* Stop the wifi userPD: raise the stop request over SMEM state, wait for
 * the stop acknowledgment, then shut down the PAS peripheral.
 * @is_suspend is not used in this implementation.
 */
static void ath12k_ahb_power_down(struct ath12k_base *ab, bool is_suspend)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	unsigned long time_left;
	u32 pasid;
	int ret;

	qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit),
				    BIT(ab_ahb->stop_bit));

	time_left = wait_for_completion_timeout(&ab_ahb->userpd_stopped,
						ATH12K_USERPD_STOP_TIMEOUT);
	if (!time_left) {
		/* Without a stop ack the userPD state is unknown; do not
		 * proceed with the PAS shutdown.
		 */
		ath12k_err(ab, "UserPD stop wait timed out\n");
		return;
	}

	/* Ack received: clear the stop request bit again */
	qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit), 0);

	/* Same pasid encoding as used when booting the userPD */
	pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
		ATH12K_AHB_UPD_SWID;
	/* Release the firmware */
	ret = qcom_scm_pas_shutdown(pasid);
	if (ret)
		ath12k_err(ab, "scm pas shutdown failed for userPD%d: %d\n",
			   ab_ahb->userpd_id, ret);
}
493*a9655020SBjoern A. Zeeb
ath12k_ahb_init_qmi_ce_config(struct ath12k_base * ab)494*a9655020SBjoern A. Zeeb static void ath12k_ahb_init_qmi_ce_config(struct ath12k_base *ab)
495*a9655020SBjoern A. Zeeb {
496*a9655020SBjoern A. Zeeb struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
497*a9655020SBjoern A. Zeeb
498*a9655020SBjoern A. Zeeb cfg->tgt_ce_len = ab->hw_params->target_ce_count;
499*a9655020SBjoern A. Zeeb cfg->tgt_ce = ab->hw_params->target_ce_config;
500*a9655020SBjoern A. Zeeb cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
501*a9655020SBjoern A. Zeeb cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
502*a9655020SBjoern A. Zeeb ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
503*a9655020SBjoern A. Zeeb }
504*a9655020SBjoern A. Zeeb
/* Bottom half for a CE interrupt: service the pipe's completed
 * descriptors, then re-enable the per-pipe interrupt that the hard IRQ
 * handler masked.
 */
static void ath12k_ahb_ce_workqueue(struct work_struct *work)
{
	struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);

	ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	ath12k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
}
513*a9655020SBjoern A. Zeeb
/* Hard IRQ handler for a CE pipe: record the time, mask the pipe's
 * interrupt and defer descriptor processing to the bottom-half work.
 */
static irqreturn_t ath12k_ahb_ce_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ce_pipe *ce_pipe = arg;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	/* Masked here; re-enabled by ath12k_ahb_ce_workqueue() */
	ath12k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);

	queue_work(system_bh_wq, &ce_pipe->intr_wq);

	return IRQ_HANDLED;
}
527*a9655020SBjoern A. Zeeb
/* NAPI poll for a DP interrupt group: service the SRNG rings; when work
 * stays below budget, complete NAPI and unmask the group's IRQs again.
 */
static int ath12k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
							  struct ath12k_ext_irq_grp,
							  napi);
	struct ath12k_base *ab = irq_grp->ab;
	int work_done;

	work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ath12k_ahb_ext_grp_enable(irq_grp);
	}

	/* NAPI contract: never report more work than the given budget */
	if (work_done > budget)
		work_done = budget;

	return work_done;
}
547*a9655020SBjoern A. Zeeb
/* Hard IRQ handler for a DP interrupt group: record the time, mask the
 * group's IRQ lines and hand processing over to NAPI.
 */
static irqreturn_t ath12k_ahb_ext_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ext_irq_grp *irq_grp = arg;

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	/* Masked here; re-enabled from the NAPI poll once under budget */
	ath12k_ahb_ext_grp_disable(irq_grp);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}
561*a9655020SBjoern A. Zeeb
ath12k_ahb_config_ext_irq(struct ath12k_base * ab)562*a9655020SBjoern A. Zeeb static int ath12k_ahb_config_ext_irq(struct ath12k_base *ab)
563*a9655020SBjoern A. Zeeb {
564*a9655020SBjoern A. Zeeb const struct ath12k_hw_ring_mask *ring_mask;
565*a9655020SBjoern A. Zeeb struct ath12k_ext_irq_grp *irq_grp;
566*a9655020SBjoern A. Zeeb const struct hal_ops *hal_ops;
567*a9655020SBjoern A. Zeeb int i, j, irq, irq_idx, ret;
568*a9655020SBjoern A. Zeeb u32 num_irq;
569*a9655020SBjoern A. Zeeb
570*a9655020SBjoern A. Zeeb ring_mask = ab->hw_params->ring_mask;
571*a9655020SBjoern A. Zeeb hal_ops = ab->hw_params->hal_ops;
572*a9655020SBjoern A. Zeeb for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
573*a9655020SBjoern A. Zeeb irq_grp = &ab->ext_irq_grp[i];
574*a9655020SBjoern A. Zeeb num_irq = 0;
575*a9655020SBjoern A. Zeeb
576*a9655020SBjoern A. Zeeb irq_grp->ab = ab;
577*a9655020SBjoern A. Zeeb irq_grp->grp_id = i;
578*a9655020SBjoern A. Zeeb
579*a9655020SBjoern A. Zeeb irq_grp->napi_ndev = alloc_netdev_dummy(0);
580*a9655020SBjoern A. Zeeb if (!irq_grp->napi_ndev)
581*a9655020SBjoern A. Zeeb return -ENOMEM;
582*a9655020SBjoern A. Zeeb
583*a9655020SBjoern A. Zeeb netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
584*a9655020SBjoern A. Zeeb ath12k_ahb_ext_grp_napi_poll);
585*a9655020SBjoern A. Zeeb
586*a9655020SBjoern A. Zeeb for (j = 0; j < ATH12K_EXT_IRQ_NUM_MAX; j++) {
587*a9655020SBjoern A. Zeeb /* For TX ring, ensure that the ring mask and the
588*a9655020SBjoern A. Zeeb * tcl_to_wbm_rbm_map point to the same ring number.
589*a9655020SBjoern A. Zeeb */
590*a9655020SBjoern A. Zeeb if (ring_mask->tx[i] &
591*a9655020SBjoern A. Zeeb BIT(hal_ops->tcl_to_wbm_rbm_map[j].wbm_ring_num)) {
592*a9655020SBjoern A. Zeeb irq_grp->irqs[num_irq++] =
593*a9655020SBjoern A. Zeeb wbm2host_tx_completions_ring1 - j;
594*a9655020SBjoern A. Zeeb }
595*a9655020SBjoern A. Zeeb
596*a9655020SBjoern A. Zeeb if (ring_mask->rx[i] & BIT(j)) {
597*a9655020SBjoern A. Zeeb irq_grp->irqs[num_irq++] =
598*a9655020SBjoern A. Zeeb reo2host_destination_ring1 - j;
599*a9655020SBjoern A. Zeeb }
600*a9655020SBjoern A. Zeeb
601*a9655020SBjoern A. Zeeb if (ring_mask->rx_err[i] & BIT(j))
602*a9655020SBjoern A. Zeeb irq_grp->irqs[num_irq++] = reo2host_exception;
603*a9655020SBjoern A. Zeeb
604*a9655020SBjoern A. Zeeb if (ring_mask->rx_wbm_rel[i] & BIT(j))
605*a9655020SBjoern A. Zeeb irq_grp->irqs[num_irq++] = wbm2host_rx_release;
606*a9655020SBjoern A. Zeeb
607*a9655020SBjoern A. Zeeb if (ring_mask->reo_status[i] & BIT(j))
608*a9655020SBjoern A. Zeeb irq_grp->irqs[num_irq++] = reo2host_status;
609*a9655020SBjoern A. Zeeb
610*a9655020SBjoern A. Zeeb if (ring_mask->rx_mon_dest[i] & BIT(j))
611*a9655020SBjoern A. Zeeb irq_grp->irqs[num_irq++] =
612*a9655020SBjoern A. Zeeb rxdma2host_monitor_destination_mac1;
613*a9655020SBjoern A. Zeeb }
614*a9655020SBjoern A. Zeeb
615*a9655020SBjoern A. Zeeb irq_grp->num_irq = num_irq;
616*a9655020SBjoern A. Zeeb
617*a9655020SBjoern A. Zeeb for (j = 0; j < irq_grp->num_irq; j++) {
618*a9655020SBjoern A. Zeeb irq_idx = irq_grp->irqs[j];
619*a9655020SBjoern A. Zeeb
620*a9655020SBjoern A. Zeeb irq = platform_get_irq_byname(ab->pdev,
621*a9655020SBjoern A. Zeeb irq_name[irq_idx]);
622*a9655020SBjoern A. Zeeb ab->irq_num[irq_idx] = irq;
623*a9655020SBjoern A. Zeeb irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
624*a9655020SBjoern A. Zeeb ret = devm_request_irq(ab->dev, irq,
625*a9655020SBjoern A. Zeeb ath12k_ahb_ext_interrupt_handler,
626*a9655020SBjoern A. Zeeb IRQF_TRIGGER_RISING,
627*a9655020SBjoern A. Zeeb irq_name[irq_idx], irq_grp);
628*a9655020SBjoern A. Zeeb if (ret)
629*a9655020SBjoern A. Zeeb ath12k_warn(ab, "failed request_irq for %d\n", irq);
630*a9655020SBjoern A. Zeeb }
631*a9655020SBjoern A. Zeeb }
632*a9655020SBjoern A. Zeeb
633*a9655020SBjoern A. Zeeb return 0;
634*a9655020SBjoern A. Zeeb }
635*a9655020SBjoern A. Zeeb
ath12k_ahb_config_irq(struct ath12k_base * ab)636*a9655020SBjoern A. Zeeb static int ath12k_ahb_config_irq(struct ath12k_base *ab)
637*a9655020SBjoern A. Zeeb {
638*a9655020SBjoern A. Zeeb int irq, irq_idx, i;
639*a9655020SBjoern A. Zeeb int ret;
640*a9655020SBjoern A. Zeeb
641*a9655020SBjoern A. Zeeb /* Configure CE irqs */
642*a9655020SBjoern A. Zeeb for (i = 0; i < ab->hw_params->ce_count; i++) {
643*a9655020SBjoern A. Zeeb struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
644*a9655020SBjoern A. Zeeb
645*a9655020SBjoern A. Zeeb if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
646*a9655020SBjoern A. Zeeb continue;
647*a9655020SBjoern A. Zeeb
648*a9655020SBjoern A. Zeeb irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
649*a9655020SBjoern A. Zeeb
650*a9655020SBjoern A. Zeeb INIT_WORK(&ce_pipe->intr_wq, ath12k_ahb_ce_workqueue);
651*a9655020SBjoern A. Zeeb irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
652*a9655020SBjoern A. Zeeb ret = devm_request_irq(ab->dev, irq, ath12k_ahb_ce_interrupt_handler,
653*a9655020SBjoern A. Zeeb IRQF_TRIGGER_RISING, irq_name[irq_idx],
654*a9655020SBjoern A. Zeeb ce_pipe);
655*a9655020SBjoern A. Zeeb if (ret)
656*a9655020SBjoern A. Zeeb return ret;
657*a9655020SBjoern A. Zeeb
658*a9655020SBjoern A. Zeeb ab->irq_num[irq_idx] = irq;
659*a9655020SBjoern A. Zeeb }
660*a9655020SBjoern A. Zeeb
661*a9655020SBjoern A. Zeeb /* Configure external interrupts */
662*a9655020SBjoern A. Zeeb ret = ath12k_ahb_config_ext_irq(ab);
663*a9655020SBjoern A. Zeeb
664*a9655020SBjoern A. Zeeb return ret;
665*a9655020SBjoern A. Zeeb }
666*a9655020SBjoern A. Zeeb
/* Look up the UL (host->target) and DL (target->host) CE pipe numbers
 * for @service_id in the hw_params service-to-CE map. A PIPEDIR_INOUT
 * entry supplies both directions.
 *
 * Returns 0 on success, -ENOENT if either direction is unmapped.
 */
static int ath12k_ahb_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
					  u8 *ul_pipe, u8 *dl_pipe)
{
	bool ul_found = false, dl_found = false;
	const struct service_to_pipe *map;
	u32 dir;
	int idx;

	for (idx = 0; idx < ab->hw_params->svc_to_ce_map_len; idx++) {
		map = &ab->hw_params->svc_to_ce_map[idx];

		if (__le32_to_cpu(map->service_id) != service_id)
			continue;

		dir = __le32_to_cpu(map->pipedir);

		if (dir == PIPEDIR_IN || dir == PIPEDIR_INOUT) {
			/* A service must not map DL twice */
			WARN_ON(dl_found);
			*dl_pipe = __le32_to_cpu(map->pipenum);
			dl_found = true;
		}

		if (dir == PIPEDIR_OUT || dir == PIPEDIR_INOUT) {
			/* A service must not map UL twice */
			WARN_ON(ul_found);
			*ul_pipe = __le32_to_cpu(map->pipenum);
			ul_found = true;
		}
	}

	if (WARN_ON(!ul_found || !dl_found))
		return -ENOENT;

	return 0;
}
700*a9655020SBjoern A. Zeeb
/* HIF callback table for IPQ5332 AHB devices; delegates to the
 * ath12k_ahb_* implementations in this file.
 */
static const struct ath12k_hif_ops ath12k_ahb_hif_ops_ipq5332 = {
	.start = ath12k_ahb_start,
	.stop = ath12k_ahb_stop,
	.read32 = ath12k_ahb_read32,
	.write32 = ath12k_ahb_write32,
	.irq_enable = ath12k_ahb_ext_irq_enable,
	.irq_disable = ath12k_ahb_ext_irq_disable,
	.map_service_to_pipe = ath12k_ahb_map_service_to_pipe,
	.power_up = ath12k_ahb_power_up,
	.power_down = ath12k_ahb_power_down,
};
712*a9655020SBjoern A. Zeeb
ath12k_userpd_irq_handler(int irq,void * data)713*a9655020SBjoern A. Zeeb static irqreturn_t ath12k_userpd_irq_handler(int irq, void *data)
714*a9655020SBjoern A. Zeeb {
715*a9655020SBjoern A. Zeeb struct ath12k_base *ab = data;
716*a9655020SBjoern A. Zeeb struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
717*a9655020SBjoern A. Zeeb
718*a9655020SBjoern A. Zeeb if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_SPAWN_IRQ]) {
719*a9655020SBjoern A. Zeeb complete(&ab_ahb->userpd_spawned);
720*a9655020SBjoern A. Zeeb } else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_READY_IRQ]) {
721*a9655020SBjoern A. Zeeb complete(&ab_ahb->userpd_ready);
722*a9655020SBjoern A. Zeeb } else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_STOP_ACK_IRQ]) {
723*a9655020SBjoern A. Zeeb complete(&ab_ahb->userpd_stopped);
724*a9655020SBjoern A. Zeeb } else {
725*a9655020SBjoern A. Zeeb ath12k_err(ab, "Invalid userpd interrupt\n");
726*a9655020SBjoern A. Zeeb return IRQ_NONE;
727*a9655020SBjoern A. Zeeb }
728*a9655020SBjoern A. Zeeb
729*a9655020SBjoern A. Zeeb return IRQ_HANDLED;
730*a9655020SBjoern A. Zeeb }
731*a9655020SBjoern A. Zeeb
ath12k_ahb_config_rproc_irq(struct ath12k_base * ab)732*a9655020SBjoern A. Zeeb static int ath12k_ahb_config_rproc_irq(struct ath12k_base *ab)
733*a9655020SBjoern A. Zeeb {
734*a9655020SBjoern A. Zeeb struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
735*a9655020SBjoern A. Zeeb int i, ret;
736*a9655020SBjoern A. Zeeb char *upd_irq_name;
737*a9655020SBjoern A. Zeeb
738*a9655020SBjoern A. Zeeb for (i = 0; i < ATH12K_USERPD_MAX_IRQ; i++) {
739*a9655020SBjoern A. Zeeb ab_ahb->userpd_irq_num[i] = platform_get_irq_byname(ab->pdev,
740*a9655020SBjoern A. Zeeb ath12k_userpd_irq[i]);
741*a9655020SBjoern A. Zeeb if (ab_ahb->userpd_irq_num[i] < 0)
742*a9655020SBjoern A. Zeeb return ab_ahb->userpd_irq_num[i];
743*a9655020SBjoern A. Zeeb
744*a9655020SBjoern A. Zeeb upd_irq_name = devm_kzalloc(&ab->pdev->dev, ATH12K_UPD_IRQ_WRD_LEN,
745*a9655020SBjoern A. Zeeb GFP_KERNEL);
746*a9655020SBjoern A. Zeeb if (!upd_irq_name)
747*a9655020SBjoern A. Zeeb return -ENOMEM;
748*a9655020SBjoern A. Zeeb
749*a9655020SBjoern A. Zeeb scnprintf(upd_irq_name, ATH12K_UPD_IRQ_WRD_LEN, "UserPD%u-%s",
750*a9655020SBjoern A. Zeeb ab_ahb->userpd_id, ath12k_userpd_irq[i]);
751*a9655020SBjoern A. Zeeb ret = devm_request_threaded_irq(&ab->pdev->dev, ab_ahb->userpd_irq_num[i],
752*a9655020SBjoern A. Zeeb NULL, ath12k_userpd_irq_handler,
753*a9655020SBjoern A. Zeeb IRQF_TRIGGER_RISING | IRQF_ONESHOT,
754*a9655020SBjoern A. Zeeb upd_irq_name, ab);
755*a9655020SBjoern A. Zeeb if (ret)
756*a9655020SBjoern A. Zeeb return dev_err_probe(&ab->pdev->dev, ret,
757*a9655020SBjoern A. Zeeb "Request %s irq failed: %d\n",
758*a9655020SBjoern A. Zeeb ath12k_userpd_irq[i], ret);
759*a9655020SBjoern A. Zeeb }
760*a9655020SBjoern A. Zeeb
761*a9655020SBjoern A. Zeeb ab_ahb->spawn_state = devm_qcom_smem_state_get(&ab->pdev->dev, "spawn",
762*a9655020SBjoern A. Zeeb &ab_ahb->spawn_bit);
763*a9655020SBjoern A. Zeeb if (IS_ERR(ab_ahb->spawn_state))
764*a9655020SBjoern A. Zeeb return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->spawn_state),
765*a9655020SBjoern A. Zeeb "Failed to acquire spawn state\n");
766*a9655020SBjoern A. Zeeb
767*a9655020SBjoern A. Zeeb ab_ahb->stop_state = devm_qcom_smem_state_get(&ab->pdev->dev, "stop",
768*a9655020SBjoern A. Zeeb &ab_ahb->stop_bit);
769*a9655020SBjoern A. Zeeb if (IS_ERR(ab_ahb->stop_state))
770*a9655020SBjoern A. Zeeb return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->stop_state),
771*a9655020SBjoern A. Zeeb "Failed to acquire stop state\n");
772*a9655020SBjoern A. Zeeb
773*a9655020SBjoern A. Zeeb init_completion(&ab_ahb->userpd_spawned);
774*a9655020SBjoern A. Zeeb init_completion(&ab_ahb->userpd_ready);
775*a9655020SBjoern A. Zeeb init_completion(&ab_ahb->userpd_stopped);
776*a9655020SBjoern A. Zeeb return 0;
777*a9655020SBjoern A. Zeeb }
778*a9655020SBjoern A. Zeeb
ath12k_ahb_root_pd_state_notifier(struct notifier_block * nb,const unsigned long event,void * data)779*a9655020SBjoern A. Zeeb static int ath12k_ahb_root_pd_state_notifier(struct notifier_block *nb,
780*a9655020SBjoern A. Zeeb const unsigned long event, void *data)
781*a9655020SBjoern A. Zeeb {
782*a9655020SBjoern A. Zeeb struct ath12k_ahb *ab_ahb = container_of(nb, struct ath12k_ahb, root_pd_nb);
783*a9655020SBjoern A. Zeeb struct ath12k_base *ab = ab_ahb->ab;
784*a9655020SBjoern A. Zeeb
785*a9655020SBjoern A. Zeeb if (event == ATH12K_RPROC_AFTER_POWERUP) {
786*a9655020SBjoern A. Zeeb ath12k_dbg(ab, ATH12K_DBG_AHB, "Root PD is UP\n");
787*a9655020SBjoern A. Zeeb complete(&ab_ahb->rootpd_ready);
788*a9655020SBjoern A. Zeeb }
789*a9655020SBjoern A. Zeeb
790*a9655020SBjoern A. Zeeb return 0;
791*a9655020SBjoern A. Zeeb }
792*a9655020SBjoern A. Zeeb
ath12k_ahb_register_rproc_notifier(struct ath12k_base * ab)793*a9655020SBjoern A. Zeeb static int ath12k_ahb_register_rproc_notifier(struct ath12k_base *ab)
794*a9655020SBjoern A. Zeeb {
795*a9655020SBjoern A. Zeeb struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
796*a9655020SBjoern A. Zeeb
797*a9655020SBjoern A. Zeeb ab_ahb->root_pd_nb.notifier_call = ath12k_ahb_root_pd_state_notifier;
798*a9655020SBjoern A. Zeeb init_completion(&ab_ahb->rootpd_ready);
799*a9655020SBjoern A. Zeeb
800*a9655020SBjoern A. Zeeb ab_ahb->root_pd_notifier = qcom_register_ssr_notifier(ab_ahb->tgt_rproc->name,
801*a9655020SBjoern A. Zeeb &ab_ahb->root_pd_nb);
802*a9655020SBjoern A. Zeeb if (IS_ERR(ab_ahb->root_pd_notifier))
803*a9655020SBjoern A. Zeeb return PTR_ERR(ab_ahb->root_pd_notifier);
804*a9655020SBjoern A. Zeeb
805*a9655020SBjoern A. Zeeb return 0;
806*a9655020SBjoern A. Zeeb }
807*a9655020SBjoern A. Zeeb
/* Remove the SSR notifier installed by ath12k_ahb_register_rproc_notifier().
 * Logs an error and bails out if no notifier was registered.
 */
static void ath12k_ahb_unregister_rproc_notifier(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	if (!ab_ahb->root_pd_notifier) {
		ath12k_err(ab, "Rproc notifier not registered\n");
		return;
	}

	qcom_unregister_ssr_notifier(ab_ahb->root_pd_notifier,
				     &ab_ahb->root_pd_nb);
	/* Clear the handle so a second call is a detectable no-op */
	ab_ahb->root_pd_notifier = NULL;
}
821*a9655020SBjoern A. Zeeb
ath12k_ahb_get_rproc(struct ath12k_base * ab)822*a9655020SBjoern A. Zeeb static int ath12k_ahb_get_rproc(struct ath12k_base *ab)
823*a9655020SBjoern A. Zeeb {
824*a9655020SBjoern A. Zeeb struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
825*a9655020SBjoern A. Zeeb struct device *dev = ab->dev;
826*a9655020SBjoern A. Zeeb struct device_node *np;
827*a9655020SBjoern A. Zeeb struct rproc *prproc;
828*a9655020SBjoern A. Zeeb
829*a9655020SBjoern A. Zeeb np = of_parse_phandle(dev->of_node, "qcom,rproc", 0);
830*a9655020SBjoern A. Zeeb if (!np) {
831*a9655020SBjoern A. Zeeb ath12k_err(ab, "failed to get q6_rproc handle\n");
832*a9655020SBjoern A. Zeeb return -ENOENT;
833*a9655020SBjoern A. Zeeb }
834*a9655020SBjoern A. Zeeb
835*a9655020SBjoern A. Zeeb prproc = rproc_get_by_phandle(np->phandle);
836*a9655020SBjoern A. Zeeb of_node_put(np);
837*a9655020SBjoern A. Zeeb if (!prproc)
838*a9655020SBjoern A. Zeeb return dev_err_probe(&ab->pdev->dev, -EPROBE_DEFER,
839*a9655020SBjoern A. Zeeb "failed to get rproc\n");
840*a9655020SBjoern A. Zeeb
841*a9655020SBjoern A. Zeeb ab_ahb->tgt_rproc = prproc;
842*a9655020SBjoern A. Zeeb
843*a9655020SBjoern A. Zeeb return 0;
844*a9655020SBjoern A. Zeeb }
845*a9655020SBjoern A. Zeeb
ath12k_ahb_boot_root_pd(struct ath12k_base * ab)846*a9655020SBjoern A. Zeeb static int ath12k_ahb_boot_root_pd(struct ath12k_base *ab)
847*a9655020SBjoern A. Zeeb {
848*a9655020SBjoern A. Zeeb struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
849*a9655020SBjoern A. Zeeb unsigned long time_left;
850*a9655020SBjoern A. Zeeb int ret;
851*a9655020SBjoern A. Zeeb
852*a9655020SBjoern A. Zeeb ret = rproc_boot(ab_ahb->tgt_rproc);
853*a9655020SBjoern A. Zeeb if (ret < 0) {
854*a9655020SBjoern A. Zeeb ath12k_err(ab, "RootPD boot failed\n");
855*a9655020SBjoern A. Zeeb return ret;
856*a9655020SBjoern A. Zeeb }
857*a9655020SBjoern A. Zeeb
858*a9655020SBjoern A. Zeeb time_left = wait_for_completion_timeout(&ab_ahb->rootpd_ready,
859*a9655020SBjoern A. Zeeb ATH12K_ROOTPD_READY_TIMEOUT);
860*a9655020SBjoern A. Zeeb if (!time_left) {
861*a9655020SBjoern A. Zeeb ath12k_err(ab, "RootPD ready wait timed out\n");
862*a9655020SBjoern A. Zeeb return -ETIMEDOUT;
863*a9655020SBjoern A. Zeeb }
864*a9655020SBjoern A. Zeeb
865*a9655020SBjoern A. Zeeb return 0;
866*a9655020SBjoern A. Zeeb }
867*a9655020SBjoern A. Zeeb
/* Acquire the Q6 remoteproc, register for its SSR notifications, boot
 * the root PD if it is not already running, and finally set up the
 * userpd IRQs and SMEM state handles.
 *
 * Returns 0 on success or a negative errno; on failure the notifier is
 * unregistered and the rproc reference released.
 */
static int ath12k_ahb_configure_rproc(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	int ret;

	ret = ath12k_ahb_get_rproc(ab);
	if (ret < 0)
		return ret;

	ret = ath12k_ahb_register_rproc_notifier(ab);
	if (ret < 0) {
		ret = dev_err_probe(&ab->pdev->dev, ret,
				    "failed to register rproc notifier\n");
		goto err_put_rproc;
	}

	/* Only boot the root PD if the remote processor is not already up;
	 * the notifier completes rootpd_ready on AFTER_POWERUP.
	 */
	if (ab_ahb->tgt_rproc->state != RPROC_RUNNING) {
		ret = ath12k_ahb_boot_root_pd(ab);
		if (ret < 0) {
			ath12k_err(ab, "failed to boot the remote processor Q6\n");
			goto err_unreg_notifier;
		}
	}

	return ath12k_ahb_config_rproc_irq(ab);

err_unreg_notifier:
	ath12k_ahb_unregister_rproc_notifier(ab);

err_put_rproc:
	rproc_put(ab_ahb->tgt_rproc);
	return ret;
}
901*a9655020SBjoern A. Zeeb
/* Undo ath12k_ahb_configure_rproc(): drop the SSR notifier and release
 * the remoteproc reference.
 */
static void ath12k_ahb_deconfigure_rproc(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	ath12k_ahb_unregister_rproc_notifier(ab);
	rproc_put(ab_ahb->tgt_rproc);
}
909*a9655020SBjoern A. Zeeb
/* Map the device register space, optionally remap the relocated CE
 * register window, and acquire + enable the XO clock.
 *
 * Returns 0 on success or a negative errno. The error labels below form
 * a deliberate fallthrough chain: each label undoes one step and falls
 * into the next, so their ordering must not be changed.
 */
static int ath12k_ahb_resource_init(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	struct platform_device *pdev = ab->pdev;
	struct resource *mem_res;
	int ret;

	ab->mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
	if (IS_ERR(ab->mem)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(ab->mem), "ioremap error\n");
		goto out;
	}

	ab->mem_len = resource_size(mem_res);

	if (ab->hw_params->ce_remap) {
		const struct ce_remap *ce_remap = ab->hw_params->ce_remap;
		/* CE register space is moved out of WCSS and the space is not
		 * contiguous, hence remapping the CE registers to a new space
		 * for accessing them.
		 */
		ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
		if (!ab->mem_ce) {
			dev_err(&pdev->dev, "ce ioremap error\n");
			ret = -ENOMEM;
			goto err_mem_unmap;
		}
		ab->ce_remap = true;
		ab->ce_remap_base_addr = HAL_IPQ5332_CE_WFSS_REG_BASE;
	}

	ab_ahb->xo_clk = devm_clk_get(ab->dev, "xo");
	if (IS_ERR(ab_ahb->xo_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(ab_ahb->xo_clk),
				    "failed to get xo clock\n");
		goto err_mem_ce_unmap;
	}

	ret = clk_prepare_enable(ab_ahb->xo_clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable gcc_xo_clk: %d\n", ret);
		goto err_clock_deinit;
	}

	return 0;

err_clock_deinit:
	/* falls through: also unmaps CE and main register space below */
	devm_clk_put(ab->dev, ab_ahb->xo_clk);

err_mem_ce_unmap:
	ab_ahb->xo_clk = NULL;
	/* mem_ce is only mapped when ce_remap is set */
	if (ab->hw_params->ce_remap)
		iounmap(ab->mem_ce);

err_mem_unmap:
	ab->mem_ce = NULL;
	devm_iounmap(ab->dev, ab->mem);

out:
	ab->mem = NULL;
	return ret;
}
972*a9655020SBjoern A. Zeeb
ath12k_ahb_resource_deinit(struct ath12k_base * ab)973*a9655020SBjoern A. Zeeb static void ath12k_ahb_resource_deinit(struct ath12k_base *ab)
974*a9655020SBjoern A. Zeeb {
975*a9655020SBjoern A. Zeeb struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
976*a9655020SBjoern A. Zeeb
977*a9655020SBjoern A. Zeeb if (ab->mem)
978*a9655020SBjoern A. Zeeb devm_iounmap(ab->dev, ab->mem);
979*a9655020SBjoern A. Zeeb
980*a9655020SBjoern A. Zeeb if (ab->mem_ce)
981*a9655020SBjoern A. Zeeb iounmap(ab->mem_ce);
982*a9655020SBjoern A. Zeeb
983*a9655020SBjoern A. Zeeb ab->mem = NULL;
984*a9655020SBjoern A. Zeeb ab->mem_ce = NULL;
985*a9655020SBjoern A. Zeeb
986*a9655020SBjoern A. Zeeb clk_disable_unprepare(ab_ahb->xo_clk);
987*a9655020SBjoern A. Zeeb devm_clk_put(ab->dev, ab_ahb->xo_clk);
988*a9655020SBjoern A. Zeeb ab_ahb->xo_clk = NULL;
989*a9655020SBjoern A. Zeeb }
990*a9655020SBjoern A. Zeeb
/* Platform probe: allocate the ath12k core, select per-SoC ops from the
 * OF match data, map resources, initialize HAL/CE, bring up the Q6
 * remoteproc, wire up interrupts, and start the core.
 *
 * Returns 0 on success or a negative errno; the error labels unwind the
 * steps in reverse order.
 */
static int ath12k_ahb_probe(struct platform_device *pdev)
{
	struct ath12k_base *ab;
	const struct ath12k_hif_ops *hif_ops;
	struct ath12k_ahb *ab_ahb;
	enum ath12k_hw_rev hw_rev;
	u32 addr, userpd_id;
	int ret;

	/* DMA descriptors/buffers for this device use 32-bit addresses */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "Failed to set 32-bit coherent dma\n");
		return ret;
	}

	ab = ath12k_core_alloc(&pdev->dev, sizeof(struct ath12k_ahb),
			       ATH12K_BUS_AHB);
	if (!ab)
		return -ENOMEM;

	/* hw_rev comes from the .data field of ath12k_ahb_of_match */
	hw_rev = (enum ath12k_hw_rev)(kernel_ulong_t)of_device_get_match_data(&pdev->dev);
	switch (hw_rev) {
	case ATH12K_HW_IPQ5332_HW10:
		hif_ops = &ath12k_ahb_hif_ops_ipq5332;
		userpd_id = ATH12K_IPQ5332_USERPD_ID;
		break;
	default:
		ret = -EOPNOTSUPP;
		goto err_core_free;
	}

	ab->hif.ops = hif_ops;
	ab->pdev = pdev;
	ab->hw_rev = hw_rev;
	ab->target_mem_mode = ATH12K_QMI_MEMORY_MODE_DEFAULT;
	platform_set_drvdata(pdev, ab);
	ab_ahb = ath12k_ab_to_ahb(ab);
	ab_ahb->ab = ab;
	ab_ahb->userpd_id = userpd_id;

	/* Set fixed_mem_region to true for platforms that support fixed memory
	 * reservation from DT. If memory is reserved from DT for FW, ath12k driver
	 * need not to allocate memory.
	 */
	if (!of_property_read_u32(ab->dev->of_node, "memory-region", &addr))
		set_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);

	ret = ath12k_core_pre_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath12k_ahb_resource_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		goto err_resource_deinit;

	ret = ath12k_ce_alloc_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath12k_ahb_init_qmi_ce_config(ab);

	/* Bring up the Q6 remote processor before requesting device IRQs */
	ret = ath12k_ahb_configure_rproc(ab);
	if (ret)
		goto err_ce_free;

	ret = ath12k_ahb_config_irq(ab);
	if (ret) {
		ath12k_err(ab, "failed to configure irq: %d\n", ret);
		goto err_rproc_deconfigure;
	}

	ret = ath12k_core_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init core: %d\n", ret);
		goto err_rproc_deconfigure;
	}

	return 0;

err_rproc_deconfigure:
	ath12k_ahb_deconfigure_rproc(ab);

err_ce_free:
	ath12k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);

err_resource_deinit:
	ath12k_ahb_resource_deinit(ab);

err_core_free:
	ath12k_core_free(ab);
	platform_set_drvdata(pdev, NULL);

	return ret;
}
1094*a9655020SBjoern A. Zeeb
ath12k_ahb_remove_prepare(struct ath12k_base * ab)1095*a9655020SBjoern A. Zeeb static void ath12k_ahb_remove_prepare(struct ath12k_base *ab)
1096*a9655020SBjoern A. Zeeb {
1097*a9655020SBjoern A. Zeeb unsigned long left;
1098*a9655020SBjoern A. Zeeb
1099*a9655020SBjoern A. Zeeb if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) {
1100*a9655020SBjoern A. Zeeb left = wait_for_completion_timeout(&ab->driver_recovery,
1101*a9655020SBjoern A. Zeeb ATH12K_AHB_RECOVERY_TIMEOUT);
1102*a9655020SBjoern A. Zeeb if (!left)
1103*a9655020SBjoern A. Zeeb ath12k_warn(ab, "failed to receive recovery response completion\n");
1104*a9655020SBjoern A. Zeeb }
1105*a9655020SBjoern A. Zeeb
1106*a9655020SBjoern A. Zeeb set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
1107*a9655020SBjoern A. Zeeb cancel_work_sync(&ab->restart_work);
1108*a9655020SBjoern A. Zeeb cancel_work_sync(&ab->qmi.event_work);
1109*a9655020SBjoern A. Zeeb }
1110*a9655020SBjoern A. Zeeb
/* Final teardown helper shared by the remove paths: free HAL/CE state,
 * unmap resources, release the rproc, and free the core.
 */
static void ath12k_ahb_free_resources(struct ath12k_base *ab)
{
	/* Cache pdev first: ath12k_core_free() frees ab, so ab->pdev must
	 * not be read after that call.
	 */
	struct platform_device *pdev = ab->pdev;

	ath12k_hal_srng_deinit(ab);
	ath12k_ce_free_pipes(ab);
	ath12k_ahb_resource_deinit(ab);
	ath12k_ahb_deconfigure_rproc(ab);
	ath12k_core_free(ab);
	platform_set_drvdata(pdev, NULL);
}
1122*a9655020SBjoern A. Zeeb
ath12k_ahb_remove(struct platform_device * pdev)1123*a9655020SBjoern A. Zeeb static void ath12k_ahb_remove(struct platform_device *pdev)
1124*a9655020SBjoern A. Zeeb {
1125*a9655020SBjoern A. Zeeb struct ath12k_base *ab = platform_get_drvdata(pdev);
1126*a9655020SBjoern A. Zeeb
1127*a9655020SBjoern A. Zeeb if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1128*a9655020SBjoern A. Zeeb ath12k_ahb_power_down(ab, false);
1129*a9655020SBjoern A. Zeeb goto qmi_fail;
1130*a9655020SBjoern A. Zeeb }
1131*a9655020SBjoern A. Zeeb
1132*a9655020SBjoern A. Zeeb ath12k_ahb_remove_prepare(ab);
1133*a9655020SBjoern A. Zeeb ath12k_core_hw_group_cleanup(ab->ag);
1134*a9655020SBjoern A. Zeeb qmi_fail:
1135*a9655020SBjoern A. Zeeb ath12k_core_deinit(ab);
1136*a9655020SBjoern A. Zeeb ath12k_ahb_free_resources(ab);
1137*a9655020SBjoern A. Zeeb }
1138*a9655020SBjoern A. Zeeb
/* Platform driver binding for ath12k AHB devices (matched via
 * ath12k_ahb_of_match).
 */
static struct platform_driver ath12k_ahb_driver = {
	.driver = {
		.name = "ath12k_ahb",
		.of_match_table = ath12k_ahb_of_match,
	},
	.probe = ath12k_ahb_probe,
	.remove = ath12k_ahb_remove,
};
1147*a9655020SBjoern A. Zeeb
/* Register the AHB platform driver; called from the ath12k module init path. */
int ath12k_ahb_init(void)
{
	return platform_driver_register(&ath12k_ahb_driver);
}
1152*a9655020SBjoern A. Zeeb
/* Unregister the AHB platform driver; called from the ath12k module exit path. */
void ath12k_ahb_exit(void)
{
	platform_driver_unregister(&ath12k_ahb_driver);
}
1157