xref: /linux/drivers/net/wireless/ath/ath12k/ahb.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5  */
6 
7 #include <linux/dma-mapping.h>
8 #include <linux/firmware/qcom/qcom_scm.h>
9 #include <linux/of.h>
10 #include <linux/of_device.h>
11 #include <linux/platform_device.h>
12 #include <linux/remoteproc.h>
13 #include <linux/soc/qcom/mdt_loader.h>
14 #include <linux/soc/qcom/smem_state.h>
15 #include "ahb.h"
16 #include "debug.h"
17 #include "hif.h"
18 
19 #define ATH12K_IRQ_CE0_OFFSET 4
20 #define ATH12K_MAX_UPDS 1
21 #define ATH12K_UPD_IRQ_WRD_LEN  18
22 
/* Per-device-family AHB driver registrations, indexed by the device family id */
static struct ath12k_ahb_driver *ath12k_ahb_family_drivers[ATH12K_DEVICE_FAMILY_MAX];
/* Platform IRQ resource names for the userPD lifecycle interrupts,
 * indexed by ATH12K_USERPD_{SPAWN,READY,STOP_ACK}_IRQ
 */
static const char ath12k_userpd_irq[][9] = {"spawn",
				     "ready",
				     "stop-ack"};
27 
/* Platform IRQ resource names, indexed by the logical interrupt number:
 * four misc lines, twelve CE lines starting at ATH12K_IRQ_CE0_OFFSET,
 * then the DP lines matching enum ext_irq_num below.
 */
static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
	"misc-pulse1",
	"misc-latch",
	"sw-exception",
	"watchdog",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring4",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};
83 
/* Logical indices of the DP ("external") interrupt sources into irq_name[];
 * they start at 16, directly after the 4 misc lines and 12 CE lines.
 */
enum ext_irq_num {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
123 
124 static u32 ath12k_ahb_read32(struct ath12k_base *ab, u32 offset)
125 {
126 	if (ab->ce_remap && offset < ab->cmem_offset)
127 		return ioread32(ab->mem_ce + offset);
128 	return ioread32(ab->mem + offset);
129 }
130 
131 static void ath12k_ahb_write32(struct ath12k_base *ab, u32 offset,
132 			       u32 value)
133 {
134 	if (ab->ce_remap && offset < ab->cmem_offset)
135 		iowrite32(value, ab->mem_ce + offset);
136 	else
137 		iowrite32(value, ab->mem + offset);
138 }
139 
140 static void ath12k_ahb_cancel_workqueue(struct ath12k_base *ab)
141 {
142 	int i;
143 
144 	for (i = 0; i < ab->hw_params->ce_count; i++) {
145 		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
146 
147 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
148 			continue;
149 
150 		cancel_work_sync(&ce_pipe->intr_wq);
151 	}
152 }
153 
154 static void ath12k_ahb_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
155 {
156 	int i;
157 
158 	for (i = 0; i < irq_grp->num_irq; i++)
159 		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
160 }
161 
/* Mask all DP group interrupts and quiesce their NAPI contexts.
 * Callers that must also wait for in-flight handlers use
 * ath12k_ahb_ext_irq_disable(), which adds synchronize_irq().
 */
static void __ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath12k_ahb_ext_grp_disable(irq_grp);
		if (irq_grp->napi_enabled) {
			/* Let a running poll finish before disabling NAPI */
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}
177 
178 static void ath12k_ahb_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
179 {
180 	int i;
181 
182 	for (i = 0; i < irq_grp->num_irq; i++)
183 		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
184 }
185 
186 static void ath12k_ahb_setbit32(struct ath12k_base *ab, u8 bit, u32 offset)
187 {
188 	u32 val;
189 
190 	val = ath12k_ahb_read32(ab, offset);
191 	ath12k_ahb_write32(ab, offset, val | BIT(bit));
192 }
193 
194 static void ath12k_ahb_clearbit32(struct ath12k_base *ab, u8 bit, u32 offset)
195 {
196 	u32 val;
197 
198 	val = ath12k_ahb_read32(ab, offset);
199 	ath12k_ahb_write32(ab, offset, val & ~BIT(bit));
200 }
201 
/* Unmask the interrupt sources of one CE pipe. IE1 carries the source-ring
 * bits, IE2 the destination-ring bits, and IE3 the destination status-ring
 * bits (the pipe's IE3 bit is its id shifted by CE_HOST_IE_3_SHIFT).
 */
static void ath12k_ahb_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr;

	ce_attr = &ab->hw_params->host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath12k_ahb_setbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath12k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
		ath12k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				    ie3_reg_addr);
	}
}
222 
/* Mask the interrupt sources of one CE pipe; mirror image of
 * ath12k_ahb_ce_irq_enable().
 */
static void ath12k_ahb_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr;

	ce_attr = &ab->hw_params->host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath12k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath12k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
		ath12k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				      ie3_reg_addr);
	}
}
243 
244 static void ath12k_ahb_sync_ce_irqs(struct ath12k_base *ab)
245 {
246 	int i;
247 	int irq_idx;
248 
249 	for (i = 0; i < ab->hw_params->ce_count; i++) {
250 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
251 			continue;
252 
253 		irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
254 		synchronize_irq(ab->irq_num[irq_idx]);
255 	}
256 }
257 
258 static void ath12k_ahb_sync_ext_irqs(struct ath12k_base *ab)
259 {
260 	int i, j;
261 	int irq_idx;
262 
263 	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
264 		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
265 
266 		for (j = 0; j < irq_grp->num_irq; j++) {
267 			irq_idx = irq_grp->irqs[j];
268 			synchronize_irq(ab->irq_num[irq_idx]);
269 		}
270 	}
271 }
272 
273 static void ath12k_ahb_ce_irqs_enable(struct ath12k_base *ab)
274 {
275 	int i;
276 
277 	for (i = 0; i < ab->hw_params->ce_count; i++) {
278 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
279 			continue;
280 		ath12k_ahb_ce_irq_enable(ab, i);
281 	}
282 }
283 
284 static void ath12k_ahb_ce_irqs_disable(struct ath12k_base *ab)
285 {
286 	int i;
287 
288 	for (i = 0; i < ab->hw_params->ce_count; i++) {
289 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
290 			continue;
291 		ath12k_ahb_ce_irq_disable(ab, i);
292 	}
293 }
294 
/* HIF .start: unmask CE interrupts and post initial RX buffers to the CEs. */
static int ath12k_ahb_start(struct ath12k_base *ab)
{
	ath12k_ahb_ce_irqs_enable(ab);
	ath12k_ce_rx_post_buf(ab);

	return 0;
}
302 
/* HIF .irq_enable: enable each group's NAPI before unmasking its lines so
 * an interrupt cannot schedule a disabled NAPI context.
 */
static void ath12k_ahb_ext_irq_enable(struct ath12k_base *ab)
{
	struct ath12k_ext_irq_grp *irq_grp;
	int i;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		if (!irq_grp->napi_enabled) {
			napi_enable(&irq_grp->napi);
			irq_grp->napi_enabled = true;
		}
		ath12k_ahb_ext_grp_enable(irq_grp);
	}
}
317 
/* HIF .irq_disable: mask all DP group interrupts, stop NAPI, then wait for
 * any in-flight hard-IRQ handlers to complete.
 */
static void ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
{
	__ath12k_ahb_ext_irq_disable(ab);
	ath12k_ahb_sync_ext_irqs(ab);
}
323 
/* HIF .stop: quiesce the CE path — mask CE interrupts (skipped during crash
 * flush; NOTE(review): presumably already handled by the recovery path —
 * confirm), wait for handlers, drain bottom-half work, stop the RX replenish
 * timer and tear down the pipes.
 */
static void ath12k_ahb_stop(struct ath12k_base *ab)
{
	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_ahb_ce_irqs_disable(ab);
	ath12k_ahb_sync_ce_irqs(ab);
	ath12k_ahb_cancel_workqueue(ab);
	timer_delete_sync(&ab->rx_replenish_retry);
	ath12k_ce_cleanup_pipes(ab);
}
333 
/* HIF .power_up: boot the userPD.
 *
 * Sequence: map the reserved firmware region, load the primary MDT image
 * (qcom_mdt_load) and the secondary image (qcom_mdt_load_no_init) into it,
 * authenticate/boot via SCM, then ask Q6 over SMEM to spawn the userPD and
 * wait for the spawn and ready interrupts.
 *
 * Returns 0 on success or a negative errno. On success execution falls
 * through the err_fw2/err_fw labels with ret == 0, releasing both firmware
 * handles — the images have already been copied into the reserved region.
 */
static int ath12k_ahb_power_up(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	char fw_name[ATH12K_USERPD_FW_NAME_LEN];
	char fw2_name[ATH12K_USERPD_FW_NAME_LEN];
	struct device *dev = ab->dev;
	const struct firmware *fw, *fw2;
	struct reserved_mem *rmem = NULL;
	unsigned long time_left;
	phys_addr_t mem_phys;
	void *mem_region;
	size_t mem_size;
	u32 pasid;
	int ret;

	rmem = ath12k_core_get_reserved_mem(ab, 0);
	if (!rmem)
		return -ENODEV;

	mem_phys = rmem->base;
	mem_size = rmem->size;
	mem_region = devm_memremap(dev, mem_phys, mem_size, MEMREMAP_WC);
	if (IS_ERR(mem_region)) {
		ath12k_err(ab, "unable to map memory region: %pa+%pa\n",
			   &rmem->base, &rmem->size);
		return PTR_ERR(mem_region);
	}

	/* Primary image name is per-userPD: e.g. <prefix><id><suffix> */
	snprintf(fw_name, sizeof(fw_name), "%s/%s/%s%d%s", ATH12K_FW_DIR,
		 ab->hw_params->fw.dir, ATH12K_AHB_FW_PREFIX, ab_ahb->userpd_id,
		 ATH12K_AHB_FW_SUFFIX);

	ret = request_firmware(&fw, fw_name, dev);
	if (ret < 0) {
		ath12k_err(ab, "request_firmware failed\n");
		return ret;
	}

	ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw_name,
		   fw->size);

	if (!fw->size) {
		ath12k_err(ab, "Invalid firmware size\n");
		ret = -EINVAL;
		goto err_fw;
	}

	/* SCM peripheral id: userPD id encoded on top of the fixed SW id */
	pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
		ATH12K_AHB_UPD_SWID;

	/* Load FW image to a reserved memory location */
	ret = qcom_mdt_load(dev, fw, fw_name, pasid, mem_region, mem_phys, mem_size,
			    &mem_phys);
	if (ret) {
		ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
		goto err_fw;
	}

	snprintf(fw2_name, sizeof(fw2_name), "%s/%s/%s", ATH12K_FW_DIR,
		 ab->hw_params->fw.dir, ATH12K_AHB_FW2);

	ret = request_firmware(&fw2, fw2_name, dev);
	if (ret < 0) {
		ath12k_err(ab, "request_firmware failed\n");
		goto err_fw;
	}

	ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw2_name,
		   fw2->size);

	if (!fw2->size) {
		ath12k_err(ab, "Invalid firmware size\n");
		ret = -EINVAL;
		goto err_fw2;
	}

	/* Second image goes to the same region (mem_phys was updated by the
	 * first load); no SCM init pass is needed for it.
	 */
	ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, mem_region, mem_phys,
				    mem_size, &mem_phys);
	if (ret) {
		ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
		goto err_fw2;
	}

	/* Authenticate FW image using peripheral ID */
	ret = qcom_scm_pas_auth_and_reset(pasid);
	if (ret) {
		ath12k_err(ab, "failed to boot the remote processor %d\n", ret);
		goto err_fw2;
	}

	/* Instruct Q6 to spawn userPD thread */
	ret = qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit),
					  BIT(ab_ahb->spawn_bit));
	if (ret) {
		ath12k_err(ab, "Failed to update spawn state %d\n", ret);
		goto err_fw2;
	}

	time_left = wait_for_completion_timeout(&ab_ahb->userpd_spawned,
						ATH12K_USERPD_SPAWN_TIMEOUT);
	if (!time_left) {
		ath12k_err(ab, "UserPD spawn wait timed out\n");
		ret = -ETIMEDOUT;
		goto err_fw2;
	}

	time_left = wait_for_completion_timeout(&ab_ahb->userpd_ready,
						ATH12K_USERPD_READY_TIMEOUT);
	if (!time_left) {
		ath12k_err(ab, "UserPD ready wait timed out\n");
		ret = -ETIMEDOUT;
		goto err_fw2;
	}

	/* Acknowledge the spawn by clearing the bit again */
	qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit), 0);

	ath12k_dbg(ab, ATH12K_DBG_AHB, "UserPD%d is now UP\n", ab_ahb->userpd_id);

err_fw2:
	release_firmware(fw2);
err_fw:
	release_firmware(fw);
	return ret;
}
458 
/* HIF .power_down: ask Q6 over SMEM to stop the userPD, wait for the
 * stop-ack interrupt, then release the firmware through SCM.
 * @is_suspend is accepted for the HIF interface but not used here.
 */
static void ath12k_ahb_power_down(struct ath12k_base *ab, bool is_suspend)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	unsigned long time_left;
	u32 pasid;
	int ret;

	qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit),
				    BIT(ab_ahb->stop_bit));

	time_left = wait_for_completion_timeout(&ab_ahb->userpd_stopped,
						ATH12K_USERPD_STOP_TIMEOUT);
	if (!time_left) {
		/* Don't shut down the PAS if the userPD never acked the stop */
		ath12k_err(ab, "UserPD stop wait timed out\n");
		return;
	}

	qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit), 0);

	/* Same peripheral id encoding as used at power-up */
	pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
		ATH12K_AHB_UPD_SWID;
	/* Release the firmware */
	ret = qcom_scm_pas_shutdown(pasid);
	if (ret)
		ath12k_err(ab, "scm pas shutdown failed for userPD%d: %d\n",
			   ab_ahb->userpd_id, ret);
}
486 
487 static void ath12k_ahb_init_qmi_ce_config(struct ath12k_base *ab)
488 {
489 	struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
490 
491 	cfg->tgt_ce_len = ab->hw_params->target_ce_count;
492 	cfg->tgt_ce = ab->hw_params->target_ce_config;
493 	cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
494 	cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
495 	ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
496 }
497 
498 static void ath12k_ahb_ce_workqueue(struct work_struct *work)
499 {
500 	struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);
501 
502 	ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
503 
504 	ath12k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
505 }
506 
/* Hard IRQ for a CE pipe: record the time, mask the pipe's interrupt
 * sources and defer ring processing to ath12k_ahb_ce_workqueue().
 */
static irqreturn_t ath12k_ahb_ce_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ce_pipe *ce_pipe = arg;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	ath12k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);

	queue_work(system_bh_wq, &ce_pipe->intr_wq);

	return IRQ_HANDLED;
}
520 
/* NAPI poll for a DP interrupt group: service the SRNGs and, when the
 * budget was not exhausted, complete NAPI and unmask the group's lines.
 */
static int ath12k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
						struct ath12k_ext_irq_grp,
						napi);
	struct ath12k_base *ab = irq_grp->ab;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	int work_done;

	work_done = ath12k_dp_service_srng(dp, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ath12k_ahb_ext_grp_enable(irq_grp);
	}

	/* ath12k_dp_service_srng() may report more than budget; clamp the
	 * value returned to the NAPI core
	 */
	if (work_done > budget)
		work_done = budget;

	return work_done;
}
541 
/* Hard IRQ for a DP group: record the time, mask the whole group and hand
 * processing to its NAPI poll.
 */
static irqreturn_t ath12k_ahb_ext_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ext_irq_grp *irq_grp = arg;

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	ath12k_ahb_ext_grp_disable(irq_grp);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}
555 
556 static int ath12k_ahb_config_ext_irq(struct ath12k_base *ab)
557 {
558 	const struct ath12k_hw_ring_mask *ring_mask;
559 	struct ath12k_ext_irq_grp *irq_grp;
560 	int i, j, irq, irq_idx, ret;
561 	u32 num_irq;
562 
563 	ring_mask = ab->hw_params->ring_mask;
564 	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
565 		irq_grp = &ab->ext_irq_grp[i];
566 		num_irq = 0;
567 
568 		irq_grp->ab = ab;
569 		irq_grp->grp_id = i;
570 
571 		irq_grp->napi_ndev = alloc_netdev_dummy(0);
572 		if (!irq_grp->napi_ndev)
573 			return -ENOMEM;
574 
575 		netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
576 			       ath12k_ahb_ext_grp_napi_poll);
577 
578 		for (j = 0; j < ATH12K_EXT_IRQ_NUM_MAX; j++) {
579 			/* For TX ring, ensure that the ring mask and the
580 			 * tcl_to_wbm_rbm_map point to the same ring number.
581 			 */
582 			if (ring_mask->tx[i] &
583 			    BIT(ab->hal.tcl_to_wbm_rbm_map[j].wbm_ring_num)) {
584 				irq_grp->irqs[num_irq++] =
585 					wbm2host_tx_completions_ring1 - j;
586 			}
587 
588 			if (ring_mask->rx[i] & BIT(j)) {
589 				irq_grp->irqs[num_irq++] =
590 					reo2host_destination_ring1 - j;
591 			}
592 
593 			if (ring_mask->rx_err[i] & BIT(j))
594 				irq_grp->irqs[num_irq++] = reo2host_exception;
595 
596 			if (ring_mask->rx_wbm_rel[i] & BIT(j))
597 				irq_grp->irqs[num_irq++] = wbm2host_rx_release;
598 
599 			if (ring_mask->reo_status[i] & BIT(j))
600 				irq_grp->irqs[num_irq++] = reo2host_status;
601 
602 			if (ring_mask->rx_mon_dest[i] & BIT(j))
603 				irq_grp->irqs[num_irq++] =
604 					rxdma2host_monitor_destination_mac1;
605 		}
606 
607 		irq_grp->num_irq = num_irq;
608 
609 		for (j = 0; j < irq_grp->num_irq; j++) {
610 			irq_idx = irq_grp->irqs[j];
611 
612 			irq = platform_get_irq_byname(ab->pdev,
613 						      irq_name[irq_idx]);
614 			ab->irq_num[irq_idx] = irq;
615 			irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
616 			ret = devm_request_irq(ab->dev, irq,
617 					       ath12k_ahb_ext_interrupt_handler,
618 					       IRQF_TRIGGER_RISING,
619 					       irq_name[irq_idx], irq_grp);
620 			if (ret)
621 				ath12k_warn(ab, "failed request_irq for %d\n", irq);
622 		}
623 	}
624 
625 	return 0;
626 }
627 
628 static int ath12k_ahb_config_irq(struct ath12k_base *ab)
629 {
630 	int irq, irq_idx, i;
631 	int ret;
632 
633 	/* Configure CE irqs */
634 	for (i = 0; i < ab->hw_params->ce_count; i++) {
635 		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
636 
637 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
638 			continue;
639 
640 		irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
641 
642 		INIT_WORK(&ce_pipe->intr_wq, ath12k_ahb_ce_workqueue);
643 		irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
644 		ret = devm_request_irq(ab->dev, irq, ath12k_ahb_ce_interrupt_handler,
645 				       IRQF_TRIGGER_RISING, irq_name[irq_idx],
646 				       ce_pipe);
647 		if (ret)
648 			return ret;
649 
650 		ab->irq_num[irq_idx] = irq;
651 	}
652 
653 	/* Configure external interrupts */
654 	ret = ath12k_ahb_config_ext_irq(ab);
655 
656 	return ret;
657 }
658 
659 static int ath12k_ahb_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
660 					  u8 *ul_pipe, u8 *dl_pipe)
661 {
662 	const struct service_to_pipe *entry;
663 	bool ul_set = false, dl_set = false;
664 	u32 pipedir;
665 	int i;
666 
667 	for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
668 		entry = &ab->hw_params->svc_to_ce_map[i];
669 
670 		if (__le32_to_cpu(entry->service_id) != service_id)
671 			continue;
672 
673 		pipedir = __le32_to_cpu(entry->pipedir);
674 		if (pipedir == PIPEDIR_IN || pipedir == PIPEDIR_INOUT) {
675 			WARN_ON(dl_set);
676 			*dl_pipe = __le32_to_cpu(entry->pipenum);
677 			dl_set = true;
678 		}
679 
680 		if (pipedir == PIPEDIR_OUT || pipedir == PIPEDIR_INOUT) {
681 			WARN_ON(ul_set);
682 			*ul_pipe = __le32_to_cpu(entry->pipenum);
683 			ul_set = true;
684 		}
685 	}
686 
687 	if (WARN_ON(!ul_set || !dl_set))
688 		return -ENOENT;
689 
690 	return 0;
691 }
692 
/* HIF operations for AHB devices; power_up/power_down drive the userPD
 * firmware lifecycle via SCM and SMEM.
 */
static const struct ath12k_hif_ops ath12k_ahb_hif_ops = {
	.start = ath12k_ahb_start,
	.stop = ath12k_ahb_stop,
	.read32 = ath12k_ahb_read32,
	.write32 = ath12k_ahb_write32,
	.irq_enable = ath12k_ahb_ext_irq_enable,
	.irq_disable = ath12k_ahb_ext_irq_disable,
	.map_service_to_pipe = ath12k_ahb_map_service_to_pipe,
	.power_up = ath12k_ahb_power_up,
	.power_down = ath12k_ahb_power_down,
};
704 
705 static irqreturn_t ath12k_userpd_irq_handler(int irq, void *data)
706 {
707 	struct ath12k_base *ab = data;
708 	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
709 
710 	if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_SPAWN_IRQ]) {
711 		complete(&ab_ahb->userpd_spawned);
712 	} else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_READY_IRQ]) {
713 		complete(&ab_ahb->userpd_ready);
714 	} else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_STOP_ACK_IRQ])	{
715 		complete(&ab_ahb->userpd_stopped);
716 	} else {
717 		ath12k_err(ab, "Invalid userpd interrupt\n");
718 		return IRQ_NONE;
719 	}
720 
721 	return IRQ_HANDLED;
722 }
723 
/* Request the userPD lifecycle interrupts ("spawn", "ready", "stop-ack"),
 * acquire the SMEM spawn/stop state handles and initialise the matching
 * completions.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath12k_ahb_config_rproc_irq(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	int i, ret;
	char *upd_irq_name;

	for (i = 0; i < ATH12K_USERPD_MAX_IRQ; i++) {
		ab_ahb->userpd_irq_num[i] = platform_get_irq_byname(ab->pdev,
								    ath12k_userpd_irq[i]);
		if (ab_ahb->userpd_irq_num[i] < 0)
			return ab_ahb->userpd_irq_num[i];

		/* Device-managed name buffer: must outlive the IRQ itself.
		 * ATH12K_UPD_IRQ_WRD_LEN bounds "UserPD<id>-<name>";
		 * scnprintf truncates safely if it would not fit.
		 */
		upd_irq_name = devm_kzalloc(&ab->pdev->dev, ATH12K_UPD_IRQ_WRD_LEN,
					    GFP_KERNEL);
		if (!upd_irq_name)
			return -ENOMEM;

		scnprintf(upd_irq_name, ATH12K_UPD_IRQ_WRD_LEN, "UserPD%u-%s",
			  ab_ahb->userpd_id, ath12k_userpd_irq[i]);
		ret = devm_request_threaded_irq(&ab->pdev->dev, ab_ahb->userpd_irq_num[i],
						NULL, ath12k_userpd_irq_handler,
						IRQF_TRIGGER_RISING | IRQF_ONESHOT,
						upd_irq_name, ab);
		if (ret)
			return dev_err_probe(&ab->pdev->dev, ret,
					     "Request %s irq failed: %d\n",
					     ath12k_userpd_irq[i], ret);
	}

	ab_ahb->spawn_state = devm_qcom_smem_state_get(&ab->pdev->dev, "spawn",
						       &ab_ahb->spawn_bit);
	if (IS_ERR(ab_ahb->spawn_state))
		return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->spawn_state),
				     "Failed to acquire spawn state\n");

	ab_ahb->stop_state = devm_qcom_smem_state_get(&ab->pdev->dev, "stop",
						      &ab_ahb->stop_bit);
	if (IS_ERR(ab_ahb->stop_state))
		return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->stop_state),
				     "Failed to acquire stop state\n");

	init_completion(&ab_ahb->userpd_spawned);
	init_completion(&ab_ahb->userpd_ready);
	init_completion(&ab_ahb->userpd_stopped);
	return 0;
}
770 
771 static int ath12k_ahb_root_pd_state_notifier(struct notifier_block *nb,
772 					     const unsigned long event, void *data)
773 {
774 	struct ath12k_ahb *ab_ahb = container_of(nb, struct ath12k_ahb, root_pd_nb);
775 	struct ath12k_base *ab = ab_ahb->ab;
776 
777 	if (event == ATH12K_RPROC_AFTER_POWERUP) {
778 		ath12k_dbg(ab, ATH12K_DBG_AHB, "Root PD is UP\n");
779 		complete(&ab_ahb->rootpd_ready);
780 	}
781 
782 	return 0;
783 }
784 
785 static int ath12k_ahb_register_rproc_notifier(struct ath12k_base *ab)
786 {
787 	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
788 
789 	ab_ahb->root_pd_nb.notifier_call = ath12k_ahb_root_pd_state_notifier;
790 	init_completion(&ab_ahb->rootpd_ready);
791 
792 	ab_ahb->root_pd_notifier = qcom_register_ssr_notifier(ab_ahb->tgt_rproc->name,
793 							      &ab_ahb->root_pd_nb);
794 	if (IS_ERR(ab_ahb->root_pd_notifier))
795 		return PTR_ERR(ab_ahb->root_pd_notifier);
796 
797 	return 0;
798 }
799 
/* Unregister the rootPD SSR notifier; harmless (logged) no-op if it was
 * never registered.
 */
static void ath12k_ahb_unregister_rproc_notifier(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	if (!ab_ahb->root_pd_notifier) {
		ath12k_err(ab, "Rproc notifier not registered\n");
		return;
	}

	qcom_unregister_ssr_notifier(ab_ahb->root_pd_notifier,
				     &ab_ahb->root_pd_nb);
	ab_ahb->root_pd_notifier = NULL;
}
813 
814 static int ath12k_ahb_get_rproc(struct ath12k_base *ab)
815 {
816 	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
817 	struct device *dev = ab->dev;
818 	struct device_node *np;
819 	struct rproc *prproc;
820 
821 	np = of_parse_phandle(dev->of_node, "qcom,rproc", 0);
822 	if (!np) {
823 		ath12k_err(ab, "failed to get q6_rproc handle\n");
824 		return -ENOENT;
825 	}
826 
827 	prproc = rproc_get_by_phandle(np->phandle);
828 	of_node_put(np);
829 	if (!prproc)
830 		return dev_err_probe(&ab->pdev->dev, -EPROBE_DEFER,
831 				     "failed to get rproc\n");
832 
833 	ab_ahb->tgt_rproc = prproc;
834 
835 	return 0;
836 }
837 
838 static int ath12k_ahb_boot_root_pd(struct ath12k_base *ab)
839 {
840 	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
841 	unsigned long time_left;
842 	int ret;
843 
844 	ret = rproc_boot(ab_ahb->tgt_rproc);
845 	if (ret < 0) {
846 		ath12k_err(ab, "RootPD boot failed\n");
847 		return ret;
848 	}
849 
850 	time_left = wait_for_completion_timeout(&ab_ahb->rootpd_ready,
851 						ATH12K_ROOTPD_READY_TIMEOUT);
852 	if (!time_left) {
853 		ath12k_err(ab, "RootPD ready wait timed out\n");
854 		return -ETIMEDOUT;
855 	}
856 
857 	return 0;
858 }
859 
/* Acquire the Q6 rproc, register for its SSR notifications, boot the
 * rootPD if it is not already running, and wire up the userPD lifecycle
 * IRQs. Unwinds the notifier and rproc reference on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath12k_ahb_configure_rproc(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	int ret;

	ret = ath12k_ahb_get_rproc(ab);
	if (ret < 0)
		return ret;

	ret = ath12k_ahb_register_rproc_notifier(ab);
	if (ret < 0) {
		ret = dev_err_probe(&ab->pdev->dev, ret,
				    "failed to register rproc notifier\n");
		goto err_put_rproc;
	}

	/* Another userPD on the same SoC may have booted the rootPD already */
	if (ab_ahb->tgt_rproc->state != RPROC_RUNNING) {
		ret = ath12k_ahb_boot_root_pd(ab);
		if (ret < 0) {
			ath12k_err(ab, "failed to boot the remote processor Q6\n");
			goto err_unreg_notifier;
		}
	}

	return ath12k_ahb_config_rproc_irq(ab);

err_unreg_notifier:
	ath12k_ahb_unregister_rproc_notifier(ab);

err_put_rproc:
	rproc_put(ab_ahb->tgt_rproc);
	return ret;
}
893 
/* Undo ath12k_ahb_configure_rproc(): drop the SSR notifier and the rproc
 * reference.
 */
static void ath12k_ahb_deconfigure_rproc(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	ath12k_ahb_unregister_rproc_notifier(ab);
	rproc_put(ab_ahb->tgt_rproc);
}
901 
/* Map the device register space (and the separate CE register window when
 * the hw supports CE remapping), then acquire and enable the XO clock.
 *
 * The error labels below intentionally fall through: each label clears the
 * state of the step above it before continuing into the next label's
 * cleanup.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath12k_ahb_resource_init(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	struct platform_device *pdev = ab->pdev;
	struct resource *mem_res;
	int ret;

	ab->mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
	if (IS_ERR(ab->mem)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(ab->mem), "ioremap error\n");
		goto out;
	}

	ab->mem_len = resource_size(mem_res);

	if (ab->hw_params->ce_remap) {
		const struct ce_remap *ce_remap = ab->hw_params->ce_remap;
		/* CE register space is moved out of WCSS and the space is not
		 * contiguous, hence remapping the CE registers to a new space
		 * for accessing them.
		 */
		ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
		if (!ab->mem_ce) {
			dev_err(&pdev->dev, "ce ioremap error\n");
			ret = -ENOMEM;
			goto err_mem_unmap;
		}
		ab->ce_remap = true;
		ab->cmem_offset = ce_remap->cmem_offset;
		ab->ce_remap_base_addr = ce_remap->base;
	}

	ab_ahb->xo_clk = devm_clk_get(ab->dev, "xo");
	if (IS_ERR(ab_ahb->xo_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(ab_ahb->xo_clk),
				    "failed to get xo clock\n");
		goto err_mem_ce_unmap;
	}

	ret = clk_prepare_enable(ab_ahb->xo_clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable gcc_xo_clk: %d\n", ret);
		goto err_clock_deinit;
	}

	return 0;

err_clock_deinit:
	devm_clk_put(ab->dev, ab_ahb->xo_clk);

err_mem_ce_unmap:
	/* fall-through cleanup: reset clk handle, then unmap the CE window */
	ab_ahb->xo_clk = NULL;
	if (ab->hw_params->ce_remap)
		iounmap(ab->mem_ce);

err_mem_unmap:
	/* fall-through cleanup: reset CE pointer, then unmap registers */
	ab->mem_ce = NULL;
	devm_iounmap(ab->dev, ab->mem);

out:
	ab->mem = NULL;
	return ret;
}
965 
/* Undo ath12k_ahb_resource_init(): unmap register spaces and release the
 * XO clock.
 */
static void ath12k_ahb_resource_deinit(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	if (ab->mem)
		devm_iounmap(ab->dev, ab->mem);

	if (ab->mem_ce)
		iounmap(ab->mem_ce);

	ab->mem = NULL;
	ab->mem_ce = NULL;

	clk_disable_unprepare(ab_ahb->xo_clk);
	devm_clk_put(ab->dev, ab_ahb->xo_clk);
	ab_ahb->xo_clk = NULL;
}
983 
984 static enum ath12k_device_family
985 ath12k_ahb_get_device_family(const struct platform_device *pdev)
986 {
987 	enum ath12k_device_family device_family_id;
988 	struct ath12k_ahb_driver *driver;
989 	const struct of_device_id *of_id;
990 
991 	for (device_family_id = ATH12K_DEVICE_FAMILY_START;
992 	     device_family_id < ATH12K_DEVICE_FAMILY_MAX; device_family_id++) {
993 		driver = ath12k_ahb_family_drivers[device_family_id];
994 		if (driver) {
995 			of_id = of_match_device(driver->id_table, &pdev->dev);
996 			if (of_id) {
997 				/* Found the driver */
998 				return device_family_id;
999 			}
1000 		}
1001 	}
1002 
1003 	return ATH12K_DEVICE_FAMILY_MAX;
1004 }
1005 
/* Platform probe: allocate the core device, resolve the device family and
 * run its probe hook, map resources, set up HAL/CE, configure the remote
 * processor and interrupts, run family arch_init, then start the core.
 * Each failure unwinds exactly the steps completed so far.
 */
static int ath12k_ahb_probe(struct platform_device *pdev)
{
	enum ath12k_device_family device_id;
	struct ath12k_ahb *ab_ahb;
	struct ath12k_base *ab;
	u32 addr;
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "Failed to set 32-bit coherent dma\n");
		return ret;
	}

	ab = ath12k_core_alloc(&pdev->dev, sizeof(struct ath12k_ahb),
			       ATH12K_BUS_AHB);
	if (!ab)
		return -ENOMEM;

	ab_ahb = ath12k_ab_to_ahb(ab);
	ab_ahb->ab = ab;
	ab->hif.ops = &ath12k_ahb_hif_ops;
	ab->pdev = pdev;
	platform_set_drvdata(pdev, ab);

	device_id = ath12k_ahb_get_device_family(pdev);
	if (device_id >= ATH12K_DEVICE_FAMILY_MAX) {
		ath12k_err(ab, "failed to get device family: %d\n", device_id);
		ret = -EINVAL;
		goto err_core_free;
	}

	ath12k_dbg(ab, ATH12K_DBG_AHB, "AHB device family id: %d\n", device_id);

	ab_ahb->device_family_ops = &ath12k_ahb_family_drivers[device_id]->ops;

	/* Call device specific probe. This is the callback that can
	 * be used to override any ops in future
	 * probe is validated for NULL during registration.
	 */
	ret = ab_ahb->device_family_ops->probe(pdev);
	if (ret) {
		ath12k_err(ab, "failed to probe device: %d\n", ret);
		goto err_core_free;
	}

	/* Set fixed_mem_region to true for platforms that support fixed memory
	 * reservation from DT. If memory is reserved from DT for FW, ath12k driver
	 * need not to allocate memory.
	 */
	if (!of_property_read_u32(ab->dev->of_node, "memory-region", &addr))
		set_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);

	ret = ath12k_core_pre_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath12k_ahb_resource_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		goto err_resource_deinit;

	ret = ath12k_ce_alloc_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath12k_ahb_init_qmi_ce_config(ab);

	ret = ath12k_ahb_configure_rproc(ab);
	if (ret)
		goto err_ce_free;

	ret = ath12k_ahb_config_irq(ab);
	if (ret) {
		ath12k_err(ab, "failed to configure irq: %d\n", ret);
		goto err_rproc_deconfigure;
	}

	/* Invoke arch_init here so that arch-specific init operations
	 * can utilize already initialized ab fields, such as HAL SRNGs.
	 */
	ret = ab_ahb->device_family_ops->arch_init(ab);
	if (ret) {
		ath12k_err(ab, "AHB arch_init failed %d\n", ret);
		goto err_rproc_deconfigure;
	}

	ret = ath12k_core_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init core: %d\n", ret);
		goto err_deinit_arch;
	}

	return 0;

err_deinit_arch:
	ab_ahb->device_family_ops->arch_deinit(ab);

err_rproc_deconfigure:
	ath12k_ahb_deconfigure_rproc(ab);

err_ce_free:
	ath12k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);

err_resource_deinit:
	ath12k_ahb_resource_deinit(ab);

err_core_free:
	ath12k_core_free(ab);
	platform_set_drvdata(pdev, NULL);

	return ret;
}
1127 
1128 static void ath12k_ahb_remove_prepare(struct ath12k_base *ab)
1129 {
1130 	unsigned long left;
1131 
1132 	if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) {
1133 		left = wait_for_completion_timeout(&ab->driver_recovery,
1134 						   ATH12K_AHB_RECOVERY_TIMEOUT);
1135 		if (!left)
1136 			ath12k_warn(ab, "failed to receive recovery response completion\n");
1137 	}
1138 
1139 	set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
1140 	cancel_work_sync(&ab->restart_work);
1141 	cancel_work_sync(&ab->qmi.event_work);
1142 }
1143 
1144 static void ath12k_ahb_free_resources(struct ath12k_base *ab)
1145 {
1146 	struct platform_device *pdev = ab->pdev;
1147 	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
1148 
1149 	ath12k_hal_srng_deinit(ab);
1150 	ath12k_ce_free_pipes(ab);
1151 	ath12k_ahb_resource_deinit(ab);
1152 	ath12k_ahb_deconfigure_rproc(ab);
1153 	ab_ahb->device_family_ops->arch_deinit(ab);
1154 	ath12k_core_free(ab);
1155 	platform_set_drvdata(pdev, NULL);
1156 }
1157 
1158 static void ath12k_ahb_remove(struct platform_device *pdev)
1159 {
1160 	struct ath12k_base *ab = platform_get_drvdata(pdev);
1161 
1162 	if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1163 		ath12k_ahb_power_down(ab, false);
1164 		goto qmi_fail;
1165 	}
1166 
1167 	ath12k_ahb_remove_prepare(ab);
1168 	ath12k_core_hw_group_cleanup(ab->ag);
1169 qmi_fail:
1170 	ath12k_core_deinit(ab);
1171 	ath12k_ahb_free_resources(ab);
1172 }
1173 
1174 int ath12k_ahb_register_driver(const enum ath12k_device_family device_id,
1175 			       struct ath12k_ahb_driver *driver)
1176 {
1177 	struct platform_driver *ahb_driver;
1178 
1179 	if (device_id >= ATH12K_DEVICE_FAMILY_MAX)
1180 		return -EINVAL;
1181 
1182 	if (!driver || !driver->ops.probe ||
1183 	    !driver->ops.arch_init || !driver->ops.arch_deinit)
1184 		return -EINVAL;
1185 
1186 	if (ath12k_ahb_family_drivers[device_id]) {
1187 		pr_err("Driver already registered for id %d\n", device_id);
1188 		return -EALREADY;
1189 	}
1190 
1191 	ath12k_ahb_family_drivers[device_id] = driver;
1192 
1193 	ahb_driver = &ath12k_ahb_family_drivers[device_id]->driver;
1194 	ahb_driver->driver.name = driver->name;
1195 	ahb_driver->driver.of_match_table = driver->id_table;
1196 	ahb_driver->probe  = ath12k_ahb_probe;
1197 	ahb_driver->remove = ath12k_ahb_remove;
1198 
1199 	return platform_driver_register(ahb_driver);
1200 }
1201 EXPORT_SYMBOL(ath12k_ahb_register_driver);
1202 
1203 void ath12k_ahb_unregister_driver(const enum ath12k_device_family device_id)
1204 {
1205 	struct platform_driver *ahb_driver;
1206 
1207 	if (device_id >= ATH12K_DEVICE_FAMILY_MAX)
1208 		return;
1209 
1210 	if (!ath12k_ahb_family_drivers[device_id])
1211 		return;
1212 
1213 	ahb_driver = &ath12k_ahb_family_drivers[device_id]->driver;
1214 	platform_driver_unregister(ahb_driver);
1215 	ath12k_ahb_family_drivers[device_id] = NULL;
1216 }
1217 EXPORT_SYMBOL(ath12k_ahb_unregister_driver);
1218