// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm Wireless Connectivity Subsystem Peripheral Image Loader
 *
 * Copyright (C) 2016 Linaro Ltd
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/rpmsg/qcom_smd.h>

#include "qcom_common.h"
#include "remoteproc_internal.h"
#include "qcom_pil_info.h"
#include "qcom_wcnss.h"

#define WCNSS_CRASH_REASON_SMEM		422
#define WCNSS_FIRMWARE_NAME		"wcnss.mdt"
#define WCNSS_PAS_ID			6
#define WCNSS_SSCTL_ID			0x13

#define WCNSS_SPARE_NVBIN_DLND		BIT(25)

#define WCNSS_PMU_IRIS_XO_CFG		BIT(3)
#define WCNSS_PMU_IRIS_XO_EN		BIT(4)
#define WCNSS_PMU_GC_BUS_MUX_SEL_TOP	BIT(5)
#define WCNSS_PMU_IRIS_XO_CFG_STS	BIT(6) /* 1: in progress, 0: done */

#define WCNSS_PMU_IRIS_RESET		BIT(7)
#define WCNSS_PMU_IRIS_RESET_STS	BIT(8) /* 1: in progress, 0: done */
#define WCNSS_PMU_IRIS_XO_READ		BIT(9)
#define WCNSS_PMU_IRIS_XO_READ_STS	BIT(10)

#define WCNSS_PMU_XO_MODE_MASK		GENMASK(2, 1)
#define WCNSS_PMU_XO_MODE_19p2		0
#define WCNSS_PMU_XO_MODE_48		3

#define WCNSS_MAX_PDS			2

struct wcnss_data {
	size_t pmu_offset;
	size_t spare_offset;

	const char *pd_names[WCNSS_MAX_PDS];
	const struct wcnss_vreg_info *vregs;
	size_t num_vregs, num_pd_vregs;
};

struct qcom_wcnss {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *pmu_cfg;
	void __iomem *spare_out;

	bool use_48mhz_xo;

	int wdog_irq;
	int fatal_irq;
	int ready_irq;
	int handover_irq;
	int stop_ack_irq;

	struct qcom_smem_state *state;
	unsigned stop_bit;

	struct mutex iris_lock;
	struct qcom_iris *iris;

	struct device *pds[WCNSS_MAX_PDS];
	size_t num_pds;
	struct regulator_bulk_data *vregs;
	size_t num_vregs;

	struct completion start_done;
	struct completion stop_done;

	phys_addr_t mem_phys;
	phys_addr_t mem_reloc;
	void *mem_region;
	size_t mem_size;

	struct qcom_rproc_subdev smd_subdev;
	struct qcom_sysmon *sysmon;
};

static const struct wcnss_data riva_data = {
	.pmu_offset = 0x28,
	.spare_offset = 0xb4,

	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx",  1050000, 1150000, 0 },
		{ "vddcx",  1050000, 1150000, 0 },
		{ "vddpx",  1800000, 1800000, 0 },
	},
	.num_vregs = 3,
};

static const struct wcnss_data pronto_v1_data = {
	.pmu_offset = 0x1004,
	.spare_offset = 0x1088,

	.pd_names = { "mx", "cx" },
	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 950000, 1150000, 0 },
		{ "vddcx", .super_turbo = true},
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_pd_vregs = 2,
	.num_vregs = 1,
};

static const struct wcnss_data pronto_v2_data = {
	.pmu_offset = 0x1004,
	.spare_offset = 0x1088,

	.pd_names = { "mx", "cx" },
	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 1287500, 1287500, 0 },
		{ "vddcx", .super_turbo = true },
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_pd_vregs = 2,
	.num_vregs = 1,
};

void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss,
			    struct qcom_iris *iris,
			    bool use_48mhz_xo)
{
	mutex_lock(&wcnss->iris_lock);

	wcnss->iris = iris;
	wcnss->use_48mhz_xo = use_48mhz_xo;

	mutex_unlock(&wcnss->iris_lock);
}

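/*
 * Load the split-out WCNSS firmware (wcnss.mdt plus its segments) into the
 * reserved memory region via the Qualcomm MDT loader and record the load
 * address in the PIL info region.
 */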
static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int ret;

	ret = qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
			    wcnss->mem_region, wcnss->mem_phys,
			    wcnss->mem_size, &wcnss->mem_reloc);
	if (ret)
		return ret;

	qcom_pil_info_store("wcnss", wcnss->mem_phys, wcnss->mem_size);

	return 0;
}

static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss)
{
	u32 val;

	/* Indicate NV download capability */
	val = readl(wcnss->spare_out);
	val |= WCNSS_SPARE_NVBIN_DLND;
	writel(val, wcnss->spare_out);
}

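/*
 * Program the PMU configuration register: select the XO mode, reset the IRIS
 * RF module and run the XO configuration sequence, polling the respective
 * status bits until the hardware reports completion.
 */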
static void wcnss_configure_iris(struct qcom_wcnss *wcnss)
{
	u32 val;

	/* Clear PMU cfg register */
	writel(0, wcnss->pmu_cfg);

	val = WCNSS_PMU_GC_BUS_MUX_SEL_TOP | WCNSS_PMU_IRIS_XO_EN;
	writel(val, wcnss->pmu_cfg);

	/* Clear XO_MODE */
	val &= ~WCNSS_PMU_XO_MODE_MASK;
	if (wcnss->use_48mhz_xo)
		val |= WCNSS_PMU_XO_MODE_48 << 1;
	else
		val |= WCNSS_PMU_XO_MODE_19p2 << 1;
	writel(val, wcnss->pmu_cfg);

	/* Reset IRIS */
	val |= WCNSS_PMU_IRIS_RESET;
	writel(val, wcnss->pmu_cfg);

	/* Wait for PMU.iris_reg_reset_sts */
	while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_RESET_STS)
		cpu_relax();

	/* Clear IRIS reset */
	val &= ~WCNSS_PMU_IRIS_RESET;
	writel(val, wcnss->pmu_cfg);

	/* Start IRIS XO configuration */
	val |= WCNSS_PMU_IRIS_XO_CFG;
	writel(val, wcnss->pmu_cfg);

	/* Wait for XO configuration to finish */
	while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_XO_CFG_STS)
		cpu_relax();

	/* Stop IRIS XO configuration */
	val &= ~WCNSS_PMU_GC_BUS_MUX_SEL_TOP;
	val &= ~WCNSS_PMU_IRIS_XO_CFG;
	writel(val, wcnss->pmu_cfg);

	/* Add some delay for XO to settle */
	msleep(20);
}

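/*
 * Boot WCNSS: vote for the power domains and regulators, enable the IRIS RF
 * module, configure the PMU/XO and then ask TrustZone (PAS) to authenticate
 * the firmware and release the processor from reset. The votes are dropped
 * again at the end; see the handover interrupt comment below.
 */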
static int wcnss_start(struct rproc *rproc)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int ret, i;

	mutex_lock(&wcnss->iris_lock);
	if (!wcnss->iris) {
		dev_err(wcnss->dev, "no iris registered\n");
		ret = -EINVAL;
		goto release_iris_lock;
	}

	for (i = 0; i < wcnss->num_pds; i++) {
		dev_pm_genpd_set_performance_state(wcnss->pds[i], INT_MAX);
		ret = pm_runtime_get_sync(wcnss->pds[i]);
		if (ret < 0) {
			pm_runtime_put_noidle(wcnss->pds[i]);
			goto disable_pds;
		}
	}

	ret = regulator_bulk_enable(wcnss->num_vregs, wcnss->vregs);
	if (ret)
		goto disable_pds;

	ret = qcom_iris_enable(wcnss->iris);
	if (ret)
		goto disable_regulators;

	wcnss_indicate_nv_download(wcnss);
	wcnss_configure_iris(wcnss);

	ret = qcom_scm_pas_auth_and_reset(WCNSS_PAS_ID);
	if (ret) {
		dev_err(wcnss->dev,
			"failed to authenticate image and release reset\n");
		goto disable_iris;
	}

	ret = wait_for_completion_timeout(&wcnss->start_done,
					  msecs_to_jiffies(5000));
	if (wcnss->ready_irq > 0 && ret == 0) {
		/* We have a ready_irq, but it didn't fire in time. */
		dev_err(wcnss->dev, "start timed out\n");
		qcom_scm_pas_shutdown(WCNSS_PAS_ID);
		ret = -ETIMEDOUT;
		goto disable_iris;
	}

	ret = 0;

disable_iris:
	qcom_iris_disable(wcnss->iris);
disable_regulators:
	regulator_bulk_disable(wcnss->num_vregs, wcnss->vregs);
disable_pds:
	for (i--; i >= 0; i--) {
		pm_runtime_put(wcnss->pds[i]);
		dev_pm_genpd_set_performance_state(wcnss->pds[i], 0);
	}
release_iris_lock:
	mutex_unlock(&wcnss->iris_lock);

	return ret;
}

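/*
 * Stop WCNSS: signal the "stop" shared state bit, wait for the remote side to
 * acknowledge (or time out), then have TrustZone shut down and reclaim the
 * subsystem.
 */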
static int wcnss_stop(struct rproc *rproc)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int ret;

	if (wcnss->state) {
		qcom_smem_state_update_bits(wcnss->state,
					    BIT(wcnss->stop_bit),
					    BIT(wcnss->stop_bit));

		ret = wait_for_completion_timeout(&wcnss->stop_done,
						  msecs_to_jiffies(5000));
		if (ret == 0)
			dev_err(wcnss->dev, "timed out on wait\n");

		qcom_smem_state_update_bits(wcnss->state,
					    BIT(wcnss->stop_bit),
					    0);
	}

	ret = qcom_scm_pas_shutdown(WCNSS_PAS_ID);
	if (ret)
		dev_err(wcnss->dev, "failed to shutdown: %d\n", ret);

	return ret;
}

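/*
 * Translate a WCNSS device address into a kernel virtual address within the
 * reserved memory region, or return NULL if it falls outside of it.
 */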
static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int offset;

	offset = da - wcnss->mem_reloc;
	if (offset < 0 || offset + len > wcnss->mem_size)
		return NULL;

	return wcnss->mem_region + offset;
}

static const struct rproc_ops wcnss_ops = {
	.start = wcnss_start,
	.stop = wcnss_stop,
	.da_to_va = wcnss_da_to_va,
	.parse_fw = qcom_register_dump_segments,
	.load = wcnss_load,
};

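/*
 * Threaded handlers for the interrupts raised by WCNSS: watchdog bite, fatal
 * error (with the crash reason read from SMEM), firmware ready, resource
 * handover and stop acknowledgement. The first two report a crash to the
 * remoteproc core; ready and stop-ack complete the corresponding waiters.
 */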
static irqreturn_t wcnss_wdog_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	rproc_report_crash(wcnss->rproc, RPROC_WATCHDOG);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_fatal_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;
	size_t len;
	char *msg;

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, WCNSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(wcnss->dev, "fatal error received: %s\n", msg);

	rproc_report_crash(wcnss->rproc, RPROC_FATAL_ERROR);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_ready_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	complete(&wcnss->start_done);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_handover_interrupt(int irq, void *dev)
{
	/*
	 * XXX: At this point we're supposed to release the resources that we
	 * have been holding on behalf of the WCNSS. Unfortunately this
	 * interrupt comes way before the other side seems to be done.
	 *
	 * So we're currently relying on the ready interrupt firing later than
	 * this and we just disable the resources at the end of wcnss_start().
	 */

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	complete(&wcnss->stop_done);

	return IRQ_HANDLED;
}

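/*
 * Attach the power domains named in the match data (e.g. "mx" and "cx" on
 * pronto). Older device trees may not describe them, in which case the caller
 * falls back to driving the corresponding supplies as regulators.
 */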
static int wcnss_init_pds(struct qcom_wcnss *wcnss,
			  const char * const pd_names[WCNSS_MAX_PDS])
{
	int i, ret;

	for (i = 0; i < WCNSS_MAX_PDS; i++) {
		if (!pd_names[i])
			break;

		wcnss->pds[i] = dev_pm_domain_attach_by_name(wcnss->dev, pd_names[i]);
		if (IS_ERR_OR_NULL(wcnss->pds[i])) {
			ret = PTR_ERR(wcnss->pds[i]) ? : -ENODATA;
			for (i--; i >= 0; i--)
				dev_pm_domain_detach(wcnss->pds[i], false);
			return ret;
		}
	}
	wcnss->num_pds = i;

	return 0;
}

static void wcnss_release_pds(struct qcom_wcnss *wcnss)
{
	int i;

	for (i = 0; i < wcnss->num_pds; i++)
		dev_pm_domain_detach(wcnss->pds[i], false);
}

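/*
 * Request the regulators described in the match data and apply their voltage
 * and load requirements. When power domains were attached above, the supplies
 * they replace are skipped; otherwise they are handled here as regulators too.
 */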
static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
				 const struct wcnss_vreg_info *info,
				 int num_vregs, int num_pd_vregs)
{
	struct regulator_bulk_data *bulk;
	int ret;
	int i;

	/*
	 * If attaching the power domains succeeded we can skip requesting
	 * the regulators for the power domains. For old device trees we need to
	 * reserve extra space to manage them through the regulator interface.
	 */
	if (wcnss->num_pds)
		info += num_pd_vregs;
	else
		num_vregs += num_pd_vregs;

	bulk = devm_kcalloc(wcnss->dev,
			    num_vregs, sizeof(struct regulator_bulk_data),
			    GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	for (i = 0; i < num_vregs; i++)
		bulk[i].supply = info[i].name;

	ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk);
	if (ret)
		return ret;

	for (i = 0; i < num_vregs; i++) {
		if (info[i].max_voltage)
			regulator_set_voltage(bulk[i].consumer,
					      info[i].min_voltage,
					      info[i].max_voltage);

		if (info[i].load_uA)
			regulator_set_load(bulk[i].consumer, info[i].load_uA);
	}

	wcnss->vregs = bulk;
	wcnss->num_vregs = num_vregs;

	return 0;
}

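/*
 * Look up a named interrupt and install a threaded handler for it. Missing
 * optional interrupts are not treated as an error; 0 is returned instead of
 * an IRQ number in that case.
 */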
static int wcnss_request_irq(struct qcom_wcnss *wcnss,
			     struct platform_device *pdev,
			     const char *name,
			     bool optional,
			     irq_handler_t thread_fn)
{
	int ret;

	ret = platform_get_irq_byname(pdev, name);
	if (ret < 0 && optional) {
		dev_dbg(&pdev->dev, "no %s IRQ defined, ignoring\n", name);
		return 0;
	} else if (ret < 0) {
		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
		return ret;
	}

	ret = devm_request_threaded_irq(&pdev->dev, ret,
					NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					"wcnss", wcnss);
	if (ret)
		dev_err(&pdev->dev, "request %s IRQ failed\n", name);

	return ret;
}

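/*
 * Map the reserved "memory-region" carve-out that the firmware is loaded into
 * and that backs the da_to_va translations above.
 */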
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
{
	struct device_node *node;
	struct resource r;
	int ret;

	node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0);
	if (!node) {
		dev_err(wcnss->dev, "no memory-region specified\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);
	if (ret)
		return ret;

	wcnss->mem_phys = wcnss->mem_reloc = r.start;
	wcnss->mem_size = resource_size(&r);
	wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size);
	if (!wcnss->mem_region) {
		dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, wcnss->mem_size);
		return -EBUSY;
	}

	return 0;
}

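/*
 * Probe: verify that TrustZone PAS support is available, map the PMU
 * registers and the reserved memory, set up power domains, regulators and
 * interrupts, register the remoteproc and finally populate the child
 * devices (e.g. the IRIS RF module).
 */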
static int wcnss_probe(struct platform_device *pdev)
{
	const struct wcnss_data *data;
	struct qcom_wcnss *wcnss;
	struct resource *res;
	struct rproc *rproc;
	void __iomem *mmio;
	int ret;

	data = of_device_get_match_data(&pdev->dev);

	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	if (!qcom_scm_pas_supported(WCNSS_PAS_ID)) {
		dev_err(&pdev->dev, "PAS is not available for WCNSS\n");
		return -ENXIO;
	}

	rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
			    WCNSS_FIRMWARE_NAME, sizeof(*wcnss));
	if (!rproc) {
		dev_err(&pdev->dev, "unable to allocate remoteproc\n");
		return -ENOMEM;
	}
	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	wcnss = (struct qcom_wcnss *)rproc->priv;
	wcnss->dev = &pdev->dev;
	wcnss->rproc = rproc;
	platform_set_drvdata(pdev, wcnss);

	init_completion(&wcnss->start_done);
	init_completion(&wcnss->stop_done);

	mutex_init(&wcnss->iris_lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmu");
	mmio = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmio)) {
		ret = PTR_ERR(mmio);
		goto free_rproc;
	}

	ret = wcnss_alloc_memory_region(wcnss);
	if (ret)
		goto free_rproc;

	wcnss->pmu_cfg = mmio + data->pmu_offset;
	wcnss->spare_out = mmio + data->spare_offset;

	/*
	 * We might need to fall back to regulators instead of power domains
	 * for old device trees. Don't report an error in that case.
	 */
	ret = wcnss_init_pds(wcnss, data->pd_names);
	if (ret && (ret != -ENODATA || !data->num_pd_vregs))
		goto free_rproc;

	ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs,
				    data->num_pd_vregs);
	if (ret)
		goto detach_pds;

	ret = wcnss_request_irq(wcnss, pdev, "wdog", false, wcnss_wdog_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->wdog_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "fatal", false, wcnss_fatal_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->fatal_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "ready", true, wcnss_ready_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->ready_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "handover", true, wcnss_handover_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->handover_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "stop-ack", true, wcnss_stop_ack_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->stop_ack_irq = ret;

	if (wcnss->stop_ack_irq) {
		wcnss->state = qcom_smem_state_get(&pdev->dev, "stop",
						   &wcnss->stop_bit);
		if (IS_ERR(wcnss->state)) {
			ret = PTR_ERR(wcnss->state);
			goto detach_pds;
		}
	}

	qcom_add_smd_subdev(rproc, &wcnss->smd_subdev);
	wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID);
	if (IS_ERR(wcnss->sysmon)) {
		ret = PTR_ERR(wcnss->sysmon);
		goto detach_pds;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto detach_pds;

	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);

detach_pds:
	wcnss_release_pds(wcnss);
free_rproc:
	rproc_free(rproc);

	return ret;
}

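/*
 * Undo probe: depopulate the child devices, drop the SMEM state handle,
 * remove the remoteproc and its subdevices, detach the power domains and
 * free the rproc.
 */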
static int wcnss_remove(struct platform_device *pdev)
{
	struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);

	of_platform_depopulate(&pdev->dev);

	qcom_smem_state_put(wcnss->state);
	rproc_del(wcnss->rproc);

	qcom_remove_sysmon_subdev(wcnss->sysmon);
	qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev);
	wcnss_release_pds(wcnss);
	rproc_free(wcnss->rproc);

	return 0;
}

static const struct of_device_id wcnss_of_match[] = {
	{ .compatible = "qcom,riva-pil", &riva_data },
	{ .compatible = "qcom,pronto-v1-pil", &pronto_v1_data },
	{ .compatible = "qcom,pronto-v2-pil", &pronto_v2_data },
	{ },
};
MODULE_DEVICE_TABLE(of, wcnss_of_match);

static struct platform_driver wcnss_driver = {
	.probe = wcnss_probe,
	.remove = wcnss_remove,
	.driver = {
		.name = "qcom-wcnss-pil",
		.of_match_table = wcnss_of_match,
	},
};

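/*
 * The IRIS driver is registered here alongside the WCNSS PIL driver so both
 * are provided by a single module; if the second registration fails, the
 * first one is rolled back.
 */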
static int __init wcnss_init(void)
{
	int ret;

	ret = platform_driver_register(&wcnss_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&qcom_iris_driver);
	if (ret)
		platform_driver_unregister(&wcnss_driver);

	return ret;
}
module_init(wcnss_init);

static void __exit wcnss_exit(void)
{
	platform_driver_unregister(&qcom_iris_driver);
	platform_driver_unregister(&wcnss_driver);
}
module_exit(wcnss_exit);

MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
MODULE_LICENSE("GPL v2");