// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2024 Linaro Ltd.
 */

#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include <linux/soc/qcom/smem.h>

#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_cmd.h"
#include "ipa_data.h"
#include "ipa_mem.h"
#include "ipa_reg.h"
#include "ipa_table.h"

/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL		cpu_to_le32(0xdeadbeef)
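
/* Canaries occupy the 4-byte slots immediately below a region's base
 * offset (see the canary loop in ipa_mem_config()).  For example
 * (offsets illustrative), a region at offset 0x0288 with canary_count 2
 * has canary words at offsets 0x0280 and 0x0284.
 */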

/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM	1

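/* SMEM item number of the region reserved for IPA; see ipa_smem_init() */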
#define SMEM_IPA_FILTER_TABLE	497

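/* Return the memory region with the given ID, or null if it's not defined */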
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->id == mem_id)
			return mem;
	}

	return NULL;
}
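
/* A short usage sketch: optional regions may be absent, so a caller
 * looking one up must check for null (the pattern ipa_mem_setup()
 * uses for the AP header region):
 *
 *	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
 *	if (mem)
 *		size += mem->size;
 */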

/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
	dma_addr_t addr = ipa->zero_addr;

	if (!mem->size)
		return;

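	/* The final (true) argument selects the DMA direction: toward IPA
	 * local memory, i.e. this writes zeros into the region.
	 */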
	ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
}

/**
 * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
 * @ipa:	IPA pointer
 *
 * Set up the shared memory regions in IPA local memory.  This involves
 * zero-filling memory regions, and in the case of header memory, telling
 * the IPA where it's located.
 *
 * This function performs the initial setup of this memory.  If the modem
 * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
 *
 * The AP informs the modem where its portions of memory are located
 * in a QMI exchange that occurs at modem startup.
 *
 * There is no need for a matching ipa_mem_teardown() function.
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_setup(struct ipa *ipa)
{
	dma_addr_t addr = ipa->zero_addr;
	const struct ipa_mem *mem;
	struct gsi_trans *trans;
	const struct reg *reg;
	u32 offset;
	u16 size;
	u32 val;

	/* Get a transaction to define the header memory region and to zero
	 * the processing context and modem memory regions.  That's four
	 * commands: one header initialization plus three region-zeroing
	 * commands.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 4);
	if (!trans) {
		dev_err(ipa->dev, "no transaction for memory setup\n");
		return -EBUSY;
	}

	/* Initialize IPA-local header memory.  The AP header region, if
	 * present, is contiguous with and follows the modem header region,
	 * and they are initialized together.
	 */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	offset = mem->offset;
	size = mem->size;
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	ipa_cmd_hdr_init_local_add(trans, offset, size, addr);

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_AP_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	/* Tell the hardware where the processing context area is located */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
	offset = ipa->mem_offset + mem->offset;

	reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
	val = reg_encode(reg, IPA_BASE_ADDR, offset);
	iowrite32(val, ipa->reg_virt + reg_offset(reg));

	return 0;
}

/* Is the given memory region ID valid for the current IPA version? */
static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	enum ipa_version version = ipa->version;

	switch (mem_id) {
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_AP_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
	case IPA_MEM_UC_EVENT_RING:
	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
	case IPA_MEM_STATS_QUOTA_AP:
	case IPA_MEM_END_MARKER:	/* pseudo region */
		break;

	case IPA_MEM_STATS_TETHERING:
	case IPA_MEM_STATS_DROP:
		if (version < IPA_VERSION_4_0)
			return false;
		break;

	case IPA_MEM_STATS_V4_FILTER:
	case IPA_MEM_STATS_V6_FILTER:
	case IPA_MEM_STATS_V4_ROUTE:
	case IPA_MEM_STATS_V6_ROUTE:
		if (version < IPA_VERSION_4_0 || version > IPA_VERSION_4_2)
			return false;
		break;

	case IPA_MEM_AP_V4_FILTER:
	case IPA_MEM_AP_V6_FILTER:
		if (version < IPA_VERSION_5_0)
			return false;
		break;

	case IPA_MEM_NAT_TABLE:
	case IPA_MEM_STATS_FILTER_ROUTE:
		if (version < IPA_VERSION_4_5)
			return false;
		break;

	default:
		return false;
	}

	return true;
}

/* Must the given memory region be present in the configuration? */
static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	switch (mem_id) {
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
		return true;

	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
		return ipa->version >= IPA_VERSION_4_0;

	case IPA_MEM_STATS_TETHERING:
		return ipa->version >= IPA_VERSION_4_0 &&
			ipa->version != IPA_VERSION_5_0;

	default:
		return false;		/* Anything else is optional */
	}
}

static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
	enum ipa_mem_id mem_id = mem->id;
	struct device *dev = ipa->dev;
	u16 size_multiple;

	/* Make sure the memory region is valid for this version of IPA */
	if (!ipa_mem_id_valid(ipa, mem_id)) {
		dev_err(dev, "region id %u not valid\n", mem_id);
		return false;
	}

	if (!mem->size && !mem->canary_count) {
		dev_err(dev, "empty memory region %u\n", mem_id);
		return false;
	}

	/* The modem region size must be a multiple of 4 bytes; all other
	 * region sizes must be a multiple of 8 bytes.
	 */
	size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
	if (mem->size % size_multiple)
		dev_err(dev, "region %u size not a multiple of %u bytes\n",
			mem_id, size_multiple);
	else if (mem->offset % 8)
		dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
	else if (mem->offset < mem->canary_count * sizeof(__le32))
		dev_err(dev, "region %u offset too small for %hu canaries\n",
			mem_id, mem->canary_count);
	else if (mem_id == IPA_MEM_END_MARKER && mem->size)
		dev_err(dev, "non-zero end marker region size\n");
	else
		return true;

	return false;
}

/* Verify each defined memory region is valid. */
static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
	DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
	struct device *dev = ipa->dev;
	enum ipa_mem_id mem_id;
	u32 i;

	if (mem_data->local_count > IPA_MEM_COUNT) {
		dev_err(dev, "too many memory regions (%u > %u)\n",
			mem_data->local_count, IPA_MEM_COUNT);
		return false;
	}

	for (i = 0; i < mem_data->local_count; i++) {
		const struct ipa_mem *mem = &mem_data->local[i];

		if (__test_and_set_bit(mem->id, regions)) {
			dev_err(dev, "duplicate memory region %u\n", mem->id);
			return false;
		}

		/* Defined regions have non-zero size and/or canary count */
		if (!ipa_mem_valid_one(ipa, mem))
			return false;
	}

	/* Now see if any required regions are not defined */
	for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) {
		if (ipa_mem_id_required(ipa, mem_id))
			dev_err(dev, "required memory region %u missing\n",
				mem_id);
	}

	return true;
}

/* Do all memory regions fit within the IPA local memory? */
static bool ipa_mem_size_valid(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	u32 limit = ipa->mem_size;
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->offset + mem->size <= limit)
			continue;

		dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
			mem->id, limit);

		return false;
	}

	return true;
}

/**
 * ipa_mem_config() - Configure IPA shared memory
 * @ipa:	IPA pointer
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_config(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	const struct ipa_mem *mem;
	const struct reg *reg;
	dma_addr_t addr;
	u32 mem_size;
	void *virt;
	u32 val;
	u32 i;

	/* Check the advertised location and size of the shared memory area */
	reg = ipa_reg(ipa, SHARED_MEM_SIZE);
	val = ioread32(ipa->reg_virt + reg_offset(reg));

	/* The fields in the register are in 8 byte units */
	ipa->mem_offset = 8 * reg_decode(reg, MEM_BADDR, val);

	/* The size field advertises how much memory follows the base offset */
	mem_size = 8 * reg_decode(reg, MEM_SIZE, val);
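	/* For example (value illustrative), a MEM_SIZE field of 0x280
	 * advertises 8 * 0x280 = 0x1400 bytes of IPA local memory.
	 */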

	/* If the hardware advertises less memory than the DT resource
	 * provides, trust the hardware and warn; if it advertises more,
	 * quietly ignore the excess.
	 */
	if (ipa->mem_offset + mem_size < ipa->mem_size) {
		dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
			 mem_size);
		ipa->mem_size = mem_size;
	} else if (ipa->mem_offset + mem_size > ipa->mem_size) {
		dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n",
			mem_size);
	}

	/* We know our memory size; make sure regions are all in range */
	if (!ipa_mem_size_valid(ipa))
		return -EINVAL;

	/* Prealloc DMA memory for zeroing regions */
	virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	ipa->zero_addr = addr;
	ipa->zero_virt = virt;
	ipa->zero_size = IPA_MEM_MAX;

	/* For each defined region, write "canary" values in the
	 * space prior to the region's base address if indicated.
	 */
	for (i = 0; i < ipa->mem_count; i++) {
		u16 canary_count = ipa->mem[i].canary_count;
		__le32 *canary;

		if (!canary_count)
			continue;

		/* Write canary values in the space before the region */
		canary = ipa->mem_virt + ipa->mem_offset + ipa->mem[i].offset;
		do
			*--canary = IPA_MEM_CANARY_VAL;
		while (--canary_count);
	}

	/* Verify the microcontroller ring alignment (if defined) */
	mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
	if (mem && mem->offset % 1024) {
		dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
		goto err_dma_free;
	}

	return 0;

err_dma_free:
	dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);

	return -EINVAL;
}

/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
	struct device *dev = ipa->dev;

	dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
	ipa->zero_size = 0;
	ipa->zero_virt = NULL;
	ipa->zero_addr = 0;
}

/**
 * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
 * @ipa:	IPA pointer
 *
 * Zero regions of IPA-local memory used by the modem.  These are configured
 * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
 * restarts via SSR we need to re-initialize them.  A QMI message tells the
 * modem where to find the regions of IPA local memory it needs to know
 * about (including these).
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_zero_modem(struct ipa *ipa)
{
	struct gsi_trans *trans;

	/* Get a transaction to zero the modem memory, modem header,
	 * and modem processing context regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 3);
	if (!trans) {
		dev_err(ipa->dev, "no transaction to zero modem memory\n");
		return -EBUSY;
	}

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_HEADER);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	return 0;
}

/**
 * ipa_imem_init() - Initialize IMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @addr:	Physical address of the IPA region in IMEM
 * @size:	Size (bytes) of the IPA region in IMEM
 *
 * IMEM is a block of shared memory separate from system DRAM, and
 * a portion of this memory is available for the IPA to use.  The
 * modem accesses this memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If this region exists (size > 0) we map it for read/write access
 * through the IOMMU using the IPA device.
 *
 * Note: @addr and @size are not guaranteed to be page-aligned.
 */
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	int ret;

	if (!size)
		return 0;	/* IMEM memory not used */

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for IMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to page boundaries */
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */
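	/* Worked example (addresses illustrative): with 4 KiB pages, a
	 * region at addr 0x146bf234 with size 0x2000 yields phys 0x146bf000
	 * and size 0x3000, i.e. the whole pages containing the region.
	 */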

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
			GFP_KERNEL);
	if (ret)
		return ret;

	ipa->imem_iova = iova;
	ipa->imem_size = size;

	return 0;
}

static void ipa_imem_exit(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;

	if (!ipa->imem_size)
		return;

	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
		if (size != ipa->imem_size)
			dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
				 size, ipa->imem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
	}

	ipa->imem_size = 0;
	ipa->imem_iova = 0;
}

/**
 * ipa_smem_init() - Initialize SMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @size:	Size (bytes) of SMEM memory region
 *
 * SMEM is a managed block of shared DRAM, from which numbered "items"
 * can be allocated.  One item is designated for use by the IPA.
 *
 * The modem accesses SMEM memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If the size provided is non-zero, we allocate the SMEM item and map
 * it for access through the IOMMU.
 *
 * Note: neither @size nor the item address is guaranteed to be page-aligned.
 */
static int ipa_smem_init(struct ipa *ipa, size_t size)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	phys_addr_t addr;
	size_t actual;
	void *virt;
	int ret;

	if (!size)
		return 0;	/* SMEM memory not used */

	/* SMEM is memory shared between the AP and another system entity
	 * (in this case, the modem).  An allocation from SMEM is persistent
	 * until the AP reboots; there is no way to free an allocated SMEM
	 * region.  Allocation only reserves the space; to use it you need
	 * to "get" a pointer to it (this does not imply reference counting).
	 * The item might have already been allocated, in which case we
	 * use it unless the size isn't what we expect.
	 */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, SMEM_IPA_FILTER_TABLE, size);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "error %d allocating size %zu SMEM item\n",
			ret, size);
		return ret;
	}

	/* Now get the address of the SMEM memory region */
	virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, SMEM_IPA_FILTER_TABLE, &actual);
	if (IS_ERR(virt)) {
		ret = PTR_ERR(virt);
		dev_err(dev, "error %d getting SMEM item\n", ret);
		return ret;
	}

	/* If the item already existed (ret is -EEXIST here), verify
	 * that its size is what we expect.
	 */
	if (ret && actual != size) {
		dev_err(dev, "SMEM item has size %zu, expected %zu\n",
			actual, size);
		return -EINVAL;
	}

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for SMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to a page boundary */
	addr = qcom_smem_virt_to_phys(virt);
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
			GFP_KERNEL);
	if (ret)
		return ret;

	ipa->smem_iova = iova;
	ipa->smem_size = size;

	return 0;
}

static void ipa_smem_exit(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
		if (size != ipa->smem_size)
			dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
				 size, ipa->smem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
	}

	ipa->smem_size = 0;
	ipa->smem_iova = 0;
}

/* Perform memory region-related initialization */
int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
		 const struct ipa_mem_data *mem_data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	/* Make sure the set of defined memory regions is valid */
	if (!ipa_mem_valid(ipa, mem_data))
		return -EINVAL;

	ipa->mem_count = mem_data->local_count;
	ipa->mem = mem_data->local;

	/* Check the route and filter table memory regions */
	if (!ipa_table_mem_valid(ipa, false))
		return -EINVAL;
	if (!ipa_table_mem_valid(ipa, true))
		return -EINVAL;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "error %d setting DMA mask\n", ret);
		return ret;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipa-shared");
	if (!res) {
		dev_err(dev,
			"DT error getting \"ipa-shared\" memory property\n");
		return -ENODEV;
	}
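
	/* For reference, a sketch of the matching DT entries (addresses
	 * and sizes illustrative, in the style of a typical SoC IPA node):
	 *
	 *	reg = <0x1e40000 0x7000>, <0x1e47000 0x2000>;
	 *	reg-names = "ipa-reg", "ipa-shared";
	 */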

	ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
	if (!ipa->mem_virt) {
		dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
		return -ENOMEM;
	}

	ipa->mem_addr = res->start;
	ipa->mem_size = resource_size(res);

	ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
	if (ret)
		goto err_unmap;

	ret = ipa_smem_init(ipa, mem_data->smem_size);
	if (ret)
		goto err_imem_exit;

	return 0;

err_imem_exit:
	ipa_imem_exit(ipa);
err_unmap:
	memunmap(ipa->mem_virt);

	return ret;
}

/* Inverse of ipa_mem_init() */
void ipa_mem_exit(struct ipa *ipa)
{
	ipa_smem_exit(ipa);
	ipa_imem_exit(ipa);
	memunmap(ipa->mem_virt);
}