// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2024 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/soc/qcom/smem.h>

#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "gsi_trans.h"

/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL		cpu_to_le32(0xdeadbeef)

/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM	1

const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->id == mem_id)
			return mem;
	}

	return NULL;
}
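
/* Usage sketch (illustrative, not taken from a specific caller): a region
 * that is optional on a given platform makes ipa_mem_find() return NULL,
 * so lookups of non-required IDs need a check, e.g.:
 *
 *	const struct ipa_mem *mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
 *
 *	if (mem)
 *		size += mem->size;
 *
 * Required regions (see ipa_mem_id_required()) are expected to be defined;
 * ipa_mem_valid() logs an error for any that are missing, which is why
 * ipa_mem_setup() below dereferences some lookups without a NULL check.
 */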

/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
	dma_addr_t addr = ipa->zero_addr;

	if (!mem->size)
		return;

	ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
}
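
/* A sketch of what the command added above does, assuming the final
 * argument of ipa_cmd_dma_shared_mem_add() selects the direction toward
 * IPA memory: the hardware copies mem->size bytes from the zero-filled
 * DMA buffer at ipa->zero_addr (allocated in ipa_mem_config()) into IPA
 * local memory starting at mem->offset.  This relies on no region being
 * larger than IPA_MEM_MAX, the size of that buffer.
 */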

/**
 * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
 * @ipa:	IPA pointer
 *
 * Set up the shared memory regions in IPA local memory.  This involves
 * zero-filling memory regions, and in the case of header memory, telling
 * the IPA where it's located.
 *
 * This function performs the initial setup of this memory.  If the modem
 * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
 *
 * The AP informs the modem where its portions of memory are located
 * in a QMI exchange that occurs at modem startup.
 *
 * There is no need for a matching ipa_mem_teardown() function.
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_setup(struct ipa *ipa)
{
	dma_addr_t addr = ipa->zero_addr;
	const struct ipa_mem *mem;
	struct gsi_trans *trans;
	const struct reg *reg;
	u32 offset;
	u16 size;
	u32 val;

	/* Get a transaction to define the header memory region and to zero
	 * the processing context and modem memory regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 4);
	if (!trans) {
		dev_err(ipa->dev, "no transaction for memory setup\n");
		return -EBUSY;
	}

	/* Initialize IPA-local header memory.  The AP header region, if
	 * present, is contiguous with and follows the modem header region,
	 * and they are initialized together.
	 */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	offset = mem->offset;
	size = mem->size;
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	ipa_cmd_hdr_init_local_add(trans, offset, size, addr);

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_AP_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	/* Tell the hardware where the processing context area is located */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
	offset = ipa->mem_offset + mem->offset;

	reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
	val = reg_encode(reg, IPA_BASE_ADDR, offset);
	iowrite32(val, ipa->reg_virt + reg_offset(reg));

	return 0;
}
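
/* Illustration with hypothetical numbers: if ipa->mem_offset were 0x140
 * and the modem processing context region sat at offset 0x5b0, the
 * IPA_BASE_ADDR field written above would be 0x140 + 0x5b0 = 0x6f0,
 * since the hardware wants the region's location including the shared
 * memory area's own offset.
 */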

/* Is the given memory region ID valid for the current IPA version? */
static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	enum ipa_version version = ipa->version;

	switch (mem_id) {
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_AP_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
	case IPA_MEM_UC_EVENT_RING:
	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
	case IPA_MEM_STATS_QUOTA_AP:
	case IPA_MEM_END_MARKER:	/* pseudo region */
		break;

	case IPA_MEM_STATS_TETHERING:
	case IPA_MEM_STATS_DROP:
		if (version < IPA_VERSION_4_0)
			return false;
		break;

	case IPA_MEM_STATS_V4_FILTER:
	case IPA_MEM_STATS_V6_FILTER:
	case IPA_MEM_STATS_V4_ROUTE:
	case IPA_MEM_STATS_V6_ROUTE:
		if (version < IPA_VERSION_4_0 || version > IPA_VERSION_4_2)
			return false;
		break;

	case IPA_MEM_AP_V4_FILTER:
	case IPA_MEM_AP_V6_FILTER:
		if (version < IPA_VERSION_5_0)
			return false;
		break;

	case IPA_MEM_NAT_TABLE:
	case IPA_MEM_STATS_FILTER_ROUTE:
		if (version < IPA_VERSION_4_5)
			return false;
		break;

	default:
		return false;
	}

	return true;
}

/* Must the given memory region be present in the configuration? */
static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	switch (mem_id) {
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
		return true;

	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
		return ipa->version >= IPA_VERSION_4_0;

	case IPA_MEM_STATS_TETHERING:
		return ipa->version >= IPA_VERSION_4_0 &&
			ipa->version != IPA_VERSION_5_0;

	default:
		return false;		/* Anything else is optional */
	}
}

static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
	enum ipa_mem_id mem_id = mem->id;
	struct device *dev = ipa->dev;
	u16 size_multiple;

	/* Make sure the memory region is valid for this version of IPA */
	if (!ipa_mem_id_valid(ipa, mem_id)) {
		dev_err(dev, "region id %u not valid\n", mem_id);
		return false;
	}

	if (!mem->size && !mem->canary_count) {
		dev_err(dev, "empty memory region %u\n", mem_id);
		return false;
	}

	/* Sizes must be a multiple of 8 bytes (4 for the modem region) */
	size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
	if (mem->size % size_multiple)
		dev_err(dev, "region %u size not a multiple of %u bytes\n",
			mem_id, size_multiple);
	else if (mem->offset % 8)
		dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
	else if (mem->offset < mem->canary_count * sizeof(__le32))
		dev_err(dev, "region %u offset too small for %hu canaries\n",
			mem_id, mem->canary_count);
	else if (mem_id == IPA_MEM_END_MARKER && mem->size)
		dev_err(dev, "non-zero end marker region size\n");
	else
		return true;

	return false;
}

/* Verify each defined memory region is valid. */
static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
	DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
	struct device *dev = ipa->dev;
	enum ipa_mem_id mem_id;
	u32 i;

	if (mem_data->local_count > IPA_MEM_COUNT) {
		dev_err(dev, "too many memory regions (%u > %u)\n",
			mem_data->local_count, IPA_MEM_COUNT);
		return false;
	}

	for (i = 0; i < mem_data->local_count; i++) {
		const struct ipa_mem *mem = &mem_data->local[i];

		if (__test_and_set_bit(mem->id, regions)) {
			dev_err(dev, "duplicate memory region %u\n", mem->id);
			return false;
		}

		/* Defined regions have non-zero size and/or canary count */
		if (!ipa_mem_valid_one(ipa, mem))
			return false;
	}

	/* Now see if any required regions are not defined */
	for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) {
		if (ipa_mem_id_required(ipa, mem_id))
			dev_err(dev, "required memory region %u missing\n",
				mem_id);
	}

	return true;
}

/* Do all memory regions fit within the IPA local memory? */
static bool ipa_mem_size_valid(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	u32 limit = ipa->mem_size;
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->offset + mem->size <= limit)
			continue;

		dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
			mem->id, limit);

		return false;
	}

	return true;
}

/**
 * ipa_mem_config() - Configure IPA shared memory
 * @ipa:	IPA pointer
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_config(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	const struct ipa_mem *mem;
	const struct reg *reg;
	dma_addr_t addr;
	u32 mem_size;
	void *virt;
	u32 val;
	u32 i;

	/* Check the advertised location and size of the shared memory area */
	reg = ipa_reg(ipa, SHARED_MEM_SIZE);
	val = ioread32(ipa->reg_virt + reg_offset(reg));

	/* The fields in the register are in 8 byte units */
	ipa->mem_offset = 8 * reg_decode(reg, MEM_BADDR, val);

	/* Make sure the end is within the region's mapped space */
	mem_size = 8 * reg_decode(reg, MEM_SIZE, val);

	/* If the hardware reports a smaller size, trust it and warn;
	 * quietly ignore a reported size larger than the mapped resource.
	 */
	if (ipa->mem_offset + mem_size < ipa->mem_size) {
		dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
			 mem_size);
		ipa->mem_size = mem_size;
	} else if (ipa->mem_offset + mem_size > ipa->mem_size) {
		dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n",
			mem_size);
	}
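
	/* Worked example with hypothetical field values: MEM_BADDR = 0x28
	 * and MEM_SIZE = 0x400 would yield mem_offset = 8 * 0x28 = 0x140
	 * and mem_size = 8 * 0x400 = 0x2000 bytes.  With a DT "ipa-shared"
	 * resource larger than 0x140 + 0x2000, the warning path above
	 * would trim ipa->mem_size down to 0x2000.
	 */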

	/* We know our memory size; make sure regions are all in range */
	if (!ipa_mem_size_valid(ipa))
		return -EINVAL;

	/* Prealloc DMA memory for zeroing regions */
	virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	ipa->zero_addr = addr;
	ipa->zero_virt = virt;
	ipa->zero_size = IPA_MEM_MAX;

	/* For each defined region, write "canary" values in the
	 * space prior to the region's base address if indicated.
	 */
	for (i = 0; i < ipa->mem_count; i++) {
		u16 canary_count = ipa->mem[i].canary_count;
		__le32 *canary;

		if (!canary_count)
			continue;

		/* Write canary values in the space before the region */
		canary = ipa->mem_virt + ipa->mem_offset + ipa->mem[i].offset;
		do
			*--canary = IPA_MEM_CANARY_VAL;
		while (--canary_count);
	}
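
	/* Resulting layout (illustrative): a region at offset 0x2a0 with
	 * canary_count 2 gets its two canaries in the 8 bytes immediately
	 * preceding it:
	 *
	 *	0x298: 0xdeadbeef
	 *	0x29c: 0xdeadbeef
	 *	0x2a0: start of region data
	 *
	 * ipa_mem_valid_one() already verified the offset leaves room for
	 * canary_count * sizeof(__le32) bytes of canaries.
	 */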

	/* Verify the microcontroller ring alignment (if defined) */
	mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
	if (mem && mem->offset % 1024) {
		dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
		goto err_dma_free;
	}

	return 0;

err_dma_free:
	dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);

	return -EINVAL;
}

/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
	struct device *dev = ipa->dev;

	dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
	ipa->zero_size = 0;
	ipa->zero_virt = NULL;
	ipa->zero_addr = 0;
}

/**
 * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
 * @ipa:	IPA pointer
 *
 * Zero regions of IPA-local memory used by the modem.  These are configured
 * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
 * restarts via SSR we need to re-initialize them.  A QMI message tells the
 * modem where to find regions of IPA local memory it needs to know about
 * (these included).
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_zero_modem(struct ipa *ipa)
{
	struct gsi_trans *trans;

	/* Get a transaction to zero the modem memory, modem header,
	 * and modem processing context regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 3);
	if (!trans) {
		dev_err(ipa->dev, "no transaction to zero modem memory\n");
		return -EBUSY;
	}

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_HEADER);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	return 0;
}

/**
 * ipa_imem_init() - Initialize IMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @addr:	Physical address of the IPA region in IMEM
 * @size:	Size (bytes) of the IPA region in IMEM
 *
 * IMEM is a block of shared memory separate from system DRAM, and
 * a portion of this memory is available for the IPA to use.  The
 * modem accesses this memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If this region exists (size > 0) we map it for read/write access
 * through the IOMMU using the IPA device.
 *
 * Note: @addr and @size are not guaranteed to be page-aligned.
 */
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	int ret;

	if (!size)
		return 0;	/* IMEM memory not used */

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for IMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to page boundaries */
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */
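
	/* Example of the alignment math, assuming 4 KB pages: addr =
	 * 0x146bd800 with size = 0x2000 gives phys = 0x146bd000 and a
	 * mapped size of PAGE_ALIGN(0x2000 + 0x800) = 0x3000, covering
	 * the region plus the partial pages at either end.
	 */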

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
			GFP_KERNEL);
	if (ret)
		return ret;

	ipa->imem_iova = iova;
	ipa->imem_size = size;

	return 0;
}

static void ipa_imem_exit(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;

	if (!ipa->imem_size)
		return;

	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
		if (size != ipa->imem_size)
			dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
				 size, ipa->imem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
	}

	ipa->imem_size = 0;
	ipa->imem_iova = 0;
}

/**
 * ipa_smem_init() - Initialize SMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @item:	Item ID of SMEM memory
 * @size:	Size (bytes) of SMEM memory region
 *
 * SMEM is a managed block of shared DRAM, from which numbered "items"
 * can be allocated.  One item is designated for use by the IPA.
 *
 * The modem accesses SMEM memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If the size provided is non-zero, we allocate the item and map it
 * for access through the IOMMU.
 *
 * Note: @size and the item address are not guaranteed to be page-aligned.
 */
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	phys_addr_t addr;
	size_t actual;
	void *virt;
	int ret;

	if (!size)
		return 0;	/* SMEM memory not used */

	/* SMEM is memory shared between the AP and another system entity
	 * (in this case, the modem).  An allocation from SMEM is persistent
	 * until the AP reboots; there is no way to free an allocated SMEM
	 * region.  Allocation only reserves the space; to use it you need
	 * to "get" a pointer to it (this does not imply reference counting).
	 * The item might have already been allocated, in which case we
	 * use it unless the size isn't what we expect.
	 */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
			ret, size, item);
		return ret;
	}

	/* Now get the address of the SMEM memory region */
	virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
	if (IS_ERR(virt)) {
		ret = PTR_ERR(virt);
		dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
		return ret;
	}

	/* If the item was already allocated (ret is still -EEXIST here),
	 * verify that its size is what we expect.
	 */
	if (ret && actual != size) {
		dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
			item, actual, size);
		return -EINVAL;
	}

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for SMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to a page boundary */
	addr = qcom_smem_virt_to_phys(virt);
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
			GFP_KERNEL);
	if (ret)
		return ret;

	ipa->smem_iova = iova;
	ipa->smem_size = size;

	return 0;
}

static void ipa_smem_exit(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
		if (size != ipa->smem_size)
			dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
				 size, ipa->smem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
	}

	ipa->smem_size = 0;
	ipa->smem_iova = 0;
}

/* Perform memory region-related initialization */
int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
		 const struct ipa_mem_data *mem_data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	/* Make sure the set of defined memory regions is valid */
	if (!ipa_mem_valid(ipa, mem_data))
		return -EINVAL;

	ipa->mem_count = mem_data->local_count;
	ipa->mem = mem_data->local;

	/* Check the route and filter table memory regions */
	if (!ipa_table_mem_valid(ipa, false))
		return -EINVAL;
	if (!ipa_table_mem_valid(ipa, true))
		return -EINVAL;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "error %d setting DMA mask\n", ret);
		return ret;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipa-shared");
	if (!res) {
		dev_err(dev,
			"DT error getting \"ipa-shared\" memory property\n");
		return -ENODEV;
	}

	ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
	if (!ipa->mem_virt) {
		dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
		return -ENOMEM;
	}

	ipa->mem_addr = res->start;
	ipa->mem_size = resource_size(res);

	ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
	if (ret)
		goto err_unmap;

	ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
	if (ret)
		goto err_imem_exit;

	return 0;

err_imem_exit:
	ipa_imem_exit(ipa);
err_unmap:
	memunmap(ipa->mem_virt);

	return ret;
}
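
/* Call-order sketch, as implied by the comments in this file: at probe
 * time ipa_mem_init() validates the region data and maps memory, then
 * ipa_mem_config() sizes the area and writes the canaries, and finally
 * ipa_mem_setup() issues the first immediate commands.  Teardown runs
 * the inverses in reverse order, except that ipa_mem_setup() needs none:
 * ipa_mem_deconfig(), then ipa_mem_exit().
 */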

/* Inverse of ipa_mem_init() */
void ipa_mem_exit(struct ipa *ipa)
{
	ipa_smem_exit(ipa);
	ipa_imem_exit(ipa);
	memunmap(ipa->mem_virt);
}