// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2023 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/soc/qcom/smem.h>

#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "gsi_trans.h"

/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL		cpu_to_le32(0xdeadbeef)

/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM	1

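/* Return the defined memory region with the given ID, or NULL if none */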
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->id == mem_id)
			return mem;
	}

	return NULL;
}

/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
	dma_addr_t addr = ipa->zero_addr;

	if (!mem->size)
		return;

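	/* Copy the (zero-filled) DMA buffer at @addr into the region */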
	ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
}

/**
 * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
 * @ipa:	IPA pointer
 *
 * Set up the shared memory regions in IPA local memory.  This involves
 * zero-filling memory regions, and in the case of header memory, telling
 * the IPA where it's located.
 *
 * This function performs the initial setup of this memory.  If the modem
 * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
 *
 * The AP informs the modem where its portions of memory are located
 * in a QMI exchange that occurs at modem startup.
 *
 * There is no need for a matching ipa_mem_teardown() function.
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_setup(struct ipa *ipa)
{
	dma_addr_t addr = ipa->zero_addr;
	const struct ipa_mem *mem;
	struct gsi_trans *trans;
	const struct reg *reg;
	u32 offset;
	u16 size;
	u32 val;

	/* Get a transaction to define the header memory region and to zero
	 * the processing context and modem memory regions (at most four
	 * commands in all).
	 */
	trans = ipa_cmd_trans_alloc(ipa, 4);
	if (!trans) {
		dev_err(ipa->dev, "no transaction for memory setup\n");
		return -EBUSY;
	}

	/* Initialize IPA-local header memory.  The AP header region, if
	 * present, is contiguous with and follows the modem header region,
	 * and they are initialized together.
	 */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	offset = mem->offset;
	size = mem->size;
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	ipa_cmd_hdr_init_local_add(trans, offset, size, addr);

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_AP_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	/* Tell the hardware where the processing context area is located */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
	offset = ipa->mem_offset + mem->offset;

	reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
	val = reg_encode(reg, IPA_BASE_ADDR, offset);
	iowrite32(val, ipa->reg_virt + reg_offset(reg));

	return 0;
}

/* Is the given memory region ID valid for the current IPA version? */
static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	enum ipa_version version = ipa->version;

	switch (mem_id) {
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_AP_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
	case IPA_MEM_UC_EVENT_RING:
	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
	case IPA_MEM_STATS_QUOTA_AP:
	case IPA_MEM_END_MARKER:	/* pseudo region */
		break;

	case IPA_MEM_STATS_TETHERING:
	case IPA_MEM_STATS_DROP:
		if (version < IPA_VERSION_4_0)
			return false;
		break;

	case IPA_MEM_STATS_V4_FILTER:
	case IPA_MEM_STATS_V6_FILTER:
	case IPA_MEM_STATS_V4_ROUTE:
	case IPA_MEM_STATS_V6_ROUTE:
		if (version < IPA_VERSION_4_0 || version > IPA_VERSION_4_2)
			return false;
		break;

	case IPA_MEM_AP_V4_FILTER:
	case IPA_MEM_AP_V6_FILTER:
		if (version < IPA_VERSION_5_0)
			return false;
		break;

	case IPA_MEM_NAT_TABLE:
	case IPA_MEM_STATS_FILTER_ROUTE:
		if (version < IPA_VERSION_4_5)
			return false;
		break;

	default:
		return false;
	}

	return true;
}

/* Must the given memory region be present in the configuration? */
static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	switch (mem_id) {
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
		return true;

	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
		return ipa->version >= IPA_VERSION_4_0;

	case IPA_MEM_STATS_TETHERING:
		return ipa->version >= IPA_VERSION_4_0 &&
			ipa->version != IPA_VERSION_5_0;

	default:
		return false;		/* Anything else is optional */
	}
}

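/* Check a single defined memory region for validity */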
static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
	enum ipa_mem_id mem_id = mem->id;
	struct device *dev = ipa->dev;
	u16 size_multiple;

	/* Make sure the memory region is valid for this version of IPA */
	if (!ipa_mem_id_valid(ipa, mem_id)) {
		dev_err(dev, "region id %u not valid\n", mem_id);
		return false;
	}

	if (!mem->size && !mem->canary_count) {
		dev_err(dev, "empty memory region %u\n", mem_id);
		return false;
	}

	/* Sizes must be a multiple of 8 bytes (4 for the modem region) */
	size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
	if (mem->size % size_multiple)
		dev_err(dev, "region %u size not a multiple of %u bytes\n",
			mem_id, size_multiple);
	else if (mem->offset % 8)
		dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
	else if (mem->offset < mem->canary_count * sizeof(__le32))
		dev_err(dev, "region %u offset too small for %hu canaries\n",
			mem_id, mem->canary_count);
	else if (mem_id == IPA_MEM_END_MARKER && mem->size)
		dev_err(dev, "non-zero end marker region size\n");
	else
		return true;

	return false;
}

/* Verify each defined memory region is valid. */
static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
	DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
	struct device *dev = ipa->dev;
	enum ipa_mem_id mem_id;
	u32 i;

	if (mem_data->local_count > IPA_MEM_COUNT) {
		dev_err(dev, "too many memory regions (%u > %u)\n",
			mem_data->local_count, IPA_MEM_COUNT);
		return false;
	}

	for (i = 0; i < mem_data->local_count; i++) {
		const struct ipa_mem *mem = &mem_data->local[i];

		if (__test_and_set_bit(mem->id, regions)) {
			dev_err(dev, "duplicate memory region %u\n", mem->id);
			return false;
		}

		/* Defined regions have non-zero size and/or canary count */
		if (!ipa_mem_valid_one(ipa, mem))
			return false;
	}

	/* Now see if any required regions are not defined */
	for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) {
		if (ipa_mem_id_required(ipa, mem_id))
			dev_err(dev, "required memory region %u missing\n",
				mem_id);
	}

	return true;
}

/* Do all memory regions fit within the IPA local memory? */
static bool ipa_mem_size_valid(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	u32 limit = ipa->mem_size;
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->offset + mem->size <= limit)
			continue;

		dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
			mem->id, limit);

		return false;
	}

	return true;
}

/**
 * ipa_mem_config() - Configure IPA shared memory
 * @ipa:	IPA pointer
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_config(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	const struct ipa_mem *mem;
	const struct reg *reg;
	dma_addr_t addr;
	u32 mem_size;
	void *virt;
	u32 val;
	u32 i;

	/* Check the advertised location and size of the shared memory area */
	reg = ipa_reg(ipa, SHARED_MEM_SIZE);
	val = ioread32(ipa->reg_virt + reg_offset(reg));

	/* The fields in the register are in 8 byte units */
	ipa->mem_offset = 8 * reg_decode(reg, MEM_BADDR, val);

	/* Make sure the end is within the region's mapped space */
	mem_size = 8 * reg_decode(reg, MEM_SIZE, val);

	/* If the sizes don't match, issue a warning */
	if (ipa->mem_offset + mem_size < ipa->mem_size) {
		dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
			 mem_size);
		ipa->mem_size = mem_size;
	} else if (ipa->mem_offset + mem_size > ipa->mem_size) {
		dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n",
			mem_size);
	}

	/* We know our memory size; make sure regions are all in range */
	if (!ipa_mem_size_valid(ipa))
		return -EINVAL;

	/* Prealloc DMA memory for zeroing regions */
	virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	ipa->zero_addr = addr;
	ipa->zero_virt = virt;
	ipa->zero_size = IPA_MEM_MAX;

	/* For each defined region, write "canary" values in the
	 * space prior to the region's base address if indicated.
	 */
	for (i = 0; i < ipa->mem_count; i++) {
		u16 canary_count = ipa->mem[i].canary_count;
		__le32 *canary;

		if (!canary_count)
			continue;

		/* Write canary values in the space before the region */
		canary = ipa->mem_virt + ipa->mem_offset + ipa->mem[i].offset;
		do
			*--canary = IPA_MEM_CANARY_VAL;
		while (--canary_count);
	}

	/* Verify the microcontroller ring alignment (if defined) */
	mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
	if (mem && mem->offset % 1024) {
		dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
		goto err_dma_free;
	}

	return 0;

err_dma_free:
	dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);

	return -EINVAL;
}

/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
	struct device *dev = ipa->dev;

	dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
	ipa->zero_size = 0;
	ipa->zero_virt = NULL;
	ipa->zero_addr = 0;
}

/**
 * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
 * @ipa:	IPA pointer
 *
 * Zero regions of IPA-local memory used by the modem.  These are configured
 * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
 * restarts via SSR we need to re-initialize them.  A QMI message tells the
 * modem where to find the regions of IPA local memory it needs to know
 * about, including these.
 */
int ipa_mem_zero_modem(struct ipa *ipa)
{
	struct gsi_trans *trans;

	/* Get a transaction to zero the modem memory, modem header,
	 * and modem processing context regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 3);
	if (!trans) {
		dev_err(ipa->dev, "no transaction to zero modem memory\n");
		return -EBUSY;
	}

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_HEADER);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	return 0;
}

/**
 * ipa_imem_init() - Initialize IMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @addr:	Physical address of the IPA region in IMEM
 * @size:	Size (bytes) of the IPA region in IMEM
 *
 * IMEM is a block of shared memory separate from system DRAM, and
 * a portion of this memory is available for the IPA to use.  The
 * modem accesses this memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If this region exists (size > 0) we map it for read/write access
 * through the IOMMU using the IPA device.
 *
 * Note: @addr and @size are not guaranteed to be page-aligned.
 */
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	int ret;

	if (!size)
		return 0;	/* IMEM memory not used */

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for IMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to page boundaries */
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
			GFP_KERNEL);
	if (ret)
		return ret;

	ipa->imem_iova = iova;
	ipa->imem_size = size;

	return 0;
}

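/* Inverse of ipa_imem_init(); unmap the IMEM region from the IOMMU */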
static void ipa_imem_exit(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;

	if (!ipa->imem_size)
		return;

	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
		if (size != ipa->imem_size)
			dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
				 size, ipa->imem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
	}

	ipa->imem_size = 0;
	ipa->imem_iova = 0;
}

/**
 * ipa_smem_init() - Initialize SMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @item:	Item ID of SMEM memory
 * @size:	Size (bytes) of SMEM memory region
 *
 * SMEM is a managed block of shared DRAM, from which numbered "items"
 * can be allocated.  One item is designated for use by the IPA.
 *
 * The modem accesses SMEM memory directly, but the IPA accesses it
 * via the IOMMU, using the AP's credentials.
 *
 * If the size provided is non-zero, we allocate the SMEM item and map
 * it for access through the IOMMU.
 *
 * Note: @size and the item address are not guaranteed to be page-aligned.
 */
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	phys_addr_t addr;
	size_t actual;
	void *virt;
	int ret;

	if (!size)
		return 0;	/* SMEM memory not used */

	/* SMEM is memory shared between the AP and another system entity
	 * (in this case, the modem).  An allocation from SMEM is persistent
	 * until the AP reboots; there is no way to free an allocated SMEM
	 * region.  Allocation only reserves the space; to use it you need
	 * to "get" a pointer to it (this does not imply reference counting).
	 * The item might have already been allocated, in which case we
	 * use it unless the size isn't what we expect.
	 */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
			ret, size, item);
		return ret;
	}

	/* Now get the address of the SMEM memory region */
	virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
	if (IS_ERR(virt)) {
		ret = PTR_ERR(virt);
		dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
		return ret;
	}

	/* In case the region was already allocated, verify the size */
	if (ret && actual != size) {
		dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
			item, actual, size);
		return -EINVAL;
	}

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for SMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to a page boundary */
	addr = qcom_smem_virt_to_phys(virt);
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
			GFP_KERNEL);
	if (ret)
		return ret;

	ipa->smem_iova = iova;
	ipa->smem_size = size;

	return 0;
}

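/* Inverse of ipa_smem_init(); unmap the SMEM region from the IOMMU */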
static void ipa_smem_exit(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
		if (size != ipa->smem_size)
			dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
				 size, ipa->smem_size);

	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
	}

	ipa->smem_size = 0;
	ipa->smem_iova = 0;
}

/* Perform memory region-related initialization */
int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
		 const struct ipa_mem_data *mem_data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	/* Make sure the set of defined memory regions is valid */
	if (!ipa_mem_valid(ipa, mem_data))
		return -EINVAL;

	ipa->mem_count = mem_data->local_count;
	ipa->mem = mem_data->local;

	/* Check the route and filter table memory regions */
	if (!ipa_table_mem_valid(ipa, false))
		return -EINVAL;
	if (!ipa_table_mem_valid(ipa, true))
		return -EINVAL;

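	/* Enable 64-bit DMA addressing for the IPA device */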
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "error %d setting DMA mask\n", ret);
		return ret;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipa-shared");
	if (!res) {
		dev_err(dev,
			"DT error getting \"ipa-shared\" memory property\n");
		return -ENODEV;
	}

	ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
	if (!ipa->mem_virt) {
		dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
		return -ENOMEM;
	}

	ipa->mem_addr = res->start;
	ipa->mem_size = resource_size(res);

	ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
	if (ret)
		goto err_unmap;

	ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
	if (ret)
		goto err_imem_exit;

	return 0;

err_imem_exit:
	ipa_imem_exit(ipa);
err_unmap:
	memunmap(ipa->mem_virt);

	return ret;
}

/* Inverse of ipa_mem_init() */
void ipa_mem_exit(struct ipa *ipa)
{
	ipa_smem_exit(ipa);
	ipa_imem_exit(ipa);
	memunmap(ipa->mem_virt);
}