// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/of_address.h>

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
};

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
				    resource_size_t size,
				    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

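	/*
	 * Allocate a devres entry; its release callback,
	 * devm_ioremap_release(), will iounmap the cookie stored in it
	 * when the device is detached.
	 */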
	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return addr;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap().  Map is automatically unmapped on driver detach.
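 *
 * A minimal usage sketch (@phys_base and @len are hypothetical values
 * supplied by the caller):
 *
 *	void __iomem *base;
 *
 *	base = devm_ioremap(dev, phys_base, len);
 *	if (!base)
 *		return -ENOMEM;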
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);

/**
 * devm_ioremap_uc - Managed ioremap_uc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_uc().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
EXPORT_SYMBOL_GPL(devm_ioremap_uc);

/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_wc().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
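 *
 * A minimal usage sketch for an explicit early unmap (usually unnecessary,
 * since the mapping is undone automatically on detach):
 *
 *	devm_iounmap(dev, base);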
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);

static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
			enum devm_ioremap_type type)
{
	resource_size_t size;
	void __iomem *dest_ptr;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = __devm_ioremap(dev, res->start, size, type);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_ioremap_resource(struct device *dev,
				    const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap_resource);

/**
 * devm_ioremap_resource_wc() - write-combined variant of
 *				devm_ioremap_resource()
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example:
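 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource_wc(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);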
 */
void __iomem *devm_ioremap_resource_wc(struct device *dev,
				       const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}

/**
 * devm_of_iomap - Requests a resource and maps the memory mapped IO
 *		   for a given device_node managed by a given device
 * @dev:	The device "managing" the resource
 * @node:	The device-tree node where the resource resides
 * @index:	index of the MMIO range in the "reg" property
 * @size:	Returns the size of the resource (pass NULL if not needed)
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach of the device.
 *
 * This is to be used when a device requests/maps resources described
 * by other device tree nodes (children or otherwise).
 *
 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure. Usage example:
 *
 *	base = devm_of_iomap(&pdev->dev, node, 0, NULL);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
			    resource_size_t *size)
{
	struct resource res;

	if (of_address_to_resource(node, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);
	if (size)
		*size = resource_size(&res);
	return devm_ioremap_resource(dev, &res);
}
EXPORT_SYMBOL(devm_of_iomap);

#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map().  Map is automatically unmapped on driver
 * detach.
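 *
 * A minimal usage sketch (the port base and count below are hypothetical;
 * 0x3f8/8 is the legacy 8250 UART range):
 *
 *	void __iomem *base;
 *
 *	base = devm_ioport_map(dev, 0x3f8, 8);
 *	if (!base)
 *		return -ENOMEM;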
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			       unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap().  @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev.  If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated.  All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated, but it
 * can be safely called from any context and is guaranteed to succeed
 * once the table has been allocated.
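 *
 * A minimal usage sketch (assumes BAR 0 was mapped earlier with
 * pcim_iomap() or pcim_iomap_regions()):
 *
 *	void __iomem *mmio = pcim_iomap_table(pdev)[0];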
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

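	/*
	 * No table yet: allocate one and attach it.  devres_get() either
	 * installs new_dr or, if another thread raced us, frees it and
	 * returns the already-installed entry.
	 */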
	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap().  Map is automatically unmapped on driver
 * detach.
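 *
 * A minimal usage sketch (BAR 0; maxlen 0 maps the complete BAR):
 *
 *	void __iomem *mmio;
 *
 *	mmio = pcim_iomap(pdev, 0, 0);
 *	if (!mmio)
 *		return -ENOMEM;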
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
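 *
 * A minimal usage sketch from a probe routine (BAR 0 only; "mydrv" is a
 * hypothetical driver name):
 *
 *	rc = pcim_iomap_regions(pdev, 1 << 0, "mydrv");
 *	if (rc)
 *		return rc;
 *	mmio = pcim_iomap_table(pdev)[0];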
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

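	/*
	 * Unwind: release the region of the BAR that failed, then unmap
	 * and release every BAR already handled in this call.
	 */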
 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
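 *
 * A minimal usage sketch (iomap only BAR 0 but reserve all standard BARs,
 * e.g. to keep other drivers off the device; "mydrv" is hypothetical):
 *
 *	rc = pcim_iomap_regions_request_all(pdev, 1 << 0, "mydrv");
 *	if (rc)
 *		return rc;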
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	int request_mask = ((1 << PCI_STD_NUM_BARS) - 1) & ~mask;
	int rc;

	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to unmap IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */