1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/device.h>
3 #include <linux/pci.h>
4 #include "pci.h"
5
/*
 * On the state of PCI's devres implementation:
 *
 * The older PCI devres API has one significant problem:
 *
 * It is very strongly tied to the statically allocated mapping table in struct
 * pcim_iomap_devres below. This is mostly solved in the sense of the pcim_
 * functions in this file providing things like ranged mapping by bypassing
 * this table, whereas the functions that were present in the old API still
 * enter the mapping addresses into the table for users of the old API.
 *
 * TODO:
 * Remove the legacy table entirely once all calls to pcim_iomap_table() in
 * the kernel have been removed.
 */
21
/*
 * Legacy struct storing addresses to whole mapped BARs.
 */
struct pcim_iomap_devres {
	/* Mapping address per BAR index; NULL for BARs that are not mapped. */
	void __iomem *table[PCI_NUM_RESOURCES];
};
28
/* Used to restore the old INTx state on driver detach. */
struct pcim_intx_devres {
	/* Boolean: INTx enable state before the driver first changed it. */
	int orig_intx;
};
33
/* Kinds of resources a struct pcim_addr_devres entry can track. */
enum pcim_addr_devres_type {
	/* Default initializer. */
	PCIM_ADDR_DEVRES_TYPE_INVALID,

	/* A requested region spanning an entire BAR. */
	PCIM_ADDR_DEVRES_TYPE_REGION,

	/*
	 * A requested region spanning an entire BAR, and a mapping for
	 * the entire BAR.
	 */
	PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING,

	/*
	 * A mapping within a BAR, either spanning the whole BAR or just a
	 * range. Without a requested region.
	 */
	PCIM_ADDR_DEVRES_TYPE_MAPPING,
};
53
/*
 * This struct envelops IO or MEM addresses, i.e., mappings and region
 * requests, because those are very frequently requested and released
 * together.
 */
struct pcim_addr_devres {
	enum pcim_addr_devres_type type;
	void __iomem *baseaddr;	/* Base address of the iomapping, if any. */
	unsigned long offset;	/* NOTE(review): never written in this file. */
	unsigned long len;	/* NOTE(review): never written in this file. */
	int bar;		/* BAR index; -1 means "no BAR associated". */
};
66
pcim_addr_devres_clear(struct pcim_addr_devres * res)67 static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
68 {
69 memset(res, 0, sizeof(*res));
70 res->bar = -1;
71 }
72
pcim_addr_resource_release(struct device * dev,void * resource_raw)73 static void pcim_addr_resource_release(struct device *dev, void *resource_raw)
74 {
75 struct pci_dev *pdev = to_pci_dev(dev);
76 struct pcim_addr_devres *res = resource_raw;
77
78 switch (res->type) {
79 case PCIM_ADDR_DEVRES_TYPE_REGION:
80 pci_release_region(pdev, res->bar);
81 break;
82 case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
83 pci_iounmap(pdev, res->baseaddr);
84 pci_release_region(pdev, res->bar);
85 break;
86 case PCIM_ADDR_DEVRES_TYPE_MAPPING:
87 pci_iounmap(pdev, res->baseaddr);
88 break;
89 default:
90 break;
91 }
92 }
93
pcim_addr_devres_alloc(struct pci_dev * pdev)94 static struct pcim_addr_devres *pcim_addr_devres_alloc(struct pci_dev *pdev)
95 {
96 struct pcim_addr_devres *res;
97
98 res = devres_alloc_node(pcim_addr_resource_release, sizeof(*res),
99 GFP_KERNEL, dev_to_node(&pdev->dev));
100 if (res)
101 pcim_addr_devres_clear(res);
102 return res;
103 }
104
/*
 * Just for consistency and readability: counterpart to
 * pcim_addr_devres_alloc() for entries that were never devres_add()ed.
 */
static inline void pcim_addr_devres_free(struct pcim_addr_devres *res)
{
	devres_free(res);
}
110
111 /*
112 * Used by devres to identify a pcim_addr_devres.
113 */
pcim_addr_resources_match(struct device * dev,void * a_raw,void * b_raw)114 static int pcim_addr_resources_match(struct device *dev,
115 void *a_raw, void *b_raw)
116 {
117 struct pcim_addr_devres *a, *b;
118
119 a = a_raw;
120 b = b_raw;
121
122 if (a->type != b->type)
123 return 0;
124
125 switch (a->type) {
126 case PCIM_ADDR_DEVRES_TYPE_REGION:
127 case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
128 return a->bar == b->bar;
129 case PCIM_ADDR_DEVRES_TYPE_MAPPING:
130 return a->baseaddr == b->baseaddr;
131 default:
132 return 0;
133 }
134 }
135
/* Devres release callback wrapping pci_unmap_iospace(). */
static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
{
	struct resource **iores = ptr;

	pci_unmap_iospace(*iores);
}
142
143 /**
144 * devm_pci_remap_iospace - Managed pci_remap_iospace()
145 * @dev: Generic device to remap IO address for
146 * @res: Resource describing the I/O space
147 * @phys_addr: physical address of range to be mapped
148 *
149 * Managed pci_remap_iospace(). Map is automatically unmapped on driver
150 * detach.
151 */
devm_pci_remap_iospace(struct device * dev,const struct resource * res,phys_addr_t phys_addr)152 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
153 phys_addr_t phys_addr)
154 {
155 const struct resource **ptr;
156 int error;
157
158 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
159 if (!ptr)
160 return -ENOMEM;
161
162 error = pci_remap_iospace(res, phys_addr);
163 if (error) {
164 devres_free(ptr);
165 } else {
166 *ptr = res;
167 devres_add(dev, ptr);
168 }
169
170 return error;
171 }
172 EXPORT_SYMBOL(devm_pci_remap_iospace);
173
174 /**
175 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
176 * @dev: Generic device to remap IO address for
177 * @offset: Resource address to map
178 * @size: Size of map
179 *
180 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
181 * detach.
182 */
devm_pci_remap_cfgspace(struct device * dev,resource_size_t offset,resource_size_t size)183 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
184 resource_size_t offset,
185 resource_size_t size)
186 {
187 void __iomem **ptr, *addr;
188
189 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
190 if (!ptr)
191 return NULL;
192
193 addr = pci_remap_cfgspace(offset, size);
194 if (addr) {
195 *ptr = addr;
196 devres_add(dev, ptr);
197 } else
198 devres_free(ptr);
199
200 return addr;
201 }
202 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
203
204 /**
205 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
206 * @dev: generic device to handle the resource for
207 * @res: configuration space resource to be handled
208 *
209 * Checks that a resource is a valid memory region, requests the memory
210 * region and ioremaps with pci_remap_cfgspace() API that ensures the
211 * proper PCI configuration space memory attributes are guaranteed.
212 *
213 * All operations are managed and will be undone on driver detach.
214 *
215 * Returns a pointer to the remapped memory or an IOMEM_ERR_PTR() encoded error
216 * code on failure. Usage example::
217 *
218 * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
219 * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
220 * if (IS_ERR(base))
221 * return PTR_ERR(base);
222 */
devm_pci_remap_cfg_resource(struct device * dev,struct resource * res)223 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
224 struct resource *res)
225 {
226 resource_size_t size;
227 const char *name;
228 void __iomem *dest_ptr;
229
230 BUG_ON(!dev);
231
232 if (!res || resource_type(res) != IORESOURCE_MEM) {
233 dev_err(dev, "invalid resource\n");
234 return IOMEM_ERR_PTR(-EINVAL);
235 }
236
237 size = resource_size(res);
238
239 if (res->name)
240 name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
241 res->name);
242 else
243 name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
244 if (!name)
245 return IOMEM_ERR_PTR(-ENOMEM);
246
247 if (!devm_request_mem_region(dev, res->start, size, name)) {
248 dev_err(dev, "can't request region for resource %pR\n", res);
249 return IOMEM_ERR_PTR(-EBUSY);
250 }
251
252 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
253 if (!dest_ptr) {
254 dev_err(dev, "ioremap failed for resource %pR\n", res);
255 devm_release_mem_region(dev, res->start, size);
256 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
257 }
258
259 return dest_ptr;
260 }
261 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
262
/* Devres action: undo pci_set_mwi() on driver detach. */
static void __pcim_clear_mwi(void *pdev_raw)
{
	pci_clear_mwi(pdev_raw);
}
269
270 /**
271 * pcim_set_mwi - a device-managed pci_set_mwi()
272 * @pdev: the PCI device for which MWI is enabled
273 *
274 * Managed pci_set_mwi().
275 *
276 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
277 */
pcim_set_mwi(struct pci_dev * pdev)278 int pcim_set_mwi(struct pci_dev *pdev)
279 {
280 int ret;
281
282 ret = devm_add_action(&pdev->dev, __pcim_clear_mwi, pdev);
283 if (ret != 0)
284 return ret;
285
286 ret = pci_set_mwi(pdev);
287 if (ret != 0)
288 devm_remove_action(&pdev->dev, __pcim_clear_mwi, pdev);
289
290 return ret;
291 }
292 EXPORT_SYMBOL(pcim_set_mwi);
293
mask_contains_bar(int mask,int bar)294 static inline bool mask_contains_bar(int mask, int bar)
295 {
296 return mask & BIT(bar);
297 }
298
pcim_intx_restore(struct device * dev,void * data)299 static void pcim_intx_restore(struct device *dev, void *data)
300 {
301 struct pci_dev *pdev = to_pci_dev(dev);
302 struct pcim_intx_devres *res = data;
303
304 pci_intx(pdev, res->orig_intx);
305 }
306
save_orig_intx(struct pci_dev * pdev,struct pcim_intx_devres * res)307 static void save_orig_intx(struct pci_dev *pdev, struct pcim_intx_devres *res)
308 {
309 u16 pci_command;
310
311 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
312 res->orig_intx = !(pci_command & PCI_COMMAND_INTX_DISABLE);
313 }
314
315 /**
316 * pcim_intx - managed pci_intx()
317 * @pdev: the PCI device to operate on
318 * @enable: boolean: whether to enable or disable PCI INTx
319 *
320 * Returns: 0 on success, -ENOMEM on error.
321 *
322 * Enable/disable PCI INTx for device @pdev.
323 * Restore the original state on driver detach.
324 */
pcim_intx(struct pci_dev * pdev,int enable)325 int pcim_intx(struct pci_dev *pdev, int enable)
326 {
327 struct pcim_intx_devres *res;
328 struct device *dev = &pdev->dev;
329
330 /*
331 * pcim_intx() must only restore the INTx value that existed before the
332 * driver was loaded, i.e., before it called pcim_intx() for the
333 * first time.
334 */
335 res = devres_find(dev, pcim_intx_restore, NULL, NULL);
336 if (!res) {
337 res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
338 if (!res)
339 return -ENOMEM;
340
341 save_orig_intx(pdev, res);
342 devres_add(dev, res);
343 }
344
345 pci_intx(pdev, enable);
346
347 return 0;
348 }
349 EXPORT_SYMBOL_GPL(pcim_intx);
350
/*
 * Devres action for pcim_enable_device(): disable the device on detach
 * unless it has been pinned, and drop the managed flag.
 */
static void pcim_disable_device(void *pdev_raw)
{
	struct pci_dev *pdev = pdev_raw;

	if (!pdev->pinned)
		pci_disable_device(pdev);

	pdev->is_managed = false;
}
360
361 /**
362 * pcim_enable_device - Managed pci_enable_device()
363 * @pdev: PCI device to be initialized
364 *
365 * Returns: 0 on success, negative error code on failure.
366 *
367 * Managed pci_enable_device(). Device will automatically be disabled on
368 * driver detach.
369 */
pcim_enable_device(struct pci_dev * pdev)370 int pcim_enable_device(struct pci_dev *pdev)
371 {
372 int ret;
373
374 ret = devm_add_action(&pdev->dev, pcim_disable_device, pdev);
375 if (ret != 0)
376 return ret;
377
378 /*
379 * We prefer removing the action in case of an error over
380 * devm_add_action_or_reset() because the latter could theoretically be
381 * disturbed by users having pinned the device too soon.
382 */
383 ret = pci_enable_device(pdev);
384 if (ret != 0) {
385 devm_remove_action(&pdev->dev, pcim_disable_device, pdev);
386 return ret;
387 }
388
389 pdev->is_managed = true;
390
391 return ret;
392 }
393 EXPORT_SYMBOL(pcim_enable_device);
394
/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev. Pinned device won't be disabled on driver
 * detach. @pdev must have been enabled with pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	/* Checked by pcim_disable_device() before disabling on detach. */
	pdev->pinned = true;
}
EXPORT_SYMBOL(pcim_pin_device);
407
/* Devres release callback for the legacy iomap table; intentionally a no-op. */
static void pcim_iomap_release(struct device *gendev, void *res)
{
	/*
	 * Do nothing. This is legacy code.
	 *
	 * Cleanup of the mappings is now done directly through the callbacks
	 * registered when creating them.
	 */
}
417
418 /**
419 * pcim_iomap_table - access iomap allocation table (DEPRECATED)
420 * @pdev: PCI device to access iomap table for
421 *
422 * Returns:
423 * Const pointer to array of __iomem pointers on success, NULL on failure.
424 *
425 * Access iomap allocation table for @dev. If iomap table doesn't
426 * exist and @pdev is managed, it will be allocated. All iomaps
427 * recorded in the iomap table are automatically unmapped on driver
428 * detach.
429 *
430 * This function might sleep when the table is first allocated but can
431 * be safely called without context and guaranteed to succeed once
432 * allocated.
433 *
434 * This function is DEPRECATED. Do not use it in new code. Instead, obtain a
435 * mapping's address directly from one of the pcim_* mapping functions. For
436 * example:
437 * void __iomem \*mappy = pcim_iomap(pdev, bar, length);
438 */
pcim_iomap_table(struct pci_dev * pdev)439 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
440 {
441 struct pcim_iomap_devres *dr, *new_dr;
442
443 dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
444 if (dr)
445 return dr->table;
446
447 new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
448 dev_to_node(&pdev->dev));
449 if (!new_dr)
450 return NULL;
451 dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
452 return dr->table;
453 }
454 EXPORT_SYMBOL(pcim_iomap_table);
455
456 /*
457 * Fill the legacy mapping-table, so that drivers using the old API can
458 * still get a BAR's mapping address through pcim_iomap_table().
459 */
pcim_add_mapping_to_legacy_table(struct pci_dev * pdev,void __iomem * mapping,int bar)460 static int pcim_add_mapping_to_legacy_table(struct pci_dev *pdev,
461 void __iomem *mapping, int bar)
462 {
463 void __iomem **legacy_iomap_table;
464
465 if (!pci_bar_index_is_valid(bar))
466 return -EINVAL;
467
468 legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
469 if (!legacy_iomap_table)
470 return -ENOMEM;
471
472 /* The legacy mechanism doesn't allow for duplicate mappings. */
473 WARN_ON(legacy_iomap_table[bar]);
474
475 legacy_iomap_table[bar] = mapping;
476
477 return 0;
478 }
479
480 /*
481 * Remove a mapping. The table only contains whole-BAR mappings, so this will
482 * never interfere with ranged mappings.
483 */
pcim_remove_mapping_from_legacy_table(struct pci_dev * pdev,void __iomem * addr)484 static void pcim_remove_mapping_from_legacy_table(struct pci_dev *pdev,
485 void __iomem *addr)
486 {
487 int bar;
488 void __iomem **legacy_iomap_table;
489
490 legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
491 if (!legacy_iomap_table)
492 return;
493
494 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
495 if (legacy_iomap_table[bar] == addr) {
496 legacy_iomap_table[bar] = NULL;
497 return;
498 }
499 }
500 }
501
502 /*
503 * The same as pcim_remove_mapping_from_legacy_table(), but identifies the
504 * mapping by its BAR index.
505 */
pcim_remove_bar_from_legacy_table(struct pci_dev * pdev,int bar)506 static void pcim_remove_bar_from_legacy_table(struct pci_dev *pdev, int bar)
507 {
508 void __iomem **legacy_iomap_table;
509
510 if (!pci_bar_index_is_valid(bar))
511 return;
512
513 legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
514 if (!legacy_iomap_table)
515 return;
516
517 legacy_iomap_table[bar] = NULL;
518 }
519
520 /**
521 * pcim_iomap - Managed pcim_iomap()
522 * @pdev: PCI device to iomap for
523 * @bar: BAR to iomap
524 * @maxlen: Maximum length of iomap
525 *
526 * Returns: __iomem pointer on success, NULL on failure.
527 *
528 * Managed pci_iomap(). Map is automatically unmapped on driver detach. If
529 * desired, unmap manually only with pcim_iounmap().
530 *
531 * This SHOULD only be used once per BAR.
532 *
533 * NOTE:
534 * Contrary to the other pcim_* functions, this function does not return an
535 * IOMEM_ERR_PTR() on failure, but a simple NULL. This is done for backwards
536 * compatibility.
537 */
pcim_iomap(struct pci_dev * pdev,int bar,unsigned long maxlen)538 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
539 {
540 void __iomem *mapping;
541 struct pcim_addr_devres *res;
542
543 if (!pci_bar_index_is_valid(bar))
544 return NULL;
545
546 res = pcim_addr_devres_alloc(pdev);
547 if (!res)
548 return NULL;
549 res->type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
550
551 mapping = pci_iomap(pdev, bar, maxlen);
552 if (!mapping)
553 goto err_iomap;
554 res->baseaddr = mapping;
555
556 if (pcim_add_mapping_to_legacy_table(pdev, mapping, bar) != 0)
557 goto err_table;
558
559 devres_add(&pdev->dev, res);
560 return mapping;
561
562 err_table:
563 pci_iounmap(pdev, mapping);
564 err_iomap:
565 pcim_addr_devres_free(res);
566 return NULL;
567 }
568 EXPORT_SYMBOL(pcim_iomap);
569
570 /**
571 * pcim_iounmap - Managed pci_iounmap()
572 * @pdev: PCI device to iounmap for
573 * @addr: Address to unmap
574 *
575 * Managed pci_iounmap(). @addr must have been mapped using a pcim_* mapping
576 * function.
577 */
pcim_iounmap(struct pci_dev * pdev,void __iomem * addr)578 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
579 {
580 struct pcim_addr_devres res_searched;
581
582 pcim_addr_devres_clear(&res_searched);
583 res_searched.type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
584 res_searched.baseaddr = addr;
585
586 if (devres_release(&pdev->dev, pcim_addr_resource_release,
587 pcim_addr_resources_match, &res_searched) != 0) {
588 /* Doesn't exist. User passed nonsense. */
589 return;
590 }
591
592 pcim_remove_mapping_from_legacy_table(pdev, addr);
593 }
594 EXPORT_SYMBOL(pcim_iounmap);
595
596 /**
597 * pcim_iomap_region - Request and iomap a PCI BAR
598 * @pdev: PCI device to map IO resources for
599 * @bar: Index of a BAR to map
600 * @name: Name of the driver requesting the resource
601 *
602 * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
603 *
604 * Mapping and region will get automatically released on driver detach. If
605 * desired, release manually only with pcim_iounmap_region().
606 */
pcim_iomap_region(struct pci_dev * pdev,int bar,const char * name)607 void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
608 const char *name)
609 {
610 int ret;
611 struct pcim_addr_devres *res;
612
613 if (!pci_bar_index_is_valid(bar))
614 return IOMEM_ERR_PTR(-EINVAL);
615
616 res = pcim_addr_devres_alloc(pdev);
617 if (!res)
618 return IOMEM_ERR_PTR(-ENOMEM);
619
620 res->type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
621 res->bar = bar;
622
623 ret = pci_request_region(pdev, bar, name);
624 if (ret != 0)
625 goto err_region;
626
627 res->baseaddr = pci_iomap(pdev, bar, 0);
628 if (!res->baseaddr) {
629 ret = -EINVAL;
630 goto err_iomap;
631 }
632
633 devres_add(&pdev->dev, res);
634 return res->baseaddr;
635
636 err_iomap:
637 pci_release_region(pdev, bar);
638 err_region:
639 pcim_addr_devres_free(res);
640
641 return IOMEM_ERR_PTR(ret);
642 }
643 EXPORT_SYMBOL(pcim_iomap_region);
644
645 /**
646 * pcim_iounmap_region - Unmap and release a PCI BAR
647 * @pdev: PCI device to operate on
648 * @bar: Index of BAR to unmap and release
649 *
650 * Unmap a BAR and release its region manually. Only pass BARs that were
651 * previously mapped by pcim_iomap_region().
652 */
pcim_iounmap_region(struct pci_dev * pdev,int bar)653 void pcim_iounmap_region(struct pci_dev *pdev, int bar)
654 {
655 struct pcim_addr_devres res_searched;
656
657 pcim_addr_devres_clear(&res_searched);
658 res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
659 res_searched.bar = bar;
660
661 devres_release(&pdev->dev, pcim_addr_resource_release,
662 pcim_addr_resources_match, &res_searched);
663 }
664 EXPORT_SYMBOL(pcim_iounmap_region);
665
666 /**
667 * pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED)
668 * @pdev: PCI device to map IO resources for
669 * @mask: Mask of BARs to request and iomap
670 * @name: Name of the driver requesting the resources
671 *
672 * Returns: 0 on success, negative error code on failure.
673 *
674 * Request and iomap regions specified by @mask.
675 *
676 * This function is DEPRECATED. Do not use it in new code.
677 * Use pcim_iomap_region() instead.
678 */
pcim_iomap_regions(struct pci_dev * pdev,int mask,const char * name)679 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
680 {
681 int ret;
682 int bar;
683 void __iomem *mapping;
684
685 for (bar = 0; bar < DEVICE_COUNT_RESOURCE; bar++) {
686 if (!mask_contains_bar(mask, bar))
687 continue;
688
689 mapping = pcim_iomap_region(pdev, bar, name);
690 if (IS_ERR(mapping)) {
691 ret = PTR_ERR(mapping);
692 goto err;
693 }
694 ret = pcim_add_mapping_to_legacy_table(pdev, mapping, bar);
695 if (ret != 0)
696 goto err;
697 }
698
699 return 0;
700
701 err:
702 while (--bar >= 0) {
703 pcim_iounmap_region(pdev, bar);
704 pcim_remove_bar_from_legacy_table(pdev, bar);
705 }
706
707 return ret;
708 }
709 EXPORT_SYMBOL(pcim_iomap_regions);
710
711 /**
712 * pcim_request_region - Request a PCI BAR
713 * @pdev: PCI device to request region for
714 * @bar: Index of BAR to request
715 * @name: Name of the driver requesting the resource
716 *
717 * Returns: 0 on success, a negative error code on failure.
718 *
719 * Request region specified by @bar.
720 *
721 * The region will automatically be released on driver detach. If desired,
722 * release manually only with pcim_release_region().
723 */
pcim_request_region(struct pci_dev * pdev,int bar,const char * name)724 int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
725 {
726 int ret;
727 struct pcim_addr_devres *res;
728
729 if (!pci_bar_index_is_valid(bar))
730 return -EINVAL;
731
732 res = pcim_addr_devres_alloc(pdev);
733 if (!res)
734 return -ENOMEM;
735 res->type = PCIM_ADDR_DEVRES_TYPE_REGION;
736 res->bar = bar;
737
738 ret = pci_request_region(pdev, bar, name);
739 if (ret != 0) {
740 pcim_addr_devres_free(res);
741 return ret;
742 }
743
744 devres_add(&pdev->dev, res);
745 return 0;
746 }
747 EXPORT_SYMBOL(pcim_request_region);
748
749 /**
750 * pcim_release_region - Release a PCI BAR
751 * @pdev: PCI device to operate on
752 * @bar: Index of BAR to release
753 *
754 * Release a region manually that was previously requested by
755 * pcim_request_region().
756 */
pcim_release_region(struct pci_dev * pdev,int bar)757 static void pcim_release_region(struct pci_dev *pdev, int bar)
758 {
759 struct pcim_addr_devres res_searched;
760
761 pcim_addr_devres_clear(&res_searched);
762 res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION;
763 res_searched.bar = bar;
764
765 devres_release(&pdev->dev, pcim_addr_resource_release,
766 pcim_addr_resources_match, &res_searched);
767 }
768
769
770 /**
771 * pcim_release_all_regions - Release all regions of a PCI-device
772 * @pdev: the PCI device
773 *
774 * Release all regions previously requested through pcim_request_region()
775 * or pcim_request_all_regions().
776 *
777 * Can be called from any context, i.e., not necessarily as a counterpart to
778 * pcim_request_all_regions().
779 */
pcim_release_all_regions(struct pci_dev * pdev)780 static void pcim_release_all_regions(struct pci_dev *pdev)
781 {
782 int bar;
783
784 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
785 pcim_release_region(pdev, bar);
786 }
787
788 /**
789 * pcim_request_all_regions - Request all regions
790 * @pdev: PCI device to map IO resources for
791 * @name: name of the driver requesting the resources
792 *
793 * Returns: 0 on success, negative error code on failure.
794 *
795 * Requested regions will automatically be released at driver detach. If
796 * desired, release individual regions with pcim_release_region() or all of
797 * them at once with pcim_release_all_regions().
798 */
pcim_request_all_regions(struct pci_dev * pdev,const char * name)799 int pcim_request_all_regions(struct pci_dev *pdev, const char *name)
800 {
801 int ret;
802 int bar;
803
804 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
805 ret = pcim_request_region(pdev, bar, name);
806 if (ret != 0)
807 goto err;
808 }
809
810 return 0;
811
812 err:
813 pcim_release_all_regions(pdev);
814
815 return ret;
816 }
817 EXPORT_SYMBOL(pcim_request_all_regions);
818
819 /**
820 * pcim_iomap_range - Create a ranged __iomap mapping within a PCI BAR
821 * @pdev: PCI device to map IO resources for
822 * @bar: Index of the BAR
823 * @offset: Offset from the begin of the BAR
824 * @len: Length in bytes for the mapping
825 *
826 * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
827 *
828 * Creates a new IO-Mapping within the specified @bar, ranging from @offset to
829 * @offset + @len.
830 *
831 * The mapping will automatically get unmapped on driver detach. If desired,
832 * release manually only with pcim_iounmap().
833 */
pcim_iomap_range(struct pci_dev * pdev,int bar,unsigned long offset,unsigned long len)834 void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
835 unsigned long offset, unsigned long len)
836 {
837 void __iomem *mapping;
838 struct pcim_addr_devres *res;
839
840 if (!pci_bar_index_is_valid(bar))
841 return IOMEM_ERR_PTR(-EINVAL);
842
843 res = pcim_addr_devres_alloc(pdev);
844 if (!res)
845 return IOMEM_ERR_PTR(-ENOMEM);
846
847 mapping = pci_iomap_range(pdev, bar, offset, len);
848 if (!mapping) {
849 pcim_addr_devres_free(res);
850 return IOMEM_ERR_PTR(-EINVAL);
851 }
852
853 res->type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
854 res->baseaddr = mapping;
855
856 /*
857 * Ranged mappings don't get added to the legacy-table, since the table
858 * only ever keeps track of whole BARs.
859 */
860
861 devres_add(&pdev->dev, res);
862 return mapping;
863 }
864 EXPORT_SYMBOL(pcim_iomap_range);
865