// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2016 Broadcom
 */

#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/slab.h>

/*
 * On 64-bit systems, we do a single ioremap for the whole config space
 * since we have enough virtual address range available.  On 32-bit, we
 * ioremap the config space for each bus individually.
 */
static const bool per_bus_mapping = !IS_ENABLED(CONFIG_64BIT);
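
/*
 * Note: with the standard ECAM layout each bus number owns a 1 MiB slice
 * of the window (256 devfns x 4 KiB of config space), so a full 256-bus
 * segment can need up to 256 MiB of virtual address space.  That is why
 * 32-bit kernels map each bus's 1 MiB slice separately instead of one
 * large contiguous window.
 */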

/*
 * Create a PCI config space window
 *  - reserve mem region
 *  - alloc struct pci_config_window with space for all mappings
 *  - ioremap the config space
 */
struct pci_config_window *pci_ecam_create(struct device *dev,
		struct resource *cfgres, struct resource *busr,
		const struct pci_ecam_ops *ops)
{
	unsigned int bus_shift = ops->bus_shift;
	struct pci_config_window *cfg;
	unsigned int bus_range, bus_range_max, bsz;
	struct resource *conflict;
	int i, err;

	if (busr->start > busr->end)
		return ERR_PTR(-EINVAL);

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return ERR_PTR(-ENOMEM);

	/* ECAM-compliant platforms need not supply ops->bus_shift */
	if (!bus_shift)
		bus_shift = PCIE_ECAM_BUS_SHIFT;

	cfg->parent = dev;
	cfg->ops = ops;
	cfg->busr.start = busr->start;
	cfg->busr.end = busr->end;
	cfg->busr.flags = IORESOURCE_BUS;
	bus_range = resource_size(&cfg->busr);
	bus_range_max = resource_size(cfgres) >> bus_shift;
	if (bus_range > bus_range_max) {
		bus_range = bus_range_max;
		cfg->busr.end = busr->start + bus_range - 1;
		dev_warn(dev, "ECAM area %pR can only accommodate %pR (reduced from %pR desired)\n",
			 cfgres, &cfg->busr, busr);
	}
	bsz = 1 << bus_shift;

	cfg->res.start = cfgres->start;
	cfg->res.end = cfgres->end;
	cfg->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	cfg->res.name = "PCI ECAM";

	conflict = request_resource_conflict(&iomem_resource, &cfg->res);
	if (conflict) {
		err = -EBUSY;
		dev_err(dev, "can't claim ECAM area %pR: address conflict with %s %pR\n",
			&cfg->res, conflict->name, conflict);
		goto err_exit;
	}

	if (per_bus_mapping) {
		cfg->winp = kcalloc(bus_range, sizeof(*cfg->winp), GFP_KERNEL);
		if (!cfg->winp)
			goto err_exit_malloc;
		for (i = 0; i < bus_range; i++) {
			cfg->winp[i] =
				pci_remap_cfgspace(cfgres->start + i * bsz,
						   bsz);
			if (!cfg->winp[i])
				goto err_exit_iomap;
		}
	} else {
		cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz);
		if (!cfg->win)
			goto err_exit_iomap;
	}

	if (ops->init) {
		err = ops->init(cfg);
		if (err)
			goto err_exit;
	}
	dev_info(dev, "ECAM at %pR for %pR\n", &cfg->res, &cfg->busr);
	return cfg;

err_exit_iomap:
	dev_err(dev, "ECAM ioremap failed\n");
err_exit_malloc:
	err = -ENOMEM;
err_exit:
	pci_ecam_free(cfg);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(pci_ecam_create);
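
/*
 * Usage sketch (hypothetical, not part of this file): a host bridge driver
 * that has already parsed its ECAM window ("cfgres") and bus range
 * ("busres") from firmware might create the window roughly like this,
 * mirroring what the common host bridge code does:
 *
 *	struct pci_config_window *cfg;
 *
 *	cfg = pci_ecam_create(dev, &cfgres, &busres, &pci_generic_ecam_ops);
 *	if (IS_ERR(cfg))
 *		return PTR_ERR(cfg);
 *	bridge->sysdata = cfg;
 *	bridge->ops = (struct pci_ops *)&cfg->ops->pci_ops;
 */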

void pci_ecam_free(struct pci_config_window *cfg)
{
	int i;

	if (per_bus_mapping) {
		if (cfg->winp) {
			for (i = 0; i < resource_size(&cfg->busr); i++)
				if (cfg->winp[i])
					iounmap(cfg->winp[i]);
			kfree(cfg->winp);
		}
	} else {
		if (cfg->win)
			iounmap(cfg->win);
	}
	if (cfg->res.parent)
		release_resource(&cfg->res);
	kfree(cfg);
}
EXPORT_SYMBOL_GPL(pci_ecam_free);
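
/*
 * Note that pci_ecam_create() uses pci_ecam_free() for its error unwinding
 * as well, so the checks above are written to cope with a partially
 * constructed pci_config_window (missing mappings, unclaimed resource).
 */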

/*
 * Function to implement the pci_ops ->map_bus method
 */
void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
			       int where)
{
	struct pci_config_window *cfg = bus->sysdata;
	unsigned int bus_shift = cfg->ops->bus_shift;
	unsigned int devfn_shift = cfg->ops->bus_shift - 8;
	unsigned int busn = bus->number;
	void __iomem *base;
	u32 bus_offset, devfn_offset;

	if (busn < cfg->busr.start || busn > cfg->busr.end)
		return NULL;

	busn -= cfg->busr.start;
	if (per_bus_mapping) {
		base = cfg->winp[busn];
		busn = 0;
	} else
		base = cfg->win;

	if (cfg->ops->bus_shift) {
		bus_offset = (busn & PCIE_ECAM_BUS_MASK) << bus_shift;
		devfn_offset = (devfn & PCIE_ECAM_DEVFN_MASK) << devfn_shift;
		where &= PCIE_ECAM_REG_MASK;

		return base + (bus_offset | devfn_offset | where);
	}

	return base + PCIE_ECAM_OFFSET(busn, devfn, where);
}
EXPORT_SYMBOL_GPL(pci_ecam_map_bus);
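
/*
 * Worked example for the compliant layout (bus_shift == 20): for the bus
 * one above cfg->busr.start, device 2, function 3 (devfn 0x13) and config
 * offset 0x10, the single-window case returns
 *
 *	base + PCIE_ECAM_OFFSET(1, 0x13, 0x10)
 *	     = base + ((1 << 20) | (0x13 << 12) | 0x10)
 *	     = base + 0x113010
 *
 * With per-bus mappings, busn is zeroed above and base already points at
 * that bus's 1 MiB window, so only the devfn and register offsets remain.
 */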

/* ECAM ops */
const struct pci_ecam_ops pci_generic_ecam_ops = {
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};
EXPORT_SYMBOL_GPL(pci_generic_ecam_ops);
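
/*
 * Sketch of how these generic ops are typically consumed (hypothetical,
 * modeled on drivers like pci-host-generic.c): a firmware-described host
 * bridge driver passes them along as OF match data, e.g.:
 *
 *	static const struct of_device_id gen_pci_of_match[] = {
 *		{ .compatible = "pci-host-ecam-generic",
 *		  .data = &pci_generic_ecam_ops },
 *		{ },
 *	};
 */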

#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
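/*
 * The quirk variants below are for ECAM regions that only tolerate aligned
 * 32-bit accesses.  The read32/write32 accessors emulate narrower accesses
 * on the containing dword: e.g. a 1-byte read at offset 0x0e becomes a
 * 32-bit read at 0x0c plus a shift and mask, and a sub-dword write becomes
 * a non-atomic read-modify-write that can clobber RW1C bits in the same
 * dword; hence they are marked non-compliant.
 */
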
/* ECAM ops for 32-bit access only (non-compliant) */
const struct pci_ecam_ops pci_32b_ops = {
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read32,
		.write		= pci_generic_config_write32,
	}
};

/* ECAM ops for 32-bit read only (non-compliant) */
const struct pci_ecam_ops pci_32b_read_ops = {
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read32,
		.write		= pci_generic_config_write,
	}
};
#endif