// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
// Author: Marc Zyngier <marc.zyngier@arm.com>
// Copyright (C) 2022 Linutronix GmbH
// Copyright (C) 2022 Intel

#include <linux/acpi_iort.h>
#include <linux/of_address.h>
#include <linux/pci.h>

#include "irq-gic-its-msi-parent.h"
#include <linux/irqchip/irq-msi-lib.h>

#define ITS_MSI_FLAGS_REQUIRED  (MSI_FLAG_USE_DEF_DOM_OPS |    \
                                 MSI_FLAG_USE_DEF_CHIP_OPS |   \
                                 MSI_FLAG_PCI_MSI_MASK_PARENT)

#define ITS_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK |      \
                                 MSI_FLAG_PCI_MSIX |           \
                                 MSI_FLAG_MULTI_PCI_MSI)

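/*
 * Look up the "ns-translate" register region of an ITS translate frame node
 * and return its physical base address. The result is handed to the parent
 * ITS domain as the translate frame address (scratchpad[1]) below.
 */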
static int its_translate_frame_address(struct device_node *msi_node, phys_addr_t *pa)
{
        struct resource res;
        int ret;

        ret = of_property_match_string(msi_node, "reg-names", "ns-translate");
        if (ret < 0)
                return ret;

        ret = of_address_to_resource(msi_node, ret, &res);
        if (ret)
                return ret;

        *pa = res.start;
        return 0;
}

#ifdef CONFIG_PCI_MSI
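/*
 * pci_walk_bus() callback: accumulate an upper bound of the number of
 * vectors each device behind an aliasing bridge may request, taking the
 * larger of its MSI and MSI-X capability counts.
 */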
static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
{
        int msi, msix, *count = data;

        msi = max(pci_msi_vec_count(pdev), 0);
        msix = max(pci_msix_vec_count(pdev), 0);
        *count += max(msi, msix);

        return 0;
}

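/*
 * pci_for_each_dma_alias() callback: remember the most recently visited
 * alias. After the walk this is the topmost device in the alias chain,
 * used below to detect whether the device sits behind an aliasing bridge.
 */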
static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
{
        struct pci_dev **alias_dev = data;

        *alias_dev = pdev;

        return 0;
}

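/*
 * PCI msi_prepare() callback: work out the DeviceID and the vector count
 * for a PCI device, then hand the actual allocation to the parent (ITS)
 * domain's msi_prepare() callback.
 */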
static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
                               int nvec, msi_alloc_info_t *info)
{
        struct pci_dev *pdev, *alias_dev;
        struct msi_domain_info *msi_info;
        int alias_count = 0, minnvec = 1;

        if (!dev_is_pci(dev))
                return -EINVAL;

        pdev = to_pci_dev(dev);
        /*
         * If pdev is downstream of any aliasing bridges, take an upper
         * bound of how many other vectors could map to the same DevID.
         * Also tell the ITS that the signalling will come from a proxy
         * device, and that special allocation rules apply.
         */
        pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev);
        if (alias_dev != pdev) {
                if (alias_dev->subordinate)
                        pci_walk_bus(alias_dev->subordinate,
                                     its_pci_msi_vec_count, &alias_count);
                info->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
        }

        /* ITS specific DeviceID, as the core ITS ignores dev. */
        info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain->parent, pdev);

        /*
         * Always allocate a power of 2, and special case device 0 for
         * broken systems where the DevID is not wired (and all devices
         * appear as DevID 0). For that reason, we generously allocate a
         * minimum of 32 MSIs for DevID 0. If you want more because all
         * your devices are aliasing to DevID 0, consider fixing your HW.
         */
        nvec = max(nvec, alias_count);
        if (!info->scratchpad[0].ul)
                minnvec = 32;
        nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));

        msi_info = msi_get_domain_info(domain->parent);
        return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
}

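/*
 * GICv5 variant of the PCI msi_prepare() callback: in addition to the
 * DeviceID, the parent domain also needs the physical address of the ITS
 * translate frame serving this device, passed via scratchpad[1].
 */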
static int its_v5_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
                                  int nvec, msi_alloc_info_t *info)
{
        struct device_node *msi_node = NULL;
        struct msi_domain_info *msi_info;
        struct pci_dev *pdev;
        phys_addr_t pa;
        u32 rid;
        int ret;

        if (!dev_is_pci(dev))
                return -EINVAL;

        pdev = to_pci_dev(dev);

        rid = pci_msi_map_rid_ctlr_node(pdev, &msi_node);
        if (!msi_node)
                return -ENODEV;

        /* Drop the node reference regardless of the lookup result */
        ret = its_translate_frame_address(msi_node, &pa);
        of_node_put(msi_node);
        if (ret)
                return -ENODEV;

        /* ITS specific DeviceID */
        info->scratchpad[0].ul = rid;
        /* ITS translate frame physical address */
        info->scratchpad[1].ul = pa;

        /* Always allocate power of two vectors */
        nvec = roundup_pow_of_two(nvec);

        msi_info = msi_get_domain_info(domain->parent);
        return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
}
#else /* CONFIG_PCI_MSI */
#define its_pci_msi_prepare     NULL
#define its_v5_pci_msi_prepare  NULL
#endif /* !CONFIG_PCI_MSI */

static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
                              u32 *dev_id)
{
        int ret, index = 0;

        /* Suck the DeviceID out of the msi-parent property */
        do {
                struct of_phandle_args args;

                ret = of_parse_phandle_with_args(dev->of_node,
                                                 "msi-parent", "#msi-cells",
                                                 index, &args);
                if (ret)
                        break;

                if (args.np == irq_domain_get_of_node(domain)) {
                        if (WARN_ON(args.args_count != 1))
                                return -EINVAL;
                        *dev_id = args.args[0];
                        break;
                }
                index++;
        } while (!ret);

        if (ret) {
                struct device_node *np = NULL;

                ret = of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &np, dev_id);
                if (np)
                        of_node_put(np);
        }

        return ret;
}

static int of_v5_pmsi_get_msi_info(struct irq_domain *domain, struct device *dev,
                                   u32 *dev_id, phys_addr_t *pa)
{
        int ret, index = 0;

        /*
         * Retrieve the DeviceID and the ITS translate frame node pointer
         * out of the msi-parent property.
         */
        do {
                struct of_phandle_args args;

                ret = of_parse_phandle_with_args(dev->of_node,
                                                 "msi-parent", "#msi-cells",
                                                 index, &args);
                if (ret)
                        break;
                /*
                 * The IRQ domain fwnode is the msi controller parent
                 * in GICv5 (where the msi controller nodes are the
                 * ITS translate frames).
                 */
                if (args.np->parent == irq_domain_get_of_node(domain)) {
                        if (WARN_ON(args.args_count != 1))
                                return -EINVAL;
                        *dev_id = args.args[0];

                        ret = its_translate_frame_address(args.np, pa);
                        if (ret)
                                return -ENODEV;
                        break;
                }
                index++;
        } while (!ret);

        if (ret) {
                struct device_node *np = NULL;

                ret = of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &np, dev_id);
                if (np) {
                        ret = its_translate_frame_address(np, pa);
                        of_node_put(np);
                }
        }

        return ret;
}

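/*
 * Weak fallback returning failure. A strong definition, if present
 * (e.g. provided by the ACPI IORT code), takes precedence.
 */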
int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
        return -1;
}

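/*
 * Platform-MSI msi_prepare() callback: resolve the DeviceID from either DT
 * or IORT and forward the (power of two, at least 32) vector request to
 * the parent ITS domain.
 */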
static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
                            int nvec, msi_alloc_info_t *info)
{
        struct msi_domain_info *msi_info;
        u32 dev_id;
        int ret;

        if (dev->of_node)
                ret = of_pmsi_get_dev_id(domain->parent, dev, &dev_id);
        else
                ret = iort_pmsi_get_dev_id(dev, &dev_id);
        if (ret)
                return ret;

        /* ITS specific DeviceID, as the core ITS ignores dev. */
        info->scratchpad[0].ul = dev_id;

        /* Allocate at least 32 MSIs, and always as a power of 2 */
        nvec = max_t(int, 32, roundup_pow_of_two(nvec));

        msi_info = msi_get_domain_info(domain->parent);
        return msi_info->ops->msi_prepare(domain->parent,
                                          dev, nvec, info);
}

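/*
 * GICv5 platform-MSI msi_prepare() callback: DT only. Besides the DeviceID,
 * also pass the ITS translate frame physical address via scratchpad[1].
 */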
static int its_v5_pmsi_prepare(struct irq_domain *domain, struct device *dev,
                               int nvec, msi_alloc_info_t *info)
{
        struct msi_domain_info *msi_info;
        phys_addr_t pa;
        u32 dev_id;
        int ret;

        if (!dev->of_node)
                return -ENODEV;

        ret = of_v5_pmsi_get_msi_info(domain->parent, dev, &dev_id, &pa);
        if (ret)
                return ret;

        /* ITS specific DeviceID */
        info->scratchpad[0].ul = dev_id;
        /* ITS translate frame physical address */
        info->scratchpad[1].ul = pa;

        /* Allocate always as a power of 2 */
        nvec = roundup_pow_of_two(nvec);

        msi_info = msi_get_domain_info(domain->parent);
        return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
}

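/* Relay the teardown of a prepared allocation to the parent ITS domain. */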
static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info)
{
        struct msi_domain_info *msi_info;

        msi_info = msi_get_domain_info(domain->parent);
        msi_info->ops->msi_teardown(domain->parent, info);
}

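/*
 * Per-device MSI domain setup for the GICv3 ITS: let the generic MSI lib
 * fill in the defaults, then override msi_prepare()/msi_teardown() with the
 * ITS specific callbacks selected by bus token.
 */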
static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
                                  struct irq_domain *real_parent, struct msi_domain_info *info)
{
        if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
                return false;

        switch (info->bus_token) {
        case DOMAIN_BUS_PCI_DEVICE_MSI:
        case DOMAIN_BUS_PCI_DEVICE_MSIX:
                /*
                 * FIXME: This probably should be done after a (not yet
                 * existing) post domain creation callback once to make
                 * support for dynamic post-enable MSI-X allocations
                 * work without having to reevaluate the domain size
                 * over and over. It is known already at allocation
                 * time via info->hwsize.
                 *
                 * That should work perfectly fine for MSI/MSI-X but needs
                 * some thoughts for purely software managed MSI domains
                 * where the index space is only limited artificially via
                 * %MSI_MAX_INDEX.
                 */
                info->ops->msi_prepare = its_pci_msi_prepare;
                info->ops->msi_teardown = its_msi_teardown;
                break;
        case DOMAIN_BUS_DEVICE_MSI:
        case DOMAIN_BUS_WIRED_TO_MSI:
                /*
                 * FIXME: See the above PCI prepare comment. The domain
                 * size is also known at domain creation time.
                 */
                info->ops->msi_prepare = its_pmsi_prepare;
                info->ops->msi_teardown = its_msi_teardown;
                break;
        default:
                /* Confused. How did the lib return true? */
                WARN_ON_ONCE(1);
                return false;
        }

        return true;
}

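/*
 * GICv5 counterpart of the above: same structure, but wires up the v5
 * prepare callbacks, which additionally convey the translate frame address.
 */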
static bool its_v5_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
                                     struct irq_domain *real_parent, struct msi_domain_info *info)
{
        if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
                return false;

        switch (info->bus_token) {
        case DOMAIN_BUS_PCI_DEVICE_MSI:
        case DOMAIN_BUS_PCI_DEVICE_MSIX:
                info->ops->msi_prepare = its_v5_pci_msi_prepare;
                info->ops->msi_teardown = its_msi_teardown;
                break;
        case DOMAIN_BUS_DEVICE_MSI:
        case DOMAIN_BUS_WIRED_TO_MSI:
                info->ops->msi_prepare = its_v5_pmsi_prepare;
                info->ops->msi_teardown = its_msi_teardown;
                break;
        default:
                /* Confused. How did the lib return true? */
                WARN_ON_ONCE(1);
                return false;
        }

        return true;
}

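/*
 * Parent ops consumed by the GICv3/GICv5 ITS drivers. A minimal usage
 * sketch (assuming the conventional MSI parent setup, not copied from the
 * ITS drivers themselves):
 *
 *      its_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
 *      its_domain->msi_parent_ops = &gic_v3_its_msi_parent_ops;
 *
 * Per-device MSI domains created on top of the ITS domain then get their
 * msi_prepare()/msi_teardown() callbacks filled in via init_dev_msi_info().
 */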
const struct msi_parent_ops gic_v3_its_msi_parent_ops = {
        .supported_flags        = ITS_MSI_FLAGS_SUPPORTED,
        .required_flags         = ITS_MSI_FLAGS_REQUIRED,
        .chip_flags             = MSI_CHIP_FLAG_SET_EOI,
        .bus_select_token       = DOMAIN_BUS_NEXUS,
        .bus_select_mask        = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
        .prefix                 = "ITS-",
        .init_dev_msi_info      = its_init_dev_msi_info,
};

const struct msi_parent_ops gic_v5_its_msi_parent_ops = {
        .supported_flags        = ITS_MSI_FLAGS_SUPPORTED,
        .required_flags         = ITS_MSI_FLAGS_REQUIRED,
        .chip_flags             = MSI_CHIP_FLAG_SET_EOI,
        .bus_select_token       = DOMAIN_BUS_NEXUS,
        .bus_select_mask        = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
        .prefix                 = "ITS-v5-",
        .init_dev_msi_info      = its_v5_init_dev_msi_info,
};