// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
// Author: Marc Zyngier <marc.zyngier@arm.com>
// Copyright (C) 2022 Linutronix GmbH
// Copyright (C) 2022 Intel

#include <linux/acpi_iort.h>
#include <linux/of_address.h>
#include <linux/pci.h>

#include "irq-gic-its-msi-parent.h"
#include <linux/irqchip/irq-msi-lib.h>

#define ITS_MSI_FLAGS_REQUIRED	(MSI_FLAG_USE_DEF_DOM_OPS |	\
				 MSI_FLAG_USE_DEF_CHIP_OPS |	\
				 MSI_FLAG_PCI_MSI_MASK_PARENT)

#define ITS_MSI_FLAGS_SUPPORTED	(MSI_GENERIC_FLAGS_MASK |	\
				 MSI_FLAG_PCI_MSIX |		\
				 MSI_FLAG_MULTI_PCI_MSI)

/* Look up the physical address of the "ns-translate" register frame of an MSI controller node */
static int its_translate_frame_address(struct device_node *msi_node, phys_addr_t *pa)
{
	struct resource res;
	int ret;

	ret = of_property_match_string(msi_node, "reg-names", "ns-translate");
	if (ret < 0)
		return ret;

	ret = of_address_to_resource(msi_node, ret, &res);
	if (ret)
		return ret;

	*pa = res.start;
	return 0;
}

#ifdef CONFIG_PCI_MSI
static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
{
	int msi, msix, *count = data;

	msi = max(pci_msi_vec_count(pdev), 0);
	msix = max(pci_msix_vec_count(pdev), 0);
	*count += max(msi, msix);

	return 0;
}

static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	struct pci_dev **alias_dev = data;

	*alias_dev = pdev;

	return 0;
}

static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
			       int nvec, msi_alloc_info_t *info)
{
	struct pci_dev *pdev, *alias_dev;
	struct msi_domain_info *msi_info;
	int alias_count = 0, minnvec = 1;

	if (!dev_is_pci(dev))
		return -EINVAL;

	pdev = to_pci_dev(dev);
	/*
	 * If pdev is downstream of any aliasing bridges, take an upper
	 * bound of how many other vectors could map to the same DevID.
	 * Also tell the ITS that the signalling will come from a proxy
	 * device, and that special allocation rules apply.
	 */
	pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev);
	if (alias_dev != pdev) {
		if (alias_dev->subordinate)
			pci_walk_bus(alias_dev->subordinate,
				     its_pci_msi_vec_count, &alias_count);
		info->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
	}

	/* ITS specific DeviceID, as the core ITS ignores dev. */
	info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain->parent, pdev);

	/*
	 * Always allocate a power of 2, and special case device 0 for
	 * broken systems where the DevID is not wired (and all devices
	 * appear as DevID 0). For that reason, we generously allocate a
	 * minimum of 32 MSIs for DevID 0. If you want more because all
	 * your devices are aliasing to DevID 0, consider fixing your HW.
	 */
	nvec = max(nvec, alias_count);
	if (!info->scratchpad[0].ul)
		minnvec = 32;
	nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));

	msi_info = msi_get_domain_info(domain->parent);
	return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
}

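/*
 * GICv5 ITS msi_prepare() callback for PCI devices: resolve the device's
 * DeviceID (RID) and the MSI controller node it maps to, then look up the
 * physical address of that controller's ITS translate frame ("ns-translate"
 * register region). Both values are passed on to the core ITS driver via
 * the scratchpad fields of the allocation info.
 */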
static int its_v5_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *info)
{
	struct device_node *msi_node = NULL;
	struct msi_domain_info *msi_info;
	struct pci_dev *pdev;
	phys_addr_t pa;
	u32 rid;
	int ret;

	if (!dev_is_pci(dev))
		return -EINVAL;

	pdev = to_pci_dev(dev);

	rid = pci_msi_map_rid_ctlr_node(pdev, &msi_node);
	if (!msi_node)
		return -ENODEV;

	/* Drop the node reference once the translate frame address is known */
	ret = its_translate_frame_address(msi_node, &pa);
	of_node_put(msi_node);
	if (ret)
		return -ENODEV;

	/* ITS specific DeviceID */
	info->scratchpad[0].ul = rid;
	/* ITS translate frame physical address */
	info->scratchpad[1].ul = pa;

	/* Always allocate power of two vectors */
	nvec = roundup_pow_of_two(nvec);

	msi_info = msi_get_domain_info(domain->parent);
	return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
}
#else /* CONFIG_PCI_MSI */
#define its_pci_msi_prepare	NULL
#define its_v5_pci_msi_prepare	NULL
#endif /* !CONFIG_PCI_MSI */

static int of_pmsi_get_msi_info(struct irq_domain *domain, struct device *dev, u32 *dev_id,
				phys_addr_t *pa)
{
	struct of_phandle_iterator it;
	int ret;

	/* Suck the DeviceID out of the msi-parent property */
	of_for_each_phandle(&it, ret, dev->of_node, "msi-parent", "#msi-cells", -1) {
		/* GICv5 ITS domain matches the MSI controller node parent */
		struct device_node *np __free(device_node) = pa ? of_get_parent(it.node)
								: of_node_get(it.node);

		if (np == irq_domain_get_of_node(domain)) {
			u32 args;

			if (WARN_ON(of_phandle_iterator_args(&it, &args, 1) != 1))
				ret = -EINVAL;

			if (!ret && pa)
				ret = its_translate_frame_address(it.node, pa);

			if (!ret)
				*dev_id = args;

			of_node_put(it.node);
			return ret;
		}
	}

	struct device_node *msi_ctrl __free(device_node) = NULL;

	return of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &msi_ctrl, dev_id);
}

int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	return -1;
}

static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *info)
{
	struct msi_domain_info *msi_info;
	u32 dev_id;
	int ret;

	if (dev->of_node)
		ret = of_pmsi_get_msi_info(domain->parent, dev, &dev_id, NULL);
	else
		ret = iort_pmsi_get_dev_id(dev, &dev_id);
	if (ret)
		return ret;

	/* ITS specific DeviceID, as the core ITS ignores dev. */
	info->scratchpad[0].ul = dev_id;

	/* Allocate at least 32 MSIs, and always as a power of 2 */
	nvec = max_t(int, 32, roundup_pow_of_two(nvec));

	msi_info = msi_get_domain_info(domain->parent);
	return msi_info->ops->msi_prepare(domain->parent,
					  dev, nvec, info);
}

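/*
 * GICv5 ITS msi_prepare() callback for platform devices: DT only. Both the
 * DeviceID and the ITS translate frame address are resolved from the
 * device's msi-parent property.
 */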
static int its_v5_pmsi_prepare(struct irq_domain *domain, struct device *dev,
			       int nvec, msi_alloc_info_t *info)
{
	struct msi_domain_info *msi_info;
	phys_addr_t pa;
	u32 dev_id;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	ret = of_pmsi_get_msi_info(domain->parent, dev, &dev_id, &pa);
	if (ret)
		return ret;

	/* ITS specific DeviceID */
	info->scratchpad[0].ul = dev_id;
	/* ITS translate frame physical address */
	info->scratchpad[1].ul = pa;

	/* Allocate always as a power of 2 */
	nvec = roundup_pow_of_two(nvec);

	msi_info = msi_get_domain_info(domain->parent);
	return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
}

static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info)
{
	struct msi_domain_info *msi_info;

	msi_info = msi_get_domain_info(domain->parent);
	msi_info->ops->msi_teardown(domain->parent, info);
}

static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				  struct irq_domain *real_parent, struct msi_domain_info *info)
{
	if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
		return false;

	switch (info->bus_token) {
	case DOMAIN_BUS_PCI_DEVICE_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSIX:
		/*
		 * FIXME: This probably should be done after a (not yet
		 * existing) post domain creation callback once to make
		 * support for dynamic post-enable MSI-X allocations
		 * work without having to reevaluate the domain size
		 * over and over. It is known already at allocation
		 * time via info->hwsize.
		 *
		 * That should work perfectly fine for MSI/MSI-X but needs
		 * some thoughts for purely software managed MSI domains
		 * where the index space is only limited artificially via
		 * %MSI_MAX_INDEX.
		 */
		info->ops->msi_prepare = its_pci_msi_prepare;
		info->ops->msi_teardown = its_msi_teardown;
		break;
	case DOMAIN_BUS_DEVICE_MSI:
	case DOMAIN_BUS_WIRED_TO_MSI:
		/*
		 * FIXME: See the above PCI prepare comment. The domain
		 * size is also known at domain creation time.
		 */
		info->ops->msi_prepare = its_pmsi_prepare;
		info->ops->msi_teardown = its_msi_teardown;
		break;
	default:
		/* Confused. How did the lib return true? */
		WARN_ON_ONCE(1);
		return false;
	}

	return true;
}

static bool its_v5_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				     struct irq_domain *real_parent, struct msi_domain_info *info)
{
	if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
		return false;

	switch (info->bus_token) {
	case DOMAIN_BUS_PCI_DEVICE_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSIX:
		info->ops->msi_prepare = its_v5_pci_msi_prepare;
		info->ops->msi_teardown = its_msi_teardown;
		break;
	case DOMAIN_BUS_DEVICE_MSI:
	case DOMAIN_BUS_WIRED_TO_MSI:
		info->ops->msi_prepare = its_v5_pmsi_prepare;
		info->ops->msi_teardown = its_msi_teardown;
		break;
	default:
		/* Confused. How did the lib return true? */
		WARN_ON_ONCE(1);
		return false;
	}

	return true;
}

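/*
 * msi_parent_ops exposed by the GICv3 and GICv5 ITS irqdomains. The MSI
 * library matches PCI and platform MSI child domains against these and
 * initializes per-device domains through the init_dev_msi_info() callbacks
 * above.
 */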
const struct msi_parent_ops gic_v3_its_msi_parent_ops = {
	.supported_flags	= ITS_MSI_FLAGS_SUPPORTED,
	.required_flags		= ITS_MSI_FLAGS_REQUIRED,
	.chip_flags		= MSI_CHIP_FLAG_SET_EOI,
	.bus_select_token	= DOMAIN_BUS_NEXUS,
	.bus_select_mask	= MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
	.prefix			= "ITS-",
	.init_dev_msi_info	= its_init_dev_msi_info,
};

const struct msi_parent_ops gic_v5_its_msi_parent_ops = {
	.supported_flags	= ITS_MSI_FLAGS_SUPPORTED,
	.required_flags		= ITS_MSI_FLAGS_REQUIRED,
	.chip_flags		= MSI_CHIP_FLAG_SET_EOI,
	.bus_select_token	= DOMAIN_BUS_NEXUS,
	.bus_select_mask	= MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
	.prefix			= "ITS-v5-",
	.init_dev_msi_info	= its_v5_init_dev_msi_info,
};