// SPDX-License-Identifier: GPL-2.0-only
/*
 * OF helpers for IOMMU
 *
 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/fsl/mc.h>

static int of_iommu_xlate(struct device *dev,
			  struct of_phandle_args *iommu_spec)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
	int ret;

	ops = iommu_ops_from_fwnode(fwnode);
	if ((ops && !ops->of_xlate) ||
	    !of_device_is_available(iommu_spec->np))
		return -ENODEV;

	ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
	if (ret)
		return ret;
	/*
	 * The otherwise-empty fwspec handily serves to indicate the specific
	 * IOMMU device we're waiting for, which will be useful if we ever get
	 * a proper probe-ordering dependency mechanism in future.
	 */
	if (!ops)
		return driver_deferred_probe_check_state(dev);

	if (!try_module_get(ops->owner))
		return -ENODEV;

	ret = ops->of_xlate(dev, iommu_spec);
	module_put(ops->owner);
	return ret;
}
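
/*
 * Illustrative sketch (not part of this file): ops->of_xlate above is a
 * driver-supplied callback that records the per-master IDs carried in the
 * "iommus" specifier. A hypothetical driver (the "my_iommu" name is an
 * assumption for illustration) with a one-cell specifier might implement
 * it roughly as:
 *
 *	static int my_iommu_of_xlate(struct device *dev,
 *				     struct of_phandle_args *args)
 *	{
 *		// One cell per specifier: args->args[0] is the stream ID
 *		if (args->args_count != 1)
 *			return -EINVAL;
 *
 *		return iommu_fwspec_add_ids(dev, args->args, 1);
 *	}
 */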
static int of_iommu_configure_dev_id(struct device_node *master_np,
				     struct device *dev,
				     const u32 *id)
{
	struct of_phandle_args iommu_spec = { .args_count = 1 };
	int err;

	err = of_map_id(master_np, *id, "iommu-map",
			"iommu-map-mask", &iommu_spec.np,
			iommu_spec.args);
	if (err)
		return err;

	err = of_iommu_xlate(dev, &iommu_spec);
	of_node_put(iommu_spec.np);
	return err;
}

static int of_iommu_configure_dev(struct device_node *master_np,
				  struct device *dev)
{
	struct of_phandle_args iommu_spec;
	int err = -ENODEV, idx = 0;

	while (!of_parse_phandle_with_args(master_np, "iommus",
					   "#iommu-cells",
					   idx, &iommu_spec)) {
		err = of_iommu_xlate(dev, &iommu_spec);
		of_node_put(iommu_spec.np);
		idx++;
		if (err)
			break;
	}

	return err;
}

struct of_pci_iommu_alias_info {
	struct device *dev;
	struct device_node *np;
};

static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct of_pci_iommu_alias_info *info = data;
	u32 input_id = alias;

	return of_iommu_configure_dev_id(info->np, info->dev, &input_id);
}

static int of_iommu_configure_device(struct device_node *master_np,
				     struct device *dev, const u32 *id)
{
	return (id) ? of_iommu_configure_dev_id(master_np, dev, id) :
		      of_iommu_configure_dev(master_np, dev);
}

/*
 * Returns:
 *  0 on success, an iommu was configured
 *  -ENODEV if the device does not have any IOMMU
 *  -EPROBE_DEFER if probing should be tried again
 *  -errno fatal errors
 */
int of_iommu_configure(struct device *dev, struct device_node *master_np,
		       const u32 *id)
{
	struct iommu_fwspec *fwspec;
	int err;

	if (!master_np)
		return -ENODEV;

	/* Serialise to make dev->iommu stable under our potential fwspec */
	mutex_lock(&iommu_probe_device_lock);
	fwspec = dev_iommu_fwspec_get(dev);
	if (fwspec) {
		if (fwspec->ops) {
			mutex_unlock(&iommu_probe_device_lock);
			return 0;
		}
		/* In the deferred case, start again from scratch */
		iommu_fwspec_free(dev);
	}

	/*
	 * We don't currently walk up the tree looking for a parent IOMMU.
	 * See the `Notes:' section of
	 * Documentation/devicetree/bindings/iommu/iommu.txt
	 */
	if (dev_is_pci(dev)) {
		struct of_pci_iommu_alias_info info = {
			.dev = dev,
			.np = master_np,
		};

		pci_request_acs();
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     of_pci_iommu_init, &info);
	} else {
		err = of_iommu_configure_device(master_np, dev, id);
	}
	mutex_unlock(&iommu_probe_device_lock);

	if (err == -ENODEV || err == -EPROBE_DEFER)
		return err;
	if (err)
		goto err_log;

	err = iommu_probe_device(dev);
	if (err)
		goto err_log;
	return 0;

err_log:
	dev_dbg(dev, "Adding to IOMMU failed: %pe\n", ERR_PTR(err));
	return err;
}
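
/*
 * Illustrative sketch (not part of this file): a firmware/bus layer such as
 * the OF DMA configuration path is expected to propagate -EPROBE_DEFER and
 * to fall back to plain DMA ops on -ENODEV. Roughly (local names are
 * assumptions, details elided):
 *
 *	ret = of_iommu_configure(dev, master_np, id);
 *	if (ret == -EPROBE_DEFER)
 *		return -EPROBE_DEFER;	// retry the master's probe later
 *	// -ENODEV simply means "no IOMMU": continue without translation
 */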
static enum iommu_resv_type __maybe_unused
iommu_resv_region_get_type(struct device *dev,
			   struct resource *phys,
			   phys_addr_t start, size_t length)
{
	phys_addr_t end = start + length - 1;

	/*
	 * IOMMU regions without an associated physical region cannot be
	 * mapped and are simply reservations.
	 */
	if (phys->start >= phys->end)
		return IOMMU_RESV_RESERVED;

	/* may be IOMMU_RESV_DIRECT_RELAXABLE for certain cases */
	if (start == phys->start && end == phys->end)
		return IOMMU_RESV_DIRECT;

	dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", phys,
		 &start, &end);
	return IOMMU_RESV_RESERVED;
}

/**
 * of_iommu_get_resv_regions - reserved region driver helper for device tree
 * @dev: device for which to get reserved regions
 * @list: reserved region list
 *
 * IOMMU drivers can use this to implement their .get_resv_regions() callback
 * for memory regions attached to a device tree node. See the reserved-memory
 * device tree bindings on how to use these:
 *
 * Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */
void of_iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
#if IS_ENABLED(CONFIG_OF_ADDRESS)
	struct of_phandle_iterator it;
	int err;

	of_for_each_phandle(&it, err, dev->of_node, "memory-region", NULL, 0) {
		const __be32 *maps, *end;
		struct resource phys;
		int size;

		memset(&phys, 0, sizeof(phys));

		/*
		 * The "reg" property is optional and can be omitted for
		 * reserved-memory regions that only represent reservations
		 * in the IOVA space, i.e. regions that should not be mapped.
		 */
		if (of_find_property(it.node, "reg", NULL)) {
			err = of_address_to_resource(it.node, 0, &phys);
			if (err < 0) {
				dev_err(dev, "failed to parse memory region %pOF: %d\n",
					it.node, err);
				continue;
			}
		}

		maps = of_get_property(it.node, "iommu-addresses", &size);
		if (!maps)
			continue;

		end = maps + size / sizeof(__be32);

		while (maps < end) {
			struct device_node *np;
			u32 phandle;

			phandle = be32_to_cpup(maps++);
			np = of_find_node_by_phandle(phandle);

			if (np == dev->of_node) {
				int prot = IOMMU_READ | IOMMU_WRITE;
				struct iommu_resv_region *region;
				enum iommu_resv_type type;
				phys_addr_t iova;
				size_t length;

				if (of_dma_is_coherent(dev->of_node))
					prot |= IOMMU_CACHE;

				maps = of_translate_dma_region(np, maps, &iova, &length);
				if (length == 0) {
					dev_warn(dev, "Cannot reserve IOVA region of 0 size\n");
					continue;
				}
				type = iommu_resv_region_get_type(dev, &phys, iova, length);

				region = iommu_alloc_resv_region(iova, length, prot, type,
								 GFP_KERNEL);
				if (region)
					list_add_tail(&region->list, list);
			}
		}
	}
#endif
}
EXPORT_SYMBOL(of_iommu_get_resv_regions);
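
/*
 * Illustrative sketch (not part of this file): an IOMMU driver would
 * typically chain this helper from its iommu_ops::get_resv_regions
 * callback ("my_iommu_get_resv_regions" is an assumed name):
 *
 *	static void my_iommu_get_resv_regions(struct device *dev,
 *					      struct list_head *head)
 *	{
 *		iommu_dma_get_resv_regions(dev, head);	// generic regions
 *		of_iommu_get_resv_regions(dev, head);	// DT "iommu-addresses"
 *	}
 *
 * The matching device tree construct pairs a reserved-memory node carrying
 * an "iommu-addresses" property with a "memory-region" reference from the
 * master, as described in the binding document referenced above.
 */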