1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/kernel.h> 3 #include <linux/of.h> 4 #include <linux/of_device.h> 5 #include <linux/of_address.h> 6 #include <linux/of_iommu.h> 7 #include <linux/of_reserved_mem.h> 8 #include <linux/dma-direct.h> /* for bus_dma_region */ 9 #include <linux/dma-map-ops.h> 10 #include <linux/init.h> 11 #include <linux/mod_devicetable.h> 12 #include <linux/slab.h> 13 #include <linux/platform_device.h> 14 15 #include <asm/errno.h> 16 #include "of_private.h" 17 18 /** 19 * of_match_device - Tell if a struct device matches an of_device_id list 20 * @matches: array of of device match structures to search in 21 * @dev: the of device structure to match against 22 * 23 * Used by a driver to check whether an platform_device present in the 24 * system is in its list of supported devices. 25 */ 26 const struct of_device_id *of_match_device(const struct of_device_id *matches, 27 const struct device *dev) 28 { 29 if (!matches || !dev->of_node || dev->of_node_reused) 30 return NULL; 31 return of_match_node(matches, dev->of_node); 32 } 33 EXPORT_SYMBOL(of_match_device); 34 35 static void 36 of_dma_set_restricted_buffer(struct device *dev, struct device_node *np) 37 { 38 struct device_node *node, *of_node = dev->of_node; 39 int count, i; 40 41 if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL)) 42 return; 43 44 count = of_property_count_elems_of_size(of_node, "memory-region", 45 sizeof(u32)); 46 /* 47 * If dev->of_node doesn't exist or doesn't contain memory-region, try 48 * the OF node having DMA configuration. 49 */ 50 if (count <= 0) { 51 of_node = np; 52 count = of_property_count_elems_of_size( 53 of_node, "memory-region", sizeof(u32)); 54 } 55 56 for (i = 0; i < count; i++) { 57 node = of_parse_phandle(of_node, "memory-region", i); 58 /* 59 * There might be multiple memory regions, but only one 60 * restricted-dma-pool region is allowed. 61 */ 62 if (of_device_is_compatible(node, "restricted-dma-pool") && 63 of_device_is_available(node)) { 64 of_node_put(node); 65 break; 66 } 67 of_node_put(node); 68 } 69 70 /* 71 * Attempt to initialize a restricted-dma-pool region if one was found. 72 * Note that count can hold a negative error code. 73 */ 74 if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i)) 75 dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n"); 76 } 77 78 /** 79 * of_dma_configure_id - Setup DMA configuration 80 * @dev: Device to apply DMA configuration 81 * @np: Pointer to OF node having DMA configuration 82 * @force_dma: Whether device is to be set up by of_dma_configure() even if 83 * DMA capability is not explicitly described by firmware. 84 * @id: Optional const pointer value input id 85 * 86 * Try to get devices's DMA configuration from DT and update it 87 * accordingly. 88 * 89 * If platform code needs to use its own special DMA configuration, it 90 * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events 91 * to fix up DMA configuration. 92 */ 93 int of_dma_configure_id(struct device *dev, struct device_node *np, 94 bool force_dma, const u32 *id) 95 { 96 const struct bus_dma_region *map = NULL; 97 struct device_node *bus_np; 98 u64 mask, end = 0; 99 bool coherent, set_map = false; 100 int ret; 101 102 if (dev->dma_range_map) { 103 dev_dbg(dev, "dma_range_map already set\n"); 104 goto skip_map; 105 } 106 107 if (np == dev->of_node) 108 bus_np = __of_get_dma_parent(np); 109 else 110 bus_np = of_node_get(np); 111 112 ret = of_dma_get_range(bus_np, &map); 113 of_node_put(bus_np); 114 if (ret < 0) { 115 /* 116 * For legacy reasons, we have to assume some devices need 117 * DMA configuration regardless of whether "dma-ranges" is 118 * correctly specified or not. 119 */ 120 if (!force_dma) 121 return ret == -ENODEV ? 0 : ret; 122 } else { 123 /* Determine the overall bounds of all DMA regions */ 124 end = dma_range_map_max(map); 125 set_map = true; 126 } 127 skip_map: 128 /* 129 * If @dev is expected to be DMA-capable then the bus code that created 130 * it should have initialised its dma_mask pointer by this point. For 131 * now, we'll continue the legacy behaviour of coercing it to the 132 * coherent mask if not, but we'll no longer do so quietly. 133 */ 134 if (!dev->dma_mask) { 135 dev_warn(dev, "DMA mask not set\n"); 136 dev->dma_mask = &dev->coherent_dma_mask; 137 } 138 139 if (!end && dev->coherent_dma_mask) 140 end = dev->coherent_dma_mask; 141 else if (!end) 142 end = (1ULL << 32) - 1; 143 144 /* 145 * Limit coherent and dma mask based on size and default mask 146 * set by the driver. 147 */ 148 mask = DMA_BIT_MASK(ilog2(end) + 1); 149 dev->coherent_dma_mask &= mask; 150 *dev->dma_mask &= mask; 151 /* ...but only set bus limit and range map if we found valid dma-ranges earlier */ 152 if (set_map) { 153 dev->bus_dma_limit = end; 154 dev->dma_range_map = map; 155 } 156 157 coherent = of_dma_is_coherent(np); 158 dev_dbg(dev, "device is%sdma coherent\n", 159 coherent ? " " : " not "); 160 161 ret = of_iommu_configure(dev, np, id); 162 if (ret == -EPROBE_DEFER) { 163 /* Don't touch range map if it wasn't set from a valid dma-ranges */ 164 if (set_map) 165 dev->dma_range_map = NULL; 166 kfree(map); 167 return -EPROBE_DEFER; 168 } 169 /* Take all other IOMMU errors to mean we'll just carry on without it */ 170 dev_dbg(dev, "device is%sbehind an iommu\n", 171 !ret ? " " : " not "); 172 173 arch_setup_dma_ops(dev, coherent); 174 175 if (ret) 176 of_dma_set_restricted_buffer(dev, np); 177 178 return 0; 179 } 180 EXPORT_SYMBOL_GPL(of_dma_configure_id); 181 182 const void *of_device_get_match_data(const struct device *dev) 183 { 184 const struct of_device_id *match; 185 186 match = of_match_device(dev->driver->of_match_table, dev); 187 if (!match) 188 return NULL; 189 190 return match->data; 191 } 192 EXPORT_SYMBOL(of_device_get_match_data); 193 194 /** 195 * of_device_modalias - Fill buffer with newline terminated modalias string 196 * @dev: Calling device 197 * @str: Modalias string 198 * @len: Size of @str 199 */ 200 ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len) 201 { 202 ssize_t sl; 203 204 if (!dev || !dev->of_node || dev->of_node_reused) 205 return -ENODEV; 206 207 sl = of_modalias(dev->of_node, str, len - 2); 208 if (sl < 0) 209 return sl; 210 if (sl > len - 2) 211 return -ENOMEM; 212 213 str[sl++] = '\n'; 214 str[sl] = 0; 215 return sl; 216 } 217 EXPORT_SYMBOL_GPL(of_device_modalias); 218 219 /** 220 * of_device_uevent - Display OF related uevent information 221 * @dev: Device to display the uevent information for 222 * @env: Kernel object's userspace event reference to fill up 223 */ 224 void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env) 225 { 226 const char *compat, *type; 227 struct alias_prop *app; 228 struct property *p; 229 int seen = 0; 230 231 if ((!dev) || (!dev->of_node)) 232 return; 233 234 add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node); 235 add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node); 236 type = of_node_get_device_type(dev->of_node); 237 if (type) 238 add_uevent_var(env, "OF_TYPE=%s", type); 239 240 /* Since the compatible field can contain pretty much anything 241 * it's not really legal to split it out with commas. We split it 242 * up using a number of environment variables instead. */ 243 of_property_for_each_string(dev->of_node, "compatible", p, compat) { 244 add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat); 245 seen++; 246 } 247 add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen); 248 249 seen = 0; 250 mutex_lock(&of_mutex); 251 list_for_each_entry(app, &aliases_lookup, link) { 252 if (dev->of_node == app->np) { 253 add_uevent_var(env, "OF_ALIAS_%d=%s", seen, 254 app->alias); 255 seen++; 256 } 257 } 258 mutex_unlock(&of_mutex); 259 } 260 EXPORT_SYMBOL_GPL(of_device_uevent); 261 262 int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env) 263 { 264 int sl; 265 266 if ((!dev) || (!dev->of_node) || dev->of_node_reused) 267 return -ENODEV; 268 269 /* Devicetree modalias is tricky, we add it in 2 steps */ 270 if (add_uevent_var(env, "MODALIAS=")) 271 return -ENOMEM; 272 273 sl = of_modalias(dev->of_node, &env->buf[env->buflen-1], 274 sizeof(env->buf) - env->buflen); 275 if (sl < 0) 276 return sl; 277 if (sl >= (sizeof(env->buf) - env->buflen)) 278 return -ENOMEM; 279 env->buflen += sl; 280 281 return 0; 282 } 283 EXPORT_SYMBOL_GPL(of_device_uevent_modalias); 284 285 /** 286 * of_device_make_bus_id - Use the device node data to assign a unique name 287 * @dev: pointer to device structure that is linked to a device tree node 288 * 289 * This routine will first try using the translated bus address to 290 * derive a unique name. If it cannot, then it will prepend names from 291 * parent nodes until a unique name can be derived. 292 */ 293 void of_device_make_bus_id(struct device *dev) 294 { 295 struct device_node *node = dev->of_node; 296 const __be32 *reg; 297 u64 addr; 298 u32 mask; 299 300 /* Construct the name, using parent nodes if necessary to ensure uniqueness */ 301 while (node->parent) { 302 /* 303 * If the address can be translated, then that is as much 304 * uniqueness as we need. Make it the first component and return 305 */ 306 reg = of_get_property(node, "reg", NULL); 307 if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) { 308 if (!of_property_read_u32(node, "mask", &mask)) 309 dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn", 310 addr, ffs(mask) - 1, node, dev_name(dev)); 311 312 else 313 dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn", 314 addr, node, dev_name(dev)); 315 return; 316 } 317 318 /* format arguments only used if dev_name() resolves to NULL */ 319 dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s", 320 kbasename(node->full_name), dev_name(dev)); 321 node = node->parent; 322 } 323 } 324 EXPORT_SYMBOL_GPL(of_device_make_bus_id); 325