xref: /linux/drivers/of/device.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/of.h>
4 #include <linux/of_device.h>
5 #include <linux/of_address.h>
6 #include <linux/of_iommu.h>
7 #include <linux/of_reserved_mem.h>
8 #include <linux/dma-direct.h> /* for bus_dma_region */
9 #include <linux/dma-map-ops.h>
10 #include <linux/init.h>
11 #include <linux/mod_devicetable.h>
12 #include <linux/slab.h>
13 #include <linux/platform_device.h>
14 
15 #include <asm/errno.h>
16 #include "of_private.h"
17 
18 /**
19  * of_match_device - Tell if a struct device matches an of_device_id list
20  * @matches: array of of device match structures to search in
21  * @dev: the of device structure to match against
22  *
23  * Used by a driver to check whether an platform_device present in the
24  * system is in its list of supported devices.
25  */
of_match_device(const struct of_device_id * matches,const struct device * dev)26 const struct of_device_id *of_match_device(const struct of_device_id *matches,
27 					   const struct device *dev)
28 {
29 	if (!matches || !dev->of_node || dev->of_node_reused)
30 		return NULL;
31 	return of_match_node(matches, dev->of_node);
32 }
33 EXPORT_SYMBOL(of_match_device);
34 
35 static void
of_dma_set_restricted_buffer(struct device * dev,struct device_node * np)36 of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
37 {
38 	struct device_node *node, *of_node = dev->of_node;
39 	int count, i;
40 
41 	if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
42 		return;
43 
44 	count = of_property_count_elems_of_size(of_node, "memory-region",
45 						sizeof(u32));
46 	/*
47 	 * If dev->of_node doesn't exist or doesn't contain memory-region, try
48 	 * the OF node having DMA configuration.
49 	 */
50 	if (count <= 0) {
51 		of_node = np;
52 		count = of_property_count_elems_of_size(
53 			of_node, "memory-region", sizeof(u32));
54 	}
55 
56 	for (i = 0; i < count; i++) {
57 		node = of_parse_phandle(of_node, "memory-region", i);
58 		/*
59 		 * There might be multiple memory regions, but only one
60 		 * restricted-dma-pool region is allowed.
61 		 */
62 		if (of_device_is_compatible(node, "restricted-dma-pool") &&
63 		    of_device_is_available(node)) {
64 			of_node_put(node);
65 			break;
66 		}
67 		of_node_put(node);
68 	}
69 
70 	/*
71 	 * Attempt to initialize a restricted-dma-pool region if one was found.
72 	 * Note that count can hold a negative error code.
73 	 */
74 	if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
75 		dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
76 }
77 
/**
 * of_dma_configure_id - Setup DMA configuration
 * @dev:	Device to apply DMA configuration
 * @np:		Pointer to OF node having DMA configuration
 * @force_dma:  Whether device is to be set up by of_dma_configure() even if
 *		DMA capability is not explicitly described by firmware.
 * @id:		Optional const pointer value input id
 *
 * Try to get devices's DMA configuration from DT and update it
 * accordingly.
 *
 * If platform code needs to use its own special DMA configuration, it
 * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
 * to fix up DMA configuration.
 *
 * Return: 0 on success, -EPROBE_DEFER when the IOMMU is not ready yet,
 * or the "dma-ranges" parsing error when it failed and @force_dma is false
 * (except -ENODEV, which is treated as "no DMA expected" and returns 0).
 */
int of_dma_configure_id(struct device *dev, struct device_node *np,
			bool force_dma, const u32 *id)
{
	const struct bus_dma_region *map = NULL;
	struct device_node *bus_np;
	u64 mask, end = 0;
	bool coherent, set_map = false;
	int ret;

	/*
	 * "dma-ranges" is a bus property: look at the DMA parent when @np is
	 * the device's own node, otherwise @np already is the node carrying
	 * the DMA configuration.
	 */
	if (np == dev->of_node)
		bus_np = __of_get_dma_parent(np);
	else
		bus_np = of_node_get(np);

	ret = of_dma_get_range(bus_np, &map);
	of_node_put(bus_np);
	if (ret < 0) {
		/*
		 * For legacy reasons, we have to assume some devices need
		 * DMA configuration regardless of whether "dma-ranges" is
		 * correctly specified or not.
		 */
		if (!force_dma)
			return ret == -ENODEV ? 0 : ret;
	} else {
		/* Determine the overall bounds of all DMA regions */
		end = dma_range_map_max(map);
		set_map = true;
	}

	/*
	 * If @dev is expected to be DMA-capable then the bus code that created
	 * it should have initialised its dma_mask pointer by this point. For
	 * now, we'll continue the legacy behaviour of coercing it to the
	 * coherent mask if not, but we'll no longer do so quietly.
	 */
	if (!dev->dma_mask) {
		dev_warn(dev, "DMA mask not set\n");
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	/* No usable "dma-ranges": fall back to the coherent mask, then 32 bits */
	if (!end && dev->coherent_dma_mask)
		end = dev->coherent_dma_mask;
	else if (!end)
		end = (1ULL << 32) - 1;

	/*
	 * Limit coherent and dma mask based on size and default mask
	 * set by the driver.
	 */
	mask = DMA_BIT_MASK(ilog2(end) + 1);
	dev->coherent_dma_mask &= mask;
	*dev->dma_mask &= mask;
	/* ...but only set bus limit and range map if we found valid dma-ranges earlier */
	if (set_map) {
		dev->bus_dma_limit = end;
		dev->dma_range_map = map;	/* @dev now owns @map */
	}

	coherent = of_dma_is_coherent(np);
	dev_dbg(dev, "device is%sdma coherent\n",
		coherent ? " " : " not ");

	ret = of_iommu_configure(dev, np, id);
	if (ret == -EPROBE_DEFER) {
		/* Don't touch range map if it wasn't set from a valid dma-ranges */
		if (set_map)
			dev->dma_range_map = NULL;
		kfree(map);	/* ownership reverts to us on deferral */
		return -EPROBE_DEFER;
	}
	/* Take all other IOMMU errors to mean we'll just carry on without it */
	dev_dbg(dev, "device is%sbehind an iommu\n",
		!ret ? " " : " not ");

	arch_setup_dma_ops(dev, coherent);

	/* No IOMMU: try a restricted DMA pool if the DT provides one */
	if (ret)
		of_dma_set_restricted_buffer(dev, np);

	return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
176 
of_device_get_match_data(const struct device * dev)177 const void *of_device_get_match_data(const struct device *dev)
178 {
179 	const struct of_device_id *match;
180 
181 	match = of_match_device(dev->driver->of_match_table, dev);
182 	if (!match)
183 		return NULL;
184 
185 	return match->data;
186 }
187 EXPORT_SYMBOL(of_device_get_match_data);
188 
189 /**
190  * of_device_modalias - Fill buffer with newline terminated modalias string
191  * @dev:	Calling device
192  * @str:	Modalias string
193  * @len:	Size of @str
194  */
of_device_modalias(struct device * dev,char * str,ssize_t len)195 ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
196 {
197 	ssize_t sl;
198 
199 	if (!dev || !dev->of_node || dev->of_node_reused)
200 		return -ENODEV;
201 
202 	sl = of_modalias(dev->of_node, str, len - 2);
203 	if (sl < 0)
204 		return sl;
205 	if (sl > len - 2)
206 		return -ENOMEM;
207 
208 	str[sl++] = '\n';
209 	str[sl] = 0;
210 	return sl;
211 }
212 EXPORT_SYMBOL_GPL(of_device_modalias);
213 
/**
 * of_device_uevent - Display OF related uevent information
 * @dev:	Device to display the uevent information for
 * @env:	Kernel object's userspace event reference to fill up
 */
void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const char *compat, *type;
	struct alias_prop *app;
	struct property *p;
	int seen = 0;

	if ((!dev) || (!dev->of_node))
		return;

	/* Node name, full path and (optional) device_type */
	add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
	add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
	type = of_node_get_device_type(dev->of_node);
	if (type)
		add_uevent_var(env, "OF_TYPE=%s", type);

	/* Since the compatible field can contain pretty much anything
	 * it's not really legal to split it out with commas. We split it
	 * up using a number of environment variables instead. */
	of_property_for_each_string(dev->of_node, "compatible", p, compat) {
		add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat);
		seen++;
	}
	add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen);

	/*
	 * Emit one OF_ALIAS_<n> variable per alias pointing at this node;
	 * of_mutex guards the global aliases_lookup list.
	 */
	seen = 0;
	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (dev->of_node == app->np) {
			add_uevent_var(env, "OF_ALIAS_%d=%s", seen,
				       app->alias);
			seen++;
		}
	}
	mutex_unlock(&of_mutex);
}
EXPORT_SYMBOL_GPL(of_device_uevent);
256 
of_device_uevent_modalias(const struct device * dev,struct kobj_uevent_env * env)257 int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env)
258 {
259 	int sl;
260 
261 	if ((!dev) || (!dev->of_node) || dev->of_node_reused)
262 		return -ENODEV;
263 
264 	/* Devicetree modalias is tricky, we add it in 2 steps */
265 	if (add_uevent_var(env, "MODALIAS="))
266 		return -ENOMEM;
267 
268 	sl = of_modalias(dev->of_node, &env->buf[env->buflen-1],
269 			 sizeof(env->buf) - env->buflen);
270 	if (sl < 0)
271 		return sl;
272 	if (sl >= (sizeof(env->buf) - env->buflen))
273 		return -ENOMEM;
274 	env->buflen += sl;
275 
276 	return 0;
277 }
278 EXPORT_SYMBOL_GPL(of_device_uevent_modalias);
279 
280 /**
281  * of_device_make_bus_id - Use the device node data to assign a unique name
282  * @dev: pointer to device structure that is linked to a device tree node
283  *
284  * This routine will first try using the translated bus address to
285  * derive a unique name. If it cannot, then it will prepend names from
286  * parent nodes until a unique name can be derived.
287  */
of_device_make_bus_id(struct device * dev)288 void of_device_make_bus_id(struct device *dev)
289 {
290 	struct device_node *node = dev->of_node;
291 	const __be32 *reg;
292 	u64 addr;
293 	u32 mask;
294 
295 	/* Construct the name, using parent nodes if necessary to ensure uniqueness */
296 	while (node->parent) {
297 		/*
298 		 * If the address can be translated, then that is as much
299 		 * uniqueness as we need. Make it the first component and return
300 		 */
301 		reg = of_get_property(node, "reg", NULL);
302 		if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
303 			if (!of_property_read_u32(node, "mask", &mask))
304 				dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn",
305 					     addr, ffs(mask) - 1, node, dev_name(dev));
306 
307 			else
308 				dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
309 					     addr, node, dev_name(dev));
310 			return;
311 		}
312 
313 		/* format arguments only used if dev_name() resolves to NULL */
314 		dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s",
315 			     kbasename(node->full_name), dev_name(dev));
316 		node = node->parent;
317 	}
318 }
319 EXPORT_SYMBOL_GPL(of_device_make_bus_id);
320