// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include <asm/errno.h>
#include "of_private.h"

/**
 * of_match_device - Tell if a struct device matches an of_device_id list
 * @matches: array of OF device match structures to search in
 * @dev: the OF device structure to match against
 *
 * Used by a driver to check whether a platform_device present in the
 * system is in its list of supported devices.
 */
const struct of_device_id *of_match_device(const struct of_device_id *matches,
					   const struct device *dev)
{
	if (!matches || !dev->of_node || dev->of_node_reused)
		return NULL;
	return of_match_node(matches, dev->of_node);
}
EXPORT_SYMBOL(of_match_device);
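
/*
 * Illustrative sketch (not part of the original file): a platform driver
 * would typically call of_match_device() from its probe routine with its
 * own match table. The "acme,foo" compatible and the foo_example_* names
 * below are hypothetical.
 */
static const struct of_device_id foo_example_of_match[] = {
	{ .compatible = "acme,foo" },
	{ /* sentinel */ }
};

static int __maybe_unused foo_example_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_device(foo_example_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	/* match->data, when populated, carries per-compatible driver data */
	return 0;
}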

int of_device_add(struct platform_device *ofdev)
{
	BUG_ON(ofdev->dev.of_node == NULL);

	/* name and id have to be set so that the platform bus doesn't get
	 * confused on matching */
	ofdev->name = dev_name(&ofdev->dev);
	ofdev->id = PLATFORM_DEVID_NONE;

	/*
	 * If this device does not have a NUMA node binding in the devicetree,
	 * i.e. of_node_to_nid() returns NUMA_NO_NODE, device_add() will assume
	 * that this device is on the same node as its parent.
	 */
	set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->dev.of_node));

	return device_add(&ofdev->dev);
}

static void
of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
{
	struct device_node *node, *of_node = dev->of_node;
	int count, i;

	if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
		return;

	count = of_property_count_elems_of_size(of_node, "memory-region",
						sizeof(u32));
	/*
	 * If dev->of_node doesn't exist or doesn't contain memory-region, try
	 * the OF node having DMA configuration.
	 */
	if (count <= 0) {
		of_node = np;
		count = of_property_count_elems_of_size(
			of_node, "memory-region", sizeof(u32));
	}

	for (i = 0; i < count; i++) {
		node = of_parse_phandle(of_node, "memory-region", i);
		/*
		 * There might be multiple memory regions, but only one
		 * restricted-dma-pool region is allowed.
		 */
		if (of_device_is_compatible(node, "restricted-dma-pool") &&
		    of_device_is_available(node)) {
			of_node_put(node);
			break;
		}
		of_node_put(node);
	}

	/*
	 * Attempt to initialize a restricted-dma-pool region if one was found.
	 * Note that count can hold a negative error code.
	 */
	if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
		dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
}
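
/*
 * For reference, a hypothetical devicetree fragment that the lookup above
 * would act on (names and addresses are illustrative; the binding is the
 * reserved-memory "restricted-dma-pool" compatible):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		restricted_dma: restricted-dma@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 *	dma-capable-device@40000000 {
 *		memory-region = <&restricted_dma>;
 *	};
 */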

/**
 * of_dma_configure_id - Set up DMA configuration
 * @dev:	Device to apply the DMA configuration to
 * @np:		Pointer to the OF node holding the DMA configuration
 * @force_dma:  Whether the device is to be set up by of_dma_configure() even
 *		if DMA capability is not explicitly described by firmware.
 * @id:		Optional bus-specific device ID, passed on to the IOMMU
 *		configuration
 *
 * Try to get the device's DMA configuration from DT and update it
 * accordingly.
 *
 * If platform code needs to use its own special DMA configuration, it
 * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
 * to fix up the DMA configuration.
 */
int of_dma_configure_id(struct device *dev, struct device_node *np,
			bool force_dma, const u32 *id)
{
	const struct iommu_ops *iommu;
	const struct bus_dma_region *map = NULL;
	struct device_node *bus_np;
	u64 dma_start = 0;
	u64 mask, end, size = 0;
	bool coherent;
	int ret;

	if (np == dev->of_node)
		bus_np = __of_get_dma_parent(np);
	else
		bus_np = of_node_get(np);

	ret = of_dma_get_range(bus_np, &map);
	of_node_put(bus_np);
	if (ret < 0) {
		/*
		 * For legacy reasons, we have to assume some devices need
		 * DMA configuration regardless of whether "dma-ranges" is
		 * correctly specified or not.
		 */
		if (!force_dma)
			return ret == -ENODEV ? 0 : ret;
	} else {
		const struct bus_dma_region *r = map;
		u64 dma_end = 0;

		/* Determine the overall bounds of all DMA regions */
		for (dma_start = ~0; r->size; r++) {
			/* Take lower and upper limits */
			if (r->dma_start < dma_start)
				dma_start = r->dma_start;
			if (r->dma_start + r->size > dma_end)
				dma_end = r->dma_start + r->size;
		}
		size = dma_end - dma_start;

		/*
		 * Work around "dma-ranges" entries that specify a mask rather
		 * than a size: treat an odd size as mask + 1.
		 */
		if (size & 1) {
			dev_warn(dev, "Invalid size 0x%llx for dma-range(s)\n",
				 size);
			size = size + 1;
		}

		if (!size) {
			dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
			kfree(map);
			return -EINVAL;
		}
	}

	/*
	 * If @dev is expected to be DMA-capable then the bus code that created
	 * it should have initialised its dma_mask pointer by this point. For
	 * now, we'll continue the legacy behaviour of coercing it to the
	 * coherent mask if not, but we'll no longer do so quietly.
	 */
	if (!dev->dma_mask) {
		dev_warn(dev, "DMA mask not set\n");
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	if (!size && dev->coherent_dma_mask)
		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
	else if (!size)
		size = 1ULL << 32;

	/*
	 * Limit coherent and dma mask based on size and default mask
	 * set by the driver.
	 */
	end = dma_start + size - 1;
	mask = DMA_BIT_MASK(ilog2(end) + 1);
	dev->coherent_dma_mask &= mask;
	*dev->dma_mask &= mask;
	/* ...but only set bus limit and range map if we found valid dma-ranges earlier */
	if (!ret) {
		dev->bus_dma_limit = end;
		dev->dma_range_map = map;
	}
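	/*
	 * Worked example for the mask/limit computation above (hypothetical
	 * numbers): a single dma-ranges entry translating to
	 * dma_start = 0x40000000 and size = 0x40000000 gives end = 0x7fffffff,
	 * so bus_dma_limit becomes 0x7fffffff and both masks are capped at
	 * DMA_BIT_MASK(31).
	 */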

	coherent = of_dma_is_coherent(np);
	dev_dbg(dev, "device is%sdma coherent\n",
		coherent ? " " : " not ");

	iommu = of_iommu_configure(dev, np, id);
	if (PTR_ERR(iommu) == -EPROBE_DEFER) {
		/* Don't touch range map if it wasn't set from a valid dma-ranges */
		if (!ret)
			dev->dma_range_map = NULL;
		kfree(map);
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "device is%sbehind an iommu\n",
		iommu ? " " : " not ");

	arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);

	if (!iommu)
		of_dma_set_restricted_buffer(dev, np);

	return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
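
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * a bus controller that creates its child devices itself would typically
 * apply its own node's DMA configuration to each child along these lines.
 */
static int __maybe_unused example_bus_setup_child_dma(struct device *child,
						       struct device_node *bus_np)
{
	/*
	 * Use the bus node's dma-ranges/dma-coherent properties and force
	 * DMA setup even if the node lacks explicit DMA properties; no
	 * bus-specific input ID is passed for the IOMMU lookup.
	 */
	return of_dma_configure_id(child, bus_np, true, NULL);
}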

int of_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	return of_device_add(pdev);
}
EXPORT_SYMBOL(of_device_register);

void of_device_unregister(struct platform_device *ofdev)
{
	device_unregister(&ofdev->dev);
}
EXPORT_SYMBOL(of_device_unregister);
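
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * pairing of_device_register() with of_device_unregister(). The caller is
 * assumed to have allocated @pdev, attached its OF node and set a unique
 * device name beforehand.
 */
static void __maybe_unused example_of_register_unregister(struct platform_device *pdev)
{
	/* device_initialize() + of_device_add() */
	if (of_device_register(pdev))
		return;

	/* ... device is live on the platform bus ... */

	/* device_del() + put_device() on the embedded struct device */
	of_device_unregister(pdev);
}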

const void *of_device_get_match_data(const struct device *dev)
{
	const struct of_device_id *match;

	match = of_match_device(dev->driver->of_match_table, dev);
	if (!match)
		return NULL;

	return match->data;
}
EXPORT_SYMBOL(of_device_get_match_data);
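
/*
 * Illustrative sketch (hypothetical driver, not part of the original file):
 * retrieving per-compatible configuration in probe without touching the
 * match table directly. The table is assumed to be hooked up through the
 * driver's .of_match_table; "acme,bar-v1" and the bar_example_* names are
 * made up.
 */
struct bar_example_cfg {
	unsigned int num_channels;
};

static const struct bar_example_cfg bar_example_v1_cfg = { .num_channels = 4 };

static const struct of_device_id bar_example_of_match[] = {
	{ .compatible = "acme,bar-v1", .data = &bar_example_v1_cfg },
	{ /* sentinel */ }
};

static int __maybe_unused bar_example_probe(struct platform_device *pdev)
{
	const struct bar_example_cfg *cfg;

	/* Returns the .data of the matching of_device_id entry, or NULL */
	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -EINVAL;

	return 0;
}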

/**
 * of_device_modalias - Fill buffer with newline terminated modalias string
 * @dev:	Calling device
 * @str:	Modalias string
 * @len:	Size of @str
 */
ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
{
	ssize_t sl;

	if (!dev || !dev->of_node || dev->of_node_reused)
		return -ENODEV;

	sl = of_modalias(dev->of_node, str, len - 2);
	if (sl < 0)
		return sl;
	if (sl > len - 2)
		return -ENOMEM;

	str[sl++] = '\n';
	str[sl] = 0;
	return sl;
}
EXPORT_SYMBOL_GPL(of_device_modalias);
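
/*
 * Illustrative sketch (hypothetical sysfs callback, not part of the original
 * file): a bus's "modalias" device attribute is the typical caller of the
 * helper above.
 */
static ssize_t __maybe_unused example_modalias_show(struct device *dev,
						    struct device_attribute *attr,
						    char *buf)
{
	/* Real callers usually fall back to other modalias sources on -ENODEV */
	return of_device_modalias(dev, buf, PAGE_SIZE);
}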

/**
 * of_device_uevent - Display OF related uevent information
 * @dev:	Device to display the uevent information for
 * @env:	Kernel object's userspace event reference to fill up
 */
void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const char *compat, *type;
	struct alias_prop *app;
	struct property *p;
	int seen = 0;

	if ((!dev) || (!dev->of_node))
		return;

	add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
	add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
	type = of_node_get_device_type(dev->of_node);
	if (type)
		add_uevent_var(env, "OF_TYPE=%s", type);

	/* Since the compatible field can contain pretty much anything
	 * it's not really legal to split it out with commas. We split it
	 * up using a number of environment variables instead. */
	of_property_for_each_string(dev->of_node, "compatible", p, compat) {
		add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat);
		seen++;
	}
	add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen);

	seen = 0;
	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (dev->of_node == app->np) {
			add_uevent_var(env, "OF_ALIAS_%d=%s", seen,
				       app->alias);
			seen++;
		}
	}
	mutex_unlock(&of_mutex);
}
EXPORT_SYMBOL_GPL(of_device_uevent);
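
/*
 * For a hypothetical node /soc/i2c@40003000 with
 * compatible = "acme,i2c-v2", "acme,i2c", the helper above would add
 * roughly the following variables to the uevent environment:
 *
 *	OF_NAME=i2c
 *	OF_FULLNAME=/soc/i2c@40003000
 *	OF_COMPATIBLE_0=acme,i2c-v2
 *	OF_COMPATIBLE_1=acme,i2c
 *	OF_COMPATIBLE_N=2
 */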

int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env)
{
	int sl;

	if ((!dev) || (!dev->of_node) || dev->of_node_reused)
		return -ENODEV;

	/* Devicetree modalias is tricky; we add it in two steps */
	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;

	sl = of_modalias(dev->of_node, &env->buf[env->buflen-1],
			 sizeof(env->buf) - env->buflen);
	if (sl < 0)
		return sl;
	if (sl >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;
	env->buflen += sl;

	return 0;
}
EXPORT_SYMBOL_GPL(of_device_uevent_modalias);
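
/*
 * Illustrative sketch (hypothetical bus code, not part of the original file):
 * a bus type's uevent() callback can simply forward to the helper above so
 * that OF-based module autoloading works for its devices.
 */
static int __maybe_unused example_bus_uevent(const struct device *dev,
					     struct kobj_uevent_env *env)
{
	return of_device_uevent_modalias(dev, env);
}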
339