// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include <asm/errno.h>
#include "of_private.h"

/**
 * of_match_device - Tell if a struct device matches an of_device_id list
 * @matches: array of of device match structures to search in
 * @dev: the of device structure to match against
 *
 * Used by a driver to check whether a platform_device present in the
 * system is in its list of supported devices.
 */
const struct of_device_id *of_match_device(const struct of_device_id *matches,
					   const struct device *dev)
{
	if (!matches || !dev->of_node || dev->of_node_reused)
		return NULL;
	return of_match_node(matches, dev->of_node);
}
EXPORT_SYMBOL(of_match_device);
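
/*
 * Usage sketch (illustrative, not part of this file): a platform driver's
 * probe() can use of_match_device() to recover the entry that matched.
 * All "foo" names below are hypothetical.
 */
#if 0
struct foo_chip_data { int variant; };

static const struct foo_chip_data foo_v1_data = { .variant = 1 };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1_data },
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_device(foo_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;	/* not a supported device */
	/* match->data points at the per-compatible foo_v1_data */
	return 0;
}
#endif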

static void
of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
{
	struct device_node *of_node = dev->of_node;
	struct of_phandle_iterator it;
	int rc, i = 0;

	if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
		return;

	/*
	 * If dev->of_node doesn't exist or doesn't contain memory-region, try
	 * the OF node having DMA configuration.
	 */
	if (!of_property_present(of_node, "memory-region"))
		of_node = np;

	of_for_each_phandle(&it, rc, of_node, "memory-region", NULL, 0) {
		/*
		 * There might be multiple memory regions, but only one
		 * restricted-dma-pool region is allowed.
		 */
		if (of_device_is_compatible(it.node, "restricted-dma-pool") &&
		    of_device_is_available(it.node)) {
			if (of_reserved_mem_device_init_by_idx(dev, of_node, i))
				dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
			of_node_put(it.node);
			break;
		}
		i++;
	}
}
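
/*
 * For reference, a minimal device-tree sketch that the lookup above would
 * match (node names and addresses are illustrative assumptions):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		pool: restricted-dma@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 * A device then opts in with a phandle reference in its own node:
 *
 *	memory-region = <&pool>;
 */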

/**
 * of_dma_configure_id - Setup DMA configuration
 * @dev: Device to apply DMA configuration
 * @np: Pointer to OF node having DMA configuration
 * @force_dma: Whether device is to be set up by of_dma_configure() even if
 *		DMA capability is not explicitly described by firmware.
 * @id: Optional input id (e.g. an IOMMU stream id), passed on to the IOMMU
 *		configuration; may be NULL.
 *
 * Try to get the device's DMA configuration from DT and update it
 * accordingly.
 *
 * If platform code needs to use its own special DMA configuration, it
 * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
 * to fix up DMA configuration.
 */
int of_dma_configure_id(struct device *dev, struct device_node *np,
			bool force_dma, const u32 *id)
{
	const struct bus_dma_region *map = NULL;
	struct device_node *bus_np;
	u64 mask, end = 0;
	bool coherent, set_map = false;
	int ret;

	if (dev->dma_range_map) {
		dev_dbg(dev, "dma_range_map already set\n");
		goto skip_map;
	}

	if (np == dev->of_node)
		bus_np = __of_get_dma_parent(np);
	else
		bus_np = of_node_get(np);

	ret = of_dma_get_range(bus_np, &map);
	of_node_put(bus_np);
	if (ret < 0) {
		/*
		 * For legacy reasons, we have to assume some devices need
		 * DMA configuration regardless of whether "dma-ranges" is
		 * correctly specified or not.
		 */
		if (!force_dma)
			return ret == -ENODEV ? 0 : ret;
	} else {
		/* Determine the overall bounds of all DMA regions */
		end = dma_range_map_max(map);
		set_map = true;
	}
skip_map:
	/*
	 * If @dev is expected to be DMA-capable then the bus code that created
	 * it should have initialised its dma_mask pointer by this point. For
	 * now, we'll continue the legacy behaviour of coercing it to the
	 * coherent mask if not, but we'll no longer do so quietly.
	 */
	if (!dev->dma_mask) {
		dev_warn(dev, "DMA mask not set\n");
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	if (!end && dev->coherent_dma_mask)
		end = dev->coherent_dma_mask;
	else if (!end)
		end = (1ULL << 32) - 1;

	/*
	 * Limit coherent and dma mask based on size and default mask
	 * set by the driver.
	 */
	mask = DMA_BIT_MASK(ilog2(end) + 1);
	dev->coherent_dma_mask &= mask;
	*dev->dma_mask &= mask;
	/* ...but only set bus limit and range map if we found valid dma-ranges earlier */
	if (set_map) {
		dev->bus_dma_limit = end;
		dev->dma_range_map = map;
	}

	coherent = of_dma_is_coherent(np);
	dev_dbg(dev, "device is%sdma coherent\n",
		coherent ? " " : " not ");

	ret = of_iommu_configure(dev, np, id);
	if (ret == -EPROBE_DEFER) {
		/* Don't touch range map if it wasn't set from a valid dma-ranges */
		if (set_map)
			dev->dma_range_map = NULL;
		kfree(map);
		return -EPROBE_DEFER;
	}
	/* Take all other IOMMU errors to mean we'll just carry on without it */
	dev_dbg(dev, "device is%sbehind an iommu\n",
		!ret ? " " : " not ");

	arch_setup_dma_ops(dev, coherent);

	if (ret)
		of_dma_set_restricted_buffer(dev, np);

	return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
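
/*
 * Usage sketch (illustrative): bus code normally reaches this through the
 * of_dma_configure() wrapper in <linux/of_device.h>, which passes a NULL id.
 * The "foo" name is hypothetical.
 */
#if 0
static int foo_bus_dma_configure(struct device *dev)
{
	/*
	 * force_dma=true keeps the legacy behaviour of configuring DMA even
	 * when firmware does not describe it explicitly via "dma-ranges".
	 */
	return of_dma_configure(dev, dev->of_node, true);
}
#endif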

const void *of_device_get_match_data(const struct device *dev)
{
	const struct of_device_id *match;

	match = of_match_device(dev->driver->of_match_table, dev);
	if (!match)
		return NULL;

	return match->data;
}
EXPORT_SYMBOL(of_device_get_match_data);
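
/*
 * Usage sketch (illustrative): probe() code can pull the per-compatible
 * data directly, without keeping a pointer to the match table. The "foo"
 * names are hypothetical and mirror the sketch after of_match_device().
 */
#if 0
static int foo_probe(struct platform_device *pdev)
{
	const struct foo_chip_data *data = of_device_get_match_data(&pdev->dev);

	if (!data)
		return -EINVAL;	/* bound without a matching OF entry */
	return foo_init_variant(pdev, data->variant);
}
#endif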

/**
 * of_device_modalias - Fill buffer with newline terminated modalias string
 * @dev: Calling device
 * @str: Modalias string
 * @len: Size of @str
 */
ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
{
	ssize_t sl;

	if (!dev || !dev->of_node || dev->of_node_reused)
		return -ENODEV;

	sl = of_modalias(dev->of_node, str, len - 2);
	if (sl < 0)
		return sl;
	if (sl > len - 2)
		return -ENOMEM;

	str[sl++] = '\n';
	str[sl] = 0;
	return sl;
}
EXPORT_SYMBOL_GPL(of_device_modalias);
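
/*
 * Usage sketch (illustrative): a bus's sysfs "modalias" attribute is
 * commonly backed by this helper from its show() callback.
 */
#if 0
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	/* buf is a PAGE_SIZE sysfs buffer; the helper appends the '\n' */
	return of_device_modalias(dev, buf, PAGE_SIZE);
}
#endif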

/**
 * of_device_uevent - Display OF related uevent information
 * @dev: Device to display the uevent information for
 * @env: Kernel object's userspace event reference to fill up
 */
void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const char *compat, *type;
	struct alias_prop *app;
	struct property *p;
	int seen = 0;

	if ((!dev) || (!dev->of_node))
		return;

	add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
	add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
	type = of_node_get_device_type(dev->of_node);
	if (type)
		add_uevent_var(env, "OF_TYPE=%s", type);

	/* Since the compatible field can contain pretty much anything
	 * it's not really legal to split it out with commas. We split it
	 * up using a number of environment variables instead. */
	of_property_for_each_string(dev->of_node, "compatible", p, compat) {
		add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat);
		seen++;
	}
	add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen);

	seen = 0;
	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (dev->of_node == app->np) {
			add_uevent_var(env, "OF_ALIAS_%d=%s", seen,
				       app->alias);
			seen++;
		}
	}
	mutex_unlock(&of_mutex);
}
EXPORT_SYMBOL_GPL(of_device_uevent);

int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env)
{
	int sl;

	if ((!dev) || (!dev->of_node) || dev->of_node_reused)
		return -ENODEV;

	/* Devicetree modalias is tricky, we add it in 2 steps */
	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;

	sl = of_modalias(dev->of_node, &env->buf[env->buflen-1],
			 sizeof(env->buf) - env->buflen);
	if (sl < 0)
		return sl;
	if (sl >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;
	env->buflen += sl;

	return 0;
}
EXPORT_SYMBOL_GPL(of_device_uevent_modalias);
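
/*
 * Usage sketch (illustrative): a bus_type's uevent() callback can emit the
 * OF_* variables and the MODALIAS variable together. The "foo" name is
 * hypothetical.
 */
#if 0
static int foo_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	of_device_uevent(dev, env);	/* OF_NAME, OF_FULLNAME, OF_COMPATIBLE_* */
	return of_device_uevent_modalias(dev, env);	/* MODALIAS=of:... */
}
#endif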

/**
 * of_device_make_bus_id - Use the device node data to assign a unique name
 * @dev: pointer to device structure that is linked to a device tree node
 *
 * This routine will first try using the translated bus address to
 * derive a unique name. If it cannot, then it will prepend names from
 * parent nodes until a unique name can be derived.
 */
void of_device_make_bus_id(struct device *dev)
{
	struct device_node *node = dev->of_node;
	const __be32 *reg;
	u64 addr;
	u32 mask;

	/* Construct the name, using parent nodes if necessary to ensure uniqueness */
	while (node->parent) {
		/*
		 * If the address can be translated, then that is as much
		 * uniqueness as we need. Make it the first component and return
		 */
		reg = of_get_property(node, "reg", NULL);
		if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
			if (!of_property_read_u32(node, "mask", &mask))
				dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn",
					     addr, ffs(mask) - 1, node, dev_name(dev));
			else
				dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
					     addr, node, dev_name(dev));
			return;
		}

		/* format arguments only used if dev_name() resolves to NULL */
		dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s",
			     kbasename(node->full_name), dev_name(dev));
		node = node->parent;
	}
}
EXPORT_SYMBOL_GPL(of_device_make_bus_id);
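
/*
 * Usage sketch (illustrative): code that creates devices from DT nodes can
 * use this helper to derive stable names such as "f1001000.serial". The
 * "foo" name is hypothetical.
 */
#if 0
static struct platform_device *foo_create_device(struct device_node *np)
{
	struct platform_device *pdev = platform_device_alloc("", PLATFORM_DEVID_NONE);

	if (!pdev)
		return NULL;
	device_set_node(&pdev->dev, of_fwnode_handle(np));
	of_device_make_bus_id(&pdev->dev);	/* name from translated "reg" */
	return pdev;
}
#endif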