xref: /linux/drivers/interconnect/core.c (revision ecf11d31bf5ccde62c91abe94d4edb867b64958f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Interconnect framework core driver
4  *
5  * Copyright (c) 2017-2019, Linaro Ltd.
6  * Author: Georgi Djakov <georgi.djakov@linaro.org>
7  */
8 
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/idr.h>
12 #include <linux/init.h>
13 #include <linux/interconnect.h>
14 #include <linux/interconnect-provider.h>
15 #include <linux/list.h>
16 #include <linux/mutex.h>
17 #include <linux/slab.h>
18 #include <linux/of.h>
19 #include <linux/overflow.h>
20 
21 #include "internal.h"
22 
23 #define ICC_DYN_ID_START 100000
24 
25 #define CREATE_TRACE_POINTS
26 #include "trace.h"
27 
28 static DEFINE_IDR(icc_idr);
29 static LIST_HEAD(icc_providers);
30 static int providers_count;
31 static bool synced_state;
32 static DEFINE_MUTEX(icc_lock);
33 static DEFINE_MUTEX(icc_bw_lock);
34 static struct dentry *icc_debugfs_dir;
35 
36 static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
37 {
38 	if (!n)
39 		return;
40 
41 	seq_printf(s, "%-42s %12u %12u\n",
42 		   n->name, n->avg_bw, n->peak_bw);
43 }
44 
45 static int icc_summary_show(struct seq_file *s, void *data)
46 {
47 	struct icc_provider *provider;
48 
49 	seq_puts(s, " node                                  tag          avg         peak\n");
50 	seq_puts(s, "--------------------------------------------------------------------\n");
51 
52 	mutex_lock(&icc_lock);
53 
54 	list_for_each_entry(provider, &icc_providers, provider_list) {
55 		struct icc_node *n;
56 
57 		list_for_each_entry(n, &provider->nodes, node_list) {
58 			struct icc_req *r;
59 
60 			icc_summary_show_one(s, n);
61 			hlist_for_each_entry(r, &n->req_list, req_node) {
62 				u32 avg_bw = 0, peak_bw = 0;
63 
64 				if (!r->dev)
65 					continue;
66 
67 				if (r->enabled) {
68 					avg_bw = r->avg_bw;
69 					peak_bw = r->peak_bw;
70 				}
71 
72 				seq_printf(s, "  %-27s %12u %12u %12u\n",
73 					   dev_name(r->dev), r->tag, avg_bw, peak_bw);
74 			}
75 		}
76 	}
77 
78 	mutex_unlock(&icc_lock);
79 
80 	return 0;
81 }
82 DEFINE_SHOW_ATTRIBUTE(icc_summary);
83 
84 static void icc_graph_show_link(struct seq_file *s, int level,
85 				struct icc_node *n, struct icc_node *m)
86 {
87 	seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
88 		   level == 2 ? "\t\t" : "\t",
89 		   n->id, n->name, m->id, m->name);
90 }
91 
92 static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
93 {
94 	seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
95 		   n->id, n->name, n->id, n->name);
96 	seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
97 	seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
98 	seq_puts(s, "\"]\n");
99 }
100 
101 static int icc_graph_show(struct seq_file *s, void *data)
102 {
103 	struct icc_provider *provider;
104 	struct icc_node *n;
105 	int cluster_index = 0;
106 	int i;
107 
108 	seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
109 	mutex_lock(&icc_lock);
110 
111 	/* draw providers as cluster subgraphs */
112 	cluster_index = 0;
113 	list_for_each_entry(provider, &icc_providers, provider_list) {
114 		seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
115 		if (provider->dev)
116 			seq_printf(s, "\t\tlabel = \"%s\"\n",
117 				   dev_name(provider->dev));
118 
119 		/* draw nodes */
120 		list_for_each_entry(n, &provider->nodes, node_list)
121 			icc_graph_show_node(s, n);
122 
123 		/* draw internal links */
124 		list_for_each_entry(n, &provider->nodes, node_list)
125 			for (i = 0; i < n->num_links; ++i)
126 				if (n->provider == n->links[i]->provider)
127 					icc_graph_show_link(s, 2, n,
128 							    n->links[i]);
129 
130 		seq_puts(s, "\t}\n");
131 	}
132 
133 	/* draw external links */
134 	list_for_each_entry(provider, &icc_providers, provider_list)
135 		list_for_each_entry(n, &provider->nodes, node_list)
136 			for (i = 0; i < n->num_links; ++i)
137 				if (n->provider != n->links[i]->provider)
138 					icc_graph_show_link(s, 1, n,
139 							    n->links[i]);
140 
141 	mutex_unlock(&icc_lock);
142 	seq_puts(s, "}");
143 
144 	return 0;
145 }
146 DEFINE_SHOW_ATTRIBUTE(icc_graph);
147 
148 static struct icc_node *node_find(const int id)
149 {
150 	return idr_find(&icc_idr, id);
151 }
152 
153 static struct icc_node *node_find_by_name(const char *name)
154 {
155 	struct icc_provider *provider;
156 	struct icc_node *n;
157 
158 	list_for_each_entry(provider, &icc_providers, provider_list) {
159 		list_for_each_entry(n, &provider->nodes, node_list) {
160 			if (!strcmp(n->name, name))
161 				return n;
162 		}
163 	}
164 
165 	return NULL;
166 }
167 
168 static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
169 				  ssize_t num_nodes)
170 {
171 	struct icc_node *node = dst;
172 	struct icc_path *path;
173 	int i;
174 
175 	path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
176 	if (!path)
177 		return ERR_PTR(-ENOMEM);
178 
179 	path->num_nodes = num_nodes;
180 
181 	mutex_lock(&icc_bw_lock);
182 
183 	for (i = num_nodes - 1; i >= 0; i--) {
184 		node->provider->users++;
185 		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
186 		path->reqs[i].node = node;
187 		path->reqs[i].dev = dev;
188 		path->reqs[i].enabled = true;
189 		/* reference to previous node was saved during path traversal */
190 		node = node->reverse;
191 	}
192 
193 	mutex_unlock(&icc_bw_lock);
194 
195 	return path;
196 }
197 
198 static struct icc_path *path_find(struct device *dev, struct icc_node *src,
199 				  struct icc_node *dst)
200 {
201 	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
202 	struct icc_node *n, *node = NULL;
203 	struct list_head traverse_list;
204 	struct list_head edge_list;
205 	struct list_head visited_list;
206 	size_t i, depth = 1;
207 	bool found = false;
208 
209 	INIT_LIST_HEAD(&traverse_list);
210 	INIT_LIST_HEAD(&edge_list);
211 	INIT_LIST_HEAD(&visited_list);
212 
213 	list_add(&src->search_list, &traverse_list);
214 	src->reverse = NULL;
215 
216 	do {
217 		list_for_each_entry_safe(node, n, &traverse_list, search_list) {
218 			if (node == dst) {
219 				found = true;
220 				list_splice_init(&edge_list, &visited_list);
221 				list_splice_init(&traverse_list, &visited_list);
222 				break;
223 			}
224 			for (i = 0; i < node->num_links; i++) {
225 				struct icc_node *tmp = node->links[i];
226 
227 				if (!tmp) {
228 					path = ERR_PTR(-ENOENT);
229 					goto out;
230 				}
231 
232 				if (tmp->is_traversed)
233 					continue;
234 
235 				tmp->is_traversed = true;
236 				tmp->reverse = node;
237 				list_add_tail(&tmp->search_list, &edge_list);
238 			}
239 		}
240 
241 		if (found)
242 			break;
243 
244 		list_splice_init(&traverse_list, &visited_list);
245 		list_splice_init(&edge_list, &traverse_list);
246 
247 		/* count the hops including the source */
248 		depth++;
249 
250 	} while (!list_empty(&traverse_list));
251 
252 out:
253 
254 	/* reset the traversed state */
255 	list_for_each_entry_reverse(n, &visited_list, search_list)
256 		n->is_traversed = false;
257 
258 	if (found)
259 		path = path_init(dev, dst, depth);
260 
261 	return path;
262 }
263 
264 /*
265  * We want the path to honor all bandwidth requests, so the average and peak
266  * bandwidth requirements from each consumer are aggregated at each node.
267  * The aggregation is platform specific, so each platform can customize it by
268  * implementing its own aggregate() function.
269  */
270 
271 static int aggregate_requests(struct icc_node *node)
272 {
273 	struct icc_provider *p = node->provider;
274 	struct icc_req *r;
275 	u32 avg_bw, peak_bw;
276 
277 	node->avg_bw = 0;
278 	node->peak_bw = 0;
279 
280 	if (p->pre_aggregate)
281 		p->pre_aggregate(node);
282 
283 	hlist_for_each_entry(r, &node->req_list, req_node) {
284 		if (r->enabled) {
285 			avg_bw = r->avg_bw;
286 			peak_bw = r->peak_bw;
287 		} else {
288 			avg_bw = 0;
289 			peak_bw = 0;
290 		}
291 		p->aggregate(node, r->tag, avg_bw, peak_bw,
292 			     &node->avg_bw, &node->peak_bw);
293 
294 		/* during boot use the initial bandwidth as a floor value */
295 		if (!synced_state) {
296 			node->avg_bw = max(node->avg_bw, node->init_avg);
297 			node->peak_bw = max(node->peak_bw, node->init_peak);
298 		}
299 	}
300 
301 	return 0;
302 }
303 
304 static int apply_constraints(struct icc_path *path)
305 {
306 	struct icc_node *next, *prev = NULL;
307 	struct icc_provider *p;
308 	int ret = -EINVAL;
309 	int i;
310 
311 	for (i = 0; i < path->num_nodes; i++) {
312 		next = path->reqs[i].node;
313 		p = next->provider;
314 
315 		/* both endpoints should be valid master-slave pairs */
316 		if (!prev || (p != prev->provider && !p->inter_set)) {
317 			prev = next;
318 			continue;
319 		}
320 
321 		/* set the constraints */
322 		ret = p->set(prev, next);
323 		if (ret)
324 			goto out;
325 
326 		prev = next;
327 	}
328 out:
329 	return ret;
330 }
331 
332 int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
333 		      u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
334 {
335 	*agg_avg += avg_bw;
336 	*agg_peak = max(*agg_peak, peak_bw);
337 
338 	return 0;
339 }
340 EXPORT_SYMBOL_GPL(icc_std_aggregate);
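/*
 * Editorial illustration (not part of the original file): a worked example of
 * the standard aggregation above. Two enabled requests on the same node, say
 * (avg_bw = 100, peak_bw = 400) and (avg_bw = 300, peak_bw = 200), all in
 * kBps, aggregate to:
 *
 *	node->avg_bw  = 100 + 300     = 400
 *	node->peak_bw = max(400, 200) = 400
 *
 * Platform-specific aggregate() callbacks may weigh requests differently,
 * e.g. based on the @tag.
 */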
341 
342 /* of_icc_xlate_onecell() - Translate function using a single index.
343  * @spec: OF phandle args to map into an interconnect node.
344  * @data: private data (pointer to struct icc_onecell_data)
345  *
346  * This is a generic translate function that can be used to model simple
347  * interconnect providers that have one device tree node and provide
348  * multiple interconnect nodes. A single cell is used as an index into
349  * an array of icc nodes specified in the icc_onecell_data struct when
350  * registering the provider.
351  */
352 struct icc_node *of_icc_xlate_onecell(const struct of_phandle_args *spec,
353 				      void *data)
354 {
355 	struct icc_onecell_data *icc_data = data;
356 	unsigned int idx = spec->args[0];
357 
358 	if (idx >= icc_data->num_nodes) {
359 		pr_err("%s: invalid index %u\n", __func__, idx);
360 		return ERR_PTR(-EINVAL);
361 	}
362 
363 	return icc_data->nodes[idx];
364 }
365 EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
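/*
 * Editorial illustration (not part of the original file): a hedged sketch of
 * how a provider driver might wire up of_icc_xlate_onecell(). The names
 * "data", "num_nodes" and the surrounding probe context are assumptions, not
 * code from this driver:
 *
 *	struct icc_onecell_data *data;
 *
 *	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
 *			    GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	data->num_nodes = num_nodes;
 *	(fill data->nodes[i] as each icc_node is created)
 *
 *	provider->xlate = of_icc_xlate_onecell;
 *	provider->data = data;
 */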
366 
367 /**
368  * of_icc_get_from_provider() - Look-up interconnect node
369  * @spec: OF phandle args to use for look-up
370  *
371  * Looks for an interconnect provider under the node specified by @spec and,
372  * if found, uses the provider's xlate function to map the phandle args to a node.
373  *
374  * Returns a valid pointer to struct icc_node_data on success or ERR_PTR()
375  * on failure.
376  */
377 struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spec)
378 {
379 	struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
380 	struct icc_node_data *data = NULL;
381 	struct icc_provider *provider;
382 
383 	if (!spec)
384 		return ERR_PTR(-EINVAL);
385 
386 	mutex_lock(&icc_lock);
387 	list_for_each_entry(provider, &icc_providers, provider_list) {
388 		if (provider->dev->of_node == spec->np) {
389 			if (provider->xlate_extended) {
390 				data = provider->xlate_extended(spec, provider->data);
391 				if (!IS_ERR(data)) {
392 					node = data->node;
393 					break;
394 				}
395 			} else {
396 				node = provider->xlate(spec, provider->data);
397 				if (!IS_ERR(node))
398 					break;
399 			}
400 		}
401 	}
402 	mutex_unlock(&icc_lock);
403 
404 	if (!node)
405 		return ERR_PTR(-EINVAL);
406 
407 	if (IS_ERR(node))
408 		return ERR_CAST(node);
409 
410 	if (!data) {
411 		data = kzalloc(sizeof(*data), GFP_KERNEL);
412 		if (!data)
413 			return ERR_PTR(-ENOMEM);
414 		data->node = node;
415 	}
416 
417 	return data;
418 }
419 EXPORT_SYMBOL_GPL(of_icc_get_from_provider);
420 
421 static void devm_icc_release(struct device *dev, void *res)
422 {
423 	icc_put(*(struct icc_path **)res);
424 }
425 
426 struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
427 {
428 	struct icc_path **ptr, *path;
429 
430 	ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
431 	if (!ptr)
432 		return ERR_PTR(-ENOMEM);
433 
434 	path = of_icc_get(dev, name);
435 	if (!IS_ERR(path)) {
436 		*ptr = path;
437 		devres_add(dev, ptr);
438 	} else {
439 		devres_free(ptr);
440 	}
441 
442 	return path;
443 }
444 EXPORT_SYMBOL_GPL(devm_of_icc_get);
445 
446 /**
447  * of_icc_get_by_index() - get a path handle from a DT node based on index
448  * @dev: device pointer for the consumer device
449  * @idx: interconnect path index
450  *
451  * This function will search for a path between two endpoints and return an
452  * icc_path handle on success. Use icc_put() to release constraints when they
453  * are not needed anymore.
454  * If the interconnect API is disabled, NULL is returned and the consumer
455  * drivers will still build. Drivers are free to handle this specifically,
456  * but they don't have to.
457  *
458  * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
459  * when the API is disabled or the "interconnects" DT property is missing.
460  */
461 struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
462 {
463 	struct icc_path *path;
464 	struct icc_node_data *src_data, *dst_data;
465 	struct device_node *np;
466 	struct of_phandle_args src_args, dst_args;
467 	int ret;
468 
469 	if (!dev || !dev->of_node)
470 		return ERR_PTR(-ENODEV);
471 
472 	np = dev->of_node;
473 
474 	/*
475 	 * When the consumer DT node does not have an "interconnects" property,
476 	 * return a NULL path to skip setting constraints.
477 	 */
478 	if (!of_property_present(np, "interconnects"))
479 		return NULL;
480 
481 	/*
482 	 * We use a combination of phandle and specifier for endpoint. For now
483 	 * let's support only global IDs and extend this in the future if needed
484 	 * without breaking DT compatibility.
485 	 */
486 	ret = of_parse_phandle_with_args(np, "interconnects",
487 					 "#interconnect-cells", idx * 2,
488 					 &src_args);
489 	if (ret)
490 		return ERR_PTR(ret);
491 
492 	of_node_put(src_args.np);
493 
494 	ret = of_parse_phandle_with_args(np, "interconnects",
495 					 "#interconnect-cells", idx * 2 + 1,
496 					 &dst_args);
497 	if (ret)
498 		return ERR_PTR(ret);
499 
500 	of_node_put(dst_args.np);
501 
502 	src_data = of_icc_get_from_provider(&src_args);
503 
504 	if (IS_ERR(src_data)) {
505 		dev_err_probe(dev, PTR_ERR(src_data), "error finding src node\n");
506 		return ERR_CAST(src_data);
507 	}
508 
509 	dst_data = of_icc_get_from_provider(&dst_args);
510 
511 	if (IS_ERR(dst_data)) {
512 		dev_err_probe(dev, PTR_ERR(dst_data), "error finding dst node\n");
513 		kfree(src_data);
514 		return ERR_CAST(dst_data);
515 	}
516 
517 	mutex_lock(&icc_lock);
518 	path = path_find(dev, src_data->node, dst_data->node);
519 	mutex_unlock(&icc_lock);
520 	if (IS_ERR(path)) {
521 		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
522 		goto free_icc_data;
523 	}
524 
525 	if (src_data->tag && src_data->tag == dst_data->tag)
526 		icc_set_tag(path, src_data->tag);
527 
528 	path->name = kasprintf(GFP_KERNEL, "%s-%s",
529 			       src_data->node->name, dst_data->node->name);
530 	if (!path->name) {
531 		kfree(path);
532 		path = ERR_PTR(-ENOMEM);
533 	}
534 
535 free_icc_data:
536 	kfree(src_data);
537 	kfree(dst_data);
538 	return path;
539 }
540 EXPORT_SYMBOL_GPL(of_icc_get_by_index);
541 
542 /**
543  * of_icc_get() - get a path handle from a DT node based on name
544  * @dev: device pointer for the consumer device
545  * @name: interconnect path name
546  *
547  * This function will search for a path between two endpoints and return an
548  * icc_path handle on success. Use icc_put() to release constraints when they
549  * are not needed anymore.
550  * If the interconnect API is disabled, NULL is returned and the consumer
551  * drivers will still build. Drivers are free to handle this specifically,
552  * but they don't have to.
553  *
554  * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
555  * when the API is disabled or the "interconnects" DT property is missing.
556  */
557 struct icc_path *of_icc_get(struct device *dev, const char *name)
558 {
559 	struct device_node *np;
560 	int idx = 0;
561 
562 	if (!dev || !dev->of_node)
563 		return ERR_PTR(-ENODEV);
564 
565 	np = dev->of_node;
566 
567 	/*
568 	 * When the consumer DT node does not have an "interconnects" property,
569 	 * return a NULL path to skip setting constraints.
570 	 */
571 	if (!of_property_present(np, "interconnects"))
572 		return NULL;
573 
574 	/*
575 	 * We use a combination of phandle and specifier for endpoint. For now
576 	 * let's support only global IDs and extend this in the future if needed
577 	 * without breaking DT compatibility.
578 	 */
579 	if (name) {
580 		idx = of_property_match_string(np, "interconnect-names", name);
581 		if (idx < 0)
582 			return ERR_PTR(idx);
583 	}
584 
585 	return of_icc_get_by_index(dev, idx);
586 }
587 EXPORT_SYMBOL_GPL(of_icc_get);
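/*
 * Editorial illustration (not part of the original file): a minimal
 * consumer-side sketch, assuming the consumer's DT node carries an
 * "interconnects" property and a matching "interconnect-names" entry called
 * "memory". Error handling is abbreviated:
 *
 *	struct icc_path *path;
 *
 *	path = of_icc_get(dev, "memory");
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	if (!path)
 *		return 0;	(API disabled or no "interconnects" property)
 */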
588 
589 /**
590  * icc_get() - get a path handle between two endpoints
591  * @dev: device pointer for the consumer device
592  * @src: source node name
593  * @dst: destination node name
594  *
595  * This function will search for a path between two endpoints and return an
596  * icc_path handle on success. Use icc_put() to release constraints when they
597  * are not needed anymore.
598  *
599  * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
600  * when the API is disabled.
601  */
602 struct icc_path *icc_get(struct device *dev, const char *src, const char *dst)
603 {
604 	struct icc_node *src_node, *dst_node;
605 	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
606 
607 	mutex_lock(&icc_lock);
608 
609 	src_node = node_find_by_name(src);
610 	if (!src_node) {
611 		dev_err(dev, "%s: invalid src=%s\n", __func__, src);
612 		goto out;
613 	}
614 
615 	dst_node = node_find_by_name(dst);
616 	if (!dst_node) {
617 		dev_err(dev, "%s: invalid dst=%s\n", __func__, dst);
618 		goto out;
619 	}
620 
621 	path = path_find(dev, src_node, dst_node);
622 	if (IS_ERR(path)) {
623 		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
624 		goto out;
625 	}
626 
627 	path->name = kasprintf(GFP_KERNEL, "%s-%s", src_node->name, dst_node->name);
628 	if (!path->name) {
629 		kfree(path);
630 		path = ERR_PTR(-ENOMEM);
631 	}
632 out:
633 	mutex_unlock(&icc_lock);
634 	return path;
635 }
636 
637 /**
638  * icc_set_tag() - set an optional tag on a path
639  * @path: the path we want to tag
640  * @tag: the tag value
641  *
642  * This function allows consumers to append a tag to the requests associated
643  * with a path, so that a different aggregation could be done based on this tag.
644  */
645 void icc_set_tag(struct icc_path *path, u32 tag)
646 {
647 	int i;
648 
649 	if (!path)
650 		return;
651 
652 	mutex_lock(&icc_lock);
653 
654 	for (i = 0; i < path->num_nodes; i++)
655 		path->reqs[i].tag = tag;
656 
657 	mutex_unlock(&icc_lock);
658 }
659 EXPORT_SYMBOL_GPL(icc_set_tag);
660 
661 /**
662  * icc_get_name() - Get name of the icc path
663  * @path: interconnect path
664  *
665  * This function is used by an interconnect consumer to get the name of the icc
666  * path.
667  *
668  * Returns a valid pointer on success, or NULL otherwise.
669  */
670 const char *icc_get_name(struct icc_path *path)
671 {
672 	if (!path)
673 		return NULL;
674 
675 	return path->name;
676 }
677 EXPORT_SYMBOL_GPL(icc_get_name);
678 
679 /**
680  * icc_set_bw() - set bandwidth constraints on an interconnect path
681  * @path: interconnect path
682  * @avg_bw: average bandwidth in kilobytes per second
683  * @peak_bw: peak bandwidth in kilobytes per second
684  *
685  * This function is used by an interconnect consumer to express its own needs
686  * in terms of bandwidth for a previously requested path between two endpoints.
687  * The requests are aggregated and each node is updated accordingly. The entire
688  * path is locked by a mutex to ensure that the set() is completed.
689  * The @path can be NULL when the "interconnects" DT property is missing,
690  * which will mean that no constraints will be set.
691  *
692  * Returns 0 on success, or an appropriate error code otherwise.
693  */
694 int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
695 {
696 	struct icc_node *node;
697 	u32 old_avg, old_peak;
698 	size_t i;
699 	int ret;
700 
701 	if (!path)
702 		return 0;
703 
704 	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
705 		return -EINVAL;
706 
707 	mutex_lock(&icc_bw_lock);
708 
709 	old_avg = path->reqs[0].avg_bw;
710 	old_peak = path->reqs[0].peak_bw;
711 
712 	for (i = 0; i < path->num_nodes; i++) {
713 		node = path->reqs[i].node;
714 
715 		/* update the consumer request for this path */
716 		path->reqs[i].avg_bw = avg_bw;
717 		path->reqs[i].peak_bw = peak_bw;
718 
719 		/* aggregate requests for this node */
720 		aggregate_requests(node);
721 
722 		trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
723 	}
724 
725 	ret = apply_constraints(path);
726 	if (ret) {
727 		pr_debug("interconnect: error applying constraints (%d)\n",
728 			 ret);
729 
730 		for (i = 0; i < path->num_nodes; i++) {
731 			node = path->reqs[i].node;
732 			path->reqs[i].avg_bw = old_avg;
733 			path->reqs[i].peak_bw = old_peak;
734 			aggregate_requests(node);
735 		}
736 		apply_constraints(path);
737 	}
738 
739 	mutex_unlock(&icc_bw_lock);
740 
741 	trace_icc_set_bw_end(path, ret);
742 
743 	return ret;
744 }
745 EXPORT_SYMBOL_GPL(icc_set_bw);
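/*
 * Editorial illustration (not part of the original file): once a path has
 * been obtained (see of_icc_get() above), a consumer might vote for bandwidth
 * and later drop the vote. The bandwidth values here are placeholders:
 *
 *	ret = icc_set_bw(path, kBps_to_icc(100000), kBps_to_icc(200000));
 *	if (ret)
 *		return ret;
 *	(use the hardware)
 *	icc_set_bw(path, 0, 0);
 *	icc_put(path);
 */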
746 
747 static int __icc_enable(struct icc_path *path, bool enable)
748 {
749 	int i;
750 
751 	if (!path)
752 		return 0;
753 
754 	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
755 		return -EINVAL;
756 
757 	mutex_lock(&icc_lock);
758 
759 	for (i = 0; i < path->num_nodes; i++)
760 		path->reqs[i].enabled = enable;
761 
762 	mutex_unlock(&icc_lock);
763 
764 	return icc_set_bw(path, path->reqs[0].avg_bw,
765 			  path->reqs[0].peak_bw);
766 }
767 
768 int icc_enable(struct icc_path *path)
769 {
770 	return __icc_enable(path, true);
771 }
772 EXPORT_SYMBOL_GPL(icc_enable);
773 
774 int icc_disable(struct icc_path *path)
775 {
776 	return __icc_enable(path, false);
777 }
778 EXPORT_SYMBOL_GPL(icc_disable);
779 
780 /**
781  * icc_put() - release the reference to the icc_path
782  * @path: interconnect path
783  *
784  * Use this function to release the constraints on a path when the path is
785  * no longer needed. The constraints will be re-aggregated.
786  */
787 void icc_put(struct icc_path *path)
788 {
789 	struct icc_node *node;
790 	size_t i;
791 	int ret;
792 
793 	if (!path || WARN_ON(IS_ERR(path)))
794 		return;
795 
796 	ret = icc_set_bw(path, 0, 0);
797 	if (ret)
798 		pr_err("%s: error (%d)\n", __func__, ret);
799 
800 	mutex_lock(&icc_lock);
801 	mutex_lock(&icc_bw_lock);
802 
803 	for (i = 0; i < path->num_nodes; i++) {
804 		node = path->reqs[i].node;
805 		hlist_del(&path->reqs[i].req_node);
806 		if (!WARN_ON(!node->provider->users))
807 			node->provider->users--;
808 	}
809 
810 	mutex_unlock(&icc_bw_lock);
811 	mutex_unlock(&icc_lock);
812 
813 	kfree(path->name);
814 	kfree(path);
815 }
816 EXPORT_SYMBOL_GPL(icc_put);
817 
818 static struct icc_node *icc_node_create_nolock(int id)
819 {
820 	struct icc_node *node;
821 
822 	if (id >= ICC_DYN_ID_START)
823 		return ERR_PTR(-EINVAL);
824 
825 	/* check if node already exists */
826 	node = node_find(id);
827 	if (node)
828 		return node;
829 
830 	node = kzalloc(sizeof(*node), GFP_KERNEL);
831 	if (!node)
832 		return ERR_PTR(-ENOMEM);
833 
834 	/* dynamic id allocation */
835 	if (id == ICC_ALLOC_DYN_ID)
836 		id = idr_alloc(&icc_idr, node, ICC_DYN_ID_START, 0, GFP_KERNEL);
837 	else
838 		id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
839 
840 	if (id < 0) {
841 		WARN(1, "%s: couldn't get idr\n", __func__);
842 		kfree(node);
843 		return ERR_PTR(id);
844 	}
845 
846 	node->id = id;
847 
848 	return node;
849 }
850 
851 /**
852  * icc_node_create_dyn() - create a node with dynamic id
853  *
854  * Return: icc_node pointer on success, or ERR_PTR() on error
855  */
856 struct icc_node *icc_node_create_dyn(void)
857 {
858 	struct icc_node *node;
859 
860 	mutex_lock(&icc_lock);
861 
862 	node = icc_node_create_nolock(ICC_ALLOC_DYN_ID);
863 
864 	mutex_unlock(&icc_lock);
865 
866 	return node;
867 }
868 EXPORT_SYMBOL_GPL(icc_node_create_dyn);
869 
870 /**
871  * icc_node_create() - create a node
872  * @id: node id
873  *
874  * Return: icc_node pointer on success, or ERR_PTR() on error
875  */
876 struct icc_node *icc_node_create(int id)
877 {
878 	struct icc_node *node;
879 
880 	mutex_lock(&icc_lock);
881 
882 	node = icc_node_create_nolock(id);
883 
884 	mutex_unlock(&icc_lock);
885 
886 	return node;
887 }
888 EXPORT_SYMBOL_GPL(icc_node_create);
889 
890 /**
891  * icc_node_destroy() - destroy a node
892  * @id: node id
893  */
894 void icc_node_destroy(int id)
895 {
896 	struct icc_node *node;
897 
898 	mutex_lock(&icc_lock);
899 
900 	node = node_find(id);
901 	if (node) {
902 		idr_remove(&icc_idr, node->id);
903 		WARN_ON(!hlist_empty(&node->req_list));
904 	}
905 
906 	mutex_unlock(&icc_lock);
907 
908 	if (!node)
909 		return;
910 
911 	kfree(node->links);
912 	if (node->id >= ICC_DYN_ID_START)
913 		kfree(node->name);
914 	kfree(node);
915 }
916 EXPORT_SYMBOL_GPL(icc_node_destroy);
917 
918 /**
919  * icc_node_set_name() - set node name
920  * @node: node
921  * @provider: node provider
922  * @name: node name
923  *
924  * Return: 0 on success, or -ENOMEM on allocation failure
925  */
926 int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider, const char *name)
927 {
928 	if (node->id >= ICC_DYN_ID_START) {
929 		node->name = kasprintf(GFP_KERNEL, "%s@%s", name,
930 				       dev_name(provider->dev));
931 		if (!node->name)
932 			return -ENOMEM;
933 	} else {
934 		node->name = name;
935 	}
936 
937 	return 0;
938 }
939 EXPORT_SYMBOL_GPL(icc_node_set_name);
940 
941 /**
942  * icc_link_nodes() - create link between two nodes
943  * @src_node: source node
944  * @dst_node: destination node
945  *
946  * Create a link between two nodes. The nodes might belong to different
947  * interconnect providers and the @dst_node might not exist (if the
948  * provider driver has not probed yet). So just create the @dst_node
949  * and when the actual provider driver is probed, the rest of the node
950  * data is filled.
951  *
952  * Return: 0 on success, or an error code otherwise
953  */
954 int icc_link_nodes(struct icc_node *src_node, struct icc_node **dst_node)
955 {
956 	struct icc_node **new;
957 	int ret = 0;
958 
959 	if (!src_node->provider)
960 		return -EINVAL;
961 
962 	mutex_lock(&icc_lock);
963 
964 	if (!*dst_node) {
965 		*dst_node = icc_node_create_nolock(ICC_ALLOC_DYN_ID);
966 
967 		if (IS_ERR(*dst_node)) {
968 			ret = PTR_ERR(*dst_node);
969 			goto out;
970 		}
971 	}
972 
973 	new = krealloc(src_node->links,
974 		       (src_node->num_links + 1) * sizeof(*src_node->links),
975 		       GFP_KERNEL);
976 	if (!new) {
977 		ret = -ENOMEM;
978 		goto out;
979 	}
980 
981 	src_node->links = new;
982 	src_node->links[src_node->num_links++] = *dst_node;
983 
984 out:
985 	mutex_unlock(&icc_lock);
986 
987 	return ret;
988 }
989 EXPORT_SYMBOL_GPL(icc_link_nodes);
990 
991 /**
992  * icc_link_create() - create a link between two nodes
993  * @node: source node id
994  * @dst_id: destination node id
995  *
996  * Create a link between two nodes. The nodes might belong to different
997  * interconnect providers and the @dst_id node might not exist (if the
998  * provider driver has not probed yet). So just create the @dst_id node
999  * and when the actual provider driver is probed, the rest of the node
1000  * data is filled.
1001  *
1002  * Return: 0 on success, or an error code otherwise
1003  */
1004 int icc_link_create(struct icc_node *node, const int dst_id)
1005 {
1006 	struct icc_node *dst;
1007 	struct icc_node **new;
1008 	int ret = 0;
1009 
1010 	if (!node->provider)
1011 		return -EINVAL;
1012 
1013 	mutex_lock(&icc_lock);
1014 
1015 	dst = node_find(dst_id);
1016 	if (!dst) {
1017 		dst = icc_node_create_nolock(dst_id);
1018 
1019 		if (IS_ERR(dst)) {
1020 			ret = PTR_ERR(dst);
1021 			goto out;
1022 		}
1023 	}
1024 
1025 	new = krealloc(node->links,
1026 		       (node->num_links + 1) * sizeof(*node->links),
1027 		       GFP_KERNEL);
1028 	if (!new) {
1029 		ret = -ENOMEM;
1030 		goto out;
1031 	}
1032 
1033 	node->links = new;
1034 	node->links[node->num_links++] = dst;
1035 
1036 out:
1037 	mutex_unlock(&icc_lock);
1038 
1039 	return ret;
1040 }
1041 EXPORT_SYMBOL_GPL(icc_link_create);
1042 
1043 /**
1044  * icc_node_add() - add interconnect node to interconnect provider
1045  * @node: pointer to the interconnect node
1046  * @provider: pointer to the interconnect provider
1047  */
1048 void icc_node_add(struct icc_node *node, struct icc_provider *provider)
1049 {
1050 	if (WARN_ON(node->provider))
1051 		return;
1052 
1053 	mutex_lock(&icc_lock);
1054 	mutex_lock(&icc_bw_lock);
1055 
1056 	node->provider = provider;
1057 	list_add_tail(&node->node_list, &provider->nodes);
1058 
1059 	/* get the initial bandwidth values and sync them with hardware */
1060 	if (provider->get_bw) {
1061 		provider->get_bw(node, &node->init_avg, &node->init_peak);
1062 	} else {
1063 		node->init_avg = INT_MAX;
1064 		node->init_peak = INT_MAX;
1065 	}
1066 	node->avg_bw = node->init_avg;
1067 	node->peak_bw = node->init_peak;
1068 
1069 	if (node->avg_bw || node->peak_bw) {
1070 		if (provider->pre_aggregate)
1071 			provider->pre_aggregate(node);
1072 
1073 		if (provider->aggregate)
1074 			provider->aggregate(node, 0, node->init_avg, node->init_peak,
1075 					    &node->avg_bw, &node->peak_bw);
1076 		if (provider->set)
1077 			provider->set(node, node);
1078 	}
1079 
1080 	node->avg_bw = 0;
1081 	node->peak_bw = 0;
1082 
1083 	mutex_unlock(&icc_bw_lock);
1084 	mutex_unlock(&icc_lock);
1085 }
1086 EXPORT_SYMBOL_GPL(icc_node_add);
1087 
1088 /**
1089  * icc_node_del() - delete interconnect node from interconnect provider
1090  * @node: pointer to the interconnect node
1091  */
1092 void icc_node_del(struct icc_node *node)
1093 {
1094 	mutex_lock(&icc_lock);
1095 
1096 	list_del(&node->node_list);
1097 
1098 	mutex_unlock(&icc_lock);
1099 }
1100 EXPORT_SYMBOL_GPL(icc_node_del);
1101 
1102 /**
1103  * icc_nodes_remove() - remove all previously added nodes from provider
1104  * @provider: the interconnect provider we are removing nodes from
1105  *
1106  * Return: 0 on success, or an error code otherwise
1107  */
1108 int icc_nodes_remove(struct icc_provider *provider)
1109 {
1110 	struct icc_node *n, *tmp;
1111 
1112 	if (WARN_ON(IS_ERR_OR_NULL(provider)))
1113 		return -EINVAL;
1114 
1115 	list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
1116 		icc_node_del(n);
1117 		icc_node_destroy(n->id);
1118 	}
1119 
1120 	return 0;
1121 }
1122 EXPORT_SYMBOL_GPL(icc_nodes_remove);
1123 
1124 /**
1125  * icc_provider_init() - initialize a new interconnect provider
1126  * @provider: the interconnect provider to initialize
1127  *
1128  * Must be called before adding nodes to the provider.
1129  */
1130 void icc_provider_init(struct icc_provider *provider)
1131 {
1132 	WARN_ON(!provider->set);
1133 
1134 	INIT_LIST_HEAD(&provider->nodes);
1135 }
1136 EXPORT_SYMBOL_GPL(icc_provider_init);
1137 
1138 /**
1139  * icc_provider_register() - register a new interconnect provider
1140  * @provider: the interconnect provider to register
1141  *
1142  * Return: 0 on success, or an error code otherwise
1143  */
1144 int icc_provider_register(struct icc_provider *provider)
1145 {
1146 	if (WARN_ON(!provider->xlate && !provider->xlate_extended))
1147 		return -EINVAL;
1148 
1149 	mutex_lock(&icc_lock);
1150 	list_add_tail(&provider->provider_list, &icc_providers);
1151 	mutex_unlock(&icc_lock);
1152 
1153 	dev_dbg(provider->dev, "interconnect provider registered\n");
1154 
1155 	return 0;
1156 }
1157 EXPORT_SYMBOL_GPL(icc_provider_register);
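/*
 * Editorial illustration (not part of the original file): the expected
 * provider bring-up order, sketched with hypothetical node ids A_ID and B_ID
 * and simplified error handling:
 *
 *	icc_provider_init(provider);
 *
 *	node = icc_node_create(A_ID);
 *	icc_node_add(node, provider);
 *	icc_link_create(node, B_ID);
 *
 *	node = icc_node_create(B_ID);
 *	icc_node_add(node, provider);
 *
 *	ret = icc_provider_register(provider);
 *	if (ret)
 *		icc_nodes_remove(provider);
 */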
1158 
1159 /**
1160  * icc_provider_deregister() - deregister an interconnect provider
1161  * @provider: the interconnect provider to deregister
1162  */
1163 void icc_provider_deregister(struct icc_provider *provider)
1164 {
1165 	mutex_lock(&icc_lock);
1166 	WARN_ON(provider->users);
1167 
1168 	list_del(&provider->provider_list);
1169 	mutex_unlock(&icc_lock);
1170 }
1171 EXPORT_SYMBOL_GPL(icc_provider_deregister);
1172 
1173 static const struct of_device_id __maybe_unused ignore_list[] = {
1174 	{ .compatible = "qcom,sc7180-ipa-virt" },
1175 	{ .compatible = "qcom,sc8180x-ipa-virt" },
1176 	{ .compatible = "qcom,sdx55-ipa-virt" },
1177 	{ .compatible = "qcom,sm8150-ipa-virt" },
1178 	{ .compatible = "qcom,sm8250-ipa-virt" },
1179 	{}
1180 };
1181 
1182 static int of_count_icc_providers(struct device_node *np)
1183 {
1184 	struct device_node *child;
1185 	int count = 0;
1186 
1187 	for_each_available_child_of_node(np, child) {
1188 		if (of_property_present(child, "#interconnect-cells") &&
1189 		    likely(!of_match_node(ignore_list, child)))
1190 			count++;
1191 		count += of_count_icc_providers(child);
1192 	}
1193 
1194 	return count;
1195 }
1196 
1197 void icc_sync_state(struct device *dev)
1198 {
1199 	struct icc_provider *p;
1200 	struct icc_node *n;
1201 	static int count;
1202 
1203 	count++;
1204 
1205 	if (count < providers_count)
1206 		return;
1207 
1208 	mutex_lock(&icc_lock);
1209 	mutex_lock(&icc_bw_lock);
1210 	synced_state = true;
1211 	list_for_each_entry(p, &icc_providers, provider_list) {
1212 		dev_dbg(p->dev, "interconnect provider is in synced state\n");
1213 		list_for_each_entry(n, &p->nodes, node_list) {
1214 			if (n->init_avg || n->init_peak) {
1215 				n->init_avg = 0;
1216 				n->init_peak = 0;
1217 				aggregate_requests(n);
1218 				p->set(n, n);
1219 			}
1220 		}
1221 	}
1222 	mutex_unlock(&icc_bw_lock);
1223 	mutex_unlock(&icc_lock);
1224 }
1225 EXPORT_SYMBOL_GPL(icc_sync_state);
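/*
 * Editorial illustration (not part of the original file): provider drivers
 * typically opt in to the initial-bandwidth floor removal above by pointing
 * their driver's sync_state hook at this helper, e.g. in a hypothetical
 * platform driver:
 *
 *	static struct platform_driver foo_icc_driver = {
 *		.probe = foo_icc_probe,
 *		.driver = {
 *			.name = "foo-interconnect",
 *			.sync_state = icc_sync_state,
 *		},
 *	};
 */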
1226 
1227 static int __init icc_init(void)
1228 {
1229 	struct device_node *root;
1230 
1231 	/* Teach lockdep about lock ordering wrt. shrinker: */
1232 	fs_reclaim_acquire(GFP_KERNEL);
1233 	might_lock(&icc_bw_lock);
1234 	fs_reclaim_release(GFP_KERNEL);
1235 
1236 	root = of_find_node_by_path("/");
1237 
1238 	providers_count = of_count_icc_providers(root);
1239 	of_node_put(root);
1240 
1241 	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
1242 	debugfs_create_file("interconnect_summary", 0444,
1243 			    icc_debugfs_dir, NULL, &icc_summary_fops);
1244 	debugfs_create_file("interconnect_graph", 0444,
1245 			    icc_debugfs_dir, NULL, &icc_graph_fops);
1246 
1247 	icc_debugfs_client_init(icc_debugfs_dir);
1248 
1249 	return 0;
1250 }
1251 
1252 device_initcall(icc_init);
1253