// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Linaro Ltd
 */

#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "icc-common.h"
#include "icc-rpm.h"

/* QNOC QoS */
#define QNOC_QOS_MCTL_LOWn_ADDR(n)	(0x8 + ((n) * 0x1000))
#define QNOC_QOS_MCTL_DFLT_PRIO_MASK	0x70
#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT	4
#define QNOC_QOS_MCTL_URGFWD_EN_MASK	0x8
#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT	3

/* BIMC QoS */
#define M_BKE_REG_BASE(n)		(0x300 + (0x4000 * (n)))
#define M_BKE_EN_ADDR(n)		(M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n)	(M_BKE_REG_BASE(n) + 0x40 + (0x4 * (i)))

#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK	0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK	0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK	0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT	0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f

#define M_BKE_EN_EN_BMASK		0x1

/* NoC QoS */
#define NOC_QOS_PRIORITYn_ADDR(n)	(0x8 + ((n) * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK	0xc
#define NOC_QOS_PRIORITY_P0_MASK	0x3
#define NOC_QOS_PRIORITY_P1_SHIFT	0x2

#define NOC_QOS_MODEn_ADDR(n)		(0xc + ((n) * 0x1000))
#define NOC_QOS_MODEn_MASK		0x3

#define NOC_QOS_MODE_FIXED_VAL		0x0
#define NOC_QOS_MODE_BYPASS_VAL		0x2

#define ICC_BUS_CLK_MIN_RATE		19200ULL /* kHz */

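/**
 * qcom_icc_set_qnoc_qos - set QNOC QoS parameters for a node
 * @src: icc node to program
 *
 * Programs the default priority and the urgent forwarding enable bit in the
 * node's per-port MCTL_LOW register (one register block per QoS port).
 */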
static int qcom_icc_set_qnoc_qos(struct icc_node *src)
{
	struct icc_provider *provider = src->provider;
	struct qcom_icc_provider *qp = to_qcom_provider(provider);
	struct qcom_icc_node *qn = src->data;
	struct qcom_icc_qos *qos = &qn->qos;
	int rc;

	rc = regmap_update_bits(qp->regmap,
				qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
				QNOC_QOS_MCTL_DFLT_PRIO_MASK,
				qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
				  QNOC_QOS_MCTL_URGFWD_EN_MASK,
				  !!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
}

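/**
 * qcom_icc_bimc_set_qos_health - program one M_BKE_HEALTH_CFG register
 * @qp: QCOM interconnect provider
 * @qos: QoS parameters to apply
 * @regnum: health register index (0-3)
 *
 * Each BKE port has four health registers; the LIMITCMDS field only exists
 * on registers 0-2.
 */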
static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
					struct qcom_icc_qos *qos,
					int regnum)
{
	u32 val;
	u32 mask;

	val = qos->prio_level;
	mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;

	val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
	mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;

	/* LIMITCMDS is not present on M_BKE_HEALTH_3 */
	if (regnum != 3) {
		val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
		mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
				  mask, val);
}

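/**
 * qcom_icc_set_bimc_qos - set BIMC QoS parameters for a node
 * @src: icc node to program
 *
 * When the node is not in Bypass mode, programs all four health registers
 * and enables the BKE; otherwise the BKE is disabled.
 */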
static int qcom_icc_set_bimc_qos(struct icc_node *src)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	u32 val = 0;
	int i, rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
		mode = qn->qos.qos_mode;

	/*
	 * QoS Priority: the QoS Health parameters are only considered
	 * when we are NOT in Bypass mode.
	 */
	if (mode != NOC_QOS_MODE_BYPASS) {
		for (i = 3; i >= 0; i--) {
			rc = qcom_icc_bimc_set_qos_health(qp, &qn->qos, i);
			if (rc)
				return rc;
		}

		/* Set BKE_EN to 1 when in Fixed, Regulator or Limiter mode */
		val = 1;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
				  M_BKE_EN_EN_BMASK, val);
}

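/**
 * qcom_icc_noc_set_qos_priority - set NoC QoS priorities for a port
 * @qp: QCOM interconnect provider
 * @qos: QoS parameters containing the P0/P1 priority levels
 */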
static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
					 struct qcom_icc_qos *qos)
{
	u32 val;
	int rc;

	/* Must be updated one at a time, P1 first, P0 last */
	val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
	rc = regmap_update_bits(qp->regmap,
				qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				NOC_QOS_PRIORITY_P1_MASK, val);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				  NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}

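/**
 * qcom_icc_set_noc_qos - set NoC QoS parameters for a node
 * @src: icc node to program
 *
 * Nodes with a negative QoS port have their vote aggregated on the parent
 * node and are skipped. Fixed mode additionally programs the priority
 * register before the mode register is written.
 */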
static int qcom_icc_set_noc_qos(struct icc_node *src)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS_VAL;
	int rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_port < 0) {
		dev_dbg(src->provider->dev,
			"NoC QoS: Skipping %s: vote aggregated on parent.\n",
			qn->name);
		return 0;
	}

	if (qn->qos.qos_mode == NOC_QOS_MODE_FIXED) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n", qn->name);
		mode = NOC_QOS_MODE_FIXED_VAL;
		rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
		if (rc)
			return rc;
	} else if (qn->qos.qos_mode == NOC_QOS_MODE_BYPASS) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n", qn->name);
		mode = NOC_QOS_MODE_BYPASS_VAL;
	} else {
		/* How did we get here? */
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
				  NOC_QOS_MODEn_MASK, mode);
}

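/**
 * qcom_icc_qos_set - dispatch QoS programming based on the bus type
 * @node: icc node to program
 */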
static int qcom_icc_qos_set(struct icc_node *node)
{
	struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
	struct qcom_icc_node *qn = node->data;

	dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);

	switch (qp->type) {
	case QCOM_ICC_BIMC:
		return qcom_icc_set_bimc_qos(node);
	case QCOM_ICC_QNOC:
		return qcom_icc_set_qnoc_qos(node);
	default:
		return qcom_icc_set_noc_qos(node);
	}
}

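/**
 * qcom_icc_rpm_set - send bandwidth votes for a node to the RPM over SMD
 * @qn: QCOM icc node to vote for
 * @bw: per-context bandwidth values, in icc units (kB/s)
 * @ignore_enxio: don't treat -ENXIO replies from the RPM as fatal
 *
 * AP-owned nodes are not managed through the RPM and are skipped. Master
 * and slave votes are sent separately, for every RPM context.
 */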
static int qcom_icc_rpm_set(struct qcom_icc_node *qn, u64 *bw, bool ignore_enxio)
{
	int ret, rpm_ctx = 0;
	u64 bw_bps;

	if (qn->qos.ap_owned)
		return 0;

	for (rpm_ctx = 0; rpm_ctx < QCOM_SMD_RPM_STATE_NUM; rpm_ctx++) {
		bw_bps = icc_units_to_bps(bw[rpm_ctx]);

		if (qn->mas_rpm_id != -1) {
			ret = qcom_icc_rpm_smd_send(rpm_ctx,
						    RPM_BUS_MASTER_REQ,
						    qn->mas_rpm_id,
						    bw_bps);
			if (ret) {
				pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
				       qn->mas_rpm_id, ret);
				if (ret != -ENXIO || !ignore_enxio)
					return ret;
			}
		}

		if (qn->slv_rpm_id != -1) {
			ret = qcom_icc_rpm_smd_send(rpm_ctx,
						    RPM_BUS_SLAVE_REQ,
						    qn->slv_rpm_id,
						    bw_bps);
			if (ret) {
				pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
				       qn->slv_rpm_id, ret);
				if (ret != -ENXIO || !ignore_enxio)
					return ret;
			}
		}
	}

	return 0;
}

/**
 * qcom_icc_pre_bw_aggregate - clean up values before re-aggregating requests
 * @node: icc node to operate on
 */
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
{
	struct qcom_icc_node *qn;
	size_t i;

	qn = node->data;
	for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
		qn->sum_avg[i] = 0;
		qn->max_peak[i] = 0;
	}
}

/**
 * qcom_icc_bw_aggregate - aggregate bw for buckets indicated by tag
 * @node: node to aggregate
 * @tag: tag to indicate which buckets to aggregate
 * @avg_bw: new bw to sum aggregate
 * @peak_bw: new bw to max aggregate
 * @agg_avg: existing aggregate avg bw val
 * @agg_peak: existing aggregate peak bw val
 */
static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
				 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	size_t i;
	struct qcom_icc_node *qn;

	qn = node->data;

	if (!tag)
		tag = RPM_ALWAYS_TAG;

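	/*
	 * The tag is a bitmask over the RPM contexts: bit i selects bucket i.
	 * Untagged requests default to RPM_ALWAYS_TAG above, so they are
	 * accounted in every context.
	 */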
	for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
		if (tag & BIT(i)) {
			qn->sum_avg[i] += avg_bw;
			qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
		}
	}

	*agg_avg += avg_bw;
	*agg_peak = max_t(u32, *agg_peak, peak_bw);
	return 0;
}

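/**
 * qcom_icc_calc_rate - convert aggregated bandwidth into a bus clock rate
 * @qp: QCOM interconnect provider
 * @qn: QCOM icc node to calculate the rate for
 * @ctx: RPM context (bucket) to use
 *
 * The average bandwidth is spread across the node's channels and multiplied
 * by ab_coeff/100, the peak bandwidth is multiplied by 100/ib_coeff, and the
 * larger of the two is divided by the bus width.
 *
 * Illustrative example (made-up values): sum_avg = 800000, channels = 2,
 * ab_coeff = 150, buswidth = 8 gives (800000 / 2) * 150 / 100 / 8 = 75000.
 */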
static u64 qcom_icc_calc_rate(struct qcom_icc_provider *qp, struct qcom_icc_node *qn, int ctx)
{
	u64 agg_avg_rate, agg_peak_rate, agg_rate;

	if (qn->channels)
		agg_avg_rate = div_u64(qn->sum_avg[ctx], qn->channels);
	else
		agg_avg_rate = qn->sum_avg[ctx];

	if (qn->ab_coeff) {
		agg_avg_rate = agg_avg_rate * qn->ab_coeff;
		agg_avg_rate = div_u64(agg_avg_rate, 100);
	}

	if (qn->ib_coeff) {
		agg_peak_rate = qn->max_peak[ctx] * 100;
		agg_peak_rate = div_u64(agg_peak_rate, qn->ib_coeff);
	} else {
		agg_peak_rate = qn->max_peak[ctx];
	}

	agg_rate = max_t(u64, agg_avg_rate, agg_peak_rate);

	return div_u64(agg_rate, qn->buswidth);
}

/**
 * qcom_icc_bus_aggregate - calculate bus clock rates by traversing all nodes
 * @provider: generic interconnect provider
 * @agg_clk_rate: array containing the aggregated clock rates in kHz
 */
static void qcom_icc_bus_aggregate(struct icc_provider *provider, u64 *agg_clk_rate)
{
	struct qcom_icc_provider *qp = to_qcom_provider(provider);
	struct qcom_icc_node *qn;
	struct icc_node *node;
	int ctx;

	/*
	 * Iterate nodes on the provider, aggregate bandwidth requests for
	 * every bucket and convert them into bus clock rates.
	 */
	list_for_each_entry(node, &provider->nodes, node_list) {
		qn = node->data;
		for (ctx = 0; ctx < QCOM_SMD_RPM_STATE_NUM; ctx++) {
			agg_clk_rate[ctx] = max_t(u64, agg_clk_rate[ctx],
						  qcom_icc_calc_rate(qp, qn, ctx));
		}
	}
}

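/**
 * qcom_icc_set - apply the aggregated bandwidth requests for a path
 * @src: source icc node
 * @dst: destination icc node, may be NULL
 *
 * Sends the RPM bandwidth votes for both endpoints, then scales the
 * provider's bus clock (and, if present, the node-specific one) to the
 * highest rate required, for both the active and sleep contexts.
 */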
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
	struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
	u64 agg_clk_rate[QCOM_SMD_RPM_STATE_NUM] = { 0 };
	struct icc_provider *provider;
	struct qcom_icc_provider *qp;
	u64 active_rate, sleep_rate;
	int ret;

	src_qn = src->data;
	if (dst)
		dst_qn = dst->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	qcom_icc_bus_aggregate(provider, agg_clk_rate);
	active_rate = agg_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE];
	sleep_rate = agg_clk_rate[QCOM_SMD_RPM_SLEEP_STATE];

	ret = qcom_icc_rpm_set(src_qn, src_qn->sum_avg, qp->ignore_enxio);
	if (ret)
		return ret;

	if (dst_qn) {
		ret = qcom_icc_rpm_set(dst_qn, dst_qn->sum_avg, qp->ignore_enxio);
		if (ret)
			return ret;
	}

	/* Some providers don't have a bus clock to scale */
	if (!qp->bus_clk_desc && !qp->bus_clk)
		return 0;

	/*
	 * Downstream checks whether the requested rate is zero, but it makes little sense
	 * to vote for a value that's below the lower threshold, so let's not do so.
	 */
	if (qp->keep_alive)
		active_rate = max(ICC_BUS_CLK_MIN_RATE, active_rate);

	/* Some providers have a non-RPM-owned bus clock - convert kHz->Hz for the CCF */
	if (qp->bus_clk) {
		active_rate = max_t(u64, active_rate, sleep_rate);
		/* ARM32 caps the clk_set_rate() argument to u32... Nothing we can do about that! */
		active_rate = min_t(u64, 1000ULL * active_rate, ULONG_MAX);
		return clk_set_rate(qp->bus_clk, active_rate);
	}

	/* RPM only accepts <=INT_MAX rates */
	active_rate = min_t(u64, active_rate, INT_MAX);
	sleep_rate = min_t(u64, sleep_rate, INT_MAX);

	if (active_rate != qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_ACTIVE_STATE,
						active_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
	}

	if (sleep_rate != qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_SLEEP_STATE,
						sleep_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
	}

	/* Handle the node-specific clock */
	if (!src_qn->bus_clk_desc)
		return 0;

	active_rate = qcom_icc_calc_rate(qp, src_qn, QCOM_SMD_RPM_ACTIVE_STATE);
	sleep_rate = qcom_icc_calc_rate(qp, src_qn, QCOM_SMD_RPM_SLEEP_STATE);

	if (active_rate != src_qn->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(src_qn->bus_clk_desc, QCOM_SMD_RPM_ACTIVE_STATE,
						active_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		src_qn->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
	}

	if (sleep_rate != src_qn->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(src_qn->bus_clk_desc, QCOM_SMD_RPM_SLEEP_STATE,
						sleep_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		src_qn->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
	}

	return 0;
}

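/**
 * qnoc_probe - probe a QCOM RPM interconnect provider
 * @pdev: platform device for the NoC
 *
 * Common probe path for the SMD RPM interconnect drivers: defers until the
 * RPM proxy is available, sets up the optional bus and interface clocks and
 * the QoS regmap, registers every node described in the match data and
 * programs the AP-owned QoS ports once.
 */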
int qnoc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct qcom_icc_desc *desc;
	struct icc_onecell_data *data;
	struct icc_provider *provider;
	struct qcom_icc_node * const *qnodes;
	struct qcom_icc_provider *qp;
	struct icc_node *node;
	size_t num_nodes, i;
	const char * const *cds = NULL;
	int cd_num;
	int ret;

	/* wait for the RPM proxy */
	if (!qcom_icc_rpm_smd_available())
		return -EPROBE_DEFER;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	qnodes = desc->nodes;
	num_nodes = desc->num_nodes;

	if (desc->num_intf_clocks) {
		cds = desc->intf_clocks;
		cd_num = desc->num_intf_clocks;
	} else {
		/* 0 intf clocks is perfectly fine */
		cd_num = 0;
	}

	qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return -ENOMEM;

	qp->intf_clks = devm_kcalloc(dev, cd_num, sizeof(*qp->intf_clks), GFP_KERNEL);
	if (!qp->intf_clks)
		return -ENOMEM;

	if (desc->bus_clk_desc) {
		/*
		 * The descriptor lives in the const match data; reference it
		 * directly rather than allocating a copy that would only be
		 * overwritten by this assignment anyway.
		 */
		qp->bus_clk_desc = desc->bus_clk_desc;
	} else {
		/* Some older SoCs may have a single non-RPM-owned bus clock. */
		qp->bus_clk = devm_clk_get_optional(dev, "bus");
		if (IS_ERR(qp->bus_clk))
			return PTR_ERR(qp->bus_clk);
	}

	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->num_nodes = num_nodes;

	qp->num_intf_clks = cd_num;
	for (i = 0; i < cd_num; i++)
		qp->intf_clks[i].id = cds[i];

	qp->ignore_enxio = desc->ignore_enxio;
	qp->keep_alive = desc->keep_alive;
	qp->type = desc->type;
	qp->qos_offset = desc->qos_offset;

	if (desc->regmap_cfg) {
		struct resource *res;
		void __iomem *mmio;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			/* Try parent's regmap */
			qp->regmap = dev_get_regmap(dev->parent, NULL);
			if (qp->regmap)
				goto regmap_done;
			return -ENODEV;
		}

		mmio = devm_ioremap_resource(dev, res);
		if (IS_ERR(mmio))
			return PTR_ERR(mmio);

		qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
		if (IS_ERR(qp->regmap)) {
			dev_err(dev, "Cannot regmap interconnect bus resource\n");
			return PTR_ERR(qp->regmap);
		}
	}

regmap_done:
	ret = clk_prepare_enable(qp->bus_clk);
	if (ret)
		return ret;

	ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
	if (ret)
		goto err_disable_unprepare_clk;

	provider = &qp->provider;
	provider->dev = dev;
	provider->set = qcom_icc_set;
	provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
	provider->aggregate = qcom_icc_bw_aggregate;
	provider->xlate_extended = qcom_icc_xlate_extended;
	provider->data = data;
	provider->get_bw = desc->get_bw;

	icc_provider_init(provider);

	/* If this fails, bus accesses will crash the platform! */
	ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
	if (ret)
		goto err_disable_unprepare_clk;

	for (i = 0; i < num_nodes; i++) {
		size_t j;

		if (!qnodes[i]->ab_coeff)
			qnodes[i]->ab_coeff = qp->ab_coeff;

		if (!qnodes[i]->ib_coeff)
			qnodes[i]->ib_coeff = qp->ib_coeff;

		node = icc_node_create(qnodes[i]->id);
		if (IS_ERR(node)) {
			clk_bulk_disable_unprepare(qp->num_intf_clks,
						   qp->intf_clks);
			ret = PTR_ERR(node);
			goto err_remove_nodes;
		}

		node->name = qnodes[i]->name;
		node->data = qnodes[i];
		icc_node_add(node, provider);

		for (j = 0; j < qnodes[i]->num_links; j++)
			icc_link_create(node, qnodes[i]->links[j]);

		/* Set QoS registers (we only need to do it once, generally) */
		if (qnodes[i]->qos.ap_owned &&
		    qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) {
			ret = qcom_icc_qos_set(node);
			if (ret) {
				clk_bulk_disable_unprepare(qp->num_intf_clks,
							   qp->intf_clks);
				goto err_remove_nodes;
			}
		}

		data->nodes[i] = node;
	}

	clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);

	ret = icc_provider_register(provider);
	if (ret)
		goto err_remove_nodes;

	platform_set_drvdata(pdev, qp);

	/* Populate child NoC devices if any */
	if (of_get_child_count(dev->of_node) > 0) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret)
			goto err_deregister_provider;
	}

	return 0;

err_deregister_provider:
	icc_provider_deregister(provider);
err_remove_nodes:
	icc_nodes_remove(provider);
err_disable_unprepare_clk:
	clk_disable_unprepare(qp->bus_clk);

	return ret;
}
EXPORT_SYMBOL(qnoc_probe);

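/**
 * qnoc_remove - tear down a QCOM RPM interconnect provider
 * @pdev: platform device for the NoC
 */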
void qnoc_remove(struct platform_device *pdev)
{
	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);

	icc_provider_deregister(&qp->provider);
	icc_nodes_remove(&qp->provider);
	clk_disable_unprepare(qp->bus_clk);
}
EXPORT_SYMBOL(qnoc_remove);