// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
// Copyright(c) 2015-2020 Intel Corporation.

/*
 * Bandwidth management algorithm based on 2^n gears
 */

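/*
 * Overview of the code below: sdw_compute_params() first selects a bus clock
 * and frame shape (sdw_compute_bus_params() / sdw_select_row_col()), then
 * sdw_compute_port_params() groups the active streams by sample rate and lane
 * and allocates frame columns to each group.
 */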
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/soundwire/sdw.h>
#include "bus.h"

#define SDW_STRM_RATE_GROUPING		1

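/*
 * struct sdw_group_params - parameters of one (rate, lane) group
 * @rate: sample rate shared by the streams in this group
 * @lane: data lane used by the streams in this group
 * @full_bw: curr_dr_freq / rate, i.e. bit slots per sample period
 * @payload_bw: sum of bps * channels over all ports in the group
 * @hwidth: number of frame columns allocated to the group
 */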
struct sdw_group_params {
        unsigned int rate;
        unsigned int lane;
        int full_bw;
        int payload_bw;
        int hwidth;
};

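/*
 * struct sdw_group - set of distinct (rate, lane) combinations found on the bus
 * @count: number of valid entries in @rates/@lanes
 * @max_size: allocated size of the @rates/@lanes arrays
 * @rates: sample rate of each entry
 * @lanes: lane of each entry
 */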
struct sdw_group {
        unsigned int count;
        unsigned int max_size;
        unsigned int *rates;
        unsigned int *lanes;
};

void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
                             struct sdw_transport_data *t_data)
{
        struct sdw_slave_runtime *s_rt = NULL;
        struct sdw_port_runtime *p_rt;
        int port_bo, sample_int;
        unsigned int rate, bps, ch = 0;
        unsigned int slave_total_ch;
        struct sdw_bus_params *b_params = &m_rt->bus->params;

        port_bo = t_data->block_offset;

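        /*
         * Walk every peripheral attached to this master runtime: ports on the
         * current lane are packed back-to-back in the payload region, so the
         * block offset advances by bps * channels after each port.
         */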
        list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
                rate = m_rt->stream->params.rate;
                bps = m_rt->stream->params.bps;
                sample_int = (m_rt->bus->params.curr_dr_freq / rate);
                slave_total_ch = 0;

                list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
                        if (p_rt->lane != t_data->lane)
                                continue;

                        ch = hweight32(p_rt->ch_mask);

                        sdw_fill_xport_params(&p_rt->transport_params,
                                              p_rt->num, false,
                                              SDW_BLK_GRP_CNT_1,
                                              sample_int, port_bo, port_bo >> 8,
                                              t_data->hstart,
                                              t_data->hstop,
                                              SDW_BLK_PKG_PER_PORT, p_rt->lane);

                        sdw_fill_port_params(&p_rt->port_params,
                                             p_rt->num, bps,
                                             SDW_PORT_FLOW_MODE_ISOCH,
                                             b_params->s_data_mode);

                        port_bo += bps * ch;
                        slave_total_ch += ch;
                }

                if (m_rt->direction == SDW_DATA_DIR_TX &&
                    m_rt->ch_count == slave_total_ch) {
                        /*
                         * Slave devices were configured to access all channels
                         * of the stream, which indicates that they operate in
                         * 'mirror mode'. Make sure we reset the port offset for
                         * the next device in the list
                         */
                        port_bo = t_data->block_offset;
                }
        }
}
EXPORT_SYMBOL(sdw_compute_slave_ports);

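/*
 * For BPT traffic on DP0, the data port simply spans the whole payload region
 * of the frame: hstart = 1, hstop = col - 1, zero block offset, and a sample
 * interval of bus->params.col bit slots.
 */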
static void sdw_compute_dp0_slave_ports(struct sdw_master_runtime *m_rt)
{
        struct sdw_bus *bus = m_rt->bus;
        struct sdw_slave_runtime *s_rt;
        struct sdw_port_runtime *p_rt;

        list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
                list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
                        sdw_fill_xport_params(&p_rt->transport_params, p_rt->num, false,
                                              SDW_BLK_GRP_CNT_1, bus->params.col, 0, 0, 1,
                                              bus->params.col - 1, SDW_BLK_PKG_PER_PORT, 0x0);

                        sdw_fill_port_params(&p_rt->port_params, p_rt->num, bus->params.col - 1,
                                             SDW_PORT_FLOW_MODE_ISOCH, SDW_PORT_DATA_MODE_NORMAL);
                }
        }
}

static void sdw_compute_dp0_master_ports(struct sdw_master_runtime *m_rt)
{
        struct sdw_port_runtime *p_rt;
        struct sdw_bus *bus = m_rt->bus;

        list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
                sdw_fill_xport_params(&p_rt->transport_params, p_rt->num, false,
                                      SDW_BLK_GRP_CNT_1, bus->params.col, 0, 0, 1,
                                      bus->params.col - 1, SDW_BLK_PKG_PER_PORT, 0x0);

                sdw_fill_port_params(&p_rt->port_params, p_rt->num, bus->params.col - 1,
                                     SDW_PORT_FLOW_MODE_ISOCH, SDW_PORT_DATA_MODE_NORMAL);
        }
}

static void sdw_compute_dp0_port_params(struct sdw_bus *bus)
{
        struct sdw_master_runtime *m_rt;

        list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
                sdw_compute_dp0_master_ports(m_rt);
                sdw_compute_dp0_slave_ports(m_rt);
        }
}

static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
                                     struct sdw_group_params *params,
                                     int *port_bo, int hstop)
{
        struct sdw_transport_data t_data = {0};
        struct sdw_port_runtime *p_rt;
        struct sdw_bus *bus = m_rt->bus;
        struct sdw_bus_params *b_params = &bus->params;
        int sample_int, hstart = 0;
        unsigned int rate, bps, ch;

        rate = m_rt->stream->params.rate;
        bps = m_rt->stream->params.bps;
        ch = m_rt->ch_count;
        sample_int = (bus->params.curr_dr_freq / rate);

        if (rate != params->rate)
                return;

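        /*
         * This group owns a window of params->hwidth contiguous columns
         * ending at hstop; derive hstart from it.
         */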
        t_data.hstop = hstop;
        hstart = hstop - params->hwidth + 1;
        t_data.hstart = hstart;

        list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
                if (p_rt->lane != params->lane)
                        continue;

                sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
                                      false, SDW_BLK_GRP_CNT_1, sample_int,
                                      *port_bo, (*port_bo) >> 8, hstart, hstop,
                                      SDW_BLK_PKG_PER_PORT, p_rt->lane);

                sdw_fill_port_params(&p_rt->port_params,
                                     p_rt->num, bps,
                                     SDW_PORT_FLOW_MODE_ISOCH,
                                     b_params->m_data_mode);

                /* Check for first entry */
                if (!(p_rt == list_first_entry(&m_rt->port_list,
                                               struct sdw_port_runtime,
                                               port_node))) {
                        (*port_bo) += bps * ch;
                        continue;
                }

                t_data.hstart = hstart;
                t_data.hstop = hstop;
                t_data.block_offset = *port_bo;
                t_data.sub_block_offset = 0;
                (*port_bo) += bps * ch;
        }

        t_data.lane = params->lane;
        sdw_compute_slave_ports(m_rt, &t_data);
}

static void _sdw_compute_port_params(struct sdw_bus *bus,
                                     struct sdw_group_params *params, int count)
{
        struct sdw_master_runtime *m_rt;
        int port_bo, i, l;
        int hstop;

        /* Run loop for all groups to compute transport parameters */
        for (l = 0; l < SDW_MAX_LANES; l++) {
                if (l > 0 && !bus->lane_used_bandwidth[l])
                        continue;
                /* reset hstop for each lane */
                hstop = bus->params.col - 1;
                for (i = 0; i < count; i++) {
                        if (params[i].lane != l)
                                continue;
                        port_bo = 1;

                        list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
                                /*
                                 * Only runtimes with CONFIGURED, PREPARED, ENABLED, and DISABLED
                                 * states should be included in the bandwidth calculation.
                                 */
                                if (m_rt->stream->state > SDW_STREAM_DISABLED ||
                                    m_rt->stream->state < SDW_STREAM_CONFIGURED)
                                        continue;
                                sdw_compute_master_ports(m_rt, &params[i], &port_bo, hstop);
                        }

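                        /*
                         * The next group on this lane is stacked immediately
                         * below the columns just allocated.
                         */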
                        hstop = hstop - params[i].hwidth;
                }
        }
}

static int sdw_compute_group_params(struct sdw_bus *bus,
                                    struct sdw_stream_runtime *stream,
                                    struct sdw_group_params *params,
                                    struct sdw_group *group)
{
        struct sdw_master_runtime *m_rt;
        struct sdw_port_runtime *p_rt;
        int sel_col = bus->params.col;
        unsigned int rate, bps, ch;
        int i, l, column_needed;

        /* Calculate bandwidth per group */
        for (i = 0; i < group->count; i++) {
                params[i].rate = group->rates[i];
                params[i].lane = group->lanes[i];
                params[i].full_bw = bus->params.curr_dr_freq / params[i].rate;
        }

        list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
                if (m_rt->stream == stream) {
                        /* Only the runtime being prepared should be added */
                        if (stream->state != SDW_STREAM_CONFIGURED)
                                continue;
                } else {
                        /*
                         * Include runtimes with running (ENABLED/PREPARED state) and
                         * paused (DISABLED state) streams
                         */
                        if (m_rt->stream->state != SDW_STREAM_ENABLED &&
                            m_rt->stream->state != SDW_STREAM_PREPARED &&
                            m_rt->stream->state != SDW_STREAM_DISABLED)
                                continue;
                }
                list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
                        rate = m_rt->stream->params.rate;
                        bps = m_rt->stream->params.bps;
                        ch = hweight32(p_rt->ch_mask);

                        for (i = 0; i < group->count; i++) {
                                if (rate == params[i].rate && p_rt->lane == params[i].lane)
                                        params[i].payload_bw += bps * ch;
                        }
                }
        }

        for (l = 0; l < SDW_MAX_LANES; l++) {
                if (l > 0 && !bus->lane_used_bandwidth[l])
                        continue;
                /* reset column_needed for each lane */
                column_needed = 0;
                for (i = 0; i < group->count; i++) {
                        if (params[i].lane != l)
                                continue;

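                        /*
                         * Columns needed by this group on this lane:
                         * hwidth = ceil(sel_col * payload_bw / full_bw).
                         */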
                        params[i].hwidth = (sel_col * params[i].payload_bw +
                                            params[i].full_bw - 1) / params[i].full_bw;

                        column_needed += params[i].hwidth;
                        /* There is no control column for lane 1 and above */
                        if (column_needed > sel_col)
                                return -EINVAL;
                        /* Column 0 is control column on lane 0 */
                        if (params[i].lane == 0 && column_needed > sel_col - 1)
                                return -EINVAL;
                }
        }

        return 0;
}

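/*
 * Record the (rate, lane) pair in the group if it is not already present,
 * growing the rates/lanes arrays by one element when needed.
 */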
static int sdw_add_element_group_count(struct sdw_group *group,
                                       unsigned int rate, unsigned int lane)
{
        int num = group->count;
        int i;

        for (i = 0; i <= num; i++) {
                if (rate == group->rates[i] && lane == group->lanes[i])
                        break;

                if (i != num)
                        continue;

                if (group->count >= group->max_size) {
                        unsigned int *rates;
                        unsigned int *lanes;

                        group->max_size += 1;
                        rates = krealloc(group->rates,
                                         (sizeof(int) * group->max_size),
                                         GFP_KERNEL);
                        if (!rates)
                                return -ENOMEM;

                        group->rates = rates;

                        lanes = krealloc(group->lanes,
                                         (sizeof(int) * group->max_size),
                                         GFP_KERNEL);
                        if (!lanes)
                                return -ENOMEM;

                        group->lanes = lanes;
                }

                group->rates[group->count] = rate;
                group->lanes[group->count++] = lane;
        }

        return 0;
}

static int sdw_get_group_count(struct sdw_bus *bus,
                               struct sdw_group *group)
{
        struct sdw_master_runtime *m_rt;
        struct sdw_port_runtime *p_rt;
        unsigned int rate;
        int ret = 0;

        group->count = 0;
        group->max_size = SDW_STRM_RATE_GROUPING;
        group->rates = kcalloc(group->max_size, sizeof(int), GFP_KERNEL);
        if (!group->rates)
                return -ENOMEM;

        group->lanes = kcalloc(group->max_size, sizeof(int), GFP_KERNEL);
        if (!group->lanes) {
                kfree(group->rates);
                group->rates = NULL;
                return -ENOMEM;
        }

        list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
                if (m_rt->stream->state == SDW_STREAM_DEPREPARED)
                        continue;

                rate = m_rt->stream->params.rate;
                if (m_rt == list_first_entry(&bus->m_rt_list,
                                             struct sdw_master_runtime,
                                             bus_node)) {
                        group->rates[group->count++] = rate;
                }
                /*
                 * Different ports could use different lanes, so add a group
                 * element even if m_rt is the first entry
                 */
                list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
                        ret = sdw_add_element_group_count(group, rate, p_rt->lane);
                        if (ret < 0) {
                                kfree(group->rates);
                                kfree(group->lanes);
                                return ret;
                        }
                }
        }

        return ret;
}

/**
 * sdw_compute_port_params: Compute transport and port parameters
 *
 * @bus: SDW Bus instance
 * @stream: Soundwire stream
 */
static int sdw_compute_port_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream)
{
        struct sdw_group_params *params = NULL;
        struct sdw_group group;
        int ret;

        ret = sdw_get_group_count(bus, &group);
        if (ret < 0)
                return ret;

        if (group.count == 0)
                goto out;

        params = kcalloc(group.count, sizeof(*params), GFP_KERNEL);
        if (!params) {
                ret = -ENOMEM;
                goto out;
        }

        /* Compute transport parameters for grouped streams */
        ret = sdw_compute_group_params(bus, stream, params, &group);
        if (ret < 0)
                goto free_params;

        _sdw_compute_port_params(bus, params, group.count);

free_params:
        kfree(params);
out:
        kfree(group.rates);
        kfree(group.lanes);

        return ret;
}

static int sdw_select_row_col(struct sdw_bus *bus, int clk_freq)
{
        struct sdw_master_prop *prop = &bus->prop;
        int r, c;

        for (c = 0; c < SDW_FRAME_COLS; c++) {
                for (r = 0; r < SDW_FRAME_ROWS; r++) {
                        if (sdw_rows[r] != prop->default_row ||
                            sdw_cols[c] != prop->default_col)
                                continue;

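                        /*
                         * Column 0 carries control data, so only col - 1 of
                         * the col columns are available for payload: require
                         * bandwidth <= clk_freq * (col - 1) / col.
                         */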
                        if (clk_freq * (sdw_cols[c] - 1) <
                            bus->params.bandwidth * sdw_cols[c])
                                continue;

                        bus->params.row = sdw_rows[r];
                        bus->params.col = sdw_cols[c];
                        return 0;
                }
        }

        return -EINVAL;
}

static bool is_clock_scaling_supported(struct sdw_bus *bus)
{
        struct sdw_master_runtime *m_rt;
        struct sdw_slave_runtime *s_rt;

        list_for_each_entry(m_rt, &bus->m_rt_list, bus_node)
                list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node)
                        if (!is_clock_scaling_supported_by_slave(s_rt->slave))
                                return false;

        return true;
}

/**
 * is_lane_connected_to_all_peripherals: Check if the given manager lane connects to all peripherals,
 * so that all peripherals can use the manager lane.
 *
 * @m_rt: Manager runtime
 * @lane: Lane number
 */
static bool is_lane_connected_to_all_peripherals(struct sdw_master_runtime *m_rt, unsigned int lane)
{
        struct sdw_slave_prop *slave_prop;
        struct sdw_slave_runtime *s_rt;
        int i;

        list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
                slave_prop = &s_rt->slave->prop;
                for (i = 1; i < SDW_MAX_LANES; i++) {
                        if (slave_prop->lane_maps[i] == lane) {
                                dev_dbg(&s_rt->slave->dev,
                                        "M lane %d is connected to P lane %d\n",
                                        lane, i);
                                break;
                        }
                }
                if (i == SDW_MAX_LANES) {
                        dev_dbg(&s_rt->slave->dev, "M lane %d is not connected\n", lane);
                        return false;
                }
        }
        return true;
}

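/*
 * Look for a manager lane other than lane 0 that is mapped by the peripheral,
 * is connected to all peripherals of this master runtime and still has enough
 * spare bandwidth for the stream. Returns the lane number, or 0 if only lane 0
 * can be used.
 */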
static int get_manager_lane(struct sdw_bus *bus, struct sdw_master_runtime *m_rt,
                            struct sdw_slave_runtime *s_rt, unsigned int curr_dr_freq)
{
        struct sdw_slave_prop *slave_prop = &s_rt->slave->prop;
        struct sdw_port_runtime *m_p_rt;
        unsigned int required_bandwidth;
        int m_lane;
        int l;

        for (l = 1; l < SDW_MAX_LANES; l++) {
                if (!slave_prop->lane_maps[l])
                        continue;

                required_bandwidth = 0;
                list_for_each_entry(m_p_rt, &m_rt->port_list, port_node) {
                        required_bandwidth += m_rt->stream->params.rate *
                                              hweight32(m_p_rt->ch_mask) *
                                              m_rt->stream->params.bps;
                }
                if (required_bandwidth <=
                    curr_dr_freq - bus->lane_used_bandwidth[l]) {
                        /* Check if m_lane is connected to all Peripherals */
                        if (!is_lane_connected_to_all_peripherals(m_rt,
                                                                  slave_prop->lane_maps[l])) {
                                dev_dbg(bus->dev,
                                        "Not all Peripherals are connected to M lane %d\n",
                                        slave_prop->lane_maps[l]);
                                continue;
                        }
                        m_lane = slave_prop->lane_maps[l];
                        dev_dbg(&s_rt->slave->dev, "M lane %d is used\n", m_lane);
                        bus->lane_used_bandwidth[l] += required_bandwidth;
                        /*
                         * Use non-zero manager lane, subtract the lane 0
                         * bandwidth that is already calculated
                         */
                        bus->params.bandwidth -= required_bandwidth;
                        return m_lane;
                }
        }

        /* No available multi lane found, only lane 0 can be used */
        return 0;
}

/**
 * sdw_compute_bus_params: Compute bus parameters
 *
 * @bus: SDW Bus instance
 */
static int sdw_compute_bus_params(struct sdw_bus *bus)
{
        struct sdw_master_prop *mstr_prop = &bus->prop;
        struct sdw_slave_prop *slave_prop;
        struct sdw_port_runtime *m_p_rt;
        struct sdw_port_runtime *s_p_rt;
        struct sdw_master_runtime *m_rt;
        struct sdw_slave_runtime *s_rt;
        unsigned int curr_dr_freq = 0;
        int i, l, clk_values, ret;
        bool is_gear = false;
        int m_lane = 0;
        u32 *clk_buf;

        if (mstr_prop->num_clk_gears) {
                clk_values = mstr_prop->num_clk_gears;
                clk_buf = mstr_prop->clk_gears;
                is_gear = true;
        } else if (mstr_prop->num_clk_freq) {
                clk_values = mstr_prop->num_clk_freq;
                clk_buf = mstr_prop->clk_freq;
        } else {
                clk_values = 1;
                clk_buf = NULL;
        }

        /* If dynamic scaling is not supported, don't try higher freq */
        if (!is_clock_scaling_supported(bus))
                clk_values = 1;

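        /*
         * Each iteration tries one clock setting: with gears the double data
         * rate is max_dr_freq >> gear (a 2^n division), with an explicit
         * frequency list it is clk_freq * SDW_DOUBLE_RATE_FACTOR, and with
         * neither it falls back to max_dr_freq.
         */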
        for (i = 0; i < clk_values; i++) {
                if (!clk_buf)
                        curr_dr_freq = bus->params.max_dr_freq;
                else
                        curr_dr_freq = (is_gear) ?
                                (bus->params.max_dr_freq >> clk_buf[i]) :
                                clk_buf[i] * SDW_DOUBLE_RATE_FACTOR;

                if (curr_dr_freq * (mstr_prop->default_col - 1) >=
                    bus->params.bandwidth * mstr_prop->default_col)
                        break;

                list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
                        /*
                         * Get the first s_rt that will be used to find the available lane that
                         * can be used. No need to check all Peripherals because we can't use
                         * multi-lane if we can't find any available lane for the first Peripheral.
                         */
                        s_rt = list_first_entry(&m_rt->slave_rt_list,
                                                struct sdw_slave_runtime, m_rt_node);

                        /*
                         * Find an available Manager lane that is connected to the first Peripheral.
                         */
                        m_lane = get_manager_lane(bus, m_rt, s_rt, curr_dr_freq);
                        if (m_lane > 0)
                                goto out;
                }

                /*
                 * TODO: Check all the Slave(s) port(s) audio modes and find
                 * whether given clock rate is supported with glitchless
                 * transition.
                 */
        }

        if (i == clk_values) {
                dev_err(bus->dev, "%s: could not find clock value for bandwidth %d\n",
                        __func__, bus->params.bandwidth);
                return -EINVAL;
        }
out:
        /* multilane can be used */
        if (m_lane > 0) {
                /* Set Peripheral lanes */
                list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
                        slave_prop = &s_rt->slave->prop;
                        for (l = 1; l < SDW_MAX_LANES; l++) {
                                if (slave_prop->lane_maps[l] == m_lane) {
                                        list_for_each_entry(s_p_rt, &s_rt->port_list, port_node) {
                                                s_p_rt->lane = l;
                                                dev_dbg(&s_rt->slave->dev,
                                                        "Set P lane %d for port %d\n",
                                                        l, s_p_rt->num);
                                        }
                                        break;
                                }
                        }
                }
                /*
                 * Set Manager lanes. Configure the last m_rt in bus->m_rt_list only since
                 * we don't want to touch other m_rts that are already working.
                 */
                list_for_each_entry(m_p_rt, &m_rt->port_list, port_node) {
                        m_p_rt->lane = m_lane;
                }
        }

        if (!mstr_prop->default_frame_rate || !mstr_prop->default_row)
                return -EINVAL;

        mstr_prop->default_col = curr_dr_freq / mstr_prop->default_frame_rate /
                                 mstr_prop->default_row;

        ret = sdw_select_row_col(bus, curr_dr_freq);
        if (ret < 0) {
                dev_err(bus->dev, "%s: could not find frame configuration for bus dr_freq %d\n",
                        __func__, curr_dr_freq);
                return -EINVAL;
        }

        bus->params.curr_dr_freq = curr_dr_freq;
        return 0;
}

/**
 * sdw_compute_params: Compute bus, transport and port parameters
 *
 * @bus: SDW Bus instance
 * @stream: Soundwire stream
 */
int sdw_compute_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream)
{
        int ret;

        /* Computes clock frequency, frame shape and frame frequency */
        ret = sdw_compute_bus_params(bus);
        if (ret < 0)
                return ret;

        if (stream->type == SDW_STREAM_BPT) {
                sdw_compute_dp0_port_params(bus);
                return 0;
        }

        /* Compute transport and port params */
        ret = sdw_compute_port_params(bus, stream);
        if (ret < 0) {
                dev_err(bus->dev, "Compute transport params failed: %d\n", ret);
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL(sdw_compute_params);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("SoundWire Generic Bandwidth Allocation");