// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
// Copyright(c) 2015-2020 Intel Corporation.

/*
 * Bandwidth management algorithm based on 2^n gears
 *
 */
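
/*
 * The bus clock is chosen from a list of "gears": each gear divides the
 * maximum double-rate frequency by a power of two (max_dr_freq >> gear).
 * Illustration with hypothetical values: max_dr_freq = 24.576 MHz gives
 * 24.576 MHz at gear 0, 12.288 MHz at gear 1 and 6.144 MHz at gear 2.
 * sdw_compute_bus_params() walks the candidates in the order provided by
 * the platform properties and keeps the first one that can carry the
 * requested payload bandwidth.
 */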

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/soundwire/sdw.h>
#include "bus.h"

#define SDW_STRM_RATE_GROUPING		1

struct sdw_group_params {
	unsigned int rate;	/* sample rate of the group */
	unsigned int lane;	/* data lane used by the group */
	int full_bw;		/* bit slots per sample interval on the bus */
	int payload_bw;		/* payload bits per sample interval */
	int hwidth;		/* number of frame columns allocated */
};

struct sdw_group {
	unsigned int count;	/* number of (rate, lane) groups */
	unsigned int max_size;	/* allocated size of the rates/lanes arrays */
	unsigned int *rates;	/* sample rate of each group */
	unsigned int *lanes;	/* lane of each group */
};

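/**
 * sdw_compute_slave_ports: Compute transport/port parameters for Peripheral ports
 *
 * @m_rt: Manager runtime
 * @t_data: Transport data for the lane being configured
 *
 * Block offsets grow by bps * channels for each port on the lane; for TX
 * streams where a Peripheral consumes all channels of the stream ("mirror
 * mode"), the offset is reset before moving to the next device.
 */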
void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
			     struct sdw_transport_data *t_data)
{
	struct sdw_slave_runtime *s_rt = NULL;
	struct sdw_port_runtime *p_rt;
	int port_bo, sample_int;
	unsigned int rate, bps, ch = 0;
	unsigned int slave_total_ch;
	struct sdw_bus_params *b_params = &m_rt->bus->params;

	port_bo = t_data->block_offset;

	list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
		rate = m_rt->stream->params.rate;
		bps = m_rt->stream->params.bps;
		sample_int = (m_rt->bus->params.curr_dr_freq / rate);
		slave_total_ch = 0;

		list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
			if (p_rt->lane != t_data->lane)
				continue;

			ch = hweight32(p_rt->ch_mask);

			sdw_fill_xport_params(&p_rt->transport_params,
					      p_rt->num, false,
					      SDW_BLK_GRP_CNT_1,
					      sample_int, port_bo, port_bo >> 8,
					      t_data->hstart,
					      t_data->hstop,
					      SDW_BLK_PKG_PER_PORT, p_rt->lane);

			sdw_fill_port_params(&p_rt->port_params,
					     p_rt->num, bps,
					     SDW_PORT_FLOW_MODE_ISOCH,
					     b_params->s_data_mode);

			port_bo += bps * ch;
			slave_total_ch += ch;
		}

		if (m_rt->direction == SDW_DATA_DIR_TX &&
		    m_rt->ch_count == slave_total_ch) {
			/*
			 * Slave devices were configured to access all channels
			 * of the stream, which indicates that they operate in
			 * 'mirror mode'. Make sure we reset the port offset for
			 * the next device in the list
			 */
			port_bo = t_data->block_offset;
		}
	}
}
EXPORT_SYMBOL(sdw_compute_slave_ports);

static void sdw_compute_dp0_slave_ports(struct sdw_master_runtime *m_rt)
{
	struct sdw_bus *bus = m_rt->bus;
	struct sdw_slave_runtime *s_rt;
	struct sdw_port_runtime *p_rt;

	list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
		list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
			sdw_fill_xport_params(&p_rt->transport_params, p_rt->num, false,
					      SDW_BLK_GRP_CNT_1, bus->params.col, 0, 0, 1,
					      bus->params.col - 1, SDW_BLK_PKG_PER_PORT, 0x0);

			sdw_fill_port_params(&p_rt->port_params, p_rt->num, bus->params.col - 1,
					     SDW_PORT_FLOW_MODE_ISOCH, SDW_PORT_DATA_MODE_NORMAL);
		}
	}
}

static void sdw_compute_dp0_master_ports(struct sdw_master_runtime *m_rt)
{
	struct sdw_port_runtime *p_rt;
	struct sdw_bus *bus = m_rt->bus;

	list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
		sdw_fill_xport_params(&p_rt->transport_params, p_rt->num, false,
				      SDW_BLK_GRP_CNT_1, bus->params.col, 0, 0, 1,
				      bus->params.col - 1, SDW_BLK_PKG_PER_PORT, 0x0);

		sdw_fill_port_params(&p_rt->port_params, p_rt->num, bus->params.col - 1,
				     SDW_PORT_FLOW_MODE_ISOCH, SDW_PORT_DATA_MODE_NORMAL);
	}
}

static void sdw_compute_dp0_port_params(struct sdw_bus *bus)
{
	struct sdw_master_runtime *m_rt;

	list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
		/* DP0 is for BPT only */
		if (m_rt->stream->type != SDW_STREAM_BPT)
			continue;
		sdw_compute_dp0_master_ports(m_rt);
		sdw_compute_dp0_slave_ports(m_rt);
	}
}

static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
				     struct sdw_group_params *params,
				     int *port_bo, int hstop)
{
	struct sdw_transport_data t_data = {0};
	struct sdw_port_runtime *p_rt;
	struct sdw_bus *bus = m_rt->bus;
	struct sdw_bus_params *b_params = &bus->params;
	int sample_int, hstart = 0;
	unsigned int rate, bps, ch;

	rate = m_rt->stream->params.rate;
	bps = m_rt->stream->params.bps;
	ch = m_rt->ch_count;
	sample_int = (bus->params.curr_dr_freq / rate);

	if (rate != params->rate)
		return;

	t_data.hstop = hstop;
	hstart = hstop - params->hwidth + 1;
	t_data.hstart = hstart;

	list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
		if (p_rt->lane != params->lane)
			continue;

		sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
				      false, SDW_BLK_GRP_CNT_1, sample_int,
				      *port_bo, (*port_bo) >> 8, hstart, hstop,
				      SDW_BLK_PKG_PER_PORT, p_rt->lane);

		sdw_fill_port_params(&p_rt->port_params,
				     p_rt->num, bps,
				     SDW_PORT_FLOW_MODE_ISOCH,
				     b_params->m_data_mode);

		/* Check for first entry */
		if (!(p_rt == list_first_entry(&m_rt->port_list,
					       struct sdw_port_runtime,
					       port_node))) {
			(*port_bo) += bps * ch;
			continue;
		}

		t_data.hstart = hstart;
		t_data.hstop = hstop;
		t_data.block_offset = *port_bo;
		t_data.sub_block_offset = 0;
		(*port_bo) += bps * ch;
	}

	t_data.lane = params->lane;
	sdw_compute_slave_ports(m_rt, &t_data);
}

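/*
 * Transport windows are carved out of the frame from the last column
 * backwards: for each lane, hstop starts at the last column and is
 * decremented by the hwidth of every rate group that was placed, so that
 * groups with different rates do not overlap within a frame.
 */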
static void _sdw_compute_port_params(struct sdw_bus *bus,
				     struct sdw_group_params *params, int count)
{
	struct sdw_master_runtime *m_rt;
	int port_bo, i, l;
	int hstop;

	/* Run loop for all groups to compute transport parameters */
	for (l = 0; l < SDW_MAX_LANES; l++) {
		if (l > 0 && !bus->lane_used_bandwidth[l])
			continue;
		/* reset hstop for each lane */
		hstop = bus->params.col - 1;
		for (i = 0; i < count; i++) {
			if (params[i].lane != l)
				continue;
			port_bo = 1;

			list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
				/*
				 * Only runtimes with CONFIGURED, PREPARED, ENABLED, and DISABLED
				 * states should be included in the bandwidth calculation.
				 */
				if (m_rt->stream->state > SDW_STREAM_DISABLED ||
				    m_rt->stream->state < SDW_STREAM_CONFIGURED)
					continue;
				sdw_compute_master_ports(m_rt, &params[i], &port_bo, hstop);
			}

			hstop = hstop - params[i].hwidth;
		}
	}
}

static int sdw_compute_group_params(struct sdw_bus *bus,
				    struct sdw_stream_runtime *stream,
				    struct sdw_group_params *params,
				    struct sdw_group *group)
{
	struct sdw_master_runtime *m_rt;
	struct sdw_port_runtime *p_rt;
	int sel_col = bus->params.col;
	unsigned int rate, bps, ch;
	int i, l, column_needed;

	/* Calculate bandwidth per group */
	for (i = 0; i < group->count; i++) {
		params[i].rate = group->rates[i];
		params[i].lane = group->lanes[i];
		params[i].full_bw = bus->params.curr_dr_freq / params[i].rate;
	}

	list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
		if (m_rt->stream == stream) {
			/* Only the runtime being prepared should be added */
			if (stream->state != SDW_STREAM_CONFIGURED)
				continue;
		} else {
			/*
			 * Include runtimes with running (ENABLED/PREPARED state) and
			 * paused (DISABLED state) streams
			 */
			if (m_rt->stream->state != SDW_STREAM_ENABLED &&
			    m_rt->stream->state != SDW_STREAM_PREPARED &&
			    m_rt->stream->state != SDW_STREAM_DISABLED)
				continue;
		}
		list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
			rate = m_rt->stream->params.rate;
			bps = m_rt->stream->params.bps;
			ch = hweight32(p_rt->ch_mask);

			for (i = 0; i < group->count; i++) {
				if (rate == params[i].rate && p_rt->lane == params[i].lane)
					params[i].payload_bw += bps * ch;
			}
		}
	}

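	/*
	 * Convert each group's payload into a column count. For illustration
	 * only (hypothetical numbers): with curr_dr_freq = 9.6 MHz and
	 * rate = 48 kHz, full_bw = 200 bit slots per sample interval; a
	 * 2-channel, 24-bit group has payload_bw = 48, so with sel_col = 16
	 * the group needs hwidth = DIV_ROUND_UP(16 * 48, 200) = 4 columns.
	 */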
	for (l = 0; l < SDW_MAX_LANES; l++) {
		if (l > 0 && !bus->lane_used_bandwidth[l])
			continue;
		/* reset column_needed for each lane */
		column_needed = 0;
		for (i = 0; i < group->count; i++) {
			if (params[i].lane != l)
				continue;

			params[i].hwidth = (sel_col * params[i].payload_bw +
					    params[i].full_bw - 1) / params[i].full_bw;

			column_needed += params[i].hwidth;
			/* There is no control column for lane 1 and above */
			if (column_needed > sel_col)
				return -EINVAL;
			/* Column 0 is control column on lane 0 */
			if (params[i].lane == 0 && column_needed > sel_col - 1)
				return -EINVAL;
		}
	}

	return 0;
}

static int sdw_add_element_group_count(struct sdw_group *group,
				       unsigned int rate, unsigned int lane)
{
	int num = group->count;
	int i;

	for (i = 0; i <= num; i++) {
		if (rate == group->rates[i] && lane == group->lanes[i])
			break;

		if (i != num)
			continue;

		if (group->count >= group->max_size) {
			unsigned int *rates;
			unsigned int *lanes;

			group->max_size += 1;
			rates = krealloc(group->rates,
					 (sizeof(int) * group->max_size),
					 GFP_KERNEL);
			if (!rates)
				return -ENOMEM;

			group->rates = rates;

			lanes = krealloc(group->lanes,
					 (sizeof(int) * group->max_size),
					 GFP_KERNEL);
			if (!lanes)
				return -ENOMEM;

			group->lanes = lanes;
		}

		group->rates[group->count] = rate;
		group->lanes[group->count++] = lane;
	}

	return 0;
}

static int sdw_get_group_count(struct sdw_bus *bus,
			       struct sdw_group *group)
{
	struct sdw_master_runtime *m_rt;
	struct sdw_port_runtime *p_rt;
	unsigned int rate;
	int ret = 0;

	group->count = 0;
	group->max_size = SDW_STRM_RATE_GROUPING;
	group->rates = kcalloc(group->max_size, sizeof(int), GFP_KERNEL);
	if (!group->rates)
		return -ENOMEM;

	group->lanes = kcalloc(group->max_size, sizeof(int), GFP_KERNEL);
	if (!group->lanes) {
		kfree(group->rates);
		group->rates = NULL;
		return -ENOMEM;
	}

	list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
		if (m_rt->stream->state == SDW_STREAM_DEPREPARED)
			continue;

		rate = m_rt->stream->params.rate;
		if (m_rt == list_first_entry(&bus->m_rt_list,
					     struct sdw_master_runtime,
					     bus_node)) {
			group->rates[group->count++] = rate;
		}
		/*
		 * Different ports could use different lanes; add a group
		 * element even if m_rt is the first entry
		 */
		list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
			ret = sdw_add_element_group_count(group, rate, p_rt->lane);
			if (ret < 0) {
				kfree(group->rates);
				kfree(group->lanes);
				return ret;
			}
		}
	}

	return ret;
}

/**
 * sdw_compute_port_params: Compute transport and port parameters
 *
 * @bus: SDW Bus instance
 * @stream: SoundWire stream
 */
static int sdw_compute_port_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream)
{
	struct sdw_group_params *params = NULL;
	struct sdw_group group;
	int ret;

	ret = sdw_get_group_count(bus, &group);
	if (ret < 0)
		return ret;

	if (group.count == 0)
		goto out;

	params = kcalloc(group.count, sizeof(*params), GFP_KERNEL);
	if (!params) {
		ret = -ENOMEM;
		goto out;
	}

	/* Compute transport parameters for grouped streams */
	ret = sdw_compute_group_params(bus, stream, params, &group);
	if (ret < 0)
		goto free_params;

	_sdw_compute_port_params(bus, params, group.count);

free_params:
	kfree(params);
out:
	kfree(group.rates);
	kfree(group.lanes);

	return ret;
}

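/*
 * A frame shape is acceptable when the columns that carry data (all columns
 * except the control column) provide at least the required bandwidth, i.e.
 * clk_freq * (col - 1) >= bandwidth * col. Only the firmware-provided
 * default row/column pair is considered here.
 */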
static int sdw_select_row_col(struct sdw_bus *bus, int clk_freq)
{
	struct sdw_master_prop *prop = &bus->prop;
	int r, c;

	for (c = 0; c < SDW_FRAME_COLS; c++) {
		for (r = 0; r < SDW_FRAME_ROWS; r++) {
			if (sdw_rows[r] != prop->default_row ||
			    sdw_cols[c] != prop->default_col)
				continue;

			if (clk_freq * (sdw_cols[c] - 1) <
			    bus->params.bandwidth * sdw_cols[c])
				continue;

			bus->params.row = sdw_rows[r];
			bus->params.col = sdw_cols[c];
			return 0;
		}
	}

	return -EINVAL;
}

static bool is_clock_scaling_supported(struct sdw_bus *bus)
{
	struct sdw_master_runtime *m_rt;
	struct sdw_slave_runtime *s_rt;

	list_for_each_entry(m_rt, &bus->m_rt_list, bus_node)
		list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node)
			if (!is_clock_scaling_supported_by_slave(s_rt->slave))
				return false;

	return true;
}

/**
 * is_lane_connected_to_all_peripherals: Check if the given manager lane connects to all peripherals,
 * so that all peripherals can use the manager lane.
 *
 * @m_rt: Manager runtime
 * @lane: Lane number
 */
static bool is_lane_connected_to_all_peripherals(struct sdw_master_runtime *m_rt, unsigned int lane)
{
	struct sdw_slave_prop *slave_prop;
	struct sdw_slave_runtime *s_rt;
	int i;

	list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
		slave_prop = &s_rt->slave->prop;
		for (i = 1; i < SDW_MAX_LANES; i++) {
			if (slave_prop->lane_maps[i] == lane) {
				dev_dbg(&s_rt->slave->dev,
					"M lane %d is connected to P lane %d\n",
					lane, i);
				break;
			}
		}
		if (i == SDW_MAX_LANES) {
			dev_dbg(&s_rt->slave->dev, "M lane %d is not connected\n", lane);
			return false;
		}
	}
	return true;
}

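/**
 * get_manager_lane: Find a non-zero Manager lane with enough spare bandwidth
 *
 * @bus: SDW Bus instance
 * @m_rt: Manager runtime
 * @s_rt: Peripheral runtime used to look up the lane mapping
 * @curr_dr_freq: Current double rate frequency
 *
 * Returns the Manager lane number, or 0 if only lane 0 can be used.
 */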
static int get_manager_lane(struct sdw_bus *bus, struct sdw_master_runtime *m_rt,
			    struct sdw_slave_runtime *s_rt, unsigned int curr_dr_freq)
{
	struct sdw_slave_prop *slave_prop = &s_rt->slave->prop;
	struct sdw_port_runtime *m_p_rt;
	unsigned int required_bandwidth;
	int m_lane;
	int l;

	for (l = 1; l < SDW_MAX_LANES; l++) {
		if (!slave_prop->lane_maps[l])
			continue;

		required_bandwidth = 0;
		list_for_each_entry(m_p_rt, &m_rt->port_list, port_node) {
			required_bandwidth += m_rt->stream->params.rate *
					      hweight32(m_p_rt->ch_mask) *
					      m_rt->stream->params.bps;
		}
		if (required_bandwidth <=
		    curr_dr_freq - bus->lane_used_bandwidth[l]) {
			/* Check if m_lane is connected to all Peripherals */
			if (!is_lane_connected_to_all_peripherals(m_rt,
								  slave_prop->lane_maps[l])) {
				dev_dbg(bus->dev,
					"Not all Peripherals are connected to M lane %d\n",
					slave_prop->lane_maps[l]);
				continue;
			}
			m_lane = slave_prop->lane_maps[l];
			dev_dbg(&s_rt->slave->dev, "M lane %d is used\n", m_lane);
			bus->lane_used_bandwidth[l] += required_bandwidth;
			/*
			 * A non-zero Manager lane is used; subtract this
			 * bandwidth from the lane 0 total that was already
			 * accounted for
			 */
			bus->params.bandwidth -= required_bandwidth;
			return m_lane;
		}
	}

	/* No available multi-lane found, only lane 0 can be used */
	return 0;
}

/**
 * sdw_compute_bus_params: Compute bus parameters
 *
 * @bus: SDW Bus instance
 */
static int sdw_compute_bus_params(struct sdw_bus *bus)
{
	struct sdw_master_prop *mstr_prop = &bus->prop;
	struct sdw_slave_prop *slave_prop;
	struct sdw_port_runtime *m_p_rt;
	struct sdw_port_runtime *s_p_rt;
	struct sdw_master_runtime *m_rt;
	struct sdw_slave_runtime *s_rt;
	unsigned int curr_dr_freq = 0;
	int i, l, clk_values, ret;
	bool is_gear = false;
	int m_lane = 0;
	u32 *clk_buf;

	if (mstr_prop->num_clk_gears) {
		clk_values = mstr_prop->num_clk_gears;
		clk_buf = mstr_prop->clk_gears;
		is_gear = true;
	} else if (mstr_prop->num_clk_freq) {
		clk_values = mstr_prop->num_clk_freq;
		clk_buf = mstr_prop->clk_freq;
	} else {
		clk_values = 1;
		clk_buf = NULL;
	}

	/* If dynamic scaling is not supported, don't try higher freq */
	if (!is_clock_scaling_supported(bus))
		clk_values = 1;

	for (i = 0; i < clk_values; i++) {
		if (!clk_buf)
			curr_dr_freq = bus->params.max_dr_freq;
		else
			curr_dr_freq = (is_gear) ?
				       (bus->params.max_dr_freq >> clk_buf[i]) :
				       clk_buf[i] * SDW_DOUBLE_RATE_FACTOR;

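		/*
		 * Stop at the first clock candidate whose payload columns
		 * (all columns except the control column) can carry the
		 * total requested bandwidth.
		 */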
		if (curr_dr_freq * (mstr_prop->default_col - 1) >=
		    bus->params.bandwidth * mstr_prop->default_col)
			break;

		list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
			/*
			 * Get the first s_rt, which is used to find an available lane.
			 * There is no need to check all Peripherals: multi-lane cannot
			 * be used if no lane is available for the first Peripheral.
			 */
			s_rt = list_first_entry(&m_rt->slave_rt_list,
						struct sdw_slave_runtime, m_rt_node);

			/*
			 * Find an available Manager lane that is connected to the
			 * first Peripheral.
			 */
			m_lane = get_manager_lane(bus, m_rt, s_rt, curr_dr_freq);
			if (m_lane > 0)
				goto out;
		}

		/*
		 * TODO: Check all the Slave(s) port(s) audio modes and find
		 * whether given clock rate is supported with glitchless
		 * transition.
		 */
	}

	if (i == clk_values) {
		dev_err(bus->dev, "%s: could not find clock value for bandwidth %d\n",
			__func__, bus->params.bandwidth);
		return -EINVAL;
	}
out:
	/* multi-lane can be used */
	if (m_lane > 0) {
		/* Set Peripheral lanes */
		list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
			slave_prop = &s_rt->slave->prop;
			for (l = 1; l < SDW_MAX_LANES; l++) {
				if (slave_prop->lane_maps[l] == m_lane) {
					list_for_each_entry(s_p_rt, &s_rt->port_list, port_node) {
						s_p_rt->lane = l;
						dev_dbg(&s_rt->slave->dev,
							"Set P lane %d for port %d\n",
							l, s_p_rt->num);
					}
					break;
				}
			}
		}
		/*
		 * Set Manager lanes. Configure only the last m_rt in bus->m_rt_list
		 * since we don't want to touch other m_rts that are already working.
		 */
		list_for_each_entry(m_p_rt, &m_rt->port_list, port_node) {
			m_p_rt->lane = m_lane;
		}
	}

	if (!mstr_prop->default_frame_rate || !mstr_prop->default_row)
		return -EINVAL;

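	/*
	 * Illustration (hypothetical values): with curr_dr_freq = 9.6 MHz,
	 * default_frame_rate = 48000 and default_row = 50, default_col
	 * evaluates to 9600000 / 48000 / 50 = 4 columns, i.e. row * col *
	 * frame_rate matches the double-rate clock.
	 */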
	mstr_prop->default_col = curr_dr_freq / mstr_prop->default_frame_rate /
				 mstr_prop->default_row;

	ret = sdw_select_row_col(bus, curr_dr_freq);
	if (ret < 0) {
		dev_err(bus->dev, "%s: could not find frame configuration for bus dr_freq %d\n",
			__func__, curr_dr_freq);
		return -EINVAL;
	}

	bus->params.curr_dr_freq = curr_dr_freq;
	return 0;
}

/**
 * sdw_compute_params: Compute bus, transport and port parameters
 *
 * @bus: SDW Bus instance
 * @stream: SoundWire stream
 */
int sdw_compute_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream)
{
	int ret;

	/* Computes clock frequency, frame shape and frame frequency */
	ret = sdw_compute_bus_params(bus);
	if (ret < 0)
		return ret;

	if (stream->type == SDW_STREAM_BPT) {
		sdw_compute_dp0_port_params(bus);
		return 0;
	}

	/* Compute transport and port params */
	ret = sdw_compute_port_params(bus, stream);
	if (ret < 0) {
		dev_err(bus->dev, "Compute transport params failed: %d\n", ret);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(sdw_compute_params);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("SoundWire Generic Bandwidth Allocation");