xref: /linux/drivers/soundwire/intel_ace2x.c (revision db9c4387391e09209d44d41c2791512ac45b9e3c)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 // Copyright(c) 2023 Intel Corporation
3 
4 /*
5  * Soundwire Intel ops for LunarLake
6  */
7 
8 #include <linux/acpi.h>
9 #include <linux/cleanup.h>
10 #include <linux/device.h>
11 #include <linux/soundwire/sdw_registers.h>
12 #include <linux/soundwire/sdw.h>
13 #include <linux/soundwire/sdw_intel.h>
14 #include <linux/string_choices.h>
15 #include <sound/hdaudio.h>
16 #include <sound/hda-mlink.h>
17 #include <sound/hda-sdw-bpt.h>
18 #include <sound/hda_register.h>
19 #include <sound/pcm_params.h>
20 #include "cadence_master.h"
21 #include "bus.h"
22 #include "intel.h"
23 
24 static int sdw_slave_bpt_stream_add(struct sdw_slave *slave, struct sdw_stream_runtime *stream)
25 {
26 	struct sdw_stream_config sconfig = {0};
27 	struct sdw_port_config pconfig = {0};
28 	int ret;
29 
30 	/* arbitrary configuration */
31 	sconfig.frame_rate = 16000;
32 	sconfig.ch_count = 1;
33 	sconfig.bps = 32; /* this is required for BPT/BRA */
34 	sconfig.direction = SDW_DATA_DIR_RX;
35 	sconfig.type = SDW_STREAM_BPT;
36 
37 	pconfig.num = 0;
38 	pconfig.ch_mask = BIT(0);
39 
40 	ret = sdw_stream_add_slave(slave, &sconfig, &pconfig, 1, stream);
41 	if (ret)
42 		dev_err(&slave->dev, "%s: failed: %d\n", __func__, ret);
43 
44 	return ret;
45 }
46 
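/* minimum extra PDI1 (RX) buffer space enforced below when padding BPT read transfers */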
47 #define READ_PDI1_MIN_SIZE	12
48 
49 static int intel_ace2x_bpt_open_stream(struct sdw_intel *sdw, struct sdw_slave *slave,
50 				       struct sdw_bpt_msg *msg)
51 {
52 	struct sdw_cdns *cdns = &sdw->cdns;
53 	struct sdw_bus *bus = &cdns->bus;
54 	struct sdw_master_prop *prop = &bus->prop;
55 	struct sdw_stream_runtime *stream;
56 	struct sdw_stream_config sconfig;
57 	struct sdw_port_config *pconfig;
58 	unsigned int pdi0_buf_size_pre_frame;
59 	unsigned int pdi1_buf_size_pre_frame;
60 	unsigned int pdi0_buffer_size_;
61 	unsigned int pdi1_buffer_size_;
62 	unsigned int pdi0_buffer_size;
63 	unsigned int tx_dma_bandwidth;
64 	unsigned int pdi1_buffer_size;
65 	unsigned int rx_dma_bandwidth;
66 	unsigned int fake_num_frames;
67 	unsigned int data_per_frame;
68 	unsigned int tx_total_bytes;
69 	struct sdw_cdns_pdi *pdi0;
70 	struct sdw_cdns_pdi *pdi1;
71 	unsigned int rx_alignment;
72 	unsigned int tx_alignment;
73 	unsigned int num_frames_;
74 	unsigned int num_frames;
75 	unsigned int fake_size;
76 	unsigned int tx_pad;
77 	unsigned int rx_pad;
78 	int command;
79 	int ret1;
80 	int ret;
81 	int dir;
82 	int len;
83 	int i;
84 
85 	stream = sdw_alloc_stream("BPT", SDW_STREAM_BPT);
86 	if (!stream)
87 		return -ENOMEM;
88 
89 	cdns->bus.bpt_stream = stream;
90 
91 	ret = sdw_slave_bpt_stream_add(slave, stream);
92 	if (ret < 0)
93 		goto release_stream;
94 
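	/*
	 * BPT uses two PDIs: PDI0 carries the command/write data towards the
	 * peripheral (TX direction), PDI1 carries the read data and command
	 * responses back (RX direction).
	 */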
95 	/* handle PDI0 first */
96 	dir = SDW_DATA_DIR_TX;
97 
98 	pdi0 = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, 1, dir, 0);
99 	if (!pdi0) {
100 		dev_err(cdns->dev, "%s: sdw_cdns_alloc_pdi0 failed\n", __func__);
101 		ret = -EINVAL;
102 		goto remove_slave;
103 	}
104 
105 	sdw_cdns_config_stream(cdns, 1, dir, pdi0);
106 
107 	/* handle PDI1  */
108 	dir = SDW_DATA_DIR_RX;
109 
110 	pdi1 = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, 1, dir, 1);
111 	if (!pdi1) {
112 		dev_err(cdns->dev, "%s: sdw_cdns_alloc_pdi1 failed\n", __func__);
113 		ret = -EINVAL;
114 		goto remove_slave;
115 	}
116 
117 	sdw_cdns_config_stream(cdns, 1, dir, pdi1);
118 
119 	/*
120 	 * The port config direction, number of channels and frame
121 	 * rate are totally arbitrary
122 	 */
123 	sconfig.direction = dir;
124 	sconfig.ch_count = 1;
125 	sconfig.frame_rate = 16000;
126 	sconfig.type = SDW_STREAM_BPT;
127 	sconfig.bps = 32; /* this is required for BPT/BRA */
128 
129 	/* Port configuration */
130 	pconfig = kcalloc(2, sizeof(*pconfig), GFP_KERNEL);
131 	if (!pconfig) {
132 		ret = -ENOMEM;
133 		goto remove_slave;
134 	}
135 
136 	for (i = 0; i < 2 /* num_pdi */; i++) {
137 		pconfig[i].num = i;
138 		pconfig[i].ch_mask = 1;
139 	}
140 
141 	ret = sdw_stream_add_master(&cdns->bus, &sconfig, pconfig, 2, stream);
142 	kfree(pconfig);
143 
144 	if (ret < 0) {
145 		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
146 		goto remove_slave;
147 	}
148 
149 	ret = sdw_prepare_stream(cdns->bus.bpt_stream);
150 	if (ret < 0)
151 		goto remove_master;
152 
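	/* command encoding used by the helpers below: 0 = write, 1 = read */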
153 	command = (msg->flags & SDW_MSG_FLAG_WRITE) ? 0 : 1;
154 
155 	ret = sdw_cdns_bpt_find_bandwidth(command, cdns->bus.params.row,
156 					  cdns->bus.params.col,
157 					  prop->default_frame_rate,
158 					  &tx_dma_bandwidth, &rx_dma_bandwidth);
159 	if (ret < 0)
160 		goto deprepare_stream;
161 
162 	len = 0;
163 	pdi0_buffer_size = 0;
164 	pdi1_buffer_size = 0;
165 	num_frames = 0;
166 	/* Add up the PDI buffer sizes and frame counts of each BPT section */
167 	for (i = 0; i < msg->sections; i++) {
168 		ret = sdw_cdns_bpt_find_buffer_sizes(command, cdns->bus.params.row,
169 						     cdns->bus.params.col,
170 						     msg->sec[i].len, SDW_BPT_MSG_MAX_BYTES,
171 						     &data_per_frame, &pdi0_buffer_size_,
172 						     &pdi1_buffer_size_, &num_frames_);
173 		if (ret < 0)
174 			goto deprepare_stream;
175 
176 		len += msg->sec[i].len;
177 		pdi0_buffer_size += pdi0_buffer_size_;
178 		pdi1_buffer_size += pdi1_buffer_size_;
179 		num_frames += num_frames_;
180 	}
181 
182 	sdw->bpt_ctx.pdi0_buffer_size = pdi0_buffer_size;
183 	sdw->bpt_ctx.pdi1_buffer_size = pdi1_buffer_size;
184 	sdw->bpt_ctx.num_frames = num_frames;
185 	sdw->bpt_ctx.data_per_frame = data_per_frame;
186 
187 	rx_alignment = hda_sdw_bpt_get_buf_size_alignment(rx_dma_bandwidth);
188 	tx_alignment = hda_sdw_bpt_get_buf_size_alignment(tx_dma_bandwidth);
189 
190 	if (command) { /* read */
191 		/* Get buffer size of a full frame */
192 		ret = sdw_cdns_bpt_find_buffer_sizes(command, cdns->bus.params.row,
193 						     cdns->bus.params.col,
194 						     data_per_frame, SDW_BPT_MSG_MAX_BYTES,
195 						     &data_per_frame, &pdi0_buf_size_pre_frame,
196 						     &pdi1_buf_size_pre_frame, &fake_num_frames);
197 		if (ret < 0)
198 			goto deprepare_stream;
199 
200 		/* find fake pdi1 buffer size */
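		/*
		 * Pad the RX buffer up to the DMA alignment and make sure the
		 * pad exceeds READ_PDI1_MIN_SIZE; for illustration, with a
		 * 32-byte alignment and a 100-byte pdi1_buffer_size, rx_pad
		 * starts at 28 and no extra alignment step is needed.
		 */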
201 		rx_pad = rx_alignment - (pdi1_buffer_size % rx_alignment);
202 		while (rx_pad <= READ_PDI1_MIN_SIZE)
203 			rx_pad += rx_alignment;
204 
205 		pdi1_buffer_size += rx_pad;
206 		/* It is fine if we request more than enough bytes to read */
207 		fake_num_frames = DIV_ROUND_UP(rx_pad, pdi1_buf_size_pre_frame);
208 		fake_size = fake_num_frames * data_per_frame;
209 
210 		/* find fake pdi0 buffer size */
211 		pdi0_buffer_size += (fake_num_frames * pdi0_buf_size_pre_frame);
212 		tx_pad = tx_alignment - (pdi0_buffer_size % tx_alignment);
213 		pdi0_buffer_size += tx_pad;
214 	} else { /* write */
215 		/*
216 		 * For the write command, the rx data block is 4, and the rx buffer size of a frame
217 		 * is 8. So the rx buffer size (pdi1_buffer_size) is always a multiple of the rx
218 		 * alignment.
219 		 */
220 		tx_pad = tx_alignment - (pdi0_buffer_size % tx_alignment);
221 		pdi0_buffer_size += tx_pad;
222 	}
223 
224 	dev_dbg(cdns->dev, "Message len %d transferred in %d frames (%d per frame)\n",
225 		len, num_frames, data_per_frame);
226 	dev_dbg(cdns->dev, "sizes pdi0 %d pdi1 %d tx_bandwidth %d rx_bandwidth %d\n",
227 		pdi0_buffer_size, pdi1_buffer_size, tx_dma_bandwidth, rx_dma_bandwidth);
228 
229 	ret = hda_sdw_bpt_open(cdns->dev->parent, /* PCI device */
230 			       sdw->instance, &sdw->bpt_ctx.bpt_tx_stream,
231 			       &sdw->bpt_ctx.dmab_tx_bdl, pdi0_buffer_size, tx_dma_bandwidth,
232 			       &sdw->bpt_ctx.bpt_rx_stream, &sdw->bpt_ctx.dmab_rx_bdl,
233 			       pdi1_buffer_size, rx_dma_bandwidth);
234 	if (ret < 0) {
235 		dev_err(cdns->dev, "%s: hda_sdw_bpt_open failed %d\n", __func__, ret);
236 		goto deprepare_stream;
237 	}
238 
239 	if (!command) {
240 		ret = sdw_cdns_prepare_write_dma_buffer(msg->dev_num, msg->sec, msg->sections,
241 							data_per_frame,
242 							sdw->bpt_ctx.dmab_tx_bdl.area,
243 							pdi0_buffer_size, &tx_total_bytes);
244 	} else {
245 		ret = sdw_cdns_prepare_read_dma_buffer(msg->dev_num, msg->sec, msg->sections,
246 						       data_per_frame,
247 						       sdw->bpt_ctx.dmab_tx_bdl.area,
248 						       pdi0_buffer_size, &tx_total_bytes,
249 						       fake_size);
250 	}
251 
252 	if (!ret)
253 		return 0;
254 
255 	dev_err(cdns->dev, "%s: sdw_cdns_prepare_%s_dma_buffer failed %d\n",
256 		__func__, str_read_write(command), ret);
257 
258 	ret1 = hda_sdw_bpt_close(cdns->dev->parent, /* PCI device */
259 				 sdw->bpt_ctx.bpt_tx_stream, &sdw->bpt_ctx.dmab_tx_bdl,
260 				 sdw->bpt_ctx.bpt_rx_stream, &sdw->bpt_ctx.dmab_rx_bdl);
261 	if (ret1 < 0)
262 		dev_err(cdns->dev, "%s: hda_sdw_bpt_close failed: ret %d\n",
263 			__func__, ret1);
264 
265 deprepare_stream:
266 	sdw_deprepare_stream(cdns->bus.bpt_stream);
267 
268 remove_master:
269 	ret1 = sdw_stream_remove_master(&cdns->bus, cdns->bus.bpt_stream);
270 	if (ret1 < 0)
271 		dev_err(cdns->dev, "%s: remove master failed: %d\n",
272 			__func__, ret1);
273 
274 remove_slave:
275 	ret1 = sdw_stream_remove_slave(slave, cdns->bus.bpt_stream);
276 	if (ret1 < 0)
277 		dev_err(cdns->dev, "%s: remove slave failed: %d\n",
278 			__func__, ret1);
279 
280 release_stream:
281 	sdw_release_stream(cdns->bus.bpt_stream);
282 	cdns->bus.bpt_stream = NULL;
283 
284 	return ret;
285 }
286 
287 static void intel_ace2x_bpt_close_stream(struct sdw_intel *sdw, struct sdw_slave *slave,
288 					 struct sdw_bpt_msg *msg)
289 {
290 	struct sdw_cdns *cdns = &sdw->cdns;
291 	int ret;
292 
293 	ret = hda_sdw_bpt_close(cdns->dev->parent /* PCI device */, sdw->bpt_ctx.bpt_tx_stream,
294 				&sdw->bpt_ctx.dmab_tx_bdl, sdw->bpt_ctx.bpt_rx_stream,
295 				&sdw->bpt_ctx.dmab_rx_bdl);
296 	if (ret < 0)
297 		dev_err(cdns->dev, "%s: hda_sdw_bpt_close failed: ret %d\n",
298 			__func__, ret);
299 
300 	ret = sdw_deprepare_stream(cdns->bus.bpt_stream);
301 	if (ret < 0)
302 		dev_err(cdns->dev, "%s: sdw_deprepare_stream failed: ret %d\n",
303 			__func__, ret);
304 
305 	ret = sdw_stream_remove_master(&cdns->bus, cdns->bus.bpt_stream);
306 	if (ret < 0)
307 		dev_err(cdns->dev, "%s: remove master failed: %d\n",
308 			__func__, ret);
309 
310 	ret = sdw_stream_remove_slave(slave, cdns->bus.bpt_stream);
311 	if (ret < 0)
312 		dev_err(cdns->dev, "%s: remove slave failed: %d\n",
313 			__func__, ret);
314 
315 	cdns->bus.bpt_stream = NULL;
316 }
317 
318 #define INTEL_BPT_MSG_BYTE_MIN 16
319 
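/*
 * BPT transfer flow: intel_ace2x_bpt_send_async() opens the stream, programs
 * the TX/RX DMA buffers and kicks off the transfer; the caller is then
 * expected to call intel_ace2x_bpt_wait() to wait for completion, check the
 * response and tear the stream down.
 */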
320 static int intel_ace2x_bpt_send_async(struct sdw_intel *sdw, struct sdw_slave *slave,
321 				      struct sdw_bpt_msg *msg)
322 {
323 	struct sdw_cdns *cdns = &sdw->cdns;
324 	int len = 0;
325 	int ret;
326 	int i;
327 
328 	for (i = 0; i < msg->sections; i++)
329 		len += msg->sec[i].len;
330 
331 	if (len < INTEL_BPT_MSG_BYTE_MIN) {
332 		dev_err(cdns->dev, "BPT message length %d is less than the minimum bytes %d\n",
333 			len, INTEL_BPT_MSG_BYTE_MIN);
334 		return -EINVAL;
335 	}
336 
337 	dev_dbg(cdns->dev, "BPT Transfer start\n");
338 
339 	ret = intel_ace2x_bpt_open_stream(sdw, slave, msg);
340 	if (ret < 0)
341 		return ret;
342 
343 	ret = hda_sdw_bpt_send_async(cdns->dev->parent, /* PCI device */
344 				     sdw->bpt_ctx.bpt_tx_stream, sdw->bpt_ctx.bpt_rx_stream);
345 	if (ret < 0) {
346 		dev_err(cdns->dev, "%s: hda_sdw_bpt_send_async failed: %d\n",
347 			__func__, ret);
348 
349 		intel_ace2x_bpt_close_stream(sdw, slave, msg);
350 
351 		return ret;
352 	}
353 
354 	ret = sdw_enable_stream(cdns->bus.bpt_stream);
355 	if (ret < 0) {
356 		dev_err(cdns->dev, "%s: sdw_enable_stream failed: %d\n",
357 			__func__, ret);
358 		intel_ace2x_bpt_close_stream(sdw, slave, msg);
359 	}
360 
361 	return ret;
362 }
363 
364 static int intel_ace2x_bpt_wait(struct sdw_intel *sdw, struct sdw_slave *slave,
365 				struct sdw_bpt_msg *msg)
366 {
367 	struct sdw_cdns *cdns = &sdw->cdns;
368 	int ret;
369 
370 	dev_dbg(cdns->dev, "BPT Transfer wait\n");
371 
372 	ret = hda_sdw_bpt_wait(cdns->dev->parent, /* PCI device */
373 			       sdw->bpt_ctx.bpt_tx_stream, sdw->bpt_ctx.bpt_rx_stream);
374 	if (ret < 0)
375 		dev_err(cdns->dev, "%s: hda_sdw_bpt_wait failed: %d\n", __func__, ret);
376 
377 	ret = sdw_disable_stream(cdns->bus.bpt_stream);
378 	if (ret < 0) {
379 		dev_err(cdns->dev, "%s: sdw_disable_stream failed: %d\n",
380 			__func__, ret);
381 		goto err;
382 	}
383 
384 	if (msg->flags & SDW_MSG_FLAG_WRITE) {
385 		ret = sdw_cdns_check_write_response(cdns->dev, sdw->bpt_ctx.dmab_rx_bdl.area,
386 						    sdw->bpt_ctx.pdi1_buffer_size,
387 						    sdw->bpt_ctx.num_frames);
388 		if (ret < 0)
389 			dev_err(cdns->dev, "%s: BPT Write failed %d\n", __func__, ret);
390 	} else {
391 		ret = sdw_cdns_check_read_response(cdns->dev, sdw->bpt_ctx.dmab_rx_bdl.area,
392 						   sdw->bpt_ctx.pdi1_buffer_size,
393 						   msg->sec, msg->sections, sdw->bpt_ctx.num_frames,
394 						   sdw->bpt_ctx.data_per_frame);
395 		if (ret < 0)
396 			dev_err(cdns->dev, "%s: BPT Read failed %d\n", __func__, ret);
397 	}
398 
399 err:
400 	intel_ace2x_bpt_close_stream(sdw, slave, msg);
401 
402 	return ret;
403 }
404 
405 /*
406  * shim vendor-specific (vs) ops
407  */
408 
409 static void intel_shim_vs_init(struct sdw_intel *sdw)
410 {
411 	void __iomem *shim_vs = sdw->link_res->shim_vs;
412 	struct sdw_bus *bus = &sdw->cdns.bus;
413 	struct sdw_intel_prop *intel_prop;
414 	u16 clde;
415 	u16 doaise2;
416 	u16 dodse2;
417 	u16 clds;
418 	u16 clss;
419 	u16 doaise;
420 	u16 doais;
421 	u16 dodse;
422 	u16 dods;
423 	u16 act;
424 
425 	intel_prop = bus->vendor_specific_prop;
426 	clde = intel_prop->clde;
427 	doaise2 = intel_prop->doaise2;
428 	dodse2 = intel_prop->dodse2;
429 	clds = intel_prop->clds;
430 	clss = intel_prop->clss;
431 	doaise = intel_prop->doaise;
432 	doais = intel_prop->doais;
433 	dodse = intel_prop->dodse;
434 	dods = intel_prop->dods;
435 
436 	act = intel_readw(shim_vs, SDW_SHIM2_INTEL_VS_ACTMCTL);
437 	u16p_replace_bits(&act, clde, SDW_SHIM3_INTEL_VS_ACTMCTL_CLDE);
438 	u16p_replace_bits(&act, doaise2, SDW_SHIM3_INTEL_VS_ACTMCTL_DOAISE2);
439 	u16p_replace_bits(&act, dodse2, SDW_SHIM3_INTEL_VS_ACTMCTL_DODSE2);
440 	u16p_replace_bits(&act, clds, SDW_SHIM3_INTEL_VS_ACTMCTL_CLDS);
441 	u16p_replace_bits(&act, clss, SDW_SHIM3_INTEL_VS_ACTMCTL_CLSS);
442 	u16p_replace_bits(&act, doaise, SDW_SHIM2_INTEL_VS_ACTMCTL_DOAISE);
443 	u16p_replace_bits(&act, doais, SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS);
444 	u16p_replace_bits(&act, dodse, SDW_SHIM2_INTEL_VS_ACTMCTL_DODSE);
445 	u16p_replace_bits(&act, dods, SDW_SHIM2_INTEL_VS_ACTMCTL_DODS);
446 	act |= SDW_SHIM2_INTEL_VS_ACTMCTL_DACTQE;
447 	intel_writew(shim_vs, SDW_SHIM2_INTEL_VS_ACTMCTL, act);
448 	usleep_range(10, 15);
449 }
450 
451 static void intel_shim_vs_set_clock_source(struct sdw_intel *sdw, u32 source)
452 {
453 	void __iomem *shim_vs = sdw->link_res->shim_vs;
454 	u32 val;
455 
456 	val = intel_readl(shim_vs, SDW_SHIM2_INTEL_VS_LVSCTL);
457 
458 	u32p_replace_bits(&val, source, SDW_SHIM2_INTEL_VS_LVSCTL_MLCS);
459 
460 	intel_writel(shim_vs, SDW_SHIM2_INTEL_VS_LVSCTL, val);
461 
462 	dev_dbg(sdw->cdns.dev, "clock source %d LVSCTL %#x\n", source, val);
463 }
464 
465 static int intel_shim_check_wake(struct sdw_intel *sdw)
466 {
467 	/*
468 	 * We follow the HDaudio example and resume unconditionally
469 	 * without checking the WAKESTS bit for that specific link
470 	 */
471 
472 	return 1;
473 }
474 
475 static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
476 {
477 	u16 lsdiid = 0;
478 	u16 wake_en;
479 	u16 wake_sts;
480 	int ret;
481 
482 	mutex_lock(sdw->link_res->shim_lock);
483 
484 	ret = hdac_bus_eml_sdw_get_lsdiid_unlocked(sdw->link_res->hbus, sdw->instance, &lsdiid);
485 	if (ret < 0)
486 		goto unlock;
487 
488 	wake_en = snd_hdac_chip_readw(sdw->link_res->hbus, WAKEEN);
489 
490 	if (wake_enable) {
491 		/* Enable the wakeup */
492 		wake_en |= lsdiid;
493 
494 		snd_hdac_chip_writew(sdw->link_res->hbus, WAKEEN, wake_en);
495 	} else {
496 		/* Disable the wake up interrupt */
497 		wake_en &= ~lsdiid;
498 		snd_hdac_chip_writew(sdw->link_res->hbus, WAKEEN, wake_en);
499 
500 		/* Clear wake status (W1C) */
501 		wake_sts = snd_hdac_chip_readw(sdw->link_res->hbus, STATESTS);
502 		wake_sts |= lsdiid;
503 		snd_hdac_chip_writew(sdw->link_res->hbus, STATESTS, wake_sts);
504 	}
505 unlock:
506 	mutex_unlock(sdw->link_res->shim_lock);
507 }
508 
509 static int intel_link_power_up(struct sdw_intel *sdw)
510 {
511 	struct sdw_bus *bus = &sdw->cdns.bus;
512 	struct sdw_master_prop *prop = &bus->prop;
513 	u32 *shim_mask = sdw->link_res->shim_mask;
514 	unsigned int link_id = sdw->instance;
515 	u32 clock_source;
516 	u32 syncprd;
517 	int ret;
518 
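	/*
	 * Derive SYNCPRD and the clock source from the mclk frequency, e.g.
	 * 96 MHz is a multiple of 6 MHz -> audio PLL, 38.4 MHz is only a
	 * multiple of 2.4 MHz -> XTAL, and 24.576 MHz is a multiple of
	 * neither -> cardinal clock.
	 */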
519 	if (prop->mclk_freq % 6000000) {
520 		if (prop->mclk_freq % 2400000) {
521 			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24_576;
522 			clock_source = SDW_SHIM2_MLCS_CARDINAL_CLK;
523 		} else {
524 			syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
525 			clock_source = SDW_SHIM2_MLCS_XTAL_CLK;
526 		}
527 	} else {
528 		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_96;
529 		clock_source = SDW_SHIM2_MLCS_AUDIO_PLL_CLK;
530 	}
531 
532 	mutex_lock(sdw->link_res->shim_lock);
533 
534 	ret = hdac_bus_eml_sdw_power_up_unlocked(sdw->link_res->hbus, link_id);
535 	if (ret < 0) {
536 		dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_power_up failed: %d\n",
537 			__func__, ret);
538 		goto out;
539 	}
540 
541 	intel_shim_vs_set_clock_source(sdw, clock_source);
542 
543 	if (!*shim_mask) {
544 		/* we first need to program the SyncPRD/CPU registers */
545 		dev_dbg(sdw->cdns.dev, "first link up, programming SYNCPRD\n");
546 
547 		ret = hdac_bus_eml_sdw_set_syncprd_unlocked(sdw->link_res->hbus, syncprd);
548 		if (ret < 0) {
549 			dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_set_syncprd failed: %d\n",
550 				__func__, ret);
551 			goto out;
552 		}
553 
554 		/* SYNCPU will change once link is active */
555 		ret = hdac_bus_eml_sdw_wait_syncpu_unlocked(sdw->link_res->hbus);
556 		if (ret < 0) {
557 			dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_wait_syncpu failed: %d\n",
558 				__func__, ret);
559 			goto out;
560 		}
561 
562 		hdac_bus_eml_enable_interrupt_unlocked(sdw->link_res->hbus, true,
563 						       AZX_REG_ML_LEPTR_ID_SDW, true);
564 	}
565 
566 	*shim_mask |= BIT(link_id);
567 
568 	sdw->cdns.link_up = true;
569 
570 	intel_shim_vs_init(sdw);
571 
572 out:
573 	mutex_unlock(sdw->link_res->shim_lock);
574 
575 	return ret;
576 }
577 
578 static int intel_link_power_down(struct sdw_intel *sdw)
579 {
580 	u32 *shim_mask = sdw->link_res->shim_mask;
581 	unsigned int link_id = sdw->instance;
582 	int ret;
583 
584 	mutex_lock(sdw->link_res->shim_lock);
585 
586 	sdw->cdns.link_up = false;
587 
588 	*shim_mask &= ~BIT(link_id);
589 
590 	if (!*shim_mask)
591 		hdac_bus_eml_enable_interrupt_unlocked(sdw->link_res->hbus, true,
592 						       AZX_REG_ML_LEPTR_ID_SDW, false);
593 
594 	ret = hdac_bus_eml_sdw_power_down_unlocked(sdw->link_res->hbus, link_id);
595 	if (ret < 0) {
596 		dev_err(sdw->cdns.dev, "%s: hdac_bus_eml_sdw_power_down failed: %d\n",
597 			__func__, ret);
598 
599 		/*
600 		 * we leave the sdw->cdns.link_up flag as false since we've disabled
601 		 * the link at this point and cannot handle interrupts any longer.
602 		 */
603 	}
604 
605 	mutex_unlock(sdw->link_res->shim_lock);
606 
607 	return ret;
608 }
609 
610 static void intel_sync_arm(struct sdw_intel *sdw)
611 {
612 	unsigned int link_id = sdw->instance;
613 
614 	mutex_lock(sdw->link_res->shim_lock);
615 
616 	hdac_bus_eml_sdw_sync_arm_unlocked(sdw->link_res->hbus, link_id);
617 
618 	mutex_unlock(sdw->link_res->shim_lock);
619 }
620 
621 static int intel_sync_go_unlocked(struct sdw_intel *sdw)
622 {
623 	int ret;
624 
625 	ret = hdac_bus_eml_sdw_sync_go_unlocked(sdw->link_res->hbus);
626 	if (ret < 0)
627 		dev_err(sdw->cdns.dev, "%s: SyncGO clear failed: %d\n", __func__, ret);
628 
629 	return ret;
630 }
631 
632 static int intel_sync_go(struct sdw_intel *sdw)
633 {
634 	int ret;
635 
636 	mutex_lock(sdw->link_res->shim_lock);
637 
638 	ret = intel_sync_go_unlocked(sdw);
639 
640 	mutex_unlock(sdw->link_res->shim_lock);
641 
642 	return ret;
643 }
644 
645 static bool intel_check_cmdsync_unlocked(struct sdw_intel *sdw)
646 {
647 	return hdac_bus_eml_sdw_check_cmdsync_unlocked(sdw->link_res->hbus);
648 }
649 
650 /* DAI callbacks */
651 static int intel_params_stream(struct sdw_intel *sdw,
652 			       struct snd_pcm_substream *substream,
653 			       struct snd_soc_dai *dai,
654 			       struct snd_pcm_hw_params *hw_params,
655 			       int link_id, int alh_stream_id)
656 {
657 	struct sdw_intel_link_res *res = sdw->link_res;
658 	struct sdw_intel_stream_params_data params_data;
659 
660 	params_data.substream = substream;
661 	params_data.dai = dai;
662 	params_data.hw_params = hw_params;
663 	params_data.link_id = link_id;
664 	params_data.alh_stream_id = alh_stream_id;
665 
666 	if (res->ops && res->ops->params_stream && res->dev)
667 		return res->ops->params_stream(res->dev,
668 					       &params_data);
669 	return -EIO;
670 }
671 
672 static int intel_free_stream(struct sdw_intel *sdw,
673 			     struct snd_pcm_substream *substream,
674 			     struct snd_soc_dai *dai,
675 			     int link_id)
676 
677 {
678 	struct sdw_intel_link_res *res = sdw->link_res;
679 	struct sdw_intel_stream_free_data free_data;
680 
681 	free_data.substream = substream;
682 	free_data.dai = dai;
683 	free_data.link_id = link_id;
684 
685 	if (res->ops && res->ops->free_stream && res->dev)
686 		return res->ops->free_stream(res->dev,
687 					     &free_data);
688 
689 	return 0;
690 }
691 
692 /*
693  * DAI operations
694  */
695 static int intel_hw_params(struct snd_pcm_substream *substream,
696 			   struct snd_pcm_hw_params *params,
697 			   struct snd_soc_dai *dai)
698 {
699 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
700 	struct sdw_intel *sdw = cdns_to_intel(cdns);
701 	struct sdw_cdns_dai_runtime *dai_runtime;
702 	struct sdw_cdns_pdi *pdi;
703 	struct sdw_stream_config sconfig;
704 	int ch, dir;
705 	int ret;
706 
707 	dai_runtime = cdns->dai_runtime_array[dai->id];
708 	if (!dai_runtime)
709 		return -EIO;
710 
711 	ch = params_channels(params);
712 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
713 		dir = SDW_DATA_DIR_RX;
714 	else
715 		dir = SDW_DATA_DIR_TX;
716 
717 	pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
718 	if (!pdi)
719 		return -EINVAL;
720 
721 	/* use same definitions for alh_id as previous generations */
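	/*
	 * e.g. on link 0, PDI0 and PDI1 map to ALH stream IDs 3 and 4, while
	 * PDI2 and above skip two more IDs (PDI2 -> 7, PDI3 -> 8).
	 */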
722 	pdi->intel_alh_id = (sdw->instance * 16) + pdi->num + 3;
723 	if (pdi->num >= 2)
724 		pdi->intel_alh_id += 2;
725 
726 	/* the SHIM will be configured in the callback functions */
727 
728 	sdw_cdns_config_stream(cdns, ch, dir, pdi);
729 
730 	/* store pdi and state, may be needed in prepare step */
731 	dai_runtime->paused = false;
732 	dai_runtime->suspended = false;
733 	dai_runtime->pdi = pdi;
734 
735 	/* Inform DSP about PDI stream number */
736 	ret = intel_params_stream(sdw, substream, dai, params,
737 				  sdw->instance,
738 				  pdi->intel_alh_id);
739 	if (ret)
740 		return ret;
741 
742 	sconfig.direction = dir;
743 	sconfig.ch_count = ch;
744 	sconfig.frame_rate = params_rate(params);
745 	sconfig.type = dai_runtime->stream_type;
746 
747 	sconfig.bps = snd_pcm_format_width(params_format(params));
748 
749 	/* Port configuration */
750 	struct sdw_port_config *pconfig __free(kfree) = kzalloc(sizeof(*pconfig),
751 								GFP_KERNEL);
752 	if (!pconfig)
753 		return -ENOMEM;
754 
755 	pconfig->num = pdi->num;
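	/* enable the lowest 'ch' channels, e.g. ch = 2 -> ch_mask = 0x3 */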
756 	pconfig->ch_mask = (1 << ch) - 1;
757 
758 	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
759 				    pconfig, 1, dai_runtime->stream);
760 	if (ret)
761 		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
762 
763 	return ret;
764 }
765 
766 static int intel_prepare(struct snd_pcm_substream *substream,
767 			 struct snd_soc_dai *dai)
768 {
769 	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
770 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
771 	struct sdw_intel *sdw = cdns_to_intel(cdns);
772 	struct sdw_cdns_dai_runtime *dai_runtime;
773 	struct snd_pcm_hw_params *hw_params;
774 	int ch, dir;
775 
776 	dai_runtime = cdns->dai_runtime_array[dai->id];
777 	if (!dai_runtime) {
778 		dev_err(dai->dev, "failed to get dai runtime in %s\n",
779 			__func__);
780 		return -EIO;
781 	}
782 
783 	hw_params = &rtd->dpcm[substream->stream].hw_params;
784 	if (dai_runtime->suspended) {
785 		dai_runtime->suspended = false;
786 
787 		/*
788 		 * .prepare() is called after system resume, where we
789 		 * need to reinitialize the SHIM/ALH/Cadence IP.
790 		 * .prepare() is also called to deal with underflows,
791 		 * but in those cases we cannot touch ALH/SHIM
792 		 * registers
793 		 */
794 
795 		/* configure stream */
796 		ch = params_channels(hw_params);
797 		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
798 			dir = SDW_DATA_DIR_RX;
799 		else
800 			dir = SDW_DATA_DIR_TX;
801 
802 		/* the SHIM will be configured in the callback functions */
803 
804 		sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
805 	}
806 
807 	/* Inform DSP about PDI stream number */
808 	return intel_params_stream(sdw, substream, dai, hw_params, sdw->instance,
809 				   dai_runtime->pdi->intel_alh_id);
810 }
811 
812 static int
813 intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
814 {
815 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
816 	struct sdw_intel *sdw = cdns_to_intel(cdns);
817 	struct sdw_cdns_dai_runtime *dai_runtime;
818 	int ret;
819 
820 	dai_runtime = cdns->dai_runtime_array[dai->id];
821 	if (!dai_runtime)
822 		return -EIO;
823 
824 	/*
825 	 * The sdw stream state will transition to RELEASED when stream->
826 	 * master_list is empty. So the stream state will transition to
827 	 * DEPREPARED for the first cpu-dai and to RELEASED for the last
828 	 * cpu-dai.
829 	 */
830 	ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
831 	if (ret < 0) {
832 		dev_err(dai->dev, "remove master from stream %s failed: %d\n",
833 			dai_runtime->stream->name, ret);
834 		return ret;
835 	}
836 
837 	ret = intel_free_stream(sdw, substream, dai, sdw->instance);
838 	if (ret < 0) {
839 		dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
840 		return ret;
841 	}
842 
843 	dai_runtime->pdi = NULL;
844 
845 	return 0;
846 }
847 
848 static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
849 				    void *stream, int direction)
850 {
851 	return cdns_set_sdw_stream(dai, stream, direction);
852 }
853 
854 static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
855 				  int direction)
856 {
857 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
858 	struct sdw_cdns_dai_runtime *dai_runtime;
859 
860 	dai_runtime = cdns->dai_runtime_array[dai->id];
861 	if (!dai_runtime)
862 		return ERR_PTR(-EINVAL);
863 
864 	return dai_runtime->stream;
865 }
866 
867 static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
868 {
869 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
870 	struct sdw_intel *sdw = cdns_to_intel(cdns);
871 	struct sdw_intel_link_res *res = sdw->link_res;
872 	struct sdw_cdns_dai_runtime *dai_runtime;
873 	int ret = 0;
874 
875 	/*
876 	 * The .trigger callback is used to program HDaudio DMA and send required IPC to audio
877 	 * firmware.
878 	 */
879 	if (res->ops && res->ops->trigger) {
880 		ret = res->ops->trigger(substream, cmd, dai);
881 		if (ret < 0)
882 			return ret;
883 	}
884 
885 	dai_runtime = cdns->dai_runtime_array[dai->id];
886 	if (!dai_runtime) {
887 		dev_err(dai->dev, "failed to get dai runtime in %s\n",
888 			__func__);
889 		return -EIO;
890 	}
891 
892 	switch (cmd) {
893 	case SNDRV_PCM_TRIGGER_SUSPEND:
894 
895 		/*
896 		 * The .prepare callback is used to deal with xruns and resume operations.
897 		 * In the case of xruns, the DMAs and SHIM registers cannot be touched,
898 		 * but for resume operations the DMAs and SHIM registers need to be initialized.
899 		 * The .trigger callback is used to track the suspend case only.
900 		 */
901 
902 		dai_runtime->suspended = true;
903 
904 		break;
905 
906 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
907 		dai_runtime->paused = true;
908 		break;
909 	case SNDRV_PCM_TRIGGER_STOP:
910 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
911 		dai_runtime->paused = false;
912 		break;
913 	default:
914 		break;
915 	}
916 
917 	return ret;
918 }
919 
920 static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
921 	.hw_params = intel_hw_params,
922 	.prepare = intel_prepare,
923 	.hw_free = intel_hw_free,
924 	.trigger = intel_trigger,
925 	.set_stream = intel_pcm_set_sdw_stream,
926 	.get_stream = intel_get_sdw_stream,
927 };
928 
929 static const struct snd_soc_component_driver dai_component = {
930 	.name			= "soundwire",
931 };
932 
933 /*
934  * PDI routines
935  */
936 static void intel_pdi_init(struct sdw_intel *sdw,
937 			   struct sdw_cdns_stream_config *config)
938 {
939 	void __iomem *shim = sdw->link_res->shim;
940 	int pcm_cap;
941 
942 	/* PCM Stream Capability */
943 	pcm_cap = intel_readw(shim, SDW_SHIM2_PCMSCAP);
944 
945 	config->pcm_bd = FIELD_GET(SDW_SHIM2_PCMSCAP_BSS, pcm_cap);
946 	config->pcm_in = FIELD_GET(SDW_SHIM2_PCMSCAP_ISS, pcm_cap);
947 	config->pcm_out = FIELD_GET(SDW_SHIM2_PCMSCAP_ISS, pcm_cap);
948 
949 	dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
950 		config->pcm_bd, config->pcm_in, config->pcm_out);
951 }
952 
953 static int
954 intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
955 {
956 	void __iomem *shim = sdw->link_res->shim;
957 
958 	/* zero based values for channel count in register */
959 	return intel_readw(shim, SDW_SHIM2_PCMSYCHC(pdi_num)) + 1;
960 }
961 
962 static void intel_pdi_get_ch_update(struct sdw_intel *sdw,
963 				    struct sdw_cdns_pdi *pdi,
964 				    unsigned int num_pdi,
965 				    unsigned int *num_ch)
966 {
967 	int ch_count = 0;
968 	int i;
969 
970 	for (i = 0; i < num_pdi; i++) {
971 		pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
972 		ch_count += pdi->ch_count;
973 		pdi++;
974 	}
975 
976 	*num_ch = ch_count;
977 }
978 
979 static void intel_pdi_stream_ch_update(struct sdw_intel *sdw,
980 				       struct sdw_cdns_streams *stream)
981 {
982 	intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
983 				&stream->num_ch_bd);
984 
985 	intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
986 				&stream->num_ch_in);
987 
988 	intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
989 				&stream->num_ch_out);
990 }
991 
992 static int intel_create_dai(struct sdw_cdns *cdns,
993 			    struct snd_soc_dai_driver *dais,
994 			    enum intel_pdi_type type,
995 			    u32 num, u32 off, u32 max_ch)
996 {
997 	int i;
998 
999 	if (!num)
1000 		return 0;
1001 
1002 	for (i = off; i < (off + num); i++) {
1003 		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
1004 					      "SDW%d Pin%d",
1005 					      cdns->instance, i);
1006 		if (!dais[i].name)
1007 			return -ENOMEM;
1008 
1009 		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
1010 			dais[i].playback.channels_min = 1;
1011 			dais[i].playback.channels_max = max_ch;
1012 		}
1013 
1014 		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
1015 			dais[i].capture.channels_min = 1;
1016 			dais[i].capture.channels_max = max_ch;
1017 		}
1018 
1019 		dais[i].ops = &intel_pcm_dai_ops;
1020 	}
1021 
1022 	return 0;
1023 }
1024 
1025 static int intel_register_dai(struct sdw_intel *sdw)
1026 {
1027 	struct sdw_cdns_dai_runtime **dai_runtime_array;
1028 	struct sdw_cdns_stream_config config;
1029 	struct sdw_cdns *cdns = &sdw->cdns;
1030 	struct sdw_cdns_streams *stream;
1031 	struct snd_soc_dai_driver *dais;
1032 	int num_dai;
1033 	int ret;
1034 	int off = 0;
1035 
1036 	/* Read the PDI config and initialize cadence PDI */
1037 	intel_pdi_init(sdw, &config);
1038 	ret = sdw_cdns_pdi_init(cdns, config);
1039 	if (ret)
1040 		return ret;
1041 
1042 	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
1043 
1044 	/* DAIs are created based on total number of PDIs supported */
1045 	num_dai = cdns->pcm.num_pdi;
1046 
1047 	dai_runtime_array = devm_kcalloc(cdns->dev, num_dai,
1048 					 sizeof(struct sdw_cdns_dai_runtime *),
1049 					 GFP_KERNEL);
1050 	if (!dai_runtime_array)
1051 		return -ENOMEM;
1052 	cdns->dai_runtime_array = dai_runtime_array;
1053 
1054 	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
1055 	if (!dais)
1056 		return -ENOMEM;
1057 
1058 	/* Create PCM DAIs */
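	/*
	 * The DAI array is laid out as input PDIs first, then output PDIs,
	 * then bidirectional PDIs; 'off' tracks the running offset into it.
	 */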
1059 	stream = &cdns->pcm;
1060 
1061 	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
1062 			       off, stream->num_ch_in);
1063 	if (ret)
1064 		return ret;
1065 
1066 	off += cdns->pcm.num_in;
1067 	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
1068 			       off, stream->num_ch_out);
1069 	if (ret)
1070 		return ret;
1071 
1072 	off += cdns->pcm.num_out;
1073 	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
1074 			       off, stream->num_ch_bd);
1075 	if (ret)
1076 		return ret;
1077 
1078 	return devm_snd_soc_register_component(cdns->dev, &dai_component,
1079 					       dais, num_dai);
1080 }
1081 
1082 static void intel_program_sdi(struct sdw_intel *sdw, int dev_num)
1083 {
1084 	int ret;
1085 
1086 	ret = hdac_bus_eml_sdw_set_lsdiid(sdw->link_res->hbus, sdw->instance, dev_num);
1087 	if (ret < 0)
1088 		dev_err(sdw->cdns.dev, "%s: could not set lsdiid for link %d, dev_num %d\n",
1089 			__func__, sdw->instance, dev_num);
1090 }
1091 
1092 static int intel_get_link_count(struct sdw_intel *sdw)
1093 {
1094 	int ret;
1095 
1096 	ret = hdac_bus_eml_get_count(sdw->link_res->hbus, true, AZX_REG_ML_LEPTR_ID_SDW);
1097 	if (!ret) {
1098 		dev_err(sdw->cdns.dev, "%s: could not retrieve link count\n", __func__);
1099 		return -ENODEV;
1100 	}
1101 
1102 	if (ret > SDW_INTEL_MAX_LINKS) {
1103 		dev_err(sdw->cdns.dev, "%s: link count %d exceeds max %d\n", __func__, ret, SDW_INTEL_MAX_LINKS);
1104 		return -EINVAL;
1105 	}
1106 
1107 	return ret;
1108 }
1109 
1110 const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops = {
1111 	.debugfs_init = intel_ace2x_debugfs_init,
1112 	.debugfs_exit = intel_ace2x_debugfs_exit,
1113 
1114 	.get_link_count = intel_get_link_count,
1115 
1116 	.register_dai = intel_register_dai,
1117 
1118 	.check_clock_stop = intel_check_clock_stop,
1119 	.start_bus = intel_start_bus,
1120 	.start_bus_after_reset = intel_start_bus_after_reset,
1121 	.start_bus_after_clock_stop = intel_start_bus_after_clock_stop,
1122 	.stop_bus = intel_stop_bus,
1123 
1124 	.link_power_up = intel_link_power_up,
1125 	.link_power_down = intel_link_power_down,
1126 
1127 	.shim_check_wake = intel_shim_check_wake,
1128 	.shim_wake = intel_shim_wake,
1129 
1130 	.pre_bank_switch = intel_pre_bank_switch,
1131 	.post_bank_switch = intel_post_bank_switch,
1132 
1133 	.sync_arm = intel_sync_arm,
1134 	.sync_go_unlocked = intel_sync_go_unlocked,
1135 	.sync_go = intel_sync_go,
1136 	.sync_check_cmdsync_unlocked = intel_check_cmdsync_unlocked,
1137 
1138 	.program_sdi = intel_program_sdi,
1139 
1140 	.bpt_send_async = intel_ace2x_bpt_send_async,
1141 	.bpt_wait = intel_ace2x_bpt_wait,
1142 };
1143 EXPORT_SYMBOL_NS(sdw_intel_lnl_hw_ops, "SOUNDWIRE_INTEL");
1144 
1145 MODULE_IMPORT_NS("SND_SOC_SOF_HDA_MLINK");
1146 MODULE_IMPORT_NS("SND_SOC_SOF_INTEL_HDA_SDW_BPT");
1147