/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2018 Intel Corporation. All rights reserved.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 */

#ifndef __SOUND_SOC_SOF_IO_H
#define __SOUND_SOC_SOF_IO_H

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <sound/pcm.h>
#include "sof-priv.h"

#define sof_ops(sdev) \
	((sdev)->pdata->desc->ops)

static inline void sof_ops_init(struct snd_sof_dev *sdev)
{
	if (sdev->pdata->desc->ops_init)
		sdev->pdata->desc->ops_init(sdev);
}

/* Mandatory operations are verified during probing */

/* init */
static inline int snd_sof_probe(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->probe(sdev);
}

static inline int snd_sof_remove(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->remove)
		return sof_ops(sdev)->remove(sdev);

	return 0;
}

static inline int snd_sof_shutdown(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->shutdown)
		return sof_ops(sdev)->shutdown(sdev);

	return 0;
}
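
/*
 * Usage sketch (illustrative only, not part of this header): the wrappers in
 * this file follow a common convention. Callbacks invoked without a NULL
 * check (probe, run, block_read, block_write, send_msg, load_firmware,
 * ipc_msg_data) are mandatory and must be supplied by the platform ops;
 * the remaining callbacks are optional and default to a no-op. Assuming the
 * snd_sof_dsp_ops type from sof-priv.h, a hypothetical platform descriptor
 * (all "my_*" handler names are made up for illustration) might look like:
 *
 *	static struct snd_sof_dsp_ops my_dsp_ops = {
 *		.probe		= my_dsp_probe,
 *		.run		= my_dsp_run,
 *		.block_read	= my_block_read,
 *		.block_write	= my_block_write,
 *		.send_msg	= my_dsp_send_msg,
 *		.load_firmware	= my_load_firmware,
 *		.ipc_msg_data	= my_dsp_ipc_msg_data,
 *		.remove		= my_dsp_remove,
 *	};
 */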

/* control */

/*
 * snd_sof_dsp_run returns the core mask of the cores that are available
 * after successful fw boot
 */
static inline int snd_sof_dsp_run(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->run(sdev);
}

static inline int snd_sof_dsp_stall(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	if (sof_ops(sdev)->stall)
		return sof_ops(sdev)->stall(sdev, core_mask);

	return 0;
}

static inline int snd_sof_dsp_reset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->reset)
		return sof_ops(sdev)->reset(sdev);

	return 0;
}

/* dsp core get/put */
static inline int snd_sof_dsp_core_get(struct snd_sof_dev *sdev, int core)
{
	if (core > sdev->num_cores - 1) {
		dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
			sdev->num_cores);
		return -EINVAL;
	}

	if (sof_ops(sdev)->core_get) {
		int ret;

		/* if current ref_count is > 0, increment it and return */
		if (sdev->dsp_core_ref_count[core] > 0) {
			sdev->dsp_core_ref_count[core]++;
			return 0;
		}

		/* power up the core */
		ret = sof_ops(sdev)->core_get(sdev, core);
		if (ret < 0)
			return ret;

		/* increment ref_count */
		sdev->dsp_core_ref_count[core]++;

		/* and update enabled_cores_mask */
		sdev->enabled_cores_mask |= BIT(core);

		dev_dbg(sdev->dev, "Core %d powered up\n", core);
	}

	return 0;
}

static inline int snd_sof_dsp_core_put(struct snd_sof_dev *sdev, int core)
{
	if (core > sdev->num_cores - 1) {
		dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
			sdev->num_cores);
		return -EINVAL;
	}

	if (sof_ops(sdev)->core_put) {
		int ret;

		/* decrement ref_count and return if it is > 0 */
		if (--(sdev->dsp_core_ref_count[core]) > 0)
			return 0;

		/* power down the core */
		ret = sof_ops(sdev)->core_put(sdev, core);
		if (ret < 0)
			return ret;

		/* and update enabled_cores_mask */
		sdev->enabled_cores_mask &= ~BIT(core);

		dev_dbg(sdev->dev, "Core %d powered down\n", core);
	}

	return 0;
}
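
/*
 * Illustrative sketch (hypothetical caller): the get/put helpers above keep a
 * per-core reference count, so callers simply pair them up; the core is only
 * powered up on the first get and powered down again on the last put:
 *
 *	static int my_widget_setup(struct snd_sof_dev *sdev, int core)
 *	{
 *		int ret;
 *
 *		ret = snd_sof_dsp_core_get(sdev, core);
 *		if (ret < 0)
 *			return ret;
 *
 *		... use the core ...
 *
 *		return snd_sof_dsp_core_put(sdev, core);
 *	}
 */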

/* pre/post fw load */
static inline int snd_sof_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->pre_fw_run)
		return sof_ops(sdev)->pre_fw_run(sdev);

	return 0;
}

static inline int snd_sof_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->post_fw_run)
		return sof_ops(sdev)->post_fw_run(sdev);

	return 0;
}

/* parse platform specific extended manifest */
static inline int snd_sof_dsp_parse_platform_ext_manifest(struct snd_sof_dev *sdev,
							  const struct sof_ext_man_elem_header *hdr)
{
	if (sof_ops(sdev)->parse_platform_ext_manifest)
		return sof_ops(sdev)->parse_platform_ext_manifest(sdev, hdr);

	return 0;
}

/* misc */

/**
 * snd_sof_dsp_get_bar_index - Maps a section type to a BAR index
 *
 * @sdev: sof device
 * @type: section type as described by snd_sof_fw_blk_type
 *
 * Returns the corresponding BAR index (a non-negative integer) or -EINVAL
 * in case there is no mapping
 */
static inline int snd_sof_dsp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
	if (sof_ops(sdev)->get_bar_index)
		return sof_ops(sdev)->get_bar_index(sdev, type);

	return sdev->mmio_bar;
}
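
/*
 * Illustrative sketch (hypothetical offset, assuming the SOF_FW_BLK_TYPE_SRAM
 * block type from the SOF firmware headers): a caller translates the block
 * type to a BAR first, then uses the regular register accessors with it:
 *
 *	int bar = snd_sof_dsp_get_bar_index(sdev, SOF_FW_BLK_TYPE_SRAM);
 *
 *	if (bar < 0)
 *		return bar;
 *	val = snd_sof_dsp_read(sdev, bar, offset);
 */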

static inline int snd_sof_dsp_get_mailbox_offset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->get_mailbox_offset)
		return sof_ops(sdev)->get_mailbox_offset(sdev);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

static inline int snd_sof_dsp_get_window_offset(struct snd_sof_dev *sdev,
						u32 id)
{
	if (sof_ops(sdev)->get_window_offset)
		return sof_ops(sdev)->get_window_offset(sdev, id);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

/* power management */
static inline int snd_sof_dsp_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->resume)
		return sof_ops(sdev)->resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev,
				      u32 target_state)
{
	if (sof_ops(sdev)->suspend)
		return sof_ops(sdev)->suspend(sdev, target_state);

	return 0;
}

static inline int snd_sof_dsp_runtime_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_resume)
		return sof_ops(sdev)->runtime_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_suspend(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_suspend)
		return sof_ops(sdev)->runtime_suspend(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_idle(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_idle)
		return sof_ops(sdev)->runtime_idle(sdev);

	return 0;
}

static inline int snd_sof_dsp_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->set_hw_params_upon_resume)
		return sof_ops(sdev)->set_hw_params_upon_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
{
	if (sof_ops(sdev)->set_clk)
		return sof_ops(sdev)->set_clk(sdev, freq);

	return 0;
}

static inline int
snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
			    const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	mutex_lock(&sdev->power_state_access);

	if (sof_ops(sdev)->set_power_state)
		ret = sof_ops(sdev)->set_power_state(sdev, target_state);

	mutex_unlock(&sdev->power_state_access);

	return ret;
}
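
/*
 * Illustrative sketch (assumes the sof_dsp_power_state layout and the
 * SOF_DSP_PM_D0 state value from sof-priv.h): a PM handler would typically
 * request D0 on resume:
 *
 *	const struct sof_dsp_power_state target_state = {
 *		.state = SOF_DSP_PM_D0,
 *	};
 *
 *	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
 *	if (ret < 0)
 *		return ret;
 */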

/* debug */
void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, const char *msg, u32 flags);

static inline int snd_sof_debugfs_add_region_item(struct snd_sof_dev *sdev,
		enum snd_sof_fw_blk_type blk_type, u32 offset, size_t size,
		const char *name, enum sof_debugfs_access_type access_type)
{
	if (sof_ops(sdev) && sof_ops(sdev)->debugfs_add_region_item)
		return sof_ops(sdev)->debugfs_add_region_item(sdev, blk_type, offset,
							      size, name, access_type);

	return 0;
}

/* register IO */
static inline void snd_sof_dsp_write(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset, u32 value)
{
	if (sof_ops(sdev)->write) {
		sof_ops(sdev)->write(sdev, sdev->bar[bar] + offset, value);
		return;
	}

	dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
}

static inline void snd_sof_dsp_write64(struct snd_sof_dev *sdev, u32 bar,
				       u32 offset, u64 value)
{
	if (sof_ops(sdev)->write64) {
		sof_ops(sdev)->write64(sdev, sdev->bar[bar] + offset, value);
		return;
	}

	dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
}

static inline u32 snd_sof_dsp_read(struct snd_sof_dev *sdev, u32 bar,
				   u32 offset)
{
	if (sof_ops(sdev)->read)
		return sof_ops(sdev)->read(sdev, sdev->bar[bar] + offset);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

static inline u64 snd_sof_dsp_read64(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset)
{
	if (sof_ops(sdev)->read64)
		return sof_ops(sdev)->read64(sdev, sdev->bar[bar] + offset);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

/* block IO */
static inline int snd_sof_dsp_block_read(struct snd_sof_dev *sdev,
					 enum snd_sof_fw_blk_type blk_type,
					 u32 offset, void *dest, size_t bytes)
{
	return sof_ops(sdev)->block_read(sdev, blk_type, offset, dest, bytes);
}

static inline int snd_sof_dsp_block_write(struct snd_sof_dev *sdev,
					  enum snd_sof_fw_blk_type blk_type,
					  u32 offset, void *src, size_t bytes)
{
	return sof_ops(sdev)->block_write(sdev, blk_type, offset, src, bytes);
}

/* mailbox IO */
static inline void snd_sof_dsp_mailbox_read(struct snd_sof_dev *sdev,
					    u32 offset, void *dest, size_t bytes)
{
	if (sof_ops(sdev)->mailbox_read)
		sof_ops(sdev)->mailbox_read(sdev, offset, dest, bytes);
}

static inline void snd_sof_dsp_mailbox_write(struct snd_sof_dev *sdev,
					     u32 offset, void *src, size_t bytes)
{
	if (sof_ops(sdev)->mailbox_write)
		sof_ops(sdev)->mailbox_write(sdev, offset, src, bytes);
}
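
/*
 * Illustrative IPC3-style sketch (struct sof_ipc_reply and the dsp_box
 * mailbox come from the SOF IPC headers and sof-priv.h): IPC code reads a
 * firmware reply out of the DSP outbox with the mailbox helpers above:
 *
 *	struct sof_ipc_reply reply;
 *
 *	snd_sof_dsp_mailbox_read(sdev, sdev->dsp_box.offset, &reply,
 *				 sizeof(reply));
 *	if (reply.error < 0)
 *		dev_err(sdev->dev, "firmware replied with %d\n", reply.error);
 */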

/* ipc */
static inline int snd_sof_dsp_send_msg(struct snd_sof_dev *sdev,
				       struct snd_sof_ipc_msg *msg)
{
	return sof_ops(sdev)->send_msg(sdev, msg);
}

/* host DMA trace */
static inline int snd_sof_dma_trace_init(struct snd_sof_dev *sdev,
					 struct sof_ipc_dma_trace_params_ext *dtrace_params)
{
	if (sof_ops(sdev)->trace_init)
		return sof_ops(sdev)->trace_init(sdev, dtrace_params);

	return 0;
}

static inline int snd_sof_dma_trace_release(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->trace_release)
		return sof_ops(sdev)->trace_release(sdev);

	return 0;
}

static inline int snd_sof_dma_trace_trigger(struct snd_sof_dev *sdev, int cmd)
{
	if (sof_ops(sdev)->trace_trigger)
		return sof_ops(sdev)->trace_trigger(sdev, cmd);

	return 0;
}

/* host PCM ops */
static inline int
snd_sof_pcm_platform_open(struct snd_sof_dev *sdev,
			  struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_open)
		return sof_ops(sdev)->pcm_open(sdev, substream);

	return 0;
}

/* disconnect pcm substream from a host stream */
static inline int
snd_sof_pcm_platform_close(struct snd_sof_dev *sdev,
			   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_close)
		return sof_ops(sdev)->pcm_close(sdev, substream);

	return 0;
}

/* host stream hw params */
static inline int
snd_sof_pcm_platform_hw_params(struct snd_sof_dev *sdev,
			       struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params,
			       struct snd_sof_platform_stream_params *platform_params)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_params)
		return sof_ops(sdev)->pcm_hw_params(sdev, substream, params,
						    platform_params);

	return 0;
}

/* host stream hw free */
static inline int
snd_sof_pcm_platform_hw_free(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_free)
		return sof_ops(sdev)->pcm_hw_free(sdev, substream);

	return 0;
}

/* host stream trigger */
static inline int
snd_sof_pcm_platform_trigger(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream, int cmd)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_trigger)
		return sof_ops(sdev)->pcm_trigger(sdev, substream, cmd);

	return 0;
}
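
/*
 * Illustrative sketch (hypothetical caller): the SOF PCM layer forwards the
 * ALSA trigger command to the platform driver through the wrapper above:
 *
 *	ret = snd_sof_pcm_platform_trigger(sdev, substream,
 *					   SNDRV_PCM_TRIGGER_START);
 *	if (ret < 0)
 *		dev_err(sdev->dev, "platform trigger failed: %d\n", ret);
 */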

/* Firmware loading */
static inline int snd_sof_load_firmware(struct snd_sof_dev *sdev)
{
	dev_dbg(sdev->dev, "loading firmware\n");

	return sof_ops(sdev)->load_firmware(sdev);
}

/* host DSP message data */
static inline int snd_sof_ipc_msg_data(struct snd_sof_dev *sdev,
				       struct snd_pcm_substream *substream,
				       void *p, size_t sz)
{
	return sof_ops(sdev)->ipc_msg_data(sdev, substream, p, sz);
}

/* host side configuration of the stream's data offset in stream mailbox area */
static inline int
snd_sof_set_stream_data_offset(struct snd_sof_dev *sdev,
			       struct snd_pcm_substream *substream,
			       size_t posn_offset)
{
	if (sof_ops(sdev) && sof_ops(sdev)->set_stream_data_offset)
		return sof_ops(sdev)->set_stream_data_offset(sdev, substream,
							     posn_offset);

	return 0;
}

/* host stream pointer */
static inline snd_pcm_uframes_t
snd_sof_pcm_platform_pointer(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_pointer)
		return sof_ops(sdev)->pcm_pointer(sdev, substream);

	return 0;
}

/* pcm ack */
static inline int snd_sof_pcm_platform_ack(struct snd_sof_dev *sdev,
					   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_ack)
		return sof_ops(sdev)->pcm_ack(sdev, substream);

	return 0;
}

/* machine driver */
static inline int
snd_sof_machine_register(struct snd_sof_dev *sdev, void *pdata)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_register)
		return sof_ops(sdev)->machine_register(sdev, pdata);

	return 0;
}

static inline void
snd_sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_unregister)
		sof_ops(sdev)->machine_unregister(sdev, pdata);
}

static inline struct snd_soc_acpi_mach *
snd_sof_machine_select(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_select)
		return sof_ops(sdev)->machine_select(sdev);

	return NULL;
}

static inline void
snd_sof_set_mach_params(struct snd_soc_acpi_mach *mach,
			struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev) && sof_ops(sdev)->set_mach_params)
		sof_ops(sdev)->set_mach_params(mach, sdev);
}

/**
 * snd_sof_dsp_read_poll_timeout - Periodically poll a register
 * until a condition is met or a timeout occurs
 * @sdev: sof device
 * @bar: BAR containing the register
 * @offset: Register offset within @bar
 * @val: Variable to read the register value into
 * @cond: Break condition (usually involving @val)
 * @sleep_us: Maximum time to sleep between reads in us (0
 *            tight-loops).  Should be less than ~20ms since usleep_range
 *            is used (see Documentation/timers/timers-howto.rst).
 * @timeout_us: Timeout in us, 0 means never timeout
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last value read from the register is stored in @val. Must not
 * be called from atomic context if sleep_us or timeout_us are used.
 *
 * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
 */
#define snd_sof_dsp_read_poll_timeout(sdev, bar, offset, val, cond, sleep_us, timeout_us) \
({ \
	u64 __timeout_us = (timeout_us); \
	unsigned long __sleep_us = (sleep_us); \
	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
	might_sleep_if((__sleep_us) != 0); \
	for (;;) {							\
		(val) = snd_sof_dsp_read(sdev, bar, offset);		\
		if (cond) { \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x successful\n", \
				(offset), (val)); \
			break; \
		} \
		if (__timeout_us && \
		    ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = snd_sof_dsp_read(sdev, bar, offset); \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x timedout\n", \
				(offset), (val)); \
			break; \
		} \
		if (__sleep_us) \
			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
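
/*
 * Example usage of the polling macro above (register offset, mask and ready
 * value are hypothetical): wait up to 500 ms for a firmware status register
 * to report "ready", sleeping between reads:
 *
 *	ret = snd_sof_dsp_read_poll_timeout(sdev, bar, MY_FW_STATUS_OFFSET,
 *					    status,
 *					    (status & MY_FW_STATUS_MASK) == MY_FW_READY,
 *					    USEC_PER_MSEC, 500 * USEC_PER_MSEC);
 *	if (ret < 0)
 *		dev_err(sdev->dev, "firmware boot timeout\n");
 */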

/* This is for register bits with attribute RWC */
bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
				      u32 offset, u32 mask, u32 value);

bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
					u32 offset, u64 mask, u64 value);

bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar,
			       u32 offset, u64 mask, u64 value);

void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
				    u32 offset, u32 mask, u32 value);
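
/*
 * Illustrative sketch (hypothetical register and mask): the update_bits
 * helpers above perform a read-modify-write and report whether the register
 * value actually changed:
 *
 *	changed = snd_sof_dsp_update_bits(sdev, bar, MY_CTRL_OFFSET,
 *					  MY_CTRL_ENABLE_MASK,
 *					  MY_CTRL_ENABLE_MASK);
 */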

int snd_sof_dsp_register_poll(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			      u32 mask, u32 target, u32 timeout_ms,
			      u32 interval_us);

void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset, bool non_recoverable);
#endif