/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2018 Intel Corporation. All rights reserved.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 */

#ifndef __SOUND_SOC_SOF_IO_H
#define __SOUND_SOC_SOF_IO_H

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <sound/pcm.h>
#include "sof-priv.h"

#define sof_ops(sdev) \
	((sdev)->pdata->desc->ops)

/* Mandatory operations are verified during probing */

/* init */
static inline int snd_sof_probe(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->probe(sdev);
}

static inline int snd_sof_remove(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->remove)
		return sof_ops(sdev)->remove(sdev);

	return 0;
}

/* control */

/*
 * snd_sof_dsp_run returns the core mask of the cores that are available
 * after successful fw boot
 */
static inline int snd_sof_dsp_run(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->run(sdev);
}
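
/*
 * Illustrative sketch only: the comment above says snd_sof_dsp_run()
 * reports the mask of DSP cores available after a successful firmware
 * boot. This hypothetical helper shows how a caller might check that
 * mask; the name and the "wanted_cores_mask" parameter are assumptions
 * made for the example, not existing SOF symbols.
 */
static inline int sof_example_run_and_check_cores(struct snd_sof_dev *sdev,
						  unsigned int wanted_cores_mask)
{
	int enabled_cores_mask;

	enabled_cores_mask = snd_sof_dsp_run(sdev);
	if (enabled_cores_mask < 0)
		return enabled_cores_mask;

	/* make sure every core we depend on came up with the firmware */
	if ((enabled_cores_mask & wanted_cores_mask) != wanted_cores_mask)
		return -ENODEV;

	return 0;
}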

static inline int snd_sof_dsp_stall(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->stall)
		return sof_ops(sdev)->stall(sdev);

	return 0;
}

static inline int snd_sof_dsp_reset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->reset)
		return sof_ops(sdev)->reset(sdev);

	return 0;
}

/* dsp core power up/power down */
static inline int snd_sof_dsp_core_power_up(struct snd_sof_dev *sdev,
					    unsigned int core_mask)
{
	if (sof_ops(sdev)->core_power_up)
		return sof_ops(sdev)->core_power_up(sdev, core_mask);

	return 0;
}

static inline int snd_sof_dsp_core_power_down(struct snd_sof_dev *sdev,
					      unsigned int core_mask)
{
	if (sof_ops(sdev)->core_power_down)
		return sof_ops(sdev)->core_power_down(sdev, core_mask);

	return 0;
}

/* pre/post fw load */
static inline int snd_sof_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->pre_fw_run)
		return sof_ops(sdev)->pre_fw_run(sdev);

	return 0;
}

static inline int snd_sof_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->post_fw_run)
		return sof_ops(sdev)->post_fw_run(sdev);

	return 0;
}

/* power management */
static inline int snd_sof_dsp_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->resume)
		return sof_ops(sdev)->resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev, int state)
{
	if (sof_ops(sdev)->suspend)
		return sof_ops(sdev)->suspend(sdev, state);

	return 0;
}

static inline int snd_sof_dsp_runtime_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_resume)
		return sof_ops(sdev)->runtime_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_suspend(struct snd_sof_dev *sdev,
					      int state)
{
	if (sof_ops(sdev)->runtime_suspend)
		return sof_ops(sdev)->runtime_suspend(sdev, state);

	return 0;
}

static inline int snd_sof_dsp_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->set_hw_params_upon_resume)
		return sof_ops(sdev)->set_hw_params_upon_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
{
	if (sof_ops(sdev)->set_clk)
		return sof_ops(sdev)->set_clk(sdev, freq);

	return 0;
}

/* debug */
static inline void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, u32 flags)
{
	if (sof_ops(sdev)->dbg_dump)
		sof_ops(sdev)->dbg_dump(sdev, flags);
}

static inline void snd_sof_ipc_dump(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->ipc_dump)
		sof_ops(sdev)->ipc_dump(sdev);
}

/* register IO */
static inline void snd_sof_dsp_write(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset, u32 value)
{
	if (sof_ops(sdev)->write) {
		sof_ops(sdev)->write(sdev, sdev->bar[bar] + offset, value);
		return;
	}

	dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
}

static inline void snd_sof_dsp_write64(struct snd_sof_dev *sdev, u32 bar,
				       u32 offset, u64 value)
{
	if (sof_ops(sdev)->write64) {
		sof_ops(sdev)->write64(sdev, sdev->bar[bar] + offset, value);
		return;
	}

	dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
}

static inline u32 snd_sof_dsp_read(struct snd_sof_dev *sdev, u32 bar,
				   u32 offset)
{
	if (sof_ops(sdev)->read)
		return sof_ops(sdev)->read(sdev, sdev->bar[bar] + offset);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

static inline u64 snd_sof_dsp_read64(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset)
{
	if (sof_ops(sdev)->read64)
		return sof_ops(sdev)->read64(sdev, sdev->bar[bar] + offset);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

/* block IO */
static inline void snd_sof_dsp_block_read(struct snd_sof_dev *sdev, u32 bar,
					  u32 offset, void *dest, size_t bytes)
{
	sof_ops(sdev)->block_read(sdev, bar, offset, dest, bytes);
}

static inline void snd_sof_dsp_block_write(struct snd_sof_dev *sdev, u32 bar,
					   u32 offset, void *src, size_t bytes)
{
	sof_ops(sdev)->block_write(sdev, bar, offset, src, bytes);
}

/* ipc */
static inline int snd_sof_dsp_send_msg(struct snd_sof_dev *sdev,
				       struct snd_sof_ipc_msg *msg)
{
	return sof_ops(sdev)->send_msg(sdev, msg);
}

/* host DMA trace */
static inline int snd_sof_dma_trace_init(struct snd_sof_dev *sdev,
					 u32 *stream_tag)
{
	if (sof_ops(sdev)->trace_init)
		return sof_ops(sdev)->trace_init(sdev, stream_tag);

	return 0;
}

static inline int snd_sof_dma_trace_release(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->trace_release)
		return sof_ops(sdev)->trace_release(sdev);

	return 0;
}

static inline int snd_sof_dma_trace_trigger(struct snd_sof_dev *sdev, int cmd)
{
	if (sof_ops(sdev)->trace_trigger)
		return sof_ops(sdev)->trace_trigger(sdev, cmd);

	return 0;
}

/* host PCM ops */
static inline int
snd_sof_pcm_platform_open(struct snd_sof_dev *sdev,
			  struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_open)
		return sof_ops(sdev)->pcm_open(sdev, substream);

	return 0;
}

/* disconnect pcm substream from a host stream */
static inline int
snd_sof_pcm_platform_close(struct snd_sof_dev *sdev,
			   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_close)
		return sof_ops(sdev)->pcm_close(sdev, substream);

	return 0;
}

/* host stream hw params */
static inline int
snd_sof_pcm_platform_hw_params(struct snd_sof_dev *sdev,
			       struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params,
			       struct sof_ipc_stream_params *ipc_params)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_params)
		return sof_ops(sdev)->pcm_hw_params(sdev, substream,
						    params, ipc_params);

	return 0;
}

/* host stream hw free */
static inline int
snd_sof_pcm_platform_hw_free(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_free)
		return sof_ops(sdev)->pcm_hw_free(sdev, substream);

	return 0;
}

/* host stream trigger */
static inline int
snd_sof_pcm_platform_trigger(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream, int cmd)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_trigger)
		return sof_ops(sdev)->pcm_trigger(sdev, substream, cmd);

	return 0;
}

/* host DSP message data */
static inline void snd_sof_ipc_msg_data(struct snd_sof_dev *sdev,
					struct snd_pcm_substream *substream,
					void *p, size_t sz)
{
	sof_ops(sdev)->ipc_msg_data(sdev, substream, p, sz);
}

/* host configure DSP HW parameters */
static inline int
snd_sof_ipc_pcm_params(struct snd_sof_dev *sdev,
		       struct snd_pcm_substream *substream,
		       const struct sof_ipc_pcm_params_reply *reply)
{
	return sof_ops(sdev)->ipc_pcm_params(sdev, substream, reply);
}

/* host stream pointer */
static inline snd_pcm_uframes_t
snd_sof_pcm_platform_pointer(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_pointer)
		return sof_ops(sdev)->pcm_pointer(sdev, substream);

	return 0;
}

static inline const struct snd_sof_dsp_ops *
sof_get_ops(const struct sof_dev_desc *d,
	    const struct sof_ops_table mach_ops[], int asize)
{
	int i;

	for (i = 0; i < asize; i++) {
		if (d == mach_ops[i].desc)
			return mach_ops[i].ops;
	}

	/* not found */
	return NULL;
}

/**
 * snd_sof_dsp_read_poll_timeout - Periodically poll a register until a
 *				   condition is met or a timeout occurs
 * @sdev: SOF device
 * @bar: BAR index passed to snd_sof_dsp_read()
 * @offset: Register offset within @bar to poll
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @sleep_us: Maximum time to sleep between reads in us (0
 *	      tight-loops). Should be less than ~20ms since usleep_range
 *	      is used (see Documentation/timers/timers-howto.txt).
 * @timeout_us: Timeout in us, 0 means never time out
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last value read from the register is stored in @val. Must not
 * be called from atomic context if sleep_us or timeout_us are used.
 *
 * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
 */
#define snd_sof_dsp_read_poll_timeout(sdev, bar, offset, val, cond, sleep_us, timeout_us) \
({ \
	u64 __timeout_us = (timeout_us); \
	unsigned long __sleep_us = (sleep_us); \
	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
	might_sleep_if((__sleep_us) != 0); \
	for (;;) { \
		(val) = snd_sof_dsp_read(sdev, bar, offset); \
		if (cond) { \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg=%#x successful\n", (val)); \
			break; \
		} \
		if (__timeout_us && \
		    ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = snd_sof_dsp_read(sdev, bar, offset); \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg=%#x timed out\n", (val)); \
			break; \
		} \
		if (__sleep_us) \
			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
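
/*
 * Illustrative example of using snd_sof_dsp_read_poll_timeout(): wait for
 * a "firmware ready" bit to be set in a status register. The helper name
 * and the bar/offset/mask arguments are hypothetical placeholders chosen
 * for this sketch, not real SOF register definitions.
 */
static inline int sof_example_wait_fw_ready(struct snd_sof_dev *sdev, u32 bar,
					    u32 offset, u32 ready_mask)
{
	u32 status;

	/* poll up to every 500us, time out after 100ms (100000us) */
	return snd_sof_dsp_read_poll_timeout(sdev, bar, offset, status,
					     (status & ready_mask), 500,
					     100000);
}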

/* This is for register bits with attribute RWC */
bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
				      u32 offset, u32 mask, u32 value);

bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
					u32 offset, u64 mask, u64 value);

bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar,
			       u32 offset, u64 mask, u64 value);

void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
				    u32 offset, u32 mask, u32 value);

int snd_sof_dsp_register_poll(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			      u32 mask, u32 target, u32 timeout_ms,
			      u32 interval_us);

void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset);

#endif