/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2018 Intel Corporation. All rights reserved.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 */

#ifndef __SOUND_SOC_SOF_IO_H
#define __SOUND_SOC_SOF_IO_H

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <sound/pcm.h>
#include "sof-priv.h"

#define sof_ops(sdev) \
	((sdev)->pdata->desc->ops)

static inline int sof_ops_init(struct snd_sof_dev *sdev)
{
	if (sdev->pdata->desc->ops_init)
		return sdev->pdata->desc->ops_init(sdev);

	return 0;
}

static inline void sof_ops_free(struct snd_sof_dev *sdev)
{
	if (sdev->pdata->desc->ops_free)
		sdev->pdata->desc->ops_free(sdev);
}

/* Mandatory operations are verified during probing */

/* init */
static inline int snd_sof_probe_early(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->probe_early)
		return sof_ops(sdev)->probe_early(sdev);

	return 0;
}

static inline int snd_sof_probe(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->probe(sdev);
}

static inline void snd_sof_remove(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->remove)
		sof_ops(sdev)->remove(sdev);
}

static inline void snd_sof_remove_late(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->remove_late)
		sof_ops(sdev)->remove_late(sdev);
}

static inline int snd_sof_shutdown(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->shutdown)
		return sof_ops(sdev)->shutdown(sdev);

	return 0;
}

/* control */

/*
 * snd_sof_dsp_run returns the core mask of the cores that are available
 * after successful fw boot
 */
static inline int snd_sof_dsp_run(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->run(sdev);
}

static inline int snd_sof_dsp_stall(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	if (sof_ops(sdev)->stall)
		return sof_ops(sdev)->stall(sdev, core_mask);

	return 0;
}

static inline int snd_sof_dsp_reset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->reset)
		return sof_ops(sdev)->reset(sdev);

	return 0;
}

/* dsp core get/put */
static inline int snd_sof_dsp_core_get(struct snd_sof_dev *sdev, int core)
{
	if (core > sdev->num_cores - 1) {
		dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
			sdev->num_cores);
		return -EINVAL;
	}

	if (sof_ops(sdev)->core_get) {
		int ret;

		/* if current ref_count is > 0, increment it and return */
		if (sdev->dsp_core_ref_count[core] > 0) {
			sdev->dsp_core_ref_count[core]++;
			return 0;
		}

		/* power up the core */
		ret = sof_ops(sdev)->core_get(sdev, core);
		if (ret < 0)
			return ret;

		/* increment ref_count */
		sdev->dsp_core_ref_count[core]++;

		/* and update enabled_cores_mask */
		sdev->enabled_cores_mask |= BIT(core);

		dev_dbg(sdev->dev, "Core %d powered up\n", core);
	}

	return 0;
}

static inline int snd_sof_dsp_core_put(struct snd_sof_dev *sdev, int core)
{
	if (core > sdev->num_cores - 1) {
		dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
			sdev->num_cores);
		return -EINVAL;
	}

	if (sof_ops(sdev)->core_put) {
		int ret;

		/* decrement ref_count and return if it is > 0 */
		if (--(sdev->dsp_core_ref_count[core]) > 0)
			return 0;

		/* power down the core */
		ret = sof_ops(sdev)->core_put(sdev, core);
		if (ret < 0)
			return ret;

		/* and update enabled_cores_mask */
		sdev->enabled_cores_mask &= ~BIT(core);

		dev_dbg(sdev->dev, "Core %d powered down\n", core);
	}

	return 0;
}
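
/*
 * Usage sketch (illustrative only, not part of the SOF API): callers are
 * expected to keep get/put balanced so that the per-core ref_count and
 * enabled_cores_mask stay consistent, e.g.
 *
 *	ret = snd_sof_dsp_core_get(sdev, core);
 *	if (ret < 0)
 *		return ret;
 *
 *	... use the core ...
 *
 *	snd_sof_dsp_core_put(sdev, core);
 */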

/* pre/post fw load */
static inline int snd_sof_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->pre_fw_run)
		return sof_ops(sdev)->pre_fw_run(sdev);

	return 0;
}

static inline int snd_sof_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->post_fw_run)
		return sof_ops(sdev)->post_fw_run(sdev);

	return 0;
}

/* parse platform specific extended manifest */
static inline int snd_sof_dsp_parse_platform_ext_manifest(struct snd_sof_dev *sdev,
							   const struct sof_ext_man_elem_header *hdr)
{
	if (sof_ops(sdev)->parse_platform_ext_manifest)
		return sof_ops(sdev)->parse_platform_ext_manifest(sdev, hdr);

	return 0;
}

/* misc */

/**
 * snd_sof_dsp_get_bar_index - Maps a section type to a BAR index
 *
 * @sdev: sof device
 * @type: section type as described by snd_sof_fw_blk_type
 *
 * Returns the corresponding BAR index (a positive integer) or -EINVAL
 * in case there is no mapping
 */
static inline int snd_sof_dsp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
	if (sof_ops(sdev)->get_bar_index)
		return sof_ops(sdev)->get_bar_index(sdev, type);

	return sdev->mmio_bar;
}

static inline int snd_sof_dsp_get_mailbox_offset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->get_mailbox_offset)
		return sof_ops(sdev)->get_mailbox_offset(sdev);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -EOPNOTSUPP;
}

static inline int snd_sof_dsp_get_window_offset(struct snd_sof_dev *sdev,
						u32 id)
{
	if (sof_ops(sdev)->get_window_offset)
		return sof_ops(sdev)->get_window_offset(sdev, id);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -EOPNOTSUPP;
}

/* power management */
static inline int snd_sof_dsp_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->resume)
		return sof_ops(sdev)->resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev,
				      u32 target_state)
{
	if (sof_ops(sdev)->suspend)
		return sof_ops(sdev)->suspend(sdev, target_state);

	return 0;
}

static inline int snd_sof_dsp_runtime_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_resume)
		return sof_ops(sdev)->runtime_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_suspend(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_suspend)
		return sof_ops(sdev)->runtime_suspend(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_idle(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_idle)
		return sof_ops(sdev)->runtime_idle(sdev);

	return 0;
}

static inline int snd_sof_dsp_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->set_hw_params_upon_resume)
		return sof_ops(sdev)->set_hw_params_upon_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
{
	if (sof_ops(sdev)->set_clk)
		return sof_ops(sdev)->set_clk(sdev, freq);

	return 0;
}

static inline int
snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
			    const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	mutex_lock(&sdev->power_state_access);

	if (sof_ops(sdev)->set_power_state)
		ret = sof_ops(sdev)->set_power_state(sdev, target_state);

	mutex_unlock(&sdev->power_state_access);

	return ret;
}

/* debug */
void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, const char *msg, u32 flags);

static inline int snd_sof_debugfs_add_region_item(struct snd_sof_dev *sdev,
		enum snd_sof_fw_blk_type blk_type, u32 offset, size_t size,
		const char *name, enum sof_debugfs_access_type access_type)
{
	if (sof_ops(sdev) && sof_ops(sdev)->debugfs_add_region_item)
		return sof_ops(sdev)->debugfs_add_region_item(sdev, blk_type, offset,
							      size, name, access_type);

	return 0;
}

/* register IO */
static inline void snd_sof_dsp_write8(struct snd_sof_dev *sdev, u32 bar,
				      u32 offset, u8 value)
{
	if (sof_ops(sdev)->write8)
		sof_ops(sdev)->write8(sdev, sdev->bar[bar] + offset, value);
	else
		writeb(value, sdev->bar[bar] + offset);
}

static inline void snd_sof_dsp_write(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset, u32 value)
{
	if (sof_ops(sdev)->write)
		sof_ops(sdev)->write(sdev, sdev->bar[bar] + offset, value);
	else
		writel(value, sdev->bar[bar] + offset);
}

static inline void snd_sof_dsp_write64(struct snd_sof_dev *sdev, u32 bar,
				       u32 offset, u64 value)
{
	if (sof_ops(sdev)->write64)
		sof_ops(sdev)->write64(sdev, sdev->bar[bar] + offset, value);
	else
		writeq(value, sdev->bar[bar] + offset);
}

static inline u8 snd_sof_dsp_read8(struct snd_sof_dev *sdev, u32 bar,
				   u32 offset)
{
	if (sof_ops(sdev)->read8)
		return sof_ops(sdev)->read8(sdev, sdev->bar[bar] + offset);
	else
		return readb(sdev->bar[bar] + offset);
}

static inline u32 snd_sof_dsp_read(struct snd_sof_dev *sdev, u32 bar,
				   u32 offset)
{
	if (sof_ops(sdev)->read)
		return sof_ops(sdev)->read(sdev, sdev->bar[bar] + offset);
	else
		return readl(sdev->bar[bar] + offset);
}

static inline u64 snd_sof_dsp_read64(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset)
{
	if (sof_ops(sdev)->read64)
		return sof_ops(sdev)->read64(sdev, sdev->bar[bar] + offset);
	else
		return readq(sdev->bar[bar] + offset);
}

static inline void snd_sof_dsp_update8(struct snd_sof_dev *sdev, u32 bar,
				       u32 offset, u8 mask, u8 value)
{
	u8 reg;

	reg = snd_sof_dsp_read8(sdev, bar, offset);
	reg &= ~mask;
	reg |= value;
	snd_sof_dsp_write8(sdev, bar, offset, reg);
}
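
/*
 * Example (illustrative only): clear bit 0 and set bit 1 of an 8-bit
 * register in a single read-modify-write cycle:
 *
 *	snd_sof_dsp_update8(sdev, bar, offset, BIT(0) | BIT(1), BIT(1));
 */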

/* block IO */
static inline int snd_sof_dsp_block_read(struct snd_sof_dev *sdev,
					 enum snd_sof_fw_blk_type blk_type,
					 u32 offset, void *dest, size_t bytes)
{
	return sof_ops(sdev)->block_read(sdev, blk_type, offset, dest, bytes);
}

static inline int snd_sof_dsp_block_write(struct snd_sof_dev *sdev,
					  enum snd_sof_fw_blk_type blk_type,
					  u32 offset, void *src, size_t bytes)
{
	return sof_ops(sdev)->block_write(sdev, blk_type, offset, src, bytes);
}

/* mailbox IO */
static inline void snd_sof_dsp_mailbox_read(struct snd_sof_dev *sdev,
					    u32 offset, void *dest, size_t bytes)
{
	if (sof_ops(sdev)->mailbox_read)
		sof_ops(sdev)->mailbox_read(sdev, offset, dest, bytes);
}

static inline void snd_sof_dsp_mailbox_write(struct snd_sof_dev *sdev,
					     u32 offset, void *src, size_t bytes)
{
	if (sof_ops(sdev)->mailbox_write)
		sof_ops(sdev)->mailbox_write(sdev, offset, src, bytes);
}

/* ipc */
static inline int snd_sof_dsp_send_msg(struct snd_sof_dev *sdev,
				       struct snd_sof_ipc_msg *msg)
{
	return sof_ops(sdev)->send_msg(sdev, msg);
}

/* host PCM ops */
static inline int
snd_sof_pcm_platform_open(struct snd_sof_dev *sdev,
			  struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_open)
		return sof_ops(sdev)->pcm_open(sdev, substream);

	return 0;
}

/* disconnect pcm substream from a host stream */
static inline int
snd_sof_pcm_platform_close(struct snd_sof_dev *sdev,
			   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_close)
		return sof_ops(sdev)->pcm_close(sdev, substream);

	return 0;
}

/* host stream hw params */
static inline int
snd_sof_pcm_platform_hw_params(struct snd_sof_dev *sdev,
			       struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params,
			       struct snd_sof_platform_stream_params *platform_params)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_params)
		return sof_ops(sdev)->pcm_hw_params(sdev, substream, params,
						    platform_params);

	return 0;
}

/* host stream hw free */
static inline int
snd_sof_pcm_platform_hw_free(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_free)
		return sof_ops(sdev)->pcm_hw_free(sdev, substream);

	return 0;
}

/* host stream trigger */
static inline int
snd_sof_pcm_platform_trigger(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream, int cmd)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_trigger)
		return sof_ops(sdev)->pcm_trigger(sdev, substream, cmd);

	return 0;
}

/* Firmware loading */
static inline int snd_sof_load_firmware(struct snd_sof_dev *sdev)
{
	dev_dbg(sdev->dev, "loading firmware\n");

	return sof_ops(sdev)->load_firmware(sdev);
}

/* host DSP message data */
static inline int snd_sof_ipc_msg_data(struct snd_sof_dev *sdev,
				       struct snd_sof_pcm_stream *sps,
				       void *p, size_t sz)
{
	return sof_ops(sdev)->ipc_msg_data(sdev, sps, p, sz);
}

/* host side configuration of the stream's data offset in stream mailbox area */
static inline int
snd_sof_set_stream_data_offset(struct snd_sof_dev *sdev,
			       struct snd_sof_pcm_stream *sps,
			       size_t posn_offset)
{
	if (sof_ops(sdev) && sof_ops(sdev)->set_stream_data_offset)
		return sof_ops(sdev)->set_stream_data_offset(sdev, sps,
							     posn_offset);

	return 0;
}

/* host stream pointer */
static inline snd_pcm_uframes_t
snd_sof_pcm_platform_pointer(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_pointer)
		return sof_ops(sdev)->pcm_pointer(sdev, substream);

	return 0;
}

/* pcm ack */
static inline int snd_sof_pcm_platform_ack(struct snd_sof_dev *sdev,
					   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_ack)
		return sof_ops(sdev)->pcm_ack(sdev, substream);

	return 0;
}

static inline u64
snd_sof_pcm_get_stream_position(struct snd_sof_dev *sdev,
				struct snd_soc_component *component,
				struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->get_stream_position)
		return sof_ops(sdev)->get_stream_position(sdev, component, substream);

	return 0;
}

/* machine driver */
static inline int
snd_sof_machine_register(struct snd_sof_dev *sdev, void *pdata)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_register)
		return sof_ops(sdev)->machine_register(sdev, pdata);

	return 0;
}

static inline void
snd_sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_unregister)
		sof_ops(sdev)->machine_unregister(sdev, pdata);
}

static inline struct snd_soc_acpi_mach *
snd_sof_machine_select(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_select)
		return sof_ops(sdev)->machine_select(sdev);

	return NULL;
}

static inline void
snd_sof_set_mach_params(struct snd_soc_acpi_mach *mach,
			struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev) && sof_ops(sdev)->set_mach_params)
		sof_ops(sdev)->set_mach_params(mach, sdev);
}

/**
 * snd_sof_dsp_read_poll_timeout - Periodically poll a register until a
 *				   condition is met or a timeout occurs
 * @sdev: sof device
 * @bar: BAR index of the register
 * @offset: register offset within @bar
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @sleep_us: Maximum time to sleep between reads in us (0
 *            tight-loops). Should be less than ~20ms since usleep_range
 *            is used (see Documentation/timers/timers-howto.rst).
 * @timeout_us: Timeout in us, 0 means never timeout
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last value read at @offset is stored in @val. Must not
 * be called from atomic context if sleep_us or timeout_us are used.
 *
 * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
 */
#define snd_sof_dsp_read_poll_timeout(sdev, bar, offset, val, cond, sleep_us, timeout_us) \
({ \
	u64 __timeout_us = (timeout_us); \
	unsigned long __sleep_us = (sleep_us); \
	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
	might_sleep_if((__sleep_us) != 0); \
	for (;;) { \
		(val) = snd_sof_dsp_read(sdev, bar, offset); \
		if (cond) { \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x successful\n", \
				(offset), (val)); \
			break; \
		} \
		if (__timeout_us && \
		    ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = snd_sof_dsp_read(sdev, bar, offset); \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x timed out\n", \
				(offset), (val)); \
			break; \
		} \
		if (__sleep_us) \
			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
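
/*
 * Usage sketch (illustrative only, the bar/offset/bit values are
 * hypothetical):
 *
 *	u32 status;
 *	int ret;
 *
 *	ret = snd_sof_dsp_read_poll_timeout(sdev, bar, offset, status,
 *					    status & BIT(0),
 *					    1000, 500000);
 *
 * polls the register roughly every 1ms until bit 0 is set, giving up with
 * -ETIMEDOUT after 500ms; the last value read is left in status.
 */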

/* This is for register bits with attribute RWC */
bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
				      u32 offset, u32 mask, u32 value);

bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
					u32 offset, u64 mask, u64 value);

bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar,
			       u32 offset, u64 mask, u64 value);

void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
				    u32 offset, u32 mask, u32 value);

int snd_sof_dsp_register_poll(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			      u32 mask, u32 target, u32 timeout_ms,
			      u32 interval_us);

void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset, bool non_recoverable);
#endif