// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "messages.h"
#include "registers.h"
#include "trace.h"

#define AVS_IPC_TIMEOUT_MS	300
#define AVS_D0IX_DELAY_MS	300

static int
avs_dsp_set_d0ix(struct avs_dev *adev, bool enable)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	/* Is transition required? */
	if (ipc->in_d0ix == enable)
		return 0;

	ret = avs_dsp_op(adev, set_d0ix, enable);
	if (ret) {
		/* Prevent further d0ix attempts on conscious IPC failure. */
		if (ret == -AVS_EIPC)
			atomic_inc(&ipc->d0ix_disable_depth);

		ipc->in_d0ix = false;
		return ret;
	}

	ipc->in_d0ix = enable;
	return 0;
}

static void avs_dsp_schedule_d0ix(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
	if (atomic_read(&adev->ipc->d0ix_disable_depth))
		return;

	mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work,
			 msecs_to_jiffies(AVS_D0IX_DELAY_MS));
}

static void avs_dsp_d0ix_work(struct work_struct *work)
{
	struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work);

	avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true);
}

static int avs_dsp_wake_d0i0(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
	struct avs_ipc *ipc = adev->ipc;

	if (!atomic_read(&ipc->d0ix_disable_depth)) {
		cancel_delayed_work_sync(&ipc->d0ix_work);
		return avs_dsp_set_d0ix(adev, false);
	}

	return 0;
}

int avs_dsp_disable_d0ix(struct avs_dev *adev)
{
	struct avs_ipc *ipc = adev->ipc;

	/* Prevent PG only on the first disable. */
	if (atomic_inc_return(&ipc->d0ix_disable_depth) == 1) {
		cancel_delayed_work_sync(&ipc->d0ix_work);
		return avs_dsp_set_d0ix(adev, false);
	}

	return 0;
}

int avs_dsp_enable_d0ix(struct avs_dev *adev)
{
	struct avs_ipc *ipc = adev->ipc;

	if (atomic_dec_and_test(&ipc->d0ix_disable_depth))
		queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work,
				   msecs_to_jiffies(AVS_D0IX_DELAY_MS));
	return 0;
}
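
/*
 * Illustrative usage of the pair above (a sketch, not a call sequence taken
 * from this file): callers that cannot tolerate power gating bracket their
 * IPC-heavy section with the disable/enable pair and rely on
 * d0ix_disable_depth for correct nesting:
 *
 *	avs_dsp_disable_d0ix(adev);
 *	...series of IPC requests...
 *	avs_dsp_enable_d0ix(adev);
 *
 * Only the outermost disable cancels pending d0ix work and wakes the DSP;
 * only the matching outermost enable schedules the transition back.
 */
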
static void avs_dsp_recovery(struct avs_dev *adev)
{
	struct avs_soc_component *acomp;
	unsigned int core_mask;
	int ret;

	mutex_lock(&adev->comp_list_mutex);
	/* disconnect all running streams */
	list_for_each_entry(acomp, &adev->comp_list, node) {
		struct snd_soc_pcm_runtime *rtd;
		struct snd_soc_card *card;

		card = acomp->base.card;
		if (!card)
			continue;

		for_each_card_rtds(card, rtd) {
			struct snd_pcm *pcm;
			int dir;

			pcm = rtd->pcm;
			if (!pcm || rtd->dai_link->no_pcm)
				continue;

			for_each_pcm_streams(dir) {
				struct snd_pcm_substream *substream;

				substream = pcm->streams[dir].substream;
				if (!substream || !substream->runtime)
					continue;

				/* No need for _irq() as we are in nonatomic context. */
				snd_pcm_stream_lock(substream);
				snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
				snd_pcm_stream_unlock(substream);
			}
		}
	}
	mutex_unlock(&adev->comp_list_mutex);

	/* forcibly shutdown all cores */
	core_mask = GENMASK(adev->hw_cfg.dsp_cores - 1, 0);
	avs_dsp_core_disable(adev, core_mask);

	/* attempt dsp reboot */
	ret = avs_dsp_boot_firmware(adev, true);
	if (ret < 0)
		dev_err(adev->dev, "dsp reboot failed: %d\n", ret);

	pm_runtime_enable(adev->dev);
	pm_request_autosuspend(adev->dev);

	atomic_set(&adev->ipc->recovering, 0);
}

static void avs_dsp_recovery_work(struct work_struct *work)
{
	struct avs_ipc *ipc = container_of(work, struct avs_ipc, recovery_work);

	avs_dsp_recovery(to_avs_dev(ipc->dev));
}

static void avs_dsp_exception_caught(struct avs_dev *adev, union avs_notify_msg *msg)
{
	struct avs_ipc *ipc = adev->ipc;

	/* Account for the double-exception case. */
	ipc->ready = false;

	if (!atomic_add_unless(&ipc->recovering, 1, 1)) {
		dev_err(adev->dev, "dsp recovery is already in progress\n");
		return;
	}

	dev_crit(adev->dev, "communication severed, rebooting dsp..\n");

	/* Avoid deadlock as the exception may be the response to SET_D0IX. */
	if (current_work() != &ipc->d0ix_work.work)
		cancel_delayed_work_sync(&ipc->d0ix_work);
	ipc->in_d0ix = false;
	/* Re-enabled on recovery completion. */
	pm_runtime_disable(adev->dev);

	/* Process received notification. */
	avs_dsp_op(adev, coredump, msg);

	schedule_work(&ipc->recovery_work);
}

static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
{
	struct avs_ipc *ipc = adev->ipc;
	union avs_reply_msg msg = AVS_MSG(header);
	u32 sts, lec;

	sts = snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev));
	lec = snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev));
	trace_avs_ipc_reply_msg(header, sts, lec);

	ipc->rx.header = header;
	/* Abort copying payload if request processing was unsuccessful. */
	if (!msg.status) {
		/* update size in case of LARGE_CONFIG_GET */
		if (msg.msg_target == AVS_MOD_MSG &&
		    msg.global_msg_type == AVS_MOD_LARGE_CONFIG_GET)
			ipc->rx.size = min_t(u32, AVS_MAILBOX_SIZE,
					     msg.ext.large_config.data_off_size);

		memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
		trace_avs_msg_payload(ipc->rx.data, ipc->rx.size);
	}
}
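
/*
 * Note on mailbox direction, summarizing the accesses in this file (a
 * description of the code, not of a hardware manual): reply and notification
 * payloads are read with memcpy_fromio() from the uplink (FW -> host) window
 * at avs_uplink_addr(), while request payloads are written with
 * memcpy_toio() to the downlink (host -> FW) window at avs_downlink_addr().
 */
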
static void avs_dsp_process_notification(struct avs_dev *adev, u64 header)
{
	struct avs_notify_mod_data mod_data;
	union avs_notify_msg msg = AVS_MSG(header);
	size_t data_size = 0;
	void *data = NULL;
	u32 sts, lec;

	sts = snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev));
	lec = snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev));
	trace_avs_ipc_notify_msg(header, sts, lec);

	/* Ignore spurious notifications until handshake is established. */
	if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) {
		dev_dbg(adev->dev, "FW not ready, skip notification: 0x%08x\n", msg.primary);
		return;
	}

	/* Calculate notification payload size. */
	switch (msg.notify_msg_type) {
	case AVS_NOTIFY_FW_READY:
		break;

	case AVS_NOTIFY_PHRASE_DETECTED:
		data_size = sizeof(struct avs_notify_voice_data);
		break;

	case AVS_NOTIFY_RESOURCE_EVENT:
		data_size = sizeof(struct avs_notify_res_data);
		break;

	case AVS_NOTIFY_LOG_BUFFER_STATUS:
	case AVS_NOTIFY_EXCEPTION_CAUGHT:
		break;

	case AVS_NOTIFY_MODULE_EVENT:
		/* To know the total payload size, header needs to be read first. */
		memcpy_fromio(&mod_data, avs_uplink_addr(adev), sizeof(mod_data));
		data_size = sizeof(mod_data) + mod_data.data_size;
		break;

	default:
		dev_info(adev->dev, "unknown notification: 0x%08x\n", msg.primary);
		break;
	}

	if (data_size) {
		data = kmalloc(data_size, GFP_KERNEL);
		if (!data)
			return;

		memcpy_fromio(data, avs_uplink_addr(adev), data_size);
		trace_avs_msg_payload(data, data_size);
	}

	/* Perform notification-specific operations. */
	switch (msg.notify_msg_type) {
	case AVS_NOTIFY_FW_READY:
		dev_dbg(adev->dev, "FW READY 0x%08x\n", msg.primary);
		adev->ipc->ready = true;
		complete(&adev->fw_ready);
		break;

	case AVS_NOTIFY_LOG_BUFFER_STATUS:
		avs_log_buffer_status_locked(adev, &msg);
		break;

	case AVS_NOTIFY_EXCEPTION_CAUGHT:
		avs_dsp_exception_caught(adev, &msg);
		break;

	default:
		break;
	}

	kfree(data);
}

void avs_dsp_process_response(struct avs_dev *adev, u64 header)
{
	struct avs_ipc *ipc = adev->ipc;

	/*
	 * Response may either be solicited - a reply for a request that has
	 * been sent beforehand - or unsolicited (notification).
	 */
	if (avs_msg_is_reply(header)) {
		/* Response processing is invoked from IRQ thread. */
		spin_lock_irq(&ipc->rx_lock);
		avs_dsp_receive_rx(adev, header);
		ipc->rx_completed = true;
		spin_unlock_irq(&ipc->rx_lock);
	} else {
		avs_dsp_process_notification(adev, header);
	}

	complete(&ipc->busy_completion);
}

static bool avs_ipc_is_busy(struct avs_ipc *ipc)
{
	struct avs_dev *adev = to_avs_dev(ipc->dev);
	const struct avs_spec *const spec = adev->spec;
	u32 hipc_rsp;

	hipc_rsp = snd_hdac_adsp_readl(adev, spec->hipc->rsp_offset);
	return hipc_rsp & spec->hipc->rsp_busy_mask;
}
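
/*
 * Both replies and notifications complete busy_completion, so a wakeup in
 * the sender does not by itself mean that the awaited reply has arrived.
 * The helper below therefore checks rx_completed under rx_lock and, when a
 * notification got there first, re-arms the completion and waits again,
 * bounded by a retry counter.
 */
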
static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout)
{
	u32 repeats_left = 128; /* to avoid infinite looping */
	int ret;

again:
	ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout));

	/* DSP could be unresponsive at this point. */
	if (!ipc->ready)
		return -EPERM;

	if (!ret) {
		if (!avs_ipc_is_busy(ipc))
			return -ETIMEDOUT;
		/*
		 * Firmware did its job, either notification or reply
		 * has been received - now wait until it's processed.
		 */
		wait_for_completion_killable(&ipc->busy_completion);
	}

	/* Ongoing notification's bottom-half may cause early wakeup */
	spin_lock(&ipc->rx_lock);
	if (!ipc->rx_completed) {
		if (repeats_left) {
			/* Reply delayed due to notification. */
			repeats_left--;
			reinit_completion(&ipc->busy_completion);
			spin_unlock(&ipc->rx_lock);
			goto again;
		}

		spin_unlock(&ipc->rx_lock);
		return -ETIMEDOUT;
	}

	spin_unlock(&ipc->rx_lock);
	return 0;
}

static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply)
{
	lockdep_assert_held(&ipc->rx_lock);

	ipc->rx.header = 0;
	ipc->rx.size = reply ? reply->size : 0;
	ipc->rx_completed = false;

	reinit_completion(&ipc->done_completion);
	reinit_completion(&ipc->busy_completion);
}

static void avs_dsp_send_tx(struct avs_dev *adev, struct avs_ipc_msg *tx, bool read_fwregs)
{
	const struct avs_spec *const spec = adev->spec;
	u32 sts = UINT_MAX;
	u32 lec = UINT_MAX;

	tx->header |= spec->hipc->req_busy_mask;
	if (read_fwregs) {
		sts = snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev));
		lec = snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev));
	}

	trace_avs_request(tx, sts, lec);

	if (tx->size)
		memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size);
	snd_hdac_adsp_writel(adev, spec->hipc->req_ext_offset, tx->header >> 32);
	snd_hdac_adsp_writel(adev, spec->hipc->req_offset, tx->header & UINT_MAX);
}
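
/*
 * Ordering note for avs_dsp_send_tx() above: the 64-bit header is pushed as
 * two 32-bit writes, extension word first, primary word second. Since
 * req_busy_mask is OR-ed into the header before the writes, the working
 * assumption here is that the primary-word write is what actually hands the
 * request over to firmware.
 */
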
static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
			       struct avs_ipc_msg *reply, int timeout, const char *name)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	if (!ipc->ready)
		return -EPERM;

	mutex_lock(&ipc->msg_mutex);

	spin_lock(&ipc->rx_lock);
	avs_ipc_msg_init(ipc, reply);
	avs_dsp_send_tx(adev, request, true);
	spin_unlock(&ipc->rx_lock);

	ret = avs_ipc_wait_busy_completion(ipc, timeout);
	if (ret) {
		if (ret == -ETIMEDOUT) {
			union avs_notify_msg msg = AVS_NOTIFICATION(EXCEPTION_CAUGHT);

			/* Same treatment as on exception, just stack_dump=0. */
			avs_dsp_exception_caught(adev, &msg);
		}
		goto exit;
	}

	ret = ipc->rx.rsp.status;
	/*
	 * If the IPC channel is blocked, e.g. due to ongoing recovery,
	 * -EPERM is expected and thus not an actual error.
	 *
	 * Unsupported IPCs are of no harm either.
	 */
	if (ret == -EPERM || ret == AVS_IPC_NOT_SUPPORTED)
		dev_dbg(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
			name, request->glb.primary, request->glb.ext.val, ret);
	else if (ret)
		dev_err(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
			name, request->glb.primary, request->glb.ext.val, ret);

	if (reply) {
		reply->header = ipc->rx.header;
		reply->size = ipc->rx.size;
		if (reply->data && ipc->rx.size)
			memcpy(reply->data, ipc->rx.data, reply->size);
	}

exit:
	mutex_unlock(&ipc->msg_mutex);
	return ret;
}

static int avs_dsp_send_msg_sequence(struct avs_dev *adev, struct avs_ipc_msg *request,
				     struct avs_ipc_msg *reply, int timeout, bool wake_d0i0,
				     bool schedule_d0ix, const char *name)
{
	int ret;

	trace_avs_d0ix("wake", wake_d0i0, request->header);
	if (wake_d0i0) {
		ret = avs_dsp_wake_d0i0(adev, request);
		if (ret)
			return ret;
	}

	ret = avs_dsp_do_send_msg(adev, request, reply, timeout, name);
	if (ret)
		return ret;

	trace_avs_d0ix("schedule", schedule_d0ix, request->header);
	if (schedule_d0ix)
		avs_dsp_schedule_d0ix(adev, request);

	return 0;
}

int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
			     struct avs_ipc_msg *reply, int timeout, const char *name)
{
	bool wake_d0i0 = avs_dsp_op(adev, d0ix_toggle, request, true);
	bool schedule_d0ix = avs_dsp_op(adev, d0ix_toggle, request, false);

	return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, schedule_d0ix,
					 name);
}

int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
		     struct avs_ipc_msg *reply, const char *name)
{
	return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms, name);
}

int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
				struct avs_ipc_msg *reply, int timeout, bool wake_d0i0,
				const char *name)
{
	return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, false, name);
}

int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
			struct avs_ipc_msg *reply, bool wake_d0i0, const char *name)
{
	return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms,
					   wake_d0i0, name);
}
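
/*
 * Minimal caller sketch (hypothetical, for illustration only; real requests
 * are encoded by the helpers declared in messages.h):
 *
 *	struct avs_ipc_msg request = {{0}};
 *	struct avs_ipc_msg reply = {{0}};
 *
 *	reply.size = AVS_MAILBOX_SIZE;
 *	reply.data = payload_buf;
 *	ret = avs_dsp_send_msg(adev, &request, &reply, "example request");
 *
 * On success, any reply payload lands in reply.data and reply.size is
 * updated from the received message.
 */
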
static int avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout,
				   const char *name)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	mutex_lock(&ipc->msg_mutex);

	spin_lock(&ipc->rx_lock);
	avs_ipc_msg_init(ipc, NULL);
	/*
	 * with hw still stalled, memory windows may not be
	 * configured properly so avoid accessing SRAM
	 */
	avs_dsp_send_tx(adev, request, false);
	spin_unlock(&ipc->rx_lock);

	/* ROM messages must be sent before main core is unstalled */
	ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false);
	if (!ret) {
		ret = wait_for_completion_timeout(&ipc->done_completion, msecs_to_jiffies(timeout));
		ret = ret ? 0 : -ETIMEDOUT;
	}
	if (ret)
		dev_err(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
			name, request->glb.primary, request->glb.ext.val, ret);

	mutex_unlock(&ipc->msg_mutex);

	return ret;
}

int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout,
				 const char *name)
{
	return avs_dsp_do_send_rom_msg(adev, request, timeout, name);
}

int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, const char *name)
{
	return avs_dsp_send_rom_msg_timeout(adev, request, adev->ipc->default_timeout_ms, name);
}

void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable)
{
	const struct avs_spec *const spec = adev->spec;
	u32 value, mask;

	/*
	 * No particular bit setting order. All of these are required
	 * to have a functional SW <-> FW communication.
	 */
	value = enable ? AVS_ADSP_ADSPIC_IPC : 0;
	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_IPC, value);

	mask = AVS_ADSP_HIPCCTL_DONE | AVS_ADSP_HIPCCTL_BUSY;
	value = enable ? mask : 0;
	snd_hdac_adsp_updatel(adev, spec->hipc->ctl_offset, mask, value);
}

int avs_ipc_init(struct avs_ipc *ipc, struct device *dev)
{
	ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
	if (!ipc->rx.data)
		return -ENOMEM;

	ipc->dev = dev;
	ipc->ready = false;
	ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS;
	INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work);
	INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work);
	init_completion(&ipc->done_completion);
	init_completion(&ipc->busy_completion);
	spin_lock_init(&ipc->rx_lock);
	mutex_init(&ipc->msg_mutex);

	return 0;
}

void avs_ipc_block(struct avs_ipc *ipc)
{
	ipc->ready = false;
	cancel_work_sync(&ipc->recovery_work);
	cancel_delayed_work_sync(&ipc->d0ix_work);
	ipc->in_d0ix = false;
}
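
/*
 * Closing note, based solely on the two functions above: avs_ipc_init()
 * performs the one-time setup - rx.data is devm-allocated, so it persists
 * until the device is unbound - whereas avs_ipc_block() marks the link as
 * down and flushes outstanding recovery and d0ix work, which presumably
 * suits paths that are about to power the DSP down.
 */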