// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include <linux/devcoredump.h>

#include "cam.h"
#include "chan.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "ps.h"
#include "reg.h"
#include "ser.h"
#include "util.h"

#define SER_RECFG_TIMEOUT 1000

enum ser_evt {
	SER_EV_NONE,
	SER_EV_STATE_IN,
	SER_EV_STATE_OUT,
	SER_EV_L1_RESET_PREPARE, /* pre-M0 */
	SER_EV_L1_RESET, /* M1 */
	SER_EV_DO_RECOVERY, /* M3 */
	SER_EV_MAC_RESET_DONE, /* M5 */
	SER_EV_L2_RESET,
	SER_EV_L2_RECFG_DONE,
	SER_EV_L2_RECFG_TIMEOUT,
	SER_EV_M1_TIMEOUT,
	SER_EV_M3_TIMEOUT,
	SER_EV_FW_M5_TIMEOUT,
	SER_EV_L0_RESET,
	SER_EV_MAXX
};

enum ser_state {
	SER_IDLE_ST,
	SER_L1_RESET_PRE_ST,
	SER_RESET_TRX_ST,
	SER_DO_HCI_ST,
	SER_L2_RESET_ST,
	SER_ST_MAX_ST
};

struct ser_msg {
	struct list_head list;
	u8 event;
};

struct state_ent {
	u8 state;
	char *name;
	void (*st_func)(struct rtw89_ser *ser, u8 event);
};

struct event_ent {
	u8 event;
	char *name;
};

static char *ser_ev_name(struct rtw89_ser *ser, u8 event)
{
	if (event < SER_EV_MAXX)
		return ser->ev_tbl[event].name;

	return "err_ev_name";
}

static char *ser_st_name(struct rtw89_ser *ser)
{
	if (ser->state < SER_ST_MAX_ST)
		return ser->st_tbl[ser->state].name;

	return "err_st_name";
}

#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \
struct ser_cd_ ## _name { \
	u32 type; \
	u32 type_size; \
	u64 padding; \
	u8 data[_size]; \
} __packed; \
static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \
{ \
	p->type = _type; \
	p->type_size = sizeof(p->data); \
	p->padding = 0x0123456789abcdef; \
}

enum rtw89_ser_cd_type {
	RTW89_SER_CD_FW_RSVD_PLE = 0,
	RTW89_SER_CD_FW_BACKTRACE = 1,
};

RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple,
		      RTW89_SER_CD_FW_RSVD_PLE,
		      RTW89_FW_RSVD_PLE_SIZE);

RTW89_DEF_SER_CD_TYPE(fw_backtrace,
		      RTW89_SER_CD_FW_BACKTRACE,
		      RTW89_FW_BACKTRACE_MAX_SIZE);

struct rtw89_ser_cd_buffer {
	struct ser_cd_fw_rsvd_ple fwple;
	struct ser_cd_fw_backtrace fwbt;
} __packed;

static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser_cd_buffer *buf;

	buf = vzalloc(sizeof(*buf));
	if (!buf)
		return NULL;

	ser_cd_fw_rsvd_ple_init(&buf->fwple);
	ser_cd_fw_backtrace_init(&buf->fwbt);

	return buf;
}

static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf)
{
	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n");

	/* After calling dev_coredump, buf's lifetime is supposed to be
	 * handled by the device coredump framework. Note that a new dump
	 * will be discarded if a previous one hasn't been released by
	 * framework yet.
	 */
	dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL);
}

static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf, bool free_self)
{
	if (!free_self)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by self\n");

	/* When some problems happen during filling data of core dump,
	 * we won't send it to device coredump framework. Instead, we
	 * free buf by ourselves.
	 */
	vfree(buf);
}
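
/* State machine core. Events are queued from any context via
 * ser_send_msg() and dispatched here from rtw89_ser_hdl_work(); each
 * state handler additionally sees SER_EV_STATE_IN/SER_EV_STATE_OUT on
 * entry and exit, which is where the timeout alarms are armed and
 * torn down.
 */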
static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
		    ser_st_name(ser), ser_ev_name(ser, evt));

	wiphy_lock(rtwdev->hw->wiphy);
	rtw89_leave_lps(rtwdev);
	wiphy_unlock(rtwdev->hw->wiphy);

	ser->st_tbl[ser->state].st_func(ser, evt);
}

static void ser_state_goto(struct rtw89_ser *ser, u8 new_state)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (ser->state == new_state || new_state >= SER_ST_MAX_ST)
		return;
	ser_state_run(ser, SER_EV_STATE_OUT);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n",
		    ser_st_name(ser), ser->st_tbl[new_state].name);

	ser->state = new_state;
	ser_state_run(ser, SER_EV_STATE_IN);
}

static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser)
{
	struct ser_msg *msg;

	spin_lock_irq(&ser->msg_q_lock);
	msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_irq(&ser->msg_q_lock);

	return msg;
}

static void rtw89_ser_hdl_work(struct work_struct *work)
{
	struct ser_msg *msg;
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_hdl_work);

	while ((msg = __rtw89_ser_dequeue_msg(ser))) {
		ser_state_run(ser, msg->event);
		kfree(msg);
	}
}

static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
	struct ser_msg *msg = NULL;

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return -EIO;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	msg->event = event;

	spin_lock_irq(&ser->msg_q_lock);
	list_add(&msg->list, &ser->msg_q);
	spin_unlock_irq(&ser->msg_q_lock);

	schedule_work(&ser->ser_hdl_work);
	return 0;
}

static void rtw89_ser_alarm_work(struct work_struct *work)
{
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_alarm_work.work);

	ser_send_msg(ser, ser->alarm_event);
	ser->alarm_event = SER_EV_NONE;
}

static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return;

	ser->alarm_event = event;
	ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work,
				     msecs_to_jiffies(ms));
}

static void ser_del_alarm(struct rtw89_ser *ser)
{
	cancel_delayed_work(&ser->ser_alarm_work);
	ser->alarm_event = SER_EV_NONE;
}

/* driver function */
static void drv_stop_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	ieee80211_stop_queues(rtwdev->hw);
	set_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_stop_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	set_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void drv_trx_reset(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_hci_reset(rtwdev);
}
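
/* The resume helpers below mirror drv_stop_tx()/drv_stop_rx(): each
 * pair is made idempotent by the RTW89_SER_DRV_STOP_TX/RX flag bits,
 * so the SER_EV_STATE_OUT path can call them unconditionally.
 */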
static void drv_resume_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags))
		return;

	ieee80211_wake_queues(rtwdev->hw);
	clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_resume_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags))
		return;

	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	struct rtw89_vif_link *rtwvif_link;
	unsigned int link_id;

	rtwvif->tdls_peer = 0;

	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
		rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif_link->port);
		rtwvif_link->net_type = RTW89_NET_TYPE_NO_LINK;
		rtwvif_link->trigger = false;
		rtwvif_link->rand_tsf_done = false;

		rtw89_p2p_noa_once_deinit(rtwvif_link);
	}
}

static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_vif *target_rtwvif = (struct rtw89_vif *)data;
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_sta_link *rtwsta_link;
	unsigned int link_id;

	if (rtwvif != target_rtwvif)
		return;

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
		rtwvif_link = rtwsta_link->rtwvif_link;

		if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
			rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta_link->addr_cam);
		if (sta->tdls)
			rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta_link->bssid_cam);

		INIT_LIST_HEAD(&rtwsta_link->ba_cam_list);
	}
}

static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	struct rtw89_vif_link *rtwvif_link;
	unsigned int link_id;

	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  ser_sta_deinit_cam_iter,
					  rtwvif);

	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
		rtw89_cam_deinit(rtwdev, rtwvif_link);

	bitmap_zero(rtwdev->cam_info.ba_cam_map, RTW89_MAX_BA_CAM_NUM);
}

static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif *rtwvif;

	rtw89_cam_reset_keys(rtwdev);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_deinit_cam(rtwdev, rtwvif);

	rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_reset_vif(rtwdev, rtwvif);

	rtwdev->total_sta_assoc = 0;
	refcount_set(&rtwdev->refcount_ap_info, 0);
}

/* hal function */
static int hal_enable_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags))
		return 0;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
	if (!ret)
		clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);
	else
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "lv1 rcvy fail to start dma: %d\n", ret);

	return ret;
}
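
/* Level-1 recovery is delegated to the HCI backend in two steps:
 * RTW89_LV1_RCVY_STEP_1 halts DMA before the TRX reset, and
 * RTW89_LV1_RCVY_STEP_2 (hal_enable_dma() above) restarts it
 * afterwards. A backend without a mac_lv1_rcvy hook cannot do L1
 * recovery, so the -EIO here makes the caller escalate to an L2 reset.
 */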
static int hal_stop_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
	if (!ret)
		set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);
	else
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "lv1 rcvy fail to stop dma: %d\n", ret);

	return ret;
}

static void hal_send_post_m0_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RESET_START_DMAC);
}

static void hal_send_m2_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN);
}

static void hal_send_m4_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
}

/* state handler */
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		rtw89_hci_recovery_complete(rtwdev);
		clear_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags);
		clear_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
		break;
	case SER_EV_L1_RESET_PREPARE:
		ser_state_goto(ser, SER_L1_RESET_PRE_ST);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_L2_RESET:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		set_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags);
		rtw89_hci_recovery_start(rtwdev);
		break;
	default:
		break;
	}
}

static void ser_l1_reset_pre_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		ser->prehandle_l1 = true;
		hal_send_post_m0_event(ser);
		ser_set_alarm(ser, 1000, SER_EV_M1_TIMEOUT);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_M1_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;
	default:
		break;
	}
}

static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct wiphy *wiphy = rtwdev->hw->wiphy;

	switch (evt) {
	case SER_EV_STATE_IN:
		wiphy_lock(wiphy);
		wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work);
		wiphy_delayed_work_cancel(wiphy, &rtwdev->track_ps_work);
		wiphy_unlock(wiphy);
		drv_stop_tx(ser);

		if (hal_stop_dma(ser)) {
			ser_state_goto(ser, SER_L2_RESET_ST);
			break;
		}

		drv_stop_rx(ser);
		wiphy_lock(wiphy);
		drv_trx_reset(ser);
		wiphy_unlock(wiphy);

		/* wait m3 */
		hal_send_m2_event(ser);

		/* set alarm to prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT);
		break;

	case SER_EV_DO_RECOVERY:
		ser_state_goto(ser, SER_DO_HCI_ST);
		break;

	case SER_EV_M3_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		hal_enable_dma(ser);
		drv_resume_rx(ser);
		drv_resume_tx(ser);
		wiphy_delayed_work_queue(wiphy, &rtwdev->track_work,
					 RTW89_TRACK_WORK_PERIOD);
		wiphy_delayed_work_queue(wiphy, &rtwdev->track_ps_work,
					 RTW89_TRACK_PS_WORK_PERIOD);
		break;

	default:
		break;
	}
}
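
/* The L1 handshake alternates between driver and firmware: the driver
 * raises M0/M2/M4 through rtw89_mac_set_err_status(), and the firmware
 * acknowledges with M1/M3/M5, which come back in as SER events via
 * rtw89_ser_notify(). Each wait is bounded by a 1000 ms alarm that
 * escalates to SER_L2_RESET_ST on timeout.
 */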
static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		/* wait m5 */
		hal_send_m4_event(ser);

		/* prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT);
		break;

	case SER_EV_FW_M5_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_MAC_RESET_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}

static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
			     u8 sel, u32 start_addr, u32 len)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 filter_model_addr = mac->filter_model_addr;
	u32 indir_access_addr = mac->indir_access_addr;
	u32 mem_page_size = mac->mem_page_size;
	u32 *ptr = (u32 *)buf;
	u32 base_addr, start_page, residue;
	u32 cnt = 0;
	u32 i;

	start_page = start_addr / mem_page_size;
	residue = start_addr % mem_page_size;
	base_addr = mac->mem_base_addrs[sel];
	base_addr += start_page * mem_page_size;

	while (cnt < len) {
		rtw89_write32(rtwdev, filter_model_addr, base_addr);

		for (i = indir_access_addr + residue;
		     i < indir_access_addr + mem_page_size;
		     i += 4, ptr++) {
			*ptr = rtw89_read32(rtwdev, i);
			cnt += 4;
			if (cnt >= len)
				break;
		}

		residue = 0;
		base_addr += mem_page_size;
	}
}

static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf)
{
	u32 start_addr = rtwdev->chip->rsvd_ple_ofst;

	rtw89_debug(rtwdev, RTW89_DBG_SER,
		    "dump mem for fw rsvd payload engine (start addr: 0x%x)\n",
		    start_addr);
	ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr,
			 RTW89_FW_RSVD_PLE_SIZE);
}

struct __fw_backtrace_entry {
	u32 wcpu_addr;
	u32 size;
	u32 key;
} __packed;

struct __fw_backtrace_info {
	u32 ra;
	u32 sp;
} __packed;

static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
	      sizeof(struct __fw_backtrace_info));

static u32 convert_addr_from_wcpu(u32 wcpu_addr)
{
	if (wcpu_addr < 0x30000000)
		return wcpu_addr;

	return wcpu_addr & GENMASK(28, 0);
}
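
/* The first bytes of the reserved-PLE dump hold a __fw_backtrace_entry
 * header whose wcpu_addr points (in WCPU address space) at an array of
 * {ra, sp} records. convert_addr_from_wcpu() strips the WCPU mapping
 * bits above bit 28; e.g. a hypothetical 0xb8400000 & GENMASK(28, 0)
 * yields 0x18400000, which is then fed to the indirect-access window.
 */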
static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
				       const struct __fw_backtrace_entry *ent)
{
	struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 filter_model_addr = mac->filter_model_addr;
	u32 indir_access_addr = mac->indir_access_addr;
	u32 fwbt_addr = convert_addr_from_wcpu(ent->wcpu_addr);
	u32 fwbt_size = ent->size;
	u32 fwbt_key = ent->key;
	u32 i;

	if (fwbt_addr == 0) {
		rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n",
			   fwbt_addr);
		return -EINVAL;
	}

	if (fwbt_key != RTW89_FW_BACKTRACE_KEY) {
		rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n",
			   fwbt_key);
		return -EINVAL;
	}

	if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) ||
	    fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) {
		rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n",
			   fwbt_size);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n");
	rtw89_write32(rtwdev, filter_model_addr, fwbt_addr);

	for (i = indir_access_addr;
	     i < indir_access_addr + fwbt_size;
	     i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) {
		*ptr = (struct __fw_backtrace_info){
			.ra = rtw89_read32(rtwdev, i),
			.sp = rtw89_read32(rtwdev, i + 4),
		};
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "next sp: 0x%x, next ra: 0x%x\n",
			    ptr->sp, ptr->ra);
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n");
	return 0;
}

static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct rtw89_ser_cd_buffer *buf;
	struct __fw_backtrace_entry fwbt_ent;
	int ret = 0;

	buf = rtw89_ser_cd_prep(rtwdev);
	if (!buf) {
		ret = -ENOMEM;
		goto bottom;
	}

	rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data);

	fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data;
	ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent);
	if (ret)
		goto bottom;

	rtw89_ser_cd_send(rtwdev, buf);

bottom:
	rtw89_ser_cd_free(rtwdev, buf, !!ret);

	ser_reset_mac_binding(rtwdev);
	rtw89_core_stop(rtwdev);
	rtw89_entity_init(rtwdev);
	rtw89_fw_release_general_pkt_list(rtwdev, false);
	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}

static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		wiphy_lock(rtwdev->hw->wiphy);
		ser_l2_reset_st_pre_hdl(ser);
		wiphy_unlock(rtwdev->hw->wiphy);

		ieee80211_restart_hw(rtwdev->hw);
		ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
		break;

	case SER_EV_L2_RECFG_TIMEOUT:
		rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n");
		fallthrough;
	case SER_EV_L2_RECFG_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}

static const struct event_ent ser_ev_tbl[] = {
	{SER_EV_NONE, "SER_EV_NONE"},
	{SER_EV_STATE_IN, "SER_EV_STATE_IN"},
	{SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
	{SER_EV_L1_RESET_PREPARE, "SER_EV_L1_RESET_PREPARE pre-m0"},
	{SER_EV_L1_RESET, "SER_EV_L1_RESET m1"},
	{SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"},
	{SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"},
	{SER_EV_L2_RESET, "SER_EV_L2_RESET"},
	{SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"},
	{SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"},
	{SER_EV_M1_TIMEOUT, "SER_EV_M1_TIMEOUT"},
	{SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"},
	{SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"},
	{SER_EV_L0_RESET, "SER_EV_L0_RESET"},
	{SER_EV_MAXX, "SER_EV_MAX"}
};

static const struct state_ent ser_st_tbl[] = {
	{SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
	{SER_L1_RESET_PRE_ST, "SER_L1_RESET_PRE_ST", ser_l1_reset_pre_st_hdl},
	{SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
	{SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
	{SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl}
};

int rtw89_ser_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = &rtwdev->ser;

	memset(ser, 0, sizeof(*ser));
	INIT_LIST_HEAD(&ser->msg_q);
	ser->state = SER_IDLE_ST;
	ser->st_tbl = ser_st_tbl;
	ser->ev_tbl = ser_ev_tbl;

	bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS);
	spin_lock_init(&ser->msg_q_lock);
	INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work);
	INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work);
	return 0;
}
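
/* Teardown note: setting RTW89_SER_DRV_STOP_RUN first makes
 * ser_send_msg() and ser_set_alarm() bail out, so neither work can be
 * re-armed between the two cancel_*_sync() calls below.
 */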
int rtw89_ser_deinit(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser;

	set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	cancel_delayed_work_sync(&ser->ser_alarm_work);
	cancel_work_sync(&ser->ser_hdl_work);
	clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	return 0;
}

void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev)
{
	ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE);
}

/* Entry point for HCI/interrupt paths to report a chip error code and
 * kick the recovery state machine: error codes map to L0/L1/L2 events,
 * and unrecognized codes are rejected with -EINVAL.
 */
int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
	u8 event = SER_EV_NONE;

	rtw89_info(rtwdev, "SER catches error: 0x%x\n", err);

	switch (err) {
	case MAC_AX_ERR_L1_PREERR_DMAC: /* pre-M0 */
		event = SER_EV_L1_RESET_PREPARE;
		break;
	case MAC_AX_ERR_L1_ERR_DMAC:
	case MAC_AX_ERR_L0_PROMOTE_TO_L1:
		event = SER_EV_L1_RESET; /* M1 */
		break;
	case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE:
		event = SER_EV_DO_RECOVERY; /* M3 */
		break;
	case MAC_AX_ERR_L1_RESET_RECOVERY_DONE:
		event = SER_EV_MAC_RESET_DONE; /* M5 */
		break;
	case MAC_AX_ERR_L0_ERR_CMAC0:
	case MAC_AX_ERR_L0_ERR_CMAC1:
	case MAC_AX_ERR_L0_RESET_DONE:
		event = SER_EV_L0_RESET;
		break;
	default:
		if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 ||
		    (err >= MAC_AX_ERR_L2_ERR_AH_DMA &&
		     err <= MAC_AX_GET_ERR_MAX))
			event = SER_EV_L2_RESET;
		break;
	}

	if (event == SER_EV_NONE) {
		rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err);
		return -EINVAL;
	}

	ser_send_msg(&rtwdev->ser, event);
	return 0;
}
EXPORT_SYMBOL(rtw89_ser_notify);