// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 * Copyright (C) 2019-2021, 2023-2025 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/bsearch.h>
#include <linux/list.h>

#include "fw/api/tx.h"
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"
#include <linux/dmapool.h>
#include "fw/api/commands.h"
#include "pcie/internal.h"
#include "iwl-context-info-gen3.h"

/* Per-device restart history (keyed by dev_name()), used to escalate resets */
struct iwl_trans_dev_restart_data {
	struct list_head list;
	unsigned int restart_count;
	time64_t last_error;
	char name[];
};

static LIST_HEAD(restart_data_list);
static DEFINE_SPINLOCK(restart_data_lock);

static struct iwl_trans_dev_restart_data *
iwl_trans_get_restart_data(struct device *dev)
{
	struct iwl_trans_dev_restart_data *tmp, *data = NULL;
	const char *name = dev_name(dev);

	spin_lock(&restart_data_lock);
	list_for_each_entry(tmp, &restart_data_list, list) {
		if (strcmp(tmp->name, name))
			continue;
		data = tmp;
		break;
	}
	spin_unlock(&restart_data_lock);

	if (data)
		return data;

	data = kzalloc(struct_size(data, name, strlen(name) + 1), GFP_ATOMIC);
	if (!data)
		return NULL;

	strcpy(data->name, name);
	spin_lock(&restart_data_lock);
	list_add_tail(&data->list, &restart_data_list);
	spin_unlock(&restart_data_lock);

	return data;
}

static void iwl_trans_inc_restart_count(struct device *dev)
{
	struct iwl_trans_dev_restart_data *data;

	data = iwl_trans_get_restart_data(dev);
	if (data) {
		data->last_error = ktime_get_boottime_seconds();
		data->restart_count++;
	}
}

void iwl_trans_free_restart_list(void)
{
	struct iwl_trans_dev_restart_data *tmp;

	while ((tmp = list_first_entry_or_null(&restart_data_list,
					       typeof(*tmp), list))) {
		list_del(&tmp->list);
		kfree(tmp);
	}
}

struct iwl_trans_reprobe {
	struct device *dev;
	struct delayed_work work;
};

static void iwl_trans_reprobe_wk(struct work_struct *wk)
{
	struct iwl_trans_reprobe *reprobe;

	reprobe = container_of(wk, typeof(*reprobe), work.work);

	if (device_reprobe(reprobe->dev))
		dev_err(reprobe->dev, "reprobe failed!\n");
	put_device(reprobe->dev);
	kfree(reprobe);
	module_put(THIS_MODULE);
}

static void iwl_trans_schedule_reprobe(struct iwl_trans *trans,
				       unsigned int delay_ms)
{
	struct iwl_trans_reprobe *reprobe;

	/*
	 * get a module reference to avoid doing this while unloading
	 * anyway and to avoid scheduling a work with code that's
	 * being removed.
	 */
	if (!try_module_get(THIS_MODULE)) {
		IWL_ERR(trans, "Module is being unloaded - abort\n");
		return;
	}

	reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
	if (!reprobe) {
		module_put(THIS_MODULE);
		return;
	}
	reprobe->dev = get_device(trans->dev);
	INIT_DELAYED_WORK(&reprobe->work, iwl_trans_reprobe_wk);
	schedule_delayed_work(&reprobe->work, msecs_to_jiffies(delay_ms));
}

#define IWL_TRANS_RESET_OK_TIME	7 /* seconds */

/*
 * Pick the reset type for this error: repeated errors escalate through the
 * (per device family) escalation list; the counter starts over if the
 * previous error was more than IWL_TRANS_RESET_OK_TIME seconds ago.
 */
static enum iwl_reset_mode
iwl_trans_determine_restart_mode(struct iwl_trans *trans)
{
	struct iwl_trans_dev_restart_data *data;
	enum iwl_reset_mode at_least = 0;
	unsigned int index;
	static const enum iwl_reset_mode escalation_list_old[] = {
		IWL_RESET_MODE_SW_RESET,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_FUNC_RESET,
		IWL_RESET_MODE_PROD_RESET,
	};
	static const enum iwl_reset_mode escalation_list_sc[] = {
		IWL_RESET_MODE_SW_RESET,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_FUNC_RESET,
		IWL_RESET_MODE_TOP_RESET,
		IWL_RESET_MODE_PROD_RESET,
		IWL_RESET_MODE_TOP_RESET,
		IWL_RESET_MODE_PROD_RESET,
		IWL_RESET_MODE_TOP_RESET,
		IWL_RESET_MODE_PROD_RESET,
	};
	const enum iwl_reset_mode *escalation_list;
	size_t escalation_list_size;

	/* used by TOP fatal error/TOP reset */
	if (trans->restart.mode.type == IWL_ERR_TYPE_TOP_RESET_FAILED)
		return IWL_RESET_MODE_PROD_RESET;

	if (trans->request_top_reset) {
		trans->request_top_reset = 0;
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_SC)
			return IWL_RESET_MODE_TOP_RESET;
		return IWL_RESET_MODE_PROD_RESET;
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {
		escalation_list = escalation_list_sc;
		escalation_list_size = ARRAY_SIZE(escalation_list_sc);
	} else {
		escalation_list = escalation_list_old;
		escalation_list_size = ARRAY_SIZE(escalation_list_old);
	}

	if (trans->restart.during_reset)
		at_least = IWL_RESET_MODE_REPROBE;

	data = iwl_trans_get_restart_data(trans->dev);
	if (!data)
		return at_least;

	if (ktime_get_boottime_seconds() - data->last_error >=
	    IWL_TRANS_RESET_OK_TIME)
		data->restart_count = 0;

	index = data->restart_count;
	if (index >= escalation_list_size)
		index = escalation_list_size - 1;

	return max(at_least, escalation_list[index]);
}

#define IWL_TRANS_TOP_FOLLOWER_WAIT	180 /* ms */

#define IWL_TRANS_RESET_DELAY	(HZ * 60)

static void iwl_trans_restart_wk(struct work_struct *wk)
{
	struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk);
	enum iwl_reset_mode mode;

	if (trans->restart.mode.type == IWL_ERR_TYPE_TOP_RESET_BY_BT) {
		iwl_trans_schedule_reprobe(trans, IWL_TRANS_TOP_FOLLOWER_WAIT);
		return;
	}

	if (!trans->op_mode)
		return;

	/* might have been scheduled before marked as dead, re-check */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return;

	iwl_op_mode_dump_error(trans->op_mode, &trans->restart.mode);

	/*
	 * If the opmode stopped the device while we were trying to dump and
	 * reset, then we'll have done the dump already (synchronized by the
	 * opmode lock that it will acquire in iwl_op_mode_dump_error()) and
	 * managed that via trans->restart.mode.
	 * Additionally, make sure that in such a case we won't attempt to do
	 * any resets now, since it's no longer requested.
	 */
	if (!test_and_clear_bit(STATUS_RESET_PENDING, &trans->status))
		return;

	if (!iwlwifi_mod_params.fw_restart)
		return;

	mode = iwl_trans_determine_restart_mode(trans);

	iwl_trans_inc_restart_count(trans->dev);

	switch (mode) {
	case IWL_RESET_MODE_TOP_RESET:
		trans->do_top_reset = 1;
		IWL_ERR(trans, "Device error - TOP reset\n");
		fallthrough;
	case IWL_RESET_MODE_SW_RESET:
		if (mode == IWL_RESET_MODE_SW_RESET)
			IWL_ERR(trans, "Device error - SW reset\n");
		iwl_trans_opmode_sw_reset(trans, trans->restart.mode.type);
		break;
	case IWL_RESET_MODE_REPROBE:
		IWL_ERR(trans, "Device error - reprobe!\n");

		iwl_trans_schedule_reprobe(trans, 0);
		break;
	default:
		iwl_trans_pcie_reset(trans, mode);
		break;
	}
}

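/*
 * Illustrative, simplified lifecycle of a transport as suggested by the
 * entry points in this file (the real call sites live in iwl-drv.c and
 * the op mode, and may differ in detail):
 *
 *	trans = iwl_trans_alloc(priv_size, dev, cfg_trans);
 *	iwl_trans_init(trans);
 *	iwl_trans_configure(trans, &trans_cfg);		op mode attaches
 *	iwl_trans_start_hw(trans);
 *	iwl_trans_start_fw(trans, fw, run_in_rfkill);
 *	iwl_trans_fw_alive(trans, scd_addr);		state = FW_ALIVE
 *	...			TX, host commands, queue management ...
 *	iwl_trans_stop_device(trans);
 *	iwl_trans_op_mode_leave(trans);
 *	iwl_trans_free(trans);
 */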
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key __sync_cmd_key;
#endif

	trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
	if (!trans)
		return NULL;

	trans->trans_cfg = cfg_trans;

#ifdef CONFIG_LOCKDEP
	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
			 &__sync_cmd_key, 0);
#endif

	trans->dev = dev;
	trans->num_rx_queues = 1;

	INIT_WORK(&trans->restart.wk, iwl_trans_restart_wk);

	return trans;
}

int iwl_trans_init(struct iwl_trans *trans)
{
	int txcmd_size, txcmd_align;

	if (!trans->trans_cfg->gen2) {
		txcmd_size = sizeof(struct iwl_tx_cmd);
		txcmd_align = sizeof(void *);
	} else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
		txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
		txcmd_align = 64;
	} else {
		txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
		txcmd_align = 128;
	}

	txcmd_size += sizeof(struct iwl_cmd_header);
	txcmd_size += 36; /* biggest possible 802.11 header */

	/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
	if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
		return -EINVAL;

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  txcmd_size, txcmd_align,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!trans->dev_cmd_pool)
		return -ENOMEM;

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans->wait_command_queue);

	return 0;
}

void iwl_trans_free(struct iwl_trans *trans)
{
	cancel_work_sync(&trans->restart.wk);
	kmem_cache_destroy(trans->dev_cmd_pool);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
		     test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
		return -ERFKILL;

	/*
	 * We can't test IWL_MVM_STATUS_IN_D3 in mvm->status because this
	 * bit is set early in the D3 flow, before we send all the commands
	 * that configure the firmware for D3 operation (power, patterns, ...)
	 * and we don't want to flag all those with CMD_SEND_IN_D3.
	 * So use the system_pm_mode instead.
	 * The only command sent after we set system_pm_mode is
	 * D3_CONFIG_CMD, which we now flag with CMD_SEND_IN_D3.
	 */
	if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
		     !(cmd->flags & CMD_SEND_IN_D3)))
		return -EHOSTDOWN;

	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);

	if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) {
		if (cmd->id != REPLY_ERROR)
			cmd->id = DEF_ID(cmd->id);
	}

	ret = iwl_trans_pcie_send_hcmd(trans, cmd);

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_release(&trans->sync_cmd_lockdep_map);

	if (WARN_ON((cmd->flags & CMD_WANT_SKB) && !ret && !cmd->resp_pkt))
		return -EIO;

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);

/* Comparator for struct iwl_hcmd_names.
 * Used in the binary search over a list of host commands.
 *
 * @key: command_id that we're looking for.
 * @elt: struct iwl_hcmd_names candidate for match.
 *
 * @return 0 iff equal.
 */
static int iwl_hcmd_names_cmp(const void *key, const void *elt)
{
	const struct iwl_hcmd_names *name = elt;
	const u8 *cmd1 = key;
	u8 cmd2 = name->cmd_id;

	return (*cmd1 - cmd2);
}

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
{
	u8 grp, cmd;
	struct iwl_hcmd_names *ret;
	const struct iwl_hcmd_arr *arr;
	size_t size = sizeof(struct iwl_hcmd_names);

	grp = iwl_cmd_groupid(id);
	cmd = iwl_cmd_opcode(id);

	if (!trans->command_groups || grp >= trans->command_groups_size ||
	    !trans->command_groups[grp].arr)
		return "UNKNOWN";

	arr = &trans->command_groups[grp];
	ret = bsearch(&cmd, arr->arr, arr->size, size, iwl_hcmd_names_cmp);
	if (!ret)
		return "UNKNOWN";
	return ret->cmd_name;
}
IWL_EXPORT_SYMBOL(iwl_get_cmd_string);

int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
{
	int i, j;
	const struct iwl_hcmd_arr *arr;

	for (i = 0; i < trans->command_groups_size; i++) {
		arr = &trans->command_groups[i];
		if (!arr->arr)
			continue;
		for (j = 0; j < arr->size - 1; j++)
			if (arr->arr[j].cmd_id > arr->arr[j + 1].cmd_id)
				return -1;
	}
	return 0;
}
IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);

void iwl_trans_configure(struct iwl_trans *trans,
			 const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	iwl_trans_pcie_configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}
IWL_EXPORT_SYMBOL(iwl_trans_configure);

int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	clear_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status);

	return iwl_trans_pcie_start_hw(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_start_hw);

void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	iwl_trans_pcie_op_mode_leave(trans);

	cancel_work_sync(&trans->restart.wk);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_op_mode_leave);

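/*
 * The accessors below are thin wrappers that forward directly to the
 * PCIe transport implementation; they keep op mode code on the generic
 * iwl_trans API.
 */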
void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	iwl_trans_pcie_write8(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write8);

void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	iwl_trans_pcie_write32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write32);

u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return iwl_trans_pcie_read32(trans, ofs);
}
IWL_EXPORT_SYMBOL(iwl_trans_read32);

u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return iwl_trans_pcie_read_prph(trans, ofs);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_prph);

void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
	return iwl_trans_pcie_write_prph(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_prph);

int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
		       void *buf, int dwords)
{
	return iwl_trans_pcie_read_mem(trans, addr, buf, dwords);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_mem);

int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
			const void *buf, int dwords)
{
	return iwl_trans_pcie_write_mem(trans, addr, buf, dwords);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_mem);

void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pmi);

int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
	return iwl_trans_pcie_sw_reset(trans, retake_ownership);
}
IWL_EXPORT_SYMBOL(iwl_trans_sw_reset);

struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx)
{
	return iwl_trans_pcie_dump_data(trans, dump_mask,
					sanitize_ops, sanitize_ctx);
}
IWL_EXPORT_SYMBOL(iwl_trans_dump_data);

int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
	might_sleep();

	return iwl_trans_pcie_d3_suspend(trans, test, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);

int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
			bool test, bool reset)
{
	might_sleep();

	return iwl_trans_pcie_d3_resume(trans, status, test, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);

void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
	iwl_trans_pci_interrupts(trans, enable);
}
IWL_EXPORT_SYMBOL(iwl_trans_interrupts);

void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	iwl_trans_pcie_sync_nmi(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_sync_nmi);

int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
			    u64 src_addr, u32 byte_cnt)
{
	return iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, byte_cnt);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_imr_mem);

void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
			     u32 mask, u32 value)
{
	iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_bits_mask);

int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
			    u32 *val)
{
	return iwl_trans_pcie_read_config32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_config32);

bool _iwl_trans_grab_nic_access(struct iwl_trans *trans)
{
	return iwl_trans_pcie_grab_nic_access(trans);
}
IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access);

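/*
 * Illustrative use of the NIC access pair, assuming the
 * iwl_trans_grab_nic_access() wrapper from iwl-trans.h (which calls
 * _iwl_trans_grab_nic_access() and handles the sparse annotation):
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		val = iwl_trans_read_prph(trans, reg);
 *		iwl_trans_release_nic_access(trans);
 *	}
 */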
void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	iwl_trans_pcie_release_nic_access(trans);
	__release(nic_access);
}
IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);

void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	if (trans->trans_cfg->gen2)
		iwl_trans_pcie_gen2_fw_alive(trans);
	else
		iwl_trans_pcie_fw_alive(trans, scd_addr);
}
IWL_EXPORT_SYMBOL(iwl_trans_fw_alive);

int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
		       bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);

	if (trans->trans_cfg->gen2)
		ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill);
	else
		ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill);

	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_start_fw);

void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	/*
	 * See also the comment in iwl_trans_restart_wk().
	 *
	 * When the opmode stops the device while a reset is pending, the
	 * worker (iwl_trans_restart_wk) might not have run yet or, more
	 * likely, will be blocked on the opmode lock. Due to the locking,
	 * we can't just flush the worker.
	 *
	 * If this is the case, then the test_and_clear_bit() ensures that
	 * the worker won't attempt to do anything after the stop.
	 *
	 * The trans->restart.mode is a handshake with the opmode, we set
	 * the context there to ABORT so that when the worker can finally
	 * acquire the lock in the opmode, the code there won't attempt to
	 * do any dumps. Since we'd really like to have the dump though,
	 * also do it inline here (with the opmode locks already held),
	 * but use a separate mode struct to avoid races.
	 */
	if (test_and_clear_bit(STATUS_RESET_PENDING, &trans->status)) {
		struct iwl_fw_error_dump_mode mode;

		mode = trans->restart.mode;
		mode.context = IWL_ERR_CONTEXT_FROM_OPMODE;
		trans->restart.mode.context = IWL_ERR_CONTEXT_ABORT;

		iwl_op_mode_dump_error(trans->op_mode, &mode);
	}

	if (trans->trans_cfg->gen2)
		iwl_trans_pcie_gen2_stop_device(trans);
	else
		iwl_trans_pcie_stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_stop_device);

int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
		 struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	if (trans->trans_cfg->gen2)
		return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue);

	return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_tx);

void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
		       struct sk_buff_head *skbs, bool is_flush)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush);
}
IWL_EXPORT_SYMBOL(iwl_trans_reclaim);

void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
			   bool configure_scd)
{
	iwl_trans_pcie_txq_disable(trans, queue, configure_scd);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_disable);

bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			      const struct iwl_trans_txq_scd_cfg *cfg,
			      unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return false;

	return iwl_trans_pcie_txq_enable(trans, queue, ssn,
					 cfg, queue_wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);

int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_trans_pcie_wait_txq_empty(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_txq_empty);

int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_trans_pcie_wait_txqs_empty(trans, txqs);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_tx_queues_empty);

void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
				unsigned long txqs, bool freeze)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_freeze_txq_timer(trans, txqs, freeze);
}
IWL_EXPORT_SYMBOL(iwl_trans_freeze_txq_timer);

void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
				   int txq_id, bool shared_mode)
{
	iwl_trans_pcie_txq_set_shared_mode(trans, txq_id, shared_mode);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_set_shared_mode);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans)
{
	iwl_trans_pcie_debugfs_cleanup(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_debugfs_cleanup);
#endif

void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_set_q_ptrs(trans, queue, ptr);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_q_ptrs);

int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
			u8 tid, int size, unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_txq_dyn_alloc(trans, flags, sta_mask, tid,
				 size, wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_alloc);

void iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	iwl_txq_dyn_free(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_free);

int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			       struct iwl_trans_rxq_dma_data *data)
{
	return iwl_trans_pcie_rxq_dma_data(trans, queue, data);
}
IWL_EXPORT_SYMBOL(iwl_trans_get_rxq_dma_data);

int iwl_trans_load_pnvm(struct iwl_trans *trans,
			const struct iwl_pnvm_image *pnvm_data,
			const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);

void iwl_trans_set_pnvm(struct iwl_trans *trans,
			const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);

int iwl_trans_load_reduce_power(struct iwl_trans *trans,
				const struct iwl_pnvm_image *payloads,
				const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads,
							      capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);

void iwl_trans_set_reduce_power(struct iwl_trans *trans,
				const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);