// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 * Copyright (C) 2019-2021, 2023-2025 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/bsearch.h>
#include <linux/list.h>

#include "fw/api/tx.h"
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"
#include <linux/dmapool.h>
#include "fw/api/commands.h"
#include "pcie/internal.h"
#include "iwl-context-info-gen3.h"

struct iwl_trans_dev_restart_data {
	struct list_head list;
	unsigned int restart_count;
	time64_t last_error;
	char name[];
};

static LIST_HEAD(restart_data_list);
static DEFINE_SPINLOCK(restart_data_lock);

static struct iwl_trans_dev_restart_data *
iwl_trans_get_restart_data(struct device *dev)
{
	struct iwl_trans_dev_restart_data *tmp, *data = NULL;
	const char *name = dev_name(dev);

	spin_lock(&restart_data_lock);
	list_for_each_entry(tmp, &restart_data_list, list) {
		if (strcmp(tmp->name, name))
			continue;
		data = tmp;
		break;
	}
	spin_unlock(&restart_data_lock);

	if (data)
		return data;

	data = kzalloc(struct_size(data, name, strlen(name) + 1), GFP_ATOMIC);
	if (!data)
		return NULL;

	strcpy(data->name, name);
	spin_lock(&restart_data_lock);
	list_add_tail(&data->list, &restart_data_list);
	spin_unlock(&restart_data_lock);

	return data;
}

static void iwl_trans_inc_restart_count(struct device *dev)
{
	struct iwl_trans_dev_restart_data *data;

	data = iwl_trans_get_restart_data(dev);
	if (data) {
		data->last_error = ktime_get_boottime_seconds();
		data->restart_count++;
	}
}

void iwl_trans_free_restart_list(void)
{
	struct iwl_trans_dev_restart_data *tmp;

	while ((tmp = list_first_entry_or_null(&restart_data_list,
					       typeof(*tmp), list))) {
		list_del(&tmp->list);
		kfree(tmp);
	}
}

struct iwl_trans_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void iwl_trans_reprobe_wk(struct work_struct *wk)
{
	struct iwl_trans_reprobe *reprobe;

	reprobe = container_of(wk, typeof(*reprobe), work);

	if (device_reprobe(reprobe->dev))
		dev_err(reprobe->dev, "reprobe failed!\n");
	put_device(reprobe->dev);
	kfree(reprobe);
	module_put(THIS_MODULE);
}

#define IWL_TRANS_RESET_OK_TIME 7 /* seconds */

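/*
 * Decide how aggressive the next reset should be: restart_count is
 * cleared again once the device has been error-free for
 * IWL_TRANS_RESET_OK_TIME seconds, so errors repeating within that
 * window walk further down escalation_list[], and an error that hits
 * during an ongoing reset escalates to at least a reprobe.
 */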
static enum iwl_reset_mode
iwl_trans_determine_restart_mode(struct iwl_trans *trans)
{
	struct iwl_trans_dev_restart_data *data;
	enum iwl_reset_mode at_least = 0;
	unsigned int index;
	static const enum iwl_reset_mode escalation_list[] = {
		IWL_RESET_MODE_SW_RESET,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_FUNC_RESET,
		/* FIXME: add TOP reset */
		IWL_RESET_MODE_PROD_RESET,
		/* FIXME: add TOP reset */
		IWL_RESET_MODE_PROD_RESET,
		/* FIXME: add TOP reset */
		IWL_RESET_MODE_PROD_RESET,
	};

	if (trans->restart.during_reset)
		at_least = IWL_RESET_MODE_REPROBE;

	data = iwl_trans_get_restart_data(trans->dev);
	if (!data)
		return at_least;

	if (ktime_get_boottime_seconds() - data->last_error >=
	    IWL_TRANS_RESET_OK_TIME)
		data->restart_count = 0;

	index = data->restart_count;
	if (index >= ARRAY_SIZE(escalation_list))
		index = ARRAY_SIZE(escalation_list) - 1;

	return max(at_least, escalation_list[index]);
}

#define IWL_TRANS_RESET_DELAY (HZ * 60)

static void iwl_trans_restart_wk(struct work_struct *wk)
{
	struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk);
	struct iwl_trans_reprobe *reprobe;
	enum iwl_reset_mode mode;

	if (!trans->op_mode)
		return;

	/* might have been scheduled before marked as dead, re-check */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return;

	iwl_op_mode_dump_error(trans->op_mode, &trans->restart.mode);

	/*
	 * If the opmode stopped the device while we were trying to dump and
	 * reset, then we'll have done the dump already (synchronized by the
	 * opmode lock that it will acquire in iwl_op_mode_dump_error()) and
	 * managed that via trans->restart.mode.
	 * Additionally, make sure that in such a case we won't attempt to do
	 * any resets now, since it's no longer requested.
	 */
	if (!test_and_clear_bit(STATUS_RESET_PENDING, &trans->status))
		return;

	if (!iwlwifi_mod_params.fw_restart)
		return;

	mode = iwl_trans_determine_restart_mode(trans);

	iwl_trans_inc_restart_count(trans->dev);

	switch (mode) {
	case IWL_RESET_MODE_SW_RESET:
		IWL_ERR(trans, "Device error - SW reset\n");
		iwl_trans_opmode_sw_reset(trans, trans->restart.mode.type);
		break;
	case IWL_RESET_MODE_REPROBE:
		IWL_ERR(trans, "Device error - reprobe!\n");

		/*
		 * get a module reference to avoid doing this while unloading
		 * anyway and to avoid scheduling a work with code that's
		 * being removed.
		 */
		if (!try_module_get(THIS_MODULE)) {
			IWL_ERR(trans, "Module is being unloaded - abort\n");
			return;
		}

		reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
		if (!reprobe) {
			module_put(THIS_MODULE);
			return;
		}
		reprobe->dev = get_device(trans->dev);
		INIT_WORK(&reprobe->work, iwl_trans_reprobe_wk);
		schedule_work(&reprobe->work);
		break;
	default:
		iwl_trans_pcie_reset(trans, mode);
		break;
	}
}

struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key __sync_cmd_key;
#endif

	trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
	if (!trans)
		return NULL;

	trans->trans_cfg = cfg_trans;

#ifdef CONFIG_LOCKDEP
	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
			 &__sync_cmd_key, 0);
#endif

	trans->dev = dev;
	trans->num_rx_queues = 1;

	INIT_WORK(&trans->restart.wk, iwl_trans_restart_wk);

	return trans;
}

int iwl_trans_init(struct iwl_trans *trans)
{
	int txcmd_size, txcmd_align;

	if (!trans->trans_cfg->gen2) {
		txcmd_size = sizeof(struct iwl_tx_cmd);
		txcmd_align = sizeof(void *);
	} else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
		txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
		txcmd_align = 64;
	} else {
		txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
		txcmd_align = 128;
	}

	txcmd_size += sizeof(struct iwl_cmd_header);
	txcmd_size += 36; /* biggest possible 802.11 header */

	/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
	if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
		return -EINVAL;

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  txcmd_size, txcmd_align,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!trans->dev_cmd_pool)
		return -ENOMEM;

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans->wait_command_queue);

	return 0;
}

void iwl_trans_free(struct iwl_trans *trans)
{
	cancel_work_sync(&trans->restart.wk);
	kmem_cache_destroy(trans->dev_cmd_pool);
}

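/*
 * Send a host command to the firmware. Commands are refused while in
 * opmode RF-kill (unless CMD_SEND_IN_RFKILL), while suspended in D3
 * (unless CMD_SEND_IN_D3), after a firmware error, or when the
 * firmware isn't alive.
 *
 * A minimal, purely illustrative synchronous caller might look like
 * this (ECHO_CMD is only an example command here):
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = WIDE_ID(LONG_GROUP, ECHO_CMD),
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret)
 *		iwl_free_resp(&hcmd);
 */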
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
		     test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
		return -ERFKILL;

	/*
	 * We can't test IWL_MVM_STATUS_IN_D3 in mvm->status because this
	 * bit is set early in the D3 flow, before we send all the commands
	 * that configure the firmware for D3 operation (power, patterns, ...)
	 * and we don't want to flag all those with CMD_SEND_IN_D3.
	 * So use the system_pm_mode instead. The only command sent after
	 * we set system_pm_mode is D3_CONFIG_CMD, which we now flag with
	 * CMD_SEND_IN_D3.
	 */
	if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
		     !(cmd->flags & CMD_SEND_IN_D3)))
		return -EHOSTDOWN;

	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);

	if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) {
		if (cmd->id != REPLY_ERROR)
			cmd->id = DEF_ID(cmd->id);
	}

	ret = iwl_trans_pcie_send_hcmd(trans, cmd);

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_release(&trans->sync_cmd_lockdep_map);

	if (WARN_ON((cmd->flags & CMD_WANT_SKB) && !ret && !cmd->resp_pkt))
		return -EIO;

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);

/* Comparator for struct iwl_hcmd_names.
 * Used in the binary search over a list of host commands.
 *
 * @key: command_id that we're looking for.
 * @elt: struct iwl_hcmd_names candidate for match.
 *
 * @return 0 iff equal.
 */
static int iwl_hcmd_names_cmp(const void *key, const void *elt)
{
	const struct iwl_hcmd_names *name = elt;
	const u8 *cmd1 = key;
	u8 cmd2 = name->cmd_id;

	return (*cmd1 - cmd2);
}

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
{
	u8 grp, cmd;
	struct iwl_hcmd_names *ret;
	const struct iwl_hcmd_arr *arr;
	size_t size = sizeof(struct iwl_hcmd_names);

	grp = iwl_cmd_groupid(id);
	cmd = iwl_cmd_opcode(id);

	if (!trans->command_groups || grp >= trans->command_groups_size ||
	    !trans->command_groups[grp].arr)
		return "UNKNOWN";

	arr = &trans->command_groups[grp];
	ret = bsearch(&cmd, arr->arr, arr->size, size, iwl_hcmd_names_cmp);
	if (!ret)
		return "UNKNOWN";
	return ret->cmd_name;
}
IWL_EXPORT_SYMBOL(iwl_get_cmd_string);

int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
{
	int i, j;
	const struct iwl_hcmd_arr *arr;

	for (i = 0; i < trans->command_groups_size; i++) {
		arr = &trans->command_groups[i];
		if (!arr->arr)
			continue;
		for (j = 0; j < arr->size - 1; j++)
			if (arr->arr[j].cmd_id > arr->arr[j + 1].cmd_id)
				return -1;
	}
	return 0;
}
IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);

void iwl_trans_configure(struct iwl_trans *trans,
			 const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	iwl_trans_pcie_configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}
IWL_EXPORT_SYMBOL(iwl_trans_configure);

int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return iwl_trans_pcie_start_hw(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_start_hw);

void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	iwl_trans_pcie_op_mode_leave(trans);

	cancel_work_sync(&trans->restart.wk);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_op_mode_leave);

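/*
 * The helpers below are thin wrappers that forward register, periphery
 * (PRPH) and memory accesses, as well as the remaining transport
 * operations, straight to the PCIe implementation.
 */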
void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	iwl_trans_pcie_write8(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write8);

void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	iwl_trans_pcie_write32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write32);

u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return iwl_trans_pcie_read32(trans, ofs);
}
IWL_EXPORT_SYMBOL(iwl_trans_read32);

u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return iwl_trans_pcie_read_prph(trans, ofs);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_prph);

void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
	return iwl_trans_pcie_write_prph(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_prph);

int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
		       void *buf, int dwords)
{
	return iwl_trans_pcie_read_mem(trans, addr, buf, dwords);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_mem);

int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
			const void *buf, int dwords)
{
	return iwl_trans_pcie_write_mem(trans, addr, buf, dwords);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_mem);

void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pmi);

int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
	return iwl_trans_pcie_sw_reset(trans, retake_ownership);
}
IWL_EXPORT_SYMBOL(iwl_trans_sw_reset);

struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx)
{
	return iwl_trans_pcie_dump_data(trans, dump_mask,
					sanitize_ops, sanitize_ctx);
}
IWL_EXPORT_SYMBOL(iwl_trans_dump_data);

int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
	might_sleep();

	return iwl_trans_pcie_d3_suspend(trans, test, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);

int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
			bool test, bool reset)
{
	might_sleep();

	return iwl_trans_pcie_d3_resume(trans, status, test, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);

void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
	iwl_trans_pci_interrupts(trans, enable);
}
IWL_EXPORT_SYMBOL(iwl_trans_interrupts);

void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	iwl_trans_pcie_sync_nmi(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_sync_nmi);

int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
			    u64 src_addr, u32 byte_cnt)
{
	return iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, byte_cnt);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_imr_mem);

void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
			     u32 mask, u32 value)
{
	iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_bits_mask);

int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
			    u32 *val)
{
	return iwl_trans_pcie_read_config32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_config32);

bool _iwl_trans_grab_nic_access(struct iwl_trans *trans)
{
	return iwl_trans_pcie_grab_nic_access(trans);
}
IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access);

void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	iwl_trans_pcie_release_nic_access(trans);
	__release(nic_access);
}
IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);

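/*
 * Firmware state handling: iwl_trans_start_fw() moves the transport to
 * IWL_TRANS_FW_STARTED on success, iwl_trans_fw_alive() to
 * IWL_TRANS_FW_ALIVE, and iwl_trans_stop_device() back to
 * IWL_TRANS_NO_FW; most of the TX/queue entry points below refuse to
 * run unless the state is IWL_TRANS_FW_ALIVE.
 */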
void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	if (trans->trans_cfg->gen2)
		iwl_trans_pcie_gen2_fw_alive(trans);
	else
		iwl_trans_pcie_fw_alive(trans, scd_addr);
}
IWL_EXPORT_SYMBOL(iwl_trans_fw_alive);

int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
		       bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);

	if (trans->trans_cfg->gen2)
		ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill);
	else
		ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill);

	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_start_fw);

void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	/*
	 * See also the comment in iwl_trans_restart_wk().
	 *
	 * When the opmode stops the device while a reset is pending, the
	 * worker (iwl_trans_restart_wk) might not have run yet or, more
	 * likely, will be blocked on the opmode lock. Due to the locking,
	 * we can't just flush the worker.
	 *
	 * If this is the case, then the test_and_clear_bit() ensures that
	 * the worker won't attempt to do anything after the stop.
	 *
	 * The trans->restart.mode is a handshake with the opmode, we set
	 * the context there to ABORT so that when the worker can finally
	 * acquire the lock in the opmode, the code there won't attempt to
	 * do any dumps. Since we'd really like to have the dump though,
	 * also do it inline here (with the opmode locks already held),
	 * but use a separate mode struct to avoid races.
	 */
	if (test_and_clear_bit(STATUS_RESET_PENDING, &trans->status)) {
		struct iwl_fw_error_dump_mode mode;

		mode = trans->restart.mode;
		mode.context = IWL_ERR_CONTEXT_FROM_OPMODE;
		trans->restart.mode.context = IWL_ERR_CONTEXT_ABORT;

		iwl_op_mode_dump_error(trans->op_mode, &mode);
	}

	if (trans->trans_cfg->gen2)
		iwl_trans_pcie_gen2_stop_device(trans);
	else
		iwl_trans_pcie_stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_stop_device);

int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
		 struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	if (trans->trans_cfg->gen2)
		return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue);

	return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_tx);

void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
		       struct sk_buff_head *skbs, bool is_flush)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush);
}
IWL_EXPORT_SYMBOL(iwl_trans_reclaim);

void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
			   bool configure_scd)
{
	iwl_trans_pcie_txq_disable(trans, queue, configure_scd);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_disable);

bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			      const struct iwl_trans_txq_scd_cfg *cfg,
			      unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return false;

	return iwl_trans_pcie_txq_enable(trans, queue, ssn,
					 cfg, queue_wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);

int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_trans_pcie_wait_txq_empty(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_txq_empty);

int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_trans_pcie_wait_txqs_empty(trans, txqs);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_tx_queues_empty);

void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
				unsigned long txqs, bool freeze)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_freeze_txq_timer(trans, txqs, freeze);
}
IWL_EXPORT_SYMBOL(iwl_trans_freeze_txq_timer);

void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
				   int txq_id, bool shared_mode)
{
	iwl_trans_pcie_txq_set_shared_mode(trans, txq_id, shared_mode);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_set_shared_mode);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans)
{
	iwl_trans_pcie_debugfs_cleanup(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_debugfs_cleanup);
#endif

void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_set_q_ptrs(trans, queue, ptr);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_q_ptrs);

int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
			u8 tid, int size, unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_txq_dyn_alloc(trans, flags, sta_mask, tid,
				 size, wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_alloc);

void iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	iwl_txq_dyn_free(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_free);

int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			       struct iwl_trans_rxq_dma_data *data)
{
	return iwl_trans_pcie_rxq_dma_data(trans, queue, data);
}
IWL_EXPORT_SYMBOL(iwl_trans_get_rxq_dma_data);

int iwl_trans_load_pnvm(struct iwl_trans *trans,
			const struct iwl_pnvm_image *pnvm_data,
			const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);

void iwl_trans_set_pnvm(struct iwl_trans *trans,
			const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);

int iwl_trans_load_reduce_power(struct iwl_trans *trans,
				const struct iwl_pnvm_image *payloads,
				const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads,
							      capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);

void iwl_trans_set_reduce_power(struct iwl_trans *trans,
				const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);