// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 * Copyright (C) 2019-2021, 2023-2024 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/bsearch.h>
#include <linux/list.h>

#include "fw/api/tx.h"
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"
#include <linux/dmapool.h>
#include "fw/api/commands.h"
#include "pcie/internal.h"
#include "iwl-context-info-gen3.h"

struct iwl_trans_dev_restart_data {
	struct list_head list;
	unsigned int restart_count;
	time64_t last_error;
	char name[];
};

static LIST_HEAD(restart_data_list);
static DEFINE_SPINLOCK(restart_data_lock);

static struct iwl_trans_dev_restart_data *
iwl_trans_get_restart_data(struct device *dev)
{
	struct iwl_trans_dev_restart_data *tmp, *data = NULL;
	const char *name = dev_name(dev);

	spin_lock(&restart_data_lock);
	list_for_each_entry(tmp, &restart_data_list, list) {
		if (strcmp(tmp->name, name))
			continue;
		data = tmp;
		break;
	}
	spin_unlock(&restart_data_lock);

	if (data)
		return data;

	data = kzalloc(struct_size(data, name, strlen(name) + 1), GFP_ATOMIC);
	if (!data)
		return NULL;

	strcpy(data->name, name);
	spin_lock(&restart_data_lock);
	list_add_tail(&data->list, &restart_data_list);
	spin_unlock(&restart_data_lock);

	return data;
}

static void iwl_trans_inc_restart_count(struct device *dev)
{
	struct iwl_trans_dev_restart_data *data;

	data = iwl_trans_get_restart_data(dev);
	if (data) {
		data->last_error = ktime_get_boottime_seconds();
		data->restart_count++;
	}
}

void iwl_trans_free_restart_list(void)
{
	struct iwl_trans_dev_restart_data *tmp;

	while ((tmp = list_first_entry_or_null(&restart_data_list,
					       typeof(*tmp), list))) {
		list_del(&tmp->list);
		kfree(tmp);
	}
}

struct iwl_trans_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void iwl_trans_reprobe_wk(struct work_struct *wk)
{
	struct iwl_trans_reprobe *reprobe;

	reprobe = container_of(wk, typeof(*reprobe), work);

	if (device_reprobe(reprobe->dev))
		dev_err(reprobe->dev, "reprobe failed!\n");
	put_device(reprobe->dev);
	kfree(reprobe);
	module_put(THIS_MODULE);
}

#define IWL_TRANS_RESET_OK_TIME	180 /* seconds */

static enum iwl_reset_mode
iwl_trans_determine_restart_mode(struct iwl_trans *trans)
{
	struct iwl_trans_dev_restart_data *data;
	enum iwl_reset_mode at_least = 0;
	unsigned int index;
	static const enum iwl_reset_mode escalation_list[] = {
		IWL_RESET_MODE_SW_RESET,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_FUNC_RESET,
		/* FIXME: add TOP reset */
		IWL_RESET_MODE_PROD_RESET,
		/* FIXME: add TOP reset */
		IWL_RESET_MODE_PROD_RESET,
		/* FIXME: add TOP reset */
		IWL_RESET_MODE_PROD_RESET,
	};

	if (trans->restart.during_reset)
		at_least = IWL_RESET_MODE_REPROBE;

	data = iwl_trans_get_restart_data(trans->dev);
	if (!data)
		return at_least;

	if (ktime_get_boottime_seconds() - data->last_error >=
	    IWL_TRANS_RESET_OK_TIME)
		data->restart_count = 0;

	index = data->restart_count;
	if (index >= ARRAY_SIZE(escalation_list))
		index = ARRAY_SIZE(escalation_list) - 1;

	return max(at_least, escalation_list[index]);
}

#define IWL_TRANS_RESET_DELAY	(HZ * 60)
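
/*
 * Restart worker: runs after a firmware error was detected.  It dumps
 * the error (synchronized with the opmode via iwl_op_mode_dump_error()),
 * bails out early if the reset is no longer pending or firmware restart
 * is disabled by module parameter, then picks a reset mode from the
 * escalation list above and either asks the opmode for a SW reset,
 * schedules a driver reprobe, or falls through to a PCIe-level reset.
 * Each invocation bumps the per-device restart count, which decays
 * after IWL_TRANS_RESET_OK_TIME seconds without errors.
 */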
static void iwl_trans_restart_wk(struct work_struct *wk)
{
	struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk);
	struct iwl_trans_reprobe *reprobe;
	enum iwl_reset_mode mode;

	if (!trans->op_mode)
		return;

	/* might have been scheduled before marked as dead, re-check */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return;

	iwl_op_mode_dump_error(trans->op_mode, &trans->restart.mode);

	/*
	 * If the opmode stopped the device while we were trying to dump and
	 * reset, then we'll have done the dump already (synchronized by the
	 * opmode lock that it will acquire in iwl_op_mode_dump_error()) and
	 * managed that via trans->restart.mode.
	 * Additionally, make sure that in such a case we won't attempt to do
	 * any resets now, since it's no longer requested.
	 */
	if (!test_and_clear_bit(STATUS_RESET_PENDING, &trans->status))
		return;

	if (!iwlwifi_mod_params.fw_restart)
		return;

	mode = iwl_trans_determine_restart_mode(trans);

	iwl_trans_inc_restart_count(trans->dev);

	switch (mode) {
	case IWL_RESET_MODE_SW_RESET:
		IWL_ERR(trans, "Device error - SW reset\n");
		iwl_trans_opmode_sw_reset(trans, trans->restart.mode.type);
		break;
	case IWL_RESET_MODE_REPROBE:
		IWL_ERR(trans, "Device error - reprobe!\n");

		/*
		 * get a module reference to avoid doing this while unloading
		 * anyway and to avoid scheduling a work with code that's
		 * being removed.
		 */
		if (!try_module_get(THIS_MODULE)) {
			IWL_ERR(trans, "Module is being unloaded - abort\n");
			return;
		}

		reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
		if (!reprobe) {
			module_put(THIS_MODULE);
			return;
		}
		reprobe->dev = get_device(trans->dev);
		INIT_WORK(&reprobe->work, iwl_trans_reprobe_wk);
		schedule_work(&reprobe->work);
		break;
	default:
		iwl_trans_pcie_reset(trans, mode);
		break;
	}
}

struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key __sync_cmd_key;
#endif

	trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
	if (!trans)
		return NULL;

	trans->trans_cfg = cfg_trans;

#ifdef CONFIG_LOCKDEP
	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
			 &__sync_cmd_key, 0);
#endif

	trans->dev = dev;
	trans->num_rx_queues = 1;

	INIT_WORK(&trans->restart.wk, iwl_trans_restart_wk);

	return trans;
}

int iwl_trans_init(struct iwl_trans *trans)
{
	int txcmd_size, txcmd_align;

	if (!trans->trans_cfg->gen2) {
		txcmd_size = sizeof(struct iwl_tx_cmd);
		txcmd_align = sizeof(void *);
	} else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
		txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
		txcmd_align = 64;
	} else {
		txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
		txcmd_align = 128;
	}

	txcmd_size += sizeof(struct iwl_cmd_header);
	txcmd_size += 36; /* biggest possible 802.11 header */

	/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
	if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
		return -EINVAL;

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));
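	/*
	 * Per-device slab cache for TX command buffers: each object holds
	 * the largest device TX command plus the command header and the
	 * biggest possible 802.11 header, aligned (see the WARN_ON above)
	 * so that on gen2 hardware a command never crosses the alignment
	 * boundary.
	 */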
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  txcmd_size, txcmd_align,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!trans->dev_cmd_pool)
		return -ENOMEM;

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans->wait_command_queue);

	return 0;
}

void iwl_trans_free(struct iwl_trans *trans)
{
	cancel_work_sync(&trans->restart.wk);
	kmem_cache_destroy(trans->dev_cmd_pool);
}
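
/*
 * Send a host command to the device.  Commands are rejected with
 * -ERFKILL while in opmode RF-kill (unless CMD_SEND_IN_RFKILL is set),
 * with -EHOSTDOWN while the platform is in D3 (unless CMD_SEND_IN_D3
 * is set) and with -EIO after a firmware error or when the firmware
 * isn't alive.  Synchronous commands are tracked via lockdep to catch
 * illegal nesting.
 */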
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
		     test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
		return -ERFKILL;

	/*
	 * We can't test IWL_MVM_STATUS_IN_D3 in mvm->status because this
	 * bit is set early in the D3 flow, before we send all the commands
	 * that configure the firmware for D3 operation (power, patterns, ...)
	 * and we don't want to flag all those with CMD_SEND_IN_D3.
	 * So use the system_pm_mode instead. The only command sent after
	 * we set system_pm_mode is D3_CONFIG_CMD, which we now flag with
	 * CMD_SEND_IN_D3.
	 */
	if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
		     !(cmd->flags & CMD_SEND_IN_D3)))
		return -EHOSTDOWN;

	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);

	if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) {
		if (cmd->id != REPLY_ERROR)
			cmd->id = DEF_ID(cmd->id);
	}

	ret = iwl_trans_pcie_send_hcmd(trans, cmd);

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_release(&trans->sync_cmd_lockdep_map);

	if (WARN_ON((cmd->flags & CMD_WANT_SKB) && !ret && !cmd->resp_pkt))
		return -EIO;

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);

/* Comparator for struct iwl_hcmd_names.
 * Used in the binary search over a list of host commands.
 *
 * @key: command_id that we're looking for.
 * @elt: struct iwl_hcmd_names candidate for match.
 *
 * @return 0 iff equal.
 */
static int iwl_hcmd_names_cmp(const void *key, const void *elt)
{
	const struct iwl_hcmd_names *name = elt;
	const u8 *cmd1 = key;
	u8 cmd2 = name->cmd_id;

	return (*cmd1 - cmd2);
}

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
{
	u8 grp, cmd;
	struct iwl_hcmd_names *ret;
	const struct iwl_hcmd_arr *arr;
	size_t size = sizeof(struct iwl_hcmd_names);

	grp = iwl_cmd_groupid(id);
	cmd = iwl_cmd_opcode(id);

	if (!trans->command_groups || grp >= trans->command_groups_size ||
	    !trans->command_groups[grp].arr)
		return "UNKNOWN";

	arr = &trans->command_groups[grp];
	ret = bsearch(&cmd, arr->arr, arr->size, size, iwl_hcmd_names_cmp);
	if (!ret)
		return "UNKNOWN";
	return ret->cmd_name;
}
IWL_EXPORT_SYMBOL(iwl_get_cmd_string);

int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
{
	int i, j;
	const struct iwl_hcmd_arr *arr;

	for (i = 0; i < trans->command_groups_size; i++) {
		arr = &trans->command_groups[i];
		if (!arr->arr)
			continue;
		for (j = 0; j < arr->size - 1; j++)
			if (arr->arr[j].cmd_id > arr->arr[j + 1].cmd_id)
				return -1;
	}
	return 0;
}
IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);

void iwl_trans_configure(struct iwl_trans *trans,
			 const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	iwl_trans_pcie_configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}
IWL_EXPORT_SYMBOL(iwl_trans_configure);

int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return iwl_trans_pcie_start_hw(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_start_hw);

void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	iwl_trans_pcie_op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_op_mode_leave);

void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	iwl_trans_pcie_write8(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write8);

void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	iwl_trans_pcie_write32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write32);

u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return iwl_trans_pcie_read32(trans, ofs);
}
IWL_EXPORT_SYMBOL(iwl_trans_read32);

u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return iwl_trans_pcie_read_prph(trans, ofs);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_prph);

void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
	return iwl_trans_pcie_write_prph(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_prph);

int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
		       void *buf, int dwords)
{
	return iwl_trans_pcie_read_mem(trans, addr, buf, dwords);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_mem);

int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
			const void *buf, int dwords)
{
	return iwl_trans_pcie_write_mem(trans, addr, buf, dwords);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_mem);

void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pmi);
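
/*
 * Like the register accessors above, the entry points below are thin
 * wrappers that forward into the PCIe transport implementation
 * (iwl_trans_pcie_*), keeping callers independent of the PCIe layer.
 */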

int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
	return iwl_trans_pcie_sw_reset(trans, retake_ownership);
}
IWL_EXPORT_SYMBOL(iwl_trans_sw_reset);

struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx)
{
	return iwl_trans_pcie_dump_data(trans, dump_mask,
					sanitize_ops, sanitize_ctx);
}
IWL_EXPORT_SYMBOL(iwl_trans_dump_data);

int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
	might_sleep();

	return iwl_trans_pcie_d3_suspend(trans, test, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);

int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
			bool test, bool reset)
{
	might_sleep();

	return iwl_trans_pcie_d3_resume(trans, status, test, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);

void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
	iwl_trans_pci_interrupts(trans, enable);
}
IWL_EXPORT_SYMBOL(iwl_trans_interrupts);

void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	iwl_trans_pcie_sync_nmi(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_sync_nmi);

int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
			    u64 src_addr, u32 byte_cnt)
{
	return iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, byte_cnt);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_imr_mem);

void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
			     u32 mask, u32 value)
{
	iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_bits_mask);

int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
			    u32 *val)
{
	return iwl_trans_pcie_read_config32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_config32);

bool _iwl_trans_grab_nic_access(struct iwl_trans *trans)
{
	return iwl_trans_pcie_grab_nic_access(trans);
}
IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access);

void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	iwl_trans_pcie_release_nic_access(trans);
	__release(nic_access);
}
IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);

void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	if (trans->trans_cfg->gen2)
		iwl_trans_pcie_gen2_fw_alive(trans);
	else
		iwl_trans_pcie_fw_alive(trans, scd_addr);
}
IWL_EXPORT_SYMBOL(iwl_trans_fw_alive);

int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
		       bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);

	if (trans->trans_cfg->gen2)
		ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill);
	else
		ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill);

	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_start_fw);
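
/*
 * Stopping the device transitions the transport back to
 * IWL_TRANS_NO_FW; any error dump still pending at that point is
 * taken here, inline, before the hardware is actually stopped.
 */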

void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	/*
	 * See also the comment in iwl_trans_restart_wk().
	 *
	 * When the opmode stops the device while a reset is pending, the
	 * worker (iwl_trans_restart_wk) might not have run yet or, more
	 * likely, will be blocked on the opmode lock. Due to the locking,
	 * we can't just flush the worker.
	 *
	 * If this is the case, then the test_and_clear_bit() ensures that
	 * the worker won't attempt to do anything after the stop.
	 *
	 * The trans->restart.mode is a handshake with the opmode, we set
	 * the context there to ABORT so that when the worker can finally
	 * acquire the lock in the opmode, the code there won't attempt to
	 * do any dumps. Since we'd really like to have the dump though,
	 * also do it inline here (with the opmode locks already held),
	 * but use a separate mode struct to avoid races.
	 */
	if (test_and_clear_bit(STATUS_RESET_PENDING, &trans->status)) {
		struct iwl_fw_error_dump_mode mode;

		mode = trans->restart.mode;
		mode.context = IWL_ERR_CONTEXT_FROM_OPMODE;
		trans->restart.mode.context = IWL_ERR_CONTEXT_ABORT;

		iwl_op_mode_dump_error(trans->op_mode, &mode);
	}

	if (trans->trans_cfg->gen2)
		iwl_trans_pcie_gen2_stop_device(trans);
	else
		iwl_trans_pcie_stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_stop_device);

int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
		 struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	if (trans->trans_cfg->gen2)
		return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue);

	return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_tx);

void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
		       struct sk_buff_head *skbs, bool is_flush)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush);
}
IWL_EXPORT_SYMBOL(iwl_trans_reclaim);

void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
			   bool configure_scd)
{
	iwl_trans_pcie_txq_disable(trans, queue, configure_scd);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_disable);

bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			      const struct iwl_trans_txq_scd_cfg *cfg,
			      unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return false;

	return iwl_trans_pcie_txq_enable(trans, queue, ssn,
					 cfg, queue_wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);

int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_trans_pcie_wait_txq_empty(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_txq_empty);

int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_trans_pcie_wait_txqs_empty(trans, txqs);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_tx_queues_empty);
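
/*
 * Freeze or unfreeze the timers of the TX queues selected by the
 * @txqs mask; like the other queue operations this is only valid
 * while the firmware is alive.
 */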
void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
				unsigned long txqs, bool freeze)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_freeze_txq_timer(trans, txqs, freeze);
}
IWL_EXPORT_SYMBOL(iwl_trans_freeze_txq_timer);

void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
				   int txq_id, bool shared_mode)
{
	iwl_trans_pcie_txq_set_shared_mode(trans, txq_id, shared_mode);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_set_shared_mode);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans)
{
	iwl_trans_pcie_debugfs_cleanup(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_debugfs_cleanup);
#endif

void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_set_q_ptrs(trans, queue, ptr);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_q_ptrs);

int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
			u8 tid, int size, unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_txq_dyn_alloc(trans, flags, sta_mask, tid,
				 size, wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_alloc);

void iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	iwl_txq_dyn_free(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_free);

int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			       struct iwl_trans_rxq_dma_data *data)
{
	return iwl_trans_pcie_rxq_dma_data(trans, queue, data);
}
IWL_EXPORT_SYMBOL(iwl_trans_get_rxq_dma_data);

int iwl_trans_load_pnvm(struct iwl_trans *trans,
			const struct iwl_pnvm_image *pnvm_data,
			const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);

void iwl_trans_set_pnvm(struct iwl_trans *trans,
			const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);

int iwl_trans_load_reduce_power(struct iwl_trans *trans,
				const struct iwl_pnvm_image *payloads,
				const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads,
							      capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);

void iwl_trans_set_reduce_power(struct iwl_trans *trans,
				const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);