// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 * Copyright (C) 2019-2021, 2023-2024 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/bsearch.h>

#include "fw/api/tx.h"
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"
#include <linux/dmapool.h>
#include "fw/api/commands.h"
#include "pcie/internal.h"
#include "iwl-context-info-gen3.h"

struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key __key;
#endif

	trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
	if (!trans)
		return NULL;

	trans->trans_cfg = cfg_trans;

#ifdef CONFIG_LOCKDEP
	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
			 &__key, 0);
#endif

	trans->dev = dev;
	trans->num_rx_queues = 1;

	return trans;
}

int iwl_trans_init(struct iwl_trans *trans)
{
	int txcmd_size, txcmd_align;

	if (!trans->trans_cfg->gen2) {
		txcmd_size = sizeof(struct iwl_tx_cmd);
		txcmd_align = sizeof(void *);
	} else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
		txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
		txcmd_align = 64;
	} else {
		txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
		txcmd_align = 128;
	}

	txcmd_size += sizeof(struct iwl_cmd_header);
	txcmd_size += 36; /* biggest possible 802.11 header */

	/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
	if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
		return -EINVAL;

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  txcmd_size, txcmd_align,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!trans->dev_cmd_pool)
		return -ENOMEM;

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans->wait_command_queue);

	return 0;
}

void iwl_trans_free(struct iwl_trans *trans)
{
	kmem_cache_destroy(trans->dev_cmd_pool);
}

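/*
 * Illustrative lifecycle sketch (not part of the original file): the
 * bus-specific probe code is expected to pair these entry points roughly
 * as follows, where "priv" stands in for its private struct:
 *
 *	trans = iwl_trans_alloc(sizeof(priv), dev, cfg_trans);
 *	...
 *	ret = iwl_trans_init(trans);
 *
 * and undo the allocation with iwl_trans_free() on removal. The exact
 * caller lives in the PCIe glue and may differ between kernel versions.
 */
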
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
		     test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
		return -ERFKILL;

	/*
	 * We can't test IWL_MVM_STATUS_IN_D3 in mvm->status because this
	 * bit is set early in the D3 flow, before we send all the commands
	 * that configure the firmware for D3 operation (power, patterns, ...)
	 * and we don't want to flag all those with CMD_SEND_IN_D3.
	 * So use the system_pm_mode instead. The only command sent after
	 * we set system_pm_mode is D3_CONFIG_CMD, which we now flag with
	 * CMD_SEND_IN_D3.
	 */
	if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
		     !(cmd->flags & CMD_SEND_IN_D3)))
		return -EHOSTDOWN;

	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);

	if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) {
		if (cmd->id != REPLY_ERROR)
			cmd->id = DEF_ID(cmd->id);
	}

	ret = iwl_trans_pcie_send_hcmd(trans, cmd);

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_release(&trans->sync_cmd_lockdep_map);

	if (WARN_ON((cmd->flags & CMD_WANT_SKB) && !ret && !cmd->resp_pkt))
		return -EIO;

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);

/* Comparator for struct iwl_hcmd_names.
 * Used in the binary search over a list of host commands.
 *
 * @key: command_id that we're looking for.
 * @elt: struct iwl_hcmd_names candidate for match.
 *
 * @return 0 iff equal.
 */
static int iwl_hcmd_names_cmp(const void *key, const void *elt)
{
	const struct iwl_hcmd_names *name = elt;
	const u8 *cmd1 = key;
	u8 cmd2 = name->cmd_id;

	return (*cmd1 - cmd2);
}

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
{
	u8 grp, cmd;
	struct iwl_hcmd_names *ret;
	const struct iwl_hcmd_arr *arr;
	size_t size = sizeof(struct iwl_hcmd_names);

	grp = iwl_cmd_groupid(id);
	cmd = iwl_cmd_opcode(id);

	if (!trans->command_groups || grp >= trans->command_groups_size ||
	    !trans->command_groups[grp].arr)
		return "UNKNOWN";

	arr = &trans->command_groups[grp];
	ret = bsearch(&cmd, arr->arr, arr->size, size, iwl_hcmd_names_cmp);
	if (!ret)
		return "UNKNOWN";
	return ret->cmd_name;
}
IWL_EXPORT_SYMBOL(iwl_get_cmd_string);

int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
{
	int i, j;
	const struct iwl_hcmd_arr *arr;

	for (i = 0; i < trans->command_groups_size; i++) {
		arr = &trans->command_groups[i];
		if (!arr->arr)
			continue;
		for (j = 0; j < arr->size - 1; j++)
			if (arr->arr[j].cmd_id > arr->arr[j + 1].cmd_id)
				return -1;
	}
	return 0;
}
IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);

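/*
 * Usage sketch (illustrative, based on how op-modes typically feed
 * iwl_get_cmd_string() and iwl_cmd_groups_verify_sorted()): each op-mode
 * provides per-group arrays of struct iwl_hcmd_names, usually built with
 * a local HCMD_NAME() helper, e.g.
 *
 *	static const struct iwl_hcmd_names example_legacy_names[] = {
 *		HCMD_NAME(UCODE_ALIVE_NTFY),
 *		HCMD_NAME(REPLY_ERROR),
 *	};
 *
 * ("example_legacy_names" is a made-up name here.) The entries must stay
 * sorted by cmd_id because iwl_get_cmd_string() resolves names with
 * bsearch(); iwl_cmd_groups_verify_sorted() exists to catch violations
 * at configure time.
 */
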
void iwl_trans_configure(struct iwl_trans *trans,
			 const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	iwl_trans_pcie_configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}
IWL_EXPORT_SYMBOL(iwl_trans_configure);

int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return iwl_trans_pcie_start_hw(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_start_hw);

void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	iwl_trans_pcie_op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_op_mode_leave);

void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	iwl_trans_pcie_write8(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write8);

void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	iwl_trans_pcie_write32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write32);

u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return iwl_trans_pcie_read32(trans, ofs);
}
IWL_EXPORT_SYMBOL(iwl_trans_read32);

u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return iwl_trans_pcie_read_prph(trans, ofs);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_prph);

void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
	return iwl_trans_pcie_write_prph(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_prph);

int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
		       void *buf, int dwords)
{
	return iwl_trans_pcie_read_mem(trans, addr, buf, dwords);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_mem);

int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
			const void *buf, int dwords)
{
	return iwl_trans_pcie_write_mem(trans, addr, buf, dwords);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_mem);

void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pmi);

int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
	return iwl_trans_pcie_sw_reset(trans, retake_ownership);
}
IWL_EXPORT_SYMBOL(iwl_trans_sw_reset);

struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx)
{
	return iwl_trans_pcie_dump_data(trans, dump_mask,
					sanitize_ops, sanitize_ctx);
}
IWL_EXPORT_SYMBOL(iwl_trans_dump_data);

int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
	might_sleep();

	return iwl_trans_pcie_d3_suspend(trans, test, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);

int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
			bool test, bool reset)
{
	might_sleep();

	return iwl_trans_pcie_d3_resume(trans, status, test, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);

void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
	iwl_trans_pci_interrupts(trans, enable);
}
IWL_EXPORT_SYMBOL(iwl_trans_interrupts);

void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	iwl_trans_pcie_sync_nmi(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_sync_nmi);

int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
			    u64 src_addr, u32 byte_cnt)
{
	return iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, byte_cnt);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_imr_mem);

void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
			     u32 mask, u32 value)
{
	iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_bits_mask);

int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
			    u32 *val)
{
	return iwl_trans_pcie_read_config32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_config32);

bool _iwl_trans_grab_nic_access(struct iwl_trans *trans)
{
	return iwl_trans_pcie_grab_nic_access(trans);
}
IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access);

void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	iwl_trans_pcie_release_nic_access(trans);
	__release(nic_access);
}
IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);

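/*
 * Typical caller pattern (sketch): _iwl_trans_grab_nic_access() is normally
 * reached through the iwl_trans_grab_nic_access() wrapper in iwl-trans.h and
 * paired with iwl_trans_release_nic_access(), e.g.
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		... register accesses that need the NIC to stay awake ...
 *		iwl_trans_release_nic_access(trans);
 *	}
 *
 * so the release always balances a successful grab.
 */
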
void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	if (trans->trans_cfg->gen2)
		iwl_trans_pcie_gen2_fw_alive(trans);
	else
		iwl_trans_pcie_fw_alive(trans, scd_addr);
}
IWL_EXPORT_SYMBOL(iwl_trans_fw_alive);

int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
		       bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);

	if (trans->trans_cfg->gen2)
		ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill);
	else
		ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill);

	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_start_fw);

void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->trans_cfg->gen2)
		iwl_trans_pcie_gen2_stop_device(trans);
	else
		iwl_trans_pcie_stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_stop_device);

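/*
 * State summary (sketch, derived from the functions in this file):
 *
 *	IWL_TRANS_NO_FW      --iwl_trans_start_fw()--> IWL_TRANS_FW_STARTED
 *	IWL_TRANS_FW_STARTED --iwl_trans_fw_alive()--> IWL_TRANS_FW_ALIVE
 *	any state            --iwl_trans_stop_device() or
 *	                       iwl_trans_op_mode_leave()--> IWL_TRANS_NO_FW
 *
 * Most of the TX and queue entry points below WARN and bail out unless the
 * state is IWL_TRANS_FW_ALIVE.
 */
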
int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
		 struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	if (trans->trans_cfg->gen2)
		return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue);

	return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_tx);

void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
		       struct sk_buff_head *skbs, bool is_flush)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush);
}
IWL_EXPORT_SYMBOL(iwl_trans_reclaim);

void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
			   bool configure_scd)
{
	iwl_trans_pcie_txq_disable(trans, queue, configure_scd);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_disable);

bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			      const struct iwl_trans_txq_scd_cfg *cfg,
			      unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return false;

	return iwl_trans_pcie_txq_enable(trans, queue, ssn,
					 cfg, queue_wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);

int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_trans_pcie_wait_txq_empty(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_txq_empty);

int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_trans_pcie_wait_txqs_empty(trans, txqs);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_tx_queues_empty);

void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
				unsigned long txqs, bool freeze)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_freeze_txq_timer(trans, txqs, freeze);
}
IWL_EXPORT_SYMBOL(iwl_trans_freeze_txq_timer);

void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
				   int txq_id, bool shared_mode)
{
	iwl_trans_pcie_txq_set_shared_mode(trans, txq_id, shared_mode);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_set_shared_mode);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans)
{
	iwl_trans_pcie_debugfs_cleanup(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_debugfs_cleanup);
#endif

void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_set_q_ptrs(trans, queue, ptr);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_q_ptrs);

int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
			u8 tid, int size, unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_txq_dyn_alloc(trans, flags, sta_mask, tid,
				 size, wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_alloc);

void iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	iwl_txq_dyn_free(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_free);

int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			       struct iwl_trans_rxq_dma_data *data)
{
	return iwl_trans_pcie_rxq_dma_data(trans, queue, data);
}
IWL_EXPORT_SYMBOL(iwl_trans_get_rxq_dma_data);

int iwl_trans_load_pnvm(struct iwl_trans *trans,
			const struct iwl_pnvm_image *pnvm_data,
			const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);

void iwl_trans_set_pnvm(struct iwl_trans *trans,
			const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);

int iwl_trans_load_reduce_power(struct iwl_trans *trans,
				const struct iwl_pnvm_image *payloads,
				const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads,
							      capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);

void iwl_trans_set_reduce_power(struct iwl_trans *trans,
				const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);
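
/*
 * Note (sketch, based on how the PNVM/reduce-power handlers appear to be
 * paired in the firmware load path): the "load" variants above push the
 * payloads to device memory once, while the "set" variants only point the
 * context info at the already-loaded images, so callers are expected to
 * call iwl_trans_load_pnvm() before iwl_trans_set_pnvm(), and likewise for
 * the reduce-power table.
 */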