// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2024 Intel Corporation
 */
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info.h"
#include "internal.h"
#include "iwl-prph.h"

static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
						     size_t size,
						     dma_addr_t *phys,
						     int depth)
{
	void *result;

	if (WARN(depth > 2,
		 "failed to allocate DMA memory not crossing 2^32 boundary"))
		return NULL;

	result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);

	if (!result)
		return NULL;

	if (unlikely(iwl_txq_crosses_4g_boundary(*phys, size))) {
		void *old = result;
		dma_addr_t oldphys = *phys;

		result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
								phys,
								depth + 1);
		dma_free_coherent(trans->dev, size, old, oldphys);
	}

	return result;
}

void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
					    size_t size,
					    dma_addr_t *phys)
{
	return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
}
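
/*
 * Usage sketch (illustrative only, not called as such anywhere in this
 * file): the allocator above behaves like dma_alloc_coherent() but
 * retries with a fresh buffer when the result straddles a 2^32 DMA
 * boundary, giving up after two retries (depth > 2). Each rejected
 * buffer is freed only after the retry allocation succeeds or fails, so
 * a retry cannot be handed back the same crossing region:
 *
 *	dma_addr_t phys;
 *	void *buf = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len, &phys);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(trans->dev, len, buf, phys);
 */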

int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
				 const void *data, u32 len,
				 struct iwl_dram_data *dram)
{
	dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
							    &dram->physical);
	if (!dram->block)
		return -ENOMEM;

	dram->size = len;
	memcpy(dram->block, data, len);

	return 0;
}

void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->paging) {
		WARN_ON(dram->paging_cnt);
		return;
	}

	/* free paging */
	for (i = 0; i < dram->paging_cnt; i++)
		dma_free_coherent(trans->dev, dram->paging[i].size,
				  dram->paging[i].block,
				  dram->paging[i].physical);

	kfree(dram->paging);
	dram->paging_cnt = 0;
	dram->paging = NULL;
}
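
/*
 * The fw_img section array consumed below appears to be laid out as
 *
 *	[lmac sections][separator][umac sections][separator][paging sections]
 *
 * which is why the umac and paging loops index fw->sec[] with +1 and +2
 * offsets respectively. This layout is inferred from the separator
 * comments in the code itself.
 */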
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
			 const struct fw_img *fw,
			 struct iwl_context_info_dram *ctxt_dram)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i, ret, lmac_cnt, umac_cnt, paging_cnt;

	if (WARN(dram->paging,
		 "paging shouldn't already be initialized (%d pages)\n",
		 dram->paging_cnt))
		iwl_pcie_ctxt_info_free_paging(trans);

	lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
	/* add 1 due to separator */
	umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
	/* add 2 due to separators */
	paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);

	dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
	if (!dram->fw)
		return -ENOMEM;
	dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
	if (!dram->paging)
		return -ENOMEM;

	/* initialize lmac sections */
	for (i = 0; i < lmac_cnt; i++) {
		ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[i].data,
						   fw->sec[i].len,
						   &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
		dram->fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwl_pcie_ctxt_info_alloc_dma(trans,
						   fw->sec[dram->fw_cnt + 1].data,
						   fw->sec[dram->fw_cnt + 1].len,
						   &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
		dram->fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw like the umac and lmac
	 * sections - it is stored separately, since its release timing
	 * differs: fw memory can be released on alive, while paging memory
	 * can only be freed once the device goes down.
	 * Given that, the logic here for accessing the fw image is a bit
	 * different - fw_cnt isn't changing, so the loop counter is added
	 * to it.
	 */
	for (i = 0; i < paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = dram->fw_cnt + i + 2;

		ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[fw_idx].data,
						   fw->sec[fw_idx].len,
						   &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] =
			cpu_to_le64(dram->paging[i].physical);
		dram->paging_cnt++;
	}

	return 0;
}
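
/*
 * Build the context info structure that the firmware bootstrap reads:
 * RX/TX queue addresses, RB size, and the DRAM addresses of the ucode
 * sections. Writing its DMA address to CSR_CTXT_INFO_BA then kicks the
 * firmware self load; the structure is released upon alive (or failure
 * to get one), via iwl_pcie_ctxt_info_free() below.
 */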
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
			    const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info *ctxt_info;
	struct iwl_context_info_rbd_cfg *rx_cfg;
	u32 control_flags = 0, rb_size;
	dma_addr_t phys;
	int ret;

	ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
							  sizeof(*ctxt_info),
							  &phys);
	if (!ctxt_info)
		return -ENOMEM;

	trans_pcie->ctxt_info_dma_addr = phys;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		cpu_to_le16((u16)trans->hw_rev);
	/* size is in DWs */
	ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_16K;
		break;
	default:
		WARN_ON(1);
		rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
	}

	WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12);
	control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
	control_flags |=
		u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds),
				IWL_CTXT_INFO_RB_CB_SIZE);
	control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
	ctxt_info->control.control_flags = cpu_to_le32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
	rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
	if (ret) {
		dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
				  ctxt_info, trans_pcie->ctxt_info_dma_addr);
		return ret;
	}

	trans_pcie->ctxt_info = ctxt_info;

	iwl_enable_fw_load_int_ctx_info(trans);

	/* Configure debug, if it exists */
	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}

void iwl_pcie_ctxt_info_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->ctxt_info)
		return;

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
			  trans_pcie->ctxt_info,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_dma_addr = 0;
	trans_pcie->ctxt_info = NULL;

	iwl_pcie_ctxt_info_free_fw_img(trans);
}