// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2024 Intel Corporation
 */
#include <linux/dmi.h>
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
#include "iwl-prph.h"

static const struct dmi_system_id dmi_force_scu_active_approved_list[] = {
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
		},
	},
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
		},
	},
	/* keep last */
	{}
};

static bool iwl_is_force_scu_active_approved(void)
{
	return !!dmi_check_system(dmi_force_scu_active_approved_list);
}

static void
iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
			      struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
			      u32 *control_flags)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 dbg_flags = 0;

	if (!iwl_trans_dbg_ini_valid(trans)) {
		struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (fw_mon->size) {
			dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;

			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM buffer destination\n");

			dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical);
			dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size);
		}

		goto out;
	}

	fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];

	switch (le32_to_cpu(fw_mon_cfg->buf_location)) {
	case IWL_FW_INI_LOCATION_SRAM_PATH:
		dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
		IWL_DEBUG_FW(trans,
			     "WRT: Applying SMEM buffer destination\n");
		break;

	case IWL_FW_INI_LOCATION_NPK_PATH:
		dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF;
		IWL_DEBUG_FW(trans,
			     "WRT: Applying NPK buffer destination\n");
		break;

	case IWL_FW_INI_LOCATION_DRAM_PATH:
		if (trans->dbg.fw_mon_ini[alloc_id].num_frags) {
			struct iwl_dram_data *frag =
				&trans->dbg.fw_mon_ini[alloc_id].frags[0];
			dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
			dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
			dbg_cfg->hwm_size = cpu_to_le32(frag->size);
			dbg_cfg->debug_token_config = cpu_to_le32(trans->dbg.ucode_preset);
			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM destination (debug_token_config=%u)\n",
				     dbg_cfg->debug_token_config);
			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
				     alloc_id,
				     trans->dbg.fw_mon_ini[alloc_id].num_frags);
		}
		break;
	default:
		IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n",
			     le32_to_cpu(fw_mon_cfg->buf_location));
	}
out:
	if (dbg_flags)
		*control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
}

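/*
 * Build and hand over the gen3 (AX210+) boot-time context: allocate the
 * peripheral scratch and peripheral info areas, the context info structure
 * itself and a copy of the IML, then point CSR_CTXT_INFO_ADDR /
 * CSR_IML_DATA_ADDR at them and kick the firmware self-load.
 */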
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
				 const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info_gen3 *ctxt_info_gen3;
	struct iwl_prph_scratch *prph_scratch;
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	struct iwl_prph_info *prph_info;
	u32 control_flags = 0;
	int ret;
	int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
			      trans->cfg->min_txq_size);

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_DEF:
		return -EINVAL;
	case IWL_AMSDU_2K:
		break;
	case IWL_AMSDU_4K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		/* if firmware supports the ext size, tell it */
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
		break;
	case IWL_AMSDU_12K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		/* if firmware supports the ext size, tell it */
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K;
		break;
	}

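	/*
	 * The prph scratch is a DMA-coherent area the firmware reads at
	 * boot: control flags, the RX free-RBD address, the debug (hwm)
	 * buffer config, the step equalizer mailbox addresses and the
	 * ucode section table all end up in here.
	 */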
	/* Allocate prph scratch */
	prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
					  &trans_pcie->prph_scratch_dma_addr,
					  GFP_KERNEL);
	if (!prph_scratch)
		return -ENOMEM;

	prph_sc_ctrl = &prph_scratch->ctrl_cfg;

	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id =
		cpu_to_le16((u16)trans->hw_rev);
	prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);

	control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
	control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;

	if (trans->trans_cfg->imr_enabled)
		control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;

	if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
	    iwl_is_force_scu_active_approved()) {
		control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE;
		IWL_DEBUG_FW(trans,
			     "Context Info: Set SCU_FORCE_ACTIVE (0x%x) in control_flags\n",
			     IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE);
	}

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
		cpu_to_le64(trans_pcie->rxq->bd_dma);

	iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
				      &control_flags);
	prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);

	/* initialize the Step equalizer data */
	prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step);
	prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
	if (ret)
		goto err_free_prph_scratch;

	/* Allocate prph information
	 * currently we don't assign to the prph info anything, but it would get
	 * assigned later
	 *
	 * We also use the second half of this page to give the device some
	 * dummy TR/CR tail pointers - which shouldn't be necessary as we don't
	 * use this, but the hardware still reads/writes there and we can't let
	 * it go do that with a NULL pointer.
	 */
	BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
	prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
				       &trans_pcie->prph_info_dma_addr,
				       GFP_KERNEL);
	if (!prph_info) {
		ret = -ENOMEM;
		goto err_free_prph_scratch;
	}

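	/*
	 * The context info structure carries the DMA addresses the device
	 * fetches at boot: prph info/scratch, the RX/TX index arrays, the
	 * command queue (MTR) and the RX used-BD (MCR) rings.
	 */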
	/* Allocate context info */
	ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
					    sizeof(*ctxt_info_gen3),
					    &trans_pcie->ctxt_info_dma_addr,
					    GFP_KERNEL);
	if (!ctxt_info_gen3) {
		ret = -ENOMEM;
		goto err_free_prph_info;
	}

	ctxt_info_gen3->prph_info_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr);
	ctxt_info_gen3->prph_scratch_base_addr =
		cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
	ctxt_info_gen3->prph_scratch_size =
		cpu_to_le32(sizeof(*prph_scratch));
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
		cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
	ctxt_info_gen3->mcr_base_addr =
		cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	ctxt_info_gen3->mtr_size =
		cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
	ctxt_info_gen3->mcr_size =
		cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds));

	trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
	trans_pcie->prph_info = prph_info;
	trans_pcie->prph_scratch = prph_scratch;

	/* Allocate IML */
	trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
					     &trans_pcie->iml_dma_addr,
					     GFP_KERNEL);
	if (!trans_pcie->iml) {
		ret = -ENOMEM;
		goto err_free_ctxt_info;
	}

	memcpy(trans_pcie->iml, trans->iml, trans->iml_len);

	iwl_enable_fw_load_int_ctx_info(trans);

	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_ADDR,
		    trans_pcie->ctxt_info_dma_addr);
	iwl_write64(trans, CSR_IML_DATA_ADDR,
		    trans_pcie->iml_dma_addr);
	iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);

	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
		    CSR_AUTO_FUNC_BOOT_ENA);

	return 0;

err_free_ctxt_info:
	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
			  trans_pcie->ctxt_info_gen3,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
	dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
			  trans_pcie->prph_info_dma_addr);

err_free_prph_scratch:
	dma_free_coherent(trans->dev,
			  sizeof(*prph_scratch),
			  prph_scratch,
			  trans_pcie->prph_scratch_dma_addr);
	return ret;
}

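/*
 * Free the boot-time allocations. The IML copy and the firmware image
 * sections always go away; the context info, prph scratch and prph info
 * are kept while the firmware is alive (see the early returns below).
 */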
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->iml) {
		dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
				  trans_pcie->iml_dma_addr);
		trans_pcie->iml_dma_addr = 0;
		trans_pcie->iml = NULL;
	}

	iwl_pcie_ctxt_info_free_fw_img(trans);

	if (alive)
		return;

	if (!trans_pcie->ctxt_info_gen3)
		return;

	/* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */
	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
			  trans_pcie->ctxt_info_gen3,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_dma_addr = 0;
	trans_pcie->ctxt_info_gen3 = NULL;

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
			  trans_pcie->prph_scratch,
			  trans_pcie->prph_scratch_dma_addr);
	trans_pcie->prph_scratch_dma_addr = 0;
	trans_pcie->prph_scratch = NULL;

	/* this is needed for the entire lifetime */
	dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
			  trans_pcie->prph_info_dma_addr);
	trans_pcie->prph_info_dma_addr = 0;
	trans_pcie->prph_info = NULL;
}

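/*
 * PNVM / reduce-power payloads can be laid out in DRAM in two ways:
 * concatenated into a single buffer (exactly two chunks expected), or,
 * when the firmware advertises IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG,
 * as one buffer per chunk plus a descriptor array holding their addresses.
 */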
static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans,
					       const struct iwl_pnvm_image *pnvm_data,
					       struct iwl_dram_data *dram)
{
	u32 len, len0, len1;

	if (pnvm_data->n_chunks != UNFRAGMENTED_PNVM_PAYLOADS_NUMBER) {
		IWL_DEBUG_FW(trans, "expected 2 payloads, got %d.\n",
			     pnvm_data->n_chunks);
		return -EINVAL;
	}

	len0 = pnvm_data->chunks[0].len;
	len1 = pnvm_data->chunks[1].len;
	if (len1 > 0xFFFFFFFF - len0) {
		IWL_DEBUG_FW(trans, "sizes of payloads overflow.\n");
		return -EINVAL;
	}
	len = len0 + len1;

	dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
							    &dram->physical);
	if (!dram->block) {
		IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
		return -ENOMEM;
	}

	dram->size = len;
	memcpy(dram->block, pnvm_data->chunks[0].data, len0);
	memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1);

	return 0;
}

static int iwl_pcie_load_payloads_segments
				(struct iwl_trans *trans,
				 struct iwl_dram_regions *dram_regions,
				 const struct iwl_pnvm_image *pnvm_data)
{
	struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0];
	struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
	struct iwl_prph_scrath_mem_desc_addr_array *addresses;
	const void *data;
	u32 len;
	int i;

	/* allocate and init DRAM descriptors array */
	len = sizeof(struct iwl_prph_scrath_mem_desc_addr_array);
	desc_dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent
						(trans,
						 len,
						 &desc_dram->physical);
	if (!desc_dram->block) {
		IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
		return -ENOMEM;
	}
	desc_dram->size = len;
	memset(desc_dram->block, 0, len);

	/* allocate DRAM region for each payload */
	dram_regions->n_regions = 0;
	for (i = 0; i < pnvm_data->n_chunks; i++) {
		len = pnvm_data->chunks[i].len;
		data = pnvm_data->chunks[i].data;

		if (iwl_pcie_ctxt_info_alloc_dma(trans,
						 data,
						 len,
						 cur_payload_dram)) {
			iwl_trans_pcie_free_pnvm_dram_regions(dram_regions,
							      trans->dev);
			return -ENOMEM;
		}

		dram_regions->n_regions++;
		cur_payload_dram++;
	}

	/* fill desc with the DRAM payloads addresses */
	addresses = desc_dram->block;
	for (i = 0; i < pnvm_data->n_chunks; i++) {
		addresses->mem_descs[i] =
			cpu_to_le64(dram_regions->drams[i].physical);
	}

	return 0;
}

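/*
 * Load the PNVM image into DRAM. Only the allocation and copy happen
 * here; the addresses are written into the prph scratch later by
 * iwl_trans_pcie_ctx_info_gen3_set_pnvm().
 */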
int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
					   const struct iwl_pnvm_image *pnvm_payloads,
					   const struct iwl_ucode_capabilities *capa)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
	int ret = 0;

	/* only allocate the DRAM if not allocated yet */
	if (trans->pnvm_loaded)
		return 0;

	if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
		return -EBUSY;

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return 0;

	if (!pnvm_payloads->n_chunks) {
		IWL_DEBUG_FW(trans, "no payloads\n");
		return -EINVAL;
	}

	/* save payloads in several DRAM sections */
	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
		ret = iwl_pcie_load_payloads_segments(trans,
						      dram_regions,
						      pnvm_payloads);
		if (!ret)
			trans->pnvm_loaded = true;
	} else {
		/* save only in one DRAM section */
		ret = iwl_pcie_load_payloads_continuously
						(trans,
						 pnvm_payloads,
						 &dram_regions->drams[0]);
		if (!ret) {
			dram_regions->n_regions = 1;
			trans->pnvm_loaded = true;
		}
	}

	return ret;
}

static inline size_t
iwl_dram_regions_size(const struct iwl_dram_regions *dram_regions)
{
	size_t total_size = 0;
	int i;

	for (i = 0; i < dram_regions->n_regions; i++)
		total_size += dram_regions->drams[i].size;

	return total_size;
}

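/*
 * Point the prph scratch PNVM config at the previously loaded data:
 * either at the descriptor array (fragmented image) or directly at the
 * single contiguous buffer.
 */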
static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;

	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
		cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
	prph_sc_ctrl->pnvm_cfg.pnvm_size =
		cpu_to_le32(iwl_dram_regions_size(dram_regions));
}

static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;

	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
		cpu_to_le64(trans_pcie->pnvm_data.drams[0].physical);
	prph_sc_ctrl->pnvm_cfg.pnvm_size =
		cpu_to_le32(trans_pcie->pnvm_data.drams[0].size);
}

void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
					   const struct iwl_ucode_capabilities *capa)
{
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return;

	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
		iwl_pcie_set_pnvm_segments(trans);
	else
		iwl_pcie_set_continuous_pnvm(trans);
}

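/*
 * The reduce-power tables use the same two DRAM layouts and the same
 * load/set split as the PNVM image above.
 */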
int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
						   const struct iwl_pnvm_image *payloads,
						   const struct iwl_ucode_capabilities *capa)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
	int ret = 0;

	/* only allocate the DRAM if not allocated yet */
	if (trans->reduce_power_loaded)
		return 0;

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return 0;

	if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
		return -EBUSY;

	if (!payloads->n_chunks) {
		IWL_DEBUG_FW(trans, "no payloads\n");
		return -EINVAL;
	}

	/* save payloads in several DRAM sections */
	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
		ret = iwl_pcie_load_payloads_segments(trans,
						      dram_regions,
						      payloads);
		if (!ret)
			trans->reduce_power_loaded = true;
	} else {
		/* save only in one DRAM section */
		ret = iwl_pcie_load_payloads_continuously
						(trans,
						 payloads,
						 &dram_regions->drams[0]);
		if (!ret) {
			dram_regions->n_regions = 1;
			trans->reduce_power_loaded = true;
		}
	}

	return ret;
}

static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;

	prph_sc_ctrl->reduce_power_cfg.base_addr =
		cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
	prph_sc_ctrl->reduce_power_cfg.size =
		cpu_to_le32(iwl_dram_regions_size(dram_regions));
}

static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;

	prph_sc_ctrl->reduce_power_cfg.base_addr =
		cpu_to_le64(trans_pcie->reduced_tables_data.drams[0].physical);
	prph_sc_ctrl->reduce_power_cfg.size =
		cpu_to_le32(trans_pcie->reduced_tables_data.drams[0].size);
}

void
iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
					      const struct iwl_ucode_capabilities *capa)
{
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return;

	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
		iwl_pcie_set_reduce_power_segments(trans);
	else
		iwl_pcie_set_continuous_reduce_power(trans);
}