// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2024 Intel Corporation
 */
#include <linux/dmi.h>
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
#include "iwl-prph.h"

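/*
 * Platforms approved for forcing the SCU to stay active (see
 * IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE below), matched by DMI system vendor.
 */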
static const struct dmi_system_id dmi_force_scu_active_approved_list[] = {
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
		},
	},
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
		},
	},
	/* keep last */
	{}
};

static bool iwl_is_force_scu_active_approved(void)
{
	return !!dmi_check_system(dmi_force_scu_active_approved_list);
}

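/*
 * Configure the firmware debug (WRT) destination in the PRPH scratch:
 * either a DRAM monitor buffer, internal SMEM or the NPK/TB22DTF path,
 * depending on the debug TLV configuration. Sets the matching
 * IWL_PRPH_SCRATCH_EDBG_DEST_* and EARLY_DEBUG flags in *control_flags.
 */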
static void
iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
			      struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
			      u32 *control_flags)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 dbg_flags = 0;

	if (!iwl_trans_dbg_ini_valid(trans)) {
		struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (fw_mon->size) {
			dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;

			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM buffer destination\n");

			dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical);
			dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size);
		}

		goto out;
	}

	fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];

	switch (le32_to_cpu(fw_mon_cfg->buf_location)) {
	case IWL_FW_INI_LOCATION_SRAM_PATH:
		dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
		IWL_DEBUG_FW(trans,
			     "WRT: Applying SMEM buffer destination\n");
		break;

	case IWL_FW_INI_LOCATION_NPK_PATH:
		dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF;
		IWL_DEBUG_FW(trans,
			     "WRT: Applying NPK buffer destination\n");
		break;

	case IWL_FW_INI_LOCATION_DRAM_PATH:
		if (trans->dbg.fw_mon_ini[alloc_id].num_frags) {
			struct iwl_dram_data *frag =
				&trans->dbg.fw_mon_ini[alloc_id].frags[0];
			dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
			dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
			dbg_cfg->hwm_size = cpu_to_le32(frag->size);
			dbg_cfg->debug_token_config = cpu_to_le32(trans->dbg.ucode_preset);
			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM destination (debug_token_config=%u)\n",
				     dbg_cfg->debug_token_config);
			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
				     alloc_id,
				     trans->dbg.fw_mon_ini[alloc_id].num_frags);
		}
		break;
	default:
		IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n",
			     le32_to_cpu(fw_mon_cfg->buf_location));
	}
out:
	if (dbg_flags)
		*control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
}

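/*
 * Build and hand over the gen3 context info: allocate the PRPH scratch,
 * PRPH info, context info and IML in coherent DMA memory, fill them in
 * (including the firmware sections in DRAM), write their addresses to the
 * CSRs and kick the firmware self load.
 */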
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
				 const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info_gen3 *ctxt_info_gen3;
	struct iwl_prph_scratch *prph_scratch;
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	struct iwl_prph_info *prph_info;
	u32 control_flags = 0;
	int ret;
	int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
			      trans->cfg->min_txq_size);

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_DEF:
		return -EINVAL;
	case IWL_AMSDU_2K:
		break;
	case IWL_AMSDU_4K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		/* if firmware supports the ext size, tell it */
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
		break;
	case IWL_AMSDU_12K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		/* if firmware supports the ext size, tell it */
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K;
		break;
	}

	/* Allocate prph scratch */
	prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
					  &trans_pcie->prph_scratch_dma_addr,
					  GFP_KERNEL);
	if (!prph_scratch)
		return -ENOMEM;

	prph_sc_ctrl = &prph_scratch->ctrl_cfg;

	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id =
		cpu_to_le16((u16)trans->hw_rev);
	prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);

	control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
	control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;

	if (trans->trans_cfg->imr_enabled)
		control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;

	if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
	    iwl_is_force_scu_active_approved()) {
		control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE;
		IWL_DEBUG_FW(trans,
			     "Context Info: Set SCU_FORCE_ACTIVE (0x%x) in control_flags\n",
			     IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE);
	}

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
		cpu_to_le64(trans_pcie->rxq->bd_dma);

	iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
				      &control_flags);
	prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);

	/* initialize the Step equalizer data */
	prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step);
	prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
	if (ret)
		goto err_free_prph_scratch;

	/* Allocate prph information
	 * currently we don't assign anything to the prph info, but it will
	 * get assigned later
	 *
	 * We also use the second half of this page to give the device some
	 * dummy TR/CR tail pointers - which shouldn't be necessary as we don't
	 * use this, but the hardware still reads/writes there and we can't let
	 * it go do that with a NULL pointer.
	 */
	BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
	prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
				       &trans_pcie->prph_info_dma_addr,
				       GFP_KERNEL);
	if (!prph_info) {
		ret = -ENOMEM;
		goto err_free_prph_scratch;
	}

	/* Allocate context info */
	ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
					    sizeof(*ctxt_info_gen3),
					    &trans_pcie->ctxt_info_dma_addr,
					    GFP_KERNEL);
	if (!ctxt_info_gen3) {
		ret = -ENOMEM;
		goto err_free_prph_info;
	}

	ctxt_info_gen3->prph_info_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr);
	ctxt_info_gen3->prph_scratch_base_addr =
		cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
	ctxt_info_gen3->prph_scratch_size =
		cpu_to_le32(sizeof(*prph_scratch));
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
		cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
	ctxt_info_gen3->mcr_base_addr =
		cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	ctxt_info_gen3->mtr_size =
		cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
	ctxt_info_gen3->mcr_size =
		cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds));

	trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
	trans_pcie->prph_info = prph_info;
	trans_pcie->prph_scratch = prph_scratch;

	/* Allocate IML */
	trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
					     &trans_pcie->iml_dma_addr,
					     GFP_KERNEL);
	if (!trans_pcie->iml) {
		ret = -ENOMEM;
		goto err_free_ctxt_info;
	}

	memcpy(trans_pcie->iml, trans->iml, trans->iml_len);

	iwl_enable_fw_load_int_ctx_info(trans);

	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_ADDR,
		    trans_pcie->ctxt_info_dma_addr);
	iwl_write64(trans, CSR_IML_DATA_ADDR,
		    trans_pcie->iml_dma_addr);
	iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);

	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
		    CSR_AUTO_FUNC_BOOT_ENA);

	return 0;

err_free_ctxt_info:
	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
			  trans_pcie->ctxt_info_gen3,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
	dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
			  trans_pcie->prph_info_dma_addr);

err_free_prph_scratch:
	dma_free_coherent(trans->dev,
			  sizeof(*prph_scratch),
			  prph_scratch,
			  trans_pcie->prph_scratch_dma_addr);
	return ret;
}

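/*
 * Free the context info structures. With alive == true only the IML is
 * released; the rest is kept since the firmware is still using it (see
 * the comments below). With alive == false everything is torn down.
 */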
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->iml) {
		dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
				  trans_pcie->iml_dma_addr);
		trans_pcie->iml_dma_addr = 0;
		trans_pcie->iml = NULL;
	}

	iwl_pcie_ctxt_info_free_fw_img(trans);

	if (alive)
		return;

	if (!trans_pcie->ctxt_info_gen3)
		return;

	/*
	 * ctxt_info_gen3 and prph_scratch were kept while the firmware was
	 * alive since they are still needed for the PNVM load; free them
	 * only on a full teardown.
	 */
	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
			  trans_pcie->ctxt_info_gen3,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_dma_addr = 0;
	trans_pcie->ctxt_info_gen3 = NULL;

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
			  trans_pcie->prph_scratch,
			  trans_pcie->prph_scratch_dma_addr);
	trans_pcie->prph_scratch_dma_addr = 0;
	trans_pcie->prph_scratch = NULL;

	/*
	 * prph_info is needed for the device's entire lifetime (the hardware
	 * keeps reading/writing the dummy TR/CR tail pointers in its second
	 * half), so it is only freed here.
	 */
	dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
			  trans_pcie->prph_info_dma_addr);
	trans_pcie->prph_info_dma_addr = 0;
	trans_pcie->prph_info = NULL;
}

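/*
 * Copy an unfragmented (exactly two chunk) PNVM image into a single
 * contiguous coherent DMA block.
 */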
static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans,
					       const struct iwl_pnvm_image *pnvm_data,
					       struct iwl_dram_data *dram)
{
	u32 len, len0, len1;

	if (pnvm_data->n_chunks != UNFRAGMENTED_PNVM_PAYLOADS_NUMBER) {
		IWL_DEBUG_FW(trans, "expected 2 payloads, got %d.\n",
			     pnvm_data->n_chunks);
		return -EINVAL;
	}

	len0 = pnvm_data->chunks[0].len;
	len1 = pnvm_data->chunks[1].len;
	if (len1 > 0xFFFFFFFF - len0) {
		IWL_DEBUG_FW(trans, "sizes of payloads overflow.\n");
		return -EINVAL;
	}
	len = len0 + len1;

	dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
							    &dram->physical);
	if (!dram->block) {
		IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
		return -ENOMEM;
	}

	dram->size = len;
	memcpy(dram->block, pnvm_data->chunks[0].data, len0);
	memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1);

	return 0;
}

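/*
 * Load a fragmented image: allocate one coherent DMA region per payload
 * chunk, plus a descriptor array holding the DMA addresses of all the
 * regions, which is what the PRPH scratch will point to.
 */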
static int iwl_pcie_load_payloads_segments(struct iwl_trans *trans,
					   struct iwl_dram_regions *dram_regions,
					   const struct iwl_pnvm_image *pnvm_data)
{
	struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0];
	struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
	struct iwl_prph_scrath_mem_desc_addr_array *addresses;
	const void *data;
	u32 len;
	int i;

	/* allocate and init DRAM descriptors array */
	len = sizeof(struct iwl_prph_scrath_mem_desc_addr_array);
	desc_dram->block =
		iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
						      &desc_dram->physical);
	if (!desc_dram->block) {
		IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
		return -ENOMEM;
	}
	desc_dram->size = len;
	memset(desc_dram->block, 0, len);

	/* allocate DRAM region for each payload */
	dram_regions->n_regions = 0;
	for (i = 0; i < pnvm_data->n_chunks; i++) {
		len = pnvm_data->chunks[i].len;
		data = pnvm_data->chunks[i].data;

		if (iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
						 cur_payload_dram)) {
			iwl_trans_pcie_free_pnvm_dram_regions(dram_regions,
							      trans->dev);
			return -ENOMEM;
		}

		dram_regions->n_regions++;
		cur_payload_dram++;
	}

	/* fill desc with the DRAM payloads addresses */
	addresses = desc_dram->block;
	for (i = 0; i < pnvm_data->n_chunks; i++) {
		addresses->mem_descs[i] =
			cpu_to_le64(dram_regions->drams[i].physical);
	}

	return 0;
}

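/*
 * Allocate DRAM for the PNVM image and copy the payloads into it, either
 * fragmented (one region per chunk plus a descriptor array) or as one
 * contiguous region, depending on the firmware capabilities.
 */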
int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
					   const struct iwl_pnvm_image *pnvm_payloads,
					   const struct iwl_ucode_capabilities *capa)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
	int ret = 0;

	/* only allocate the DRAM if not allocated yet */
	if (trans->pnvm_loaded)
		return 0;

	if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
		return -EBUSY;

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return 0;

	if (!pnvm_payloads->n_chunks) {
		IWL_DEBUG_FW(trans, "no payloads\n");
		return -EINVAL;
	}

	/* save payloads in several DRAM sections */
	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
		ret = iwl_pcie_load_payloads_segments(trans,
						      dram_regions,
						      pnvm_payloads);
		if (!ret)
			trans->pnvm_loaded = true;
	} else {
		/* save only in one DRAM section */
		ret = iwl_pcie_load_payloads_continuously(trans, pnvm_payloads,
							  &dram_regions->drams[0]);
		if (!ret) {
			dram_regions->n_regions = 1;
			trans->pnvm_loaded = true;
		}
	}

	return ret;
}

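/* Total payload size across all DRAM regions */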
static inline size_t
iwl_dram_regions_size(const struct iwl_dram_regions *dram_regions)
{
	size_t total_size = 0;
	int i;

	for (i = 0; i < dram_regions->n_regions; i++)
		total_size += dram_regions->drams[i].size;

	return total_size;
}

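/*
 * Fragmented PNVM: point the firmware (via the PRPH scratch) at the
 * descriptor array and report the total payload size.
 */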
static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;

	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
		cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
	prph_sc_ctrl->pnvm_cfg.pnvm_size =
		cpu_to_le32(iwl_dram_regions_size(dram_regions));
}

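/* Unfragmented PNVM: point the firmware at the single contiguous region */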
static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;

	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
		cpu_to_le64(trans_pcie->pnvm_data.drams[0].physical);
	prph_sc_ctrl->pnvm_cfg.pnvm_size =
		cpu_to_le32(trans_pcie->pnvm_data.drams[0].size);
}

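/*
 * Publish the PNVM location to the firmware in the layout matching how it
 * was loaded (fragmented or contiguous).
 */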
void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
					   const struct iwl_ucode_capabilities *capa)
{
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return;

	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
		iwl_pcie_set_pnvm_segments(trans);
	else
		iwl_pcie_set_continuous_pnvm(trans);
}

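/*
 * Same flow as iwl_trans_pcie_ctx_info_gen3_load_pnvm(), but for the
 * reduce power tables.
 */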
int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
						   const struct iwl_pnvm_image *payloads,
						   const struct iwl_ucode_capabilities *capa)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
	int ret = 0;

	/* only allocate the DRAM if not allocated yet */
	if (trans->reduce_power_loaded)
		return 0;

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return 0;

	if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
		return -EBUSY;

	if (!payloads->n_chunks) {
		IWL_DEBUG_FW(trans, "no payloads\n");
		return -EINVAL;
	}

	/* save payloads in several DRAM sections */
	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
		ret = iwl_pcie_load_payloads_segments(trans,
						      dram_regions,
						      payloads);
		if (!ret)
			trans->reduce_power_loaded = true;
	} else {
		/* save only in one DRAM section */
		ret = iwl_pcie_load_payloads_continuously(trans, payloads,
							  &dram_regions->drams[0]);
		if (!ret) {
			dram_regions->n_regions = 1;
			trans->reduce_power_loaded = true;
		}
	}

	return ret;
}

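/* Fragmented reduce power tables: point the firmware at the descriptor array */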
static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;

	prph_sc_ctrl->reduce_power_cfg.base_addr =
		cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
	prph_sc_ctrl->reduce_power_cfg.size =
		cpu_to_le32(iwl_dram_regions_size(dram_regions));
}

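/* Unfragmented reduce power tables: a single contiguous region */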
static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;

	prph_sc_ctrl->reduce_power_cfg.base_addr =
		cpu_to_le64(trans_pcie->reduced_tables_data.drams[0].physical);
	prph_sc_ctrl->reduce_power_cfg.size =
		cpu_to_le32(trans_pcie->reduced_tables_data.drams[0].size);
}

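/*
 * Publish the reduce power table location to the firmware in the layout
 * matching how it was loaded.
 */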
void
iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
					      const struct iwl_ucode_capabilities *capa)
{
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return;

	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
		iwl_pcie_set_reduce_power_segments(trans);
	else
		iwl_pcie_set_continuous_reduce_power(trans);
}