xref: /linux/drivers/media/pci/intel/ipu6/ipu6.c (revision bb51f46b5cda61cc041f6722736ef3a2394010ff)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci-ats.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/ipu-bridge.h>
#include <media/ipu6-pci-table.h>

#include "ipu6.h"
#include "ipu6-bus.h"
#include "ipu6-buttress.h"
#include "ipu6-cpd.h"
#include "ipu6-isys.h"
#include "ipu6-mmu.h"
#include "ipu6-platform-buttress-regs.h"
#include "ipu6-platform-isys-csi2-reg.h"
#include "ipu6-platform-regs.h"

#define IPU6_PCI_BAR		0

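/*
 * Layout of a cell program header as it appears inside the CPD firmware
 * image. ipu6_pkg_dir_configure_spc() reads these fields straight from the
 * firmware data, so the field order and sizes must match what the firmware
 * build produces.
 */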
struct ipu6_cell_program {
	u32 magic_number;

	u32 blob_offset;
	u32 blob_size;

	u32 start[3];

	u32 icache_source;
	u32 icache_target;
	u32 icache_size;

	u32 pmem_source;
	u32 pmem_target;
	u32 pmem_size;

	u32 data_source;
	u32 data_target;
	u32 data_size;

	u32 bss_target;
	u32 bss_size;

	u32 cell_id;
	u32 regs_addr;

	u32 cell_pmem_data_bus_address;
	u32 cell_dmem_data_bus_address;
	u32 cell_pmem_control_bus_address;
	u32 cell_dmem_control_bus_address;

	u32 next;
	u32 dummy[2];
};

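/*
 * Static platform data for the input system (ISYS): MMU hardware layout
 * (L1/L2 stream block sizes per IOMMU), CDC FIFO configuration and the
 * DMEM/SPC offsets. ipu6_internal_pdata_init() adjusts parts of this at
 * probe time for the detected hardware variant.
 */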
static struct ipu6_isys_internal_pdata isys_ipdata = {
	.hw_variant = {
		.offset = IPU6_UNIFIED_OFFSET,
		.nr_mmus = 3,
		.mmu_hw = {
			{
				.offset = IPU6_ISYS_IOMMU0_OFFSET,
				.info_bits = IPU6_INFO_REQUEST_DESTINATION_IOSF,
				.nr_l1streams = 16,
				.l1_block_sz = {
					3, 8, 2, 2, 2, 2, 2, 2, 1, 1,
					1, 1, 1, 1, 1, 1
				},
				.nr_l2streams = 16,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
				IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
			},
			{
				.offset = IPU6_ISYS_IOMMU1_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 16,
				.l1_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 1, 1, 4
				},
				.nr_l2streams = 16,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
				IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
			},
			{
				.offset = IPU6_ISYS_IOMMUI_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 0,
				.nr_l2streams = 0,
				.insert_read_before_invalidate = false,
			},
		},
		.cdc_fifos = 3,
		.cdc_fifo_threshold = {6, 8, 2},
		.dmem_offset = IPU6_ISYS_DMEM_OFFSET,
		.spc_offset = IPU6_ISYS_SPC_OFFSET,
	},
	.isys_dma_overshoot = IPU6_ISYS_OVERALLOC_MIN,
};

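/*
 * Static platform data for the processing system (PSYS). Same structure as
 * the ISYS variant above, but covering the four PSYS IOMMUs.
 */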
static struct ipu6_psys_internal_pdata psys_ipdata = {
	.hw_variant = {
		.offset = IPU6_UNIFIED_OFFSET,
		.nr_mmus = 4,
		.mmu_hw = {
			{
				.offset = IPU6_PSYS_IOMMU0_OFFSET,
				.info_bits =
				IPU6_INFO_REQUEST_DESTINATION_IOSF,
				.nr_l1streams = 16,
				.l1_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.nr_l2streams = 16,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
				IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
			},
			{
				.offset = IPU6_PSYS_IOMMU1_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 32,
				.l1_block_sz = {
					1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 10,
					5, 4, 14, 6, 4, 14, 6, 4, 8,
					4, 2, 1, 1, 1, 1, 14
				},
				.nr_l2streams = 32,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
				IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET,
			},
			{
				.offset = IPU6_PSYS_IOMMU1R_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 16,
				.l1_block_sz = {
					1, 4, 4, 4, 4, 16, 8, 4, 32,
					16, 16, 2, 2, 2, 1, 12
				},
				.nr_l2streams = 16,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
				IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
			},
			{
				.offset = IPU6_PSYS_IOMMUI_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 0,
				.nr_l2streams = 0,
				.insert_read_before_invalidate = false,
			},
		},
		.dmem_offset = IPU6_PSYS_DMEM_OFFSET,
	},
};

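/*
 * Buttress power control parameters for the IS and PS power domains:
 * default frequency ratio, QoS floor and the power status register
 * fields used to poll for power-up/power-down completion.
 */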
static const struct ipu6_buttress_ctrl isys_buttress_ctrl = {
	.ratio = IPU6_IS_FREQ_CTL_DEFAULT_RATIO,
	.qos_floor = IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
	.freq_ctl = IPU6_BUTTRESS_REG_IS_FREQ_CTL,
	.pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT,
	.pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK,
	.pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
	.pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
};

static const struct ipu6_buttress_ctrl psys_buttress_ctrl = {
	.ratio = IPU6_PS_FREQ_CTL_DEFAULT_RATIO,
	.qos_floor = IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
	.freq_ctl = IPU6_BUTTRESS_REG_PS_FREQ_CTL,
	.pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT,
	.pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK,
	.pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
	.pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
};

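/*
 * Locate the ISYS or PSYS server program inside the CPD firmware image via
 * its package directory entry, then point the SPC at it: program the icache
 * base and icache master, set the start PC and finally write the package
 * directory address into DMEM for the firmware to pick up.
 */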
static void
ipu6_pkg_dir_configure_spc(struct ipu6_device *isp,
			   const struct ipu6_hw_variants *hw_variant,
			   int pkg_dir_idx, void __iomem *base,
			   u64 *pkg_dir, dma_addr_t pkg_dir_vied_address)
{
	struct ipu6_cell_program *prog;
	void __iomem *spc_base;
	u32 server_fw_addr;
	dma_addr_t dma_addr;
	u32 pg_offset;

	server_fw_addr = lower_32_bits(*(pkg_dir + (pkg_dir_idx + 1) * 2));
	if (pkg_dir_idx == IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX)
		dma_addr = sg_dma_address(isp->isys->fw_sgt.sgl);
	else
		dma_addr = sg_dma_address(isp->psys->fw_sgt.sgl);

	pg_offset = server_fw_addr - dma_addr;
	prog = (struct ipu6_cell_program *)((u64)isp->cpd_fw->data + pg_offset);
	spc_base = base + prog->regs_addr;
	if (spc_base != (base + hw_variant->spc_offset))
		dev_warn(&isp->pdev->dev,
			 "SPC reg addr %p not matching value from CPD %p\n",
			 base + hw_variant->spc_offset, spc_base);
	writel(server_fw_addr + prog->blob_offset +
	       prog->icache_source, spc_base + IPU6_PSYS_REG_SPC_ICACHE_BASE);
	writel(IPU6_INFO_REQUEST_DESTINATION_IOSF,
	       spc_base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER);
	writel(prog->start[1], spc_base + IPU6_PSYS_REG_SPC_START_PC);
	writel(pkg_dir_vied_address, base + hw_variant->dmem_offset);
}

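/*
 * Prepare the SPC before firmware boot: invalidate its icache and then
 * either write the fixed IMR package directory offset into DMEM (secure
 * mode) or configure the SPC from the package directory built by the
 * driver.
 */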
void ipu6_configure_spc(struct ipu6_device *isp,
			const struct ipu6_hw_variants *hw_variant,
			int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
			dma_addr_t pkg_dir_dma_addr)
{
	void __iomem *dmem_base = base + hw_variant->dmem_offset;
	void __iomem *spc_regs_base = base + hw_variant->spc_offset;
	u32 val;

	val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL);
	val |= IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
	writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL);

	if (isp->secure_mode)
		writel(IPU6_PKG_DIR_IMR_OFFSET, dmem_base);
	else
		ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base,
					   pkg_dir, pkg_dir_dma_addr);
}
EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, INTEL_IPU6);

#define IPU6_ISYS_CSI2_NPORTS		4
#define IPU6SE_ISYS_CSI2_NPORTS		4
#define IPU6_TGL_ISYS_CSI2_NPORTS	8
#define IPU6EP_MTL_ISYS_CSI2_NPORTS	4

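/*
 * Tune the static isys/psys platform data for the detected hardware
 * generation: CSI-2 port count, IRQ register layout, SRAM geometry, LTR
 * value and memory-open threshold all differ between IPU6 (TGL), IPU6SE,
 * IPU6EP (ADL/RPL) and IPU6EP MTL.
 */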
static void ipu6_internal_pdata_init(struct ipu6_device *isp)
{
	u8 hw_ver = isp->hw_ver;

	isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS;
	isys_ipdata.sram_gran_shift = IPU6_SRAM_GRANULARITY_SHIFT;
	isys_ipdata.sram_gran_size = IPU6_SRAM_GRANULARITY_SIZE;
	isys_ipdata.max_sram_size = IPU6_MAX_SRAM_SIZE;
	isys_ipdata.sensor_type_start = IPU6_FW_ISYS_SENSOR_TYPE_START;
	isys_ipdata.sensor_type_end = IPU6_FW_ISYS_SENSOR_TYPE_END;
	isys_ipdata.max_streams = IPU6_ISYS_NUM_STREAMS;
	isys_ipdata.max_send_queues = IPU6_N_MAX_SEND_QUEUES;
	isys_ipdata.max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX;
	isys_ipdata.max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE;
	isys_ipdata.csi2.nports = IPU6_ISYS_CSI2_NPORTS;
	isys_ipdata.csi2.irq_mask = IPU6_CSI_RX_ERROR_IRQ_MASK;
	isys_ipdata.csi2.ctrl0_irq_edge = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
	isys_ipdata.csi2.ctrl0_irq_clear =
		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
	isys_ipdata.csi2.ctrl0_irq_mask = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
	isys_ipdata.csi2.ctrl0_irq_enable =
		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
	isys_ipdata.csi2.ctrl0_irq_status =
		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
	isys_ipdata.csi2.ctrl0_irq_lnp =
		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
	isys_ipdata.enhanced_iwake = is_ipu6ep_mtl(hw_ver) || is_ipu6ep(hw_ver);
	psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET;
	isys_ipdata.csi2.fw_access_port_ofs = CSI_REG_HUB_FW_ACCESS_PORT_OFS;

	if (is_ipu6ep(hw_ver)) {
		isys_ipdata.ltr = IPU6EP_LTR_VALUE;
		isys_ipdata.memopen_threshold = IPU6EP_MIN_MEMOPEN_TH;
	}

	if (is_ipu6_tgl(hw_ver))
		isys_ipdata.csi2.nports = IPU6_TGL_ISYS_CSI2_NPORTS;

	if (is_ipu6ep_mtl(hw_ver)) {
		isys_ipdata.csi2.nports = IPU6EP_MTL_ISYS_CSI2_NPORTS;

		isys_ipdata.csi2.ctrl0_irq_edge =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
		isys_ipdata.csi2.ctrl0_irq_clear =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
		isys_ipdata.csi2.ctrl0_irq_mask =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
		isys_ipdata.csi2.ctrl0_irq_enable =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
		isys_ipdata.csi2.ctrl0_irq_lnp =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
		isys_ipdata.csi2.ctrl0_irq_status =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
		isys_ipdata.csi2.fw_access_port_ofs =
			CSI_REG_HUB_FW_ACCESS_PORT_V6OFS;
		isys_ipdata.ltr = IPU6EP_MTL_LTR_VALUE;
		isys_ipdata.memopen_threshold = IPU6EP_MTL_MIN_MEMOPEN_TH;
	}

	if (is_ipu6se(hw_ver)) {
		isys_ipdata.csi2.nports = IPU6SE_ISYS_CSI2_NPORTS;
		isys_ipdata.csi2.irq_mask = IPU6SE_CSI_RX_ERROR_IRQ_MASK;
		isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS;
		isys_ipdata.sram_gran_shift = IPU6SE_SRAM_GRANULARITY_SHIFT;
		isys_ipdata.sram_gran_size = IPU6SE_SRAM_GRANULARITY_SIZE;
		isys_ipdata.max_sram_size = IPU6SE_MAX_SRAM_SIZE;
		isys_ipdata.sensor_type_start =
			IPU6SE_FW_ISYS_SENSOR_TYPE_START;
		isys_ipdata.sensor_type_end = IPU6SE_FW_ISYS_SENSOR_TYPE_END;
		isys_ipdata.max_streams = IPU6SE_ISYS_NUM_STREAMS;
		isys_ipdata.max_send_queues = IPU6SE_N_MAX_SEND_QUEUES;
		isys_ipdata.max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX;
		isys_ipdata.max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE;
		psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET;
	}
}

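/*
 * Create the ISYS auxiliary device: register the IPU bridge (which parses
 * ACPI SSDB data into sensor software nodes), allocate the ISYS platform
 * data, initialize the ISYS MMU and add the device to the ipu6 bus.
 */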
static struct ipu6_bus_device *
ipu6_isys_init(struct pci_dev *pdev, struct device *parent,
	       struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
	       const struct ipu6_isys_internal_pdata *ipdata)
{
	struct device *dev = &pdev->dev;
	struct ipu6_bus_device *isys_adev;
	struct ipu6_isys_pdata *pdata;
	int ret;

	ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
	if (ret) {
		dev_err_probe(dev, ret, "IPU6 bridge init failed\n");
		return ERR_PTR(ret);
	}

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	pdata->base = base;
	pdata->ipdata = ipdata;

	isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
					       IPU6_ISYS_NAME);
	if (IS_ERR(isys_adev)) {
		dev_err_probe(dev, PTR_ERR(isys_adev),
			      "ipu6_bus_initialize_device isys failed\n");
		kfree(pdata);
		return ERR_CAST(isys_adev);
	}

	isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID,
				       &ipdata->hw_variant);
	if (IS_ERR(isys_adev->mmu)) {
		dev_err_probe(dev, PTR_ERR(isys_adev->mmu),
			      "ipu6_mmu_init(isys_adev->mmu) failed\n");
		put_device(&isys_adev->auxdev.dev);
		kfree(pdata);
		return ERR_CAST(isys_adev->mmu);
	}

	isys_adev->mmu->dev = &isys_adev->auxdev.dev;

	ret = ipu6_bus_add_device(isys_adev);
	if (ret) {
		kfree(pdata);
		return ERR_PTR(ret);
	}

	return isys_adev;
}

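/*
 * Create the PSYS auxiliary device, mirroring ipu6_isys_init() minus the
 * bridge setup: allocate the platform data, initialize the PSYS MMU and add
 * the device to the ipu6 bus.
 */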
static struct ipu6_bus_device *
ipu6_psys_init(struct pci_dev *pdev, struct device *parent,
	       struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
	       const struct ipu6_psys_internal_pdata *ipdata)
{
	struct ipu6_bus_device *psys_adev;
	struct ipu6_psys_pdata *pdata;
	int ret;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	pdata->base = base;
	pdata->ipdata = ipdata;

	psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
					       IPU6_PSYS_NAME);
	if (IS_ERR(psys_adev)) {
		dev_err_probe(&pdev->dev, PTR_ERR(psys_adev),
			      "ipu6_bus_initialize_device psys failed\n");
		kfree(pdata);
		return ERR_CAST(psys_adev);
	}

	psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID,
				       &ipdata->hw_variant);
	if (IS_ERR(psys_adev->mmu)) {
		dev_err_probe(&pdev->dev, PTR_ERR(psys_adev->mmu),
			      "ipu6_mmu_init(psys_adev->mmu) failed\n");
		put_device(&psys_adev->auxdev.dev);
		kfree(pdata);
		return ERR_CAST(psys_adev->mmu);
	}

	psys_adev->mmu->dev = &psys_adev->auxdev.dev;

	ret = ipu6_bus_add_device(psys_adev);
	if (ret) {
		kfree(pdata);
		return ERR_PTR(ret);
	}

	return psys_adev;
}

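/*
 * Apply per-generation PCI quirks: ATS is disabled on MTL ES2 silicon and
 * MSI is only allocated on parts that actually expose the capability.
 */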
static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver)
{
	int ret;

	/* Disable IPU6 PCI ATS on MTL ES2 */
	if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 &&
	    pci_ats_supported(dev))
		pci_disable_ats(dev);

	/* No PCI MSI capability on IPU6EP */
	if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) {
		/* likely a no-op, as MSI is not enabled by default */
		pci_disable_msi(dev);
		return 0;
	}

	ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0)
		return dev_err_probe(&dev->dev, ret, "Request MSI failed\n");

	return 0;
}

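/*
 * Select stall or non-stall arbitration for virtual channels 0 and 1 in the
 * buttress control register, based on the compile-time IPU6_BTRS_ARB_* mode
 * defaults.
 */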
static void ipu6_configure_vc_mechanism(struct ipu6_device *isp)
{
	u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);

	if (IPU6_BTRS_ARB_STALL_MODE_VC0 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
		val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
	else
		val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;

	if (IPU6_BTRS_ARB_STALL_MODE_VC1 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
		val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
	else
		val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;

	writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL);
}

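/*
 * Wrap request_firmware() so that the CPD image is always vmalloc-backed;
 * if the firmware core handed back a physically contiguous buffer, copy it
 * into vmalloc memory. This is presumably what the later firmware mapping
 * code expects when it builds a scatterlist from the image.
 */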
static int request_cpd_fw(const struct firmware **firmware_p, const char *name,
			  struct device *device)
{
	const struct firmware *fw;
	struct firmware *dst;
	int ret = 0;

	ret = request_firmware(&fw, name, device);
	if (ret)
		return ret;

	if (is_vmalloc_addr(fw->data)) {
		*firmware_p = fw;
		return 0;
	}

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst) {
		ret = -ENOMEM;
		goto release_firmware;
	}

	dst->size = fw->size;
	dst->data = vmalloc(fw->size);
	if (!dst->data) {
		kfree(dst);
		ret = -ENOMEM;
		goto release_firmware;
	}

	memcpy((void *)dst->data, fw->data, fw->size);
	*firmware_p = dst;

release_firmware:
	release_firmware(fw);

	return ret;
}

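/*
 * Probe flow: map BAR 0, identify the hardware generation from the PCI
 * device ID, initialize the buttress, load and validate the CPD firmware,
 * create the ISYS and PSYS auxiliary devices, map the firmware and build
 * the package directory, then request the buttress IRQ and authenticate
 * the firmware before handing control to runtime PM.
 */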
static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL;
	struct device *dev = &pdev->dev;
	void __iomem *isys_base = NULL;
	void __iomem *psys_base = NULL;
	struct ipu6_device *isp;
	phys_addr_t phys;
	u32 val, version, sku_id;
	int ret;

	isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
	if (!isp)
		return -ENOMEM;

	isp->pdev = pdev;
	INIT_LIST_HEAD(&isp->devices);

	ret = pcim_enable_device(pdev);
	if (ret)
		return dev_err_probe(dev, ret, "Enable PCI device failed\n");

	phys = pci_resource_start(pdev, IPU6_PCI_BAR);
	dev_dbg(dev, "IPU6 PCI bar[%u] = %pa\n", IPU6_PCI_BAR, &phys);

	ret = pcim_iomap_regions(pdev, 1 << IPU6_PCI_BAR, pci_name(pdev));
	if (ret)
		return dev_err_probe(dev, ret, "Failed to remap I/O memory\n");
	isp->base = pcim_iomap_table(pdev)[IPU6_PCI_BAR];
	pci_set_drvdata(pdev, isp);
	pci_set_master(pdev);

	isp->cpd_metadata_cmpnt_size = sizeof(struct ipu6_cpd_metadata_cmpnt);
	switch (id->device) {
	case PCI_DEVICE_ID_INTEL_IPU6:
		isp->hw_ver = IPU6_VER_6;
		isp->cpd_fw_name = IPU6_FIRMWARE_NAME;
		break;
	case PCI_DEVICE_ID_INTEL_IPU6SE:
		isp->hw_ver = IPU6_VER_6SE;
		isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME;
		isp->cpd_metadata_cmpnt_size =
			sizeof(struct ipu6se_cpd_metadata_cmpnt);
		break;
	case PCI_DEVICE_ID_INTEL_IPU6EP_ADLP:
	case PCI_DEVICE_ID_INTEL_IPU6EP_RPLP:
		isp->hw_ver = IPU6_VER_6EP;
		isp->cpd_fw_name = IPU6EP_FIRMWARE_NAME;
		break;
	case PCI_DEVICE_ID_INTEL_IPU6EP_ADLN:
		isp->hw_ver = IPU6_VER_6EP;
		isp->cpd_fw_name = IPU6EPADLN_FIRMWARE_NAME;
		break;
	case PCI_DEVICE_ID_INTEL_IPU6EP_MTL:
		isp->hw_ver = IPU6_VER_6EP_MTL;
		isp->cpd_fw_name = IPU6EPMTL_FIRMWARE_NAME;
		break;
	default:
		return dev_err_probe(dev, -ENODEV,
				     "Unsupported IPU6 device %x\n",
				     id->device);
	}

	ipu6_internal_pdata_init(isp);

	isys_base = isp->base + isys_ipdata.hw_variant.offset;
	psys_base = isp->base + psys_ipdata.hw_variant.offset;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
	if (ret)
		return dev_err_probe(dev, ret, "Failed to set DMA mask\n");

	ret = dma_set_max_seg_size(dev, UINT_MAX);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to set max_seg_size\n");

	ret = ipu6_pci_config_setup(pdev, isp->hw_ver);
	if (ret)
		return ret;

	ret = ipu6_buttress_init(isp);
	if (ret)
		return ret;

	ret = request_cpd_fw(&isp->cpd_fw, isp->cpd_fw_name, dev);
	if (ret) {
		dev_err_probe(&isp->pdev->dev, ret,
			      "Requesting signed firmware %s failed\n",
			      isp->cpd_fw_name);
		goto buttress_exit;
	}

	ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
					 isp->cpd_fw->size);
	if (ret) {
		dev_err_probe(&isp->pdev->dev, ret,
			      "Failed to validate cpd\n");
		goto out_ipu6_bus_del_devices;
	}

	isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl,
				 sizeof(isys_buttress_ctrl), GFP_KERNEL);
	if (!isys_ctrl) {
		ret = -ENOMEM;
		goto out_ipu6_bus_del_devices;
	}

	isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base,
				   &isys_ipdata);
	if (IS_ERR(isp->isys)) {
		ret = PTR_ERR(isp->isys);
		goto out_ipu6_bus_del_devices;
	}

	psys_ctrl = devm_kmemdup(dev, &psys_buttress_ctrl,
				 sizeof(psys_buttress_ctrl), GFP_KERNEL);
	if (!psys_ctrl) {
		ret = -ENOMEM;
		goto out_ipu6_bus_del_devices;
	}

	isp->psys = ipu6_psys_init(pdev, &isp->isys->auxdev.dev, psys_ctrl,
				   psys_base, &psys_ipdata);
	if (IS_ERR(isp->psys)) {
		ret = PTR_ERR(isp->psys);
		goto out_ipu6_bus_del_devices;
	}

	ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
	if (ret < 0)
		goto out_ipu6_bus_del_devices;

	ret = ipu6_mmu_hw_init(isp->psys->mmu);
	if (ret) {
		dev_err_probe(&isp->pdev->dev, ret,
			      "Failed to set MMU hardware\n");
		goto out_ipu6_bus_del_devices;
	}

	ret = ipu6_buttress_map_fw_image(isp->psys, isp->cpd_fw,
					 &isp->psys->fw_sgt);
	if (ret) {
		dev_err_probe(&isp->pdev->dev, ret, "failed to map fw image\n");
		goto out_ipu6_bus_del_devices;
	}

	ret = ipu6_cpd_create_pkg_dir(isp->psys, isp->cpd_fw->data);
	if (ret) {
		dev_err_probe(&isp->pdev->dev, ret,
			      "failed to create pkg dir\n");
		goto out_ipu6_bus_del_devices;
	}

	ret = devm_request_threaded_irq(dev, pdev->irq, ipu6_buttress_isr,
					ipu6_buttress_isr_threaded,
					IRQF_SHARED, IPU6_NAME, isp);
	if (ret) {
		dev_err_probe(dev, ret, "Requesting irq failed\n");
		goto out_ipu6_bus_del_devices;
	}

	ret = ipu6_buttress_authenticate(isp);
	if (ret) {
		dev_err_probe(&isp->pdev->dev, ret,
			      "FW authentication failed\n");
		goto out_free_irq;
	}

	ipu6_mmu_hw_cleanup(isp->psys->mmu);
	pm_runtime_put(&isp->psys->auxdev.dev);

	/* Configure the arbitration mechanisms for VC requests */
	ipu6_configure_vc_mechanism(isp);

	val = readl(isp->base + BUTTRESS_REG_SKU);
	sku_id = FIELD_GET(GENMASK(6, 4), val);
	version = FIELD_GET(GENMASK(3, 0), val);
	dev_info(dev, "IPU%u-v%u[%x] hardware version %d\n", version, sku_id,
		 pdev->device, isp->hw_ver);

	pm_runtime_put_noidle(dev);
	pm_runtime_allow(dev);

	isp->bus_ready_to_probe = true;

	return 0;

out_free_irq:
	devm_free_irq(dev, pdev->irq, isp);
out_ipu6_bus_del_devices:
	if (!IS_ERR_OR_NULL(isp->psys)) {
		ipu6_cpd_free_pkg_dir(isp->psys);
		ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
	}
	if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu))
		ipu6_mmu_cleanup(isp->psys->mmu);
	if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu))
		ipu6_mmu_cleanup(isp->isys->mmu);
	ipu6_bus_del_devices(pdev);
	release_firmware(isp->cpd_fw);
buttress_exit:
	ipu6_buttress_exit(isp);

	return ret;
}

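/*
 * Tear down in roughly the reverse order of probe: free the IRQ and the
 * package directory, unmap the firmware, remove the auxiliary devices and
 * finally clean up both MMUs.
 */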
static void ipu6_pci_remove(struct pci_dev *pdev)
{
	struct ipu6_device *isp = pci_get_drvdata(pdev);
	struct ipu6_mmu *isys_mmu = isp->isys->mmu;
	struct ipu6_mmu *psys_mmu = isp->psys->mmu;

	devm_free_irq(&pdev->dev, pdev->irq, isp);
	ipu6_cpd_free_pkg_dir(isp->psys);

	ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
	ipu6_buttress_exit(isp);

	ipu6_bus_del_devices(pdev);

	pm_runtime_forbid(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	release_firmware(isp->cpd_fw);

	ipu6_mmu_cleanup(psys_mmu);
	ipu6_mmu_cleanup(isys_mmu);
}

static void ipu6_pci_reset_prepare(struct pci_dev *pdev)
{
	struct ipu6_device *isp = pci_get_drvdata(pdev);

	pm_runtime_forbid(&isp->pdev->dev);
}

static void ipu6_pci_reset_done(struct pci_dev *pdev)
{
	struct ipu6_device *isp = pci_get_drvdata(pdev);

	ipu6_buttress_restore(isp);
	if (isp->secure_mode)
		ipu6_buttress_reset_authentication(isp);

	isp->need_ipc_reset = true;
	pm_runtime_allow(&isp->pdev->dev);
}

/*
 * PCI base driver code requires driver to provide these to enable
 * PCI device level PM state transitions (D0<->D3)
 */
static int ipu6_suspend(struct device *dev)
{
	return 0;
}

static int ipu6_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ipu6_device *isp = pci_get_drvdata(pdev);
	struct ipu6_buttress *b = &isp->buttress;
	int ret;

	/* Configure the arbitration mechanisms for VC requests */
	ipu6_configure_vc_mechanism(isp);

	isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
	dev_info(dev, "IPU6 in %s mode\n",
		 isp->secure_mode ? "secure" : "non-secure");

	ipu6_buttress_restore(isp);

	ret = ipu6_buttress_ipc_reset(isp, &b->cse);
	if (ret)
		dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n");

	ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
	if (ret < 0) {
		dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n");
		return 0;
	}

	ret = ipu6_buttress_authenticate(isp);
	if (ret)
		dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", ret);

	pm_runtime_put(&isp->psys->auxdev.dev);

	return 0;
}

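/*
 * Runtime resume: reprogram VC arbitration, restore the buttress state and
 * redo the CSE IPC reset handshake if a PCI reset occurred since the last
 * resume.
 */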
static int ipu6_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ipu6_device *isp = pci_get_drvdata(pdev);
	int ret;

	ipu6_configure_vc_mechanism(isp);
	ipu6_buttress_restore(isp);

	if (isp->need_ipc_reset) {
		struct ipu6_buttress *b = &isp->buttress;

		isp->need_ipc_reset = false;
		ret = ipu6_buttress_ipc_reset(isp, &b->cse);
		if (ret)
			dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");
	}

	return 0;
}

static const struct dev_pm_ops ipu6_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(&ipu6_suspend, &ipu6_resume)
	RUNTIME_PM_OPS(&ipu6_suspend, &ipu6_runtime_resume, NULL)
};

MODULE_DEVICE_TABLE(pci, ipu6_pci_tbl);

static const struct pci_error_handlers pci_err_handlers = {
	.reset_prepare = ipu6_pci_reset_prepare,
	.reset_done = ipu6_pci_reset_done,
};

static struct pci_driver ipu6_pci_driver = {
	.name = IPU6_NAME,
	.id_table = ipu6_pci_tbl,
	.probe = ipu6_pci_probe,
	.remove = ipu6_pci_remove,
	.driver = {
		.pm = pm_ptr(&ipu6_pm_ops),
	},
	.err_handler = &pci_err_handlers,
};

module_pci_driver(ipu6_pci_driver);

MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
MODULE_AUTHOR("Qingwu Zhang <qingwu.zhang@intel.com>");
MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
MODULE_AUTHOR("Hongju Wang <hongju.wang@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel IPU6 PCI driver");