xref: /linux/drivers/media/pci/intel/ipu6/ipu6.c (revision cdd30ebb1b9f36159d66f088b61aee264e649d7a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013--2024 Intel Corporation
4  */
5 
6 #include <linux/bitfield.h>
7 #include <linux/bits.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/err.h>
10 #include <linux/firmware.h>
11 #include <linux/kernel.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/list.h>
15 #include <linux/module.h>
16 #include <linux/pci-ats.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/property.h>
19 #include <linux/scatterlist.h>
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 
23 #include <media/ipu-bridge.h>
24 #include <media/ipu6-pci-table.h>
25 
26 #include "ipu6.h"
27 #include "ipu6-bus.h"
28 #include "ipu6-buttress.h"
29 #include "ipu6-cpd.h"
30 #include "ipu6-isys.h"
31 #include "ipu6-mmu.h"
32 #include "ipu6-platform-buttress-regs.h"
33 #include "ipu6-platform-isys-csi2-reg.h"
34 #include "ipu6-platform-regs.h"
35 
36 #define IPU6_PCI_BAR		0
37 
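/*
 * Cell program descriptor as laid out in the CPD firmware image; the
 * source/target/size triplets locate the icache, pmem, data and bss
 * sections of the program blob. Used below when configuring the SPC.
 */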
38 struct ipu6_cell_program {
39 	u32 magic_number;
40 
41 	u32 blob_offset;
42 	u32 blob_size;
43 
44 	u32 start[3];
45 
46 	u32 icache_source;
47 	u32 icache_target;
48 	u32 icache_size;
49 
50 	u32 pmem_source;
51 	u32 pmem_target;
52 	u32 pmem_size;
53 
54 	u32 data_source;
55 	u32 data_target;
56 	u32 data_size;
57 
58 	u32 bss_target;
59 	u32 bss_size;
60 
61 	u32 cell_id;
62 	u32 regs_addr;
63 
64 	u32 cell_pmem_data_bus_address;
65 	u32 cell_dmem_data_bus_address;
66 	u32 cell_pmem_control_bus_address;
67 	u32 cell_dmem_control_bus_address;
68 
69 	u32 next;
70 	u32 dummy[2];
71 };
72 
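/*
 * Static ISYS hardware description: MMU stream layout, CDC FIFO
 * thresholds and DMEM/SPC offsets. Hardware version specific fields
 * are filled in by ipu6_internal_pdata_init().
 */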
73 static struct ipu6_isys_internal_pdata isys_ipdata = {
74 	.hw_variant = {
75 		.offset = IPU6_UNIFIED_OFFSET,
76 		.nr_mmus = 3,
77 		.mmu_hw = {
78 			{
79 				.offset = IPU6_ISYS_IOMMU0_OFFSET,
80 				.info_bits = IPU6_INFO_REQUEST_DESTINATION_IOSF,
81 				.nr_l1streams = 16,
82 				.l1_block_sz = {
83 					3, 8, 2, 2, 2, 2, 2, 2, 1, 1,
84 					1, 1, 1, 1, 1, 1
85 				},
86 				.nr_l2streams = 16,
87 				.l2_block_sz = {
88 					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
89 					2, 2, 2, 2, 2, 2
90 				},
91 				.insert_read_before_invalidate = false,
92 				.l1_stream_id_reg_offset =
93 				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
94 				.l2_stream_id_reg_offset =
95 				IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
96 			},
97 			{
98 				.offset = IPU6_ISYS_IOMMU1_OFFSET,
99 				.info_bits = 0,
100 				.nr_l1streams = 16,
101 				.l1_block_sz = {
102 					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 					2, 2, 2, 1, 1, 4
104 				},
105 				.nr_l2streams = 16,
106 				.l2_block_sz = {
107 					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
108 					2, 2, 2, 2, 2, 2
109 				},
110 				.insert_read_before_invalidate = false,
111 				.l1_stream_id_reg_offset =
112 				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
113 				.l2_stream_id_reg_offset =
114 				IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
115 			},
116 			{
117 				.offset = IPU6_ISYS_IOMMUI_OFFSET,
118 				.info_bits = 0,
119 				.nr_l1streams = 0,
120 				.nr_l2streams = 0,
121 				.insert_read_before_invalidate = false,
122 			},
123 		},
124 		.cdc_fifos = 3,
125 		.cdc_fifo_threshold = {6, 8, 2},
126 		.dmem_offset = IPU6_ISYS_DMEM_OFFSET,
127 		.spc_offset = IPU6_ISYS_SPC_OFFSET,
128 	},
129 	.isys_dma_overshoot = IPU6_ISYS_OVERALLOC_MIN,
130 };
131 
132 static struct ipu6_psys_internal_pdata psys_ipdata = {
133 	.hw_variant = {
134 		.offset = IPU6_UNIFIED_OFFSET,
135 		.nr_mmus = 4,
136 		.mmu_hw = {
137 			{
138 				.offset = IPU6_PSYS_IOMMU0_OFFSET,
139 				.info_bits =
140 				IPU6_INFO_REQUEST_DESTINATION_IOSF,
141 				.nr_l1streams = 16,
142 				.l1_block_sz = {
143 					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
144 					2, 2, 2, 2, 2, 2
145 				},
146 				.nr_l2streams = 16,
147 				.l2_block_sz = {
148 					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
149 					2, 2, 2, 2, 2, 2
150 				},
151 				.insert_read_before_invalidate = false,
152 				.l1_stream_id_reg_offset =
153 				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
154 				.l2_stream_id_reg_offset =
155 				IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
156 			},
157 			{
158 				.offset = IPU6_PSYS_IOMMU1_OFFSET,
159 				.info_bits = 0,
160 				.nr_l1streams = 32,
161 				.l1_block_sz = {
162 					1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
163 					2, 2, 2, 2, 2, 10,
164 					5, 4, 14, 6, 4, 14, 6, 4, 8,
165 					4, 2, 1, 1, 1, 1, 14
166 				},
167 				.nr_l2streams = 32,
168 				.l2_block_sz = {
169 					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
170 					2, 2, 2, 2, 2, 2,
171 					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
172 					2, 2, 2, 2, 2, 2
173 				},
174 				.insert_read_before_invalidate = false,
175 				.l1_stream_id_reg_offset =
176 				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
177 				.l2_stream_id_reg_offset =
178 				IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET,
179 			},
180 			{
181 				.offset = IPU6_PSYS_IOMMU1R_OFFSET,
182 				.info_bits = 0,
183 				.nr_l1streams = 16,
184 				.l1_block_sz = {
185 					1, 4, 4, 4, 4, 16, 8, 4, 32,
186 					16, 16, 2, 2, 2, 1, 12
187 				},
188 				.nr_l2streams = 16,
189 				.l2_block_sz = {
190 					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
191 					2, 2, 2, 2, 2, 2
192 				},
193 				.insert_read_before_invalidate = false,
194 				.l1_stream_id_reg_offset =
195 				IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
196 				.l2_stream_id_reg_offset =
197 				IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
198 			},
199 			{
200 				.offset = IPU6_PSYS_IOMMUI_OFFSET,
201 				.info_bits = 0,
202 				.nr_l1streams = 0,
203 				.nr_l2streams = 0,
204 				.insert_read_before_invalidate = false,
205 			},
206 		},
207 		.dmem_offset = IPU6_PSYS_DMEM_OFFSET,
208 	},
209 };
210 
211 static const struct ipu6_buttress_ctrl isys_buttress_ctrl = {
212 	.ratio = IPU6_IS_FREQ_CTL_DEFAULT_RATIO,
213 	.qos_floor = IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
214 	.freq_ctl = IPU6_BUTTRESS_REG_IS_FREQ_CTL,
215 	.pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT,
216 	.pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK,
217 	.pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
218 	.pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
219 };
220 
221 static const struct ipu6_buttress_ctrl psys_buttress_ctrl = {
222 	.ratio = IPU6_PS_FREQ_CTL_DEFAULT_RATIO,
223 	.qos_floor = IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
224 	.freq_ctl = IPU6_BUTTRESS_REG_PS_FREQ_CTL,
225 	.pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT,
226 	.pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK,
227 	.pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
228 	.pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
229 };
230 
231 static void
232 ipu6_pkg_dir_configure_spc(struct ipu6_device *isp,
233 			   const struct ipu6_hw_variants *hw_variant,
234 			   int pkg_dir_idx, void __iomem *base,
235 			   u64 *pkg_dir, dma_addr_t pkg_dir_vied_address)
236 {
237 	struct ipu6_cell_program *prog;
238 	void __iomem *spc_base;
239 	u32 server_fw_addr;
240 	dma_addr_t dma_addr;
241 	u32 pg_offset;
242 
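	/*
	 * pkg_dir entries are pairs of 64-bit words, the first of which
	 * holds the DMA address of the corresponding server firmware
	 * program. Subtracting the DMA base of the firmware scatterlist
	 * gives the program's offset within the CPD image, used below to
	 * locate the cell program header in the CPU copy of the firmware.
	 */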
243 	server_fw_addr = lower_32_bits(*(pkg_dir + (pkg_dir_idx + 1) * 2));
244 	if (pkg_dir_idx == IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX)
245 		dma_addr = sg_dma_address(isp->isys->fw_sgt.sgl);
246 	else
247 		dma_addr = sg_dma_address(isp->psys->fw_sgt.sgl);
248 
249 	pg_offset = server_fw_addr - dma_addr;
250 	prog = (struct ipu6_cell_program *)((uintptr_t)isp->cpd_fw->data +
251 					    pg_offset);
252 	spc_base = base + prog->regs_addr;
253 	if (spc_base != (base + hw_variant->spc_offset))
254 		dev_warn(&isp->pdev->dev,
255 			 "SPC reg addr %p not matching value from CPD %p\n",
256 			 base + hw_variant->spc_offset, spc_base);
257 	writel(server_fw_addr + prog->blob_offset +
258 	       prog->icache_source, spc_base + IPU6_PSYS_REG_SPC_ICACHE_BASE);
259 	writel(IPU6_INFO_REQUEST_DESTINATION_IOSF,
260 	       spc_base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER);
261 	writel(prog->start[1], spc_base + IPU6_PSYS_REG_SPC_START_PC);
262 	writel(pkg_dir_vied_address, base + hw_variant->dmem_offset);
263 }
264 
265 void ipu6_configure_spc(struct ipu6_device *isp,
266 			const struct ipu6_hw_variants *hw_variant,
267 			int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
268 			dma_addr_t pkg_dir_dma_addr)
269 {
270 	void __iomem *dmem_base = base + hw_variant->dmem_offset;
271 	void __iomem *spc_regs_base = base + hw_variant->spc_offset;
272 	u32 val;
273 
274 	val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL);
275 	val |= IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
276 	writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL);
277 
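	/*
	 * In secure mode the package directory is expected at a fixed
	 * IMR offset, so only that offset is written to SPC DMEM;
	 * otherwise the SPC is programmed from the cell program found in
	 * the CPD firmware and the package directory DMA address is
	 * written instead.
	 */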
278 	if (isp->secure_mode)
279 		writel(IPU6_PKG_DIR_IMR_OFFSET, dmem_base);
280 	else
281 		ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base,
282 					   pkg_dir, pkg_dir_dma_addr);
283 }
284 EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, "INTEL_IPU6");
285 
286 #define IPU6_ISYS_CSI2_NPORTS		4
287 #define IPU6SE_ISYS_CSI2_NPORTS		4
288 #define IPU6_TGL_ISYS_CSI2_NPORTS	8
289 #define IPU6EP_MTL_ISYS_CSI2_NPORTS	6
290 
291 static void ipu6_internal_pdata_init(struct ipu6_device *isp)
292 {
293 	u8 hw_ver = isp->hw_ver;
294 
295 	isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS;
296 	isys_ipdata.sram_gran_shift = IPU6_SRAM_GRANULARITY_SHIFT;
297 	isys_ipdata.sram_gran_size = IPU6_SRAM_GRANULARITY_SIZE;
298 	isys_ipdata.max_sram_size = IPU6_MAX_SRAM_SIZE;
299 	isys_ipdata.sensor_type_start = IPU6_FW_ISYS_SENSOR_TYPE_START;
300 	isys_ipdata.sensor_type_end = IPU6_FW_ISYS_SENSOR_TYPE_END;
301 	isys_ipdata.max_streams = IPU6_ISYS_NUM_STREAMS;
302 	isys_ipdata.max_send_queues = IPU6_N_MAX_SEND_QUEUES;
303 	isys_ipdata.max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX;
304 	isys_ipdata.max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE;
305 	isys_ipdata.csi2.nports = IPU6_ISYS_CSI2_NPORTS;
306 	isys_ipdata.csi2.irq_mask = IPU6_CSI_RX_ERROR_IRQ_MASK;
307 	isys_ipdata.csi2.ctrl0_irq_edge = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
308 	isys_ipdata.csi2.ctrl0_irq_clear =
309 		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
310 	isys_ipdata.csi2.ctrl0_irq_mask = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
311 	isys_ipdata.csi2.ctrl0_irq_enable =
312 		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
313 	isys_ipdata.csi2.ctrl0_irq_status =
314 		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
315 	isys_ipdata.csi2.ctrl0_irq_lnp =
316 		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
317 	isys_ipdata.enhanced_iwake = is_ipu6ep_mtl(hw_ver) || is_ipu6ep(hw_ver);
318 	psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET;
319 	isys_ipdata.csi2.fw_access_port_ofs = CSI_REG_HUB_FW_ACCESS_PORT_OFS;
320 
321 	if (is_ipu6ep(hw_ver)) {
322 		isys_ipdata.ltr = IPU6EP_LTR_VALUE;
323 		isys_ipdata.memopen_threshold = IPU6EP_MIN_MEMOPEN_TH;
324 	}
325 
326 	if (is_ipu6_tgl(hw_ver))
327 		isys_ipdata.csi2.nports = IPU6_TGL_ISYS_CSI2_NPORTS;
328 
329 	if (is_ipu6ep_mtl(hw_ver)) {
330 		isys_ipdata.csi2.nports = IPU6EP_MTL_ISYS_CSI2_NPORTS;
331 
332 		isys_ipdata.csi2.ctrl0_irq_edge =
333 			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
334 		isys_ipdata.csi2.ctrl0_irq_clear =
335 			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
336 		isys_ipdata.csi2.ctrl0_irq_mask =
337 			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
338 		isys_ipdata.csi2.ctrl0_irq_enable =
339 			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
340 		isys_ipdata.csi2.ctrl0_irq_lnp =
341 			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
342 		isys_ipdata.csi2.ctrl0_irq_status =
343 			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
344 		isys_ipdata.csi2.fw_access_port_ofs =
345 			CSI_REG_HUB_FW_ACCESS_PORT_V6OFS;
346 		isys_ipdata.ltr = IPU6EP_MTL_LTR_VALUE;
347 		isys_ipdata.memopen_threshold = IPU6EP_MTL_MIN_MEMOPEN_TH;
348 	}
349 
350 	if (is_ipu6se(hw_ver)) {
351 		isys_ipdata.csi2.nports = IPU6SE_ISYS_CSI2_NPORTS;
352 		isys_ipdata.csi2.irq_mask = IPU6SE_CSI_RX_ERROR_IRQ_MASK;
353 		isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS;
354 		isys_ipdata.sram_gran_shift = IPU6SE_SRAM_GRANULARITY_SHIFT;
355 		isys_ipdata.sram_gran_size = IPU6SE_SRAM_GRANULARITY_SIZE;
356 		isys_ipdata.max_sram_size = IPU6SE_MAX_SRAM_SIZE;
357 		isys_ipdata.sensor_type_start =
358 			IPU6SE_FW_ISYS_SENSOR_TYPE_START;
359 		isys_ipdata.sensor_type_end = IPU6SE_FW_ISYS_SENSOR_TYPE_END;
360 		isys_ipdata.max_streams = IPU6SE_ISYS_NUM_STREAMS;
361 		isys_ipdata.max_send_queues = IPU6SE_N_MAX_SEND_QUEUES;
362 		isys_ipdata.max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX;
363 		isys_ipdata.max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE;
364 		psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET;
365 	}
366 }
367 
368 static struct ipu6_bus_device *
369 ipu6_isys_init(struct pci_dev *pdev, struct device *parent,
370 	       struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
371 	       const struct ipu6_isys_internal_pdata *ipdata)
372 {
373 	struct device *dev = &pdev->dev;
374 	struct ipu6_bus_device *isys_adev;
375 	struct ipu6_isys_pdata *pdata;
376 	int ret;
377 
378 	ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
379 	if (ret) {
380 		dev_err_probe(dev, ret, "IPU6 bridge init failed\n");
381 		return ERR_PTR(ret);
382 	}
383 
384 	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
385 	if (!pdata)
386 		return ERR_PTR(-ENOMEM);
387 
388 	pdata->base = base;
389 	pdata->ipdata = ipdata;
390 
391 	isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
392 					       IPU6_ISYS_NAME);
393 	if (IS_ERR(isys_adev)) {
394 		kfree(pdata);
395 		return dev_err_cast_probe(dev, isys_adev,
396 				"ipu6_bus_initialize_device isys failed\n");
397 	}
398 
399 	isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID,
400 				       &ipdata->hw_variant);
401 	if (IS_ERR(isys_adev->mmu)) {
402 		put_device(&isys_adev->auxdev.dev);
403 		kfree(pdata);
404 		return dev_err_cast_probe(dev, isys_adev->mmu,
405 				"ipu6_mmu_init(isys_adev->mmu) failed\n");
406 	}
407 
408 	isys_adev->mmu->dev = &isys_adev->auxdev.dev;
409 
410 	ret = ipu6_bus_add_device(isys_adev);
411 	if (ret) {
412 		kfree(pdata);
413 		return ERR_PTR(ret);
414 	}
415 
416 	return isys_adev;
417 }
418 
419 static struct ipu6_bus_device *
420 ipu6_psys_init(struct pci_dev *pdev, struct device *parent,
421 	       struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
422 	       const struct ipu6_psys_internal_pdata *ipdata)
423 {
424 	struct ipu6_bus_device *psys_adev;
425 	struct ipu6_psys_pdata *pdata;
426 	int ret;
427 
428 	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
429 	if (!pdata)
430 		return ERR_PTR(-ENOMEM);
431 
432 	pdata->base = base;
433 	pdata->ipdata = ipdata;
434 
435 	psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
436 					       IPU6_PSYS_NAME);
437 	if (IS_ERR(psys_adev)) {
438 		kfree(pdata);
439 		return dev_err_cast_probe(&pdev->dev, psys_adev,
440 				"ipu6_bus_initialize_device psys failed\n");
441 	}
442 
443 	psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID,
444 				       &ipdata->hw_variant);
445 	if (IS_ERR(psys_adev->mmu)) {
446 		put_device(&psys_adev->auxdev.dev);
447 		kfree(pdata);
448 		return dev_err_cast_probe(&pdev->dev, psys_adev->mmu,
449 				"ipu6_mmu_init(psys_adev->mmu) failed\n");
450 	}
451 
452 	psys_adev->mmu->dev = &psys_adev->auxdev.dev;
453 
454 	ret = ipu6_bus_add_device(psys_adev);
455 	if (ret) {
456 		kfree(pdata);
457 		return ERR_PTR(ret);
458 	}
459 
460 	return psys_adev;
461 }
462 
463 static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver)
464 {
465 	int ret;
466 
467 	/* disable IPU6 PCI ATS on mtl ES2 */
468 	if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 &&
469 	    pci_ats_supported(dev))
470 		pci_disable_ats(dev);
471 
472 	/* No PCI MSI capability on IPU6EP */
473 	if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) {
474 		/* likely a no-op as MSI is not enabled by default */
475 		pci_disable_msi(dev);
476 		return 0;
477 	}
478 
479 	ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI);
480 	if (ret < 0)
481 		return dev_err_probe(&dev->dev, ret, "Request msi failed");
482 
483 	return 0;
484 }
485 
486 static void ipu6_configure_vc_mechanism(struct ipu6_device *isp)
487 {
488 	u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);
489 
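	/*
	 * The arbitration mode of each virtual channel is chosen at
	 * build time; set or clear the matching stall-mode bit in the
	 * buttress control register accordingly.
	 */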
490 	if (IPU6_BTRS_ARB_STALL_MODE_VC0 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
491 		val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
492 	else
493 		val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
494 
495 	if (IPU6_BTRS_ARB_STALL_MODE_VC1 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
496 		val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
497 	else
498 		val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
499 
500 	writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL);
501 }
502 
503 static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
504 {
505 	struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL;
506 	struct device *dev = &pdev->dev;
507 	void __iomem *isys_base = NULL;
508 	void __iomem *psys_base = NULL;
509 	struct ipu6_device *isp;
510 	phys_addr_t phys;
511 	u32 val, version, sku_id;
512 	int ret;
513 
514 	isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
515 	if (!isp)
516 		return -ENOMEM;
517 
518 	isp->pdev = pdev;
519 	INIT_LIST_HEAD(&isp->devices);
520 
521 	ret = pcim_enable_device(pdev);
522 	if (ret)
523 		return dev_err_probe(dev, ret, "Enable PCI device failed\n");
524 
525 	phys = pci_resource_start(pdev, IPU6_PCI_BAR);
526 	dev_dbg(dev, "IPU6 PCI bar[%u] = %pa\n", IPU6_PCI_BAR, &phys);
527 
528 	ret = pcim_iomap_regions(pdev, 1 << IPU6_PCI_BAR, pci_name(pdev));
529 	if (ret)
530 		return dev_err_probe(dev, ret, "Failed to remap I/O memory\n");
531 
532 	isp->base = pcim_iomap_table(pdev)[IPU6_PCI_BAR];
533 	pci_set_drvdata(pdev, isp);
534 	pci_set_master(pdev);
535 
536 	isp->cpd_metadata_cmpnt_size = sizeof(struct ipu6_cpd_metadata_cmpnt);
537 	switch (id->device) {
538 	case PCI_DEVICE_ID_INTEL_IPU6:
539 		isp->hw_ver = IPU6_VER_6;
540 		isp->cpd_fw_name = IPU6_FIRMWARE_NAME;
541 		break;
542 	case PCI_DEVICE_ID_INTEL_IPU6SE:
543 		isp->hw_ver = IPU6_VER_6SE;
544 		isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME;
545 		isp->cpd_metadata_cmpnt_size =
546 			sizeof(struct ipu6se_cpd_metadata_cmpnt);
547 		break;
548 	case PCI_DEVICE_ID_INTEL_IPU6EP_ADLP:
549 	case PCI_DEVICE_ID_INTEL_IPU6EP_RPLP:
550 		isp->hw_ver = IPU6_VER_6EP;
551 		isp->cpd_fw_name = IPU6EP_FIRMWARE_NAME;
552 		break;
553 	case PCI_DEVICE_ID_INTEL_IPU6EP_ADLN:
554 		isp->hw_ver = IPU6_VER_6EP;
555 		isp->cpd_fw_name = IPU6EPADLN_FIRMWARE_NAME;
556 		break;
557 	case PCI_DEVICE_ID_INTEL_IPU6EP_MTL:
558 		isp->hw_ver = IPU6_VER_6EP_MTL;
559 		isp->cpd_fw_name = IPU6EPMTL_FIRMWARE_NAME;
560 		break;
561 	default:
562 		return dev_err_probe(dev, -ENODEV,
563 				     "Unsupported IPU6 device %x\n",
564 				     id->device);
565 	}
566 
567 	ipu6_internal_pdata_init(isp);
568 
569 	isys_base = isp->base + isys_ipdata.hw_variant.offset;
570 	psys_base = isp->base + psys_ipdata.hw_variant.offset;
571 
572 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
573 	if (ret)
574 		return dev_err_probe(dev, ret, "Failed to set DMA mask\n");
575 
576 	dma_set_max_seg_size(dev, UINT_MAX);
577 
578 	ret = ipu6_pci_config_setup(pdev, isp->hw_ver);
579 	if (ret)
580 		return ret;
581 
582 	ret = ipu6_buttress_init(isp);
583 	if (ret)
584 		return ret;
585 
586 	ret = request_firmware(&isp->cpd_fw, isp->cpd_fw_name, dev);
587 	if (ret) {
588 		dev_err_probe(&isp->pdev->dev, ret,
589 			      "Requesting signed firmware %s failed\n",
590 			      isp->cpd_fw_name);
591 		goto buttress_exit;
592 	}
593 
594 	ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
595 					 isp->cpd_fw->size);
596 	if (ret) {
597 		dev_err_probe(&isp->pdev->dev, ret,
598 			      "Failed to validate cpd\n");
599 		goto out_ipu6_bus_del_devices;
600 	}
601 
602 	isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl,
603 				 sizeof(isys_buttress_ctrl), GFP_KERNEL);
604 	if (!isys_ctrl) {
605 		ret = -ENOMEM;
606 		goto out_ipu6_bus_del_devices;
607 	}
608 
609 	isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base,
610 				   &isys_ipdata);
611 	if (IS_ERR(isp->isys)) {
612 		ret = PTR_ERR(isp->isys);
613 		goto out_ipu6_bus_del_devices;
614 	}
615 
616 	psys_ctrl = devm_kmemdup(dev, &psys_buttress_ctrl,
617 				 sizeof(psys_buttress_ctrl), GFP_KERNEL);
618 	if (!psys_ctrl) {
619 		ret = -ENOMEM;
620 		goto out_ipu6_bus_del_devices;
621 	}
622 
623 	isp->psys = ipu6_psys_init(pdev, &isp->isys->auxdev.dev, psys_ctrl,
624 				   psys_base, &psys_ipdata);
625 	if (IS_ERR(isp->psys)) {
626 		ret = PTR_ERR(isp->psys);
627 		goto out_ipu6_bus_del_devices;
628 	}
629 
630 	ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
631 	if (ret < 0)
632 		goto out_ipu6_bus_del_devices;
633 
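	/*
	 * Bring up the PSYS MMU, map the firmware image for DMA and
	 * create the package directory so that the firmware can be
	 * authenticated below.
	 */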
634 	ret = ipu6_mmu_hw_init(isp->psys->mmu);
635 	if (ret) {
636 		dev_err_probe(&isp->pdev->dev, ret,
637 			      "Failed to set up MMU hardware\n");
638 		goto out_ipu6_bus_del_devices;
639 	}
640 
641 	ret = ipu6_buttress_map_fw_image(isp->psys, isp->cpd_fw,
642 					 &isp->psys->fw_sgt);
643 	if (ret) {
644 		dev_err_probe(&isp->pdev->dev, ret, "failed to map fw image\n");
645 		goto out_ipu6_bus_del_devices;
646 	}
647 
648 	ret = ipu6_cpd_create_pkg_dir(isp->psys, isp->cpd_fw->data);
649 	if (ret) {
650 		dev_err_probe(&isp->pdev->dev, ret,
651 			      "failed to create pkg dir\n");
652 		goto out_ipu6_bus_del_devices;
653 	}
654 
655 	ret = devm_request_threaded_irq(dev, pdev->irq, ipu6_buttress_isr,
656 					ipu6_buttress_isr_threaded,
657 					IRQF_SHARED, IPU6_NAME, isp);
658 	if (ret) {
659 		dev_err_probe(dev, ret, "Requesting irq failed\n");
660 		goto out_ipu6_bus_del_devices;
661 	}
662 
663 	ret = ipu6_buttress_authenticate(isp);
664 	if (ret) {
665 		dev_err_probe(&isp->pdev->dev, ret,
666 			      "FW authentication failed\n");
667 		goto out_free_irq;
668 	}
669 
670 	ipu6_mmu_hw_cleanup(isp->psys->mmu);
671 	pm_runtime_put(&isp->psys->auxdev.dev);
672 
673 	/* Configure the arbitration mechanisms for VC requests */
674 	ipu6_configure_vc_mechanism(isp);
675 
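	/* Report the SKU and version decoded from the buttress SKU register */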
676 	val = readl(isp->base + BUTTRESS_REG_SKU);
677 	sku_id = FIELD_GET(GENMASK(6, 4), val);
678 	version = FIELD_GET(GENMASK(3, 0), val);
679 	dev_info(dev, "IPU%u-v%u[%x] hardware version %d\n", version, sku_id,
680 		 pdev->device, isp->hw_ver);
681 
682 	pm_runtime_put_noidle(dev);
683 	pm_runtime_allow(dev);
684 
685 	isp->bus_ready_to_probe = true;
686 
687 	return 0;
688 
689 out_free_irq:
690 	devm_free_irq(dev, pdev->irq, isp);
691 out_ipu6_bus_del_devices:
692 	if (isp->psys) {
693 		ipu6_cpd_free_pkg_dir(isp->psys);
694 		ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
695 	}
696 	if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu))
697 		ipu6_mmu_cleanup(isp->psys->mmu);
698 	if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu))
699 		ipu6_mmu_cleanup(isp->isys->mmu);
700 	ipu6_bus_del_devices(pdev);
701 	release_firmware(isp->cpd_fw);
702 buttress_exit:
703 	ipu6_buttress_exit(isp);
704 
705 	return ret;
706 }
707 
708 static void ipu6_pci_remove(struct pci_dev *pdev)
709 {
710 	struct ipu6_device *isp = pci_get_drvdata(pdev);
711 	struct ipu6_mmu *isys_mmu = isp->isys->mmu;
712 	struct ipu6_mmu *psys_mmu = isp->psys->mmu;
713 
714 	devm_free_irq(&pdev->dev, pdev->irq, isp);
715 	ipu6_cpd_free_pkg_dir(isp->psys);
716 
717 	ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
718 	ipu6_buttress_exit(isp);
719 
720 	ipu6_bus_del_devices(pdev);
721 
722 	pm_runtime_forbid(&pdev->dev);
723 	pm_runtime_get_noresume(&pdev->dev);
724 
725 	release_firmware(isp->cpd_fw);
726 
727 	ipu6_mmu_cleanup(psys_mmu);
728 	ipu6_mmu_cleanup(isys_mmu);
729 }
730 
731 static void ipu6_pci_reset_prepare(struct pci_dev *pdev)
732 {
733 	struct ipu6_device *isp = pci_get_drvdata(pdev);
734 
735 	pm_runtime_forbid(&isp->pdev->dev);
736 }
737 
738 static void ipu6_pci_reset_done(struct pci_dev *pdev)
739 {
740 	struct ipu6_device *isp = pci_get_drvdata(pdev);
741 
742 	ipu6_buttress_restore(isp);
743 	if (isp->secure_mode)
744 		ipu6_buttress_reset_authentication(isp);
745 
746 	isp->need_ipc_reset = true;
747 	pm_runtime_allow(&isp->pdev->dev);
748 }
749 
750 /*
751  * PCI base driver code requires driver to provide these to enable
752  * PCI device level PM state transitions (D0<->D3)
753  */
754 static int ipu6_suspend(struct device *dev)
755 {
756 	struct pci_dev *pdev = to_pci_dev(dev);
757 
758 	synchronize_irq(pdev->irq);
759 	return 0;
760 }
761 
762 static int ipu6_resume(struct device *dev)
763 {
764 	struct pci_dev *pdev = to_pci_dev(dev);
765 	struct ipu6_device *isp = pci_get_drvdata(pdev);
766 	struct ipu6_buttress *b = &isp->buttress;
767 	int ret;
768 
769 	/* Configure the arbitration mechanisms for VC requests */
770 	ipu6_configure_vc_mechanism(isp);
771 
772 	isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
773 	dev_info(dev, "IPU6 in %s mode\n",
774 		 isp->secure_mode ? "secure" : "non-secure");
775 
776 	ipu6_buttress_restore(isp);
777 
778 	ret = ipu6_buttress_ipc_reset(isp, &b->cse);
779 	if (ret)
780 		dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n");
781 
782 	ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
783 	if (ret < 0) {
784 		dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n");
785 		return 0;
786 	}
787 
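	/* Re-authenticate the firmware after the system resume */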
788 	ret = ipu6_buttress_authenticate(isp);
789 	if (ret)
790 		dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", ret);
791 
792 	pm_runtime_put(&isp->psys->auxdev.dev);
793 
794 	return 0;
795 }
796 
797 static int ipu6_runtime_resume(struct device *dev)
798 {
799 	struct pci_dev *pdev = to_pci_dev(dev);
800 	struct ipu6_device *isp = pci_get_drvdata(pdev);
801 	int ret;
802 
803 	ipu6_configure_vc_mechanism(isp);
804 	ipu6_buttress_restore(isp);
805 
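	/* A PCI reset (see ipu6_pci_reset_done()) requires an IPC reset */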
806 	if (isp->need_ipc_reset) {
807 		struct ipu6_buttress *b = &isp->buttress;
808 
809 		isp->need_ipc_reset = false;
810 		ret = ipu6_buttress_ipc_reset(isp, &b->cse);
811 		if (ret)
812 			dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");
813 	}
814 
815 	return 0;
816 }
817 
818 static const struct dev_pm_ops ipu6_pm_ops = {
819 	SYSTEM_SLEEP_PM_OPS(&ipu6_suspend, &ipu6_resume)
820 	RUNTIME_PM_OPS(&ipu6_suspend, &ipu6_runtime_resume, NULL)
821 };
822 
823 MODULE_DEVICE_TABLE(pci, ipu6_pci_tbl);
824 
825 static const struct pci_error_handlers pci_err_handlers = {
826 	.reset_prepare = ipu6_pci_reset_prepare,
827 	.reset_done = ipu6_pci_reset_done,
828 };
829 
830 static struct pci_driver ipu6_pci_driver = {
831 	.name = IPU6_NAME,
832 	.id_table = ipu6_pci_tbl,
833 	.probe = ipu6_pci_probe,
834 	.remove = ipu6_pci_remove,
835 	.driver = {
836 		.pm = pm_ptr(&ipu6_pm_ops),
837 	},
838 	.err_handler = &pci_err_handlers,
839 };
840 
841 module_pci_driver(ipu6_pci_driver);
842 
843 MODULE_IMPORT_NS("INTEL_IPU_BRIDGE");
844 MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
845 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
846 MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
847 MODULE_AUTHOR("Qingwu Zhang <qingwu.zhang@intel.com>");
848 MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
849 MODULE_AUTHOR("Hongju Wang <hongju.wang@intel.com>");
850 MODULE_LICENSE("GPL");
851 MODULE_DESCRIPTION("Intel IPU6 PCI driver");
852