1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2013--2024 Intel Corporation
4 */
5
6 #include <linux/bitfield.h>
7 #include <linux/bits.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/err.h>
10 #include <linux/firmware.h>
11 #include <linux/kernel.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/list.h>
15 #include <linux/module.h>
16 #include <linux/pci-ats.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/property.h>
19 #include <linux/scatterlist.h>
20 #include <linux/slab.h>
21 #include <linux/types.h>
22
23 #include <media/ipu-bridge.h>
24 #include <media/ipu6-pci-table.h>
25
26 #include "ipu6.h"
27 #include "ipu6-bus.h"
28 #include "ipu6-buttress.h"
29 #include "ipu6-cpd.h"
30 #include "ipu6-isys.h"
31 #include "ipu6-mmu.h"
32 #include "ipu6-platform-buttress-regs.h"
33 #include "ipu6-platform-isys-csi2-reg.h"
34 #include "ipu6-platform-regs.h"
35
36 #define IPU6_PCI_BAR 0
37
/*
 * Cell program descriptor as laid out inside the CPD firmware image.
 * The layout must match the firmware binary format exactly; do not
 * reorder or resize fields.  ipu6_pkg_dir_configure_spc() reads
 * blob_offset, start[1], icache_source and regs_addr from it.
 */
struct ipu6_cell_program {
	u32 magic_number;

	/* program blob location inside the firmware image */
	u32 blob_offset;
	u32 blob_size;

	/* entry points; start[1] is written to the SPC start-PC register */
	u32 start[3];

	/* instruction cache load parameters */
	u32 icache_source;
	u32 icache_target;
	u32 icache_size;

	/* program memory load parameters */
	u32 pmem_source;
	u32 pmem_target;
	u32 pmem_size;

	/* initialized data section */
	u32 data_source;
	u32 data_target;
	u32 data_size;

	/* zero-initialized data section */
	u32 bss_target;
	u32 bss_size;

	u32 cell_id;
	/* offset of the cell's register block; checked against spc_offset */
	u32 regs_addr;

	u32 cell_pmem_data_bus_address;
	u32 cell_dmem_data_bus_address;
	u32 cell_pmem_control_bus_address;
	u32 cell_dmem_control_bus_address;

	u32 next;
	u32 dummy[2];
};
72
/*
 * Image system (ISYS) hardware description: MMU layout, CDC FIFO
 * thresholds and memory offsets.  Stream limits, SRAM geometry and the
 * CSI-2 register set are filled in at probe time by
 * ipu6_internal_pdata_init() based on the detected hardware version.
 */
static struct ipu6_isys_internal_pdata isys_ipdata = {
	.hw_variant = {
		.offset = IPU6_UNIFIED_OFFSET,
		.nr_mmus = 3,
		.mmu_hw = {
			{
				.offset = IPU6_ISYS_IOMMU0_OFFSET,
				.info_bits = IPU6_INFO_REQUEST_DESTINATION_IOSF,
				.nr_l1streams = 16,
				/* per-stream block sizes consumed by ipu6-mmu */
				.l1_block_sz = {
					3, 8, 2, 2, 2, 2, 2, 2, 1, 1,
					1, 1, 1, 1, 1, 1
				},
				.nr_l2streams = 16,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
					IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
					IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
			},
			{
				.offset = IPU6_ISYS_IOMMU1_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 16,
				.l1_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 1, 1, 4
				},
				.nr_l2streams = 16,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
					IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
					IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
			},
			{
				/* "internal" MMU: no L1/L2 streams to program */
				.offset = IPU6_ISYS_IOMMUI_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 0,
				.nr_l2streams = 0,
				.insert_read_before_invalidate = false,
			},
		},
		.cdc_fifos = 3,
		.cdc_fifo_threshold = {6, 8, 2},
		.dmem_offset = IPU6_ISYS_DMEM_OFFSET,
		.spc_offset = IPU6_ISYS_SPC_OFFSET,
	},
	.isys_dma_overshoot = IPU6_ISYS_OVERALLOC_MIN,
};
131
/*
 * Processing system (PSYS) hardware description: four MMUs plus the
 * DMEM offset.  hw_variant.spc_offset is intentionally not set here;
 * it is filled in by ipu6_internal_pdata_init() as it differs per SoC.
 */
static struct ipu6_psys_internal_pdata psys_ipdata = {
	.hw_variant = {
		.offset = IPU6_UNIFIED_OFFSET,
		.nr_mmus = 4,
		.mmu_hw = {
			{
				.offset = IPU6_PSYS_IOMMU0_OFFSET,
				.info_bits =
					IPU6_INFO_REQUEST_DESTINATION_IOSF,
				.nr_l1streams = 16,
				.l1_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.nr_l2streams = 16,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
					IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
					IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
			},
			{
				/* MMU1 write path (per the MMU1W register offset) */
				.offset = IPU6_PSYS_IOMMU1_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 32,
				.l1_block_sz = {
					1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 10,
					5, 4, 14, 6, 4, 14, 6, 4, 8,
					4, 2, 1, 1, 1, 1, 14
				},
				.nr_l2streams = 32,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
					IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
					IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET,
			},
			{
				/* MMU1 read path */
				.offset = IPU6_PSYS_IOMMU1R_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 16,
				.l1_block_sz = {
					1, 4, 4, 4, 4, 16, 8, 4, 32,
					16, 16, 2, 2, 2, 1, 12
				},
				.nr_l2streams = 16,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
					IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
					IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
			},
			{
				/* "internal" MMU: no L1/L2 streams to program */
				.offset = IPU6_PSYS_IOMMUI_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 0,
				.nr_l2streams = 0,
				.insert_read_before_invalidate = false,
			},
		},
		.dmem_offset = IPU6_PSYS_DMEM_OFFSET,
	},
};
210
/* Buttress frequency/power control parameters for the IS power island */
static const struct ipu6_buttress_ctrl isys_buttress_ctrl = {
	.ratio = IPU6_IS_FREQ_CTL_DEFAULT_RATIO,
	.qos_floor = IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
	.freq_ctl = IPU6_BUTTRESS_REG_IS_FREQ_CTL,
	.pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT,
	.pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK,
	.pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
	.pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
};
220
/* Buttress frequency/power control parameters for the PS power island */
static const struct ipu6_buttress_ctrl psys_buttress_ctrl = {
	.ratio = IPU6_PS_FREQ_CTL_DEFAULT_RATIO,
	.qos_floor = IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
	.freq_ctl = IPU6_BUTTRESS_REG_PS_FREQ_CTL,
	.pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT,
	.pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK,
	.pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
	.pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
};
230
/*
 * Program the SPC cell from the package directory.
 *
 * Looks up the server program address for @pkg_dir_idx in @pkg_dir,
 * translates that device address back into an offset inside the CPD
 * firmware image, and programs the SPC icache base, info bits and
 * start PC from the ipu6_cell_program found there.  Finally the
 * package directory DMA address is written to DMEM so the firmware
 * can locate it.
 */
static void
ipu6_pkg_dir_configure_spc(struct ipu6_device *isp,
			   const struct ipu6_hw_variants *hw_variant,
			   int pkg_dir_idx, void __iomem *base,
			   u64 *pkg_dir, dma_addr_t pkg_dir_vied_address)
{
	struct ipu6_cell_program *prog;
	void __iomem *spc_base;
	u32 server_fw_addr;
	dma_addr_t dma_addr;
	u32 pg_offset;

	/*
	 * Each directory entry appears to be a pair of u64s; the +1
	 * presumably skips a header entry -- defined by ipu6-cpd.
	 */
	server_fw_addr = lower_32_bits(*(pkg_dir + (pkg_dir_idx + 1) * 2));
	if (pkg_dir_idx == IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX)
		dma_addr = sg_dma_address(isp->isys->fw_sgt.sgl);
	else
		dma_addr = sg_dma_address(isp->psys->fw_sgt.sgl);

	/*
	 * Device address -> offset into the CPD image.  Assumes the
	 * firmware mapping is contiguous from the first sg entry and
	 * the difference fits in 32 bits -- TODO(review): confirm.
	 */
	pg_offset = server_fw_addr - dma_addr;
	prog = (struct ipu6_cell_program *)((u64)isp->cpd_fw->data + pg_offset);
	spc_base = base + prog->regs_addr;
	/* Cross-check the CPD-provided register block against the pdata */
	if (spc_base != (base + hw_variant->spc_offset))
		dev_warn(&isp->pdev->dev,
			 "SPC reg addr %p not matching value from CPD %p\n",
			 base + hw_variant->spc_offset, spc_base);
	writel(server_fw_addr + prog->blob_offset +
	       prog->icache_source, spc_base + IPU6_PSYS_REG_SPC_ICACHE_BASE);
	writel(IPU6_INFO_REQUEST_DESTINATION_IOSF,
	       spc_base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER);
	writel(prog->start[1], spc_base + IPU6_PSYS_REG_SPC_START_PC);
	writel(pkg_dir_vied_address, base + hw_variant->dmem_offset);
}
263
/*
 * Invalidate the SPC instruction cache and hand the firmware its
 * package directory: via the cell program in non-secure mode, or as
 * a fixed IMR offset written to DMEM when running in secure mode.
 */
void ipu6_configure_spc(struct ipu6_device *isp,
			const struct ipu6_hw_variants *hw_variant,
			int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
			dma_addr_t pkg_dir_dma_addr)
{
	void __iomem *spc = base + hw_variant->spc_offset;
	u32 status;

	/* Invalidate the SPC icache before (re)starting the cell */
	status = readl(spc + IPU6_PSYS_REG_SPC_STATUS_CTRL);
	writel(status | IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE,
	       spc + IPU6_PSYS_REG_SPC_STATUS_CTRL);

	if (!isp->secure_mode) {
		ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base,
					   pkg_dir, pkg_dir_dma_addr);
		return;
	}

	/* Secure mode: firmware lives in the IMR, just pass the offset */
	writel(IPU6_PKG_DIR_IMR_OFFSET, base + hw_variant->dmem_offset);
}
EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, INTEL_IPU6);
284
285 #define IPU6_ISYS_CSI2_NPORTS 4
286 #define IPU6SE_ISYS_CSI2_NPORTS 4
287 #define IPU6_TGL_ISYS_CSI2_NPORTS 8
288 #define IPU6EP_MTL_ISYS_CSI2_NPORTS 6
289
/*
 * Finalize isys_ipdata/psys_ipdata for the detected hardware version.
 *
 * Fills in the IPU6 (TGL) defaults first, then overrides per variant:
 * TGL gains extra CSI-2 ports, MTL (IPU6EP) moves the CSI top-level
 * IRQ registers (IPU6V6_* set) and sets LTR/memopen thresholds, and
 * IPU6SE shrinks the stream/SRAM limits.  Note this mutates the two
 * file-scope pdata structs shared by all devices.
 */
static void ipu6_internal_pdata_init(struct ipu6_device *isp)
{
	u8 hw_ver = isp->hw_ver;

	/* Baseline: plain IPU6 values */
	isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS;
	isys_ipdata.sram_gran_shift = IPU6_SRAM_GRANULARITY_SHIFT;
	isys_ipdata.sram_gran_size = IPU6_SRAM_GRANULARITY_SIZE;
	isys_ipdata.max_sram_size = IPU6_MAX_SRAM_SIZE;
	isys_ipdata.sensor_type_start = IPU6_FW_ISYS_SENSOR_TYPE_START;
	isys_ipdata.sensor_type_end = IPU6_FW_ISYS_SENSOR_TYPE_END;
	isys_ipdata.max_streams = IPU6_ISYS_NUM_STREAMS;
	isys_ipdata.max_send_queues = IPU6_N_MAX_SEND_QUEUES;
	isys_ipdata.max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX;
	isys_ipdata.max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE;
	isys_ipdata.csi2.nports = IPU6_ISYS_CSI2_NPORTS;
	isys_ipdata.csi2.irq_mask = IPU6_CSI_RX_ERROR_IRQ_MASK;
	isys_ipdata.csi2.ctrl0_irq_edge = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
	isys_ipdata.csi2.ctrl0_irq_clear =
		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
	isys_ipdata.csi2.ctrl0_irq_mask = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
	isys_ipdata.csi2.ctrl0_irq_enable =
		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
	isys_ipdata.csi2.ctrl0_irq_status =
		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
	isys_ipdata.csi2.ctrl0_irq_lnp =
		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
	isys_ipdata.enhanced_iwake = is_ipu6ep_mtl(hw_ver) || is_ipu6ep(hw_ver);
	psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET;
	isys_ipdata.csi2.fw_access_port_ofs = CSI_REG_HUB_FW_ACCESS_PORT_OFS;

	if (is_ipu6ep(hw_ver)) {
		isys_ipdata.ltr = IPU6EP_LTR_VALUE;
		isys_ipdata.memopen_threshold = IPU6EP_MIN_MEMOPEN_TH;
	}

	/* TGL exposes more CSI-2 ports than the baseline */
	if (is_ipu6_tgl(hw_ver))
		isys_ipdata.csi2.nports = IPU6_TGL_ISYS_CSI2_NPORTS;

	/* MTL: CSI top-level IRQ registers live at the IPU6V6_* offsets */
	if (is_ipu6ep_mtl(hw_ver)) {
		isys_ipdata.csi2.nports = IPU6EP_MTL_ISYS_CSI2_NPORTS;

		isys_ipdata.csi2.ctrl0_irq_edge =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
		isys_ipdata.csi2.ctrl0_irq_clear =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
		isys_ipdata.csi2.ctrl0_irq_mask =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
		isys_ipdata.csi2.ctrl0_irq_enable =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
		isys_ipdata.csi2.ctrl0_irq_lnp =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
		isys_ipdata.csi2.ctrl0_irq_status =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
		isys_ipdata.csi2.fw_access_port_ofs =
			CSI_REG_HUB_FW_ACCESS_PORT_V6OFS;
		isys_ipdata.ltr = IPU6EP_MTL_LTR_VALUE;
		isys_ipdata.memopen_threshold = IPU6EP_MTL_MIN_MEMOPEN_TH;
	}

	/* IPU6SE: smaller stream/SRAM budget and its own SPC offset */
	if (is_ipu6se(hw_ver)) {
		isys_ipdata.csi2.nports = IPU6SE_ISYS_CSI2_NPORTS;
		isys_ipdata.csi2.irq_mask = IPU6SE_CSI_RX_ERROR_IRQ_MASK;
		isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS;
		isys_ipdata.sram_gran_shift = IPU6SE_SRAM_GRANULARITY_SHIFT;
		isys_ipdata.sram_gran_size = IPU6SE_SRAM_GRANULARITY_SIZE;
		isys_ipdata.max_sram_size = IPU6SE_MAX_SRAM_SIZE;
		isys_ipdata.sensor_type_start =
			IPU6SE_FW_ISYS_SENSOR_TYPE_START;
		isys_ipdata.sensor_type_end = IPU6SE_FW_ISYS_SENSOR_TYPE_END;
		isys_ipdata.max_streams = IPU6SE_ISYS_NUM_STREAMS;
		isys_ipdata.max_send_queues = IPU6SE_N_MAX_SEND_QUEUES;
		isys_ipdata.max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX;
		isys_ipdata.max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE;
		psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET;
	}
}
366
/*
 * Create and register the ISYS auxiliary bus device.
 *
 * Runs the ACPI/IPU bridge setup, allocates the platform data, creates
 * the per-device MMU and adds the device to the ipu6 bus.  Returns the
 * new bus device, or an ERR_PTR(); @pdata is freed here on the failure
 * paths.
 */
static struct ipu6_bus_device *
ipu6_isys_init(struct pci_dev *pdev, struct device *parent,
	       struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
	       const struct ipu6_isys_internal_pdata *ipdata)
{
	struct device *dev = &pdev->dev;
	struct ipu6_bus_device *isys_adev;
	struct ipu6_isys_pdata *pdata;
	int ret;

	/* Discover connected sensors (ACPI SSDB) before creating the device */
	ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
	if (ret) {
		dev_err_probe(dev, ret, "IPU6 bridge init failed\n");
		return ERR_PTR(ret);
	}

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	pdata->base = base;
	pdata->ipdata = ipdata;

	isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
					       IPU6_ISYS_NAME);
	if (IS_ERR(isys_adev)) {
		kfree(pdata);
		return dev_err_cast_probe(dev, isys_adev,
					  "ipu6_bus_initialize_device isys failed\n");
	}

	/* The MMU is set up against the PCI dev, then re-parented below */
	isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID,
				       &ipdata->hw_variant);
	if (IS_ERR(isys_adev->mmu)) {
		put_device(&isys_adev->auxdev.dev);
		kfree(pdata);
		return dev_err_cast_probe(dev, isys_adev->mmu,
					  "ipu6_mmu_init(isys_adev->mmu) failed\n");
	}

	isys_adev->mmu->dev = &isys_adev->auxdev.dev;

	ret = ipu6_bus_add_device(isys_adev);
	if (ret) {
		/*
		 * NOTE(review): no put_device() here, unlike the MMU error
		 * path above -- presumably ipu6_bus_add_device() drops the
		 * reference itself on failure; confirm in ipu6-bus.c.
		 */
		kfree(pdata);
		return ERR_PTR(ret);
	}

	return isys_adev;
}
417
/*
 * Create and register the PSYS auxiliary bus device.
 *
 * Mirror of ipu6_isys_init() without the bridge step: allocates the
 * platform data, creates the per-device MMU and adds the device to the
 * ipu6 bus.  Returns the new bus device, or an ERR_PTR(); @pdata is
 * freed here on the failure paths.
 */
static struct ipu6_bus_device *
ipu6_psys_init(struct pci_dev *pdev, struct device *parent,
	       struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
	       const struct ipu6_psys_internal_pdata *ipdata)
{
	struct ipu6_bus_device *psys_adev;
	struct ipu6_psys_pdata *pdata;
	int ret;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	pdata->base = base;
	pdata->ipdata = ipdata;

	psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
					       IPU6_PSYS_NAME);
	if (IS_ERR(psys_adev)) {
		kfree(pdata);
		return dev_err_cast_probe(&pdev->dev, psys_adev,
					  "ipu6_bus_initialize_device psys failed\n");
	}

	/* The MMU is set up against the PCI dev, then re-parented below */
	psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID,
				       &ipdata->hw_variant);
	if (IS_ERR(psys_adev->mmu)) {
		put_device(&psys_adev->auxdev.dev);
		kfree(pdata);
		return dev_err_cast_probe(&pdev->dev, psys_adev->mmu,
					  "ipu6_mmu_init(psys_adev->mmu) failed\n");
	}

	psys_adev->mmu->dev = &psys_adev->auxdev.dev;

	ret = ipu6_bus_add_device(psys_adev);
	if (ret) {
		/*
		 * NOTE(review): no put_device() here, unlike the MMU error
		 * path above -- presumably ipu6_bus_add_device() drops the
		 * reference itself on failure; confirm in ipu6-bus.c.
		 */
		kfree(pdata);
		return ERR_PTR(ret);
	}

	return psys_adev;
}
461
/*
 * Per-variant PCI configuration: ATS quirk and interrupt setup.
 *
 * On MTL ES2 steppings PCI ATS is disabled; IPU6EP parts have no MSI
 * capability so legacy interrupts are used, everything else gets a
 * single MSI vector.  Returns 0 on success or a negative errno.
 */
static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver)
{
	int ret;

	/* disable IPU6 PCI ATS on mtl ES2 */
	if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 &&
	    pci_ats_supported(dev))
		pci_disable_ats(dev);

	/* No PCI msi capability for IPU6EP */
	if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) {
		/* likely do nothing as msi not enabled by default */
		pci_disable_msi(dev);
		return 0;
	}

	ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0)
		/* add the terminating newline the other messages have */
		return dev_err_probe(&dev->dev, ret, "Request msi failed\n");

	return 0;
}
484
ipu6_configure_vc_mechanism(struct ipu6_device * isp)485 static void ipu6_configure_vc_mechanism(struct ipu6_device *isp)
486 {
487 u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);
488
489 if (IPU6_BTRS_ARB_STALL_MODE_VC0 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
490 val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
491 else
492 val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
493
494 if (IPU6_BTRS_ARB_STALL_MODE_VC1 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
495 val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
496 else
497 val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
498
499 writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL);
500 }
501
ipu6_pci_probe(struct pci_dev * pdev,const struct pci_device_id * id)502 static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
503 {
504 struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL;
505 struct device *dev = &pdev->dev;
506 void __iomem *isys_base = NULL;
507 void __iomem *psys_base = NULL;
508 struct ipu6_device *isp;
509 phys_addr_t phys;
510 u32 val, version, sku_id;
511 int ret;
512
513 isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
514 if (!isp)
515 return -ENOMEM;
516
517 isp->pdev = pdev;
518 INIT_LIST_HEAD(&isp->devices);
519
520 ret = pcim_enable_device(pdev);
521 if (ret)
522 return dev_err_probe(dev, ret, "Enable PCI device failed\n");
523
524 phys = pci_resource_start(pdev, IPU6_PCI_BAR);
525 dev_dbg(dev, "IPU6 PCI bar[%u] = %pa\n", IPU6_PCI_BAR, &phys);
526
527 ret = pcim_iomap_regions(pdev, 1 << IPU6_PCI_BAR, pci_name(pdev));
528 if (ret)
529 return dev_err_probe(dev, ret, "Failed to I/O mem remapping\n");
530
531 isp->base = pcim_iomap_table(pdev)[IPU6_PCI_BAR];
532 pci_set_drvdata(pdev, isp);
533 pci_set_master(pdev);
534
535 isp->cpd_metadata_cmpnt_size = sizeof(struct ipu6_cpd_metadata_cmpnt);
536 switch (id->device) {
537 case PCI_DEVICE_ID_INTEL_IPU6:
538 isp->hw_ver = IPU6_VER_6;
539 isp->cpd_fw_name = IPU6_FIRMWARE_NAME;
540 break;
541 case PCI_DEVICE_ID_INTEL_IPU6SE:
542 isp->hw_ver = IPU6_VER_6SE;
543 isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME;
544 isp->cpd_metadata_cmpnt_size =
545 sizeof(struct ipu6se_cpd_metadata_cmpnt);
546 break;
547 case PCI_DEVICE_ID_INTEL_IPU6EP_ADLP:
548 case PCI_DEVICE_ID_INTEL_IPU6EP_RPLP:
549 isp->hw_ver = IPU6_VER_6EP;
550 isp->cpd_fw_name = IPU6EP_FIRMWARE_NAME;
551 break;
552 case PCI_DEVICE_ID_INTEL_IPU6EP_ADLN:
553 isp->hw_ver = IPU6_VER_6EP;
554 isp->cpd_fw_name = IPU6EPADLN_FIRMWARE_NAME;
555 break;
556 case PCI_DEVICE_ID_INTEL_IPU6EP_MTL:
557 isp->hw_ver = IPU6_VER_6EP_MTL;
558 isp->cpd_fw_name = IPU6EPMTL_FIRMWARE_NAME;
559 break;
560 default:
561 return dev_err_probe(dev, -ENODEV,
562 "Unsupported IPU6 device %x\n",
563 id->device);
564 }
565
566 ipu6_internal_pdata_init(isp);
567
568 isys_base = isp->base + isys_ipdata.hw_variant.offset;
569 psys_base = isp->base + psys_ipdata.hw_variant.offset;
570
571 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
572 if (ret)
573 return dev_err_probe(dev, ret, "Failed to set DMA mask\n");
574
575 dma_set_max_seg_size(dev, UINT_MAX);
576
577 ret = ipu6_pci_config_setup(pdev, isp->hw_ver);
578 if (ret)
579 return ret;
580
581 ret = ipu6_buttress_init(isp);
582 if (ret)
583 return ret;
584
585 ret = request_firmware(&isp->cpd_fw, isp->cpd_fw_name, dev);
586 if (ret) {
587 dev_err_probe(&isp->pdev->dev, ret,
588 "Requesting signed firmware %s failed\n",
589 isp->cpd_fw_name);
590 goto buttress_exit;
591 }
592
593 ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
594 isp->cpd_fw->size);
595 if (ret) {
596 dev_err_probe(&isp->pdev->dev, ret,
597 "Failed to validate cpd\n");
598 goto out_ipu6_bus_del_devices;
599 }
600
601 isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl,
602 sizeof(isys_buttress_ctrl), GFP_KERNEL);
603 if (!isys_ctrl) {
604 ret = -ENOMEM;
605 goto out_ipu6_bus_del_devices;
606 }
607
608 isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base,
609 &isys_ipdata);
610 if (IS_ERR(isp->isys)) {
611 ret = PTR_ERR(isp->isys);
612 goto out_ipu6_bus_del_devices;
613 }
614
615 psys_ctrl = devm_kmemdup(dev, &psys_buttress_ctrl,
616 sizeof(psys_buttress_ctrl), GFP_KERNEL);
617 if (!psys_ctrl) {
618 ret = -ENOMEM;
619 goto out_ipu6_bus_del_devices;
620 }
621
622 isp->psys = ipu6_psys_init(pdev, &isp->isys->auxdev.dev, psys_ctrl,
623 psys_base, &psys_ipdata);
624 if (IS_ERR(isp->psys)) {
625 ret = PTR_ERR(isp->psys);
626 goto out_ipu6_bus_del_devices;
627 }
628
629 ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
630 if (ret < 0)
631 goto out_ipu6_bus_del_devices;
632
633 ret = ipu6_mmu_hw_init(isp->psys->mmu);
634 if (ret) {
635 dev_err_probe(&isp->pdev->dev, ret,
636 "Failed to set MMU hardware\n");
637 goto out_ipu6_bus_del_devices;
638 }
639
640 ret = ipu6_buttress_map_fw_image(isp->psys, isp->cpd_fw,
641 &isp->psys->fw_sgt);
642 if (ret) {
643 dev_err_probe(&isp->pdev->dev, ret, "failed to map fw image\n");
644 goto out_ipu6_bus_del_devices;
645 }
646
647 ret = ipu6_cpd_create_pkg_dir(isp->psys, isp->cpd_fw->data);
648 if (ret) {
649 dev_err_probe(&isp->pdev->dev, ret,
650 "failed to create pkg dir\n");
651 goto out_ipu6_bus_del_devices;
652 }
653
654 ret = devm_request_threaded_irq(dev, pdev->irq, ipu6_buttress_isr,
655 ipu6_buttress_isr_threaded,
656 IRQF_SHARED, IPU6_NAME, isp);
657 if (ret) {
658 dev_err_probe(dev, ret, "Requesting irq failed\n");
659 goto out_ipu6_bus_del_devices;
660 }
661
662 ret = ipu6_buttress_authenticate(isp);
663 if (ret) {
664 dev_err_probe(&isp->pdev->dev, ret,
665 "FW authentication failed\n");
666 goto out_free_irq;
667 }
668
669 ipu6_mmu_hw_cleanup(isp->psys->mmu);
670 pm_runtime_put(&isp->psys->auxdev.dev);
671
672 /* Configure the arbitration mechanisms for VC requests */
673 ipu6_configure_vc_mechanism(isp);
674
675 val = readl(isp->base + BUTTRESS_REG_SKU);
676 sku_id = FIELD_GET(GENMASK(6, 4), val);
677 version = FIELD_GET(GENMASK(3, 0), val);
678 dev_info(dev, "IPU%u-v%u[%x] hardware version %d\n", version, sku_id,
679 pdev->device, isp->hw_ver);
680
681 pm_runtime_put_noidle(dev);
682 pm_runtime_allow(dev);
683
684 isp->bus_ready_to_probe = true;
685
686 return 0;
687
688 out_free_irq:
689 devm_free_irq(dev, pdev->irq, isp);
690 out_ipu6_bus_del_devices:
691 if (isp->psys) {
692 ipu6_cpd_free_pkg_dir(isp->psys);
693 ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
694 }
695 if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu))
696 ipu6_mmu_cleanup(isp->psys->mmu);
697 if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu))
698 ipu6_mmu_cleanup(isp->isys->mmu);
699 ipu6_bus_del_devices(pdev);
700 release_firmware(isp->cpd_fw);
701 buttress_exit:
702 ipu6_buttress_exit(isp);
703
704 return ret;
705 }
706
/*
 * Teardown mirror of ipu6_pci_probe().
 *
 * The MMU pointers are cached up front because ipu6_bus_del_devices()
 * releases the ISYS/PSYS bus devices, after which isp->isys/isp->psys
 * must not be dereferenced anymore.
 */
static void ipu6_pci_remove(struct pci_dev *pdev)
{
	struct ipu6_device *isp = pci_get_drvdata(pdev);
	struct ipu6_mmu *isys_mmu = isp->isys->mmu;
	struct ipu6_mmu *psys_mmu = isp->psys->mmu;

	devm_free_irq(&pdev->dev, pdev->irq, isp);
	ipu6_cpd_free_pkg_dir(isp->psys);

	ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
	ipu6_buttress_exit(isp);

	ipu6_bus_del_devices(pdev);

	/* Balance the pm_runtime_put_noidle()/_allow() done in probe */
	pm_runtime_forbid(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	release_firmware(isp->cpd_fw);

	ipu6_mmu_cleanup(psys_mmu);
	ipu6_mmu_cleanup(isys_mmu);
}
729
ipu6_pci_reset_prepare(struct pci_dev * pdev)730 static void ipu6_pci_reset_prepare(struct pci_dev *pdev)
731 {
732 struct ipu6_device *isp = pci_get_drvdata(pdev);
733
734 pm_runtime_forbid(&isp->pdev->dev);
735 }
736
ipu6_pci_reset_done(struct pci_dev * pdev)737 static void ipu6_pci_reset_done(struct pci_dev *pdev)
738 {
739 struct ipu6_device *isp = pci_get_drvdata(pdev);
740
741 ipu6_buttress_restore(isp);
742 if (isp->secure_mode)
743 ipu6_buttress_reset_authentication(isp);
744
745 isp->need_ipc_reset = true;
746 pm_runtime_allow(&isp->pdev->dev);
747 }
748
749 /*
750 * PCI base driver code requires driver to provide these to enable
751 * PCI device level PM state transitions (D0<->D3)
752 */
/* Nothing to save on suspend; state is rebuilt in ipu6_resume(). */
static int ipu6_suspend(struct device *dev)
{
	return 0;
}
757
/*
 * System resume: reconfigure VC arbitration, re-detect secure mode,
 * restore the buttress, reset the CSE IPC link and re-authenticate the
 * firmware (with PSYS powered up for the duration).
 *
 * Always returns 0 so the PM core never fails the resume; errors are
 * only logged.
 */
static int ipu6_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ipu6_device *isp = pci_get_drvdata(pdev);
	struct ipu6_buttress *b = &isp->buttress;
	int ret;

	/* Configure the arbitration mechanisms for VC requests */
	ipu6_configure_vc_mechanism(isp);

	isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
	dev_info(dev, "IPU6 in %s mode\n",
		 isp->secure_mode ? "secure" : "non-secure");

	ipu6_buttress_restore(isp);

	ret = ipu6_buttress_ipc_reset(isp, &b->cse);
	if (ret)
		dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n");

	/* Authentication needs PSYS powered; skip it if we cannot resume */
	ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
	if (ret < 0) {
		dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n");
		return 0;
	}

	ret = ipu6_buttress_authenticate(isp);
	if (ret)
		dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", ret);

	pm_runtime_put(&isp->psys->auxdev.dev);

	return 0;
}
792
/*
 * Runtime resume: re-apply VC arbitration and buttress state.  The CSE
 * IPC link is reset only when a PCI reset happened since the last
 * resume (flag set in ipu6_pci_reset_done()).  Always returns 0;
 * errors are only logged.
 */
static int ipu6_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ipu6_device *isp = pci_get_drvdata(pdev);
	int ret;

	ipu6_configure_vc_mechanism(isp);
	ipu6_buttress_restore(isp);

	if (isp->need_ipc_reset) {
		struct ipu6_buttress *b = &isp->buttress;

		isp->need_ipc_reset = false;
		ret = ipu6_buttress_ipc_reset(isp, &b->cse);
		if (ret)
			dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");
	}

	return 0;
}
813
/* Shared by system sleep and runtime PM; suspend is a no-op on both. */
static const struct dev_pm_ops ipu6_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(&ipu6_suspend, &ipu6_resume)
	RUNTIME_PM_OPS(&ipu6_suspend, &ipu6_runtime_resume, NULL)
};
818
819 MODULE_DEVICE_TABLE(pci, ipu6_pci_tbl);
820
/* PCI reset hooks: quiesce runtime PM around the reset, then restore */
static const struct pci_error_handlers pci_err_handlers = {
	.reset_prepare = ipu6_pci_reset_prepare,
	.reset_done = ipu6_pci_reset_done,
};
825
/* PCI driver glue; the ID table comes from media/ipu6-pci-table.h */
static struct pci_driver ipu6_pci_driver = {
	.name = IPU6_NAME,
	.id_table = ipu6_pci_tbl,
	.probe = ipu6_pci_probe,
	.remove = ipu6_pci_remove,
	.driver = {
		.pm = pm_ptr(&ipu6_pm_ops),
	},
	.err_handler = &pci_err_handlers,
};
836
837 module_pci_driver(ipu6_pci_driver);
838
839 MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
840 MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
841 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
842 MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
843 MODULE_AUTHOR("Qingwu Zhang <qingwu.zhang@intel.com>");
844 MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
845 MODULE_AUTHOR("Hongju Wang <hongju.wang@intel.com>");
846 MODULE_LICENSE("GPL");
847 MODULE_DESCRIPTION("Intel IPU6 PCI driver");
848