/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2013 - 2024 Intel Corporation */
3 
4 #ifndef IPU6_H
5 #define IPU6_H
6 
7 #include <linux/list.h>
8 #include <linux/pci.h>
9 #include <linux/types.h>
10 
11 #include "ipu6-buttress.h"
12 
struct firmware;
struct pci_dev;
struct ipu6_bus_device;

#define IPU6_NAME			"intel-ipu6"
#define IPU6_MEDIA_DEV_MODEL_NAME	"ipu6"

/* Firmware image paths, one binary per IPU6 hardware variant. */
#define IPU6SE_FIRMWARE_NAME		"intel/ipu/ipu6se_fw.bin"
#define IPU6EP_FIRMWARE_NAME		"intel/ipu/ipu6ep_fw.bin"
#define IPU6_FIRMWARE_NAME		"intel/ipu/ipu6_fw.bin"
#define IPU6EPMTL_FIRMWARE_NAME		"intel/ipu/ipu6epmtl_fw.bin"
#define IPU6EPADLN_FIRMWARE_NAME	"intel/ipu/ipu6epadln_fw.bin"
25 
/*
 * IPU6 hardware generations. The values are deliberately non-contiguous;
 * NOTE(review): presumably fixed by an external ABI (firmware/platform
 * data), so do not renumber — confirm before changing.
 */
enum ipu6_version {
	IPU6_VER_INVALID = 0,
	IPU6_VER_6 = 1,
	IPU6_VER_6SE = 3,
	IPU6_VER_6EP = 5,
	IPU6_VER_6EP_MTL = 6,
};
33 
/*
 * Mapping from IPU6 hardware version to SoC family:
 * IPU6       - TGL
 * IPU6SE     - JSL
 * IPU6EP     - ADL/RPL
 * IPU6EP_MTL - MTL
 */
is_ipu6se(u8 hw_ver)40 static inline bool is_ipu6se(u8 hw_ver)
41 {
42 	return hw_ver == IPU6_VER_6SE;
43 }
44 
is_ipu6ep(u8 hw_ver)45 static inline bool is_ipu6ep(u8 hw_ver)
46 {
47 	return hw_ver == IPU6_VER_6EP;
48 }
49 
is_ipu6ep_mtl(u8 hw_ver)50 static inline bool is_ipu6ep_mtl(u8 hw_ver)
51 {
52 	return hw_ver == IPU6_VER_6EP_MTL;
53 }
54 
is_ipu6_tgl(u8 hw_ver)55 static inline bool is_ipu6_tgl(u8 hw_ver)
56 {
57 	return hw_ver == IPU6_VER_6;
58 }
59 
/*
 * ISYS DMA can overshoot. For higher resolutions the over-allocation is one
 * line, but it must be at minimum 1024 bytes. The value could differ between
 * versions / generations, thus it is provided via platform data.
 */
#define IPU6_ISYS_OVERALLOC_MIN		1024

/* Physical pages in GDA is 128, page size is 2K for IPU6, 1K for others */
#define IPU6_DEVICE_GDA_NR_PAGES		128

/* Virtualization factor to calculate the available virtual pages */
#define IPU6_DEVICE_GDA_VIRT_FACTOR	32
72 
73 struct ipu6_device {
74 	struct pci_dev *pdev;
75 	struct list_head devices;
76 	struct ipu6_bus_device *isys;
77 	struct ipu6_bus_device *psys;
78 	struct ipu6_buttress buttress;
79 
80 	const struct firmware *cpd_fw;
81 	const char *cpd_fw_name;
82 	u32 cpd_metadata_cmpnt_size;
83 
84 	void __iomem *base;
85 	bool need_ipc_reset;
86 	bool secure_mode;
87 	u8 hw_ver;
88 	bool bus_ready_to_probe;
89 };
90 
#define IPU6_ISYS_NAME "isys"
#define IPU6_PSYS_NAME "psys"

#define IPU6_MMU_MAX_DEVICES		4
#define IPU6_MMU_ADDR_BITS		32
/* The firmware is accessible within the first 2 GiB only in non-secure mode. */
#define IPU6_MMU_ADDR_BITS_NON_SECURE	31

#define IPU6_MMU_MAX_TLB_L1_STREAMS	32
#define IPU6_MMU_MAX_TLB_L2_STREAMS	32
#define IPU6_MAX_LI_BLOCK_ADDR		128
#define IPU6_MAX_L2_BLOCK_ADDR		64

/* NOTE(review): *_NONSECURE_STREAM_ID_MAX are defined outside this header
 * (presumably in ipu6-buttress.h or platform headers) — verify.
 */
#define IPU6SE_ISYS_NUM_STREAMS          IPU6SE_NONSECURE_STREAM_ID_MAX
#define IPU6_ISYS_NUM_STREAMS            IPU6_NONSECURE_STREAM_ID_MAX
106 
/*
 * To maximize the IOSF utilization, IPU6 needs to send requests in bursts.
 * At the DMA interface with the buttress, there are CDC FIFOs with burst
 * collection capability. CDC FIFO burst collectors have a configurable
 * threshold and is configured based on the outcome of performance measurements.
 *
 * isys has 3 ports with IOSF interface for VC0, VC1 and VC2
 * psys has 4 ports with IOSF interface for VC0, VC1w, VC1r and VC2
 *
 * Threshold values are pre-defined and are arrived at after performance
 * evaluations on a type of IPU6
 */
#define IPU6_MAX_VC_IOSF_PORTS		4
120 
/*
 * IPU6 must configure the correct arbitration mechanism related to the IOSF
 * VC requests. There are two options per VC0 and VC1: 0 means rearbitrate on
 * stall and 1 means stall until the request is completed.
 */
#define IPU6_BTRS_ARB_MODE_TYPE_REARB	0
#define IPU6_BTRS_ARB_MODE_TYPE_STALL	1

/* Currently chosen arbitration mechanism for VC0 */
#define IPU6_BTRS_ARB_STALL_MODE_VC0	\
			IPU6_BTRS_ARB_MODE_TYPE_REARB

/* Currently chosen arbitration mechanism for VC1 */
#define IPU6_BTRS_ARB_STALL_MODE_VC1	\
			IPU6_BTRS_ARB_MODE_TYPE_REARB
136 
/*
 * MMU Invalidation HW bug workaround by ZLW mechanism
 *
 * Old IPU6 MMUV2 has a bug in the invalidation mechanism which might result in
 * wrong translation or replication of the translation. This will cause data
 * corruption. So we cannot directly use the MMU V2 invalidation registers
 * to invalidate the MMU. Instead, whenever an invalidate is called, we need to
 * clear the TLB by evicting all the valid translations by filling it with trash
 * buffer (which is guaranteed not to be used by any other processes). ZLW is
 * used to fill the L1 and L2 caches with the trash buffer translations. ZLW,
 * or Zero Length Write, is a pre-fetch mechanism to pre-fetch the pages in
 * advance to the L1 and L2 caches without triggering any memory operations.
 *
 * In MMU V2, L1 -> 16 streams and 64 blocks, maximum 16 blocks per stream
 * One L1 block has 16 entries, hence points to 16 * 4K pages
 * L2 -> 16 streams and 32 blocks. 2 blocks per streams
 * One L2 block maps to 1024 L1 entries, hence points to 4MB address range
 * 2 blocks per L2 stream means, 1 stream points to 8MB range
 *
 * As we need to clear the caches and 8MB being the biggest cache size, we need
 * to have trash buffer which points to 8MB address range. As these trash
 * buffers are not used for any memory transactions, we need only the least
 * amount of physical memory. So we reserve 8MB IOVA address range but only
 * one page is reserved from physical memory. Each of this 8MB IOVA address
 * range is then mapped to the same physical memory page.
 */
/* One L2 entry maps 1024 L1 entries and one L1 entry per page */
#define IPU6_MMUV2_L2_RANGE		(1024 * PAGE_SIZE)
/* Max L2 blocks per stream */
#define IPU6_MMUV2_MAX_L2_BLOCKS	2
/* Max L1 blocks per stream */
#define IPU6_MMUV2_MAX_L1_BLOCKS	16
#define IPU6_MMUV2_TRASH_RANGE	(IPU6_MMUV2_L2_RANGE * IPU6_MMUV2_MAX_L2_BLOCKS)
/* Entries per L1 block */
#define MMUV2_ENTRIES_PER_L1_BLOCK	16
#define MMUV2_TRASH_L1_BLOCK_OFFSET	(MMUV2_ENTRIES_PER_L1_BLOCK * PAGE_SIZE)
#define MMUV2_TRASH_L2_BLOCK_OFFSET	IPU6_MMUV2_L2_RANGE
174 
175 /*
176  * In some of the IPU6 MMUs, there is provision to configure L1 and L2 page
177  * table caches. Both these L1 and L2 caches are divided into multiple sections
178  * called streams. There is maximum 16 streams for both caches. Each of these
179  * sections are subdivided into multiple blocks. When nr_l1streams = 0 and
180  * nr_l2streams = 0, means the MMU is of type MMU_V1 and do not support
181  * L1/L2 page table caches.
182  *
183  * L1 stream per block sizes are configurable and varies per usecase.
184  * L2 has constant block sizes - 2 blocks per stream.
185  *
186  * MMU1 support pre-fetching of the pages to have less cache lookup misses. To
187  * enable the pre-fetching, MMU1 AT (Address Translator) device registers
188  * need to be configured.
189  *
190  * There are four types of memory accesses which requires ZLW configuration.
191  * ZLW(Zero Length Write) is a mechanism to enable VT-d pre-fetching on IOMMU.
192  *
193  * 1. Sequential Access or 1D mode
194  *	Set ZLW_EN -> 1
195  *	set ZLW_PAGE_CROSS_1D -> 1
196  *	Set ZLW_N to "N" pages so that ZLW will be inserte N pages ahead where
197  *		  N is pre-defined and hardcoded in the platform data
198  *	Set ZLW_2D -> 0
199  *
200  * 2. ZLW 2D mode
201  *	Set ZLW_EN -> 1
202  *	set ZLW_PAGE_CROSS_1D -> 1,
203  *	Set ZLW_N -> 0
204  *	Set ZLW_2D -> 1
205  *
206  * 3. ZLW Enable (no 1D or 2D mode)
207  *	Set ZLW_EN -> 1
208  *	set ZLW_PAGE_CROSS_1D -> 0,
209  *	Set ZLW_N -> 0
210  *	Set ZLW_2D -> 0
211  *
212  * 4. ZLW disable
213  *	Set ZLW_EN -> 0
214  *	set ZLW_PAGE_CROSS_1D -> 0,
215  *	Set ZLW_N -> 0
216  *	Set ZLW_2D -> 0
217  *
218  * To configure the ZLW for the above memory access, four registers are
219  * available. Hence to track these four settings, we have the following entries
220  * in the struct ipu6_mmu_hw. Each of these entries are per stream and
221  * available only for the L1 streams.
222  *
223  * a. l1_zlw_en -> To track zlw enabled per stream (ZLW_EN)
224  * b. l1_zlw_1d_mode -> Track 1D mode per stream. ZLW inserted at page boundary
225  * c. l1_ins_zlw_ahead_pages -> to track how advance the ZLW need to be inserted
226  *			Insert ZLW request N pages ahead address.
227  * d. l1_zlw_2d_mode -> To track 2D mode per stream (ZLW_2D)
228  *
229  *
230  * Currently L1/L2 streams, blocks, AT ZLW configurations etc. are pre-defined
231  * as per the usecase specific calculations. Any change to this pre-defined
232  * table has to happen in sync with IPU6 FW.
233  */
234 struct ipu6_mmu_hw {
235 	union {
236 		unsigned long offset;
237 		void __iomem *base;
238 	};
239 	u32 info_bits;
240 	u8 nr_l1streams;
241 	/*
242 	 * L1 has variable blocks per stream - total of 64 blocks and maximum of
243 	 * 16 blocks per stream. Configurable by using the block start address
244 	 * per stream. Block start address is calculated from the block size
245 	 */
246 	u8 l1_block_sz[IPU6_MMU_MAX_TLB_L1_STREAMS];
247 	/* Is ZLW is enabled in each stream */
248 	bool l1_zlw_en[IPU6_MMU_MAX_TLB_L1_STREAMS];
249 	bool l1_zlw_1d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS];
250 	u8 l1_ins_zlw_ahead_pages[IPU6_MMU_MAX_TLB_L1_STREAMS];
251 	bool l1_zlw_2d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS];
252 
253 	u32 l1_stream_id_reg_offset;
254 	u32 l2_stream_id_reg_offset;
255 
256 	u8 nr_l2streams;
257 	/*
258 	 * L2 has fixed 2 blocks per stream. Block address is calculated
259 	 * from the block size
260 	 */
261 	u8 l2_block_sz[IPU6_MMU_MAX_TLB_L2_STREAMS];
262 	/* flag to track if WA is needed for successive invalidate HW bug */
263 	bool insert_read_before_invalidate;
264 };
265 
266 struct ipu6_mmu_pdata {
267 	u32 nr_mmus;
268 	struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES];
269 	int mmid;
270 };
271 
272 struct ipu6_isys_csi2_pdata {
273 	void __iomem *base;
274 };
275 
276 struct ipu6_isys_internal_csi2_pdata {
277 	u32 nports;
278 	u32 irq_mask;
279 	u32 ctrl0_irq_edge;
280 	u32 ctrl0_irq_clear;
281 	u32 ctrl0_irq_mask;
282 	u32 ctrl0_irq_enable;
283 	u32 ctrl0_irq_lnp;
284 	u32 ctrl0_irq_status;
285 	u32 fw_access_port_ofs;
286 };
287 
288 struct ipu6_isys_internal_tpg_pdata {
289 	u32 ntpgs;
290 	u32 *offsets;
291 	u32 *sels;
292 };
293 
294 struct ipu6_hw_variants {
295 	unsigned long offset;
296 	u32 nr_mmus;
297 	struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES];
298 	u8 cdc_fifos;
299 	u8 cdc_fifo_threshold[IPU6_MAX_VC_IOSF_PORTS];
300 	u32 dmem_offset;
301 	u32 spc_offset;
302 };
303 
304 struct ipu6_isys_internal_pdata {
305 	struct ipu6_isys_internal_csi2_pdata csi2;
306 	struct ipu6_hw_variants hw_variant;
307 	u32 num_parallel_streams;
308 	u32 isys_dma_overshoot;
309 	u32 sram_gran_shift;
310 	u32 sram_gran_size;
311 	u32 max_sram_size;
312 	u32 max_streams;
313 	u32 max_send_queues;
314 	u32 max_sram_blocks;
315 	u32 max_devq_size;
316 	u32 sensor_type_start;
317 	u32 sensor_type_end;
318 	u32 ltr;
319 	u32 memopen_threshold;
320 	bool enhanced_iwake;
321 };
322 
323 struct ipu6_isys_pdata {
324 	void __iomem *base;
325 	const struct ipu6_isys_internal_pdata *ipdata;
326 };
327 
328 struct ipu6_psys_internal_pdata {
329 	struct ipu6_hw_variants hw_variant;
330 };
331 
332 struct ipu6_psys_pdata {
333 	void __iomem *base;
334 	const struct ipu6_psys_internal_pdata *ipdata;
335 };
336 
337 int ipu6_fw_authenticate(void *data, u64 val);
338 void ipu6_configure_spc(struct ipu6_device *isp,
339 			const struct ipu6_hw_variants *hw_variant,
340 			int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
341 			dma_addr_t pkg_dir_dma_addr);
342 #endif /* IPU6_H */
343