xref: /linux/drivers/dma/idxd/registers.h (revision cbac924200b838cfb8d8b1415113d788089dc50b)
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_

/* PCI Config */
#define PCI_DEVICE_ID_INTEL_DSA_SPR0	0x0b25
#define PCI_DEVICE_ID_INTEL_IAX_SPR0	0x0cfe

#define DEVICE_VERSION_1		0x100
#define DEVICE_VERSION_2		0x200

#define IDXD_MMIO_BAR		0
#define IDXD_WQ_BAR		2
#define IDXD_PORTAL_SIZE	PAGE_SIZE

/* MMIO Device BAR0 Registers */
#define IDXD_VER_OFFSET			0x00
#define IDXD_VER_MAJOR_MASK		0xf0
#define IDXD_VER_MINOR_MASK		0x0f
#define GET_IDXD_VER_MAJOR(x)		(((x) & IDXD_VER_MAJOR_MASK) >> 4)
#define GET_IDXD_VER_MINOR(x)		((x) & IDXD_VER_MINOR_MASK)

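/*
 * Example (illustrative sketch, not part of this header): decoding the
 * version register from a mapped BAR0. "reg_base" is a hypothetical
 * ioremap'ed pointer to the device's MMIO BAR0 region.
 *
 *	u32 ver = ioread32(reg_base + IDXD_VER_OFFSET);
 *	unsigned int major = GET_IDXD_VER_MAJOR(ver);
 *	unsigned int minor = GET_IDXD_VER_MINOR(ver);
 */
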
union gen_cap_reg {
	struct {
		u64 block_on_fault:1;
		u64 overlap_copy:1;
		u64 cache_control_mem:1;
		u64 cache_control_cache:1;
		u64 cmd_cap:1;
		u64 rsvd:3;
		u64 dest_readback:1;
		u64 drain_readback:1;
		u64 rsvd2:6;
		u64 max_xfer_shift:5;
		u64 max_batch_shift:4;
		u64 max_ims_mult:6;
		u64 config_en:1;
		u64 rsvd3:32;
	};
	u64 bits;
} __packed;
#define IDXD_GENCAP_OFFSET		0x10

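/*
 * Example (illustrative sketch): reading GENCAP and deriving the maximum
 * transfer and batch sizes. "reg_base" is a hypothetical mapped BAR0
 * pointer; ioread64() is assumed to be available (e.g. via the
 * io-64-nonatomic helpers).
 *
 *	union gen_cap_reg gen_cap;
 *
 *	gen_cap.bits = ioread64(reg_base + IDXD_GENCAP_OFFSET);
 *	max_xfer_bytes = 1ULL << gen_cap.max_xfer_shift;
 *	max_batch_size = 1U << gen_cap.max_batch_shift;
 */
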
union wq_cap_reg {
	struct {
		u64 total_wq_size:16;
		u64 num_wqs:8;
		u64 wqcfg_size:4;
		u64 rsvd:20;
		u64 shared_mode:1;
		u64 dedicated_mode:1;
		u64 wq_ats_support:1;
		u64 priority:1;
		u64 occupancy:1;
		u64 occupancy_int:1;
		u64 rsvd3:10;
	};
	u64 bits;
} __packed;
#define IDXD_WQCAP_OFFSET		0x20
#define IDXD_WQCFG_MIN			5

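/*
 * Example (illustrative sketch): WQCAP.wqcfg_size encodes the per-WQ
 * WQCFG size as a power of two, with IDXD_WQCFG_MIN (2^5 = 32 bytes) as
 * the floor. A driver might derive the WQCFG stride roughly as below;
 * "wqcap" and "wqcfg_size" are local names used only for illustration.
 *
 *	wqcap.bits = ioread64(reg_base + IDXD_WQCAP_OFFSET);
 *	wqcfg_size = 1 << max_t(u32, wqcap.wqcfg_size, IDXD_WQCFG_MIN);
 */
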
union group_cap_reg {
	struct {
		u64 num_groups:8;
		u64 total_rdbufs:8;	/* formerly total_tokens */
		u64 rdbuf_ctrl:1;	/* formerly token_en */
		u64 rdbuf_limit:1;	/* formerly token_limit */
		u64 rsvd:46;
	};
	u64 bits;
} __packed;
#define IDXD_GRPCAP_OFFSET		0x30

union engine_cap_reg {
	struct {
		u64 num_engines:8;
		u64 rsvd:56;
	};
	u64 bits;
} __packed;

#define IDXD_ENGCAP_OFFSET		0x38

#define IDXD_OPCAP_NOOP			0x0001
#define IDXD_OPCAP_BATCH		0x0002
#define IDXD_OPCAP_MEMMOVE		0x0008
struct opcap {
	u64 bits[4];
};

#define IDXD_OPCAP_OFFSET		0x40

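/*
 * Example (illustrative sketch): the operation capability register is a
 * 256-bit bitmap read as four 64-bit words; the IDXD_OPCAP_* masks above
 * apply to the first word. "op_cap" is a local struct opcap used only
 * for illustration.
 *
 *	for (i = 0; i < 4; i++)
 *		op_cap.bits[i] = ioread64(reg_base + IDXD_OPCAP_OFFSET +
 *					  i * sizeof(u64));
 *	if (op_cap.bits[0] & IDXD_OPCAP_MEMMOVE)
 *		memmove_supported = true;
 */
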
#define IDXD_TABLE_OFFSET		0x60
union offsets_reg {
	struct {
		u64 grpcfg:16;
		u64 wqcfg:16;
		u64 msix_perm:16;
		u64 ims:16;
		u64 perfmon:16;
		u64 rsvd:48;
	};
	u64 bits[2];
} __packed;

#define IDXD_TABLE_MULT			0x100

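/*
 * Example (illustrative sketch): the offsets table holds 16-bit entries
 * that are scaled by IDXD_TABLE_MULT to get the byte offset of each
 * register block from the start of BAR0. Local names are illustrative.
 *
 *	union offsets_reg offsets;
 *
 *	offsets.bits[0] = ioread64(reg_base + IDXD_TABLE_OFFSET);
 *	offsets.bits[1] = ioread64(reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
 *	grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
 *	wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
 *	perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
 */
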
#define IDXD_GENCFG_OFFSET		0x80
union gencfg_reg {
	struct {
		u32 rdbuf_limit:8;
		u32 rsvd:4;
		u32 user_int_en:1;
		u32 rsvd2:19;
	};
	u32 bits;
} __packed;

#define IDXD_GENCTRL_OFFSET		0x88
union genctrl_reg {
	struct {
		u32 softerr_int_en:1;
		u32 halt_int_en:1;
		u32 rsvd:30;
	};
	u32 bits;
} __packed;

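/*
 * Example (illustrative sketch): enabling software-error and halt
 * interrupt reporting with a read-modify-write of GENCTRL.
 *
 *	union genctrl_reg genctrl;
 *
 *	genctrl.bits = ioread32(reg_base + IDXD_GENCTRL_OFFSET);
 *	genctrl.softerr_int_en = 1;
 *	genctrl.halt_int_en = 1;
 *	iowrite32(genctrl.bits, reg_base + IDXD_GENCTRL_OFFSET);
 */
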
#define IDXD_GENSTATS_OFFSET		0x90
union gensts_reg {
	struct {
		u32 state:2;
		u32 reset_type:2;
		u32 rsvd:28;
	};
	u32 bits;
} __packed;

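/*
 * Example (illustrative sketch): checking the device state reported by
 * GENSTS against the enum values below.
 *
 *	union gensts_reg gensts;
 *
 *	gensts.bits = ioread32(reg_base + IDXD_GENSTATS_OFFSET);
 *	if (gensts.state == IDXD_DEVICE_STATE_HALT)
 *		halted = true;
 */
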
enum idxd_device_status_state {
	IDXD_DEVICE_STATE_DISABLED = 0,
	IDXD_DEVICE_STATE_ENABLED,
	IDXD_DEVICE_STATE_DRAIN,
	IDXD_DEVICE_STATE_HALT,
};

enum idxd_device_reset_type {
	IDXD_DEVICE_RESET_SOFTWARE = 0,
	IDXD_DEVICE_RESET_FLR,
	IDXD_DEVICE_RESET_WARM,
	IDXD_DEVICE_RESET_COLD,
};

#define IDXD_INTCAUSE_OFFSET		0x98
#define IDXD_INTC_ERR			0x01
#define IDXD_INTC_CMD			0x02
#define IDXD_INTC_OCCUPY		0x04
#define IDXD_INTC_PERFMON_OVFL		0x08
#define IDXD_INTC_HALT_STATE		0x10
#define IDXD_INTC_INT_HANDLE_REVOKED	0x80000000

#define IDXD_CMD_OFFSET			0xa0
union idxd_command_reg {
	struct {
		u32 operand:20;
		u32 cmd:5;
		u32 rsvd:6;
		u32 int_req:1;
	};
	u32 bits;
} __packed;

enum idxd_cmd {
	IDXD_CMD_ENABLE_DEVICE = 1,
	IDXD_CMD_DISABLE_DEVICE,
	IDXD_CMD_DRAIN_ALL,
	IDXD_CMD_ABORT_ALL,
	IDXD_CMD_RESET_DEVICE,
	IDXD_CMD_ENABLE_WQ,
	IDXD_CMD_DISABLE_WQ,
	IDXD_CMD_DRAIN_WQ,
	IDXD_CMD_ABORT_WQ,
	IDXD_CMD_RESET_WQ,
	IDXD_CMD_DRAIN_PASID,
	IDXD_CMD_ABORT_PASID,
	IDXD_CMD_REQUEST_INT_HANDLE,
	IDXD_CMD_RELEASE_INT_HANDLE,
};

#define CMD_INT_HANDLE_IMS		0x10000

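/*
 * Example (illustrative sketch): issuing a device command by composing
 * the command register and writing it to IDXD_CMD_OFFSET. For WQ-scoped
 * commands the operand carries the WQ index; "wq_id" below is a
 * hypothetical index. Completion is then observed via CMDSTS (see the
 * sketch after the CMDSTS definitions below).
 *
 *	union idxd_command_reg cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.cmd = IDXD_CMD_ENABLE_WQ;
 *	cmd.operand = wq_id;
 *	iowrite32(cmd.bits, reg_base + IDXD_CMD_OFFSET);
 */
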
#define IDXD_CMDSTS_OFFSET		0xa8
union cmdsts_reg {
	struct {
		u8 err;
		u16 result;
		u8 rsvd:7;
		u8 active:1;
	};
	u32 bits;
} __packed;
#define IDXD_CMDSTS_ACTIVE		0x80000000
#define IDXD_CMDSTS_ERR_MASK		0xff
#define IDXD_CMDSTS_RES_SHIFT		8

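/*
 * Example (illustrative sketch): polling CMDSTS for completion of a
 * previously issued command and extracting the error code and result.
 *
 *	u32 sts;
 *
 *	do {
 *		sts = ioread32(reg_base + IDXD_CMDSTS_OFFSET);
 *		cpu_relax();
 *	} while (sts & IDXD_CMDSTS_ACTIVE);
 *
 *	err = sts & IDXD_CMDSTS_ERR_MASK;
 *	result = (sts >> IDXD_CMDSTS_RES_SHIFT) & 0xffff;
 */
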
enum idxd_cmdsts_err {
	IDXD_CMDSTS_SUCCESS = 0,
	IDXD_CMDSTS_INVAL_CMD,
	IDXD_CMDSTS_INVAL_WQIDX,
	IDXD_CMDSTS_HW_ERR,
	/* enable device errors */
	IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
	IDXD_CMDSTS_ERR_CONFIG,
	IDXD_CMDSTS_ERR_BUSMASTER_EN,
	IDXD_CMDSTS_ERR_PASID_INVAL,
	IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
	IDXD_CMDSTS_ERR_GRP_CONFIG,
	IDXD_CMDSTS_ERR_GRP_CONFIG2,
	IDXD_CMDSTS_ERR_GRP_CONFIG3,
	IDXD_CMDSTS_ERR_GRP_CONFIG4,
	/* enable wq errors */
	IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
	IDXD_CMDSTS_ERR_WQ_ENABLED,
	IDXD_CMDSTS_ERR_WQ_SIZE,
	IDXD_CMDSTS_ERR_WQ_PRIOR,
	IDXD_CMDSTS_ERR_WQ_MODE,
	IDXD_CMDSTS_ERR_BOF_EN,
	IDXD_CMDSTS_ERR_PASID_EN,
	IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
	IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
	/* disable device errors */
	IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
	/* disable WQ, drain WQ, abort WQ, reset WQ */
	IDXD_CMDSTS_ERR_DEV_NOT_EN,
	/* request interrupt handle */
	IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
	IDXD_CMDSTS_ERR_NO_HANDLE,
};

#define IDXD_CMDCAP_OFFSET		0xb0

#define IDXD_SWERR_OFFSET		0xc0
#define IDXD_SWERR_VALID		0x00000001
#define IDXD_SWERR_OVERFLOW		0x00000002
#define IDXD_SWERR_ACK			(IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
union sw_err_reg {
	struct {
		u64 valid:1;
		u64 overflow:1;
		u64 desc_valid:1;
		u64 wq_idx_valid:1;
		u64 batch:1;
		u64 fault_rw:1;
		u64 priv:1;
		u64 rsvd:1;
		u64 error:8;
		u64 wq_idx:8;
		u64 rsvd2:8;
		u64 operation:8;
		u64 pasid:20;
		u64 rsvd3:4;

		u64 batch_idx:16;
		u64 rsvd4:16;
		u64 invalid_flags:32;

		u64 fault_addr;

		u64 rsvd5;
	};
	u64 bits[4];
} __packed;

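/*
 * Example (illustrative sketch): capturing the 256-bit software error
 * record and acknowledging it by writing back the valid/overflow bits.
 *
 *	union sw_err_reg sw_err;
 *
 *	for (i = 0; i < 4; i++)
 *		sw_err.bits[i] = ioread64(reg_base + IDXD_SWERR_OFFSET +
 *					  i * sizeof(u64));
 *	if (sw_err.valid)
 *		iowrite64(sw_err.bits[0] & IDXD_SWERR_ACK,
 *			  reg_base + IDXD_SWERR_OFFSET);
 */
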
union msix_perm {
	struct {
		u32 rsvd:2;
		u32 ignore:1;
		u32 pasid_en:1;
		u32 rsvd2:8;
		u32 pasid:20;
	};
	u32 bits;
} __packed;

union group_flags {
	struct {
		u32 tc_a:3;
		u32 tc_b:3;
		u32 rsvd:1;
		u32 use_rdbuf_limit:1;
		u32 rdbufs_reserved:8;
		u32 rsvd2:4;
		u32 rdbufs_allowed:8;
		u32 rsvd3:4;
	};
	u32 bits;
} __packed;

struct grpcfg {
	u64 wqs[4];
	u64 engines;
	union group_flags flags;
} __packed;

union wqcfg {
	struct {
		/* bytes 0-3 */
		u16 wq_size;
		u16 rsvd;

		/* bytes 4-7 */
		u16 wq_thresh;
		u16 rsvd1;

		/* bytes 8-11 */
		u32 mode:1;	/* shared or dedicated */
		u32 bof:1;	/* block on fault */
		u32 wq_ats_disable:1;
		u32 rsvd2:1;
		u32 priority:4;
		u32 pasid:20;
		u32 pasid_en:1;
		u32 priv:1;
		u32 rsvd3:2;

		/* bytes 12-15 */
		u32 max_xfer_shift:5;
		u32 max_batch_shift:4;
		u32 rsvd4:23;

		/* bytes 16-19 */
		u16 occupancy_inth;
		u16 occupancy_table_sel:1;
		u16 rsvd5:15;

		/* bytes 20-23 */
		u16 occupancy_limit;
		u16 occupancy_int_en:1;
		u16 rsvd6:15;

		/* bytes 24-27 */
		u16 occupancy;
		u16 occupancy_int:1;
		u16 rsvd7:12;
		u16 mode_support:1;
		u16 wq_state:2;

		/* bytes 28-31 */
		u32 rsvd8;
	};
	u32 bits[8];
} __packed;

#define WQCFG_PASID_IDX		2
#define WQCFG_OCCUP_IDX		6

#define WQCFG_OCCUP_MASK	0xffff

/*
 * This macro calculates the offset into the WQCFG register block.
 * idxd - struct idxd *
 * n - wq id
 * ofs - the index of the 32-bit dword within that WQ's config registers
 *
 * The WQCFG register block is divided into one group of registers per WQ.
 * The n index selects the register group for that particular WQ. Each
 * register is 32 bits; ofs selects which register in the group to access.
 */
#define WQCFG_OFFSET(_idxd_dev, n, ofs) \
({\
	typeof(_idxd_dev) __idxd_dev = (_idxd_dev);	\
	(__idxd_dev)->wqcfg_offset + (n) * (__idxd_dev)->wqcfg_size + sizeof(u32) * (ofs);	\
})

#define WQCFG_STRIDES(_idxd_dev) ((_idxd_dev)->wqcfg_size / sizeof(u32))

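/*
 * Example (illustrative sketch): writing a WQ's configuration one 32-bit
 * register at a time, then reading back the occupancy field. "idxd",
 * "wq_id" and "wqcfg" are hypothetical locals; idxd->reg_base is assumed
 * to be the mapped BAR0.
 *
 *	for (i = 0; i < WQCFG_STRIDES(idxd); i++)
 *		iowrite32(wqcfg->bits[i],
 *			  idxd->reg_base + WQCFG_OFFSET(idxd, wq_id, i));
 *
 *	occup = ioread32(idxd->reg_base +
 *			 WQCFG_OFFSET(idxd, wq_id, WQCFG_OCCUP_IDX)) &
 *		WQCFG_OCCUP_MASK;
 */
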
#define GRPCFG_SIZE		64
#define GRPWQCFG_STRIDES	4

/*
 * This macro calculates the offset into the GRPCFG register block.
 * idxd - struct idxd *
 * n - group id
 * ofs - the index of the 64-bit qword within that group's GRPWQCFG registers
 *
 * The GRPCFG register block is divided into one set of registers per group,
 * each GRPCFG_SIZE bytes long. The n index selects the register set for that
 * particular group. The WQ field of each set is GRPWQCFG_STRIDES 64-bit
 * registers; ofs selects which of those registers to access.
 */
#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
					   (n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
#define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32)
#define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40)

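/*
 * Example (illustrative sketch): programming one group's configuration,
 * matching the struct grpcfg layout above (4 x u64 WQ bitmap, u64 engine
 * bitmap, 32-bit flags). "idxd", "grp_id" and "grpcfg" are hypothetical
 * locals.
 *
 *	for (i = 0; i < GRPWQCFG_STRIDES; i++)
 *		iowrite64(grpcfg->wqs[i],
 *			  idxd->reg_base + GRPWQCFG_OFFSET(idxd, grp_id, i));
 *	iowrite64(grpcfg->engines,
 *		  idxd->reg_base + GRPENGCFG_OFFSET(idxd, grp_id));
 *	iowrite32(grpcfg->flags.bits,
 *		  idxd->reg_base + GRPFLGCFG_OFFSET(idxd, grp_id));
 */
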
/* Performance monitor registers */
#define IDXD_PERFCAP_OFFSET		0x0
union idxd_perfcap {
	struct {
		u64 num_perf_counter:6;
		u64 rsvd1:2;
		u64 counter_width:8;
		u64 num_event_category:4;
		u64 global_event_category:16;
		u64 filter:8;
		u64 rsvd2:8;
		u64 cap_per_counter:1;
		u64 writeable_counter:1;
		u64 counter_freeze:1;
		u64 overflow_interrupt:1;
		u64 rsvd3:8;
	};
	u64 bits;
} __packed;

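/*
 * Example (illustrative sketch): the performance monitor offsets below
 * appear to be relative to the perfmon block located via the offsets
 * table, so PERFCAP would be read roughly as below. "perfmon_offset" is
 * the hypothetical scaled offset from union offsets_reg.
 *
 *	perfcap.bits = ioread64(reg_base + perfmon_offset +
 *				IDXD_PERFCAP_OFFSET);
 *	num_counters = perfcap.num_perf_counter;
 */
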
#define IDXD_EVNTCAP_OFFSET		0x80
union idxd_evntcap {
	struct {
		u64 events:28;
		u64 rsvd:36;
	};
	u64 bits;
} __packed;

struct idxd_event {
	union {
		struct {
			u32 event_category:4;
			u32 events:28;
		};
		u32 val;
	};
} __packed;

#define IDXD_CNTRCAP_OFFSET		0x800
struct idxd_cntrcap {
	union {
		struct {
			u32 counter_width:8;
			u32 rsvd:20;
			u32 num_events:4;
		};
		u32 val;
	};
	struct idxd_event events[];
} __packed;

#define IDXD_PERFRST_OFFSET		0x10
union idxd_perfrst {
	struct {
		u32 perfrst_config:1;
		u32 perfrst_counter:1;
		u32 rsvd:30;
	};
	u32 val;
} __packed;

#define IDXD_OVFSTATUS_OFFSET		0x30
#define IDXD_PERFFRZ_OFFSET		0x20
#define IDXD_CNTRCFG_OFFSET		0x100
union idxd_cntrcfg {
	struct {
		u64 enable:1;
		u64 interrupt_ovf:1;
		u64 global_freeze_ovf:1;
		u64 rsvd1:5;
		u64 event_category:4;
		u64 rsvd2:20;
		u64 events:28;
		u64 rsvd3:4;
	};
	u64 val;
} __packed;

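/*
 * Example (illustrative sketch): enabling counter 0 for a given event
 * category and event mask. This assumes the first 64-bit counter config
 * register sits at IDXD_CNTRCFG_OFFSET within the perfmon block;
 * "cntrcfg", "category" and "event_mask" are hypothetical locals.
 *
 *	cntrcfg.val = 0;
 *	cntrcfg.event_category = category;
 *	cntrcfg.events = event_mask;
 *	cntrcfg.enable = 1;
 *	iowrite64(cntrcfg.val, reg_base + perfmon_offset +
 *		  IDXD_CNTRCFG_OFFSET);
 */
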
#define IDXD_FLTCFG_OFFSET		0x300

#define IDXD_CNTRDATA_OFFSET		0x200
union idxd_cntrdata {
	struct {
		u64 event_count_value;
	};
	u64 val;
} __packed;

union event_cfg {
	struct {
		u64 event_cat:4;
		u64 event_enc:28;
	};
	u64 val;
} __packed;

union filter_cfg {
	struct {
		u64 wq:32;
		u64 tc:8;
		u64 pg_sz:4;
		u64 xfer_sz:8;
		u64 eng:8;
	};
	u64 val;
} __packed;

#endif