/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_

#include <uapi/linux/idxd.h>

/* PCI Config */
#define DEVICE_VERSION_1		0x100
#define DEVICE_VERSION_2		0x200

#define IDXD_MMIO_BAR			0
#define IDXD_WQ_BAR			2
#define IDXD_PORTAL_SIZE		PAGE_SIZE

/* MMIO Device BAR0 Registers */
#define IDXD_VER_OFFSET			0x00
#define IDXD_VER_MAJOR_MASK		0xf0
#define IDXD_VER_MINOR_MASK		0x0f
#define GET_IDXD_VER_MAJOR(x)		(((x) & IDXD_VER_MAJOR_MASK) >> 4)
#define GET_IDXD_VER_MINOR(x)		((x) & IDXD_VER_MINOR_MASK)

union gen_cap_reg {
	struct {
		u64 block_on_fault:1;
		u64 overlap_copy:1;
		u64 cache_control_mem:1;
		u64 cache_control_cache:1;
		u64 cmd_cap:1;
		u64 rsvd:3;
		u64 dest_readback:1;
		u64 drain_readback:1;
		u64 rsvd2:3;
		u64 evl_support:2;
		u64 batch_continuation:1;
		u64 max_xfer_shift:5;
		u64 max_batch_shift:4;
		u64 max_ims_mult:6;
		u64 config_en:1;
		u64 rsvd3:32;
	};
	u64 bits;
} __packed;
#define IDXD_GENCAP_OFFSET		0x10
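/*
 * Illustrative sketch only (not part of the driver API; the helper name
 * below is made up): the shift-encoded capability fields express limits
 * as powers of two, e.g. a GENCAP value with max_xfer_shift == 31
 * advertises a 2 GB maximum transfer size.
 */
static inline u64 idxd_example_max_xfer_bytes(u64 gencap_bits)
{
	union gen_cap_reg cap;

	cap.bits = gencap_bits;
	return 1ULL << cap.max_xfer_shift;
}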
union wq_cap_reg {
	struct {
		u64 total_wq_size:16;
		u64 num_wqs:8;
		u64 wqcfg_size:4;
		u64 rsvd:20;
		u64 shared_mode:1;
		u64 dedicated_mode:1;
		u64 wq_ats_support:1;
		u64 priority:1;
		u64 occupancy:1;
		u64 occupancy_int:1;
		u64 op_config:1;
		u64 wq_prs_support:1;
		u64 rsvd4:8;
	};
	u64 bits;
} __packed;
#define IDXD_WQCAP_OFFSET		0x20
#define IDXD_WQCFG_MIN			5

union group_cap_reg {
	struct {
		u64 num_groups:8;
		u64 total_rdbufs:8;	/* formerly total_tokens */
		u64 rdbuf_ctrl:1;	/* formerly token_en */
		u64 rdbuf_limit:1;	/* formerly token_limit */
		u64 progress_limit:1;	/* descriptor and batch descriptor */
		u64 rsvd:45;
	};
	u64 bits;
} __packed;
#define IDXD_GRPCAP_OFFSET		0x30

union engine_cap_reg {
	struct {
		u64 num_engines:8;
		u64 rsvd:56;
	};
	u64 bits;
} __packed;

#define IDXD_ENGCAP_OFFSET		0x38

#define IDXD_OPCAP_NOOP			0x0001
#define IDXD_OPCAP_BATCH		0x0002
#define IDXD_OPCAP_MEMMOVE		0x0008
struct opcap {
	u64 bits[4];
};

#define IDXD_MAX_OPCAP_BITS		256U

#define IDXD_OPCAP_OFFSET		0x40

#define IDXD_TABLE_OFFSET		0x60
union offsets_reg {
	struct {
		u64 grpcfg:16;
		u64 wqcfg:16;
		u64 msix_perm:16;
		u64 ims:16;
		u64 perfmon:16;
		u64 rsvd:48;
	};
	u64 bits[2];
} __packed;

#define IDXD_TABLE_MULT			0x100

#define IDXD_GENCFG_OFFSET		0x80
union gencfg_reg {
	struct {
		u32 rdbuf_limit:8;
		u32 rsvd:4;
		u32 user_int_en:1;
		u32 evl_en:1;
		u32 rsvd2:18;
	};
	u32 bits;
} __packed;

#define IDXD_GENCTRL_OFFSET		0x88
union genctrl_reg {
	struct {
		u32 softerr_int_en:1;
		u32 halt_int_en:1;
		u32 evl_int_en:1;
		u32 rsvd:29;
	};
	u32 bits;
} __packed;

#define IDXD_GENSTATS_OFFSET		0x90
union gensts_reg {
	struct {
		u32 state:2;
		u32 reset_type:2;
		u32 rsvd:28;
	};
	u32 bits;
} __packed;

enum idxd_device_status_state {
	IDXD_DEVICE_STATE_DISABLED = 0,
	IDXD_DEVICE_STATE_ENABLED,
	IDXD_DEVICE_STATE_DRAIN,
	IDXD_DEVICE_STATE_HALT,
};

enum idxd_device_reset_type {
	IDXD_DEVICE_RESET_SOFTWARE = 0,
	IDXD_DEVICE_RESET_FLR,
	IDXD_DEVICE_RESET_WARM,
	IDXD_DEVICE_RESET_COLD,
};

#define IDXD_INTCAUSE_OFFSET		0x98
#define IDXD_INTC_ERR			0x01
#define IDXD_INTC_CMD			0x02
#define IDXD_INTC_OCCUPY		0x04
#define IDXD_INTC_PERFMON_OVFL		0x08
#define IDXD_INTC_HALT_STATE		0x10
#define IDXD_INTC_EVL			0x20
#define IDXD_INTC_INT_HANDLE_REVOKED	0x80000000

#define IDXD_CMD_OFFSET			0xa0
union idxd_command_reg {
	struct {
		u32 operand:20;
		u32 cmd:5;
		u32 rsvd:6;
		u32 int_req:1;
	};
	u32 bits;
} __packed;

enum idxd_cmd {
	IDXD_CMD_ENABLE_DEVICE = 1,
	IDXD_CMD_DISABLE_DEVICE,
	IDXD_CMD_DRAIN_ALL,
	IDXD_CMD_ABORT_ALL,
	IDXD_CMD_RESET_DEVICE,
	IDXD_CMD_ENABLE_WQ,
	IDXD_CMD_DISABLE_WQ,
	IDXD_CMD_DRAIN_WQ,
	IDXD_CMD_ABORT_WQ,
	IDXD_CMD_RESET_WQ,
	IDXD_CMD_DRAIN_PASID,
	IDXD_CMD_ABORT_PASID,
	IDXD_CMD_REQUEST_INT_HANDLE,
	IDXD_CMD_RELEASE_INT_HANDLE,
};

#define CMD_INT_HANDLE_IMS		0x10000
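/*
 * Illustrative sketch only (not part of the driver API; the helper name
 * below is made up): composing a command word before it is written to
 * the register at IDXD_CMD_OFFSET.  For WQ-scoped commands the WQ index
 * travels in the operand field; the cmdsts register below reports the
 * outcome.
 */
static inline u32 idxd_example_enable_wq_cmd(u32 wq_idx)
{
	union idxd_command_reg cmd = { .bits = 0 };

	cmd.cmd = IDXD_CMD_ENABLE_WQ;
	cmd.operand = wq_idx;
	return cmd.bits;
}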
#define IDXD_CMDSTS_OFFSET		0xa8
union cmdsts_reg {
	struct {
		u8 err;
		u16 result;
		u8 rsvd:7;
		u8 active:1;
	};
	u32 bits;
} __packed;
#define IDXD_CMDSTS_ACTIVE		0x80000000
#define IDXD_CMDSTS_ERR_MASK		0xff
#define IDXD_CMDSTS_RES_SHIFT		8

enum idxd_cmdsts_err {
	IDXD_CMDSTS_SUCCESS = 0,
	IDXD_CMDSTS_INVAL_CMD,
	IDXD_CMDSTS_INVAL_WQIDX,
	IDXD_CMDSTS_HW_ERR,
	/* enable device errors */
	IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
	IDXD_CMDSTS_ERR_CONFIG,
	IDXD_CMDSTS_ERR_BUSMASTER_EN,
	IDXD_CMDSTS_ERR_PASID_INVAL,
	IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
	IDXD_CMDSTS_ERR_GRP_CONFIG,
	IDXD_CMDSTS_ERR_GRP_CONFIG2,
	IDXD_CMDSTS_ERR_GRP_CONFIG3,
	IDXD_CMDSTS_ERR_GRP_CONFIG4,
	/* enable wq errors */
	IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
	IDXD_CMDSTS_ERR_WQ_ENABLED,
	IDXD_CMDSTS_ERR_WQ_SIZE,
	IDXD_CMDSTS_ERR_WQ_PRIOR,
	IDXD_CMDSTS_ERR_WQ_MODE,
	IDXD_CMDSTS_ERR_BOF_EN,
	IDXD_CMDSTS_ERR_PASID_EN,
	IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
	IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
	/* disable device errors */
	IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
	/* disable WQ, drain WQ, abort WQ, reset WQ */
	IDXD_CMDSTS_ERR_DEV_NOT_EN,
	/* request interrupt handle */
	IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
	IDXD_CMDSTS_ERR_NO_HANDLE,
};

#define IDXD_CMDCAP_OFFSET		0xb0

#define IDXD_SWERR_OFFSET		0xc0
#define IDXD_SWERR_VALID		0x00000001
#define IDXD_SWERR_OVERFLOW		0x00000002
#define IDXD_SWERR_ACK			(IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
union sw_err_reg {
	struct {
		u64 valid:1;
		u64 overflow:1;
		u64 desc_valid:1;
		u64 wq_idx_valid:1;
		u64 batch:1;
		u64 fault_rw:1;
		u64 priv:1;
		u64 rsvd:1;
		u64 error:8;
		u64 wq_idx:8;
		u64 rsvd2:8;
		u64 operation:8;
		u64 pasid:20;
		u64 rsvd3:4;

		u64 batch_idx:16;
		u64 rsvd4:16;
		u64 invalid_flags:32;

		u64 fault_addr;

		u64 rsvd5;
	};
	u64 bits[4];
} __packed;

union iaa_cap_reg {
	struct {
		u64 dec_aecs_format_ver:1;
		u64 drop_init_bits:1;
		u64 chaining:1;
		u64 force_array_output_mod:1;
		u64 load_part_aecs:1;
		u64 comp_early_abort:1;
		u64 nested_comp:1;
		u64 diction_comp:1;
		u64 header_gen:1;
		u64 crypto_gcm:1;
		u64 crypto_cfb:1;
		u64 crypto_xts:1;
		u64 rsvd:52;
	};
	u64 bits;
} __packed;

#define IDXD_IAACAP_OFFSET		0x180

#define IDXD_EVLCFG_OFFSET		0xe0
union evlcfg_reg {
	struct {
		u64 pasid_en:1;
		u64 priv:1;
		u64 rsvd:10;
		u64 base_addr:52;

		u64 size:16;
		u64 pasid:20;
		u64 rsvd2:28;
	};
	u64 bits[2];
} __packed;

#define IDXD_EVL_SIZE_MIN		0x0040
#define IDXD_EVL_SIZE_MAX		0xffff

union msix_perm {
	struct {
		u32 rsvd:2;
		u32 ignore:1;
		u32 pasid_en:1;
		u32 rsvd2:8;
		u32 pasid:20;
	};
	u32 bits;
} __packed;

union group_flags {
	struct {
		u64 tc_a:3;
		u64 tc_b:3;
		u64 rsvd:1;
		u64 use_rdbuf_limit:1;
		u64 rdbufs_reserved:8;
		u64 rsvd2:4;
		u64 rdbufs_allowed:8;
		u64 rsvd3:4;
		u64 desc_progress_limit:2;
		u64 rsvd4:2;
		u64 batch_progress_limit:2;
		u64 rsvd5:26;
	};
	u64 bits;
} __packed;

struct grpcfg {
	u64 wqs[4];
	u64 engines;
	union group_flags flags;
} __packed;

union wqcfg {
	struct {
		/* bytes 0-3 */
		u16 wq_size;
		u16 rsvd;

		/* bytes 4-7 */
		u16 wq_thresh;
		u16 rsvd1;

		/* bytes 8-11 */
		u32 mode:1;	/* shared or dedicated */
		u32 bof:1;	/* block on fault */
		u32 wq_ats_disable:1;
		u32 wq_prs_disable:1;
		u32 priority:4;
		u32 pasid:20;
		u32 pasid_en:1;
		u32 priv:1;
		u32 rsvd3:2;

		/* bytes 12-15 */
		u32 max_xfer_shift:5;
		u32 max_batch_shift:4;
		u32 rsvd4:23;

		/* bytes 16-19 */
		u16 occupancy_inth;
		u16 occupancy_table_sel:1;
		u16 rsvd5:15;

		/* bytes 20-23 */
		u16 occupancy_limit;
		u16 occupancy_int_en:1;
		u16 rsvd6:15;

		/* bytes 24-27 */
		u16 occupancy;
		u16 occupancy_int:1;
		u16 rsvd7:12;
		u16 mode_support:1;
		u16 wq_state:2;

		/* bytes 28-31 */
		u32 rsvd8;

		/* bytes 32-63 */
		u64 op_config[4];
	};
	u32 bits[16];
} __packed;

#define WQCFG_PASID_IDX			2
#define WQCFG_PRIVL_IDX			2
#define WQCFG_OCCUP_IDX			6

#define WQCFG_OCCUP_MASK		0xffff

/*
 * This macro calculates the offset of a WQCFG register.
 * idxd - struct idxd *
 * n - wq id
 * ofs - the index of the 32b dword for the config register
 *
 * The WQCFG register space is divided into one register group per WQ;
 * the n index selects the group belonging to that WQ. Each register in
 * a group is 32 bits, and ofs selects which 32-bit register to access.
 */
#define WQCFG_OFFSET(_idxd_dev, n, ofs) \
({\
	typeof(_idxd_dev) __idxd_dev = (_idxd_dev);	\
	(__idxd_dev)->wqcfg_offset + (n) * (__idxd_dev)->wqcfg_size + sizeof(u32) * (ofs);	\
})

#define WQCFG_STRIDES(_idxd_dev) ((_idxd_dev)->wqcfg_size / sizeof(u32))
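/*
 * Worked example (illustrative values): for a device whose WQCFG table
 * starts at offset 0x2000 and that reports a 32-byte WQCFG size, the
 * dword carrying the PASID/priv fields of WQ 2 (bytes 8-11 of its WQCFG)
 * is located at:
 *
 *	WQCFG_OFFSET(idxd, 2, WQCFG_PASID_IDX)
 *		= 0x2000 + 2 * 32 + sizeof(u32) * 2
 *		= 0x2048
 */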
#define GRPCFG_SIZE			64
#define GRPWQCFG_STRIDES		4

/*
 * This macro calculates the offset of a GRPCFG register.
 * idxd - struct idxd *
 * n - group id
 * ofs - the index of the 64b qword for the config register
 *
 * The GRPCFG register block for each group is divided into three
 * sub-registers: GRPWQCFG, GRPENGCFG and GRPFLGCFG. The n index selects
 * the 64-byte block belonging to that group. The GRPWQCFG sub-register
 * is an array of 64-bit qwords, and ofs selects which qword of it to
 * access.
 */
#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
					   (n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
#define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32)
#define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40)
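/*
 * Worked example (illustrative values): with a GRPCFG table offset of
 * 0x400, the registers of group 1 are located at:
 *
 *	GRPWQCFG_OFFSET(idxd, 1, 0) = 0x400 + 1 * 64 + 0  = 0x440
 *	GRPENGCFG_OFFSET(idxd, 1)   = 0x400 + 1 * 64 + 32 = 0x460
 *	GRPFLGCFG_OFFSET(idxd, 1)   = 0x400 + 1 * 64 + 40 = 0x468
 *
 * matching the layout of struct grpcfg above (wqs[4], engines, flags).
 */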
/* Performance monitor registers (offsets are relative to the perfmon table offset) */
#define IDXD_PERFCAP_OFFSET		0x0
union idxd_perfcap {
	struct {
		u64 num_perf_counter:6;
		u64 rsvd1:2;
		u64 counter_width:8;
		u64 num_event_category:4;
		u64 global_event_category:16;
		u64 filter:8;
		u64 rsvd2:8;
		u64 cap_per_counter:1;
		u64 writeable_counter:1;
		u64 counter_freeze:1;
		u64 overflow_interrupt:1;
		u64 rsvd3:8;
	};
	u64 bits;
} __packed;

#define IDXD_EVNTCAP_OFFSET		0x80
union idxd_evntcap {
	struct {
		u64 events:28;
		u64 rsvd:36;
	};
	u64 bits;
} __packed;

struct idxd_event {
	union {
		struct {
			u32 event_category:4;
			u32 events:28;
		};
		u32 val;
	};
} __packed;

#define IDXD_CNTRCAP_OFFSET		0x800
struct idxd_cntrcap {
	union {
		struct {
			u32 counter_width:8;
			u32 rsvd:20;
			u32 num_events:4;
		};
		u32 val;
	};
	struct idxd_event events[];
} __packed;

#define IDXD_PERFRST_OFFSET		0x10
union idxd_perfrst {
	struct {
		u32 perfrst_config:1;
		u32 perfrst_counter:1;
		u32 rsvd:30;
	};
	u32 val;
} __packed;

#define IDXD_OVFSTATUS_OFFSET		0x30
#define IDXD_PERFFRZ_OFFSET		0x20
#define IDXD_CNTRCFG_OFFSET		0x100
union idxd_cntrcfg {
	struct {
		u64 enable:1;
		u64 interrupt_ovf:1;
		u64 global_freeze_ovf:1;
		u64 rsvd1:5;
		u64 event_category:4;
		u64 rsvd2:20;
		u64 events:28;
		u64 rsvd3:4;
	};
	u64 val;
} __packed;

#define IDXD_FLTCFG_OFFSET		0x300

#define IDXD_CNTRDATA_OFFSET		0x200
union idxd_cntrdata {
	struct {
		u64 event_count_value;
	};
	u64 val;
} __packed;

union event_cfg {
	struct {
		u64 event_cat:4;
		u64 event_enc:28;
	};
	u64 val;
} __packed;

union filter_cfg {
	struct {
		u64 wq:32;
		u64 tc:8;
		u64 pg_sz:4;
		u64 xfer_sz:8;
		u64 eng:8;
	};
	u64 val;
} __packed;

#define IDXD_EVLSTATUS_OFFSET		0xf0

union evl_status_reg {
	struct {
		u32 head:16;
		u32 rsvd:16;
		u32 tail:16;
		u32 rsvd2:14;
		u32 int_pending:1;
		u32 rsvd3:1;
	};
	struct {
		u32 bits_lower32;
		u32 bits_upper32;
	};
	u64 bits;
} __packed;

#define IDXD_MAX_BATCH_IDENT		256

struct __evl_entry {
	u64 rsvd:2;
	u64 desc_valid:1;
	u64 wq_idx_valid:1;
	u64 batch:1;
	u64 fault_rw:1;
	u64 priv:1;
	u64 err_info_valid:1;
	u64 error:8;
	u64 wq_idx:8;
	u64 batch_id:8;
	u64 operation:8;
	u64 pasid:20;
	u64 rsvd2:4;

	u16 batch_idx;
	u16 rsvd3;
	union {
		/* Invalid Flags 0x11 */
		u32 invalid_flags;
		/* Invalid Int Handle 0x19 */
		/* Page fault 0x1a */
		/* Page fault 0x06, 0x1f, only operand_id */
		/* Page fault before drain or in batch, 0x26, 0x27 */
		struct {
			u16 int_handle;
			u16 rci:1;
			u16 ims:1;
			u16 rcr:1;
			u16 first_err_in_batch:1;
			u16 rsvd4_2:9;
			u16 operand_id:3;
		};
	};
	u64 fault_addr;
	u64 rsvd5;
} __packed;

struct dsa_evl_entry {
	struct __evl_entry e;
	struct dsa_completion_record cr;
} __packed;

struct iax_evl_entry {
	struct __evl_entry e;
	u64 rsvd[4];
	struct iax_completion_record cr;
} __packed;

#endif