/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_

#include <uapi/linux/idxd.h>

/* PCI Config */
#define PCI_DEVICE_ID_INTEL_DSA_SPR0	0x0b25
#define PCI_DEVICE_ID_INTEL_IAX_SPR0	0x0cfe

#define DEVICE_VERSION_1		0x100
#define DEVICE_VERSION_2		0x200

#define IDXD_MMIO_BAR			0
#define IDXD_WQ_BAR			2
#define IDXD_PORTAL_SIZE		PAGE_SIZE

/* MMIO Device BAR0 Registers */
#define IDXD_VER_OFFSET			0x00
#define IDXD_VER_MAJOR_MASK		0xf0
#define IDXD_VER_MINOR_MASK		0x0f
#define GET_IDXD_VER_MAJOR(x)		(((x) & IDXD_VER_MAJOR_MASK) >> 4)
#define GET_IDXD_VER_MINOR(x)		((x) & IDXD_VER_MINOR_MASK)

union gen_cap_reg {
	struct {
		u64 block_on_fault:1;
		u64 overlap_copy:1;
		u64 cache_control_mem:1;
		u64 cache_control_cache:1;
		u64 cmd_cap:1;
		u64 rsvd:3;
		u64 dest_readback:1;
		u64 drain_readback:1;
		u64 rsvd2:3;
		u64 evl_support:2;
		u64 batch_continuation:1;
		u64 max_xfer_shift:5;
		u64 max_batch_shift:4;
		u64 max_ims_mult:6;
		u64 config_en:1;
		u64 rsvd3:32;
	};
	u64 bits;
} __packed;
#define IDXD_GENCAP_OFFSET		0x10

union wq_cap_reg {
	struct {
		u64 total_wq_size:16;
		u64 num_wqs:8;
		u64 wqcfg_size:4;
		u64 rsvd:20;
		u64 shared_mode:1;
		u64 dedicated_mode:1;
		u64 wq_ats_support:1;
		u64 priority:1;
		u64 occupancy:1;
		u64 occupancy_int:1;
		u64 op_config:1;
		u64 wq_prs_support:1;
		u64 rsvd4:8;
	};
	u64 bits;
} __packed;
#define IDXD_WQCAP_OFFSET		0x20
#define IDXD_WQCFG_MIN			5

union group_cap_reg {
	struct {
		u64 num_groups:8;
		u64 total_rdbufs:8;	/* formerly total_tokens */
		u64 rdbuf_ctrl:1;	/* formerly token_en */
		u64 rdbuf_limit:1;	/* formerly token_limit */
		u64 progress_limit:1;	/* descriptor and batch descriptor */
		u64 rsvd:45;
	};
	u64 bits;
} __packed;
#define IDXD_GRPCAP_OFFSET		0x30

union engine_cap_reg {
	struct {
		u64 num_engines:8;
		u64 rsvd:56;
	};
	u64 bits;
} __packed;

#define IDXD_ENGCAP_OFFSET		0x38

#define IDXD_OPCAP_NOOP			0x0001
#define IDXD_OPCAP_BATCH		0x0002
#define IDXD_OPCAP_MEMMOVE		0x0008
struct opcap {
	u64 bits[4];
};

#define IDXD_MAX_OPCAP_BITS		256U

#define IDXD_OPCAP_OFFSET		0x40

#define IDXD_TABLE_OFFSET		0x60
union offsets_reg {
	struct {
		u64 grpcfg:16;
		u64 wqcfg:16;
		u64 msix_perm:16;
		u64 ims:16;
		u64 perfmon:16;
		u64 rsvd:48;
	};
	u64 bits[2];
} __packed;

#define IDXD_TABLE_MULT			0x100

#define IDXD_GENCFG_OFFSET		0x80
union gencfg_reg {
	struct {
		u32 rdbuf_limit:8;
		u32 rsvd:4;
		u32 user_int_en:1;
		u32 evl_en:1;
		u32 rsvd2:18;
	};
	u32 bits;
} __packed;

#define IDXD_GENCTRL_OFFSET		0x88
union genctrl_reg {
	struct {
		u32 softerr_int_en:1;
		u32 halt_int_en:1;
		u32 evl_int_en:1;
		u32 rsvd:29;
	};
	u32 bits;
} __packed;

#define IDXD_GENSTATS_OFFSET		0x90
union gensts_reg {
	struct {
		u32 state:2;
		u32 reset_type:2;
		u32 rsvd:28;
	};
	u32 bits;
} __packed;

enum idxd_device_status_state {
	IDXD_DEVICE_STATE_DISABLED = 0,
	IDXD_DEVICE_STATE_ENABLED,
	IDXD_DEVICE_STATE_DRAIN,
	IDXD_DEVICE_STATE_HALT,
};

enum idxd_device_reset_type {
	IDXD_DEVICE_RESET_SOFTWARE = 0,
	IDXD_DEVICE_RESET_FLR,
	IDXD_DEVICE_RESET_WARM,
	IDXD_DEVICE_RESET_COLD,
};

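/*
 * Illustrative sketch (not part of the register definitions above): how a
 * driver would typically decode GENCAP through the union, where the shift
 * fields encode powers of two. "mmio" stands in for a hypothetical
 * ioremap()ed pointer to BAR0 and is not defined in this header; the exact
 * MMIO read helper may differ by driver.
 *
 *	union gen_cap_reg gencap;
 *	u64 max_xfer_bytes, max_batch_descs;
 *
 *	gencap.bits = ioread64(mmio + IDXD_GENCAP_OFFSET);
 *	max_xfer_bytes  = 1ULL << gencap.max_xfer_shift;
 *	max_batch_descs = 1ULL << gencap.max_batch_shift;
 */
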
#define IDXD_INTCAUSE_OFFSET		0x98
#define IDXD_INTC_ERR			0x01
#define IDXD_INTC_CMD			0x02
#define IDXD_INTC_OCCUPY		0x04
#define IDXD_INTC_PERFMON_OVFL		0x08
#define IDXD_INTC_HALT_STATE		0x10
#define IDXD_INTC_EVL			0x20
#define IDXD_INTC_INT_HANDLE_REVOKED	0x80000000

#define IDXD_CMD_OFFSET			0xa0
union idxd_command_reg {
	struct {
		u32 operand:20;
		u32 cmd:5;
		u32 rsvd:6;
		u32 int_req:1;
	};
	u32 bits;
} __packed;

enum idxd_cmd {
	IDXD_CMD_ENABLE_DEVICE = 1,
	IDXD_CMD_DISABLE_DEVICE,
	IDXD_CMD_DRAIN_ALL,
	IDXD_CMD_ABORT_ALL,
	IDXD_CMD_RESET_DEVICE,
	IDXD_CMD_ENABLE_WQ,
	IDXD_CMD_DISABLE_WQ,
	IDXD_CMD_DRAIN_WQ,
	IDXD_CMD_ABORT_WQ,
	IDXD_CMD_RESET_WQ,
	IDXD_CMD_DRAIN_PASID,
	IDXD_CMD_ABORT_PASID,
	IDXD_CMD_REQUEST_INT_HANDLE,
	IDXD_CMD_RELEASE_INT_HANDLE,
};

#define CMD_INT_HANDLE_IMS		0x10000

#define IDXD_CMDSTS_OFFSET		0xa8
union cmdsts_reg {
	struct {
		u8 err;
		u16 result;
		u8 rsvd:7;
		u8 active:1;
	};
	u32 bits;
} __packed;
#define IDXD_CMDSTS_ACTIVE		0x80000000
#define IDXD_CMDSTS_ERR_MASK		0xff
#define IDXD_CMDSTS_RES_SHIFT		8

enum idxd_cmdsts_err {
	IDXD_CMDSTS_SUCCESS = 0,
	IDXD_CMDSTS_INVAL_CMD,
	IDXD_CMDSTS_INVAL_WQIDX,
	IDXD_CMDSTS_HW_ERR,
	/* enable device errors */
	IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
	IDXD_CMDSTS_ERR_CONFIG,
	IDXD_CMDSTS_ERR_BUSMASTER_EN,
	IDXD_CMDSTS_ERR_PASID_INVAL,
	IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
	IDXD_CMDSTS_ERR_GRP_CONFIG,
	IDXD_CMDSTS_ERR_GRP_CONFIG2,
	IDXD_CMDSTS_ERR_GRP_CONFIG3,
	IDXD_CMDSTS_ERR_GRP_CONFIG4,
	/* enable wq errors */
	IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
	IDXD_CMDSTS_ERR_WQ_ENABLED,
	IDXD_CMDSTS_ERR_WQ_SIZE,
	IDXD_CMDSTS_ERR_WQ_PRIOR,
	IDXD_CMDSTS_ERR_WQ_MODE,
	IDXD_CMDSTS_ERR_BOF_EN,
	IDXD_CMDSTS_ERR_PASID_EN,
	IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
	IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
	/* disable device errors */
	IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
	/* disable WQ, drain WQ, abort WQ, reset WQ */
	IDXD_CMDSTS_ERR_DEV_NOT_EN,
	/* request interrupt handle */
	IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
	IDXD_CMDSTS_ERR_NO_HANDLE,
};

#define IDXD_CMDCAP_OFFSET		0xb0

#define IDXD_SWERR_OFFSET		0xc0
#define IDXD_SWERR_VALID		0x00000001
#define IDXD_SWERR_OVERFLOW		0x00000002
#define IDXD_SWERR_ACK			(IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
union sw_err_reg {
	struct {
		u64 valid:1;
		u64 overflow:1;
		u64 desc_valid:1;
		u64 wq_idx_valid:1;
		u64 batch:1;
		u64 fault_rw:1;
		u64 priv:1;
		u64 rsvd:1;
		u64 error:8;
		u64 wq_idx:8;
		u64 rsvd2:8;
		u64 operation:8;
		u64 pasid:20;
		u64 rsvd3:4;

		u64 batch_idx:16;
		u64 rsvd4:16;
		u64 invalid_flags:32;

		u64 fault_addr;

		u64 rsvd5;
	};
	u64 bits[4];
} __packed;

union iaa_cap_reg {
	struct {
		u64 dec_aecs_format_ver:1;
		u64 drop_init_bits:1;
		u64 chaining:1;
		u64 force_array_output_mod:1;
		u64 load_part_aecs:1;
		u64 comp_early_abort:1;
		u64 nested_comp:1;
		u64 diction_comp:1;
		u64 header_gen:1;
		u64 crypto_gcm:1;
		u64 crypto_cfb:1;
		u64 crypto_xts:1;
		u64 rsvd:52;
	};
	u64 bits;
} __packed;

#define IDXD_IAACAP_OFFSET		0x180

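/*
 * Illustrative sketch (an assumption about typical use, not the driver's
 * actual submission path): a device command is built in idxd_command_reg,
 * written to IDXD_CMD_OFFSET, and completion is observed by polling the
 * ACTIVE bit in CMDSTS. "mmio" is again a hypothetical BAR0 mapping.
 *
 *	union idxd_command_reg cmd = { .cmd = IDXD_CMD_ENABLE_DEVICE };
 *	u32 sts;
 *
 *	iowrite32(cmd.bits, mmio + IDXD_CMD_OFFSET);
 *	do {
 *		sts = ioread32(mmio + IDXD_CMDSTS_OFFSET);
 *	} while (sts & IDXD_CMDSTS_ACTIVE);
 *	if (sts & IDXD_CMDSTS_ERR_MASK)
 *		...;	the low byte holds an idxd_cmdsts_err code
 */
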
#define IDXD_EVLCFG_OFFSET		0xe0
union evlcfg_reg {
	struct {
		u64 pasid_en:1;
		u64 priv:1;
		u64 rsvd:10;
		u64 base_addr:52;

		u64 size:16;
		u64 pasid:20;
		u64 rsvd2:28;
	};
	u64 bits[2];
} __packed;

#define IDXD_EVL_SIZE_MIN		0x0040
#define IDXD_EVL_SIZE_MAX		0xffff

union msix_perm {
	struct {
		u32 rsvd:2;
		u32 ignore:1;
		u32 pasid_en:1;
		u32 rsvd2:8;
		u32 pasid:20;
	};
	u32 bits;
} __packed;

union group_flags {
	struct {
		u64 tc_a:3;
		u64 tc_b:3;
		u64 rsvd:1;
		u64 use_rdbuf_limit:1;
		u64 rdbufs_reserved:8;
		u64 rsvd2:4;
		u64 rdbufs_allowed:8;
		u64 rsvd3:4;
		u64 desc_progress_limit:2;
		u64 rsvd4:2;
		u64 batch_progress_limit:2;
		u64 rsvd5:26;
	};
	u64 bits;
} __packed;

struct grpcfg {
	u64 wqs[4];
	u64 engines;
	union group_flags flags;
} __packed;

union wqcfg {
	struct {
		/* bytes 0-3 */
		u16 wq_size;
		u16 rsvd;

		/* bytes 4-7 */
		u16 wq_thresh;
		u16 rsvd1;

		/* bytes 8-11 */
		u32 mode:1;	/* shared or dedicated */
		u32 bof:1;	/* block on fault */
		u32 wq_ats_disable:1;
		u32 wq_prs_disable:1;
		u32 priority:4;
		u32 pasid:20;
		u32 pasid_en:1;
		u32 priv:1;
		u32 rsvd3:2;

		/* bytes 12-15 */
		u32 max_xfer_shift:5;
		u32 max_batch_shift:4;
		u32 rsvd4:23;

		/* bytes 16-19 */
		u16 occupancy_inth;
		u16 occupancy_table_sel:1;
		u16 rsvd5:15;

		/* bytes 20-23 */
		u16 occupancy_limit;
		u16 occupancy_int_en:1;
		u16 rsvd6:15;

		/* bytes 24-27 */
		u16 occupancy;
		u16 occupancy_int:1;
		u16 rsvd7:12;
		u16 mode_support:1;
		u16 wq_state:2;

		/* bytes 28-31 */
		u32 rsvd8;

		/* bytes 32-63 */
		u64 op_config[4];
	};
	u32 bits[16];
} __packed;

#define WQCFG_PASID_IDX			2
#define WQCFG_PRIVL_IDX			2
#define WQCFG_OCCUP_IDX			6

#define WQCFG_OCCUP_MASK		0xffff

/*
 * This macro calculates the offset into the WQCFG register
 * idxd - struct idxd *
 * n - wq id
 * ofs - the index of the 32b dword for the config register
 *
 * The WQCFG register block is divided into per-wq groups. The n index
 * selects the register group that belongs to that particular wq.
 * Each register is 32 bits, and ofs gives the index of the register to access.
 */
#define WQCFG_OFFSET(_idxd_dev, n, ofs) \
({\
	typeof(_idxd_dev) __idxd_dev = (_idxd_dev);	\
	(__idxd_dev)->wqcfg_offset + (n) * (__idxd_dev)->wqcfg_size + sizeof(u32) * (ofs);	\
})

#define WQCFG_STRIDES(_idxd_dev) ((_idxd_dev)->wqcfg_size / sizeof(u32))

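/*
 * Worked example (illustrative only; the wqcfg_offset and wqcfg_size values
 * are hypothetical): with wqcfg_offset = 0x2000 and wqcfg_size = 32 bytes,
 * the PASID/priv dword of wq 3 is at
 *
 *	WQCFG_OFFSET(idxd, 3, WQCFG_PASID_IDX)
 *		= 0x2000 + 3 * 32 + sizeof(u32) * 2 = 0x2068
 *
 * and WQCFG_STRIDES(idxd) = 32 / 4 = 8 dwords per wq.
 */
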
#define GRPCFG_SIZE			64
#define GRPWQCFG_STRIDES		4

/*
 * This macro calculates the offset into the GRPCFG register
 * idxd - struct idxd *
 * n - group id
 * ofs - the index of the 64b qword for the config register
 *
 * The GRPCFG register block is divided into three sub-registers, which
 * are GRPWQCFG, GRPENGCFG and GRPFLGCFG. The n index allows us to move
 * to the register block that contains the three sub-registers.
 * Each register block is 64 bits, and ofs gives the offset
 * within the GRPWQCFG register to access.
 */
#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
					   (n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
#define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32)
#define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40)

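/*
 * Illustrative layout note (an assumption about typical use, not a new
 * definition): each group's 64-byte GRPCFG block holds the four GRPWQCFG
 * qwords at bytes 0-31, GRPENGCFG at byte 32 and GRPFLGCFG at byte 40,
 * matching struct grpcfg above and the +32/+40 in the helpers. With a
 * hypothetical grpcfg_offset of 0x400, group 1's flags register would be at
 *
 *	GRPFLGCFG_OFFSET(idxd, 1) = 0x400 + 1 * 64 + 40 = 0x468
 */
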
/* Following are the performance monitor registers */
#define IDXD_PERFCAP_OFFSET		0x0
union idxd_perfcap {
	struct {
		u64 num_perf_counter:6;
		u64 rsvd1:2;
		u64 counter_width:8;
		u64 num_event_category:4;
		u64 global_event_category:16;
		u64 filter:8;
		u64 rsvd2:8;
		u64 cap_per_counter:1;
		u64 writeable_counter:1;
		u64 counter_freeze:1;
		u64 overflow_interrupt:1;
		u64 rsvd3:8;
	};
	u64 bits;
} __packed;

#define IDXD_EVNTCAP_OFFSET		0x80
union idxd_evntcap {
	struct {
		u64 events:28;
		u64 rsvd:36;
	};
	u64 bits;
} __packed;

struct idxd_event {
	union {
		struct {
			u32 event_category:4;
			u32 events:28;
		};
		u32 val;
	};
} __packed;

#define IDXD_CNTRCAP_OFFSET		0x800
struct idxd_cntrcap {
	union {
		struct {
			u32 counter_width:8;
			u32 rsvd:20;
			u32 num_events:4;
		};
		u32 val;
	};
	struct idxd_event events[];
} __packed;

#define IDXD_PERFRST_OFFSET		0x10
union idxd_perfrst {
	struct {
		u32 perfrst_config:1;
		u32 perfrst_counter:1;
		u32 rsvd:30;
	};
	u32 val;
} __packed;

#define IDXD_OVFSTATUS_OFFSET		0x30
#define IDXD_PERFFRZ_OFFSET		0x20
#define IDXD_CNTRCFG_OFFSET		0x100
union idxd_cntrcfg {
	struct {
		u64 enable:1;
		u64 interrupt_ovf:1;
		u64 global_freeze_ovf:1;
		u64 rsvd1:5;
		u64 event_category:4;
		u64 rsvd2:20;
		u64 events:28;
		u64 rsvd3:4;
	};
	u64 val;
} __packed;

#define IDXD_FLTCFG_OFFSET		0x300

#define IDXD_CNTRDATA_OFFSET		0x200
union idxd_cntrdata {
	struct {
		u64 event_count_value;
	};
	u64 val;
} __packed;

union event_cfg {
	struct {
		u64 event_cat:4;
		u64 event_enc:28;
	};
	u64 val;
} __packed;

union filter_cfg {
	struct {
		u64 wq:32;
		u64 tc:8;
		u64 pg_sz:4;
		u64 xfer_sz:8;
		u64 eng:8;
	};
	u64 val;
} __packed;

#define IDXD_EVLSTATUS_OFFSET		0xf0

union evl_status_reg {
	struct {
		u32 head:16;
		u32 rsvd:16;
		u32 tail:16;
		u32 rsvd2:14;
		u32 int_pending:1;
		u32 rsvd3:1;
	};
	struct {
		u32 bits_lower32;
		u32 bits_upper32;
	};
	u64 bits;
} __packed;

#define IDXD_MAX_BATCH_IDENT		256

struct __evl_entry {
	u64 rsvd:2;
	u64 desc_valid:1;
	u64 wq_idx_valid:1;
	u64 batch:1;
	u64 fault_rw:1;
	u64 priv:1;
	u64 err_info_valid:1;
	u64 error:8;
	u64 wq_idx:8;
	u64 batch_id:8;
	u64 operation:8;
	u64 pasid:20;
	u64 rsvd2:4;

	u16 batch_idx;
	u16 rsvd3;
	union {
		/* Invalid Flags 0x11 */
		u32 invalid_flags;
		/* Invalid Int Handle 0x19 */
		/* Page fault 0x1a */
		/* Page fault 0x06, 0x1f, only operand_id */
		/* Page fault before drain or in batch, 0x26, 0x27 */
		struct {
			u16 int_handle;
			u16 rci:1;
			u16 ims:1;
			u16 rcr:1;
			u16 first_err_in_batch:1;
			u16 rsvd4_2:9;
			u16 operand_id:3;
		};
	};
	u64 fault_addr;
	u64 rsvd5;
} __packed;

struct dsa_evl_entry {
	struct __evl_entry e;
	struct dsa_completion_record cr;
} __packed;

struct iax_evl_entry {
	struct __evl_entry e;
	u64 rsvd[4];
	struct iax_completion_record cr;
} __packed;

#endif