/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_

#include "qat_freebsd.h"
#include "adf_cfg_common.h"
#include "adf_pfvf_msg.h"

#include "opt_qat.h"

#define ADF_CFG_NUM_SERVICES 4

#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_C62X_DEVICE_NAME "c6xx"
#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_200XX_DEVICE_NAME "200xx"
#define ADF_200XXVF_DEVICE_NAME "200xxvf"
#define ADF_C4XXX_DEVICE_NAME "c4xxx"
#define ADF_C4XXXVF_DEVICE_NAME "c4xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_4XXXVF_DEVICE_NAME "4xxxvf"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
#define ADF_C62X_PCI_DEVICE_ID 0x37c8
#define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9
#define ADF_C3XXX_PCI_DEVICE_ID 0x19e2
#define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3
#define ADF_200XX_PCI_DEVICE_ID 0x18ee
#define ADF_200XXIOV_PCI_DEVICE_ID 0x18ef
#define ADF_D15XX_PCI_DEVICE_ID 0x6f54
#define ADF_D15XXIOV_PCI_DEVICE_ID 0x6f55
#define ADF_C4XXX_PCI_DEVICE_ID 0x18a0
#define ADF_C4XXXIOV_PCI_DEVICE_ID 0x18a1
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943

#define IS_QAT_GEN3(ID) ({ (ID == ADF_C4XXX_PCI_DEVICE_ID); })
static inline bool
IS_QAT_GEN4(const unsigned int id)
{
        return (id == ADF_4XXX_PCI_DEVICE_ID || id == ADF_401XX_PCI_DEVICE_ID ||
            id == ADF_4XXXIOV_PCI_DEVICE_ID ||
            id == ADF_401XXIOV_PCI_DEVICE_ID);
}

#define IS_QAT_GEN3_OR_GEN4(ID) (IS_QAT_GEN3(ID) || IS_QAT_GEN4(ID))
#define ADF_VF2PF_SET_SIZE 32
#define ADF_MAX_VF2PF_SET 4
#define ADF_VF2PF_SET_OFFSET(set_nr) ((set_nr) * ADF_VF2PF_SET_SIZE)
#define ADF_VF2PF_VFNR_TO_SET(vf_nr) ((vf_nr) / ADF_VF2PF_SET_SIZE)
#define ADF_VF2PF_VFNR_TO_MASK(vf_nr) \
        ({ \
                u32 vf_nr_ = (vf_nr); \
                BIT((vf_nr_) - \
                    ADF_VF2PF_SET_SIZE * ADF_VF2PF_VFNR_TO_SET(vf_nr_)); \
        })
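
/*
 * Illustrative sketch only (kept out of the build): how the VF2PF helpers
 * above group VF numbers into sets of ADF_VF2PF_SET_SIZE (32).  The VF
 * number used here is hypothetical.
 */
#if 0
static inline void
adf_vf2pf_mask_example(void)
{
        u32 vf_nr = 37;                           /* hypothetical VF number */
        u32 set = ADF_VF2PF_VFNR_TO_SET(vf_nr);   /* 37 / 32 == 1 */
        u32 offset = ADF_VF2PF_SET_OFFSET(set);   /* 1 * 32 == 32 */
        u32 mask = ADF_VF2PF_VFNR_TO_MASK(vf_nr); /* BIT(37 - 32) == 0x20 */

        (void)offset;
        (void)mask;
}
#endif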

#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
#define ADF_MAX_MSIX_VECTOR_NAME 32
#define ADF_DEVICE_NAME_PREFIX "qat_"
#define ADF_STOP_RETRY 50
#define ADF_NUM_THREADS_PER_AE (8)
#define ADF_AE_ADMIN_THREAD (7)
#define ADF_NUM_PKE_STRAND (2)
#define ADF_AE_STRAND0_THREAD (8)
#define ADF_AE_STRAND1_THREAD (9)
#define ADF_CFG_NUM_SERVICES 4
#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7
#define ADF_RINGS_PER_SRV_TYPE 2
#define ADF_THRD_ABILITY_BIT_LEN 4
#define ADF_THRD_ABILITY_MASK 0xf
#define ADF_VF_OFFSET 0x8
#define ADF_MAX_FUNC_PER_DEV 0x7
#define ADF_PCI_DEV_OFFSET 0x3

#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7

#define GET_SRV_TYPE(ena_srv_mask, srv) \
        (((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK)

#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.pfvf_ops)
#define ADF_DEFAULT_RING_TO_SRV_MAP \
        (CRYPTO | CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
            NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
            COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
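
/*
 * Illustrative sketch only (kept out of the build): decoding a packed
 * ring-to-service map such as ADF_DEFAULT_RING_TO_SRV_MAP.  Each of the
 * ADF_CFG_NUM_SERVICES ring pairs occupies ADF_SRV_TYPE_BIT_LEN (3) bits, so
 * GET_SRV_TYPE() recovers the service for pair N by shifting and masking.
 * CRYPTO, COMP and NA are the service-type values provided by
 * adf_cfg_common.h.
 */
#if 0
static inline void
adf_ring_to_srv_map_example(u16 ring_to_svc_map)
{
        int pair;

        for (pair = 0; pair < ADF_CFG_NUM_SERVICES; pair++) {
                switch (GET_SRV_TYPE(ring_to_svc_map, pair)) {
                case CRYPTO:
                        /* ring pair carries crypto requests */
                        break;
                case COMP:
                        /* ring pair carries compression requests */
                        break;
                case NA:
                default:
                        /* ring pair not assigned to a service */
                        break;
                }
        }
}
#endif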

enum adf_accel_capabilities {
        ADF_ACCEL_CAPABILITIES_NULL = 0,
        ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
        ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
        ADF_ACCEL_CAPABILITIES_CIPHER = 4,
        ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
        ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
        ADF_ACCEL_CAPABILITIES_DEPRECATED = 64,
        ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};

struct adf_bar {
        rman_res_t base_addr;
        struct resource *virt_addr;
        rman_res_t size;
} __packed;

struct adf_accel_msix {
        struct msix_entry *entries;
        u32 num_entries;
} __packed;

struct adf_accel_pci {
        device_t pci_dev;
        struct adf_accel_msix msix_entries;
        struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
        uint8_t revid;
        uint8_t sku;
        int node;
} __packed;

enum dev_state { DEV_DOWN = 0, DEV_UP };

enum dev_sku_info {
        DEV_SKU_1 = 0,
        DEV_SKU_2,
        DEV_SKU_3,
        DEV_SKU_4,
        DEV_SKU_VF,
        DEV_SKU_1_CY,
        DEV_SKU_2_CY,
        DEV_SKU_3_CY,
        DEV_SKU_UNKNOWN
};

static inline const char *
get_sku_info(enum dev_sku_info info)
{
        switch (info) {
        case DEV_SKU_1:
                return "SKU1";
        case DEV_SKU_1_CY:
                return "SKU1CY";
        case DEV_SKU_2:
                return "SKU2";
        case DEV_SKU_2_CY:
                return "SKU2CY";
        case DEV_SKU_3:
                return "SKU3";
        case DEV_SKU_3_CY:
                return "SKU3CY";
        case DEV_SKU_4:
                return "SKU4";
        case DEV_SKU_VF:
                return "SKUVF";
        case DEV_SKU_UNKNOWN:
        default:
                break;
        }
        return "Unknown SKU";
}

enum adf_accel_unit_services {
        ADF_ACCEL_SERVICE_NULL = 0,
        ADF_ACCEL_INLINE_CRYPTO = 1,
        ADF_ACCEL_CRYPTO = 2,
        ADF_ACCEL_COMPRESSION = 4,
        ADF_ACCEL_ASYM = 8,
        ADF_ACCEL_ADMIN = 16
};

struct adf_ae_info {
        u32 num_asym_thd;
        u32 num_sym_thd;
        u32 num_dc_thd;
} __packed;

struct adf_accel_unit {
        u8 au_mask;
        u32 accel_mask;
        u64 ae_mask;
        u64 comp_ae_mask;
        u32 num_ae;
        enum adf_accel_unit_services services;
} __packed;

struct adf_accel_unit_info {
        u32 inline_ingress_msk;
        u32 inline_egress_msk;
        u32 sym_ae_msk;
        u32 asym_ae_msk;
        u32 dc_ae_msk;
        u8 num_cy_au;
        u8 num_dc_au;
        u8 num_asym_au;
        u8 num_inline_au;
        struct adf_accel_unit *au;
        const struct adf_ae_info *ae_info;
} __packed;

struct adf_hw_aram_info {
        /* Inline Egress mask. "1" = AE is working with egress traffic */
        u32 inline_direction_egress_mask;
        /* Inline congestion management profiles set in config file */
        u32 inline_congest_mngt_profile;
        /* Initialise CY AE mask, "1" = AE is used for CY operations */
        u32 cy_ae_mask;
        /* Initialise DC AE mask, "1" = AE is used for DC operations */
        u32 dc_ae_mask;
        /* Number of long words used to define the ARAM regions */
        u32 num_aram_lw_entries;
        /* ARAM region definitions */
        u32 mmp_region_size;
        u32 mmp_region_offset;
        u32 skm_region_size;
        u32 skm_region_offset;
        /*
         * Defines size and offset of compression intermediate buffers stored
         * in ARAM (device's on-chip memory).
         */
        u32 inter_buff_aram_region_size;
        u32 inter_buff_aram_region_offset;
        u32 sadb_region_size;
        u32 sadb_region_offset;
} __packed;

struct adf_hw_device_class {
        const char *name;
        const enum adf_device_type type;
        uint32_t instances;
} __packed;

struct arb_info {
        u32 arbiter_offset;
        u32 wrk_thd_2_srv_arb_map;
        u32 wrk_cfg_offset;
} __packed;

struct admin_info {
        u32 admin_msg_ur;
        u32 admin_msg_lr;
        u32 mailbox_offset;
} __packed;

struct adf_hw_csr_ops {
        u64 (*build_csr_ring_base_addr)(bus_addr_t addr, u32 size);
        u32 (*read_csr_ring_head)(struct resource *csr_base_addr,
            u32 bank,
            u32 ring);
        void (*write_csr_ring_head)(struct resource *csr_base_addr,
            u32 bank,
            u32 ring,
            u32 value);
        u32 (*read_csr_ring_tail)(struct resource *csr_base_addr,
            u32 bank,
            u32 ring);
        void (*write_csr_ring_tail)(struct resource *csr_base_addr,
            u32 bank,
            u32 ring,
            u32 value);
        u32 (*read_csr_e_stat)(struct resource *csr_base_addr, u32 bank);
        void (*write_csr_ring_config)(struct resource *csr_base_addr,
            u32 bank,
            u32 ring,
            u32 value);
        bus_addr_t (*read_csr_ring_base)(struct resource *csr_base_addr,
            u32 bank,
            u32 ring);
        void (*write_csr_ring_base)(struct resource *csr_base_addr,
            u32 bank,
            u32 ring,
            bus_addr_t addr);
        void (*write_csr_int_flag)(struct resource *csr_base_addr,
            u32 bank,
            u32 value);
        void (*write_csr_int_srcsel)(struct resource *csr_base_addr, u32 bank);
        void (*write_csr_int_col_en)(struct resource *csr_base_addr,
            u32 bank,
            u32 value);
        void (*write_csr_int_col_ctl)(struct resource *csr_base_addr,
            u32 bank,
            u32 value);
        void (*write_csr_int_flag_and_col)(struct resource *csr_base_addr,
            u32 bank,
            u32 value);
        u32 (*read_csr_ring_srv_arb_en)(struct resource *csr_base_addr,
            u32 bank);
        void (*write_csr_ring_srv_arb_en)(struct resource *csr_base_addr,
            u32 bank,
            u32 value);
        u32 (*get_src_sel_mask)(void);
        u32 (*get_int_col_ctl_enable_mask)(void);
        u32 (*get_bank_irq_mask)(u32 irq_mask);
};

struct adf_cfg_device_data;
struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;

struct adf_pfvf_ops {
        int (*enable_comms)(struct adf_accel_dev *accel_dev);
        u32 (*get_pf2vf_offset)(u32 i);
        u32 (*get_vf2pf_offset)(u32 i);
        void (*enable_vf2pf_interrupts)(struct resource *pmisc_addr,
            u32 vf_mask);
        void (*disable_all_vf2pf_interrupts)(struct resource *pmisc_addr);
        u32 (*disable_pending_vf2pf_interrupts)(struct resource *pmisc_addr);
        int (*send_msg)(struct adf_accel_dev *accel_dev,
            struct pfvf_message msg,
            u32 pfvf_offset,
            struct mutex *csr_lock);
        struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
            u32 pfvf_offset,
            u8 compat_ver);
};

struct adf_hw_csr_info {
        struct adf_hw_csr_ops csr_ops;
        struct adf_pfvf_ops pfvf_ops;
        u32 csr_addr_offset;
        u32 ring_bundle_size;
        u32 bank_int_flag_clear_mask;
        u32 num_rings_per_int_srcsel;
        u32 arb_enable_mask;
};
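
/*
 * Illustrative sketch only (kept out of the build): ring CSRs are not poked
 * directly; callers go through the per-generation accessors in
 * adf_hw_csr_ops (normally obtained via GET_CSR_OPS()).  The bank and ring
 * numbers and the interrupt-coalescing mask below are hypothetical.
 */
#if 0
static inline void
adf_csr_ops_example(struct adf_hw_csr_ops *csr_ops,
    struct resource *csr_base_addr)
{
        u32 bank = 0, ring = 0;
        u32 head = csr_ops->read_csr_ring_head(csr_base_addr, bank, ring);
        u32 tail = csr_ops->read_csr_ring_tail(csr_base_addr, bank, ring);

        /* Re-enable interrupt coalescing for the bank (hypothetical mask). */
        csr_ops->write_csr_int_col_en(csr_base_addr, bank, 0xFFFF);

        (void)head;
        (void)tail;
}
#endif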

struct adf_hw_device_data {
        struct adf_hw_device_class *dev_class;
        uint32_t (*get_accel_mask)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_ae_mask)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
        uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
        uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
        uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
        uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
        void (*notify_and_wait_ethernet)(struct adf_accel_dev *accel_dev);
        bool (*get_eth_doorbell_msg)(struct adf_accel_dev *accel_dev);
        void (*get_arb_info)(struct arb_info *arb_csrs_info);
        void (*get_admin_info)(struct admin_info *admin_csrs_info);
        void (*get_errsou_offset)(u32 *errsou3, u32 *errsou5);
        uint32_t (*get_num_accel_units)(struct adf_hw_device_data *self);
        int (*init_accel_units)(struct adf_accel_dev *accel_dev);
        void (*exit_accel_units)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_clock_speed)(struct adf_hw_device_data *self);
        enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
        bool (*check_prod_sku)(struct adf_accel_dev *accel_dev);
        int (*alloc_irq)(struct adf_accel_dev *accel_dev);
        void (*free_irq)(struct adf_accel_dev *accel_dev);
        void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
        int (*check_uncorrectable_error)(struct adf_accel_dev *accel_dev);
        void (*print_err_registers)(struct adf_accel_dev *accel_dev);
        void (*disable_error_interrupts)(struct adf_accel_dev *accel_dev);
        int (*init_ras)(struct adf_accel_dev *accel_dev);
        void (*exit_ras)(struct adf_accel_dev *accel_dev);
        void (*disable_arb)(struct adf_accel_dev *accel_dev);
        void (*update_ras_errors)(struct adf_accel_dev *accel_dev, int error);
        bool (*ras_interrupts)(struct adf_accel_dev *accel_dev,
            bool *reset_required);
        int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
        void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
        int (*send_admin_init)(struct adf_accel_dev *accel_dev);
        void (*set_asym_rings_mask)(struct adf_accel_dev *accel_dev);
        int (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev,
            u16 *ring_to_svc_map);
        uint32_t (*get_accel_cap)(struct adf_accel_dev *accel_dev);
        int (*init_arb)(struct adf_accel_dev *accel_dev);
        void (*exit_arb)(struct adf_accel_dev *accel_dev);
        void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
            const uint32_t **cfg);
        int (*init_device)(struct adf_accel_dev *accel_dev);
        int (*get_heartbeat_status)(struct adf_accel_dev *accel_dev);
        int (*int_timer_init)(struct adf_accel_dev *accel_dev);
        void (*int_timer_exit)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_ae_clock)(struct adf_hw_device_data *self);
        uint32_t (*get_hb_clock)(struct adf_hw_device_data *self);
        void (*disable_iov)(struct adf_accel_dev *accel_dev);
        void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
            bool enable);
        void (*enable_ints)(struct adf_accel_dev *accel_dev);
        bool (*check_slice_hang)(struct adf_accel_dev *accel_dev);
        int (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
        void (*enable_pf2vf_interrupt)(struct adf_accel_dev *accel_dev);
        void (*disable_pf2vf_interrupt)(struct adf_accel_dev *accel_dev);
        int (*interrupt_active_pf2vf)(struct adf_accel_dev *accel_dev);
        int (*get_int_active_bundles)(struct adf_accel_dev *accel_dev);
        void (*reset_device)(struct adf_accel_dev *accel_dev);
        void (*reset_hw_units)(struct adf_accel_dev *accel_dev);
        int (*measure_clock)(struct adf_accel_dev *accel_dev);
        void (*restore_device)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_obj_cfg_ae_mask)(struct adf_accel_dev *accel_dev,
            enum adf_accel_unit_services services);
        enum adf_accel_unit_services (*get_service_type)(
            struct adf_accel_dev *accel_dev, s32 obj_num);
        int (*add_pke_stats)(struct adf_accel_dev *accel_dev);
        void (*remove_pke_stats)(struct adf_accel_dev *accel_dev);
        int (*add_misc_error)(struct adf_accel_dev *accel_dev);
        int (*count_ras_event)(struct adf_accel_dev *accel_dev,
            u32 *ras_event,
            char *aeidstr);
        void (*remove_misc_error)(struct adf_accel_dev *accel_dev);
        int (*configure_accel_units)(struct adf_accel_dev *accel_dev);
        int (*ring_pair_reset)(struct adf_accel_dev *accel_dev,
            u32 bank_number);
        void (*config_ring_irq)(struct adf_accel_dev *accel_dev,
            u32 bank_number,
            u16 ring_mask);
        uint32_t (*get_objs_num)(struct adf_accel_dev *accel_dev);
        const char *(*get_obj_name)(struct adf_accel_dev *accel_dev,
            enum adf_accel_unit_services services);
        void (*pre_reset)(struct adf_accel_dev *accel_dev);
        void (*post_reset)(struct adf_accel_dev *accel_dev);
        void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
        void (*get_ring_svc_map_data)(int ring_pair_index,
            u16 ring_to_svc_map,
            u8 *serv_type,
            int *ring_index,
            int *num_rings_per_srv,
            int bundle_num);
        struct adf_hw_csr_info csr_info;
        const char *fw_name;
        const char *fw_mmp_name;
        bool reset_ack;
        uint32_t fuses;
        uint32_t accel_capabilities_mask;
        uint32_t instance_id;
        uint16_t accel_mask;
        u32 aerucm_mask;
        u32 ae_mask;
        u32 admin_ae_mask;
        u32 service_mask;
        u32 service_to_load_mask;
        u32 heartbeat_ctr_num;
        uint16_t tx_rings_mask;
        uint8_t tx_rx_gap;
        uint8_t num_banks;
        u8 num_rings_per_bank;
        uint8_t num_accel;
        uint8_t num_logical_accel;
        uint8_t num_engines;
        bool get_ring_to_svc_done;
        int (*get_storage_enabled)(struct adf_accel_dev *accel_dev,
            uint32_t *storage_enabled);
        u8 query_storage_cap;
        u32 clock_frequency;
        u8 storage_enable;
        u32 extended_dc_capabilities;
        int (*config_device)(struct adf_accel_dev *accel_dev);
        u32 asym_ae_active_thd_mask;
        u16 asym_rings_mask;
        int (*get_fw_image_type)(struct adf_accel_dev *accel_dev,
            enum adf_cfg_fw_image_type *fw_image_type);
        u16 ring_to_svc_map;
} __packed;
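
/*
 * Illustrative sketch only (kept out of the build): adf_hw_device_data is a
 * per-generation ops/config table; common code queries device properties
 * through the callbacks rather than hard-coding them.  The calls below only
 * show the calling convention.
 */
#if 0
static inline void
adf_hw_data_example(struct adf_hw_device_data *hw_data)
{
        uint32_t num_aes = hw_data->get_num_aes(hw_data);
        uint32_t misc_bar_id = hw_data->get_misc_bar_id(hw_data);
        enum dev_sku_info sku = hw_data->get_sku(hw_data);

        (void)num_aes;
        (void)misc_bar_id;
        (void)sku;
}
#endif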

/* helper enum for performing CSR operations */
enum operation {
        AND,
        OR,
};

/* 32-bit CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
        bus_write_4(csr_base, csr_offset, val)

/* 64-bit CSR write macro */
#ifdef __x86_64__
#define ADF_CSR_WR64(csr_base, csr_offset, val) \
        bus_write_8(csr_base, csr_offset, val)
#else
static __inline void
adf_csr_wr64(struct resource *csr_base, bus_size_t offset, uint64_t value)
{
        bus_write_4(csr_base, offset, (uint32_t)value);
        bus_write_4(csr_base, offset + 4, (uint32_t)(value >> 32));
}
#define ADF_CSR_WR64(csr_base, csr_offset, val) \
        adf_csr_wr64(csr_base, csr_offset, val)
#endif

/* 32-bit CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) bus_read_4(csr_base, csr_offset)

/* 64-bit CSR read macro */
#ifdef __x86_64__
#define ADF_CSR_RD64(csr_base, csr_offset) bus_read_8(csr_base, csr_offset)
#else
static __inline uint64_t
adf_csr_rd64(struct resource *csr_base, bus_size_t offset)
{
        return (((uint64_t)bus_read_4(csr_base, offset)) |
            (((uint64_t)bus_read_4(csr_base, offset + 4)) << 32));
}
#define ADF_CSR_RD64(csr_base, csr_offset) adf_csr_rd64(csr_base, csr_offset)
#endif
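
/*
 * Illustrative sketch only (kept out of the build): ADF_CSR_WR64/ADF_CSR_RD64
 * expand to a single 8-byte bus access on x86_64 and to two 4-byte accesses
 * (low dword first) elsewhere.  The ring-base offset below is hypothetical.
 */
#if 0
static inline void
adf_csr_wr64_example(struct resource *csr_base, uint64_t ring_base)
{
        bus_size_t ring_base_offset = 0x1000;   /* hypothetical CSR offset */

        ADF_CSR_WR64(csr_base, ring_base_offset, ring_base);
}
#endif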

#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_DEV_SKU(accel_dev) (accel_dev->accel_pci_dev.sku)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
        (GET_HW_DATA(accel_dev)->num_rings_per_bank)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
#define GET_SRV_TYPE(ena_srv_mask, srv) \
        (((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK)
#define SET_ASYM_MASK(asym_mask, srv) \
        ({ \
                typeof(srv) srv_ = (srv); \
                (asym_mask) |= ((1 << (srv_) * ADF_RINGS_PER_SRV_TYPE) | \
                    (1 << ((srv_) * ADF_RINGS_PER_SRV_TYPE + 1))); \
        })

#define GET_NUM_RINGS_PER_BANK(accel_dev) \
        (GET_HW_DATA(accel_dev)->num_rings_per_bank)
#define GET_MAX_PROCESSES(accel_dev) \
        ({ \
                typeof(accel_dev) dev = (accel_dev); \
                (GET_MAX_BANKS(dev) * (GET_NUM_RINGS_PER_BANK(dev) / 2)); \
        })
#define GET_DU_TABLE(accel_dev) (accel_dev->du_table)

static inline void
adf_csr_fetch_and_and(struct resource *csr, size_t offs, unsigned long mask)
{
        unsigned int val = ADF_CSR_RD(csr, offs);

        val &= mask;
        ADF_CSR_WR(csr, offs, val);
}

static inline void
adf_csr_fetch_and_or(struct resource *csr, size_t offs, unsigned long mask)
{
        unsigned int val = ADF_CSR_RD(csr, offs);

        val |= mask;
        ADF_CSR_WR(csr, offs, val);
}

static inline void
adf_csr_fetch_and_update(enum operation op,
    struct resource *csr,
    size_t offs,
    unsigned long mask)
{
        switch (op) {
        case AND:
                adf_csr_fetch_and_and(csr, offs, mask);
                break;
        case OR:
                adf_csr_fetch_and_or(csr, offs, mask);
                break;
        }
}
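
/*
 * Illustrative sketch only (kept out of the build): the helpers above
 * implement a plain read-modify-write on a CSR, selected by the AND/OR
 * operation enum.  The offset and bit used below are hypothetical.
 */
#if 0
static inline void
adf_csr_rmw_example(struct resource *pmisc_addr)
{
        size_t example_offset = 0x100;          /* hypothetical CSR offset */

        /* Set bit 0 ... */
        adf_csr_fetch_and_update(OR, pmisc_addr, example_offset, BIT(0));
        /* ... and clear it again. */
        adf_csr_fetch_and_update(AND, pmisc_addr, example_offset, ~BIT(0));
}
#endif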

struct pfvf_stats {
        struct dentry *stats_file;
        /* Messages put in CSR */
        unsigned int tx;
        /* Messages read from CSR */
        unsigned int rx;
        /* Interrupt fired but int bit was clear */
        unsigned int spurious;
        /* Block messages sent */
        unsigned int blk_tx;
        /* Block messages received */
        unsigned int blk_rx;
        /* Blocks received with CRC errors */
        unsigned int crc_err;
        /* CSR in use by other side */
        unsigned int busy;
        /* Receiver did not acknowledge */
        unsigned int no_ack;
        /* Collision detected */
        unsigned int collision;
        /* Couldn't send a response */
        unsigned int tx_timeout;
        /* Didn't receive a response */
        unsigned int rx_timeout;
        /* Responses received */
        unsigned int rx_rsp;
        /* Messages re-transmitted */
        unsigned int retry;
        /* Event put timeout */
        unsigned int event_timeout;
};

#define NUM_PFVF_COUNTERS 14

void adf_get_admin_info(struct admin_info *admin_csrs_info);
struct adf_admin_comms {
        bus_addr_t phy_addr;
        bus_addr_t const_tbl_addr;
        bus_addr_t aram_map_phys_addr;
        bus_addr_t phy_hb_addr;
        bus_dmamap_t aram_map;
        bus_dmamap_t const_tbl_map;
        bus_dmamap_t hb_map;
        char *virt_addr;
        char *virt_hb_addr;
        struct resource *mailbox_addr;
        struct sx lock;
        struct bus_dmamem dma_mem;
        struct bus_dmamem dma_hb;
};

struct icp_qat_fw_loader_handle;
struct adf_fw_loader_data {
        struct icp_qat_fw_loader_handle *fw_loader;
        const struct firmware *uof_fw;
        const struct firmware *mmp_fw;
};

struct adf_accel_vf_info {
        struct adf_accel_dev *accel_dev;
        struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
        u32 vf_nr;
        bool init;
        u8 compat_ver;
        struct pfvf_stats pfvf_counters;
};

struct adf_fw_versions {
        u8 fw_version_major;
        u8 fw_version_minor;
        u8 fw_version_patch;
        u8 mmp_version_major;
        u8 mmp_version_minor;
        u8 mmp_version_patch;
};

struct adf_int_timer {
        struct adf_accel_dev *accel_dev;
        struct workqueue_struct *timer_irq_wq;
        struct timer_list timer;
        u32 timeout_val;
        u32 int_cnt;
        bool enabled;
};

#define ADF_COMPAT_CHECKER_MAX 8
typedef int (*adf_iov_compat_checker_t)(struct adf_accel_dev *accel_dev,
    u8 vf_compat_ver);
struct adf_accel_compat_manager {
        u8 num_chker;
        adf_iov_compat_checker_t iov_compat_checkers[ADF_COMPAT_CHECKER_MAX];
};

struct adf_heartbeat;
struct adf_accel_dev {
        struct adf_hw_aram_info *aram_info;
        struct adf_accel_unit_info *au_info;
        struct adf_etr_data *transport;
        struct adf_hw_device_data *hw_device;
        struct adf_cfg_device_data *cfg;
        struct adf_fw_loader_data *fw_loader;
        struct adf_admin_comms *admin;
        struct adf_uio_control_accel *accel;
        struct adf_heartbeat *heartbeat;
        struct adf_int_timer *int_timer;
        struct adf_fw_versions fw_versions;
        unsigned int autoreset_on_error;
        struct adf_fw_counters_data *fw_counters_data;
        struct sysctl_oid *debugfs_ae_config;
        struct list_head crypto_list;
        atomic_t *ras_counters;
        unsigned long status;
        atomic_t ref_count;
        bus_dma_tag_t dma_tag;
        struct sysctl_ctx_list sysctl_ctx;
        struct sysctl_oid *ras_correctable;
        struct sysctl_oid *ras_uncorrectable;
        struct sysctl_oid *ras_fatal;
        struct sysctl_oid *ras_reset;
        struct sysctl_oid *pke_replay_dbgfile;
        struct sysctl_oid *misc_error_dbgfile;
        struct sysctl_oid *fw_version_oid;
        struct sysctl_oid *mmp_version_oid;
        struct sysctl_oid *hw_version_oid;
        struct sysctl_oid *cnv_error_oid;
        struct list_head list;
        struct adf_accel_pci accel_pci_dev;
        struct adf_accel_compat_manager *cm;
        u8 compat_ver;
#ifdef QAT_DISABLE_SAFE_DC_MODE
        struct sysctl_oid *safe_dc_mode;
        u8 disable_safe_dc_mode;
#endif /* QAT_DISABLE_SAFE_DC_MODE */
        union {
                struct {
                        /* vf_info is non-zero when SR-IOV is init'ed */
                        struct adf_accel_vf_info *vf_info;
                        int num_vfs;
                } pf;
                struct {
                        bool irq_enabled;
                        struct resource *irq;
                        void *cookie;
                        struct task pf2vf_bh_tasklet;
                        struct mutex vf2pf_lock; /* protect CSR access */
                        struct completion msg_received;
                        struct pfvf_message
                            response; /* temp field holding pf2vf response */
                        enum ring_reset_result rpreset_sts;
                        struct mutex rpreset_lock; /* protect rpreset_sts */
                        struct pfvf_stats pfvf_counters;
                        u8 pf_compat_ver;
                } vf;
        } u1;
        bool is_vf;
        u32 accel_id;
        void *lac_dev;
        struct mutex lock; /* protect accel_dev during start/stop etc. */
};
#endif