/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_

#include "qat_freebsd.h"
#include "adf_cfg_common.h"
#include "adf_pfvf_msg.h"

#define ADF_CFG_NUM_SERVICES 4

#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_C62X_DEVICE_NAME "c6xx"
#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_200XX_DEVICE_NAME "200xx"
#define ADF_200XXVF_DEVICE_NAME "200xxvf"
#define ADF_C4XXX_DEVICE_NAME "c4xxx"
#define ADF_C4XXXVF_DEVICE_NAME "c4xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_4XXXVF_DEVICE_NAME "4xxxvf"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
#define ADF_C62X_PCI_DEVICE_ID 0x37c8
#define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9
#define ADF_C3XXX_PCI_DEVICE_ID 0x19e2
#define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3
#define ADF_200XX_PCI_DEVICE_ID 0x18ee
#define ADF_200XXIOV_PCI_DEVICE_ID 0x18ef
#define ADF_D15XX_PCI_DEVICE_ID 0x6f54
#define ADF_D15XXIOV_PCI_DEVICE_ID 0x6f55
#define ADF_C4XXX_PCI_DEVICE_ID 0x18a0
#define ADF_C4XXXIOV_PCI_DEVICE_ID 0x18a1
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943

#define IS_QAT_GEN3(ID) ({ ((ID) == ADF_C4XXX_PCI_DEVICE_ID); })
static inline bool
IS_QAT_GEN4(const unsigned int id)
{
        return (id == ADF_4XXX_PCI_DEVICE_ID || id == ADF_401XX_PCI_DEVICE_ID ||
            id == ADF_4XXXIOV_PCI_DEVICE_ID ||
            id == ADF_401XXIOV_PCI_DEVICE_ID);
}

#define IS_QAT_GEN3_OR_GEN4(ID) (IS_QAT_GEN3(ID) || IS_QAT_GEN4(ID))
#define ADF_VF2PF_SET_SIZE 32
#define ADF_MAX_VF2PF_SET 4
#define ADF_VF2PF_SET_OFFSET(set_nr) ((set_nr) * ADF_VF2PF_SET_SIZE)
#define ADF_VF2PF_VFNR_TO_SET(vf_nr) ((vf_nr) / ADF_VF2PF_SET_SIZE)
#define ADF_VF2PF_VFNR_TO_MASK(vf_nr) \
        ({ \
                u32 vf_nr_ = (vf_nr); \
                BIT((vf_nr_) - ADF_VF2PF_SET_SIZE * ADF_VF2PF_VFNR_TO_SET(vf_nr_)); \
        })

#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
#define ADF_MAX_MSIX_VECTOR_NAME 32
#define ADF_DEVICE_NAME_PREFIX "qat_"
#define ADF_STOP_RETRY 50
#define ADF_NUM_THREADS_PER_AE (8)
#define ADF_AE_ADMIN_THREAD (7)
#define ADF_NUM_PKE_STRAND (2)
#define ADF_AE_STRAND0_THREAD (8)
#define ADF_AE_STRAND1_THREAD (9)
#define ADF_CFG_NUM_SERVICES 4
#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7
#define ADF_RINGS_PER_SRV_TYPE 2
#define ADF_THRD_ABILITY_BIT_LEN 4
#define ADF_THRD_ABILITY_MASK 0xf
#define ADF_VF_OFFSET 0x8
#define ADF_MAX_FUNC_PER_DEV 0x7
#define ADF_PCI_DEV_OFFSET 0x3

#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7

#define GET_SRV_TYPE(ena_srv_mask, srv) \
        (((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK)

#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.pfvf_ops)
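/*
 * Illustrative note (editor's sketch, not part of the driver API): the VF2PF
 * helpers above split a VF number into a 32-wide set and a bit within that
 * set.  For example, assuming vf_nr == 37:
 *
 *     ADF_VF2PF_VFNR_TO_SET(37)  == 1        (37 / 32)
 *     ADF_VF2PF_VFNR_TO_MASK(37) == BIT(5)   (37 - 1 * 32)
 *     ADF_VF2PF_SET_OFFSET(1)    == 32
 *
 * Similarly, GET_SRV_TYPE() extracts the ADF_SRV_TYPE_BIT_LEN-bit service
 * type for slot 'srv' from a packed enabled-services mask, e.g.
 * GET_SRV_TYPE(ena_srv_mask, 2) returns bits 8:6 of ena_srv_mask.
 */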
#define ADF_DEFAULT_RING_TO_SRV_MAP \
        (CRYPTO | CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
         NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
         COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

enum adf_accel_capabilities {
        ADF_ACCEL_CAPABILITIES_NULL = 0,
        ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
        ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
        ADF_ACCEL_CAPABILITIES_CIPHER = 4,
        ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
        ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
        ADF_ACCEL_CAPABILITIES_DEPRECATED = 64,
        ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};

struct adf_bar {
        rman_res_t base_addr;
        struct resource *virt_addr;
        rman_res_t size;
} __packed;

struct adf_accel_msix {
        struct msix_entry *entries;
        u32 num_entries;
} __packed;

struct adf_accel_pci {
        device_t pci_dev;
        struct adf_accel_msix msix_entries;
        struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
        uint8_t revid;
        uint8_t sku;
        int node;
} __packed;

enum dev_state { DEV_DOWN = 0, DEV_UP };

enum dev_sku_info {
        DEV_SKU_1 = 0,
        DEV_SKU_2,
        DEV_SKU_3,
        DEV_SKU_4,
        DEV_SKU_VF,
        DEV_SKU_1_CY,
        DEV_SKU_2_CY,
        DEV_SKU_3_CY,
        DEV_SKU_UNKNOWN
};

static inline const char *
get_sku_info(enum dev_sku_info info)
{
        switch (info) {
        case DEV_SKU_1:
                return "SKU1";
        case DEV_SKU_1_CY:
                return "SKU1CY";
        case DEV_SKU_2:
                return "SKU2";
        case DEV_SKU_2_CY:
                return "SKU2CY";
        case DEV_SKU_3:
                return "SKU3";
        case DEV_SKU_3_CY:
                return "SKU3CY";
        case DEV_SKU_4:
                return "SKU4";
        case DEV_SKU_VF:
                return "SKUVF";
        case DEV_SKU_UNKNOWN:
        default:
                break;
        }
        return "Unknown SKU";
}

enum adf_accel_unit_services {
        ADF_ACCEL_SERVICE_NULL = 0,
        ADF_ACCEL_INLINE_CRYPTO = 1,
        ADF_ACCEL_CRYPTO = 2,
        ADF_ACCEL_COMPRESSION = 4,
        ADF_ACCEL_ASYM = 8,
        ADF_ACCEL_ADMIN = 16
};

struct adf_ae_info {
        u32 num_asym_thd;
        u32 num_sym_thd;
        u32 num_dc_thd;
} __packed;

struct adf_accel_unit {
        u8 au_mask;
        u32 accel_mask;
        u64 ae_mask;
        u64 comp_ae_mask;
        u32 num_ae;
        enum adf_accel_unit_services services;
} __packed;

struct adf_accel_unit_info {
        u32 inline_ingress_msk;
        u32 inline_egress_msk;
        u32 sym_ae_msk;
        u32 asym_ae_msk;
        u32 dc_ae_msk;
        u8 num_cy_au;
        u8 num_dc_au;
        u8 num_asym_au;
        u8 num_inline_au;
        struct adf_accel_unit *au;
        const struct adf_ae_info *ae_info;
} __packed;
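/*
 * Illustrative note (editor's sketch): the adf_accel_unit_services values
 * above are powers of two, so a single variable can describe more than one
 * service where that is needed, e.g. (ADF_ACCEL_CRYPTO | ADF_ACCEL_COMPRESSION).
 * Whether a given accel-unit field holds a single value or a combination is
 * device- and firmware-specific.
 */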
struct adf_hw_aram_info {
        /* Inline Egress mask. "1" = AE is working with egress traffic */
        u32 inline_direction_egress_mask;
        /* Inline congestion management profiles set in config file */
        u32 inline_congest_mngt_profile;
        /* Initialise CY AE mask, "1" = AE is used for CY operations */
        u32 cy_ae_mask;
        /* Initialise DC AE mask, "1" = AE is used for DC operations */
        u32 dc_ae_mask;
        /* Number of long words used to define the ARAM regions */
        u32 num_aram_lw_entries;
        /* ARAM region definitions */
        u32 mmp_region_size;
        u32 mmp_region_offset;
        u32 skm_region_size;
        u32 skm_region_offset;
        /*
         * Defines size and offset of compression intermediate buffers stored
         * in ARAM (device's on-chip memory).
         */
        u32 inter_buff_aram_region_size;
        u32 inter_buff_aram_region_offset;
        u32 sadb_region_size;
        u32 sadb_region_offset;
} __packed;

struct adf_hw_device_class {
        const char *name;
        const enum adf_device_type type;
        uint32_t instances;
} __packed;

struct arb_info {
        u32 arbiter_offset;
        u32 wrk_thd_2_srv_arb_map;
        u32 wrk_cfg_offset;
} __packed;

struct admin_info {
        u32 admin_msg_ur;
        u32 admin_msg_lr;
        u32 mailbox_offset;
} __packed;

struct adf_hw_csr_ops {
        u64 (*build_csr_ring_base_addr)(bus_addr_t addr, u32 size);
        u32 (*read_csr_ring_head)(struct resource *csr_base_addr, u32 bank,
            u32 ring);
        void (*write_csr_ring_head)(struct resource *csr_base_addr, u32 bank,
            u32 ring, u32 value);
        u32 (*read_csr_ring_tail)(struct resource *csr_base_addr, u32 bank,
            u32 ring);
        void (*write_csr_ring_tail)(struct resource *csr_base_addr, u32 bank,
            u32 ring, u32 value);
        u32 (*read_csr_e_stat)(struct resource *csr_base_addr, u32 bank);
        void (*write_csr_ring_config)(struct resource *csr_base_addr, u32 bank,
            u32 ring, u32 value);
        bus_addr_t (*read_csr_ring_base)(struct resource *csr_base_addr,
            u32 bank, u32 ring);
        void (*write_csr_ring_base)(struct resource *csr_base_addr, u32 bank,
            u32 ring, bus_addr_t addr);
        void (*write_csr_int_flag)(struct resource *csr_base_addr, u32 bank,
            u32 value);
        void (*write_csr_int_srcsel)(struct resource *csr_base_addr, u32 bank);
        void (*write_csr_int_col_en)(struct resource *csr_base_addr, u32 bank,
            u32 value);
        void (*write_csr_int_col_ctl)(struct resource *csr_base_addr, u32 bank,
            u32 value);
        void (*write_csr_int_flag_and_col)(struct resource *csr_base_addr,
            u32 bank, u32 value);
        u32 (*read_csr_ring_srv_arb_en)(struct resource *csr_base_addr,
            u32 bank);
        void (*write_csr_ring_srv_arb_en)(struct resource *csr_base_addr,
            u32 bank, u32 value);
        u32 (*get_src_sel_mask)(void);
        u32 (*get_int_col_ctl_enable_mask)(void);
        u32 (*get_bank_irq_mask)(u32 irq_mask);
};

struct adf_cfg_device_data;
struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;

struct adf_pfvf_ops {
        int (*enable_comms)(struct adf_accel_dev *accel_dev);
        u32 (*get_pf2vf_offset)(u32 i);
        u32 (*get_vf2pf_offset)(u32 i);
        void (*enable_vf2pf_interrupts)(struct resource *pmisc_addr,
            u32 vf_mask);
        void (*disable_all_vf2pf_interrupts)(struct resource *pmisc_addr);
        u32 (*disable_pending_vf2pf_interrupts)(struct resource *pmisc_addr);
        int (*send_msg)(struct adf_accel_dev *accel_dev,
            struct pfvf_message msg, u32 pfvf_offset, struct mutex *csr_lock);
        struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
            u32 pfvf_offset, u8 compat_ver);
};

struct adf_hw_csr_info {
        struct adf_hw_csr_ops csr_ops;
        struct adf_pfvf_ops pfvf_ops;
        u32 csr_addr_offset;
        u32 ring_bundle_size;
        u32 bank_int_flag_clear_mask;
        u32 num_rings_per_int_srcsel;
        u32 arb_enable_mask;
};
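/*
 * Illustrative usage (editor's sketch, not a call site from this driver):
 * generation-specific register layouts are hidden behind adf_hw_csr_ops, so
 * callers reach them through the accessor defined earlier in this header,
 * e.g.
 *
 *     GET_CSR_OPS(accel_dev)->write_csr_ring_tail(csr_base, bank, ring, tail);
 *
 * where csr_base, bank, ring and tail are caller-supplied values.
 */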
struct adf_hw_device_data {
        struct adf_hw_device_class *dev_class;
        uint32_t (*get_accel_mask)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_ae_mask)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
        uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
        uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
        uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
        uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
        void (*notify_and_wait_ethernet)(struct adf_accel_dev *accel_dev);
        bool (*get_eth_doorbell_msg)(struct adf_accel_dev *accel_dev);
        void (*get_arb_info)(struct arb_info *arb_csrs_info);
        void (*get_admin_info)(struct admin_info *admin_csrs_info);
        void (*get_errsou_offset)(u32 *errsou3, u32 *errsou5);
        uint32_t (*get_num_accel_units)(struct adf_hw_device_data *self);
        int (*init_accel_units)(struct adf_accel_dev *accel_dev);
        void (*exit_accel_units)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_clock_speed)(struct adf_hw_device_data *self);
        enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
        bool (*check_prod_sku)(struct adf_accel_dev *accel_dev);
        int (*alloc_irq)(struct adf_accel_dev *accel_dev);
        void (*free_irq)(struct adf_accel_dev *accel_dev);
        void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
        int (*check_uncorrectable_error)(struct adf_accel_dev *accel_dev);
        void (*print_err_registers)(struct adf_accel_dev *accel_dev);
        void (*disable_error_interrupts)(struct adf_accel_dev *accel_dev);
        int (*init_ras)(struct adf_accel_dev *accel_dev);
        void (*exit_ras)(struct adf_accel_dev *accel_dev);
        void (*disable_arb)(struct adf_accel_dev *accel_dev);
        void (*update_ras_errors)(struct adf_accel_dev *accel_dev, int error);
        bool (*ras_interrupts)(struct adf_accel_dev *accel_dev,
            bool *reset_required);
        int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
        void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
        int (*send_admin_init)(struct adf_accel_dev *accel_dev);
        void (*set_asym_rings_mask)(struct adf_accel_dev *accel_dev);
        int (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev,
            u16 *ring_to_svc_map);
        uint32_t (*get_accel_cap)(struct adf_accel_dev *accel_dev);
        int (*init_arb)(struct adf_accel_dev *accel_dev);
        void (*exit_arb)(struct adf_accel_dev *accel_dev);
        void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
            const uint32_t **cfg);
        int (*init_device)(struct adf_accel_dev *accel_dev);
        int (*get_heartbeat_status)(struct adf_accel_dev *accel_dev);
        int (*int_timer_init)(struct adf_accel_dev *accel_dev);
        void (*int_timer_exit)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_ae_clock)(struct adf_hw_device_data *self);
        uint32_t (*get_hb_clock)(struct adf_hw_device_data *self);
        void (*disable_iov)(struct adf_accel_dev *accel_dev);
        void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
            bool enable);
        void (*enable_ints)(struct adf_accel_dev *accel_dev);
        bool (*check_slice_hang)(struct adf_accel_dev *accel_dev);
        int (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
        void (*enable_pf2vf_interrupt)(struct adf_accel_dev *accel_dev);
        void (*disable_pf2vf_interrupt)(struct adf_accel_dev *accel_dev);
        int (*interrupt_active_pf2vf)(struct adf_accel_dev *accel_dev);
        int (*get_int_active_bundles)(struct adf_accel_dev *accel_dev);
        void (*reset_device)(struct adf_accel_dev *accel_dev);
        void (*reset_hw_units)(struct adf_accel_dev *accel_dev);
        int (*measure_clock)(struct adf_accel_dev *accel_dev);
        void (*restore_device)(struct adf_accel_dev *accel_dev);
        uint32_t (*get_obj_cfg_ae_mask)(struct adf_accel_dev *accel_dev,
            enum adf_accel_unit_services services);
        enum adf_accel_unit_services (*get_service_type)(
            struct adf_accel_dev *accel_dev, s32 obj_num);
        int (*add_pke_stats)(struct adf_accel_dev *accel_dev);
        void (*remove_pke_stats)(struct adf_accel_dev *accel_dev);
        int (*add_misc_error)(struct adf_accel_dev *accel_dev);
        int (*count_ras_event)(struct adf_accel_dev *accel_dev, u32 *ras_event,
            char *aeidstr);
        void (*remove_misc_error)(struct adf_accel_dev *accel_dev);
        int (*configure_accel_units)(struct adf_accel_dev *accel_dev);
        int (*ring_pair_reset)(struct adf_accel_dev *accel_dev,
            u32 bank_number);
        void (*config_ring_irq)(struct adf_accel_dev *accel_dev,
            u32 bank_number, u16 ring_mask);
        uint32_t (*get_objs_num)(struct adf_accel_dev *accel_dev);
        const char *(*get_obj_name)(struct adf_accel_dev *accel_dev,
            enum adf_accel_unit_services services);
        void (*pre_reset)(struct adf_accel_dev *accel_dev);
        void (*post_reset)(struct adf_accel_dev *accel_dev);
        void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
        void (*get_ring_svc_map_data)(int ring_pair_index, u16 ring_to_svc_map,
            u8 *serv_type, int *ring_index, int *num_rings_per_srv,
            int bundle_num);
        struct adf_hw_csr_info csr_info;
        const char *fw_name;
        const char *fw_mmp_name;
        bool reset_ack;
        uint32_t fuses;
        uint32_t accel_capabilities_mask;
        uint32_t instance_id;
        uint16_t accel_mask;
        u32 aerucm_mask;
        u32 ae_mask;
        u32 admin_ae_mask;
        u32 service_mask;
        u32 service_to_load_mask;
        u32 heartbeat_ctr_num;
        uint16_t tx_rings_mask;
        uint8_t tx_rx_gap;
        uint8_t num_banks;
        u8 num_rings_per_bank;
        uint8_t num_accel;
        uint8_t num_logical_accel;
        uint8_t num_engines;
        int (*get_storage_enabled)(struct adf_accel_dev *accel_dev,
            uint32_t *storage_enabled);
        u8 query_storage_cap;
        u32 clock_frequency;
        u8 storage_enable;
        u32 extended_dc_capabilities;
        int (*config_device)(struct adf_accel_dev *accel_dev);
        u32 asym_ae_active_thd_mask;
        u16 asym_rings_mask;
        int (*get_fw_image_type)(struct adf_accel_dev *accel_dev,
            enum adf_cfg_fw_image_type *fw_image_type);
        u16 ring_to_svc_map;
} __packed;
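/*
 * Illustrative note (editor's sketch): ring_to_svc_map packs one
 * ADF_SRV_TYPE_BIT_LEN-bit service type per ring pair.  Decoding the default
 * ADF_DEFAULT_RING_TO_SRV_MAP defined earlier with GET_SRV_TYPE() would look
 * roughly like:
 *
 *     u16 map = ADF_DEFAULT_RING_TO_SRV_MAP;
 *     int i;
 *
 *     for (i = 0; i < ADF_CFG_NUM_SERVICES; i++) {
 *             switch (GET_SRV_TYPE(map, i)) {
 *             case CRYPTO:  ...ring pair i carries crypto...
 *             case COMP:    ...ring pair i carries compression...
 *             case NA:      ...ring pair i is unused...
 *             }
 *     }
 *
 * CRYPTO, COMP and NA are the service-type constants already used by
 * ADF_DEFAULT_RING_TO_SRV_MAP (pulled in via the adf_cfg_common.h include).
 */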
/* helper enum for performing CSR operations */
enum operation {
        AND,
        OR,
};

/* 32-bit CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
        bus_write_4(csr_base, csr_offset, val)

/* 64-bit CSR write macro */
#ifdef __x86_64__
#define ADF_CSR_WR64(csr_base, csr_offset, val) \
        bus_write_8(csr_base, csr_offset, val)
#else
static __inline void
adf_csr_wr64(struct resource *csr_base, bus_size_t offset, uint64_t value)
{
        bus_write_4(csr_base, offset, (uint32_t)value);
        bus_write_4(csr_base, offset + 4, (uint32_t)(value >> 32));
}
#define ADF_CSR_WR64(csr_base, csr_offset, val) \
        adf_csr_wr64(csr_base, csr_offset, val)
#endif

/* 32-bit CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) bus_read_4(csr_base, csr_offset)

/* 64-bit CSR read macro */
#ifdef __x86_64__
#define ADF_CSR_RD64(csr_base, csr_offset) bus_read_8(csr_base, csr_offset)
#else
static __inline uint64_t
adf_csr_rd64(struct resource *csr_base, bus_size_t offset)
{
        return (((uint64_t)bus_read_4(csr_base, offset)) |
            (((uint64_t)bus_read_4(csr_base, offset + 4)) << 32));
}
#define ADF_CSR_RD64(csr_base, csr_offset) adf_csr_rd64(csr_base, csr_offset)
#endif
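/*
 * Illustrative note (editor's sketch): on non-x86_64 builds the 64-bit
 * accessors above are emulated with two 32-bit bus accesses (low dword
 * first), so a 64-bit CSR update is not a single atomic transaction there.
 * Typical usage is the same on either path, e.g.
 *
 *     ADF_CSR_WR64(csr_base, ring_base_offset, ring_base_addr);
 *
 * where csr_base is one of the mapped BARs and the offset/value are
 * caller-supplied.
 */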
#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_DEV_SKU(accel_dev) (accel_dev->accel_pci_dev.sku)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
        (GET_HW_DATA(accel_dev)->num_rings_per_bank)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
#define GET_SRV_TYPE(ena_srv_mask, srv) \
        (((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK)
#define SET_ASYM_MASK(asym_mask, srv) \
        ({ \
                typeof(srv) srv_ = (srv); \
                (asym_mask) |= ((1 << (srv_) * ADF_RINGS_PER_SRV_TYPE) | \
                    (1 << ((srv_) * ADF_RINGS_PER_SRV_TYPE + 1))); \
        })

#define GET_NUM_RINGS_PER_BANK(accel_dev) \
        (GET_HW_DATA(accel_dev)->num_rings_per_bank)
#define GET_MAX_PROCESSES(accel_dev) \
        ({ \
                typeof(accel_dev) dev = (accel_dev); \
                (GET_MAX_BANKS(dev) * (GET_NUM_RINGS_PER_BANK(dev) / 2)); \
        })
#define GET_DU_TABLE(accel_dev) (accel_dev->du_table)

static inline void
adf_csr_fetch_and_and(struct resource *csr, size_t offs, unsigned long mask)
{
        unsigned int val = ADF_CSR_RD(csr, offs);

        val &= mask;
        ADF_CSR_WR(csr, offs, val);
}

static inline void
adf_csr_fetch_and_or(struct resource *csr, size_t offs, unsigned long mask)
{
        unsigned int val = ADF_CSR_RD(csr, offs);

        val |= mask;
        ADF_CSR_WR(csr, offs, val);
}

static inline void
adf_csr_fetch_and_update(enum operation op, struct resource *csr, size_t offs,
    unsigned long mask)
{
        switch (op) {
        case AND:
                adf_csr_fetch_and_and(csr, offs, mask);
                break;
        case OR:
                adf_csr_fetch_and_or(csr, offs, mask);
                break;
        }
}

struct pfvf_stats {
        struct dentry *stats_file;
        /* Messages put in CSR */
        unsigned int tx;
        /* Messages read from CSR */
        unsigned int rx;
        /* Interrupt fired but int bit was clear */
        unsigned int spurious;
        /* Block messages sent */
        unsigned int blk_tx;
        /* Block messages received */
        unsigned int blk_rx;
        /* Blocks received with CRC errors */
        unsigned int crc_err;
        /* CSR in use by other side */
        unsigned int busy;
        /* Receiver did not acknowledge */
        unsigned int no_ack;
        /* Collision detected */
        unsigned int collision;
        /* Couldn't send a response */
        unsigned int tx_timeout;
        /* Didn't receive a response */
        unsigned int rx_timeout;
        /* Responses received */
        unsigned int rx_rsp;
        /* Messages re-transmitted */
        unsigned int retry;
        /* Event put timeout */
        unsigned int event_timeout;
};

#define NUM_PFVF_COUNTERS 14

void adf_get_admin_info(struct admin_info *admin_csrs_info);

struct adf_admin_comms {
        bus_addr_t phy_addr;
        bus_addr_t const_tbl_addr;
        bus_addr_t aram_map_phys_addr;
        bus_addr_t phy_hb_addr;
        bus_dmamap_t aram_map;
        bus_dmamap_t const_tbl_map;
        bus_dmamap_t hb_map;
        char *virt_addr;
        char *virt_hb_addr;
        struct resource *mailbox_addr;
        struct sx lock;
        struct bus_dmamem dma_mem;
        struct bus_dmamem dma_hb;
};
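/*
 * Illustrative note (editor's sketch): with ADF_RINGS_PER_SRV_TYPE == 2,
 * SET_ASYM_MASK(mask, 1) above sets bits 2 and 3 of 'mask', i.e. the two
 * rings belonging to service slot 1.  Likewise, for a hypothetical device
 * with num_banks == 4 and num_rings_per_bank == 16, GET_MAX_PROCESSES()
 * evaluates to 4 * (16 / 2) == 32.
 */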
struct icp_qat_fw_loader_handle;
struct adf_fw_loader_data {
        struct icp_qat_fw_loader_handle *fw_loader;
        const struct firmware *uof_fw;
        const struct firmware *mmp_fw;
};

struct adf_accel_vf_info {
        struct adf_accel_dev *accel_dev;
        struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
        u32 vf_nr;
        bool init;
        u8 compat_ver;
        struct pfvf_stats pfvf_counters;
};

struct adf_fw_versions {
        u8 fw_version_major;
        u8 fw_version_minor;
        u8 fw_version_patch;
        u8 mmp_version_major;
        u8 mmp_version_minor;
        u8 mmp_version_patch;
};

struct adf_int_timer {
        struct adf_accel_dev *accel_dev;
        struct workqueue_struct *timer_irq_wq;
        struct timer_list timer;
        u32 timeout_val;
        u32 int_cnt;
        bool enabled;
};

#define ADF_COMPAT_CHECKER_MAX 8
typedef int (*adf_iov_compat_checker_t)(struct adf_accel_dev *accel_dev,
    u8 vf_compat_ver);
struct adf_accel_compat_manager {
        u8 num_chker;
        adf_iov_compat_checker_t iov_compat_checkers[ADF_COMPAT_CHECKER_MAX];
};

struct adf_heartbeat;
struct adf_accel_dev {
        struct adf_hw_aram_info *aram_info;
        struct adf_accel_unit_info *au_info;
        struct adf_etr_data *transport;
        struct adf_hw_device_data *hw_device;
        struct adf_cfg_device_data *cfg;
        struct adf_fw_loader_data *fw_loader;
        struct adf_admin_comms *admin;
        struct adf_uio_control_accel *accel;
        struct adf_heartbeat *heartbeat;
        struct adf_int_timer *int_timer;
        struct adf_fw_versions fw_versions;
        unsigned int autoreset_on_error;
        struct adf_fw_counters_data *fw_counters_data;
        struct sysctl_oid *debugfs_ae_config;
        struct list_head crypto_list;
        atomic_t *ras_counters;
        unsigned long status;
        atomic_t ref_count;
        bus_dma_tag_t dma_tag;
        struct sysctl_ctx_list sysctl_ctx;
        struct sysctl_oid *ras_correctable;
        struct sysctl_oid *ras_uncorrectable;
        struct sysctl_oid *ras_fatal;
        struct sysctl_oid *ras_reset;
        struct sysctl_oid *pke_replay_dbgfile;
        struct sysctl_oid *misc_error_dbgfile;
        struct list_head list;
        struct adf_accel_pci accel_pci_dev;
        struct adf_accel_compat_manager *cm;
        u8 compat_ver;
        union {
                struct {
                        /* vf_info is non-zero when SR-IOV is init'ed */
                        struct adf_accel_vf_info *vf_info;
                        int num_vfs;
                } pf;
                struct {
                        bool irq_enabled;
                        struct resource *irq;
                        void *cookie;
                        struct task pf2vf_bh_tasklet;
                        struct mutex vf2pf_lock; /* protect CSR access */
                        struct completion msg_received;
                        /* temp field holding pf2vf response */
                        struct pfvf_message response;
                        enum ring_reset_result rpreset_sts;
                        struct mutex rpreset_lock; /* protect rpreset_sts */
                        struct pfvf_stats pfvf_counters;
                        u8 pf_compat_ver;
                } vf;
        } u1;
        bool is_vf;
        u32 accel_id;
        void *lac_dev;
};
#endif