/* SPDX-License-Identifier: GPL-2.0-only
 * Copyright (C) 2020 Marvell.
 */

#ifndef __OTX2_CPT_COMMON_H
#define __OTX2_CPT_COMMON_H

#include <linux/pci.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <net/devlink.h>
#include "otx2_cpt_hw_types.h"
#include "rvu.h"
#include "mbox.h"

#define OTX2_CPT_MAX_VFS_NUM 128
#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
		(((blk) << 20) | ((slot) << 12) | (offs))

#define OTX2_CPT_RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func)

#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
#define OTX2_CPT_NAME_LENGTH 64
#define OTX2_CPT_DMA_MINALIGN 128

/* HW capability flags */
#define CN10K_MBOX 0
#define CN10K_LMTST 1

#define BAD_OTX2_CPT_ENG_TYPE OTX2_CPT_MAX_ENG_TYPES

enum otx2_cpt_eng_type {
	OTX2_CPT_AE_TYPES = 1,
	OTX2_CPT_SE_TYPES = 2,
	OTX2_CPT_IE_TYPES = 3,
	OTX2_CPT_MAX_ENG_TYPES,
};

/* Take mbox id from end of CPT mbox range in AF (range 0xA00 - 0xBFF) */
#define MBOX_MSG_RX_INLINE_IPSEC_LF_CFG 0xBFE
#define MBOX_MSG_GET_ENG_GRP_NUM 0xBFF
#define MBOX_MSG_GET_CAPS 0xBFD
#define MBOX_MSG_GET_KVF_LIMITS 0xBFC

/*
 * Message request to configure a CPT LF for inline inbound IPsec.
 * This message is only used between CPT PF <-> CPT VF.
 */
struct otx2_cpt_rx_inline_lf_cfg {
	struct mbox_msghdr hdr;
	u16 sso_pf_func;
	u16 param1;
	u16 param2;
	u16 opcode;
	u32 credit;
	u32 credit_th;
	u16 bpid;
	u32 reserved;
	u8 ctx_ilen_valid : 1;
	u8 ctx_ilen : 7;
};

/*
 * Message request and response to get the engine group number
 * which has a given type of engines (SE, AE, IE) attached.
 * These messages are only used between CPT PF <=> CPT VF.
 */
struct otx2_cpt_egrp_num_msg {
	struct mbox_msghdr hdr;
	u8 eng_type;
};

struct otx2_cpt_egrp_num_rsp {
	struct mbox_msghdr hdr;
	u8 eng_type;
	u8 eng_grp_num;
};

/*
 * Message request and response to get kernel crypto limits.
 * These messages are only used between CPT PF <-> CPT VF.
 */
struct otx2_cpt_kvf_limits_msg {
	struct mbox_msghdr hdr;
};

struct otx2_cpt_kvf_limits_rsp {
	struct mbox_msghdr hdr;
	u8 kvf_limits;
};

/* CPT HW capabilities */
union otx2_cpt_eng_caps {
	u64 u;
	struct {
		u64 reserved_0_4:5;
		u64 mul:1;
		u64 sha1_sha2:1;
		u64 chacha20:1;
		u64 zuc_snow3g:1;
		u64 sha3:1;
		u64 aes:1;
		u64 kasumi:1;
		u64 des:1;
		u64 crc:1;
		u64 mmul:1;
		u64 reserved_15_33:19;
		u64 pdcp_chain:1;
		u64 reserved_35_63:29;
	};
};
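
/*
 * Illustrative sketch only, not part of the driver API: one way a CPT VF
 * could consume otx2_cpt_eng_caps after copying the eng_caps[] array out
 * of the mailbox capabilities response (struct otx2_cpt_caps_rsp, defined
 * below). The helper name is made up here purely for documentation.
 */
static inline bool
otx2_cpt_example_se_has_sha(const union otx2_cpt_eng_caps *eng_caps)
{
	/* eng_caps[] is indexed by enum otx2_cpt_eng_type */
	return eng_caps[OTX2_CPT_SE_TYPES].sha1_sha2;
}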

/*
 * Message request and response to get HW capabilities for each
 * engine type (SE, IE, AE).
 * These messages are only used between CPT PF <=> CPT VF.
 */
struct otx2_cpt_caps_msg {
	struct mbox_msghdr hdr;
};

struct otx2_cpt_caps_rsp {
	struct mbox_msghdr hdr;
	u16 cpt_pf_drv_version;
	u8 cpt_revision;
	union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
};

static inline void otx2_cpt_write64(void __iomem *reg_base, u64 blk, u64 slot,
				    u64 offs, u64 val)
{
	writeq_relaxed(val, reg_base +
		       OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
}

static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
				  u64 offs)
{
	return readq_relaxed(reg_base +
			     OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
}

static inline bool is_dev_otx2(struct pci_dev *pdev)
{
	return pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
	       pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID;
}

static inline bool is_dev_cn10ka(struct pci_dev *pdev)
{
	return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A;
}

static inline bool is_dev_cn10ka_ax(struct pci_dev *pdev)
{
	return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
	       ((pdev->revision & 0xFF) == 4 ||
		(pdev->revision & 0xFF) == 0x50 ||
		(pdev->revision & 0xFF) == 0x51);
}

static inline bool is_dev_cn10kb(struct pci_dev *pdev)
{
	return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_B;
}

static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
{
	return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
	       (pdev->revision & 0xFF) == 0x54;
}

static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
					unsigned long *cap_flag)
{
	if (!is_dev_otx2(pdev)) {
		__set_bit(CN10K_MBOX, cap_flag);
		__set_bit(CN10K_LMTST, cap_flag);
	}
}

static inline bool cpt_is_errata_38550_exists(struct pci_dev *pdev)
{
	return is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev);
}

static inline bool cpt_feature_sgv2(struct pci_dev *pdev)
{
	return !is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev);
}

int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);

int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
				  struct pci_dev *pdev);
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
			      u64 reg, u64 val, int blkaddr);
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
			 u64 reg, u64 *val, int blkaddr);
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
			  u64 reg, u64 val, int blkaddr);
struct otx2_cptlfs_info;
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox);
int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot);
int otx2_cpt_lmtst_tbl_setup_msg(struct otx2_cptlfs_info *lfs);

#endif /* __OTX2_CPT_COMMON_H */
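
/*
 * Worked example (illustrative, not part of the driver): the register
 * helpers above build an RVU function address as
 * (blk << 20) | (slot << 12) | offs before the relaxed 64-bit MMIO
 * access, so with blk = 1, slot = 2 and offs = 0x10:
 *
 *	OTX2_CPT_RVU_FUNC_ADDR_S(1, 2, 0x10) == 0x102010
 *
 *	otx2_cpt_write64(reg_base, 1, 2, 0x10, val) then writes 'val' to
 *	reg_base + 0x102010 via writeq_relaxed().
 *
 * The reg_base/val names here are placeholders; real callers pass the
 * BAR mapping held by the CPT LF/VF code.
 */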