/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_LAN_TX_RX_H_
#define _ICE_LAN_TX_RX_H_

union ice_32byte_rx_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
		/* bit 0 of hdr_addr is DD bit */
		__le64 rsvd1;
		__le64 rsvd2;
	} read;
	struct {
		struct {
			struct {
				__le16 mirroring_status;
				__le16 l2tag1;
			} lo_dword;
			union {
				__le32 rss; /* RSS Hash */
				__le32 fd_id; /* Flow Director filter id */
			} hi_dword;
		} qword0;
		struct {
			/* status/error/PTYPE/length */
			__le64 status_error_len;
		} qword1;
		struct {
			__le16 ext_status; /* extended status */
			__le16 rsvd;
			__le16 l2tag2_1;
			__le16 l2tag2_2;
		} qword2;
		struct {
			__le32 reserved;
			__le32 fd_id;
		} qword3;
	} wb; /* writeback */
};

struct ice_rx_ptype_decoded {
	u32 ptype:10;
	u32 known:1;
	u32 outer_ip:1;
	u32 outer_ip_ver:2;
	u32 outer_frag:1;
	u32 tunnel_type:3;
	u32 tunnel_end_prot:2;
	u32 tunnel_end_frag:1;
	u32 inner_prot:4;
	u32 payload_layer:3;
};

enum ice_rx_ptype_outer_ip {
	ICE_RX_PTYPE_OUTER_L2 = 0,
	ICE_RX_PTYPE_OUTER_IP = 1,
};

enum ice_rx_ptype_outer_ip_ver {
	ICE_RX_PTYPE_OUTER_NONE = 0,
	ICE_RX_PTYPE_OUTER_IPV4 = 1,
	ICE_RX_PTYPE_OUTER_IPV6 = 2,
};

enum ice_rx_ptype_outer_fragmented {
	ICE_RX_PTYPE_NOT_FRAG = 0,
	ICE_RX_PTYPE_FRAG = 1,
};

enum ice_rx_ptype_tunnel_type {
	ICE_RX_PTYPE_TUNNEL_NONE = 0,
	ICE_RX_PTYPE_TUNNEL_IP_IP = 1,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
};

enum ice_rx_ptype_tunnel_end_prot {
	ICE_RX_PTYPE_TUNNEL_END_NONE = 0,
	ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1,
	ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2,
};

enum ice_rx_ptype_inner_prot {
	ICE_RX_PTYPE_INNER_PROT_NONE = 0,
	ICE_RX_PTYPE_INNER_PROT_UDP = 1,
	ICE_RX_PTYPE_INNER_PROT_TCP = 2,
	ICE_RX_PTYPE_INNER_PROT_SCTP = 3,
	ICE_RX_PTYPE_INNER_PROT_ICMP = 4,
	ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
};

enum ice_rx_ptype_payload_layer {
	ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};

/* RX Flex Descriptor
 * This descriptor is used instead of the legacy version descriptor when
 * ice_rlan_ctx.adv_desc is set
 */
union ice_32b_rx_flex_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
		/* bit 0 of hdr_addr is DD bit */
		__le64 rsvd1;
		__le64 rsvd2;
	} read;
	struct {
		/* Qword 0 */
		u8 rxdid; /* descriptor builder profile id */
		u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len; /* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
						/* sph=[11:11] */
						/* ff1/ext=[15:12] */

		/* Qword 1 */
		__le16 status_error0;
		__le16 l2tag1;
		__le16 flex_meta0;
		__le16 flex_meta1;

		/* Qword 2 */
		__le16 status_error1;
		u8 flex_flags2;
		u8 time_stamp_low;
		__le16 l2tag2_1st;
		__le16 l2tag2_2nd;

		/* Qword 3 */
		__le16 flex_meta2;
		__le16 flex_meta3;
		union {
			struct {
				__le16 flex_meta4;
				__le16 flex_meta5;
			} flex;
			__le32 ts_high;
		} flex_ts;
	} wb; /* writeback */
};
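
/* Illustrative sketch, not part of the hardware definition: a minimal way a
 * driver might poll the Descriptor Done indication on a legacy 32-byte
 * writeback descriptor.  This assumes DD sits at bit 0 of
 * qword1.status_error_len (mirroring the read-format note that bit 0 of
 * hdr_addr is the DD bit) and that the usual kernel byte-order helpers are
 * available.  The helper name is hypothetical.
 */
static inline bool ice_rx_desc_done_example(const union ice_32byte_rx_desc *desc)
{
	/* the field is little-endian on the wire; convert before testing bit 0 */
	return !!(le64_to_cpu(desc->wb.qword1.status_error_len) & 0x1ULL);
}
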
/* Rx Flex Descriptor NIC Profile
 * This descriptor corresponds to RxDID 2 which contains
 * metadata fields for RSS, flow id and timestamp info
 */
struct ice_32b_rx_flex_desc_nic {
	/* Qword 0 */
	u8 rxdid;
	u8 mir_id_umb_cast;
	__le16 ptype_flexi_flags0;
	__le16 pkt_len;
	__le16 hdr_len_sph_flex_flags1;

	/* Qword 1 */
	__le16 status_error0;
	__le16 l2tag1;
	__le32 rss_hash;

	/* Qword 2 */
	__le16 status_error1;
	u8 flexi_flags2;
	u8 ts_low;
	__le16 l2tag2_1st;
	__le16 l2tag2_2nd;

	/* Qword 3 */
	__le32 flow_id;
	union {
		struct {
			__le16 vlan_id;
			__le16 flow_id_ipv6;
		} flex;
		__le32 ts_high;
	} flex_ts;
};

/* Receive Flex Descriptor profile IDs: There is a total of 64 profiles.
 * Profile IDs 0/1 are for legacy, and profiles 2-63 are flex profiles that
 * can be programmed with specific metadata (profile 7 is reserved for HW).
 */
enum ice_rxdid {
	ICE_RXDID_LEGACY_0 = 0,
	ICE_RXDID_LEGACY_1 = 1,
	ICE_RXDID_FLEX_NIC = 2,
	ICE_RXDID_FLEX_NIC_2 = 6,
	ICE_RXDID_HW = 7,
	ICE_RXDID_LAST = 63,
};

/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID 0x01

/* Receive Descriptor MDID values */
enum ice_flex_rx_mdid {
	ICE_RX_MDID_FLOW_ID_LOWER = 5,
	ICE_RX_MDID_FLOW_ID_HIGH,
	ICE_RX_MDID_SRC_VSI = 19,
	ICE_RX_MDID_HASH_LOW = 56,
	ICE_RX_MDID_HASH_HIGH,
};

/* Rx Flag64 packet flag bits */
enum ice_rx_flg64_bits {
	ICE_RXFLG_PKT_DSI	= 0,
	ICE_RXFLG_EVLAN_x8100	= 15,
	ICE_RXFLG_EVLAN_x9100,
	ICE_RXFLG_VLAN_x8100,
	ICE_RXFLG_TNL_MAC	= 22,
	ICE_RXFLG_TNL_VLAN,
	ICE_RXFLG_PKT_FRG,
	ICE_RXFLG_FIN		= 32,
	ICE_RXFLG_SYN,
	ICE_RXFLG_RST,
	ICE_RXFLG_TNL0		= 38,
	ICE_RXFLG_TNL1,
	ICE_RXFLG_TNL2,
	ICE_RXFLG_UDP_GRE,
	ICE_RXFLG_RSVD		= 63
};

/* for ice_32b_rx_flex_desc.ptype_flex_flags0 member */
#define ICE_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */

/* for ice_32b_rx_flex_desc.pkt_len member */
#define ICE_RX_FLX_DESC_PKT_LEN_M	(0x3FFF) /* 14-bits */

enum ice_rx_flex_desc_status_error_0_bits {
	/* Note: These are predefined bit offsets */
	ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
	ICE_RX_FLEX_DESC_STATUS0_EOF_S,
	ICE_RX_FLEX_DESC_STATUS0_HBO_S,
	ICE_RX_FLEX_DESC_STATUS0_L3L4P_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
	ICE_RX_FLEX_DESC_STATUS0_LPBK_S,
	ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
	ICE_RX_FLEX_DESC_STATUS0_RXE_S,
	ICE_RX_FLEX_DESC_STATUS0_CRCP_S,
	ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
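
/* Illustrative sketch, not part of the hardware definition: how a driver
 * might pull the Descriptor Done bit, packet type, and packet length out of
 * a flex writeback descriptor using the masks and status bit positions
 * defined above.  The helper names are hypothetical; BIT() and le16_to_cpu()
 * are assumed to be available from the usual kernel headers.
 */
static inline bool ice_flex_desc_done_example(const union ice_32b_rx_flex_desc *rx_desc)
{
	return !!(le16_to_cpu(rx_desc->wb.status_error0) &
		  BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S));
}

static inline u16 ice_flex_desc_ptype_example(const union ice_32b_rx_flex_desc *rx_desc)
{
	return le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
	       ICE_RX_FLEX_DESC_PTYPE_M;
}

static inline u16 ice_flex_desc_pkt_len_example(const union ice_32b_rx_flex_desc *rx_desc)
{
	return le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M;
}
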
#define ICE_RXQ_CTX_SIZE_DWORDS		8
#define ICE_RXQ_CTX_SZ			(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS	22
#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS	5
#define GLTCLAN_CQ_CNTX(i, CQ)		(GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))

/* RLAN Rx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If we do not have the width of the variable set to the correct
 * size then we could end up shifting bits off the top of the variable when the
 * variable is at the top of a byte and crosses over into the next byte.
 */
struct ice_rlan_ctx {
	u16 head;
	u16 cpuid; /* bigger than needed, see above for reason */
#define ICE_RLAN_BASE_S 7
	u64 base;
	u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
	u16 dbuf; /* bigger than needed, see above for reason */
#define ICE_RLAN_CTX_HBUF_S 6
	u16 hbuf; /* bigger than needed, see above for reason */
	u8 dtype;
	u8 dsize;
	u8 crcstrip;
	u8 l2tsel;
	u8 hsplit_0;
	u8 hsplit_1;
	u8 showiv;
	u32 rxmax; /* bigger than needed, see above for reason */
	u8 tphrdesc_ena;
	u8 tphwdesc_ena;
	u8 tphdata_ena;
	u8 tphhead_ena;
	u16 lrxqthresh; /* bigger than needed, see above for reason */
};

struct ice_ctx_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};

#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) {	\
	.offset = offsetof(struct _struct, _ele),	\
	.size_of = FIELD_SIZEOF(struct _struct, _ele),	\
	.width = _width,				\
	.lsb = _lsb,					\
}

/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
	ICE_RLAN_RX_HSPLIT_0_NO_SPLIT		= 0,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_L2		= 1,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_IP		= 2,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP	= 4,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP		= 8,
};

/* for hsplit_1 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_1 {
	ICE_RLAN_RX_HSPLIT_1_NO_SPLIT		= 0,
	ICE_RLAN_RX_HSPLIT_1_SPLIT_L2		= 1,
	ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS	= 2,
};

/* TX Descriptor */
struct ice_tx_desc {
	__le64 buf_addr; /* Address of descriptor's data buf */
	__le64 cmd_type_offset_bsz;
};

enum ice_tx_desc_dtype_value {
	ICE_TX_DESC_DTYPE_DATA		= 0x0,
	ICE_TX_DESC_DTYPE_CTX		= 0x1,
	/* DESC_DONE - HW has completed write-back of descriptor */
	ICE_TX_DESC_DTYPE_DESC_DONE	= 0xF,
};

#define ICE_TXD_QW1_CMD_S	4
#define ICE_TXD_QW1_CMD_M	(0xFFFUL << ICE_TXD_QW1_CMD_S)

enum ice_tx_desc_cmd_bits {
	ICE_TX_DESC_CMD_EOP		= 0x0001,
	ICE_TX_DESC_CMD_RS		= 0x0002,
	ICE_TX_DESC_CMD_IL2TAG1		= 0x0008,
	ICE_TX_DESC_CMD_IIPT_IPV6	= 0x0020, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4	= 0x0040, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4_CSUM	= 0x0060, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_TCP	= 0x0100, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_UDP	= 0x0300, /* 2 BITS */
};

#define ICE_TXD_QW1_OFFSET_S	16
#define ICE_TXD_QW1_OFFSET_M	(0x3FFFFULL << ICE_TXD_QW1_OFFSET_S)

enum ice_tx_desc_len_fields {
	/* Note: These are predefined bit offsets */
	ICE_TX_DESC_LEN_MACLEN_S	= 0, /* 7 BITS */
	ICE_TX_DESC_LEN_IPLEN_S		= 7, /* 7 BITS */
	ICE_TX_DESC_LEN_L4_LEN_S	= 14 /* 4 BITS */
};

#define ICE_TXD_QW1_MACLEN_M	(0x7FUL << ICE_TX_DESC_LEN_MACLEN_S)
#define ICE_TXD_QW1_IPLEN_M	(0x7FUL << ICE_TX_DESC_LEN_IPLEN_S)
#define ICE_TXD_QW1_L4LEN_M	(0xFUL << ICE_TX_DESC_LEN_L4_LEN_S)

/* Tx descriptor field limits in bytes */
#define ICE_TXD_MACLEN_MAX	((ICE_TXD_QW1_MACLEN_M >> \
				  ICE_TX_DESC_LEN_MACLEN_S) * ICE_BYTES_PER_WORD)
#define ICE_TXD_IPLEN_MAX	((ICE_TXD_QW1_IPLEN_M >> \
				  ICE_TX_DESC_LEN_IPLEN_S) * ICE_BYTES_PER_DWORD)
#define ICE_TXD_L4LEN_MAX	((ICE_TXD_QW1_L4LEN_M >> \
				  ICE_TX_DESC_LEN_L4_LEN_S) * ICE_BYTES_PER_DWORD)

#define ICE_TXD_QW1_TX_BUF_SZ_S	34
#define ICE_TXD_QW1_L2TAG1_S	48
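
/* Illustrative sketch, not part of the hardware definition: one way a driver
 * might assemble cmd_type_offset_bsz for a Tx data descriptor from the shifts
 * above.  DTYPE occupies the low four bits and so needs no shift; td_offset
 * is itself composed from the MACLEN/IPLEN/L4LEN sub-fields before being
 * shifted into place.  The helper name is hypothetical.
 */
static inline __le64 ice_build_ctob_example(u64 td_cmd, u64 td_offset, u64 size,
					    u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   (size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << ICE_TXD_QW1_L2TAG1_S));
}
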
/* Context descriptors */
struct ice_tx_ctx_desc {
	__le32 tunneling_params;
	__le16 l2tag2;
	__le16 rsvd;
	__le64 qw1;
};

#define ICE_TXD_CTX_QW1_CMD_S	4
#define ICE_TXD_CTX_QW1_CMD_M	(0x7FUL << ICE_TXD_CTX_QW1_CMD_S)

#define ICE_TXD_CTX_QW1_TSO_LEN_S	30
#define ICE_TXD_CTX_QW1_TSO_LEN_M	\
			(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)

#define ICE_TXD_CTX_QW1_MSS_S	50

enum ice_tx_ctx_desc_cmd_bits {
	ICE_TX_CTX_DESC_TSO		= 0x01,
	ICE_TX_CTX_DESC_TSYN		= 0x02,
	ICE_TX_CTX_DESC_IL2TAG2		= 0x04,
	ICE_TX_CTX_DESC_IL2TAG2_IL2H	= 0x08,
	ICE_TX_CTX_DESC_SWTCH_NOTAG	= 0x00,
	ICE_TX_CTX_DESC_SWTCH_UPLINK	= 0x10,
	ICE_TX_CTX_DESC_SWTCH_LOCAL	= 0x20,
	ICE_TX_CTX_DESC_SWTCH_VSI	= 0x30,
	ICE_TX_CTX_DESC_RESERVED	= 0x40
};

#define ICE_LAN_TXQ_MAX_QGRPS	127
#define ICE_LAN_TXQ_MAX_QDIS	1023

/* Tx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If we do not have the width of the variable set to the correct
 * size then we could end up shifting bits off the top of the variable when the
 * variable is at the top of a byte and crosses over into the next byte.
 */
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S	7
	u64 base;		/* base is defined in 128-byte units */
	u8 port_num;
	u16 cgd_num;		/* bigger than needed, see above for reason */
	u8 pf_num;
	u16 vmvf_num;
	u8 vmvf_type;
#define ICE_TLAN_CTX_VMVF_TYPE_VF	0
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ	1
#define ICE_TLAN_CTX_VMVF_TYPE_PF	2
	u16 src_vsi;
	u8 tsyn_ena;
	u8 alt_vlan;
	u16 cpuid;		/* bigger than needed, see above for reason */
	u8 wb_mode;
	u8 tphrd_desc;
	u8 tphrd;
	u8 tphwr_desc;
	u16 cmpq_id;
	u16 qnum_in_func;
	u8 itr_notification_mode;
	u8 adjust_prof_id;
	u32 qlen;		/* bigger than needed, see above for reason */
	u8 quanta_prof_idx;
	u8 tso_ena;
	u16 tso_qnum;
	u8 legacy_int;
	u8 drop_ena;
	u8 cache_prof_idx;
	u8 pkt_shaper_prof_idx;
	u8 int_q_state;		/* width not needed - internal, do not write */
};

/* macro to make the table lines short */
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	{	PTYPE, \
		1, \
		ICE_RX_PTYPE_OUTER_##OUTER_IP, \
		ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		ICE_RX_PTYPE_##OUTER_FRAG, \
		ICE_RX_PTYPE_TUNNEL_##T, \
		ICE_RX_PTYPE_TUNNEL_END_##TE, \
		ICE_RX_PTYPE_##TEF, \
		ICE_RX_PTYPE_INNER_PROT_##I, \
		ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros make the table fit but are terse */
#define ICE_RX_PTYPE_NOF	ICE_RX_PTYPE_NOT_FRAG

/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
	/* L2 Packet types */
	ICE_PTT_UNUSED_ENTRY(0),
	ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
};

static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
{
	return ice_ptype_lkup[ptype];
}
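
/* Illustrative sketch: a usage pattern for the decode helper above, e.g. when
 * deciding whether L4 checksum offload results apply to a received packet.
 * The helper name is hypothetical, and the caller is responsible for ensuring
 * ptype indexes a populated entry of ice_ptype_lkup.
 */
static inline bool ice_ptype_is_tcp_udp_example(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return false;

	return decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_TCP ||
	       decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_UDP;
}
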
#define ICE_LINK_SPEED_UNKNOWN		0
#define ICE_LINK_SPEED_10MBPS		10
#define ICE_LINK_SPEED_100MBPS		100
#define ICE_LINK_SPEED_1000MBPS		1000
#define ICE_LINK_SPEED_2500MBPS		2500
#define ICE_LINK_SPEED_5000MBPS		5000
#define ICE_LINK_SPEED_10000MBPS	10000
#define ICE_LINK_SPEED_20000MBPS	20000
#define ICE_LINK_SPEED_25000MBPS	25000
#define ICE_LINK_SPEED_40000MBPS	40000

#endif /* _ICE_LAN_TX_RX_H_ */