/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */

#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005
#define I40E_ITR_50K 0x000A
#define I40E_ITR_20K 0x0019
#define I40E_ITR_18K 0x001B
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
#define I40E_ITR_RX_DEF I40E_ITR_20K
#define I40E_ITR_TX_DEF I40E_ITR_20K
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
#define I40E_DEFAULT_IRQ_WORK 256
#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA BIT(6)
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define I40E_INTRL_8K 125 /* 8000 ints/sec */
#define I40E_INTRL_62K 16 /* 62500 ints/sec */
#define I40E_INTRL_83K 12 /* 83333 ints/sec */
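
/* Illustrative conversions (sketch only, not part of the driver): how the
 * macros above translate a user ITR setting of 40 usec with the dynamic
 * flag set, and a rate limit of I40E_INTRL_62K (16 usec), into register
 * values and back:
 *
 *	u16 setting = 40 | I40E_ITR_DYNAMIC;	// 0x8028
 *	u16 itr_reg = ITR_TO_REG(setting);	// (0x28 >> 1) == 0x14, 2 usec units
 *	bool dyn    = ITR_IS_DYNAMIC(setting);	// true
 *	u16 usecs   = ITR_REG_TO_USEC(itr_reg);	// 0x14 << 1 == 40 usec
 *	u8  intrl   = INTRL_USEC_TO_REG(16);	// (16 >> 2) | INTRL_ENA == 0x44
 *	u16 back    = INTRL_REG_TO_USEC(intrl);	// (0x04) << 2 == 16 usec
 *
 * Note the 4 usec INTRL resolution: values that are not a multiple of 4
 * are truncated on the round trip.
 */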

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; it is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
#define I40E_PE_ITR I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096 4096
#define I40E_RXBUFFER_8192 8192
#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
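
/* Usage sketch (illustrative): the Rx clean-up path typically checks the
 * descriptor-done bit this way, assuming I40E_RX_DESC_STATUS_DD_SHIFT as
 * defined in i40e_type.h:
 *
 *	if (!i40e_test_staterr(rx_desc,
 *			       BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 *		break;	// descriptor not yet written back by hardware
 */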

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		r->next_to_clean = i;		\
	} while (0)

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE 4096
#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))

/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
 * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
 * that 12K is not a power of 2 and division is expensive. It is used to
 * approximate the number of descriptors used per linear buffer. Note
 * that this will overestimate in some cases as it doesn't account for the
 * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
 * the error should not impact things much as large buffers usually mean
 * we will use fewer descriptors than there are frags in an skb.
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
	unsigned int adjust = ~(u32)0;

	/* if we rounded up on the reciprocal pull down the adjustment */
	if ((max * reciprocal) > adjust)
		adjust = ~(u32)(reciprocal - 1);

	return (u32)((((u64)size * reciprocal) + adjust) >> 32);
}
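
/* Worked example (illustrative only): with I40E_MAX_DATA_PER_TXD_ALIGNED
 * equal to 12288 (12K), the reciprocal evaluates to 349525 and adjust
 * stays at ~(u32)0 (0xFFFFFFFF), so for a 32 KB linear buffer:
 *
 *	i40e_txd_use_count(32768)
 *		== (u32)(((u64)32768 * 349525 + 0xFFFFFFFF) >> 32)
 *		== 3
 *
 * which matches DIV_ROUND_UP(32768, 12288), i.e. three data descriptors.
 */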

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_FCCRC		BIT(6)
#define I40E_TX_FLAGS_FSO		BIT(7)
#define I40E_TX_FLAGS_TSYN		BIT(8)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_lost_interrupt;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_NO_SPLIT      0
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	unsigned long state;
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	unsigned long last_rx_timestamp;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET	BIT(2)

	/* stats structs */
	struct i40e_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;

enum i40e_latency_range {
	I40E_LOWEST_LATENCY = 0,
	I40E_LOW_LATENCY = 1,
	I40E_BULK_LATENCY = 2,
	I40E_ULTRA_LATENCY = 3,
};

struct i40e_ring_container {
	/* array of pointers to rings */
	struct i40e_ring *ring;
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	enum i40e_latency_range latency_range;
	u16 itr;
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
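
/* Usage sketch (illustrative): walking every Tx ring attached to a queue
 * vector, assuming a valid struct i40e_q_vector *q_vector with a "tx" ring
 * container (as declared in i40e.h); "total_bytes" here is a hypothetical
 * local accumulator:
 *
 *	struct i40e_ring *ring;
 *	u64 total_bytes = 0;
 *
 *	i40e_for_each_ring(ring, q_vector->tx)
 *		total_bytes += ring->stats.bytes;
 */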

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
#ifdef I40E_FCOE
void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
		 struct i40e_tx_buffer *first, u32 tx_flags,
		 const u8 hdr_len, u32 td_cmd, u32 td_offset);
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
			       struct i40e_ring *tx_ring, u32 *flags);
#endif
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns the number of data descriptors needed for this skb, covering the
 * linear data and every fragment.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to assure are available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}

/**
 * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
 * @ptype: the packet type field from Rx descriptor write-back
 **/
static inline bool i40e_rx_is_fcoe(u16 ptype)
{
	return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
	       (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
#endif /* _I40E_TXRX_H_ */