// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */

#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
#include <net/page_pool/helpers.h>
#include <net/inet_ecn.h>
#include <linux/iopoll.h>
#include <linux/sctp.h>
#include <linux/pci.h>
#include <net/tcp.h>
#include <net/ip.h>

#include "wx_type.h"
#include "wx_lib.h"
#include "wx_hw.h"

/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static struct wx_dec_ptype wx_ptype_lookup[256] = {
	/* L2: mac */
	[0x11] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
	[0x12] = WX_PTT(L2, NONE, NONE, NONE, TS, PAY2),
	[0x13] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
	[0x14] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
	[0x15] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE),
	[0x16] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
	[0x17] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE),

	/* L2: ethertype filter */
	[0x18 ... 0x1F] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE),

	/* L3: ip non-tunnel */
	[0x21] = WX_PTT(IP, FGV4, NONE, NONE, NONE, PAY3),
	[0x22] = WX_PTT(IP, IPV4, NONE, NONE, NONE, PAY3),
	[0x23] = WX_PTT(IP, IPV4, NONE, NONE, UDP, PAY4),
	[0x24] = WX_PTT(IP, IPV4, NONE, NONE, TCP, PAY4),
	[0x25] = WX_PTT(IP, IPV4, NONE, NONE, SCTP, PAY4),
	[0x29] = WX_PTT(IP, FGV6, NONE, NONE, NONE, PAY3),
	[0x2A] = WX_PTT(IP, IPV6, NONE, NONE, NONE, PAY3),
	[0x2B] = WX_PTT(IP, IPV6, NONE, NONE, UDP, PAY3),
	[0x2C] = WX_PTT(IP, IPV6, NONE, NONE, TCP, PAY4),
	[0x2D] = WX_PTT(IP, IPV6, NONE, NONE, SCTP, PAY4),

	/* L2: fcoe */
	[0x30 ... 0x34] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3),
	[0x38 ... 0x3C] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3),

	/* IPv4 --> IPv4/IPv6 */
	[0x81] = WX_PTT(IP, IPV4, IPIP, FGV4, NONE, PAY3),
	[0x82] = WX_PTT(IP, IPV4, IPIP, IPV4, NONE, PAY3),
	[0x83] = WX_PTT(IP, IPV4, IPIP, IPV4, UDP, PAY4),
	[0x84] = WX_PTT(IP, IPV4, IPIP, IPV4, TCP, PAY4),
	[0x85] = WX_PTT(IP, IPV4, IPIP, IPV4, SCTP, PAY4),
	[0x89] = WX_PTT(IP, IPV4, IPIP, FGV6, NONE, PAY3),
	[0x8A] = WX_PTT(IP, IPV4, IPIP, IPV6, NONE, PAY3),
	[0x8B] = WX_PTT(IP, IPV4, IPIP, IPV6, UDP, PAY4),
	[0x8C] = WX_PTT(IP, IPV4, IPIP, IPV6, TCP, PAY4),
	[0x8D] = WX_PTT(IP, IPV4, IPIP, IPV6, SCTP, PAY4),

	/* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */
	[0x90] = WX_PTT(IP, IPV4, IG, NONE, NONE, PAY3),
	[0x91] = WX_PTT(IP, IPV4, IG, FGV4, NONE, PAY3),
	[0x92] = WX_PTT(IP, IPV4, IG, IPV4, NONE, PAY3),
	[0x93] = WX_PTT(IP, IPV4, IG, IPV4, UDP, PAY4),
	[0x94] = WX_PTT(IP, IPV4, IG, IPV4, TCP, PAY4),
	[0x95] = WX_PTT(IP, IPV4, IG, IPV4, SCTP, PAY4),
	[0x99] = WX_PTT(IP, IPV4, IG, FGV6, NONE, PAY3),
	[0x9A] = WX_PTT(IP, IPV4, IG, IPV6, NONE, PAY3),
	[0x9B] = WX_PTT(IP, IPV4, IG, IPV6, UDP, PAY4),
	[0x9C] = WX_PTT(IP, IPV4, IG, IPV6, TCP, PAY4),
	[0x9D] = WX_PTT(IP, IPV4, IG, IPV6, SCTP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */
	[0xA0] = WX_PTT(IP, IPV4, IGM, NONE, NONE, PAY3),
	[0xA1] = WX_PTT(IP, IPV4, IGM, FGV4, NONE, PAY3),
	[0xA2] = WX_PTT(IP, IPV4, IGM, IPV4, NONE, PAY3),
	[0xA3] = WX_PTT(IP, IPV4, IGM, IPV4, UDP, PAY4),
	[0xA4] = WX_PTT(IP, IPV4, IGM, IPV4, TCP, PAY4),
	[0xA5] = WX_PTT(IP, IPV4, IGM, IPV4, SCTP, PAY4),
	[0xA9] = WX_PTT(IP, IPV4, IGM, FGV6, NONE, PAY3),
	[0xAA] = WX_PTT(IP, IPV4, IGM, IPV6, NONE, PAY3),
	[0xAB] = WX_PTT(IP, IPV4, IGM, IPV6, UDP, PAY4),
	[0xAC] = WX_PTT(IP, IPV4, IGM, IPV6, TCP, PAY4),
	[0xAD] = WX_PTT(IP, IPV4, IGM, IPV6, SCTP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */
	[0xB0] = WX_PTT(IP, IPV4, IGMV, NONE, NONE, PAY3),
	[0xB1] = WX_PTT(IP, IPV4, IGMV, FGV4, NONE, PAY3),
	[0xB2] = WX_PTT(IP, IPV4, IGMV, IPV4, NONE, PAY3),
	[0xB3] = WX_PTT(IP, IPV4, IGMV, IPV4, UDP, PAY4),
	[0xB4] = WX_PTT(IP, IPV4, IGMV, IPV4, TCP, PAY4),
	[0xB5] = WX_PTT(IP, IPV4, IGMV, IPV4, SCTP, PAY4),
	[0xB9] = WX_PTT(IP, IPV4, IGMV, FGV6, NONE, PAY3),
	[0xBA] = WX_PTT(IP, IPV4, IGMV, IPV6, NONE, PAY3),
	[0xBB] = WX_PTT(IP, IPV4, IGMV, IPV6, UDP, PAY4),
	[0xBC] = WX_PTT(IP, IPV4, IGMV, IPV6, TCP, PAY4),
	[0xBD] = WX_PTT(IP, IPV4, IGMV, IPV6, SCTP, PAY4),

	/* IPv6 --> IPv4/IPv6 */
	[0xC1] = WX_PTT(IP, IPV6, IPIP, FGV4, NONE, PAY3),
	[0xC2] = WX_PTT(IP, IPV6, IPIP, IPV4, NONE, PAY3),
	[0xC3] = WX_PTT(IP, IPV6, IPIP, IPV4, UDP, PAY4),
	[0xC4] = WX_PTT(IP, IPV6, IPIP, IPV4, TCP, PAY4),
	[0xC5] = WX_PTT(IP, IPV6, IPIP, IPV4, SCTP, PAY4),
	[0xC9] = WX_PTT(IP, IPV6, IPIP, FGV6, NONE, PAY3),
	[0xCA] = WX_PTT(IP, IPV6, IPIP, IPV6, NONE, PAY3),
	[0xCB] = WX_PTT(IP, IPV6, IPIP, IPV6, UDP, PAY4),
	[0xCC] = WX_PTT(IP, IPV6, IPIP, IPV6, TCP, PAY4),
	[0xCD] = WX_PTT(IP, IPV6, IPIP, IPV6, SCTP, PAY4),

	/* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */
	[0xD0] = WX_PTT(IP, IPV6, IG, NONE, NONE, PAY3),
	[0xD1] = WX_PTT(IP, IPV6, IG, FGV4, NONE, PAY3),
	[0xD2] = WX_PTT(IP, IPV6, IG, IPV4, NONE, PAY3),
	[0xD3] = WX_PTT(IP, IPV6, IG, IPV4, UDP, PAY4),
	[0xD4] = WX_PTT(IP, IPV6, IG, IPV4, TCP, PAY4),
	[0xD5] = WX_PTT(IP, IPV6, IG, IPV4, SCTP, PAY4),
	[0xD9] = WX_PTT(IP, IPV6, IG, FGV6, NONE, PAY3),
	[0xDA] = WX_PTT(IP, IPV6, IG, IPV6, NONE, PAY3),
	[0xDB] = WX_PTT(IP, IPV6, IG, IPV6, UDP, PAY4),
	[0xDC] = WX_PTT(IP, IPV6, IG, IPV6, TCP, PAY4),
	[0xDD] = WX_PTT(IP, IPV6, IG, IPV6, SCTP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */
	[0xE0] = WX_PTT(IP, IPV6, IGM, NONE, NONE, PAY3),
	[0xE1] = WX_PTT(IP, IPV6, IGM, FGV4, NONE, PAY3),
	[0xE2] = WX_PTT(IP, IPV6, IGM, IPV4, NONE, PAY3),
	[0xE3] = WX_PTT(IP, IPV6, IGM, IPV4, UDP, PAY4),
	[0xE4] = WX_PTT(IP, IPV6, IGM, IPV4, TCP, PAY4),
	[0xE5] = WX_PTT(IP, IPV6, IGM, IPV4, SCTP, PAY4),
	[0xE9] = WX_PTT(IP, IPV6, IGM, FGV6, NONE, PAY3),
	[0xEA] = WX_PTT(IP, IPV6, IGM, IPV6, NONE, PAY3),
	[0xEB] = WX_PTT(IP, IPV6, IGM, IPV6, UDP, PAY4),
	[0xEC] = WX_PTT(IP, IPV6, IGM, IPV6, TCP, PAY4),
	[0xED] = WX_PTT(IP, IPV6, IGM, IPV6, SCTP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC+VLAN --> NONE/IPv4/IPv6 */
	[0xF0] = WX_PTT(IP, IPV6, IGMV, NONE, NONE, PAY3),
	[0xF1] = WX_PTT(IP, IPV6, IGMV, FGV4, NONE, PAY3),
	[0xF2] = WX_PTT(IP, IPV6, IGMV, IPV4, NONE, PAY3),
	[0xF3] = WX_PTT(IP, IPV6, IGMV, IPV4, UDP, PAY4),
	[0xF4] = WX_PTT(IP, IPV6, IGMV, IPV4, TCP, PAY4),
	[0xF5] = WX_PTT(IP, IPV6, IGMV, IPV4, SCTP, PAY4),
	[0xF9] = WX_PTT(IP, IPV6, IGMV, FGV6, NONE, PAY3),
	[0xFA] = WX_PTT(IP, IPV6, IGMV, IPV6, NONE, PAY3),
	[0xFB] = WX_PTT(IP, IPV6, IGMV, IPV6, UDP, PAY4),
	[0xFC] = WX_PTT(IP, IPV6, IGMV, IPV6, TCP, PAY4),
	[0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4),
};

static struct wx_dec_ptype wx_decode_ptype(const u8 ptype)
{
	return wx_ptype_lookup[ptype];
}

/* wx_test_staterr - tests bits in Rx descriptor status and error fields */
static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
			      const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}

static void wx_dma_sync_frag(struct wx_ring *rx_ring,
			     struct wx_rx_buffer *rx_buffer)
{
	struct sk_buff *skb = rx_buffer->skb;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

	dma_sync_single_range_for_cpu(rx_ring->dev,
				      WX_CB(skb)->dma,
				      skb_frag_off(frag),
				      skb_frag_size(frag),
				      DMA_FROM_DEVICE);

	/* If the page was released, just unmap it. */
	if (unlikely(WX_CB(skb)->page_released))
		page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
}

static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
					     union wx_rx_desc *rx_desc,
					     struct sk_buff **skb,
					     int *rx_buffer_pgcnt)
{
	struct wx_rx_buffer *rx_buffer;
	unsigned int size;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	size = le16_to_cpu(rx_desc->wb.upper.length);

#if (PAGE_SIZE < 8192)
	*rx_buffer_pgcnt = page_count(rx_buffer->page);
#else
	*rx_buffer_pgcnt = 0;
#endif

	prefetchw(rx_buffer->page);
	*skb = rx_buffer->skb;

	/* Delay unmapping of the first packet. It carries the header
	 * information, HW may still access the header after the writeback.
	 * Only unmap it when EOP is reached
	 */
	if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) {
		if (!*skb)
			goto skip_sync;
	} else {
		if (*skb)
			wx_dma_sync_frag(rx_ring, rx_buffer);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);
skip_sync:
	return rx_buffer;
}

static void wx_put_rx_buffer(struct wx_ring *rx_ring,
			     struct wx_rx_buffer *rx_buffer,
			     struct sk_buff *skb,
			     int rx_buffer_pgcnt)
{
	if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
		/* the page has been released from the ring */
		WX_CB(skb)->page_released = true;

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
	rx_buffer->skb = NULL;
}

static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
				    struct wx_rx_buffer *rx_buffer,
				    union wx_rx_desc *rx_desc)
{
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = WX_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif
	struct sk_buff *skb = rx_buffer->skb;

	if (!skb) {
		void *page_addr = page_address(rx_buffer->page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256);
		if (unlikely(!skb))
			return NULL;

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);

		if (size <= WX_RXBUFFER_256) {
			memcpy(__skb_put(skb, size), page_addr,
			       ALIGN(size, sizeof(long)));
			page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true);
			return skb;
		}

		skb_mark_for_recycle(skb);

		if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))
			WX_CB(skb)->dma = rx_buffer->dma;

		skb_add_rx_frag(skb, 0, rx_buffer->page,
				rx_buffer->page_offset,
				size, truesize);
		goto out;

	} else {
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
				rx_buffer->page_offset, size, truesize);
	}

out:
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= truesize;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
				 struct wx_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	page = page_pool_dev_alloc_pages(rx_ring->page_pool);
	WARN_ON(!page);
	dma = page_pool_get_dma_addr(page);

	bi->page_dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * wx_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union wx_rx_desc *rx_desc;
	struct wx_rx_buffer *bi;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = WX_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!wx_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 WX_RX_BUFSZ,
						 DMA_FROM_DEVICE);

		rx_desc->read.pkt_addr =
			cpu_to_le64(bi->page_dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = WX_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.upper.status_error = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}

u16 wx_desc_unused(struct wx_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
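
/* Worked example of the arithmetic above (illustrative values only):
 * assuming a 512-entry ring with next_to_clean = 10 and
 * next_to_use = 500, the result is 512 + 10 - 500 - 1 = 21 free slots;
 * with next_to_clean = 500 and next_to_use = 10 it is
 * 0 + 500 - 10 - 1 = 489. One descriptor is always left unused so a
 * completely full ring can be told apart from an empty one.
 */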

/**
 * wx_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool wx_is_non_eop(struct wx_ring *rx_ring,
			  union wx_rx_desc *rx_desc,
			  struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(WX_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
	if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)))
		return false;

	rx_ring->rx_buffer_info[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

static void wx_pull_tail(struct sk_buff *skb)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int pull_len;
	unsigned char *va;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lomem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(skb->dev, va, WX_RXBUFFER_256);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	skb_frag_off_add(frag, pull_len);
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

/**
 * wx_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right. These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool wx_cleanup_headers(struct wx_ring *rx_ring,
			       union wx_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	struct net_device *netdev = rx_ring->netdev;

	/* verify that the packet does not have any known errors */
	if (!netdev ||
	    unlikely(wx_test_staterr(rx_desc, WX_RXD_ERR_RXE) &&
		     !(netdev->features & NETIF_F_RXALL))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* place header in linear portion of buffer */
	if (!skb_headlen(skb))
		wx_pull_tail(skb);

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

static void wx_rx_hash(struct wx_ring *ring,
		       union wx_rx_desc *rx_desc,
		       struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   WX_RXD_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (WX_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

/**
 * wx_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static void wx_rx_checksum(struct wx_ring *ring,
			   union wx_rx_desc *rx_desc,
			   struct sk_buff *skb)
{
	struct wx_dec_ptype dptype = wx_decode_ptype(WX_RXD_PKTTYPE(rx_desc));

	skb_checksum_none_assert(skb);
	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IPv4 header checksum error */
	if ((wx_test_staterr(rx_desc, WX_RXD_STAT_IPCS) &&
	     wx_test_staterr(rx_desc, WX_RXD_ERR_IPE)) ||
	    (wx_test_staterr(rx_desc, WX_RXD_STAT_OUTERIPCS) &&
	     wx_test_staterr(rx_desc, WX_RXD_ERR_OUTERIPER))) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* L4 checksum offload flag must be set for the below code to work */
	if (!wx_test_staterr(rx_desc, WX_RXD_STAT_L4CS))
		return;

	/* Hardware can't guarantee csum if IPv6 Dest Header found */
	if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && WX_RXD_IPV6EX(rx_desc))
		return;

	/* if L4 checksum error */
	if (wx_test_staterr(rx_desc, WX_RXD_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP or SCTP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (dptype.etype >= WX_DEC_PTYPE_ETYPE_IG)
		__skb_incr_checksum_unnecessary(skb);
	ring->rx_stats.csum_good_cnt++;
}

static void wx_rx_vlan(struct wx_ring *ring, union wx_rx_desc *rx_desc,
		       struct sk_buff *skb)
{
	u16 ethertype;
	u8 idx = 0;

	if ((ring->netdev->features &
	     (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) &&
	    wx_test_staterr(rx_desc, WX_RXD_STAT_VP)) {
		idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		       0x1c0) >> 6;
		ethertype = ring->q_vector->wx->tpid[idx];
		__vlan_hwaccel_put_tag(skb, htons(ethertype),
				       le16_to_cpu(rx_desc->wb.upper.vlan));
	}
}

/**
 * wx_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, protocol, and
 * other fields within the skb.
 **/
static void wx_process_skb_fields(struct wx_ring *rx_ring,
				  union wx_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	wx_rx_hash(rx_ring, rx_desc, skb);
	wx_rx_checksum(rx_ring, rx_desc, skb);
	wx_rx_vlan(rx_ring, rx_desc, skb);
	skb_record_rx_queue(skb, rx_ring->queue_index);
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed.
 **/
static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
			   struct wx_ring *rx_ring,
			   int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = wx_desc_unused(rx_ring);

	do {
		struct wx_rx_buffer *rx_buffer;
		union wx_rx_desc *rx_desc;
		struct sk_buff *skb;
		int rx_buffer_pgcnt;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= WX_RX_BUFFER_WRITE) {
			wx_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = WX_RX_DESC(rx_ring, rx_ring->next_to_clean);
		if (!wx_test_staterr(rx_desc, WX_RXD_STAT_DD))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb, &rx_buffer_pgcnt);

		/* retrieve a buffer from the ring */
		skb = wx_build_skb(rx_ring, rx_buffer, rx_desc);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
		cleaned_count++;

		/* place incomplete frames back on ring for completion */
		if (wx_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* verify the packet layout is correct */
		if (wx_cleanup_headers(rx_ring, rx_desc, skb))
			continue;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, timestamp, VLAN, and protocol */
		wx_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&q_vector->napi, skb);

		/* update budget accounting */
		total_rx_packets++;
	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

static struct netdev_queue *wx_txring_txq(const struct wx_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}

/**
 * wx_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
			    struct wx_ring *tx_ring, int napi_budget)
{
	unsigned int budget = q_vector->wx->tx_work_limit;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i = tx_ring->next_to_clean;
	struct wx_tx_buffer *tx_buffer;
	union wx_tx_desc *tx_desc;

	if (!netif_carrier_ok(tx_ring->netdev))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = WX_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union wx_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = WX_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = WX_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	netdev_tx_completed_queue(wx_txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    netif_running(tx_ring->netdev)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

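/* Note: the smp_mb() in the wake path above pairs with the smp_mb() in
 * wx_maybe_stop_tx() further below. The transmit path stops the
 * subqueue and then re-checks wx_desc_unused(); the cleanup path wakes
 * the subqueue only after publishing the new next_to_clean. With both
 * barriers in place the two sides cannot each miss the other's update
 * and leave the queue stopped indefinitely.
 */
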
/**
 * wx_poll - NAPI polling RX/TX cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 **/
static int wx_poll(struct napi_struct *napi, int budget)
{
	struct wx_q_vector *q_vector = container_of(napi, struct wx_q_vector, napi);
	int per_ring_budget, work_done = 0;
	struct wx *wx = q_vector->wx;
	bool clean_complete = true;
	struct wx_ring *ring;

	wx_for_each_ring(ring, q_vector->tx) {
		if (!wx_clean_tx_irq(q_vector, ring, budget))
			clean_complete = false;
	}

	/* Exit if we are called by netpoll */
	if (budget <= 0)
		return budget;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget / q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	wx_for_each_ring(ring, q_vector->rx) {
		int cleaned = wx_clean_rx_irq(q_vector, ring, per_ring_budget);

		work_done += cleaned;
		if (cleaned >= per_ring_budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* all work done, exit the polling mode */
	if (likely(napi_complete_done(napi, work_done))) {
		if (netif_running(wx->netdev))
			wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx));
	}

	return min(work_done, budget - 1);
}

static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size)
{
	if (likely(wx_desc_unused(tx_ring) >= size))
		return 0;

	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* For the next check */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(wx_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static u32 wx_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_HW_VLAN, WX_TXD_VLE);
	/* set segmentation enable bits for TSO/FSO */
	cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSO, WX_TXD_TSE);
	/* set timestamp bit if present */
	cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSTAMP, WX_TXD_MAC_TSTAMP);
	cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_LINKSEC, WX_TXD_LINKSEC);

	return cmd_type;
}

static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc,
				u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << WX_TXD_PAYLEN_SHIFT;

	/* enable L4 checksum for TSO and TX checksum offload */
	olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CSUM, WX_TXD_L4CS);
	/* enable IPv4 checksum for TSO */
	olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPV4, WX_TXD_IIPCS);
	/* enable outer IPv4 checksum for TSO */
	olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_OUTER_IPV4,
				     WX_TXD_EIPCS);
	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CC, WX_TXD_CC);
	olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPSEC,
				     WX_TXD_IPSEC);
	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static void wx_tx_map(struct wx_ring *tx_ring,
		      struct wx_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct wx_tx_buffer *tx_buffer;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	union wx_tx_desc *tx_desc;
	skb_frag_t *frag;
	dma_addr_t dma;
	u32 cmd_type;

	cmd_type = wx_tx_cmd_type(tx_flags);
	tx_desc = WX_TX_DESC(tx_ring, i);
	wx_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;
	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > WX_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ WX_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = WX_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += WX_MAX_DATA_PER_TXD;
			size -= WX_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = WX_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);

		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | WX_TXD_EOP | WX_TXD_RS;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount);

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	wx_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i += tx_ring->count;
		i--;
	}

	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	tx_ring->next_to_use = i;
}

static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
			   u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct wx_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = WX_TX_CTXTDESC(tx_ring, i);
	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= WX_TXD_DTYP_CTXT;
	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset);

	*nexthdr = hdr->nexthdr;
	offset += sizeof(struct ipv6hdr);
	while (ipv6_ext_hdr(*nexthdr)) {
		struct ipv6_opt_hdr _hdr, *hp;

		if (*nexthdr == NEXTHDR_NONE)
			return;
		hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr);
		if (!hp)
			return;
		if (*nexthdr == NEXTHDR_FRAGMENT)
			break;
		*nexthdr = hp->nexthdr;
	}
}

union network_header {
	struct iphdr *ipv4;
	struct ipv6hdr *ipv6;
	void *raw;
};

static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
{
	u8 tun_prot = 0, l4_prot = 0, ptype = 0;
	struct sk_buff *skb = first->skb;

	if (skb->encapsulation) {
		union network_header hdr;

		switch (first->protocol) {
		case htons(ETH_P_IP):
			tun_prot = ip_hdr(skb)->protocol;
			ptype = WX_PTYPE_TUN_IPV4;
			break;
		case htons(ETH_P_IPV6):
			wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot);
			ptype = WX_PTYPE_TUN_IPV6;
			break;
		default:
			return ptype;
		}

		if (tun_prot == IPPROTO_IPIP) {
			hdr.raw = (void *)inner_ip_hdr(skb);
			ptype |= WX_PTYPE_PKT_IPIP;
		} else if (tun_prot == IPPROTO_UDP) {
			hdr.raw = (void *)inner_ip_hdr(skb);
			if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
			    skb->inner_protocol != htons(ETH_P_TEB)) {
				ptype |= WX_PTYPE_PKT_IG;
			} else {
				if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto
				     == htons(ETH_P_8021Q))
					ptype |= WX_PTYPE_PKT_IGMV;
				else
					ptype |= WX_PTYPE_PKT_IGM;
			}

		} else if (tun_prot == IPPROTO_GRE) {
			hdr.raw = (void *)inner_ip_hdr(skb);
			if (skb->inner_protocol == htons(ETH_P_IP) ||
			    skb->inner_protocol == htons(ETH_P_IPV6)) {
				ptype |= WX_PTYPE_PKT_IG;
			} else {
				if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto
				     == htons(ETH_P_8021Q))
					ptype |= WX_PTYPE_PKT_IGMV;
				else
					ptype |= WX_PTYPE_PKT_IGM;
			}
		} else {
			return ptype;
		}

		switch (hdr.ipv4->version) {
		case IPVERSION:
			l4_prot = hdr.ipv4->protocol;
			break;
		case 6:
			wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot);
			ptype |= WX_PTYPE_PKT_IPV6;
			break;
		default:
			return ptype;
		}
	} else {
		switch (first->protocol) {
		case htons(ETH_P_IP):
			l4_prot = ip_hdr(skb)->protocol;
			ptype = WX_PTYPE_PKT_IP;
			break;
		case htons(ETH_P_IPV6):
			wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot);
			ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6;
			break;
		default:
			return WX_PTYPE_PKT_MAC | WX_PTYPE_TYP_MAC;
		}
	}
	switch (l4_prot) {
	case IPPROTO_TCP:
		ptype |= WX_PTYPE_TYP_TCP;
		break;
	case IPPROTO_UDP:
		ptype |= WX_PTYPE_TYP_UDP;
		break;
	case IPPROTO_SCTP:
		ptype |= WX_PTYPE_TYP_SCTP;
		break;
	default:
		ptype |= WX_PTYPE_TYP_IP;
		break;
	}

	return ptype;
}
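
/* Note (editorial, hedged): the Tx ptype assembled above appears to
 * mirror the packet-type numbering decoded on the Rx side by
 * wx_ptype_lookup[] at the top of this file. For example, a plain
 * (non-encapsulated) IPv4/TCP frame encodes as
 * WX_PTYPE_PKT_IP | WX_PTYPE_TYP_TCP, which would correspond to the
 * WX_PTT(IP, IPV4, NONE, NONE, TCP, PAY4) entry in that table.
 */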

static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
		  u8 *hdr_len, u8 ptype)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct net_device *netdev = tx_ring->netdev;
	u32 l4len, tunhdr_eiplen_tunlen = 0;
	struct sk_buff *skb = first->skb;
	bool enc = skb->encapsulation;
	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u8 tun_prot = 0;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* indicates the inner headers in the skbuff are valid. */
	iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb);
	if (iph->version == 4) {
		tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr,
						 iph->daddr, 0,
						 IPPROTO_TCP, 0);
		first->tx_flags |= WX_TX_FLAGS_TSO |
				   WX_TX_FLAGS_CSUM |
				   WX_TX_FLAGS_IPV4 |
				   WX_TX_FLAGS_CC;
	} else if (iph->version == 6 && skb_is_gso_v6(skb)) {
		ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
		tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		ipv6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
					       &ipv6h->daddr, 0,
					       IPPROTO_TCP, 0);
		first->tx_flags |= WX_TX_FLAGS_TSO |
				   WX_TX_FLAGS_CSUM |
				   WX_TX_FLAGS_CC;
	}

	/* compute header lengths */
	l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) :
			 skb_transport_offset(skb);
	*hdr_len += l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 0 as index for TSO */
	mss_l4len_idx = l4len << WX_TXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << WX_TXD_MSS_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	if (enc) {
		switch (first->protocol) {
		case htons(ETH_P_IP):
			tun_prot = ip_hdr(skb)->protocol;
			first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4;
			break;
		case htons(ETH_P_IPV6):
			tun_prot = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			break;
		}
		switch (tun_prot) {
		case IPPROTO_UDP:
			tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP;
			tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) <<
						 WX_TXD_OUTER_IPLEN_SHIFT) |
						(((skb_inner_mac_header(skb) -
						skb_transport_header(skb)) >> 1) <<
						WX_TXD_TUNNEL_LEN_SHIFT);
			break;
		case IPPROTO_GRE:
			tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE;
			tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) <<
						 WX_TXD_OUTER_IPLEN_SHIFT) |
						(((skb_inner_mac_header(skb) -
						skb_transport_header(skb)) >> 1) <<
						WX_TXD_TUNNEL_LEN_SHIFT);
			break;
		case IPPROTO_IPIP:
			tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
						(char *)ip_hdr(skb)) >> 2) <<
						WX_TXD_OUTER_IPLEN_SHIFT;
			break;
		default:
			break;
		}
		vlan_macip_lens = skb_inner_network_header_len(skb) >> 1;
	} else {
		vlan_macip_lens = skb_network_header_len(skb) >> 1;
	}

	vlan_macip_lens |= skb_network_offset(skb) << WX_TXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK;

	type_tucmd = ptype << 24;
	if (skb->vlan_proto == htons(ETH_P_8021AD) &&
	    netdev->features & NETIF_F_HW_VLAN_STAG_TX)
		type_tucmd |= WX_SET_FLAG(first->tx_flags,
					  WX_TX_FLAGS_HW_VLAN,
					  0x1 << WX_TXD_TAG_TPID_SEL_SHIFT);
	wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen,
		       type_tucmd, mss_l4len_idx);

	return 1;
}

static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
		       u8 ptype)
{
	u32 tunhdr_eiplen_tunlen = 0, vlan_macip_lens = 0;
	struct net_device *netdev = tx_ring->netdev;
	u32 mss_l4len_idx = 0, type_tucmd;
	struct sk_buff *skb = first->skb;
	u8 tun_prot = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) &&
		    !(first->tx_flags & WX_TX_FLAGS_CC))
			return;
		vlan_macip_lens = skb_network_offset(skb) <<
				  WX_TXD_MACLEN_SHIFT;
	} else {
		u8 l4_prot = 0;
		union {
			struct iphdr *ipv4;
			struct ipv6hdr *ipv6;
			u8 *raw;
		} network_hdr;
		union {
			struct tcphdr *tcphdr;
			u8 *raw;
		} transport_hdr;

		if (skb->encapsulation) {
			network_hdr.raw = skb_inner_network_header(skb);
			transport_hdr.raw = skb_inner_transport_header(skb);
			vlan_macip_lens = skb_network_offset(skb) <<
					  WX_TXD_MACLEN_SHIFT;
			switch (first->protocol) {
			case htons(ETH_P_IP):
				tun_prot = ip_hdr(skb)->protocol;
				break;
			case htons(ETH_P_IPV6):
				tun_prot = ipv6_hdr(skb)->nexthdr;
				break;
			default:
				return;
			}
			switch (tun_prot) {
			case IPPROTO_UDP:
				tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP;
				tunhdr_eiplen_tunlen |=
					((skb_network_header_len(skb) >> 2) <<
					WX_TXD_OUTER_IPLEN_SHIFT) |
					(((skb_inner_mac_header(skb) -
					skb_transport_header(skb)) >> 1) <<
					WX_TXD_TUNNEL_LEN_SHIFT);
				break;
			case IPPROTO_GRE:
				tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE;
				tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) <<
							 WX_TXD_OUTER_IPLEN_SHIFT) |
							(((skb_inner_mac_header(skb) -
							skb_transport_header(skb)) >> 1) <<
							WX_TXD_TUNNEL_LEN_SHIFT);
				break;
			case IPPROTO_IPIP:
				tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
							(char *)ip_hdr(skb)) >> 2) <<
							WX_TXD_OUTER_IPLEN_SHIFT;
				break;
			default:
				break;
			}

		} else {
			network_hdr.raw = skb_network_header(skb);
			transport_hdr.raw = skb_transport_header(skb);
			vlan_macip_lens = skb_network_offset(skb) <<
					  WX_TXD_MACLEN_SHIFT;
		}

		switch (network_hdr.ipv4->version) {
		case IPVERSION:
			vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1;
			l4_prot = network_hdr.ipv4->protocol;
			break;
		case 6:
			vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1;
			l4_prot = network_hdr.ipv6->nexthdr;
			break;
		default:
			break;
		}

		switch (l4_prot) {
		case IPPROTO_TCP:
			mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
					WX_TXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			mss_l4len_idx = sizeof(struct sctphdr) <<
					WX_TXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					WX_TXD_L4LEN_SHIFT;
			break;
		default:
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= WX_TX_FLAGS_CSUM;
	}
	first->tx_flags |= WX_TX_FLAGS_CC;
	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK;

	type_tucmd = ptype << 24;
	if (skb->vlan_proto == htons(ETH_P_8021AD) &&
	    netdev->features & NETIF_F_HW_VLAN_STAG_TX)
		type_tucmd |= WX_SET_FLAG(first->tx_flags,
					  WX_TX_FLAGS_HW_VLAN,
					  0x1 << WX_TXD_TAG_TPID_SEL_SHIFT);
	wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen,
		       type_tucmd, mss_l4len_idx);
}

static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
				      struct wx_ring *tx_ring)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	struct wx_tx_buffer *first;
	u8 hdr_len = 0, ptype;
	unsigned short f;
	u32 tx_flags = 0;
	int tso;

	/* need: 1 descriptor per page * PAGE_SIZE/WX_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/WX_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->
						     frags[f]));

	if (wx_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* if we have a HW VLAN tag being added default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << WX_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= WX_TX_FLAGS_HW_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

	ptype = wx_encode_tx_desc_ptype(first);

	tso = wx_tso(tx_ring, first, &hdr_len, ptype);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		wx_tx_csum(tx_ring, first, ptype);
	wx_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;
out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
			  struct net_device *netdev)
{
	unsigned int r_idx = skb->queue_mapping;
	struct wx *wx = netdev_priv(netdev);
	struct wx_ring *tx_ring;

	if (!netif_carrier_ok(netdev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb_put_padto(skb, 17))
		return NETDEV_TX_OK;

	if (r_idx >= wx->num_tx_queues)
		r_idx = r_idx % wx->num_tx_queues;
	tx_ring = wx->tx_ring[r_idx];

	return wx_xmit_frame_ring(skb, tx_ring);
}
EXPORT_SYMBOL(wx_xmit_frame);

void wx_napi_enable_all(struct wx *wx)
{
	struct wx_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
		q_vector = wx->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}
EXPORT_SYMBOL(wx_napi_enable_all);

void wx_napi_disable_all(struct wx *wx)
{
	struct wx_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
		q_vector = wx->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}
EXPORT_SYMBOL(wx_napi_disable_all);

/**
 * wx_set_rss_queues: Allocate queues for RSS
 * @wx: board private structure to initialize
 *
 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static void wx_set_rss_queues(struct wx *wx)
{
	wx->num_rx_queues = wx->mac.max_rx_queues;
	wx->num_tx_queues = wx->mac.max_tx_queues;
}

static void wx_set_num_queues(struct wx *wx)
{
	/* Start with base case */
	wx->num_rx_queues = 1;
	wx->num_tx_queues = 1;
	wx->queues_per_pool = 1;

	wx_set_rss_queues(wx);
}

/**
 * wx_acquire_msix_vectors - acquire MSI-X vectors
 * @wx: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int wx_acquire_msix_vectors(struct wx *wx)
{
	struct irq_affinity affd = {0, };
	int nvecs, i;

	nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors);

	wx->msix_entries = kcalloc(nvecs,
				   sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!wx->msix_entries)
		return -ENOMEM;

	nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
					       nvecs,
					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					       &affd);
	if (nvecs < 0) {
		wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs);
		kfree(wx->msix_entries);
		wx->msix_entries = NULL;
		return nvecs;
	}

	for (i = 0; i < nvecs; i++) {
		wx->msix_entries[i].entry = i;
		wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i);
	}

	/* one for msix_other */
	nvecs -= 1;
	wx->num_q_vectors = nvecs;
	wx->num_rx_queues = nvecs;
	wx->num_tx_queues = nvecs;

	return 0;
}

/**
 * wx_set_interrupt_capability - set MSI-X or MSI if supported
 * @wx: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int wx_set_interrupt_capability(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;
	int nvecs, ret;

	/* We will try to get MSI-X interrupts first */
	ret = wx_acquire_msix_vectors(wx);
	if (ret == 0 || (ret == -ENOMEM))
		return ret;

	wx->num_rx_queues = 1;
	wx->num_tx_queues = 1;
	wx->num_q_vectors = 1;

	/* minimum one for queue, one for misc */
	nvecs = 1;
	nvecs = pci_alloc_irq_vectors(pdev, nvecs,
				      nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvecs == 1) {
		if (pdev->msi_enabled)
			wx_err(wx, "Fallback to MSI.\n");
		else
			wx_err(wx, "Fallback to LEGACY.\n");
	} else {
		wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. Error: %d\n", nvecs);
		return nvecs;
	}

	pdev->irq = pci_irq_vector(pdev, 0);

	return 0;
}

/**
 * wx_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @wx: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV.
 *
 **/
static void wx_cache_ring_rss(struct wx *wx)
{
	u16 i;

	for (i = 0; i < wx->num_rx_queues; i++)
		wx->rx_ring[i]->reg_idx = i;

	for (i = 0; i < wx->num_tx_queues; i++)
		wx->tx_ring[i]->reg_idx = i;
}

static void wx_add_ring(struct wx_ring *ring, struct wx_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/**
 * wx_alloc_q_vector - Allocate memory for a single interrupt vector
 * @wx: board private structure to initialize
 * @v_count: q_vectors allocated on wx, used for ring interleaving
 * @v_idx: index of vector in wx struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int wx_alloc_q_vector(struct wx *wx,
			     unsigned int v_count, unsigned int v_idx,
			     unsigned int txr_count, unsigned int txr_idx,
			     unsigned int rxr_count, unsigned int rxr_idx)
{
	struct wx_q_vector *q_vector;
	int ring_count, default_itr;
	struct wx_ring *ring;

	/* note this will allocate space for the ring structure as well! */
	ring_count = txr_count + rxr_count;

	q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
			   GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(wx->netdev, &q_vector->napi,
		       wx_poll);

	/* tie q_vector and wx together */
	wx->q_vector[v_idx] = q_vector;
	q_vector->wx = wx;
	q_vector->v_idx = v_idx;
	if (cpu_online(v_idx))
		q_vector->numa_node = cpu_to_node(v_idx);

	/* initialize pointer to rings */
	ring = q_vector->ring;

	if (wx->mac.type == wx_mac_sp)
		default_itr = WX_12K_ITR;
	else
		default_itr = WX_7K_ITR;
	/* initialize ITR */
	if (txr_count && !rxr_count)
		/* tx only vector */
		q_vector->itr = wx->tx_itr_setting ?
				default_itr : wx->tx_itr_setting;
	else
		/* rx or rx/tx vector */
		q_vector->itr = wx->rx_itr_setting ?
				default_itr : wx->rx_itr_setting;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &wx->pdev->dev;
		ring->netdev = wx->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		wx_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = wx->tx_ring_count;

		ring->queue_index = txr_idx;

		/* assign ring to wx */
		wx->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &wx->pdev->dev;
		ring->netdev = wx->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		wx_add_ring(ring, &q_vector->rx);

		/* apply Rx specific ring traits */
		ring->count = wx->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to wx */
		wx->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * wx_free_q_vector - Free memory allocated for specific interrupt vector
 * @wx: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void wx_free_q_vector(struct wx *wx, int v_idx)
{
	struct wx_q_vector *q_vector = wx->q_vector[v_idx];
	struct wx_ring *ring;

	wx_for_each_ring(ring, q_vector->tx)
		wx->tx_ring[ring->queue_index] = NULL;

	wx_for_each_ring(ring, q_vector->rx)
		wx->rx_ring[ring->queue_index] = NULL;

	wx->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);
	kfree_rcu(q_vector, rcu);
}

/**
 * wx_alloc_q_vectors - Allocate memory for interrupt vectors
 * @wx: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int wx_alloc_q_vectors(struct wx *wx)
{
	unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	unsigned int rxr_remaining = wx->num_rx_queues;
	unsigned int txr_remaining = wx->num_tx_queues;
	unsigned int q_vectors = wx->num_q_vectors;
	int rqpv, tqpv;
	int err;

	for (; v_idx < q_vectors; v_idx++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		err = wx_alloc_q_vector(wx, q_vectors, v_idx,
					tqpv, txr_idx,
					rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	wx->num_tx_queues = 0;
	wx->num_rx_queues = 0;
	wx->num_q_vectors = 0;

	while (v_idx--)
		wx_free_q_vector(wx, v_idx);

	return -ENOMEM;
}

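/* Example of the distribution above (illustrative values only): with
 * 3 q_vectors and 8 Rx queues, DIV_ROUND_UP() hands out 3, 3 and 2 Rx
 * rings to vectors 0, 1 and 2. Because wx_alloc_q_vector() advances
 * each ring's queue_index by v_count, vector 0 then owns queues 0, 3
 * and 6, vector 1 owns 1, 4 and 7, and vector 2 owns 2 and 5.
 */
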
In addition if 1885 * NAPI is enabled it will delete any references to the NAPI struct prior 1886 * to freeing the q_vector. 1887 **/ 1888 static void wx_free_q_vectors(struct wx *wx) 1889 { 1890 int v_idx = wx->num_q_vectors; 1891 1892 wx->num_tx_queues = 0; 1893 wx->num_rx_queues = 0; 1894 wx->num_q_vectors = 0; 1895 1896 while (v_idx--) 1897 wx_free_q_vector(wx, v_idx); 1898 } 1899 1900 void wx_reset_interrupt_capability(struct wx *wx) 1901 { 1902 struct pci_dev *pdev = wx->pdev; 1903 1904 if (!pdev->msi_enabled && !pdev->msix_enabled) 1905 return; 1906 1907 if (pdev->msix_enabled) { 1908 kfree(wx->msix_entries); 1909 wx->msix_entries = NULL; 1910 } 1911 pci_free_irq_vectors(wx->pdev); 1912 } 1913 EXPORT_SYMBOL(wx_reset_interrupt_capability); 1914 1915 /** 1916 * wx_clear_interrupt_scheme - Clear the current interrupt scheme settings 1917 * @wx: board private structure to clear interrupt scheme on 1918 * 1919 * We go through and clear interrupt specific resources and reset the structure 1920 * to pre-load conditions 1921 **/ 1922 void wx_clear_interrupt_scheme(struct wx *wx) 1923 { 1924 wx_free_q_vectors(wx); 1925 wx_reset_interrupt_capability(wx); 1926 } 1927 EXPORT_SYMBOL(wx_clear_interrupt_scheme); 1928 1929 int wx_init_interrupt_scheme(struct wx *wx) 1930 { 1931 int ret; 1932 1933 /* Number of supported queues */ 1934 wx_set_num_queues(wx); 1935 1936 /* Set interrupt mode */ 1937 ret = wx_set_interrupt_capability(wx); 1938 if (ret) { 1939 wx_err(wx, "Failed to allocate irq vectors.\n"); 1940 return ret; 1941 } 1942 1943 /* Allocate memory for queues */ 1944 ret = wx_alloc_q_vectors(wx); 1945 if (ret) { 1946 wx_err(wx, "Unable to allocate memory for queue vectors.\n"); 1947 wx_reset_interrupt_capability(wx); 1948 return ret; 1949 } 1950 1951 wx_cache_ring_rss(wx); 1952 1953 return 0; 1954 } 1955 EXPORT_SYMBOL(wx_init_interrupt_scheme); 1956 1957 irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data) 1958 { 1959 struct wx_q_vector *q_vector = data; 1960 1961 /* EIAM disabled interrupts (on this vector) for us */ 1962 if (q_vector->rx.ring || q_vector->tx.ring) 1963 napi_schedule_irqoff(&q_vector->napi); 1964 1965 return IRQ_HANDLED; 1966 } 1967 EXPORT_SYMBOL(wx_msix_clean_rings); 1968 1969 void wx_free_irq(struct wx *wx) 1970 { 1971 struct pci_dev *pdev = wx->pdev; 1972 int vector; 1973 1974 if (!(pdev->msix_enabled)) { 1975 free_irq(pdev->irq, wx); 1976 return; 1977 } 1978 1979 for (vector = 0; vector < wx->num_q_vectors; vector++) { 1980 struct wx_q_vector *q_vector = wx->q_vector[vector]; 1981 struct msix_entry *entry = &wx->msix_entries[vector]; 1982 1983 /* free only the irqs that were actually requested */ 1984 if (!q_vector->rx.ring && !q_vector->tx.ring) 1985 continue; 1986 1987 free_irq(entry->vector, q_vector); 1988 } 1989 1990 if (wx->mac.type == wx_mac_em) 1991 free_irq(wx->msix_entries[vector].vector, wx); 1992 } 1993 EXPORT_SYMBOL(wx_free_irq); 1994 1995 /** 1996 * wx_setup_isb_resources - allocate interrupt status resources 1997 * @wx: board private structure 1998 * 1999 * Return 0 on success, negative on failure 2000 **/ 2001 int wx_setup_isb_resources(struct wx *wx) 2002 { 2003 struct pci_dev *pdev = wx->pdev; 2004 2005 wx->isb_mem = dma_alloc_coherent(&pdev->dev, 2006 sizeof(u32) * 4, 2007 &wx->isb_dma, 2008 GFP_KERNEL); 2009 if (!wx->isb_mem) { 2010 wx_err(wx, "Alloc isb_mem failed\n"); 2011 return -ENOMEM; 2012 } 2013 2014 return 0; 2015 } 2016 EXPORT_SYMBOL(wx_setup_isb_resources); 2017 2018 /** 2019 * wx_free_isb_resources - free interrupt status
resources 2020 * @wx: board private structure 2021 * 2022 * Free the DMA memory allocated for the interrupt status block 2023 **/ 2024 void wx_free_isb_resources(struct wx *wx) 2025 { 2026 struct pci_dev *pdev = wx->pdev; 2027 2028 dma_free_coherent(&pdev->dev, sizeof(u32) * 4, 2029 wx->isb_mem, wx->isb_dma); 2030 wx->isb_mem = NULL; 2031 } 2032 EXPORT_SYMBOL(wx_free_isb_resources); 2033 2034 u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx) 2035 { 2036 u32 cur_tag = 0; 2037 2038 cur_tag = wx->isb_mem[WX_ISB_HEADER]; 2039 wx->isb_tag[idx] = cur_tag; 2040 2041 return (__force u32)cpu_to_le32(wx->isb_mem[idx]); 2042 } 2043 EXPORT_SYMBOL(wx_misc_isb); 2044 2045 /** 2046 * wx_set_ivar - set the IVAR registers, mapping interrupt causes to vectors 2047 * @wx: pointer to wx struct 2048 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 2049 * @queue: queue to map the corresponding interrupt to 2050 * @msix_vector: the vector to map to the corresponding queue 2051 * 2052 **/ 2053 static void wx_set_ivar(struct wx *wx, s8 direction, 2054 u16 queue, u16 msix_vector) 2055 { 2056 u32 ivar, index; 2057 2058 if (direction == -1) { 2059 /* other causes */ 2060 msix_vector |= WX_PX_IVAR_ALLOC_VAL; 2061 index = 0; 2062 ivar = rd32(wx, WX_PX_MISC_IVAR); 2063 ivar &= ~(0xFF << index); 2064 ivar |= (msix_vector << index); 2065 wr32(wx, WX_PX_MISC_IVAR, ivar); 2066 } else { 2067 /* tx or rx causes */ 2068 msix_vector |= WX_PX_IVAR_ALLOC_VAL; 2069 index = ((16 * (queue & 1)) + (8 * direction)); 2070 ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); 2071 ivar &= ~(0xFF << index); 2072 ivar |= (msix_vector << index); 2073 wr32(wx, WX_PX_IVAR(queue >> 1), ivar); 2074 } 2075 } 2076 2077 /** 2078 * wx_write_eitr - write EITR register in hardware specific way 2079 * @q_vector: structure containing interrupt and ring information 2080 * 2081 * This function is made to be called by ethtool and by the driver 2082 * when it needs to update EITR registers at runtime. Hardware 2083 * specific quirks/differences are taken care of here. 2084 */ 2085 static void wx_write_eitr(struct wx_q_vector *q_vector) 2086 { 2087 struct wx *wx = q_vector->wx; 2088 int v_idx = q_vector->v_idx; 2089 u32 itr_reg; 2090 2091 if (wx->mac.type == wx_mac_sp) 2092 itr_reg = q_vector->itr & WX_SP_MAX_EITR; 2093 else 2094 itr_reg = q_vector->itr & WX_EM_MAX_EITR; 2095 2096 itr_reg |= WX_PX_ITR_CNT_WDIS; 2097 2098 wr32(wx, WX_PX_ITR(v_idx), itr_reg); 2099 } 2100 2101 /** 2102 * wx_configure_vectors - Configure vectors for hardware 2103 * @wx: board private structure 2104 * 2105 * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY 2106 * interrupts. 2107 **/ 2108 void wx_configure_vectors(struct wx *wx) 2109 { 2110 struct pci_dev *pdev = wx->pdev; 2111 u32 eitrsel = 0; 2112 u16 v_idx; 2113 2114 if (pdev->msix_enabled) { 2115 /* Populate MSIX to EITR Select */ 2116 wr32(wx, WX_PX_ITRSEL, eitrsel); 2117 /* use EIAM to auto-mask when MSI-X interrupt is asserted 2118 * this saves a register write for every interrupt 2119 */ 2120 wr32(wx, WX_PX_GPIE, WX_PX_GPIE_MODEL); 2121 } else { 2122 /* legacy interrupts, use EIAM to auto-mask when reading EICR, 2123 * specifically only auto mask tx and rx interrupts. 2124 */ 2125 wr32(wx, WX_PX_GPIE, 0); 2126 } 2127 2128 /* Populate the IVAR table and set the ITR values to the 2129 * corresponding register.
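 *
 * As an illustration of the encoding done by wx_set_ivar() above: each
 * 32-bit WX_PX_IVAR(n) register holds four 8-bit entries for queues 2n
 * and 2n + 1, selected by index = 16 * (queue & 1) + 8 * direction:
 *
 *    bits  7:0   Rx cause of the even queue (2n)
 *    bits 15:8   Tx cause of the even queue (2n)
 *    bits 23:16  Rx cause of the odd queue  (2n + 1)
 *    bits 31:24  Tx cause of the odd queue  (2n + 1)
 *
 * so mapping Rx queue 5 to vector 2, for example, writes
 * (2 | WX_PX_IVAR_ALLOC_VAL) into bits 23:16 of WX_PX_IVAR(2).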
2130 */ 2131 for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) { 2132 struct wx_q_vector *q_vector = wx->q_vector[v_idx]; 2133 struct wx_ring *ring; 2134 2135 wx_for_each_ring(ring, q_vector->rx) 2136 wx_set_ivar(wx, 0, ring->reg_idx, v_idx); 2137 2138 wx_for_each_ring(ring, q_vector->tx) 2139 wx_set_ivar(wx, 1, ring->reg_idx, v_idx); 2140 2141 wx_write_eitr(q_vector); 2142 } 2143 2144 wx_set_ivar(wx, -1, 0, v_idx); 2145 if (pdev->msix_enabled) 2146 wr32(wx, WX_PX_ITR(v_idx), 1950); 2147 } 2148 EXPORT_SYMBOL(wx_configure_vectors); 2149 2150 /** 2151 * wx_clean_rx_ring - Free Rx Buffers per Queue 2152 * @rx_ring: ring to free buffers from 2153 **/ 2154 static void wx_clean_rx_ring(struct wx_ring *rx_ring) 2155 { 2156 struct wx_rx_buffer *rx_buffer; 2157 u16 i = rx_ring->next_to_clean; 2158 2159 rx_buffer = &rx_ring->rx_buffer_info[i]; 2160 2161 /* Free all the Rx ring sk_buffs */ 2162 while (i != rx_ring->next_to_alloc) { 2163 if (rx_buffer->skb) { 2164 struct sk_buff *skb = rx_buffer->skb; 2165 2166 if (WX_CB(skb)->page_released) 2167 page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); 2168 2169 dev_kfree_skb(skb); 2170 } 2171 2172 /* Invalidate cache lines that may have been written to by 2173 * device so that we avoid corrupting memory. 2174 */ 2175 dma_sync_single_range_for_cpu(rx_ring->dev, 2176 rx_buffer->dma, 2177 rx_buffer->page_offset, 2178 WX_RX_BUFSZ, 2179 DMA_FROM_DEVICE); 2180 2181 /* free resources associated with mapping */ 2182 page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); 2183 2184 i++; 2185 rx_buffer++; 2186 if (i == rx_ring->count) { 2187 i = 0; 2188 rx_buffer = rx_ring->rx_buffer_info; 2189 } 2190 } 2191 2192 rx_ring->next_to_alloc = 0; 2193 rx_ring->next_to_clean = 0; 2194 rx_ring->next_to_use = 0; 2195 } 2196 2197 /** 2198 * wx_clean_all_rx_rings - Free Rx Buffers for all queues 2199 * @wx: board private structure 2200 **/ 2201 void wx_clean_all_rx_rings(struct wx *wx) 2202 { 2203 int i; 2204 2205 for (i = 0; i < wx->num_rx_queues; i++) 2206 wx_clean_rx_ring(wx->rx_ring[i]); 2207 } 2208 EXPORT_SYMBOL(wx_clean_all_rx_rings); 2209 2210 /** 2211 * wx_free_rx_resources - Free Rx Resources 2212 * @rx_ring: ring to clean the resources from 2213 * 2214 * Free all receive software resources 2215 **/ 2216 static void wx_free_rx_resources(struct wx_ring *rx_ring) 2217 { 2218 wx_clean_rx_ring(rx_ring); 2219 kvfree(rx_ring->rx_buffer_info); 2220 rx_ring->rx_buffer_info = NULL; 2221 2222 /* if not set, then don't free */ 2223 if (!rx_ring->desc) 2224 return; 2225 2226 dma_free_coherent(rx_ring->dev, rx_ring->size, 2227 rx_ring->desc, rx_ring->dma); 2228 2229 rx_ring->desc = NULL; 2230 2231 if (rx_ring->page_pool) { 2232 page_pool_destroy(rx_ring->page_pool); 2233 rx_ring->page_pool = NULL; 2234 } 2235 } 2236 2237 /** 2238 * wx_free_all_rx_resources - Free Rx Resources for All Queues 2239 * @wx: pointer to hardware structure 2240 * 2241 * Free all receive software resources 2242 **/ 2243 static void wx_free_all_rx_resources(struct wx *wx) 2244 { 2245 int i; 2246 2247 for (i = 0; i < wx->num_rx_queues; i++) 2248 wx_free_rx_resources(wx->rx_ring[i]); 2249 } 2250 2251 /** 2252 * wx_clean_tx_ring - Free Tx Buffers 2253 * @tx_ring: ring to be cleaned 2254 **/ 2255 static void wx_clean_tx_ring(struct wx_ring *tx_ring) 2256 { 2257 struct wx_tx_buffer *tx_buffer; 2258 u16 i = tx_ring->next_to_clean; 2259 2260 tx_buffer = &tx_ring->tx_buffer_info[i]; 2261 2262 while (i != tx_ring->next_to_use) { 2263 union wx_tx_desc *eop_desc, *tx_desc; 2264 2265 /* 
Free all the Tx ring sk_buffs */ 2266 dev_kfree_skb_any(tx_buffer->skb); 2267 2268 /* unmap skb header data */ 2269 dma_unmap_single(tx_ring->dev, 2270 dma_unmap_addr(tx_buffer, dma), 2271 dma_unmap_len(tx_buffer, len), 2272 DMA_TO_DEVICE); 2273 2274 /* check for eop_desc to determine the end of the packet */ 2275 eop_desc = tx_buffer->next_to_watch; 2276 tx_desc = WX_TX_DESC(tx_ring, i); 2277 2278 /* unmap remaining buffers */ 2279 while (tx_desc != eop_desc) { 2280 tx_buffer++; 2281 tx_desc++; 2282 i++; 2283 if (unlikely(i == tx_ring->count)) { 2284 i = 0; 2285 tx_buffer = tx_ring->tx_buffer_info; 2286 tx_desc = WX_TX_DESC(tx_ring, 0); 2287 } 2288 2289 /* unmap any remaining paged data */ 2290 if (dma_unmap_len(tx_buffer, len)) 2291 dma_unmap_page(tx_ring->dev, 2292 dma_unmap_addr(tx_buffer, dma), 2293 dma_unmap_len(tx_buffer, len), 2294 DMA_TO_DEVICE); 2295 } 2296 2297 /* move us one more past the eop_desc for start of next pkt */ 2298 tx_buffer++; 2299 i++; 2300 if (unlikely(i == tx_ring->count)) { 2301 i = 0; 2302 tx_buffer = tx_ring->tx_buffer_info; 2303 } 2304 } 2305 2306 netdev_tx_reset_queue(wx_txring_txq(tx_ring)); 2307 2308 /* reset next_to_use and next_to_clean */ 2309 tx_ring->next_to_use = 0; 2310 tx_ring->next_to_clean = 0; 2311 } 2312 2313 /** 2314 * wx_clean_all_tx_rings - Free Tx Buffers for all queues 2315 * @wx: board private structure 2316 **/ 2317 void wx_clean_all_tx_rings(struct wx *wx) 2318 { 2319 int i; 2320 2321 for (i = 0; i < wx->num_tx_queues; i++) 2322 wx_clean_tx_ring(wx->tx_ring[i]); 2323 } 2324 EXPORT_SYMBOL(wx_clean_all_tx_rings); 2325 2326 /** 2327 * wx_free_tx_resources - Free Tx Resources per Queue 2328 * @tx_ring: Tx descriptor ring for a specific queue 2329 * 2330 * Free all transmit software resources 2331 **/ 2332 static void wx_free_tx_resources(struct wx_ring *tx_ring) 2333 { 2334 wx_clean_tx_ring(tx_ring); 2335 kvfree(tx_ring->tx_buffer_info); 2336 tx_ring->tx_buffer_info = NULL; 2337 2338 /* if not set, then don't free */ 2339 if (!tx_ring->desc) 2340 return; 2341 2342 dma_free_coherent(tx_ring->dev, tx_ring->size, 2343 tx_ring->desc, tx_ring->dma); 2344 tx_ring->desc = NULL; 2345 } 2346 2347 /** 2348 * wx_free_all_tx_resources - Free Tx Resources for All Queues 2349 * @wx: pointer to hardware structure 2350 * 2351 * Free all transmit software resources 2352 **/ 2353 static void wx_free_all_tx_resources(struct wx *wx) 2354 { 2355 int i; 2356 2357 for (i = 0; i < wx->num_tx_queues; i++) 2358 wx_free_tx_resources(wx->tx_ring[i]); 2359 } 2360 2361 void wx_free_resources(struct wx *wx) 2362 { 2363 wx_free_isb_resources(wx); 2364 wx_free_all_rx_resources(wx); 2365 wx_free_all_tx_resources(wx); 2366 } 2367 EXPORT_SYMBOL(wx_free_resources); 2368 2369 static int wx_alloc_page_pool(struct wx_ring *rx_ring) 2370 { 2371 int ret = 0; 2372 2373 struct page_pool_params pp_params = { 2374 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 2375 .order = 0, 2376 .pool_size = rx_ring->size, 2377 .nid = dev_to_node(rx_ring->dev), 2378 .dev = rx_ring->dev, 2379 .dma_dir = DMA_FROM_DEVICE, 2380 .offset = 0, 2381 .max_len = PAGE_SIZE, 2382 }; 2383 2384 rx_ring->page_pool = page_pool_create(&pp_params); 2385 if (IS_ERR(rx_ring->page_pool)) { 2386 ret = PTR_ERR(rx_ring->page_pool); 2387 rx_ring->page_pool = NULL; 2388 } 2389 2390 return ret; 2391 } 2392 2393 /** 2394 * wx_setup_rx_resources - allocate Rx resources (Descriptors) 2395 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2396 * 2397 * Returns 0 on success, negative on failure 2398 **/ 2399 static 
int wx_setup_rx_resources(struct wx_ring *rx_ring) 2400 { 2401 struct device *dev = rx_ring->dev; 2402 int orig_node = dev_to_node(dev); 2403 int numa_node = NUMA_NO_NODE; 2404 int size, ret; 2405 2406 size = sizeof(struct wx_rx_buffer) * rx_ring->count; 2407 2408 if (rx_ring->q_vector) 2409 numa_node = rx_ring->q_vector->numa_node; 2410 2411 rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node); 2412 if (!rx_ring->rx_buffer_info) 2413 rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL); 2414 if (!rx_ring->rx_buffer_info) 2415 goto err; 2416 2417 /* Round up to nearest 4K */ 2418 rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc); 2419 rx_ring->size = ALIGN(rx_ring->size, 4096); 2420 2421 set_dev_node(dev, numa_node); 2422 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, 2423 &rx_ring->dma, GFP_KERNEL); 2424 if (!rx_ring->desc) { 2425 set_dev_node(dev, orig_node); 2426 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, 2427 &rx_ring->dma, GFP_KERNEL); 2428 } 2429 2430 if (!rx_ring->desc) 2431 goto err; 2432 2433 rx_ring->next_to_clean = 0; 2434 rx_ring->next_to_use = 0; 2435 2436 ret = wx_alloc_page_pool(rx_ring); 2437 if (ret < 0) { 2438 dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret); 2439 goto err_desc; 2440 } 2441 2442 return 0; 2443 2444 err_desc: 2445 dma_free_coherent(dev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2446 err: 2447 kvfree(rx_ring->rx_buffer_info); 2448 rx_ring->rx_buffer_info = NULL; 2449 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); 2450 return -ENOMEM; 2451 } 2452 2453 /** 2454 * wx_setup_all_rx_resources - allocate all queues Rx resources 2455 * @wx: pointer to hardware structure 2456 * 2457 * If this function returns with an error, then it's possible one or 2458 * more of the rings is populated (while the rest are not). It is the 2459 * callers duty to clean those orphaned rings. 
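 *
 * The per-ring allocations done by wx_setup_rx_resources() above are NUMA
 * aware: the buffer-info array is placed on the owning q_vector's node
 * when possible, and since dma_alloc_coherent() takes no node argument
 * the descriptor memory gets there by temporarily retargeting the device
 * with set_dev_node(), retrying on the original node if the node-local
 * attempt fails.  A minimal sketch of that pattern:
 *
 *    set_dev_node(dev, numa_node);
 *    desc = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
 *    if (!desc) {
 *        set_dev_node(dev, orig_node);
 *        desc = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
 *    }
 *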
2460 * 2461 * Return 0 on success, negative on failure 2462 **/ 2463 static int wx_setup_all_rx_resources(struct wx *wx) 2464 { 2465 int i, err = 0; 2466 2467 for (i = 0; i < wx->num_rx_queues; i++) { 2468 err = wx_setup_rx_resources(wx->rx_ring[i]); 2469 if (!err) 2470 continue; 2471 2472 wx_err(wx, "Allocation for Rx Queue %u failed\n", i); 2473 goto err_setup_rx; 2474 } 2475 2476 return 0; 2477 err_setup_rx: 2478 /* rewind the index freeing the rings as we go */ 2479 while (i--) 2480 wx_free_rx_resources(wx->rx_ring[i]); 2481 return err; 2482 } 2483 2484 /** 2485 * wx_setup_tx_resources - allocate Tx resources (Descriptors) 2486 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2487 * 2488 * Return 0 on success, negative on failure 2489 **/ 2490 static int wx_setup_tx_resources(struct wx_ring *tx_ring) 2491 { 2492 struct device *dev = tx_ring->dev; 2493 int orig_node = dev_to_node(dev); 2494 int numa_node = NUMA_NO_NODE; 2495 int size; 2496 2497 size = sizeof(struct wx_tx_buffer) * tx_ring->count; 2498 2499 if (tx_ring->q_vector) 2500 numa_node = tx_ring->q_vector->numa_node; 2501 2502 tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node); 2503 if (!tx_ring->tx_buffer_info) 2504 tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL); 2505 if (!tx_ring->tx_buffer_info) 2506 goto err; 2507 2508 /* round up to nearest 4K */ 2509 tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc); 2510 tx_ring->size = ALIGN(tx_ring->size, 4096); 2511 2512 set_dev_node(dev, numa_node); 2513 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 2514 &tx_ring->dma, GFP_KERNEL); 2515 if (!tx_ring->desc) { 2516 set_dev_node(dev, orig_node); 2517 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 2518 &tx_ring->dma, GFP_KERNEL); 2519 } 2520 2521 if (!tx_ring->desc) 2522 goto err; 2523 2524 tx_ring->next_to_use = 0; 2525 tx_ring->next_to_clean = 0; 2526 2527 return 0; 2528 2529 err: 2530 kvfree(tx_ring->tx_buffer_info); 2531 tx_ring->tx_buffer_info = NULL; 2532 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); 2533 return -ENOMEM; 2534 } 2535 2536 /** 2537 * wx_setup_all_tx_resources - allocate all queues Tx resources 2538 * @wx: pointer to private structure 2539 * 2540 * If this function returns with an error, then it's possible one or 2541 * more of the rings is populated (while the rest are not). It is the 2542 * callers duty to clean those orphaned rings. 
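 *
 * Both the Rx loop above and the Tx loop below unwind with the
 * "while (i--)" idiom on failure: if, say, allocation fails for queue 2
 * of 4, i is still 2 when the error label runs, so queues 1 and 0 are
 * freed in reverse order, while the failing queue has already cleaned up
 * after itself inside wx_setup_tx_resources().
 *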
2543 * 2544 * Return 0 on success, negative on failure 2545 **/ 2546 static int wx_setup_all_tx_resources(struct wx *wx) 2547 { 2548 int i, err = 0; 2549 2550 for (i = 0; i < wx->num_tx_queues; i++) { 2551 err = wx_setup_tx_resources(wx->tx_ring[i]); 2552 if (!err) 2553 continue; 2554 2555 wx_err(wx, "Allocation for Tx Queue %u failed\n", i); 2556 goto err_setup_tx; 2557 } 2558 2559 return 0; 2560 err_setup_tx: 2561 /* rewind the index freeing the rings as we go */ 2562 while (i--) 2563 wx_free_tx_resources(wx->tx_ring[i]); 2564 return err; 2565 } 2566 2567 int wx_setup_resources(struct wx *wx) 2568 { 2569 int err; 2570 2571 /* allocate transmit descriptors */ 2572 err = wx_setup_all_tx_resources(wx); 2573 if (err) 2574 return err; 2575 2576 /* allocate receive descriptors */ 2577 err = wx_setup_all_rx_resources(wx); 2578 if (err) 2579 goto err_free_tx; 2580 2581 err = wx_setup_isb_resources(wx); 2582 if (err) 2583 goto err_free_rx; 2584 2585 return 0; 2586 2587 err_free_rx: 2588 wx_free_all_rx_resources(wx); 2589 err_free_tx: 2590 wx_free_all_tx_resources(wx); 2591 2592 return err; 2593 } 2594 EXPORT_SYMBOL(wx_setup_resources); 2595 2596 /** 2597 * wx_get_stats64 - Get System Network Statistics 2598 * @netdev: network interface device structure 2599 * @stats: storage space for 64bit statistics 2600 */ 2601 void wx_get_stats64(struct net_device *netdev, 2602 struct rtnl_link_stats64 *stats) 2603 { 2604 struct wx *wx = netdev_priv(netdev); 2605 struct wx_hw_stats *hwstats; 2606 int i; 2607 2608 wx_update_stats(wx); 2609 2610 rcu_read_lock(); 2611 for (i = 0; i < wx->num_rx_queues; i++) { 2612 struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]); 2613 u64 bytes, packets; 2614 unsigned int start; 2615 2616 if (ring) { 2617 do { 2618 start = u64_stats_fetch_begin(&ring->syncp); 2619 packets = ring->stats.packets; 2620 bytes = ring->stats.bytes; 2621 } while (u64_stats_fetch_retry(&ring->syncp, start)); 2622 stats->rx_packets += packets; 2623 stats->rx_bytes += bytes; 2624 } 2625 } 2626 2627 for (i = 0; i < wx->num_tx_queues; i++) { 2628 struct wx_ring *ring = READ_ONCE(wx->tx_ring[i]); 2629 u64 bytes, packets; 2630 unsigned int start; 2631 2632 if (ring) { 2633 do { 2634 start = u64_stats_fetch_begin(&ring->syncp); 2635 packets = ring->stats.packets; 2636 bytes = ring->stats.bytes; 2637 } while (u64_stats_fetch_retry(&ring->syncp, 2638 start)); 2639 stats->tx_packets += packets; 2640 stats->tx_bytes += bytes; 2641 } 2642 } 2643 2644 rcu_read_unlock(); 2645 2646 hwstats = &wx->stats; 2647 stats->rx_errors = hwstats->crcerrs + hwstats->rlec; 2648 stats->multicast = hwstats->qmprc; 2649 stats->rx_length_errors = hwstats->rlec; 2650 stats->rx_crc_errors = hwstats->crcerrs; 2651 } 2652 EXPORT_SYMBOL(wx_get_stats64); 2653 2654 int wx_set_features(struct net_device *netdev, netdev_features_t features) 2655 { 2656 netdev_features_t changed = netdev->features ^ features; 2657 struct wx *wx = netdev_priv(netdev); 2658 2659 if (changed & NETIF_F_RXHASH) 2660 wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 2661 WX_RDB_RA_CTL_RSS_EN); 2662 else 2663 wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0); 2664 2665 if (changed & 2666 (NETIF_F_HW_VLAN_CTAG_RX | 2667 NETIF_F_HW_VLAN_STAG_RX)) 2668 wx_set_rx_mode(netdev); 2669 2670 return 1; 2671 } 2672 EXPORT_SYMBOL(wx_set_features); 2673 2674 MODULE_LICENSE("GPL"); 2675
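/*
 * Usage sketch (illustration only; the real callers, e.g. the txgbe and
 * ngbe drivers, wire these exports into their own open/close paths).  The
 * interrupt scheme has to be built before the ring resources, because
 * wx_setup_resources() works on the rings and q_vectors that
 * wx_init_interrupt_scheme() creates:
 *
 *    err = wx_init_interrupt_scheme(wx);   // queues, vectors, q_vectors
 *    if (err)
 *        return err;
 *    err = wx_setup_resources(wx);         // Tx/Rx descriptor rings + ISB
 *    if (err) {
 *        wx_clear_interrupt_scheme(wx);
 *        return err;
 *    }
 *    wx_configure_vectors(wx);             // program IVAR/EITR in hardware
 *    // ...request IRQs and bring the interface up...
 *
 * Teardown runs in the opposite order: wx_free_irq(), then
 * wx_free_resources(), then wx_clear_interrupt_scheme().
 */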