/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_BATCH)
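
/* To see what these sizes translate to in pages: efx_init_rx_recycle_ring()
 * divides them by efx->rx_bufs_per_page and rounds up to a power of two.
 * Assuming two receive buffers share each page (rx_bufs_per_page == 2), the
 * IOMMU case gives a ring of 4096 / 2 == 2048 pages and the no-IOMMU case a
 * ring of (2 * 8) / 2 == 8 pages.
 */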

/* Maximum length for an RX descriptor sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state) \
			  - EFX_PAGE_IP_ALIGN)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
	/* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(eh - 4));
#else
	const u8 *data = eh - 4;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

/* Return true if this is the last RX buffer using a page. */
static inline bool efx_rx_is_last_buffer(struct efx_nic *efx,
					 struct efx_rx_buffer *rx_buf)
{
	return (rx_buf->page_offset >= (PAGE_SIZE >> 1) ||
		efx->rx_dma_len > EFX_RX_HALF_PAGE);
}
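
/* The layout these helpers assume: each DMA-mapped page starts with a
 * struct efx_rx_page_state recording the page's DMA address, followed by one
 * receive buffer, or by two receive buffers when efx->rx_dma_len fits within
 * EFX_RX_HALF_PAGE.  efx_rx_is_last_buffer() is therefore true for the buffer
 * in the upper half of a split page, or for the only buffer of an unsplit
 * page.
 */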

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}
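
/* A page returned by efx_reuse_page() still carries a valid DMA mapping in
 * its efx_rx_page_state, so the fast path below can skip dma_map_page() and
 * simply take a new reference on the page.
 */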

/**
 * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}
		get_page(page);

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->page = page;
		rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
		rx_buf->len = efx->rx_dma_len;
		++rx_queue->added_count;

		if ((~count & 1) && (efx->rx_dma_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_offset += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		put_page(rx_buf->page);
		rx_buf->page = NULL;
	}
}
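
/* Note the division of labour between these helpers: efx_unmap_rx_buffer()
 * releases the page's DMA mapping and so may only be called for the final
 * buffer sharing a page, efx_free_rx_buffer() merely drops one page
 * reference, and efx_fini_rx_buffer() below combines the two when a queue is
 * torn down.
 */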

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!efx_rx_is_last_buffer(efx, rx_buf))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (efx_rx_is_last_buffer(rx_queue->efx, rx_buf)) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffer(rx_buf);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_buffers(struct efx_channel *channel,
				   struct efx_rx_buffer *rx_buf,
				   unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		while (n_frags--) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb->rxhash = efx_rx_buf_hash(eh);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	gro_result = napi_gro_frags(napi);
	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
	if (unlikely(skb == NULL))
		return NULL;

	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
	memcpy(__skb_put(skb, hdr_len), eh, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags = flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
		   unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		put_page(rx_buf->page);
		efx_recycle_rx_buffers(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1)
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->type->rx_buffer_hash_size;
	rx_buf->len -= efx->type->rx_buffer_hash_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
		}
		rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle buffers and pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_buffers(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}
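
/* efx_rx_packet() above is the "first half" of receive processing: it
 * validates and DMA-syncs the completed fragments, recycles their pages, and
 * then parks the packet in channel->rx_pkt_index/rx_pkt_n_frags.  The payload
 * is only touched later, when efx_rx_flush_packet() (implemented outside this
 * file) hands the parked packet to __efx_rx_packet(); deferring this gives
 * the prefetched headers time to arrive in cache.
 */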

static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		efx_free_rx_buffer(rx_buf);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(rx_buf);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if (!channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}
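
/* The recycle ring below is sized by whether an IOMMU is in use: remapping
 * pages through an IOMMU makes dma_map_page()/dma_unmap_page() expensive, so
 * a much deeper ring is worthwhile there.  Presence of dev.iommu_group is
 * used as the indicator, and PPC64 is assumed always to translate DMA
 * addresses.
 */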

void efx_init_rx_recycle_ring(struct efx_nic *efx,
			      struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (efx->pci_dev->dev.iommu_group)
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger = max_fill - EFX_RX_BATCH;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;

	/* Set up RX descriptor ring */
	rx_queue->enabled = true;
	efx_nic_init_rx(rx_queue);
}
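
/* With rx_refill_threshold left at its default of 0, the trigger computed
 * above is max_fill - EFX_RX_BATCH, so efx_fast_push_rx_descriptors() starts
 * refilling as soon as the queue has fallen more than one batch of
 * descriptors below max_fill.
 */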

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* A flush failure might have left rx_queue->enabled */
	rx_queue->enabled = false;

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");
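
/* Usage example (illustrative; "sfc" is assumed to be the module name for
 * this driver): loading with "modprobe sfc rx_refill_threshold=90" sets the
 * refill trigger to 90% of max_fill, capped at the default of
 * max_fill - EFX_RX_BATCH.
 */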