// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;


static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.06"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
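/* Packets below the copybreak thresholds are copied into a preallocated
 * buffer instead of getting their own DMA mapping; see the bounce-buffer
 * path in ibmveth_start_xmit() for the transmit side.
 */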

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

static bool old_large_send __read_mostly;
module_param(old_large_send, bool, 0444);
MODULE_PARM_DESC(old_large_send,
	"Use old large send method on firmware that supports the new method");

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

static struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
	{ "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

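/* byte offset of the received frame within its buffer, taken from the
 * big-endian flags_off field of the current rx queue entry
 */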
static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc_array(pool->size, sizeof(u16), GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;
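	/* flush the buffer from the data cache one SMP_CACHE_BYTES line at a
	 * time (dcbf = data cache block flush); used when the rx_flush module
	 * parameter is set
	 */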

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
						adapter->netdev->mtu +
						IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}

/*
 * The final 8 bytes of the buffer list is a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
	__be64 *p = adapter->buffer_list_addr + 4096 - 8;

	adapter->rx_no_buffer = be64_to_cpup(p);
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	ibmveth_update_rx_no_buffer(adapter);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
					union ibmveth_buf_desc rxq_desc,
					u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

static u64 ibmveth_encode_mac_addr(u8 *mac)
{
	int i;
	u64 encoded = 0;

	for (i = 0; i < ETH_ALEN; i++)
		encoded = (encoded << 8) | mac[i];

	return encoded;
}

static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	rc = -ENOMEM;
	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	if (!adapter->buffer_list_addr) {
		netdev_err(netdev, "unable to allocate list pages\n");
		goto out;
	}

	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	if (!adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter pages\n");
		goto out_free_buffer_list;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr)
		goto out_free_filter_list;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
		netdev_err(netdev, "unable to map buffer list pages\n");
		goto out_free_queue_mem;
	}

	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->filter_list_dma)) {
		netdev_err(netdev, "unable to map filter list pages\n");
		goto out_unmap_buffer_list;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   rxq_desc.desc,
			   mac_address);
		rc = -ENONET;
		goto out_unmap_filter_list;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto out_free_buffer_pools;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto out_free_buffer_pools;
	}

	rc = -ENOMEM;
	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer)
		goto out_free_irq;

	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		goto out_free_bounce_buffer;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

out_free_bounce_buffer:
	kfree(adapter->bounce_buffer);
out_free_irq:
	free_irq(netdev->irq, netdev);
out_free_buffer_pools:
	while (--i >= 0) {
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);
	}
out_unmap_filter_list:
	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
out_unmap_buffer_list:
	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
out_free_queue_mem:
	dma_free_coherent(dev, adapter->rx_queue.queue_len,
			  adapter->rx_queue.queue_addr,
			  adapter->rx_queue.queue_dma);
out_free_filter_list:
	free_page((unsigned long)adapter->filter_list_addr);
out_free_buffer_list:
	free_page((unsigned long)adapter->buffer_list_addr);
out:
	napi_disable(&adapter->napi);
	return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	long lpar_rc;
	int i;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

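	/* pool_config is set by the ethtool paths (ibmveth_set_csum_offload()
	 * and ibmveth_set_tso() below) while they do an internal close/reopen
	 * to change firmware attributes; in that case the tx queue stays up.
	 */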
	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	ibmveth_update_rx_no_buffer(adapter);

	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)adapter->buffer_list_addr);

	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)adapter->filter_list_addr);

	dma_free_coherent(dev, adapter->rx_queue.queue_len,
			  adapter->rx_queue.queue_addr,
			  adapter->rx_queue.queue_dma);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
			 adapter->netdev->mtu + IBMVETH_BUFF_OH,
			 DMA_BIDIRECTIONAL);
	kfree(adapter->bounce_buffer);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

static int ibmveth_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	return ethtool_virtdev_set_link_ksettings(dev, cmd,
						  &adapter->speed,
						  &adapter->duplex);
}

static int ibmveth_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	cmd->base.speed = adapter->speed;
	cmd->base.duplex = adapter->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static void ibmveth_init_link_settings(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	adapter->speed = SPEED_1000;
	adapter->duplex = DUPLEX_FULL;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
	strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_CSUM_MASK;

	return features;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				" %d rc=%ld ret_attr=%lx\n", data, ret,
				ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

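	/* report the firmware attribute failure first, otherwise any failure
	 * from reopening the device
	 */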
	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	long ret1, ret2;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;

	if (data)
		set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
	else
		clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

	ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret2 != H_SUCCESS) {
			netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
				   data, ret2);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
			rc1 = -EIO;

		} else {
			adapter->fw_large_send_support = data;
			adapter->large_send = data;
		}
	} else {
		/* Older firmware version of large send offload does not
		 * support tcp6/ipv6
		 */
		if (data == 1) {
			dev->features &= ~NETIF_F_TSO6;
			netdev_info(dev, "TSO feature requires all partitions to have updated driver");
		}
		adapter->large_send = data;
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
	int rc1 = 0, rc2 = 0;

	if (rx_csum != adapter->rx_csum) {
		rc1 = ibmveth_set_csum_offload(dev, rx_csum);
		if (rc1 && !adapter->rx_csum)
			dev->features =
				features & ~(NETIF_F_CSUM_MASK |
					     NETIF_F_RXCSUM);
	}

	if (large_send != adapter->large_send) {
		rc2 = ibmveth_set_tso(dev, large_send);
		if (rc2 && !adapter->large_send)
			dev->features =
				features & ~(NETIF_F_TSO | NETIF_F_TSO6);
	}

	return rc1 ? rc1 : rc2;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
	.get_link_ksettings	= ibmveth_get_link_ksettings,
	.set_link_ksettings	= ibmveth_set_link_ksettings,
};

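/* no device-private ioctls are supported */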
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs, unsigned long mss)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator, mss,
					 adapter->fw_large_send_support);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
					 struct net_device *netdev)
{
	struct ethhdr *ether_header;
	int ret = 0;

	ether_header = eth_hdr(skb);

	if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) {
		netdev_dbg(netdev, "veth doesn't support loopback packets, dropping packet.\n");
		netdev->stats.tx_dropped++;
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;
	unsigned long mss = 0;

	if (ibmveth_is_packet_unsupported(skb, netdev))
		goto out;

	/* veth doesn't handle frag_list, so linearize the skb.
	 * When GRO is enabled SKB's can have frag_list.
	 */
	if (adapter->is_active_trunk &&
	    skb_has_frag_list(skb) && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;

		if (skb_is_gso(skb) && adapter->fw_large_send_support)
			desc_flags |= IBMVETH_BUF_LRG_SND;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the tx copybreak threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs, 0)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		if (adapter->fw_large_send_support) {
			mss = (unsigned long)skb_shinfo(skb)->gso_size;
			adapter->tx_large_packets++;
		} else if (!skb_is_gso_v6(skb)) {
			/* Put -1 in the IP checksum to tell phyp it
			 * is a largesend packet. Put the mss in
			 * the TCP checksum.
			 */
			ip_hdr(skb)->check = 0xffff;
			tcp_hdr(skb)->check =
				cpu_to_be16(skb_shinfo(skb)->gso_size);
			adapter->tx_large_packets++;
		}
	}

	if (ibmveth_send(adapter, descs, mss)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

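	/* h_send_logical_lan() moves the data synchronously, so the mappings
	 * can be released and the skb consumed as soon as the hcall returns;
	 * there is no separate tx completion event in this driver.
	 */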
Miller dma_unmap_single(&adapter->vdev->dev, 11738decf868SDavid S. Miller descs[0].fields.address, 11748decf868SDavid S. Miller descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK, 11758decf868SDavid S. Miller DMA_TO_DEVICE); 11768decf868SDavid S. Miller 11778decf868SDavid S. Miller for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) 11789aa32835SJeff Kirsher dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, 11799aa32835SJeff Kirsher descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, 11809aa32835SJeff Kirsher DMA_TO_DEVICE); 11819aa32835SJeff Kirsher 11829aa32835SJeff Kirsher out: 118326faa9d7SEric W. Biederman dev_consume_skb_any(skb); 11849aa32835SJeff Kirsher return NETDEV_TX_OK; 11859aa32835SJeff Kirsher 11869aa32835SJeff Kirsher map_failed_frags: 11879aa32835SJeff Kirsher last = i+1; 1188756af9c6STyrel Datwyler for (i = 1; i < last; i++) 11899aa32835SJeff Kirsher dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, 11909aa32835SJeff Kirsher descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, 11919aa32835SJeff Kirsher DMA_TO_DEVICE); 11929aa32835SJeff Kirsher 1193756af9c6STyrel Datwyler dma_unmap_single(&adapter->vdev->dev, 1194756af9c6STyrel Datwyler descs[0].fields.address, 1195756af9c6STyrel Datwyler descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK, 1196756af9c6STyrel Datwyler DMA_TO_DEVICE); 11979aa32835SJeff Kirsher map_failed: 11989aa32835SJeff Kirsher if (!firmware_has_feature(FW_FEATURE_CMO)) 11999aa32835SJeff Kirsher netdev_err(netdev, "tx: unable to map xmit buffer\n"); 12009aa32835SJeff Kirsher adapter->tx_map_failed++; 12012c42bf4bSThomas Falcon if (skb_linearize(skb)) { 12022c42bf4bSThomas Falcon netdev->stats.tx_dropped++; 12032c42bf4bSThomas Falcon goto out; 12042c42bf4bSThomas Falcon } 12059aa32835SJeff Kirsher force_bounce = 1; 12069aa32835SJeff Kirsher goto retry_bounce; 12079aa32835SJeff Kirsher } 12089aa32835SJeff Kirsher 12097b596738SThomas Falcon static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt) 12107b596738SThomas Falcon { 121194acf164SThomas Falcon struct tcphdr *tcph; 12127b596738SThomas Falcon int offset = 0; 121394acf164SThomas Falcon int hdr_len; 12147b596738SThomas Falcon 12157b596738SThomas Falcon /* only TCP packets will be aggregated */ 12167b596738SThomas Falcon if (skb->protocol == htons(ETH_P_IP)) { 12177b596738SThomas Falcon struct iphdr *iph = (struct iphdr *)skb->data; 12187b596738SThomas Falcon 12197b596738SThomas Falcon if (iph->protocol == IPPROTO_TCP) { 12207b596738SThomas Falcon offset = iph->ihl * 4; 12217b596738SThomas Falcon skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 12227b596738SThomas Falcon } else { 12237b596738SThomas Falcon return; 12247b596738SThomas Falcon } 12257b596738SThomas Falcon } else if (skb->protocol == htons(ETH_P_IPV6)) { 12267b596738SThomas Falcon struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data; 12277b596738SThomas Falcon 12287b596738SThomas Falcon if (iph6->nexthdr == IPPROTO_TCP) { 12297b596738SThomas Falcon offset = sizeof(struct ipv6hdr); 12307b596738SThomas Falcon skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 12317b596738SThomas Falcon } else { 12327b596738SThomas Falcon return; 12337b596738SThomas Falcon } 12347b596738SThomas Falcon } else { 12357b596738SThomas Falcon return; 12367b596738SThomas Falcon } 12377b596738SThomas Falcon /* if mss is not set through Large Packet bit/mss in rx buffer, 12387b596738SThomas Falcon * expect that the mss will be written to the tcp header checksum. 
12397b596738SThomas Falcon */
124094acf164SThomas Falcon tcph = (struct tcphdr *)(skb->data + offset);
12417b596738SThomas Falcon if (lrg_pkt) {
12427b596738SThomas Falcon skb_shinfo(skb)->gso_size = mss;
12437b596738SThomas Falcon } else if (offset) {
12447b596738SThomas Falcon skb_shinfo(skb)->gso_size = ntohs(tcph->check);
12457b596738SThomas Falcon tcph->check = 0;
12467b596738SThomas Falcon }
124794acf164SThomas Falcon 
124894acf164SThomas Falcon if (skb_shinfo(skb)->gso_size) {
124994acf164SThomas Falcon hdr_len = offset + tcph->doff * 4;
125094acf164SThomas Falcon skb_shinfo(skb)->gso_segs =
125194acf164SThomas Falcon DIV_ROUND_UP(skb->len - hdr_len,
125294acf164SThomas Falcon skb_shinfo(skb)->gso_size);
125394acf164SThomas Falcon }
12547b596738SThomas Falcon }
12557b596738SThomas Falcon 
125666aa0678SSivakumar Krishnasamy static void ibmveth_rx_csum_helper(struct sk_buff *skb,
125766aa0678SSivakumar Krishnasamy struct ibmveth_adapter *adapter)
125866aa0678SSivakumar Krishnasamy {
125966aa0678SSivakumar Krishnasamy struct iphdr *iph = NULL;
126066aa0678SSivakumar Krishnasamy struct ipv6hdr *iph6 = NULL;
126166aa0678SSivakumar Krishnasamy __be16 skb_proto = 0;
126266aa0678SSivakumar Krishnasamy u16 iphlen = 0;
126366aa0678SSivakumar Krishnasamy u16 iph_proto = 0;
126466aa0678SSivakumar Krishnasamy u16 tcphdrlen = 0;
126566aa0678SSivakumar Krishnasamy 
126666aa0678SSivakumar Krishnasamy skb_proto = be16_to_cpu(skb->protocol);
126766aa0678SSivakumar Krishnasamy 
126866aa0678SSivakumar Krishnasamy if (skb_proto == ETH_P_IP) {
126966aa0678SSivakumar Krishnasamy iph = (struct iphdr *)skb->data;
127066aa0678SSivakumar Krishnasamy 
127166aa0678SSivakumar Krishnasamy /* If the IP checksum is not offloaded and if the packet
127266aa0678SSivakumar Krishnasamy * is large send, the checksum must be rebuilt.
127366aa0678SSivakumar Krishnasamy */
127466aa0678SSivakumar Krishnasamy if (iph->check == 0xffff) {
127566aa0678SSivakumar Krishnasamy iph->check = 0;
127666aa0678SSivakumar Krishnasamy iph->check = ip_fast_csum((unsigned char *)iph,
127766aa0678SSivakumar Krishnasamy iph->ihl);
127866aa0678SSivakumar Krishnasamy }
127966aa0678SSivakumar Krishnasamy 
128066aa0678SSivakumar Krishnasamy iphlen = iph->ihl * 4;
128166aa0678SSivakumar Krishnasamy iph_proto = iph->protocol;
128266aa0678SSivakumar Krishnasamy } else if (skb_proto == ETH_P_IPV6) {
128366aa0678SSivakumar Krishnasamy iph6 = (struct ipv6hdr *)skb->data;
128466aa0678SSivakumar Krishnasamy iphlen = sizeof(struct ipv6hdr);
128566aa0678SSivakumar Krishnasamy iph_proto = iph6->nexthdr;
128666aa0678SSivakumar Krishnasamy }
128766aa0678SSivakumar Krishnasamy 
1288*7525de25SDavid Wilder /* When CSO is enabled the TCP checksum may have been set to zero by
1289*7525de25SDavid Wilder * the sender, given that we zeroed out the TCP checksum field in the
1290*7525de25SDavid Wilder * transmit path (refer to the ibmveth_start_xmit routine). In this
1291*7525de25SDavid Wilder * case set up CHECKSUM_PARTIAL. If the packet is forwarded, the
1292*7525de25SDavid Wilder * checksum will then be recalculated by the destination NIC (CSO must
1293*7525de25SDavid Wilder * be enabled on the destination NIC).
1294*7525de25SDavid Wilder *
1295*7525de25SDavid Wilder * In an OVS environment, when a flow is not cached, specifically for a
1296*7525de25SDavid Wilder * new TCP connection, the first packet information is passed up to
129766aa0678SSivakumar Krishnasamy * the user space for finding a flow.
During this process, OVS computes 129866aa0678SSivakumar Krishnasamy * checksum on the first packet when CHECKSUM_PARTIAL flag is set. 129966aa0678SSivakumar Krishnasamy * 1300*7525de25SDavid Wilder * So, re-compute TCP pseudo header checksum when configured for 1301*7525de25SDavid Wilder * trunk mode. 130266aa0678SSivakumar Krishnasamy */ 1303*7525de25SDavid Wilder if (iph_proto == IPPROTO_TCP) { 130466aa0678SSivakumar Krishnasamy struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen); 1305*7525de25SDavid Wilder if (tcph->check == 0x0000) { 130666aa0678SSivakumar Krishnasamy /* Recompute TCP pseudo header checksum */ 1307*7525de25SDavid Wilder if (adapter->is_active_trunk) { 1308*7525de25SDavid Wilder tcphdrlen = skb->len - iphlen; 130966aa0678SSivakumar Krishnasamy if (skb_proto == ETH_P_IP) 1310*7525de25SDavid Wilder tcph->check = 1311*7525de25SDavid Wilder ~csum_tcpudp_magic(iph->saddr, 131266aa0678SSivakumar Krishnasamy iph->daddr, tcphdrlen, iph_proto, 0); 131366aa0678SSivakumar Krishnasamy else if (skb_proto == ETH_P_IPV6) 1314*7525de25SDavid Wilder tcph->check = 1315*7525de25SDavid Wilder ~csum_ipv6_magic(&iph6->saddr, 131666aa0678SSivakumar Krishnasamy &iph6->daddr, tcphdrlen, iph_proto, 0); 1317*7525de25SDavid Wilder } 131866aa0678SSivakumar Krishnasamy /* Setup SKB fields for checksum offload */ 131966aa0678SSivakumar Krishnasamy skb_partial_csum_set(skb, iphlen, 132066aa0678SSivakumar Krishnasamy offsetof(struct tcphdr, check)); 132166aa0678SSivakumar Krishnasamy skb_reset_network_header(skb); 132266aa0678SSivakumar Krishnasamy } 132366aa0678SSivakumar Krishnasamy } 1324*7525de25SDavid Wilder } 132566aa0678SSivakumar Krishnasamy 13269aa32835SJeff Kirsher static int ibmveth_poll(struct napi_struct *napi, int budget) 13279aa32835SJeff Kirsher { 13289aa32835SJeff Kirsher struct ibmveth_adapter *adapter = 13299aa32835SJeff Kirsher container_of(napi, struct ibmveth_adapter, napi); 13309aa32835SJeff Kirsher struct net_device *netdev = adapter->netdev; 13319aa32835SJeff Kirsher int frames_processed = 0; 13329aa32835SJeff Kirsher unsigned long lpar_rc; 13337b596738SThomas Falcon u16 mss = 0; 13349aa32835SJeff Kirsher 1335cb013ea1SEric W. 
Biederman while (frames_processed < budget) { 13369aa32835SJeff Kirsher if (!ibmveth_rxq_pending_buffer(adapter)) 13379aa32835SJeff Kirsher break; 13389aa32835SJeff Kirsher 13399aa32835SJeff Kirsher smp_rmb(); 13409aa32835SJeff Kirsher if (!ibmveth_rxq_buffer_valid(adapter)) { 13419aa32835SJeff Kirsher wmb(); /* suggested by larson1 */ 13429aa32835SJeff Kirsher adapter->rx_invalid_buffer++; 13439aa32835SJeff Kirsher netdev_dbg(netdev, "recycling invalid buffer\n"); 13449aa32835SJeff Kirsher ibmveth_rxq_recycle_buffer(adapter); 13459aa32835SJeff Kirsher } else { 13469aa32835SJeff Kirsher struct sk_buff *skb, *new_skb; 13479aa32835SJeff Kirsher int length = ibmveth_rxq_frame_length(adapter); 13489aa32835SJeff Kirsher int offset = ibmveth_rxq_frame_offset(adapter); 13499aa32835SJeff Kirsher int csum_good = ibmveth_rxq_csum_good(adapter); 13507b596738SThomas Falcon int lrg_pkt = ibmveth_rxq_large_packet(adapter); 1351413f142cSDavid Wilder __sum16 iph_check = 0; 13529aa32835SJeff Kirsher 13539aa32835SJeff Kirsher skb = ibmveth_rxq_get_buffer(adapter); 13549aa32835SJeff Kirsher 13557b596738SThomas Falcon /* if the large packet bit is set in the rx queue 13567b596738SThomas Falcon * descriptor, the mss will be written by PHYP eight 13577b596738SThomas Falcon * bytes from the start of the rx buffer, which is 13587b596738SThomas Falcon * skb->data at this stage 13597b596738SThomas Falcon */ 13607b596738SThomas Falcon if (lrg_pkt) { 13617b596738SThomas Falcon __be64 *rxmss = (__be64 *)(skb->data + 8); 13627b596738SThomas Falcon 13637b596738SThomas Falcon mss = (u16)be64_to_cpu(*rxmss); 13647b596738SThomas Falcon } 13657b596738SThomas Falcon 13669aa32835SJeff Kirsher new_skb = NULL; 13679aa32835SJeff Kirsher if (length < rx_copybreak) 13689aa32835SJeff Kirsher new_skb = netdev_alloc_skb(netdev, length); 13699aa32835SJeff Kirsher 13709aa32835SJeff Kirsher if (new_skb) { 13719aa32835SJeff Kirsher skb_copy_to_linear_data(new_skb, 13729aa32835SJeff Kirsher skb->data + offset, 13739aa32835SJeff Kirsher length); 13749aa32835SJeff Kirsher if (rx_flush) 13759aa32835SJeff Kirsher ibmveth_flush_buffer(skb->data, 13769aa32835SJeff Kirsher length + offset); 13778decf868SDavid S. Miller if (!ibmveth_rxq_recycle_buffer(adapter)) 13788decf868SDavid S. Miller kfree_skb(skb); 13799aa32835SJeff Kirsher skb = new_skb; 13809aa32835SJeff Kirsher } else { 13819aa32835SJeff Kirsher ibmveth_rxq_harvest_buffer(adapter); 13829aa32835SJeff Kirsher skb_reserve(skb, offset); 13839aa32835SJeff Kirsher } 13849aa32835SJeff Kirsher 13859aa32835SJeff Kirsher skb_put(skb, length); 13869aa32835SJeff Kirsher skb->protocol = eth_type_trans(skb, netdev); 13879aa32835SJeff Kirsher 1388413f142cSDavid Wilder /* PHYP without PLSO support places a -1 in the ip 1389413f142cSDavid Wilder * checksum for large send frames. 
1390413f142cSDavid Wilder */ 1391413f142cSDavid Wilder if (skb->protocol == cpu_to_be16(ETH_P_IP)) { 1392413f142cSDavid Wilder struct iphdr *iph = (struct iphdr *)skb->data; 1393413f142cSDavid Wilder 1394413f142cSDavid Wilder iph_check = iph->check; 1395413f142cSDavid Wilder } 1396413f142cSDavid Wilder 1397413f142cSDavid Wilder if ((length > netdev->mtu + ETH_HLEN) || 1398413f142cSDavid Wilder lrg_pkt || iph_check == 0xffff) { 13997b596738SThomas Falcon ibmveth_rx_mss_helper(skb, mss, lrg_pkt); 14009c7e8bc5SThomas Falcon adapter->rx_large_packets++; 14019c7e8bc5SThomas Falcon } 14029aa32835SJeff Kirsher 14035ce9ad81SDavid Wilder if (csum_good) { 14045ce9ad81SDavid Wilder skb->ip_summed = CHECKSUM_UNNECESSARY; 14055ce9ad81SDavid Wilder ibmveth_rx_csum_helper(skb, adapter); 14065ce9ad81SDavid Wilder } 14075ce9ad81SDavid Wilder 140892ec8279SThomas Falcon napi_gro_receive(napi, skb); /* send it up */ 14099aa32835SJeff Kirsher 14109aa32835SJeff Kirsher netdev->stats.rx_packets++; 14119aa32835SJeff Kirsher netdev->stats.rx_bytes += length; 14129aa32835SJeff Kirsher frames_processed++; 14139aa32835SJeff Kirsher } 1414cb013ea1SEric W. Biederman } 14159aa32835SJeff Kirsher 14169aa32835SJeff Kirsher ibmveth_replenish_task(adapter); 14179aa32835SJeff Kirsher 14189aa32835SJeff Kirsher if (frames_processed < budget) { 14196ad20165SEric Dumazet napi_complete_done(napi, frames_processed); 14204736edc7SYongbae Park 14219aa32835SJeff Kirsher /* We think we are done - reenable interrupts, 14229aa32835SJeff Kirsher * then check once more to make sure we are done. 14239aa32835SJeff Kirsher */ 14249aa32835SJeff Kirsher lpar_rc = h_vio_signal(adapter->vdev->unit_address, 14259aa32835SJeff Kirsher VIO_IRQ_ENABLE); 14269aa32835SJeff Kirsher 14279aa32835SJeff Kirsher BUG_ON(lpar_rc != H_SUCCESS); 14289aa32835SJeff Kirsher 14299aa32835SJeff Kirsher if (ibmveth_rxq_pending_buffer(adapter) && 14309aa32835SJeff Kirsher napi_reschedule(napi)) { 14319aa32835SJeff Kirsher lpar_rc = h_vio_signal(adapter->vdev->unit_address, 14329aa32835SJeff Kirsher VIO_IRQ_DISABLE); 14339aa32835SJeff Kirsher } 14349aa32835SJeff Kirsher } 14359aa32835SJeff Kirsher 14369aa32835SJeff Kirsher return frames_processed; 14379aa32835SJeff Kirsher } 14389aa32835SJeff Kirsher 14399aa32835SJeff Kirsher static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) 14409aa32835SJeff Kirsher { 14419aa32835SJeff Kirsher struct net_device *netdev = dev_instance; 14429aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(netdev); 14439aa32835SJeff Kirsher unsigned long lpar_rc; 14449aa32835SJeff Kirsher 14459aa32835SJeff Kirsher if (napi_schedule_prep(&adapter->napi)) { 14469aa32835SJeff Kirsher lpar_rc = h_vio_signal(adapter->vdev->unit_address, 14479aa32835SJeff Kirsher VIO_IRQ_DISABLE); 14489aa32835SJeff Kirsher BUG_ON(lpar_rc != H_SUCCESS); 14499aa32835SJeff Kirsher __napi_schedule(&adapter->napi); 14509aa32835SJeff Kirsher } 14519aa32835SJeff Kirsher return IRQ_HANDLED; 14529aa32835SJeff Kirsher } 14539aa32835SJeff Kirsher 14549aa32835SJeff Kirsher static void ibmveth_set_multicast_list(struct net_device *netdev) 14559aa32835SJeff Kirsher { 14569aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(netdev); 14579aa32835SJeff Kirsher unsigned long lpar_rc; 14589aa32835SJeff Kirsher 14599aa32835SJeff Kirsher if ((netdev->flags & IFF_PROMISC) || 14609aa32835SJeff Kirsher (netdev_mc_count(netdev) > adapter->mcastFilterSize)) { 14619aa32835SJeff Kirsher lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 
14629aa32835SJeff Kirsher IbmVethMcastEnableRecv | 14639aa32835SJeff Kirsher IbmVethMcastDisableFiltering, 14649aa32835SJeff Kirsher 0); 14659aa32835SJeff Kirsher if (lpar_rc != H_SUCCESS) { 14669aa32835SJeff Kirsher netdev_err(netdev, "h_multicast_ctrl rc=%ld when " 14679aa32835SJeff Kirsher "entering promisc mode\n", lpar_rc); 14689aa32835SJeff Kirsher } 14699aa32835SJeff Kirsher } else { 14709aa32835SJeff Kirsher struct netdev_hw_addr *ha; 14719aa32835SJeff Kirsher /* clear the filter table & disable filtering */ 14729aa32835SJeff Kirsher lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 14739aa32835SJeff Kirsher IbmVethMcastEnableRecv | 14749aa32835SJeff Kirsher IbmVethMcastDisableFiltering | 14759aa32835SJeff Kirsher IbmVethMcastClearFilterTable, 14769aa32835SJeff Kirsher 0); 14779aa32835SJeff Kirsher if (lpar_rc != H_SUCCESS) { 14789aa32835SJeff Kirsher netdev_err(netdev, "h_multicast_ctrl rc=%ld when " 14799aa32835SJeff Kirsher "attempting to clear filter table\n", 14809aa32835SJeff Kirsher lpar_rc); 14819aa32835SJeff Kirsher } 14829aa32835SJeff Kirsher /* add the addresses to the filter table */ 14839aa32835SJeff Kirsher netdev_for_each_mc_addr(ha, netdev) { 14849aa32835SJeff Kirsher /* add the multicast address to the filter table */ 1485d746ca95SAnton Blanchard u64 mcast_addr; 1486d746ca95SAnton Blanchard mcast_addr = ibmveth_encode_mac_addr(ha->addr); 14879aa32835SJeff Kirsher lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 14889aa32835SJeff Kirsher IbmVethMcastAddFilter, 14899aa32835SJeff Kirsher mcast_addr); 14909aa32835SJeff Kirsher if (lpar_rc != H_SUCCESS) { 14919aa32835SJeff Kirsher netdev_err(netdev, "h_multicast_ctrl rc=%ld " 14929aa32835SJeff Kirsher "when adding an entry to the filter " 14939aa32835SJeff Kirsher "table\n", lpar_rc); 14949aa32835SJeff Kirsher } 14959aa32835SJeff Kirsher } 14969aa32835SJeff Kirsher 14979aa32835SJeff Kirsher /* re-enable filtering */ 14989aa32835SJeff Kirsher lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 14999aa32835SJeff Kirsher IbmVethMcastEnableFiltering, 15009aa32835SJeff Kirsher 0); 15019aa32835SJeff Kirsher if (lpar_rc != H_SUCCESS) { 15029aa32835SJeff Kirsher netdev_err(netdev, "h_multicast_ctrl rc=%ld when " 15039aa32835SJeff Kirsher "enabling filtering\n", lpar_rc); 15049aa32835SJeff Kirsher } 15059aa32835SJeff Kirsher } 15069aa32835SJeff Kirsher } 15079aa32835SJeff Kirsher 15089aa32835SJeff Kirsher static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) 15099aa32835SJeff Kirsher { 15109aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(dev); 15119aa32835SJeff Kirsher struct vio_dev *viodev = adapter->vdev; 15129aa32835SJeff Kirsher int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; 15139aa32835SJeff Kirsher int i, rc; 15149aa32835SJeff Kirsher int need_restart = 0; 15159aa32835SJeff Kirsher 15169aa32835SJeff Kirsher for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) 15174fce1482SDavid Gibson if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) 15189aa32835SJeff Kirsher break; 15199aa32835SJeff Kirsher 15209aa32835SJeff Kirsher if (i == IBMVETH_NUM_BUFF_POOLS) 15219aa32835SJeff Kirsher return -EINVAL; 15229aa32835SJeff Kirsher 15239aa32835SJeff Kirsher /* Deactivate all the buffer pools so that the next loop can activate 15249aa32835SJeff Kirsher only the buffer pools necessary to hold the new MTU */ 15259aa32835SJeff Kirsher if (netif_running(adapter->netdev)) { 15269aa32835SJeff Kirsher need_restart = 1; 15279aa32835SJeff Kirsher adapter->pool_config = 1; 15289aa32835SJeff Kirsher 
ibmveth_close(adapter->netdev); 15299aa32835SJeff Kirsher adapter->pool_config = 0; 15309aa32835SJeff Kirsher } 15319aa32835SJeff Kirsher 15329aa32835SJeff Kirsher /* Look for an active buffer pool that can hold the new MTU */ 15339aa32835SJeff Kirsher for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 15349aa32835SJeff Kirsher adapter->rx_buff_pool[i].active = 1; 15359aa32835SJeff Kirsher 15364fce1482SDavid Gibson if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) { 15379aa32835SJeff Kirsher dev->mtu = new_mtu; 15389aa32835SJeff Kirsher vio_cmo_set_dev_desired(viodev, 15399aa32835SJeff Kirsher ibmveth_get_desired_dma 15409aa32835SJeff Kirsher (viodev)); 15419aa32835SJeff Kirsher if (need_restart) { 15429aa32835SJeff Kirsher return ibmveth_open(adapter->netdev); 15439aa32835SJeff Kirsher } 15449aa32835SJeff Kirsher return 0; 15459aa32835SJeff Kirsher } 15469aa32835SJeff Kirsher } 15479aa32835SJeff Kirsher 15489aa32835SJeff Kirsher if (need_restart && (rc = ibmveth_open(adapter->netdev))) 15499aa32835SJeff Kirsher return rc; 15509aa32835SJeff Kirsher 15519aa32835SJeff Kirsher return -EINVAL; 15529aa32835SJeff Kirsher } 15539aa32835SJeff Kirsher 15549aa32835SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 15559aa32835SJeff Kirsher static void ibmveth_poll_controller(struct net_device *dev) 15569aa32835SJeff Kirsher { 15579aa32835SJeff Kirsher ibmveth_replenish_task(netdev_priv(dev)); 15589aa32835SJeff Kirsher ibmveth_interrupt(dev->irq, dev); 15599aa32835SJeff Kirsher } 15609aa32835SJeff Kirsher #endif 15619aa32835SJeff Kirsher 15629aa32835SJeff Kirsher /** 15639aa32835SJeff Kirsher * ibmveth_get_desired_dma - Calculate IO memory desired by the driver 15649aa32835SJeff Kirsher * 15659aa32835SJeff Kirsher * @vdev: struct vio_dev for the device whose desired IO mem is to be returned 15669aa32835SJeff Kirsher * 15679aa32835SJeff Kirsher * Return value: 15689aa32835SJeff Kirsher * Number of bytes of IO data the driver will need to perform well. 15699aa32835SJeff Kirsher */ 15709aa32835SJeff Kirsher static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) 15719aa32835SJeff Kirsher { 15729aa32835SJeff Kirsher struct net_device *netdev = dev_get_drvdata(&vdev->dev); 15739aa32835SJeff Kirsher struct ibmveth_adapter *adapter; 1574d0847757SAlistair Popple struct iommu_table *tbl; 15759aa32835SJeff Kirsher unsigned long ret; 15769aa32835SJeff Kirsher int i; 15779aa32835SJeff Kirsher int rxqentries = 1; 15789aa32835SJeff Kirsher 1579d0847757SAlistair Popple tbl = get_iommu_table_base(&vdev->dev); 1580d0847757SAlistair Popple 15819aa32835SJeff Kirsher /* netdev inits at probe time along with the structures we need below*/ 15829aa32835SJeff Kirsher if (netdev == NULL) 1583d0847757SAlistair Popple return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl); 15849aa32835SJeff Kirsher 15859aa32835SJeff Kirsher adapter = netdev_priv(netdev); 15869aa32835SJeff Kirsher 15879aa32835SJeff Kirsher ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; 1588d0847757SAlistair Popple ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl); 15899aa32835SJeff Kirsher 15909aa32835SJeff Kirsher for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 15919aa32835SJeff Kirsher /* add the size of the active receive buffers */ 15929aa32835SJeff Kirsher if (adapter->rx_buff_pool[i].active) 15939aa32835SJeff Kirsher ret += 15949aa32835SJeff Kirsher adapter->rx_buff_pool[i].size * 15959aa32835SJeff Kirsher IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i]. 
1596d0847757SAlistair Popple buff_size, tbl); 15979aa32835SJeff Kirsher rxqentries += adapter->rx_buff_pool[i].size; 15989aa32835SJeff Kirsher } 15999aa32835SJeff Kirsher /* add the size of the receive queue entries */ 1600d0847757SAlistair Popple ret += IOMMU_PAGE_ALIGN( 1601d0847757SAlistair Popple rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl); 16029aa32835SJeff Kirsher 16039aa32835SJeff Kirsher return ret; 16049aa32835SJeff Kirsher } 16059aa32835SJeff Kirsher 1606c77c761fSThomas Falcon static int ibmveth_set_mac_addr(struct net_device *dev, void *p) 1607c77c761fSThomas Falcon { 1608c77c761fSThomas Falcon struct ibmveth_adapter *adapter = netdev_priv(dev); 1609c77c761fSThomas Falcon struct sockaddr *addr = p; 1610c77c761fSThomas Falcon u64 mac_address; 1611c77c761fSThomas Falcon int rc; 1612c77c761fSThomas Falcon 1613c77c761fSThomas Falcon if (!is_valid_ether_addr(addr->sa_data)) 1614c77c761fSThomas Falcon return -EADDRNOTAVAIL; 1615c77c761fSThomas Falcon 1616c77c761fSThomas Falcon mac_address = ibmveth_encode_mac_addr(addr->sa_data); 1617c77c761fSThomas Falcon rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address); 1618c77c761fSThomas Falcon if (rc) { 1619c77c761fSThomas Falcon netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc); 1620c77c761fSThomas Falcon return rc; 1621c77c761fSThomas Falcon } 1622c77c761fSThomas Falcon 1623c77c761fSThomas Falcon ether_addr_copy(dev->dev_addr, addr->sa_data); 1624c77c761fSThomas Falcon 1625c77c761fSThomas Falcon return 0; 1626c77c761fSThomas Falcon } 1627c77c761fSThomas Falcon 16289aa32835SJeff Kirsher static const struct net_device_ops ibmveth_netdev_ops = { 16299aa32835SJeff Kirsher .ndo_open = ibmveth_open, 16309aa32835SJeff Kirsher .ndo_stop = ibmveth_close, 16319aa32835SJeff Kirsher .ndo_start_xmit = ibmveth_start_xmit, 1632afc4b13dSJiri Pirko .ndo_set_rx_mode = ibmveth_set_multicast_list, 16339aa32835SJeff Kirsher .ndo_do_ioctl = ibmveth_ioctl, 16349aa32835SJeff Kirsher .ndo_change_mtu = ibmveth_change_mtu, 16359aa32835SJeff Kirsher .ndo_fix_features = ibmveth_fix_features, 16369aa32835SJeff Kirsher .ndo_set_features = ibmveth_set_features, 16379aa32835SJeff Kirsher .ndo_validate_addr = eth_validate_addr, 1638c77c761fSThomas Falcon .ndo_set_mac_address = ibmveth_set_mac_addr, 16399aa32835SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 16409aa32835SJeff Kirsher .ndo_poll_controller = ibmveth_poll_controller, 16419aa32835SJeff Kirsher #endif 16429aa32835SJeff Kirsher }; 16439aa32835SJeff Kirsher 16441dd06ae8SGreg Kroah-Hartman static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) 16459aa32835SJeff Kirsher { 164613f85203SBenjamin Herrenschmidt int rc, i, mac_len; 16479aa32835SJeff Kirsher struct net_device *netdev; 16489aa32835SJeff Kirsher struct ibmveth_adapter *adapter; 16499aa32835SJeff Kirsher unsigned char *mac_addr_p; 165066cf4710SThomas Falcon __be32 *mcastFilterSize_p; 165107e6a97dSThomas Falcon long ret; 165207e6a97dSThomas Falcon unsigned long ret_attr; 16539aa32835SJeff Kirsher 16549aa32835SJeff Kirsher dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n", 16559aa32835SJeff Kirsher dev->unit_address); 16569aa32835SJeff Kirsher 16579aa32835SJeff Kirsher mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR, 165813f85203SBenjamin Herrenschmidt &mac_len); 16599aa32835SJeff Kirsher if (!mac_addr_p) { 16609aa32835SJeff Kirsher dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n"); 16619aa32835SJeff Kirsher return -EINVAL; 16629aa32835SJeff 
Kirsher } 166313f85203SBenjamin Herrenschmidt /* Workaround for old/broken pHyp */ 166413f85203SBenjamin Herrenschmidt if (mac_len == 8) 166513f85203SBenjamin Herrenschmidt mac_addr_p += 2; 166613f85203SBenjamin Herrenschmidt else if (mac_len != 6) { 166713f85203SBenjamin Herrenschmidt dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n", 166813f85203SBenjamin Herrenschmidt mac_len); 166913f85203SBenjamin Herrenschmidt return -EINVAL; 167013f85203SBenjamin Herrenschmidt } 16719aa32835SJeff Kirsher 167266cf4710SThomas Falcon mcastFilterSize_p = (__be32 *)vio_get_attribute(dev, 167366cf4710SThomas Falcon VETH_MCAST_FILTER_SIZE, 167466cf4710SThomas Falcon NULL); 16759aa32835SJeff Kirsher if (!mcastFilterSize_p) { 16769aa32835SJeff Kirsher dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " 16779aa32835SJeff Kirsher "attribute\n"); 16789aa32835SJeff Kirsher return -EINVAL; 16799aa32835SJeff Kirsher } 16809aa32835SJeff Kirsher 16819aa32835SJeff Kirsher netdev = alloc_etherdev(sizeof(struct ibmveth_adapter)); 16829aa32835SJeff Kirsher 16839aa32835SJeff Kirsher if (!netdev) 16849aa32835SJeff Kirsher return -ENOMEM; 16859aa32835SJeff Kirsher 16869aa32835SJeff Kirsher adapter = netdev_priv(netdev); 16879aa32835SJeff Kirsher dev_set_drvdata(&dev->dev, netdev); 16889aa32835SJeff Kirsher 16899aa32835SJeff Kirsher adapter->vdev = dev; 16909aa32835SJeff Kirsher adapter->netdev = netdev; 169166cf4710SThomas Falcon adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p); 16929aa32835SJeff Kirsher adapter->pool_config = 0; 16939aedc6e2SCris Forno ibmveth_init_link_settings(netdev); 16949aa32835SJeff Kirsher 16959aa32835SJeff Kirsher netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); 16969aa32835SJeff Kirsher 16979aa32835SJeff Kirsher netdev->irq = dev->irq; 16989aa32835SJeff Kirsher netdev->netdev_ops = &ibmveth_netdev_ops; 16999aa32835SJeff Kirsher netdev->ethtool_ops = &netdev_ethtool_ops; 17009aa32835SJeff Kirsher SET_NETDEV_DEV(netdev, &dev->dev); 170123d28a85SThomas Huth netdev->hw_features = NETIF_F_SG; 170223d28a85SThomas Huth if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) { 170323d28a85SThomas Huth netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 170423d28a85SThomas Huth NETIF_F_RXCSUM; 170523d28a85SThomas Huth } 170607e6a97dSThomas Falcon 17079aa32835SJeff Kirsher netdev->features |= netdev->hw_features; 17089aa32835SJeff Kirsher 170907e6a97dSThomas Falcon ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); 171007e6a97dSThomas Falcon 171107e6a97dSThomas Falcon /* If running older firmware, TSO should not be enabled by default */ 171207e6a97dSThomas Falcon if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) && 171307e6a97dSThomas Falcon !old_large_send) { 171407e6a97dSThomas Falcon netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 171507e6a97dSThomas Falcon netdev->features |= netdev->hw_features; 171607e6a97dSThomas Falcon } else { 17178641dd85SThomas Falcon netdev->hw_features |= NETIF_F_TSO; 171807e6a97dSThomas Falcon } 17198641dd85SThomas Falcon 172066aa0678SSivakumar Krishnasamy adapter->is_active_trunk = false; 172166aa0678SSivakumar Krishnasamy if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK)) { 172266aa0678SSivakumar Krishnasamy adapter->is_active_trunk = true; 172366aa0678SSivakumar Krishnasamy netdev->hw_features |= NETIF_F_FRAGLIST; 172466aa0678SSivakumar Krishnasamy netdev->features |= NETIF_F_FRAGLIST; 172566aa0678SSivakumar Krishnasamy } 172666aa0678SSivakumar Krishnasamy 
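/* MTU bounds advertised to the core: the upper bound leaves room for the
 * per-buffer overhead (IBMVETH_BUFF_OH) that a receive buffer must carry
 * on top of the frame itself.
 */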
1727d894be57SJarod Wilson netdev->min_mtu = IBMVETH_MIN_MTU; 17285948378bSThomas Falcon netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH; 1729d894be57SJarod Wilson 1730d746ca95SAnton Blanchard memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); 17319aa32835SJeff Kirsher 1732cd7c7ec3SThomas Falcon if (firmware_has_feature(FW_FEATURE_CMO)) 1733cd7c7ec3SThomas Falcon memcpy(pool_count, pool_count_cmo, sizeof(pool_count)); 1734cd7c7ec3SThomas Falcon 17359aa32835SJeff Kirsher for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 17369aa32835SJeff Kirsher struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; 17379aa32835SJeff Kirsher int error; 17389aa32835SJeff Kirsher 17399aa32835SJeff Kirsher ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, 17409aa32835SJeff Kirsher pool_count[i], pool_size[i], 17419aa32835SJeff Kirsher pool_active[i]); 17429aa32835SJeff Kirsher error = kobject_init_and_add(kobj, &ktype_veth_pool, 17439aa32835SJeff Kirsher &dev->dev.kobj, "pool%d", i); 17449aa32835SJeff Kirsher if (!error) 17459aa32835SJeff Kirsher kobject_uevent(kobj, KOBJ_ADD); 17469aa32835SJeff Kirsher } 17479aa32835SJeff Kirsher 17489aa32835SJeff Kirsher netdev_dbg(netdev, "adapter @ 0x%p\n", adapter); 17499aa32835SJeff Kirsher netdev_dbg(netdev, "registering netdev...\n"); 17509aa32835SJeff Kirsher 17519aa32835SJeff Kirsher ibmveth_set_features(netdev, netdev->features); 17529aa32835SJeff Kirsher 17539aa32835SJeff Kirsher rc = register_netdev(netdev); 17549aa32835SJeff Kirsher 17559aa32835SJeff Kirsher if (rc) { 17569aa32835SJeff Kirsher netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc); 17579aa32835SJeff Kirsher free_netdev(netdev); 17589aa32835SJeff Kirsher return rc; 17599aa32835SJeff Kirsher } 17609aa32835SJeff Kirsher 17619aa32835SJeff Kirsher netdev_dbg(netdev, "registered\n"); 17629aa32835SJeff Kirsher 17639aa32835SJeff Kirsher return 0; 17649aa32835SJeff Kirsher } 17659aa32835SJeff Kirsher 1766386a966fSUwe Kleine-König static void ibmveth_remove(struct vio_dev *dev) 17679aa32835SJeff Kirsher { 17689aa32835SJeff Kirsher struct net_device *netdev = dev_get_drvdata(&dev->dev); 17699aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(netdev); 17709aa32835SJeff Kirsher int i; 17719aa32835SJeff Kirsher 17729aa32835SJeff Kirsher for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) 17739aa32835SJeff Kirsher kobject_put(&adapter->rx_buff_pool[i].kobj); 17749aa32835SJeff Kirsher 17759aa32835SJeff Kirsher unregister_netdev(netdev); 17769aa32835SJeff Kirsher 17779aa32835SJeff Kirsher free_netdev(netdev); 17789aa32835SJeff Kirsher dev_set_drvdata(&dev->dev, NULL); 17799aa32835SJeff Kirsher } 17809aa32835SJeff Kirsher 17819aa32835SJeff Kirsher static struct attribute veth_active_attr; 17829aa32835SJeff Kirsher static struct attribute veth_num_attr; 17839aa32835SJeff Kirsher static struct attribute veth_size_attr; 17849aa32835SJeff Kirsher 17859aa32835SJeff Kirsher static ssize_t veth_pool_show(struct kobject *kobj, 17869aa32835SJeff Kirsher struct attribute *attr, char *buf) 17879aa32835SJeff Kirsher { 17889aa32835SJeff Kirsher struct ibmveth_buff_pool *pool = container_of(kobj, 17899aa32835SJeff Kirsher struct ibmveth_buff_pool, 17909aa32835SJeff Kirsher kobj); 17919aa32835SJeff Kirsher 17929aa32835SJeff Kirsher if (attr == &veth_active_attr) 17939aa32835SJeff Kirsher return sprintf(buf, "%d\n", pool->active); 17949aa32835SJeff Kirsher else if (attr == &veth_num_attr) 17959aa32835SJeff Kirsher return sprintf(buf, "%d\n", pool->size); 17969aa32835SJeff Kirsher else if (attr == &veth_size_attr) 
17979aa32835SJeff Kirsher return sprintf(buf, "%d\n", pool->buff_size); 17989aa32835SJeff Kirsher return 0; 17999aa32835SJeff Kirsher } 18009aa32835SJeff Kirsher 18019aa32835SJeff Kirsher static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, 18029aa32835SJeff Kirsher const char *buf, size_t count) 18039aa32835SJeff Kirsher { 18049aa32835SJeff Kirsher struct ibmveth_buff_pool *pool = container_of(kobj, 18059aa32835SJeff Kirsher struct ibmveth_buff_pool, 18069aa32835SJeff Kirsher kobj); 18071756055dSYueHaibing struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent)); 18089aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(netdev); 18099aa32835SJeff Kirsher long value = simple_strtol(buf, NULL, 10); 18109aa32835SJeff Kirsher long rc; 18119aa32835SJeff Kirsher 18129aa32835SJeff Kirsher if (attr == &veth_active_attr) { 18139aa32835SJeff Kirsher if (value && !pool->active) { 18149aa32835SJeff Kirsher if (netif_running(netdev)) { 18159aa32835SJeff Kirsher if (ibmveth_alloc_buffer_pool(pool)) { 18169aa32835SJeff Kirsher netdev_err(netdev, 18179aa32835SJeff Kirsher "unable to alloc pool\n"); 18189aa32835SJeff Kirsher return -ENOMEM; 18199aa32835SJeff Kirsher } 18209aa32835SJeff Kirsher pool->active = 1; 18219aa32835SJeff Kirsher adapter->pool_config = 1; 18229aa32835SJeff Kirsher ibmveth_close(netdev); 18239aa32835SJeff Kirsher adapter->pool_config = 0; 18249aa32835SJeff Kirsher if ((rc = ibmveth_open(netdev))) 18259aa32835SJeff Kirsher return rc; 18269aa32835SJeff Kirsher } else { 18279aa32835SJeff Kirsher pool->active = 1; 18289aa32835SJeff Kirsher } 18299aa32835SJeff Kirsher } else if (!value && pool->active) { 18309aa32835SJeff Kirsher int mtu = netdev->mtu + IBMVETH_BUFF_OH; 18319aa32835SJeff Kirsher int i; 18329aa32835SJeff Kirsher /* Make sure there is a buffer pool with buffers that 18339aa32835SJeff Kirsher can hold a packet of the size of the MTU */ 18349aa32835SJeff Kirsher for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 18359aa32835SJeff Kirsher if (pool == &adapter->rx_buff_pool[i]) 18369aa32835SJeff Kirsher continue; 18379aa32835SJeff Kirsher if (!adapter->rx_buff_pool[i].active) 18389aa32835SJeff Kirsher continue; 18399aa32835SJeff Kirsher if (mtu <= adapter->rx_buff_pool[i].buff_size) 18409aa32835SJeff Kirsher break; 18419aa32835SJeff Kirsher } 18429aa32835SJeff Kirsher 18439aa32835SJeff Kirsher if (i == IBMVETH_NUM_BUFF_POOLS) { 18449aa32835SJeff Kirsher netdev_err(netdev, "no active pool >= MTU\n"); 18459aa32835SJeff Kirsher return -EPERM; 18469aa32835SJeff Kirsher } 18479aa32835SJeff Kirsher 18489aa32835SJeff Kirsher if (netif_running(netdev)) { 18499aa32835SJeff Kirsher adapter->pool_config = 1; 18509aa32835SJeff Kirsher ibmveth_close(netdev); 18519aa32835SJeff Kirsher pool->active = 0; 18529aa32835SJeff Kirsher adapter->pool_config = 0; 18539aa32835SJeff Kirsher if ((rc = ibmveth_open(netdev))) 18549aa32835SJeff Kirsher return rc; 18559aa32835SJeff Kirsher } 18569aa32835SJeff Kirsher pool->active = 0; 18579aa32835SJeff Kirsher } 18589aa32835SJeff Kirsher } else if (attr == &veth_num_attr) { 18599aa32835SJeff Kirsher if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) { 18609aa32835SJeff Kirsher return -EINVAL; 18619aa32835SJeff Kirsher } else { 18629aa32835SJeff Kirsher if (netif_running(netdev)) { 18639aa32835SJeff Kirsher adapter->pool_config = 1; 18649aa32835SJeff Kirsher ibmveth_close(netdev); 18659aa32835SJeff Kirsher adapter->pool_config = 0; 18669aa32835SJeff Kirsher pool->size = value; 18679aa32835SJeff Kirsher if ((rc = 
ibmveth_open(netdev))) 18689aa32835SJeff Kirsher return rc; 18699aa32835SJeff Kirsher } else { 18709aa32835SJeff Kirsher pool->size = value; 18719aa32835SJeff Kirsher } 18729aa32835SJeff Kirsher } 18739aa32835SJeff Kirsher } else if (attr == &veth_size_attr) { 18749aa32835SJeff Kirsher if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) { 18759aa32835SJeff Kirsher return -EINVAL; 18769aa32835SJeff Kirsher } else { 18779aa32835SJeff Kirsher if (netif_running(netdev)) { 18789aa32835SJeff Kirsher adapter->pool_config = 1; 18799aa32835SJeff Kirsher ibmveth_close(netdev); 18809aa32835SJeff Kirsher adapter->pool_config = 0; 18819aa32835SJeff Kirsher pool->buff_size = value; 18829aa32835SJeff Kirsher if ((rc = ibmveth_open(netdev))) 18839aa32835SJeff Kirsher return rc; 18849aa32835SJeff Kirsher } else { 18859aa32835SJeff Kirsher pool->buff_size = value; 18869aa32835SJeff Kirsher } 18879aa32835SJeff Kirsher } 18889aa32835SJeff Kirsher } 18899aa32835SJeff Kirsher 18909aa32835SJeff Kirsher /* kick the interrupt handler to allocate/deallocate pools */ 18919aa32835SJeff Kirsher ibmveth_interrupt(netdev->irq, netdev); 18929aa32835SJeff Kirsher return count; 18939aa32835SJeff Kirsher } 18949aa32835SJeff Kirsher 18959aa32835SJeff Kirsher 18969aa32835SJeff Kirsher #define ATTR(_name, _mode) \ 18979aa32835SJeff Kirsher struct attribute veth_##_name##_attr = { \ 18989aa32835SJeff Kirsher .name = __stringify(_name), .mode = _mode, \ 18999aa32835SJeff Kirsher }; 19009aa32835SJeff Kirsher 19019aa32835SJeff Kirsher static ATTR(active, 0644); 19029aa32835SJeff Kirsher static ATTR(num, 0644); 19039aa32835SJeff Kirsher static ATTR(size, 0644); 19049aa32835SJeff Kirsher 19059aa32835SJeff Kirsher static struct attribute *veth_pool_attrs[] = { 19069aa32835SJeff Kirsher &veth_active_attr, 19079aa32835SJeff Kirsher &veth_num_attr, 19089aa32835SJeff Kirsher &veth_size_attr, 19099aa32835SJeff Kirsher NULL, 19109aa32835SJeff Kirsher }; 19119aa32835SJeff Kirsher 19129aa32835SJeff Kirsher static const struct sysfs_ops veth_pool_ops = { 19139aa32835SJeff Kirsher .show = veth_pool_show, 19149aa32835SJeff Kirsher .store = veth_pool_store, 19159aa32835SJeff Kirsher }; 19169aa32835SJeff Kirsher 19179aa32835SJeff Kirsher static struct kobj_type ktype_veth_pool = { 19189aa32835SJeff Kirsher .release = NULL, 19199aa32835SJeff Kirsher .sysfs_ops = &veth_pool_ops, 19209aa32835SJeff Kirsher .default_attrs = veth_pool_attrs, 19219aa32835SJeff Kirsher }; 19229aa32835SJeff Kirsher 19239aa32835SJeff Kirsher static int ibmveth_resume(struct device *dev) 19249aa32835SJeff Kirsher { 19259aa32835SJeff Kirsher struct net_device *netdev = dev_get_drvdata(dev); 19269aa32835SJeff Kirsher ibmveth_interrupt(netdev->irq, netdev); 19279aa32835SJeff Kirsher return 0; 19289aa32835SJeff Kirsher } 19299aa32835SJeff Kirsher 193071450804SArvind Yadav static const struct vio_device_id ibmveth_device_table[] = { 19319aa32835SJeff Kirsher { "network", "IBM,l-lan"}, 19329aa32835SJeff Kirsher { "", "" } 19339aa32835SJeff Kirsher }; 19349aa32835SJeff Kirsher MODULE_DEVICE_TABLE(vio, ibmveth_device_table); 19359aa32835SJeff Kirsher 1936eb60a73dSArvind Yadav static const struct dev_pm_ops ibmveth_pm_ops = { 19379aa32835SJeff Kirsher .resume = ibmveth_resume 19389aa32835SJeff Kirsher }; 19399aa32835SJeff Kirsher 19409aa32835SJeff Kirsher static struct vio_driver ibmveth_driver = { 19419aa32835SJeff Kirsher .id_table = ibmveth_device_table, 19429aa32835SJeff Kirsher .probe = ibmveth_probe, 19439aa32835SJeff Kirsher .remove = ibmveth_remove, 
19449aa32835SJeff Kirsher .get_desired_dma = ibmveth_get_desired_dma, 19459aa32835SJeff Kirsher .name = ibmveth_driver_name, 19469aa32835SJeff Kirsher .pm = &ibmveth_pm_ops, 19479aa32835SJeff Kirsher }; 19489aa32835SJeff Kirsher 19499aa32835SJeff Kirsher static int __init ibmveth_module_init(void) 19509aa32835SJeff Kirsher { 19519aa32835SJeff Kirsher printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name, 19529aa32835SJeff Kirsher ibmveth_driver_string, ibmveth_driver_version); 19539aa32835SJeff Kirsher 19549aa32835SJeff Kirsher return vio_register_driver(&ibmveth_driver); 19559aa32835SJeff Kirsher } 19569aa32835SJeff Kirsher 19579aa32835SJeff Kirsher static void __exit ibmveth_module_exit(void) 19589aa32835SJeff Kirsher { 19599aa32835SJeff Kirsher vio_unregister_driver(&ibmveth_driver); 19609aa32835SJeff Kirsher } 19619aa32835SJeff Kirsher 19629aa32835SJeff Kirsher module_init(ibmveth_module_init); 19639aa32835SJeff Kirsher module_exit(ibmveth_module_exit); 1964
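/*
 * Usage sketch (illustrative addition, not part of the driver itself):
 * ibmveth_probe() registers each receive buffer pool as a "pool%d" kobject
 * under the vio device's sysfs directory, and veth_pool_show()/
 * veth_pool_store() above back its "active", "num" and "size" attributes.
 * veth_pool_store() rejects "num" values outside 1..IBMVETH_MAX_POOL_COUNT
 * and "size" values outside (IBMVETH_BUFF_OH, IBMVETH_MAX_BUF_SIZE], and it
 * refuses to deactivate a pool unless another active pool can still hold an
 * MTU-sized packet plus overhead.  The sysfs path below is an assumed
 * example; the real path depends on the device's unit address.
 *
 *	// Minimal userspace sketch: resize pool 0 to 512 buffers.
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/devices/vio/30000002/pool0/num", "w");
 *
 *		if (!f)
 *			return 1;
 *		fprintf(f, "512\n");
 *		return fclose(f) ? 1 : 0;
 *	}
 */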