19aa32835SJeff Kirsher /* 29aa32835SJeff Kirsher * IBM Power Virtual Ethernet Device Driver 39aa32835SJeff Kirsher * 49aa32835SJeff Kirsher * This program is free software; you can redistribute it and/or modify 59aa32835SJeff Kirsher * it under the terms of the GNU General Public License as published by 69aa32835SJeff Kirsher * the Free Software Foundation; either version 2 of the License, or 79aa32835SJeff Kirsher * (at your option) any later version. 89aa32835SJeff Kirsher * 99aa32835SJeff Kirsher * This program is distributed in the hope that it will be useful, 109aa32835SJeff Kirsher * but WITHOUT ANY WARRANTY; without even the implied warranty of 119aa32835SJeff Kirsher * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 129aa32835SJeff Kirsher * GNU General Public License for more details. 139aa32835SJeff Kirsher * 149aa32835SJeff Kirsher * You should have received a copy of the GNU General Public License 150ab75ae8SJeff Kirsher * along with this program; if not, see <http://www.gnu.org/licenses/>. 169aa32835SJeff Kirsher * 179aa32835SJeff Kirsher * Copyright (C) IBM Corporation, 2003, 2010 189aa32835SJeff Kirsher * 199aa32835SJeff Kirsher * Authors: Dave Larson <larson1@us.ibm.com> 209aa32835SJeff Kirsher * Santiago Leon <santil@linux.vnet.ibm.com> 219aa32835SJeff Kirsher * Brian King <brking@linux.vnet.ibm.com> 229aa32835SJeff Kirsher * Robert Jennings <rcj@linux.vnet.ibm.com> 239aa32835SJeff Kirsher * Anton Blanchard <anton@au.ibm.com> 249aa32835SJeff Kirsher */ 259aa32835SJeff Kirsher 269aa32835SJeff Kirsher #include <linux/module.h> 279aa32835SJeff Kirsher #include <linux/moduleparam.h> 289aa32835SJeff Kirsher #include <linux/types.h> 299aa32835SJeff Kirsher #include <linux/errno.h> 309aa32835SJeff Kirsher #include <linux/dma-mapping.h> 319aa32835SJeff Kirsher #include <linux/kernel.h> 329aa32835SJeff Kirsher #include <linux/netdevice.h> 339aa32835SJeff Kirsher #include <linux/etherdevice.h> 349aa32835SJeff Kirsher #include <linux/skbuff.h> 359aa32835SJeff Kirsher #include <linux/init.h> 369aa32835SJeff Kirsher #include <linux/interrupt.h> 379aa32835SJeff Kirsher #include <linux/mm.h> 389aa32835SJeff Kirsher #include <linux/pm.h> 399aa32835SJeff Kirsher #include <linux/ethtool.h> 409aa32835SJeff Kirsher #include <linux/in.h> 419aa32835SJeff Kirsher #include <linux/ip.h> 429aa32835SJeff Kirsher #include <linux/ipv6.h> 439aa32835SJeff Kirsher #include <linux/slab.h> 449aa32835SJeff Kirsher #include <asm/hvcall.h> 459aa32835SJeff Kirsher #include <linux/atomic.h> 469aa32835SJeff Kirsher #include <asm/vio.h> 479aa32835SJeff Kirsher #include <asm/iommu.h> 489aa32835SJeff Kirsher #include <asm/firmware.h> 4966aa0678SSivakumar Krishnasamy #include <net/tcp.h> 5066aa0678SSivakumar Krishnasamy #include <net/ip6_checksum.h> 519aa32835SJeff Kirsher 529aa32835SJeff Kirsher #include "ibmveth.h" 539aa32835SJeff Kirsher 549aa32835SJeff Kirsher static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance); 559aa32835SJeff Kirsher static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); 569aa32835SJeff Kirsher static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev); 579aa32835SJeff Kirsher 589aa32835SJeff Kirsher static struct kobj_type ktype_veth_pool; 599aa32835SJeff Kirsher 609aa32835SJeff Kirsher 619aa32835SJeff Kirsher static const char ibmveth_driver_name[] = "ibmveth"; 629aa32835SJeff Kirsher static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver"; 637b596738SThomas Falcon #define ibmveth_driver_version "1.06" 649aa32835SJeff Kirsher 659aa32835SJeff Kirsher MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>"); 669aa32835SJeff Kirsher MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver"); 679aa32835SJeff Kirsher MODULE_LICENSE("GPL"); 689aa32835SJeff Kirsher MODULE_VERSION(ibmveth_driver_version); 699aa32835SJeff Kirsher 709aa32835SJeff Kirsher static unsigned int tx_copybreak __read_mostly = 128; 719aa32835SJeff Kirsher module_param(tx_copybreak, uint, 0644); 729aa32835SJeff Kirsher MODULE_PARM_DESC(tx_copybreak, 739aa32835SJeff Kirsher "Maximum size of packet that is copied to a new buffer on transmit"); 749aa32835SJeff Kirsher 759aa32835SJeff Kirsher static unsigned int rx_copybreak __read_mostly = 128; 769aa32835SJeff Kirsher module_param(rx_copybreak, uint, 0644); 779aa32835SJeff Kirsher MODULE_PARM_DESC(rx_copybreak, 789aa32835SJeff Kirsher "Maximum size of packet that is copied to a new buffer on receive"); 799aa32835SJeff Kirsher 809aa32835SJeff Kirsher static unsigned int rx_flush __read_mostly = 0; 819aa32835SJeff Kirsher module_param(rx_flush, uint, 0644); 829aa32835SJeff Kirsher MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use"); 839aa32835SJeff Kirsher 8407e6a97dSThomas Falcon static bool old_large_send __read_mostly; 85d3757ba4SJoe Perches module_param(old_large_send, bool, 0444); 8607e6a97dSThomas Falcon MODULE_PARM_DESC(old_large_send, 8707e6a97dSThomas Falcon "Use old large send method on firmware that supports the new method"); 8807e6a97dSThomas Falcon 899aa32835SJeff Kirsher struct ibmveth_stat { 909aa32835SJeff Kirsher char name[ETH_GSTRING_LEN]; 919aa32835SJeff Kirsher int offset; 929aa32835SJeff Kirsher }; 939aa32835SJeff Kirsher 949aa32835SJeff Kirsher #define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat) 959aa32835SJeff Kirsher #define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off)) 969aa32835SJeff Kirsher 979aa32835SJeff Kirsher struct ibmveth_stat ibmveth_stats[] = { 989aa32835SJeff Kirsher { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) }, 999aa32835SJeff Kirsher { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) }, 1009aa32835SJeff Kirsher { "replenish_add_buff_failure", 1019aa32835SJeff Kirsher IBMVETH_STAT_OFF(replenish_add_buff_failure) }, 1029aa32835SJeff Kirsher { "replenish_add_buff_success", 1039aa32835SJeff Kirsher IBMVETH_STAT_OFF(replenish_add_buff_success) }, 1049aa32835SJeff Kirsher { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) }, 1059aa32835SJeff Kirsher { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) }, 1069aa32835SJeff Kirsher { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) }, 1079aa32835SJeff Kirsher { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) }, 1089aa32835SJeff Kirsher { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) }, 1099aa32835SJeff Kirsher { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) }, 1108641dd85SThomas Falcon { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) }, 11107e6a97dSThomas Falcon { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }, 11207e6a97dSThomas Falcon { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) } 1139aa32835SJeff Kirsher }; 1149aa32835SJeff Kirsher 1159aa32835SJeff Kirsher /* simple methods of getting data from the current rxq entry */ 1169aa32835SJeff Kirsher static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter) 1179aa32835SJeff Kirsher { 1180b536be7SAnton Blanchard return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off); 1199aa32835SJeff Kirsher } 1209aa32835SJeff Kirsher 1219aa32835SJeff Kirsher static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter) 1229aa32835SJeff Kirsher { 1239aa32835SJeff Kirsher return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> 1249aa32835SJeff Kirsher IBMVETH_RXQ_TOGGLE_SHIFT; 1259aa32835SJeff Kirsher } 1269aa32835SJeff Kirsher 1279aa32835SJeff Kirsher static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter) 1289aa32835SJeff Kirsher { 1299aa32835SJeff Kirsher return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle; 1309aa32835SJeff Kirsher } 1319aa32835SJeff Kirsher 1329aa32835SJeff Kirsher static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter) 1339aa32835SJeff Kirsher { 1349aa32835SJeff Kirsher return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID; 1359aa32835SJeff Kirsher } 1369aa32835SJeff Kirsher 1379aa32835SJeff Kirsher static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter) 1389aa32835SJeff Kirsher { 1399aa32835SJeff Kirsher return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK; 1409aa32835SJeff Kirsher } 1419aa32835SJeff Kirsher 1427b596738SThomas Falcon static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter) 1437b596738SThomas Falcon { 1447b596738SThomas Falcon return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT; 1457b596738SThomas Falcon } 1467b596738SThomas Falcon 1479aa32835SJeff Kirsher static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) 1489aa32835SJeff Kirsher { 1490b536be7SAnton Blanchard return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length); 1509aa32835SJeff Kirsher } 1519aa32835SJeff Kirsher 1529aa32835SJeff Kirsher static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter) 1539aa32835SJeff Kirsher { 1549aa32835SJeff Kirsher return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD; 1559aa32835SJeff Kirsher } 1569aa32835SJeff Kirsher 1579aa32835SJeff Kirsher /* setup the initial settings for a buffer pool */ 1589aa32835SJeff Kirsher static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, 1599aa32835SJeff Kirsher u32 pool_index, u32 pool_size, 1609aa32835SJeff Kirsher u32 buff_size, u32 pool_active) 1619aa32835SJeff Kirsher { 1629aa32835SJeff Kirsher pool->size = pool_size; 1639aa32835SJeff Kirsher pool->index = pool_index; 1649aa32835SJeff Kirsher pool->buff_size = buff_size; 1659aa32835SJeff Kirsher pool->threshold = pool_size * 7 / 8; 1669aa32835SJeff Kirsher pool->active = pool_active; 1679aa32835SJeff Kirsher } 1689aa32835SJeff Kirsher 1699aa32835SJeff Kirsher /* allocate and setup an buffer pool - called during open */ 1709aa32835SJeff Kirsher static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) 1719aa32835SJeff Kirsher { 1729aa32835SJeff Kirsher int i; 1739aa32835SJeff Kirsher 174*6da2ec56SKees Cook pool->free_map = kmalloc_array(pool->size, sizeof(u16), GFP_KERNEL); 1759aa32835SJeff Kirsher 1769aa32835SJeff Kirsher if (!pool->free_map) 1779aa32835SJeff Kirsher return -1; 1789aa32835SJeff Kirsher 179076ef440SNicholas Mc Guire pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL); 1809aa32835SJeff Kirsher if (!pool->dma_addr) { 1819aa32835SJeff Kirsher kfree(pool->free_map); 1829aa32835SJeff Kirsher pool->free_map = NULL; 1839aa32835SJeff Kirsher return -1; 1849aa32835SJeff Kirsher } 1859aa32835SJeff Kirsher 1869aa32835SJeff Kirsher pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL); 1879aa32835SJeff Kirsher 1889aa32835SJeff Kirsher if (!pool->skbuff) { 1899aa32835SJeff Kirsher kfree(pool->dma_addr); 1909aa32835SJeff Kirsher pool->dma_addr = NULL; 1919aa32835SJeff Kirsher 1929aa32835SJeff Kirsher kfree(pool->free_map); 1939aa32835SJeff Kirsher pool->free_map = NULL; 1949aa32835SJeff Kirsher return -1; 1959aa32835SJeff Kirsher } 1969aa32835SJeff Kirsher 1979aa32835SJeff Kirsher for (i = 0; i < pool->size; ++i) 1989aa32835SJeff Kirsher pool->free_map[i] = i; 1999aa32835SJeff Kirsher 2009aa32835SJeff Kirsher atomic_set(&pool->available, 0); 2019aa32835SJeff Kirsher pool->producer_index = 0; 2029aa32835SJeff Kirsher pool->consumer_index = 0; 2039aa32835SJeff Kirsher 2049aa32835SJeff Kirsher return 0; 2059aa32835SJeff Kirsher } 2069aa32835SJeff Kirsher 2079aa32835SJeff Kirsher static inline void ibmveth_flush_buffer(void *addr, unsigned long length) 2089aa32835SJeff Kirsher { 2099aa32835SJeff Kirsher unsigned long offset; 2109aa32835SJeff Kirsher 2119aa32835SJeff Kirsher for (offset = 0; offset < length; offset += SMP_CACHE_BYTES) 2129aa32835SJeff Kirsher asm("dcbfl %0,%1" :: "b" (addr), "r" (offset)); 2139aa32835SJeff Kirsher } 2149aa32835SJeff Kirsher 2159aa32835SJeff Kirsher /* replenish the buffers for a pool. note that we don't need to 2169aa32835SJeff Kirsher * skb_reserve these since they are used for incoming... 2179aa32835SJeff Kirsher */ 2189aa32835SJeff Kirsher static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, 2199aa32835SJeff Kirsher struct ibmveth_buff_pool *pool) 2209aa32835SJeff Kirsher { 2219aa32835SJeff Kirsher u32 i; 2229aa32835SJeff Kirsher u32 count = pool->size - atomic_read(&pool->available); 2239aa32835SJeff Kirsher u32 buffers_added = 0; 2249aa32835SJeff Kirsher struct sk_buff *skb; 2259aa32835SJeff Kirsher unsigned int free_index, index; 2269aa32835SJeff Kirsher u64 correlator; 2279aa32835SJeff Kirsher unsigned long lpar_rc; 2289aa32835SJeff Kirsher dma_addr_t dma_addr; 2299aa32835SJeff Kirsher 2309aa32835SJeff Kirsher mb(); 2319aa32835SJeff Kirsher 2329aa32835SJeff Kirsher for (i = 0; i < count; ++i) { 2339aa32835SJeff Kirsher union ibmveth_buf_desc desc; 2349aa32835SJeff Kirsher 2359aa32835SJeff Kirsher skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); 2369aa32835SJeff Kirsher 2379aa32835SJeff Kirsher if (!skb) { 2389aa32835SJeff Kirsher netdev_dbg(adapter->netdev, 2399aa32835SJeff Kirsher "replenish: unable to allocate skb\n"); 2409aa32835SJeff Kirsher adapter->replenish_no_mem++; 2419aa32835SJeff Kirsher break; 2429aa32835SJeff Kirsher } 2439aa32835SJeff Kirsher 2449aa32835SJeff Kirsher free_index = pool->consumer_index; 2459aa32835SJeff Kirsher pool->consumer_index++; 2469aa32835SJeff Kirsher if (pool->consumer_index >= pool->size) 2479aa32835SJeff Kirsher pool->consumer_index = 0; 2489aa32835SJeff Kirsher index = pool->free_map[free_index]; 2499aa32835SJeff Kirsher 2509aa32835SJeff Kirsher BUG_ON(index == IBM_VETH_INVALID_MAP); 2519aa32835SJeff Kirsher BUG_ON(pool->skbuff[index] != NULL); 2529aa32835SJeff Kirsher 2539aa32835SJeff Kirsher dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 2549aa32835SJeff Kirsher pool->buff_size, DMA_FROM_DEVICE); 2559aa32835SJeff Kirsher 2569aa32835SJeff Kirsher if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) 2579aa32835SJeff Kirsher goto failure; 2589aa32835SJeff Kirsher 2599aa32835SJeff Kirsher pool->free_map[free_index] = IBM_VETH_INVALID_MAP; 2609aa32835SJeff Kirsher pool->dma_addr[index] = dma_addr; 2619aa32835SJeff Kirsher pool->skbuff[index] = skb; 2629aa32835SJeff Kirsher 2639aa32835SJeff Kirsher correlator = ((u64)pool->index << 32) | index; 2649aa32835SJeff Kirsher *(u64 *)skb->data = correlator; 2659aa32835SJeff Kirsher 2669aa32835SJeff Kirsher desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; 2679aa32835SJeff Kirsher desc.fields.address = dma_addr; 2689aa32835SJeff Kirsher 2699aa32835SJeff Kirsher if (rx_flush) { 2709aa32835SJeff Kirsher unsigned int len = min(pool->buff_size, 2719aa32835SJeff Kirsher adapter->netdev->mtu + 2729aa32835SJeff Kirsher IBMVETH_BUFF_OH); 2739aa32835SJeff Kirsher ibmveth_flush_buffer(skb->data, len); 2749aa32835SJeff Kirsher } 2759aa32835SJeff Kirsher lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, 2769aa32835SJeff Kirsher desc.desc); 2779aa32835SJeff Kirsher 2789aa32835SJeff Kirsher if (lpar_rc != H_SUCCESS) { 2799aa32835SJeff Kirsher goto failure; 2809aa32835SJeff Kirsher } else { 2819aa32835SJeff Kirsher buffers_added++; 2829aa32835SJeff Kirsher adapter->replenish_add_buff_success++; 2839aa32835SJeff Kirsher } 2849aa32835SJeff Kirsher } 2859aa32835SJeff Kirsher 2869aa32835SJeff Kirsher mb(); 2879aa32835SJeff Kirsher atomic_add(buffers_added, &(pool->available)); 2889aa32835SJeff Kirsher return; 2899aa32835SJeff Kirsher 2909aa32835SJeff Kirsher failure: 2919aa32835SJeff Kirsher pool->free_map[free_index] = index; 2929aa32835SJeff Kirsher pool->skbuff[index] = NULL; 2939aa32835SJeff Kirsher if (pool->consumer_index == 0) 2949aa32835SJeff Kirsher pool->consumer_index = pool->size - 1; 2959aa32835SJeff Kirsher else 2969aa32835SJeff Kirsher pool->consumer_index--; 2979aa32835SJeff Kirsher if (!dma_mapping_error(&adapter->vdev->dev, dma_addr)) 2989aa32835SJeff Kirsher dma_unmap_single(&adapter->vdev->dev, 2999aa32835SJeff Kirsher pool->dma_addr[index], pool->buff_size, 3009aa32835SJeff Kirsher DMA_FROM_DEVICE); 3019aa32835SJeff Kirsher dev_kfree_skb_any(skb); 3029aa32835SJeff Kirsher adapter->replenish_add_buff_failure++; 3039aa32835SJeff Kirsher 3049aa32835SJeff Kirsher mb(); 3059aa32835SJeff Kirsher atomic_add(buffers_added, &(pool->available)); 3069aa32835SJeff Kirsher } 3079aa32835SJeff Kirsher 308cbd52281SAnton Blanchard /* 309cbd52281SAnton Blanchard * The final 8 bytes of the buffer list is a counter of frames dropped 310cbd52281SAnton Blanchard * because there was not a buffer in the buffer list capable of holding 311cbd52281SAnton Blanchard * the frame. 312cbd52281SAnton Blanchard */ 313cbd52281SAnton Blanchard static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter) 314cbd52281SAnton Blanchard { 315cbd52281SAnton Blanchard __be64 *p = adapter->buffer_list_addr + 4096 - 8; 316cbd52281SAnton Blanchard 317cbd52281SAnton Blanchard adapter->rx_no_buffer = be64_to_cpup(p); 318cbd52281SAnton Blanchard } 319cbd52281SAnton Blanchard 3209aa32835SJeff Kirsher /* replenish routine */ 3219aa32835SJeff Kirsher static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) 3229aa32835SJeff Kirsher { 3239aa32835SJeff Kirsher int i; 3249aa32835SJeff Kirsher 3259aa32835SJeff Kirsher adapter->replenish_task_cycles++; 3269aa32835SJeff Kirsher 3279aa32835SJeff Kirsher for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) { 3289aa32835SJeff Kirsher struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i]; 3299aa32835SJeff Kirsher 3309aa32835SJeff Kirsher if (pool->active && 3319aa32835SJeff Kirsher (atomic_read(&pool->available) < pool->threshold)) 3329aa32835SJeff Kirsher ibmveth_replenish_buffer_pool(adapter, pool); 3339aa32835SJeff Kirsher } 3349aa32835SJeff Kirsher 335cbd52281SAnton Blanchard ibmveth_update_rx_no_buffer(adapter); 3369aa32835SJeff Kirsher } 3379aa32835SJeff Kirsher 3389aa32835SJeff Kirsher /* empty and free ana buffer pool - also used to do cleanup in error paths */ 3399aa32835SJeff Kirsher static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, 3409aa32835SJeff Kirsher struct ibmveth_buff_pool *pool) 3419aa32835SJeff Kirsher { 3429aa32835SJeff Kirsher int i; 3439aa32835SJeff Kirsher 3449aa32835SJeff Kirsher kfree(pool->free_map); 3459aa32835SJeff Kirsher pool->free_map = NULL; 3469aa32835SJeff Kirsher 3479aa32835SJeff Kirsher if (pool->skbuff && pool->dma_addr) { 3489aa32835SJeff Kirsher for (i = 0; i < pool->size; ++i) { 3499aa32835SJeff Kirsher struct sk_buff *skb = pool->skbuff[i]; 3509aa32835SJeff Kirsher if (skb) { 3519aa32835SJeff Kirsher dma_unmap_single(&adapter->vdev->dev, 3529aa32835SJeff Kirsher pool->dma_addr[i], 3539aa32835SJeff Kirsher pool->buff_size, 3549aa32835SJeff Kirsher DMA_FROM_DEVICE); 3559aa32835SJeff Kirsher dev_kfree_skb_any(skb); 3569aa32835SJeff Kirsher pool->skbuff[i] = NULL; 3579aa32835SJeff Kirsher } 3589aa32835SJeff Kirsher } 3599aa32835SJeff Kirsher } 3609aa32835SJeff Kirsher 3619aa32835SJeff Kirsher if (pool->dma_addr) { 3629aa32835SJeff Kirsher kfree(pool->dma_addr); 3639aa32835SJeff Kirsher pool->dma_addr = NULL; 3649aa32835SJeff Kirsher } 3659aa32835SJeff Kirsher 3669aa32835SJeff Kirsher if (pool->skbuff) { 3679aa32835SJeff Kirsher kfree(pool->skbuff); 3689aa32835SJeff Kirsher pool->skbuff = NULL; 3699aa32835SJeff Kirsher } 3709aa32835SJeff Kirsher } 3719aa32835SJeff Kirsher 3729aa32835SJeff Kirsher /* remove a buffer from a pool */ 3739aa32835SJeff Kirsher static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, 3749aa32835SJeff Kirsher u64 correlator) 3759aa32835SJeff Kirsher { 3769aa32835SJeff Kirsher unsigned int pool = correlator >> 32; 3779aa32835SJeff Kirsher unsigned int index = correlator & 0xffffffffUL; 3789aa32835SJeff Kirsher unsigned int free_index; 3799aa32835SJeff Kirsher struct sk_buff *skb; 3809aa32835SJeff Kirsher 3819aa32835SJeff Kirsher BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); 3829aa32835SJeff Kirsher BUG_ON(index >= adapter->rx_buff_pool[pool].size); 3839aa32835SJeff Kirsher 3849aa32835SJeff Kirsher skb = adapter->rx_buff_pool[pool].skbuff[index]; 3859aa32835SJeff Kirsher 3869aa32835SJeff Kirsher BUG_ON(skb == NULL); 3879aa32835SJeff Kirsher 3889aa32835SJeff Kirsher adapter->rx_buff_pool[pool].skbuff[index] = NULL; 3899aa32835SJeff Kirsher 3909aa32835SJeff Kirsher dma_unmap_single(&adapter->vdev->dev, 3919aa32835SJeff Kirsher adapter->rx_buff_pool[pool].dma_addr[index], 3929aa32835SJeff Kirsher adapter->rx_buff_pool[pool].buff_size, 3939aa32835SJeff Kirsher DMA_FROM_DEVICE); 3949aa32835SJeff Kirsher 3959aa32835SJeff Kirsher free_index = adapter->rx_buff_pool[pool].producer_index; 3969aa32835SJeff Kirsher adapter->rx_buff_pool[pool].producer_index++; 3979aa32835SJeff Kirsher if (adapter->rx_buff_pool[pool].producer_index >= 3989aa32835SJeff Kirsher adapter->rx_buff_pool[pool].size) 3999aa32835SJeff Kirsher adapter->rx_buff_pool[pool].producer_index = 0; 4009aa32835SJeff Kirsher adapter->rx_buff_pool[pool].free_map[free_index] = index; 4019aa32835SJeff Kirsher 4029aa32835SJeff Kirsher mb(); 4039aa32835SJeff Kirsher 4049aa32835SJeff Kirsher atomic_dec(&(adapter->rx_buff_pool[pool].available)); 4059aa32835SJeff Kirsher } 4069aa32835SJeff Kirsher 4079aa32835SJeff Kirsher /* get the current buffer on the rx queue */ 4089aa32835SJeff Kirsher static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter) 4099aa32835SJeff Kirsher { 4109aa32835SJeff Kirsher u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator; 4119aa32835SJeff Kirsher unsigned int pool = correlator >> 32; 4129aa32835SJeff Kirsher unsigned int index = correlator & 0xffffffffUL; 4139aa32835SJeff Kirsher 4149aa32835SJeff Kirsher BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); 4159aa32835SJeff Kirsher BUG_ON(index >= adapter->rx_buff_pool[pool].size); 4169aa32835SJeff Kirsher 4179aa32835SJeff Kirsher return adapter->rx_buff_pool[pool].skbuff[index]; 4189aa32835SJeff Kirsher } 4199aa32835SJeff Kirsher 4209aa32835SJeff Kirsher /* recycle the current buffer on the rx queue */ 4218decf868SDavid S. Miller static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) 4229aa32835SJeff Kirsher { 4239aa32835SJeff Kirsher u32 q_index = adapter->rx_queue.index; 4249aa32835SJeff Kirsher u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator; 4259aa32835SJeff Kirsher unsigned int pool = correlator >> 32; 4269aa32835SJeff Kirsher unsigned int index = correlator & 0xffffffffUL; 4279aa32835SJeff Kirsher union ibmveth_buf_desc desc; 4289aa32835SJeff Kirsher unsigned long lpar_rc; 4298decf868SDavid S. Miller int ret = 1; 4309aa32835SJeff Kirsher 4319aa32835SJeff Kirsher BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); 4329aa32835SJeff Kirsher BUG_ON(index >= adapter->rx_buff_pool[pool].size); 4339aa32835SJeff Kirsher 4349aa32835SJeff Kirsher if (!adapter->rx_buff_pool[pool].active) { 4359aa32835SJeff Kirsher ibmveth_rxq_harvest_buffer(adapter); 4369aa32835SJeff Kirsher ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); 4378decf868SDavid S. Miller goto out; 4389aa32835SJeff Kirsher } 4399aa32835SJeff Kirsher 4409aa32835SJeff Kirsher desc.fields.flags_len = IBMVETH_BUF_VALID | 4419aa32835SJeff Kirsher adapter->rx_buff_pool[pool].buff_size; 4429aa32835SJeff Kirsher desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index]; 4439aa32835SJeff Kirsher 4449aa32835SJeff Kirsher lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); 4459aa32835SJeff Kirsher 4469aa32835SJeff Kirsher if (lpar_rc != H_SUCCESS) { 4479aa32835SJeff Kirsher netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed " 4489aa32835SJeff Kirsher "during recycle rc=%ld", lpar_rc); 4499aa32835SJeff Kirsher ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); 4508decf868SDavid S. Miller ret = 0; 4519aa32835SJeff Kirsher } 4529aa32835SJeff Kirsher 4539aa32835SJeff Kirsher if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { 4549aa32835SJeff Kirsher adapter->rx_queue.index = 0; 4559aa32835SJeff Kirsher adapter->rx_queue.toggle = !adapter->rx_queue.toggle; 4569aa32835SJeff Kirsher } 4578decf868SDavid S. Miller 4588decf868SDavid S. Miller out: 4598decf868SDavid S. Miller return ret; 4609aa32835SJeff Kirsher } 4619aa32835SJeff Kirsher 4629aa32835SJeff Kirsher static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) 4639aa32835SJeff Kirsher { 4649aa32835SJeff Kirsher ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); 4659aa32835SJeff Kirsher 4669aa32835SJeff Kirsher if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { 4679aa32835SJeff Kirsher adapter->rx_queue.index = 0; 4689aa32835SJeff Kirsher adapter->rx_queue.toggle = !adapter->rx_queue.toggle; 4699aa32835SJeff Kirsher } 4709aa32835SJeff Kirsher } 4719aa32835SJeff Kirsher 4729aa32835SJeff Kirsher static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter, 4739aa32835SJeff Kirsher union ibmveth_buf_desc rxq_desc, u64 mac_address) 4749aa32835SJeff Kirsher { 4759aa32835SJeff Kirsher int rc, try_again = 1; 4769aa32835SJeff Kirsher 4779aa32835SJeff Kirsher /* 4789aa32835SJeff Kirsher * After a kexec the adapter will still be open, so our attempt to 4799aa32835SJeff Kirsher * open it will fail. So if we get a failure we free the adapter and 4809aa32835SJeff Kirsher * try again, but only once. 4819aa32835SJeff Kirsher */ 4829aa32835SJeff Kirsher retry: 4839aa32835SJeff Kirsher rc = h_register_logical_lan(adapter->vdev->unit_address, 4849aa32835SJeff Kirsher adapter->buffer_list_dma, rxq_desc.desc, 4859aa32835SJeff Kirsher adapter->filter_list_dma, mac_address); 4869aa32835SJeff Kirsher 4879aa32835SJeff Kirsher if (rc != H_SUCCESS && try_again) { 4889aa32835SJeff Kirsher do { 4899aa32835SJeff Kirsher rc = h_free_logical_lan(adapter->vdev->unit_address); 4909aa32835SJeff Kirsher } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); 4919aa32835SJeff Kirsher 4929aa32835SJeff Kirsher try_again = 0; 4939aa32835SJeff Kirsher goto retry; 4949aa32835SJeff Kirsher } 4959aa32835SJeff Kirsher 4969aa32835SJeff Kirsher return rc; 4979aa32835SJeff Kirsher } 4989aa32835SJeff Kirsher 499d746ca95SAnton Blanchard static u64 ibmveth_encode_mac_addr(u8 *mac) 500d746ca95SAnton Blanchard { 501d746ca95SAnton Blanchard int i; 502d746ca95SAnton Blanchard u64 encoded = 0; 503d746ca95SAnton Blanchard 504d746ca95SAnton Blanchard for (i = 0; i < ETH_ALEN; i++) 505d746ca95SAnton Blanchard encoded = (encoded << 8) | mac[i]; 506d746ca95SAnton Blanchard 507d746ca95SAnton Blanchard return encoded; 508d746ca95SAnton Blanchard } 509d746ca95SAnton Blanchard 5109aa32835SJeff Kirsher static int ibmveth_open(struct net_device *netdev) 5119aa32835SJeff Kirsher { 5129aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(netdev); 513d746ca95SAnton Blanchard u64 mac_address; 5149aa32835SJeff Kirsher int rxq_entries = 1; 5159aa32835SJeff Kirsher unsigned long lpar_rc; 5169aa32835SJeff Kirsher int rc; 5179aa32835SJeff Kirsher union ibmveth_buf_desc rxq_desc; 5189aa32835SJeff Kirsher int i; 5199aa32835SJeff Kirsher struct device *dev; 5209aa32835SJeff Kirsher 5219aa32835SJeff Kirsher netdev_dbg(netdev, "open starting\n"); 5229aa32835SJeff Kirsher 5239aa32835SJeff Kirsher napi_enable(&adapter->napi); 5249aa32835SJeff Kirsher 5259aa32835SJeff Kirsher for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) 5269aa32835SJeff Kirsher rxq_entries += adapter->rx_buff_pool[i].size; 5279aa32835SJeff Kirsher 5289aa32835SJeff Kirsher rc = -ENOMEM; 529d43732ceSChristoph Hellwig adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 530d43732ceSChristoph Hellwig if (!adapter->buffer_list_addr) { 531d43732ceSChristoph Hellwig netdev_err(netdev, "unable to allocate list pages\n"); 532d43732ceSChristoph Hellwig goto out; 533d43732ceSChristoph Hellwig } 534d43732ceSChristoph Hellwig 535d43732ceSChristoph Hellwig adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 536d43732ceSChristoph Hellwig if (!adapter->filter_list_addr) { 537d43732ceSChristoph Hellwig netdev_err(netdev, "unable to allocate filter pages\n"); 538d43732ceSChristoph Hellwig goto out_free_buffer_list; 5399aa32835SJeff Kirsher } 5409aa32835SJeff Kirsher 541d90c92feSSantiago Leon dev = &adapter->vdev->dev; 542d90c92feSSantiago Leon 5439aa32835SJeff Kirsher adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * 5449aa32835SJeff Kirsher rxq_entries; 545d90c92feSSantiago Leon adapter->rx_queue.queue_addr = 546d90c92feSSantiago Leon dma_alloc_coherent(dev, adapter->rx_queue.queue_len, 547d90c92feSSantiago Leon &adapter->rx_queue.queue_dma, GFP_KERNEL); 548d43732ceSChristoph Hellwig if (!adapter->rx_queue.queue_addr) 549d43732ceSChristoph Hellwig goto out_free_filter_list; 5509aa32835SJeff Kirsher 5519aa32835SJeff Kirsher adapter->buffer_list_dma = dma_map_single(dev, 5529aa32835SJeff Kirsher adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); 553d43732ceSChristoph Hellwig if (dma_mapping_error(dev, adapter->buffer_list_dma)) { 554d43732ceSChristoph Hellwig netdev_err(netdev, "unable to map buffer list pages\n"); 555d43732ceSChristoph Hellwig goto out_free_queue_mem; 556d43732ceSChristoph Hellwig } 557d43732ceSChristoph Hellwig 5589aa32835SJeff Kirsher adapter->filter_list_dma = dma_map_single(dev, 5599aa32835SJeff Kirsher adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); 560d43732ceSChristoph Hellwig if (dma_mapping_error(dev, adapter->filter_list_dma)) { 561d43732ceSChristoph Hellwig netdev_err(netdev, "unable to map filter list pages\n"); 562d43732ceSChristoph Hellwig goto out_unmap_buffer_list; 5639aa32835SJeff Kirsher } 5649aa32835SJeff Kirsher 5659aa32835SJeff Kirsher adapter->rx_queue.index = 0; 5669aa32835SJeff Kirsher adapter->rx_queue.num_slots = rxq_entries; 5679aa32835SJeff Kirsher adapter->rx_queue.toggle = 1; 5689aa32835SJeff Kirsher 569d746ca95SAnton Blanchard mac_address = ibmveth_encode_mac_addr(netdev->dev_addr); 5709aa32835SJeff Kirsher 5719aa32835SJeff Kirsher rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | 5729aa32835SJeff Kirsher adapter->rx_queue.queue_len; 5739aa32835SJeff Kirsher rxq_desc.fields.address = adapter->rx_queue.queue_dma; 5749aa32835SJeff Kirsher 5759aa32835SJeff Kirsher netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr); 5769aa32835SJeff Kirsher netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr); 5779aa32835SJeff Kirsher netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr); 5789aa32835SJeff Kirsher 5799aa32835SJeff Kirsher h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); 5809aa32835SJeff Kirsher 5819aa32835SJeff Kirsher lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address); 5829aa32835SJeff Kirsher 5839aa32835SJeff Kirsher if (lpar_rc != H_SUCCESS) { 5849aa32835SJeff Kirsher netdev_err(netdev, "h_register_logical_lan failed with %ld\n", 5859aa32835SJeff Kirsher lpar_rc); 5869aa32835SJeff Kirsher netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq " 5879aa32835SJeff Kirsher "desc:0x%llx MAC:0x%llx\n", 5889aa32835SJeff Kirsher adapter->buffer_list_dma, 5899aa32835SJeff Kirsher adapter->filter_list_dma, 5909aa32835SJeff Kirsher rxq_desc.desc, 5919aa32835SJeff Kirsher mac_address); 5929aa32835SJeff Kirsher rc = -ENONET; 593d43732ceSChristoph Hellwig goto out_unmap_filter_list; 5949aa32835SJeff Kirsher } 5959aa32835SJeff Kirsher 5969aa32835SJeff Kirsher for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 5979aa32835SJeff Kirsher if (!adapter->rx_buff_pool[i].active) 5989aa32835SJeff Kirsher continue; 5999aa32835SJeff Kirsher if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) { 6009aa32835SJeff Kirsher netdev_err(netdev, "unable to alloc pool\n"); 6019aa32835SJeff Kirsher adapter->rx_buff_pool[i].active = 0; 6029aa32835SJeff Kirsher rc = -ENOMEM; 603d43732ceSChristoph Hellwig goto out_free_buffer_pools; 6049aa32835SJeff Kirsher } 6059aa32835SJeff Kirsher } 6069aa32835SJeff Kirsher 6079aa32835SJeff Kirsher netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq); 6089aa32835SJeff Kirsher rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, 6099aa32835SJeff Kirsher netdev); 6109aa32835SJeff Kirsher if (rc != 0) { 6119aa32835SJeff Kirsher netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", 6129aa32835SJeff Kirsher netdev->irq, rc); 6139aa32835SJeff Kirsher do { 61488c5100cSDavid S. Miller lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); 61588c5100cSDavid S. Miller } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); 6169aa32835SJeff Kirsher 617d43732ceSChristoph Hellwig goto out_free_buffer_pools; 6189aa32835SJeff Kirsher } 6199aa32835SJeff Kirsher 620d43732ceSChristoph Hellwig rc = -ENOMEM; 6219aa32835SJeff Kirsher adapter->bounce_buffer = 6229aa32835SJeff Kirsher kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); 623d43732ceSChristoph Hellwig if (!adapter->bounce_buffer) 624d43732ceSChristoph Hellwig goto out_free_irq; 625d43732ceSChristoph Hellwig 6269aa32835SJeff Kirsher adapter->bounce_buffer_dma = 6279aa32835SJeff Kirsher dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, 6289aa32835SJeff Kirsher netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); 6299aa32835SJeff Kirsher if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { 6309aa32835SJeff Kirsher netdev_err(netdev, "unable to map bounce buffer\n"); 631d43732ceSChristoph Hellwig goto out_free_bounce_buffer; 6329aa32835SJeff Kirsher } 6339aa32835SJeff Kirsher 6349aa32835SJeff Kirsher netdev_dbg(netdev, "initial replenish cycle\n"); 6359aa32835SJeff Kirsher ibmveth_interrupt(netdev->irq, netdev); 6369aa32835SJeff Kirsher 6379aa32835SJeff Kirsher netif_start_queue(netdev); 6389aa32835SJeff Kirsher 6399aa32835SJeff Kirsher netdev_dbg(netdev, "open complete\n"); 6409aa32835SJeff Kirsher 6419aa32835SJeff Kirsher return 0; 6429aa32835SJeff Kirsher 643d43732ceSChristoph Hellwig out_free_bounce_buffer: 644d43732ceSChristoph Hellwig kfree(adapter->bounce_buffer); 645d43732ceSChristoph Hellwig out_free_irq: 6469aa32835SJeff Kirsher free_irq(netdev->irq, netdev); 647d43732ceSChristoph Hellwig out_free_buffer_pools: 648d43732ceSChristoph Hellwig while (--i >= 0) { 649d43732ceSChristoph Hellwig if (adapter->rx_buff_pool[i].active) 650d43732ceSChristoph Hellwig ibmveth_free_buffer_pool(adapter, 651d43732ceSChristoph Hellwig &adapter->rx_buff_pool[i]); 652d43732ceSChristoph Hellwig } 653d43732ceSChristoph Hellwig out_unmap_filter_list: 654d43732ceSChristoph Hellwig dma_unmap_single(dev, adapter->filter_list_dma, 4096, 655d43732ceSChristoph Hellwig DMA_BIDIRECTIONAL); 656d43732ceSChristoph Hellwig out_unmap_buffer_list: 657d43732ceSChristoph Hellwig dma_unmap_single(dev, adapter->buffer_list_dma, 4096, 658d43732ceSChristoph Hellwig DMA_BIDIRECTIONAL); 659d43732ceSChristoph Hellwig out_free_queue_mem: 660d43732ceSChristoph Hellwig dma_free_coherent(dev, adapter->rx_queue.queue_len, 661d43732ceSChristoph Hellwig adapter->rx_queue.queue_addr, 662d43732ceSChristoph Hellwig adapter->rx_queue.queue_dma); 663d43732ceSChristoph Hellwig out_free_filter_list: 664d43732ceSChristoph Hellwig free_page((unsigned long)adapter->filter_list_addr); 665d43732ceSChristoph Hellwig out_free_buffer_list: 666d43732ceSChristoph Hellwig free_page((unsigned long)adapter->buffer_list_addr); 667d43732ceSChristoph Hellwig out: 6689aa32835SJeff Kirsher napi_disable(&adapter->napi); 6699aa32835SJeff Kirsher return rc; 6709aa32835SJeff Kirsher } 6719aa32835SJeff Kirsher 6729aa32835SJeff Kirsher static int ibmveth_close(struct net_device *netdev) 6739aa32835SJeff Kirsher { 6749aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(netdev); 675d43732ceSChristoph Hellwig struct device *dev = &adapter->vdev->dev; 6769aa32835SJeff Kirsher long lpar_rc; 677d43732ceSChristoph Hellwig int i; 6789aa32835SJeff Kirsher 6799aa32835SJeff Kirsher netdev_dbg(netdev, "close starting\n"); 6809aa32835SJeff Kirsher 6819aa32835SJeff Kirsher napi_disable(&adapter->napi); 6829aa32835SJeff Kirsher 6839aa32835SJeff Kirsher if (!adapter->pool_config) 6849aa32835SJeff Kirsher netif_stop_queue(netdev); 6859aa32835SJeff Kirsher 6869aa32835SJeff Kirsher h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); 6879aa32835SJeff Kirsher 6889aa32835SJeff Kirsher do { 6899aa32835SJeff Kirsher lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); 6909aa32835SJeff Kirsher } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); 6919aa32835SJeff Kirsher 6929aa32835SJeff Kirsher if (lpar_rc != H_SUCCESS) { 6939aa32835SJeff Kirsher netdev_err(netdev, "h_free_logical_lan failed with %lx, " 6949aa32835SJeff Kirsher "continuing with close\n", lpar_rc); 6959aa32835SJeff Kirsher } 6969aa32835SJeff Kirsher 6979aa32835SJeff Kirsher free_irq(netdev->irq, netdev); 6989aa32835SJeff Kirsher 699cbd52281SAnton Blanchard ibmveth_update_rx_no_buffer(adapter); 7009aa32835SJeff Kirsher 701d43732ceSChristoph Hellwig dma_unmap_single(dev, adapter->buffer_list_dma, 4096, 702d43732ceSChristoph Hellwig DMA_BIDIRECTIONAL); 703d43732ceSChristoph Hellwig free_page((unsigned long)adapter->buffer_list_addr); 704d43732ceSChristoph Hellwig 705d43732ceSChristoph Hellwig dma_unmap_single(dev, adapter->filter_list_dma, 4096, 706d43732ceSChristoph Hellwig DMA_BIDIRECTIONAL); 707d43732ceSChristoph Hellwig free_page((unsigned long)adapter->filter_list_addr); 708d43732ceSChristoph Hellwig 709d43732ceSChristoph Hellwig dma_free_coherent(dev, adapter->rx_queue.queue_len, 710d43732ceSChristoph Hellwig adapter->rx_queue.queue_addr, 711d43732ceSChristoph Hellwig adapter->rx_queue.queue_dma); 712d43732ceSChristoph Hellwig 713d43732ceSChristoph Hellwig for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) 714d43732ceSChristoph Hellwig if (adapter->rx_buff_pool[i].active) 715d43732ceSChristoph Hellwig ibmveth_free_buffer_pool(adapter, 716d43732ceSChristoph Hellwig &adapter->rx_buff_pool[i]); 717d43732ceSChristoph Hellwig 718d43732ceSChristoph Hellwig dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma, 719d43732ceSChristoph Hellwig adapter->netdev->mtu + IBMVETH_BUFF_OH, 720d43732ceSChristoph Hellwig DMA_BIDIRECTIONAL); 721d43732ceSChristoph Hellwig kfree(adapter->bounce_buffer); 7229aa32835SJeff Kirsher 7239aa32835SJeff Kirsher netdev_dbg(netdev, "close complete\n"); 7249aa32835SJeff Kirsher 7259aa32835SJeff Kirsher return 0; 7269aa32835SJeff Kirsher } 7279aa32835SJeff Kirsher 7289ce8c2dfSPhilippe Reynes static int netdev_get_link_ksettings(struct net_device *dev, 7299ce8c2dfSPhilippe Reynes struct ethtool_link_ksettings *cmd) 7309aa32835SJeff Kirsher { 7319ce8c2dfSPhilippe Reynes u32 supported, advertising; 7329ce8c2dfSPhilippe Reynes 7339ce8c2dfSPhilippe Reynes supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | 7349aa32835SJeff Kirsher SUPPORTED_FIBRE); 7359ce8c2dfSPhilippe Reynes advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | 7369aa32835SJeff Kirsher ADVERTISED_FIBRE); 7379ce8c2dfSPhilippe Reynes cmd->base.speed = SPEED_1000; 7389ce8c2dfSPhilippe Reynes cmd->base.duplex = DUPLEX_FULL; 7399ce8c2dfSPhilippe Reynes cmd->base.port = PORT_FIBRE; 7409ce8c2dfSPhilippe Reynes cmd->base.phy_address = 0; 7419ce8c2dfSPhilippe Reynes cmd->base.autoneg = AUTONEG_ENABLE; 7429ce8c2dfSPhilippe Reynes 7439ce8c2dfSPhilippe Reynes ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 7449ce8c2dfSPhilippe Reynes supported); 7459ce8c2dfSPhilippe Reynes ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 7469ce8c2dfSPhilippe Reynes advertising); 7479ce8c2dfSPhilippe Reynes 7489aa32835SJeff Kirsher return 0; 7499aa32835SJeff Kirsher } 7509aa32835SJeff Kirsher 7519aa32835SJeff Kirsher static void netdev_get_drvinfo(struct net_device *dev, 7529aa32835SJeff Kirsher struct ethtool_drvinfo *info) 7539aa32835SJeff Kirsher { 7547826d43fSJiri Pirko strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver)); 7557826d43fSJiri Pirko strlcpy(info->version, ibmveth_driver_version, sizeof(info->version)); 7569aa32835SJeff Kirsher } 7579aa32835SJeff Kirsher 758c8f44affSMichał Mirosław static netdev_features_t ibmveth_fix_features(struct net_device *dev, 759c8f44affSMichał Mirosław netdev_features_t features) 7609aa32835SJeff Kirsher { 7619aa32835SJeff Kirsher /* 7629aa32835SJeff Kirsher * Since the ibmveth firmware interface does not have the 7639aa32835SJeff Kirsher * concept of separate tx/rx checksum offload enable, if rx 7649aa32835SJeff Kirsher * checksum is disabled we also have to disable tx checksum 7659aa32835SJeff Kirsher * offload. Once we disable rx checksum offload, we are no 7669aa32835SJeff Kirsher * longer allowed to send tx buffers that are not properly 7679aa32835SJeff Kirsher * checksummed. 7689aa32835SJeff Kirsher */ 7699aa32835SJeff Kirsher 7709aa32835SJeff Kirsher if (!(features & NETIF_F_RXCSUM)) 771a188222bSTom Herbert features &= ~NETIF_F_CSUM_MASK; 7729aa32835SJeff Kirsher 7739aa32835SJeff Kirsher return features; 7749aa32835SJeff Kirsher } 7759aa32835SJeff Kirsher 7769aa32835SJeff Kirsher static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) 7779aa32835SJeff Kirsher { 7789aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(dev); 7799aa32835SJeff Kirsher unsigned long set_attr, clr_attr, ret_attr; 7809aa32835SJeff Kirsher unsigned long set_attr6, clr_attr6; 7818decf868SDavid S. Miller long ret, ret4, ret6; 7829aa32835SJeff Kirsher int rc1 = 0, rc2 = 0; 7839aa32835SJeff Kirsher int restart = 0; 7849aa32835SJeff Kirsher 7859aa32835SJeff Kirsher if (netif_running(dev)) { 7869aa32835SJeff Kirsher restart = 1; 7879aa32835SJeff Kirsher adapter->pool_config = 1; 7889aa32835SJeff Kirsher ibmveth_close(dev); 7899aa32835SJeff Kirsher adapter->pool_config = 0; 7909aa32835SJeff Kirsher } 7919aa32835SJeff Kirsher 7929aa32835SJeff Kirsher set_attr = 0; 7939aa32835SJeff Kirsher clr_attr = 0; 7948decf868SDavid S. Miller set_attr6 = 0; 7958decf868SDavid S. Miller clr_attr6 = 0; 7969aa32835SJeff Kirsher 7979aa32835SJeff Kirsher if (data) { 7989aa32835SJeff Kirsher set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; 7999aa32835SJeff Kirsher set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM; 8009aa32835SJeff Kirsher } else { 8019aa32835SJeff Kirsher clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; 8029aa32835SJeff Kirsher clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM; 8039aa32835SJeff Kirsher } 8049aa32835SJeff Kirsher 8059aa32835SJeff Kirsher ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); 8069aa32835SJeff Kirsher 80766aa0678SSivakumar Krishnasamy if (ret == H_SUCCESS && 8089aa32835SJeff Kirsher (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { 8098decf868SDavid S. Miller ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr, 8109aa32835SJeff Kirsher set_attr, &ret_attr); 8119aa32835SJeff Kirsher 8128decf868SDavid S. Miller if (ret4 != H_SUCCESS) { 8139aa32835SJeff Kirsher netdev_err(dev, "unable to change IPv4 checksum " 8149aa32835SJeff Kirsher "offload settings. %d rc=%ld\n", 8158decf868SDavid S. Miller data, ret4); 8169aa32835SJeff Kirsher 8178decf868SDavid S. Miller h_illan_attributes(adapter->vdev->unit_address, 8189aa32835SJeff Kirsher set_attr, clr_attr, &ret_attr); 8198decf868SDavid S. Miller 8208decf868SDavid S. Miller if (data == 1) 8218decf868SDavid S. Miller dev->features &= ~NETIF_F_IP_CSUM; 8228decf868SDavid S. Miller 8239aa32835SJeff Kirsher } else { 8249aa32835SJeff Kirsher adapter->fw_ipv4_csum_support = data; 8259aa32835SJeff Kirsher } 8269aa32835SJeff Kirsher 8279aa32835SJeff Kirsher ret6 = h_illan_attributes(adapter->vdev->unit_address, 8289aa32835SJeff Kirsher clr_attr6, set_attr6, &ret_attr); 8299aa32835SJeff Kirsher 8309aa32835SJeff Kirsher if (ret6 != H_SUCCESS) { 8319aa32835SJeff Kirsher netdev_err(dev, "unable to change IPv6 checksum " 8329aa32835SJeff Kirsher "offload settings. %d rc=%ld\n", 8338decf868SDavid S. Miller data, ret6); 8349aa32835SJeff Kirsher 8358decf868SDavid S. Miller h_illan_attributes(adapter->vdev->unit_address, 8368decf868SDavid S. Miller set_attr6, clr_attr6, &ret_attr); 8378decf868SDavid S. Miller 8388decf868SDavid S. Miller if (data == 1) 8398decf868SDavid S. Miller dev->features &= ~NETIF_F_IPV6_CSUM; 8408decf868SDavid S. Miller 8419aa32835SJeff Kirsher } else 8429aa32835SJeff Kirsher adapter->fw_ipv6_csum_support = data; 8439aa32835SJeff Kirsher 8448decf868SDavid S. Miller if (ret4 == H_SUCCESS || ret6 == H_SUCCESS) 8459aa32835SJeff Kirsher adapter->rx_csum = data; 8469aa32835SJeff Kirsher else 8479aa32835SJeff Kirsher rc1 = -EIO; 8489aa32835SJeff Kirsher } else { 8499aa32835SJeff Kirsher rc1 = -EIO; 8509aa32835SJeff Kirsher netdev_err(dev, "unable to change checksum offload settings." 8519aa32835SJeff Kirsher " %d rc=%ld ret_attr=%lx\n", data, ret, 8529aa32835SJeff Kirsher ret_attr); 8539aa32835SJeff Kirsher } 8549aa32835SJeff Kirsher 8559aa32835SJeff Kirsher if (restart) 8569aa32835SJeff Kirsher rc2 = ibmveth_open(dev); 8579aa32835SJeff Kirsher 8589aa32835SJeff Kirsher return rc1 ? rc1 : rc2; 8599aa32835SJeff Kirsher } 8609aa32835SJeff Kirsher 86107e6a97dSThomas Falcon static int ibmveth_set_tso(struct net_device *dev, u32 data) 86207e6a97dSThomas Falcon { 86307e6a97dSThomas Falcon struct ibmveth_adapter *adapter = netdev_priv(dev); 86407e6a97dSThomas Falcon unsigned long set_attr, clr_attr, ret_attr; 86507e6a97dSThomas Falcon long ret1, ret2; 86607e6a97dSThomas Falcon int rc1 = 0, rc2 = 0; 86707e6a97dSThomas Falcon int restart = 0; 86807e6a97dSThomas Falcon 86907e6a97dSThomas Falcon if (netif_running(dev)) { 87007e6a97dSThomas Falcon restart = 1; 87107e6a97dSThomas Falcon adapter->pool_config = 1; 87207e6a97dSThomas Falcon ibmveth_close(dev); 87307e6a97dSThomas Falcon adapter->pool_config = 0; 87407e6a97dSThomas Falcon } 87507e6a97dSThomas Falcon 87607e6a97dSThomas Falcon set_attr = 0; 87707e6a97dSThomas Falcon clr_attr = 0; 87807e6a97dSThomas Falcon 87907e6a97dSThomas Falcon if (data) 88007e6a97dSThomas Falcon set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED; 88107e6a97dSThomas Falcon else 88207e6a97dSThomas Falcon clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED; 88307e6a97dSThomas Falcon 88407e6a97dSThomas Falcon ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); 88507e6a97dSThomas Falcon 88607e6a97dSThomas Falcon if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) && 88707e6a97dSThomas Falcon !old_large_send) { 88807e6a97dSThomas Falcon ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr, 88907e6a97dSThomas Falcon set_attr, &ret_attr); 89007e6a97dSThomas Falcon 89107e6a97dSThomas Falcon if (ret2 != H_SUCCESS) { 89207e6a97dSThomas Falcon netdev_err(dev, "unable to change tso settings. %d rc=%ld\n", 89307e6a97dSThomas Falcon data, ret2); 89407e6a97dSThomas Falcon 89507e6a97dSThomas Falcon h_illan_attributes(adapter->vdev->unit_address, 89607e6a97dSThomas Falcon set_attr, clr_attr, &ret_attr); 89707e6a97dSThomas Falcon 89807e6a97dSThomas Falcon if (data == 1) 89907e6a97dSThomas Falcon dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 90007e6a97dSThomas Falcon rc1 = -EIO; 90107e6a97dSThomas Falcon 90207e6a97dSThomas Falcon } else { 90307e6a97dSThomas Falcon adapter->fw_large_send_support = data; 90407e6a97dSThomas Falcon adapter->large_send = data; 90507e6a97dSThomas Falcon } 90607e6a97dSThomas Falcon } else { 90707e6a97dSThomas Falcon /* Older firmware version of large send offload does not 90807e6a97dSThomas Falcon * support tcp6/ipv6 90907e6a97dSThomas Falcon */ 91007e6a97dSThomas Falcon if (data == 1) { 91107e6a97dSThomas Falcon dev->features &= ~NETIF_F_TSO6; 91207e6a97dSThomas Falcon netdev_info(dev, "TSO feature requires all partitions to have updated driver"); 91307e6a97dSThomas Falcon } 91407e6a97dSThomas Falcon adapter->large_send = data; 91507e6a97dSThomas Falcon } 91607e6a97dSThomas Falcon 91707e6a97dSThomas Falcon if (restart) 91807e6a97dSThomas Falcon rc2 = ibmveth_open(dev); 91907e6a97dSThomas Falcon 92007e6a97dSThomas Falcon return rc1 ? rc1 : rc2; 92107e6a97dSThomas Falcon } 92207e6a97dSThomas Falcon 923c8f44affSMichał Mirosław static int ibmveth_set_features(struct net_device *dev, 924c8f44affSMichał Mirosław netdev_features_t features) 9259aa32835SJeff Kirsher { 9269aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(dev); 9279aa32835SJeff Kirsher int rx_csum = !!(features & NETIF_F_RXCSUM); 92807e6a97dSThomas Falcon int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6)); 92907e6a97dSThomas Falcon int rc1 = 0, rc2 = 0; 9308641dd85SThomas Falcon 93107e6a97dSThomas Falcon if (rx_csum != adapter->rx_csum) { 93207e6a97dSThomas Falcon rc1 = ibmveth_set_csum_offload(dev, rx_csum); 93307e6a97dSThomas Falcon if (rc1 && !adapter->rx_csum) 93407e6a97dSThomas Falcon dev->features = 935a188222bSTom Herbert features & ~(NETIF_F_CSUM_MASK | 936a188222bSTom Herbert NETIF_F_RXCSUM); 93707e6a97dSThomas Falcon } 9389aa32835SJeff Kirsher 93907e6a97dSThomas Falcon if (large_send != adapter->large_send) { 94007e6a97dSThomas Falcon rc2 = ibmveth_set_tso(dev, large_send); 94107e6a97dSThomas Falcon if (rc2 && !adapter->large_send) 94207e6a97dSThomas Falcon dev->features = 94307e6a97dSThomas Falcon features & ~(NETIF_F_TSO | NETIF_F_TSO6); 94407e6a97dSThomas Falcon } 9459aa32835SJeff Kirsher 94607e6a97dSThomas Falcon return rc1 ? rc1 : rc2; 9479aa32835SJeff Kirsher } 9489aa32835SJeff Kirsher 9499aa32835SJeff Kirsher static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data) 9509aa32835SJeff Kirsher { 9519aa32835SJeff Kirsher int i; 9529aa32835SJeff Kirsher 9539aa32835SJeff Kirsher if (stringset != ETH_SS_STATS) 9549aa32835SJeff Kirsher return; 9559aa32835SJeff Kirsher 9569aa32835SJeff Kirsher for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN) 9579aa32835SJeff Kirsher memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN); 9589aa32835SJeff Kirsher } 9599aa32835SJeff Kirsher 9609aa32835SJeff Kirsher static int ibmveth_get_sset_count(struct net_device *dev, int sset) 9619aa32835SJeff Kirsher { 9629aa32835SJeff Kirsher switch (sset) { 9639aa32835SJeff Kirsher case ETH_SS_STATS: 9649aa32835SJeff Kirsher return ARRAY_SIZE(ibmveth_stats); 9659aa32835SJeff Kirsher default: 9669aa32835SJeff Kirsher return -EOPNOTSUPP; 9679aa32835SJeff Kirsher } 9689aa32835SJeff Kirsher } 9699aa32835SJeff Kirsher 9709aa32835SJeff Kirsher static void ibmveth_get_ethtool_stats(struct net_device *dev, 9719aa32835SJeff Kirsher struct ethtool_stats *stats, u64 *data) 9729aa32835SJeff Kirsher { 9739aa32835SJeff Kirsher int i; 9749aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(dev); 9759aa32835SJeff Kirsher 9769aa32835SJeff Kirsher for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++) 9779aa32835SJeff Kirsher data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset); 9789aa32835SJeff Kirsher } 9799aa32835SJeff Kirsher 9809aa32835SJeff Kirsher static const struct ethtool_ops netdev_ethtool_ops = { 9819aa32835SJeff Kirsher .get_drvinfo = netdev_get_drvinfo, 9829aa32835SJeff Kirsher .get_link = ethtool_op_get_link, 9839aa32835SJeff Kirsher .get_strings = ibmveth_get_strings, 9849aa32835SJeff Kirsher .get_sset_count = ibmveth_get_sset_count, 9859aa32835SJeff Kirsher .get_ethtool_stats = ibmveth_get_ethtool_stats, 9869ce8c2dfSPhilippe Reynes .get_link_ksettings = netdev_get_link_ksettings, 9879aa32835SJeff Kirsher }; 9889aa32835SJeff Kirsher 9899aa32835SJeff Kirsher static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 9909aa32835SJeff Kirsher { 9919aa32835SJeff Kirsher return -EOPNOTSUPP; 9929aa32835SJeff Kirsher } 9939aa32835SJeff Kirsher 9949aa32835SJeff Kirsher #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1)) 9959aa32835SJeff Kirsher 9969aa32835SJeff Kirsher static int ibmveth_send(struct ibmveth_adapter *adapter, 99707e6a97dSThomas Falcon union ibmveth_buf_desc *descs, unsigned long mss) 9989aa32835SJeff Kirsher { 9999aa32835SJeff Kirsher unsigned long correlator; 10009aa32835SJeff Kirsher unsigned int retry_count; 10019aa32835SJeff Kirsher unsigned long ret; 10029aa32835SJeff Kirsher 10039aa32835SJeff Kirsher /* 10049aa32835SJeff Kirsher * The retry count sets a maximum for the number of broadcast and 10059aa32835SJeff Kirsher * multicast destinations within the system. 10069aa32835SJeff Kirsher */ 10079aa32835SJeff Kirsher retry_count = 1024; 10089aa32835SJeff Kirsher correlator = 0; 10099aa32835SJeff Kirsher do { 10109aa32835SJeff Kirsher ret = h_send_logical_lan(adapter->vdev->unit_address, 10119aa32835SJeff Kirsher descs[0].desc, descs[1].desc, 10129aa32835SJeff Kirsher descs[2].desc, descs[3].desc, 10139aa32835SJeff Kirsher descs[4].desc, descs[5].desc, 101407e6a97dSThomas Falcon correlator, &correlator, mss, 101507e6a97dSThomas Falcon adapter->fw_large_send_support); 10169aa32835SJeff Kirsher } while ((ret == H_BUSY) && (retry_count--)); 10179aa32835SJeff Kirsher 10189aa32835SJeff Kirsher if (ret != H_SUCCESS && ret != H_DROPPED) { 10199aa32835SJeff Kirsher netdev_err(adapter->netdev, "tx: h_send_logical_lan failed " 10209aa32835SJeff Kirsher "with rc=%ld\n", ret); 10219aa32835SJeff Kirsher return 1; 10229aa32835SJeff Kirsher } 10239aa32835SJeff Kirsher 10249aa32835SJeff Kirsher return 0; 10259aa32835SJeff Kirsher } 10269aa32835SJeff Kirsher 10279aa32835SJeff Kirsher static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, 10289aa32835SJeff Kirsher struct net_device *netdev) 10299aa32835SJeff Kirsher { 10309aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(netdev); 10319aa32835SJeff Kirsher unsigned int desc_flags; 10329aa32835SJeff Kirsher union ibmveth_buf_desc descs[6]; 10339aa32835SJeff Kirsher int last, i; 10349aa32835SJeff Kirsher int force_bounce = 0; 10358decf868SDavid S. Miller dma_addr_t dma_addr; 103607e6a97dSThomas Falcon unsigned long mss = 0; 10379aa32835SJeff Kirsher 103866aa0678SSivakumar Krishnasamy /* veth doesn't handle frag_list, so linearize the skb. 103966aa0678SSivakumar Krishnasamy * When GRO is enabled SKB's can have frag_list. 104066aa0678SSivakumar Krishnasamy */ 104166aa0678SSivakumar Krishnasamy if (adapter->is_active_trunk && 104266aa0678SSivakumar Krishnasamy skb_has_frag_list(skb) && __skb_linearize(skb)) { 104366aa0678SSivakumar Krishnasamy netdev->stats.tx_dropped++; 104466aa0678SSivakumar Krishnasamy goto out; 104566aa0678SSivakumar Krishnasamy } 104666aa0678SSivakumar Krishnasamy 10479aa32835SJeff Kirsher /* 10489aa32835SJeff Kirsher * veth handles a maximum of 6 segments including the header, so 10499aa32835SJeff Kirsher * we have to linearize the skb if there are more than this. 10509aa32835SJeff Kirsher */ 10519aa32835SJeff Kirsher if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) { 10529aa32835SJeff Kirsher netdev->stats.tx_dropped++; 10539aa32835SJeff Kirsher goto out; 10549aa32835SJeff Kirsher } 10559aa32835SJeff Kirsher 10569aa32835SJeff Kirsher /* veth can't checksum offload UDP */ 10579aa32835SJeff Kirsher if (skb->ip_summed == CHECKSUM_PARTIAL && 10589aa32835SJeff Kirsher ((skb->protocol == htons(ETH_P_IP) && 10599aa32835SJeff Kirsher ip_hdr(skb)->protocol != IPPROTO_TCP) || 10609aa32835SJeff Kirsher (skb->protocol == htons(ETH_P_IPV6) && 10619aa32835SJeff Kirsher ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) && 10629aa32835SJeff Kirsher skb_checksum_help(skb)) { 10639aa32835SJeff Kirsher 10649aa32835SJeff Kirsher netdev_err(netdev, "tx: failed to checksum packet\n"); 10659aa32835SJeff Kirsher netdev->stats.tx_dropped++; 10669aa32835SJeff Kirsher goto out; 10679aa32835SJeff Kirsher } 10689aa32835SJeff Kirsher 10699aa32835SJeff Kirsher desc_flags = IBMVETH_BUF_VALID; 10709aa32835SJeff Kirsher 10719aa32835SJeff Kirsher if (skb->ip_summed == CHECKSUM_PARTIAL) { 10729aa32835SJeff Kirsher unsigned char *buf = skb_transport_header(skb) + 10739aa32835SJeff Kirsher skb->csum_offset; 10749aa32835SJeff Kirsher 10759aa32835SJeff Kirsher desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD); 10769aa32835SJeff Kirsher 10779aa32835SJeff Kirsher /* Need to zero out the checksum */ 10789aa32835SJeff Kirsher buf[0] = 0; 10799aa32835SJeff Kirsher buf[1] = 0; 108066aa0678SSivakumar Krishnasamy 108166aa0678SSivakumar Krishnasamy if (skb_is_gso(skb) && adapter->fw_large_send_support) 108266aa0678SSivakumar Krishnasamy desc_flags |= IBMVETH_BUF_LRG_SND; 10839aa32835SJeff Kirsher } 10849aa32835SJeff Kirsher 10859aa32835SJeff Kirsher retry_bounce: 10869aa32835SJeff Kirsher memset(descs, 0, sizeof(descs)); 10879aa32835SJeff Kirsher 10889aa32835SJeff Kirsher /* 10899aa32835SJeff Kirsher * If a linear packet is below the rx threshold then 10909aa32835SJeff Kirsher * copy it into the static bounce buffer. This avoids the 10919aa32835SJeff Kirsher * cost of a TCE insert and remove. 10929aa32835SJeff Kirsher */ 10939aa32835SJeff Kirsher if (force_bounce || (!skb_is_nonlinear(skb) && 10949aa32835SJeff Kirsher (skb->len < tx_copybreak))) { 10959aa32835SJeff Kirsher skb_copy_from_linear_data(skb, adapter->bounce_buffer, 10969aa32835SJeff Kirsher skb->len); 10979aa32835SJeff Kirsher 10989aa32835SJeff Kirsher descs[0].fields.flags_len = desc_flags | skb->len; 10999aa32835SJeff Kirsher descs[0].fields.address = adapter->bounce_buffer_dma; 11009aa32835SJeff Kirsher 110107e6a97dSThomas Falcon if (ibmveth_send(adapter, descs, 0)) { 11029aa32835SJeff Kirsher adapter->tx_send_failed++; 11039aa32835SJeff Kirsher netdev->stats.tx_dropped++; 11049aa32835SJeff Kirsher } else { 11059aa32835SJeff Kirsher netdev->stats.tx_packets++; 11069aa32835SJeff Kirsher netdev->stats.tx_bytes += skb->len; 11079aa32835SJeff Kirsher } 11089aa32835SJeff Kirsher 11099aa32835SJeff Kirsher goto out; 11109aa32835SJeff Kirsher } 11119aa32835SJeff Kirsher 11129aa32835SJeff Kirsher /* Map the header */ 11138decf868SDavid S. Miller dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 11148decf868SDavid S. Miller skb_headlen(skb), DMA_TO_DEVICE); 11158decf868SDavid S. Miller if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) 11169aa32835SJeff Kirsher goto map_failed; 11179aa32835SJeff Kirsher 11189aa32835SJeff Kirsher descs[0].fields.flags_len = desc_flags | skb_headlen(skb); 11198decf868SDavid S. Miller descs[0].fields.address = dma_addr; 11209aa32835SJeff Kirsher 11219aa32835SJeff Kirsher /* Map the frags */ 11229aa32835SJeff Kirsher for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 11239e903e08SEric Dumazet const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 11249aa32835SJeff Kirsher 11258838a538SIan Campbell dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0, 11269e903e08SEric Dumazet skb_frag_size(frag), DMA_TO_DEVICE); 11279aa32835SJeff Kirsher 11289aa32835SJeff Kirsher if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) 11299aa32835SJeff Kirsher goto map_failed_frags; 11309aa32835SJeff Kirsher 11319e903e08SEric Dumazet descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag); 11329aa32835SJeff Kirsher descs[i+1].fields.address = dma_addr; 11339aa32835SJeff Kirsher } 11349aa32835SJeff Kirsher 113566aa0678SSivakumar Krishnasamy if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) { 113607e6a97dSThomas Falcon if (adapter->fw_large_send_support) { 113707e6a97dSThomas Falcon mss = (unsigned long)skb_shinfo(skb)->gso_size; 113807e6a97dSThomas Falcon adapter->tx_large_packets++; 113907e6a97dSThomas Falcon } else if (!skb_is_gso_v6(skb)) { 11408641dd85SThomas Falcon /* Put -1 in the IP checksum to tell phyp it 114107e6a97dSThomas Falcon * is a largesend packet. Put the mss in 114207e6a97dSThomas Falcon * the TCP checksum. 11438641dd85SThomas Falcon */ 11448641dd85SThomas Falcon ip_hdr(skb)->check = 0xffff; 114507e6a97dSThomas Falcon tcp_hdr(skb)->check = 114607e6a97dSThomas Falcon cpu_to_be16(skb_shinfo(skb)->gso_size); 11478641dd85SThomas Falcon adapter->tx_large_packets++; 11488641dd85SThomas Falcon } 114907e6a97dSThomas Falcon } 11508641dd85SThomas Falcon 115107e6a97dSThomas Falcon if (ibmveth_send(adapter, descs, mss)) { 11529aa32835SJeff Kirsher adapter->tx_send_failed++; 11539aa32835SJeff Kirsher netdev->stats.tx_dropped++; 11549aa32835SJeff Kirsher } else { 11559aa32835SJeff Kirsher netdev->stats.tx_packets++; 11569aa32835SJeff Kirsher netdev->stats.tx_bytes += skb->len; 11579aa32835SJeff Kirsher } 11589aa32835SJeff Kirsher 11598decf868SDavid S. Miller dma_unmap_single(&adapter->vdev->dev, 11608decf868SDavid S. Miller descs[0].fields.address, 11618decf868SDavid S. Miller descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK, 11628decf868SDavid S. Miller DMA_TO_DEVICE); 11638decf868SDavid S. Miller 11648decf868SDavid S. Miller for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) 11659aa32835SJeff Kirsher dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, 11669aa32835SJeff Kirsher descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, 11679aa32835SJeff Kirsher DMA_TO_DEVICE); 11689aa32835SJeff Kirsher 11699aa32835SJeff Kirsher out: 117026faa9d7SEric W. Biederman dev_consume_skb_any(skb); 11719aa32835SJeff Kirsher return NETDEV_TX_OK; 11729aa32835SJeff Kirsher 11739aa32835SJeff Kirsher map_failed_frags: 11749aa32835SJeff Kirsher last = i+1; 11759aa32835SJeff Kirsher for (i = 0; i < last; i++) 11769aa32835SJeff Kirsher dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, 11779aa32835SJeff Kirsher descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, 11789aa32835SJeff Kirsher DMA_TO_DEVICE); 11799aa32835SJeff Kirsher 11809aa32835SJeff Kirsher map_failed: 11819aa32835SJeff Kirsher if (!firmware_has_feature(FW_FEATURE_CMO)) 11829aa32835SJeff Kirsher netdev_err(netdev, "tx: unable to map xmit buffer\n"); 11839aa32835SJeff Kirsher adapter->tx_map_failed++; 11842c42bf4bSThomas Falcon if (skb_linearize(skb)) { 11852c42bf4bSThomas Falcon netdev->stats.tx_dropped++; 11862c42bf4bSThomas Falcon goto out; 11872c42bf4bSThomas Falcon } 11889aa32835SJeff Kirsher force_bounce = 1; 11899aa32835SJeff Kirsher goto retry_bounce; 11909aa32835SJeff Kirsher } 11919aa32835SJeff Kirsher 11927b596738SThomas Falcon static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt) 11937b596738SThomas Falcon { 119494acf164SThomas Falcon struct tcphdr *tcph; 11957b596738SThomas Falcon int offset = 0; 119694acf164SThomas Falcon int hdr_len; 11977b596738SThomas Falcon 11987b596738SThomas Falcon /* only TCP packets will be aggregated */ 11997b596738SThomas Falcon if (skb->protocol == htons(ETH_P_IP)) { 12007b596738SThomas Falcon struct iphdr *iph = (struct iphdr *)skb->data; 12017b596738SThomas Falcon 12027b596738SThomas Falcon if (iph->protocol == IPPROTO_TCP) { 12037b596738SThomas Falcon offset = iph->ihl * 4; 12047b596738SThomas Falcon skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 12057b596738SThomas Falcon } else { 12067b596738SThomas Falcon return; 12077b596738SThomas Falcon } 12087b596738SThomas Falcon } else if (skb->protocol == htons(ETH_P_IPV6)) { 12097b596738SThomas Falcon struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data; 12107b596738SThomas Falcon 12117b596738SThomas Falcon if (iph6->nexthdr == IPPROTO_TCP) { 12127b596738SThomas Falcon offset = sizeof(struct ipv6hdr); 12137b596738SThomas Falcon skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 12147b596738SThomas Falcon } else { 12157b596738SThomas Falcon return; 12167b596738SThomas Falcon } 12177b596738SThomas Falcon } else { 12187b596738SThomas Falcon return; 12197b596738SThomas Falcon } 12207b596738SThomas Falcon /* if mss is not set through Large Packet bit/mss in rx buffer, 12217b596738SThomas Falcon * expect that the mss will be written to the tcp header checksum. 12227b596738SThomas Falcon */ 122394acf164SThomas Falcon tcph = (struct tcphdr *)(skb->data + offset); 12247b596738SThomas Falcon if (lrg_pkt) { 12257b596738SThomas Falcon skb_shinfo(skb)->gso_size = mss; 12267b596738SThomas Falcon } else if (offset) { 12277b596738SThomas Falcon skb_shinfo(skb)->gso_size = ntohs(tcph->check); 12287b596738SThomas Falcon tcph->check = 0; 12297b596738SThomas Falcon } 123094acf164SThomas Falcon 123194acf164SThomas Falcon if (skb_shinfo(skb)->gso_size) { 123294acf164SThomas Falcon hdr_len = offset + tcph->doff * 4; 123394acf164SThomas Falcon skb_shinfo(skb)->gso_segs = 123494acf164SThomas Falcon DIV_ROUND_UP(skb->len - hdr_len, 123594acf164SThomas Falcon skb_shinfo(skb)->gso_size); 123694acf164SThomas Falcon } 12377b596738SThomas Falcon } 12387b596738SThomas Falcon 123966aa0678SSivakumar Krishnasamy static void ibmveth_rx_csum_helper(struct sk_buff *skb, 124066aa0678SSivakumar Krishnasamy struct ibmveth_adapter *adapter) 124166aa0678SSivakumar Krishnasamy { 124266aa0678SSivakumar Krishnasamy struct iphdr *iph = NULL; 124366aa0678SSivakumar Krishnasamy struct ipv6hdr *iph6 = NULL; 124466aa0678SSivakumar Krishnasamy __be16 skb_proto = 0; 124566aa0678SSivakumar Krishnasamy u16 iphlen = 0; 124666aa0678SSivakumar Krishnasamy u16 iph_proto = 0; 124766aa0678SSivakumar Krishnasamy u16 tcphdrlen = 0; 124866aa0678SSivakumar Krishnasamy 124966aa0678SSivakumar Krishnasamy skb_proto = be16_to_cpu(skb->protocol); 125066aa0678SSivakumar Krishnasamy 125166aa0678SSivakumar Krishnasamy if (skb_proto == ETH_P_IP) { 125266aa0678SSivakumar Krishnasamy iph = (struct iphdr *)skb->data; 125366aa0678SSivakumar Krishnasamy 125466aa0678SSivakumar Krishnasamy /* If the IP checksum is not offloaded and if the packet 125566aa0678SSivakumar Krishnasamy * is large send, the checksum must be rebuilt. 125666aa0678SSivakumar Krishnasamy */ 125766aa0678SSivakumar Krishnasamy if (iph->check == 0xffff) { 125866aa0678SSivakumar Krishnasamy iph->check = 0; 125966aa0678SSivakumar Krishnasamy iph->check = ip_fast_csum((unsigned char *)iph, 126066aa0678SSivakumar Krishnasamy iph->ihl); 126166aa0678SSivakumar Krishnasamy } 126266aa0678SSivakumar Krishnasamy 126366aa0678SSivakumar Krishnasamy iphlen = iph->ihl * 4; 126466aa0678SSivakumar Krishnasamy iph_proto = iph->protocol; 126566aa0678SSivakumar Krishnasamy } else if (skb_proto == ETH_P_IPV6) { 126666aa0678SSivakumar Krishnasamy iph6 = (struct ipv6hdr *)skb->data; 126766aa0678SSivakumar Krishnasamy iphlen = sizeof(struct ipv6hdr); 126866aa0678SSivakumar Krishnasamy iph_proto = iph6->nexthdr; 126966aa0678SSivakumar Krishnasamy } 127066aa0678SSivakumar Krishnasamy 127166aa0678SSivakumar Krishnasamy /* In OVS environment, when a flow is not cached, specifically for a 127266aa0678SSivakumar Krishnasamy * new TCP connection, the first packet information is passed up 127366aa0678SSivakumar Krishnasamy * the user space for finding a flow. During this process, OVS computes 127466aa0678SSivakumar Krishnasamy * checksum on the first packet when CHECKSUM_PARTIAL flag is set. 127566aa0678SSivakumar Krishnasamy * 127666aa0678SSivakumar Krishnasamy * Given that we zeroed out TCP checksum field in transmit path 127766aa0678SSivakumar Krishnasamy * (refer ibmveth_start_xmit routine) as we set "no checksum bit", 127866aa0678SSivakumar Krishnasamy * OVS computed checksum will be incorrect w/o TCP pseudo checksum 127966aa0678SSivakumar Krishnasamy * in the packet. This leads to OVS dropping the packet and hence 128066aa0678SSivakumar Krishnasamy * TCP retransmissions are seen. 128166aa0678SSivakumar Krishnasamy * 128266aa0678SSivakumar Krishnasamy * So, re-compute TCP pseudo header checksum. 128366aa0678SSivakumar Krishnasamy */ 128466aa0678SSivakumar Krishnasamy if (iph_proto == IPPROTO_TCP && adapter->is_active_trunk) { 128566aa0678SSivakumar Krishnasamy struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen); 128666aa0678SSivakumar Krishnasamy 128766aa0678SSivakumar Krishnasamy tcphdrlen = skb->len - iphlen; 128866aa0678SSivakumar Krishnasamy 128966aa0678SSivakumar Krishnasamy /* Recompute TCP pseudo header checksum */ 129066aa0678SSivakumar Krishnasamy if (skb_proto == ETH_P_IP) 129166aa0678SSivakumar Krishnasamy tcph->check = ~csum_tcpudp_magic(iph->saddr, 129266aa0678SSivakumar Krishnasamy iph->daddr, tcphdrlen, iph_proto, 0); 129366aa0678SSivakumar Krishnasamy else if (skb_proto == ETH_P_IPV6) 129466aa0678SSivakumar Krishnasamy tcph->check = ~csum_ipv6_magic(&iph6->saddr, 129566aa0678SSivakumar Krishnasamy &iph6->daddr, tcphdrlen, iph_proto, 0); 129666aa0678SSivakumar Krishnasamy 129766aa0678SSivakumar Krishnasamy /* Setup SKB fields for checksum offload */ 129866aa0678SSivakumar Krishnasamy skb_partial_csum_set(skb, iphlen, 129966aa0678SSivakumar Krishnasamy offsetof(struct tcphdr, check)); 130066aa0678SSivakumar Krishnasamy skb_reset_network_header(skb); 130166aa0678SSivakumar Krishnasamy } 130266aa0678SSivakumar Krishnasamy } 130366aa0678SSivakumar Krishnasamy 13049aa32835SJeff Kirsher static int ibmveth_poll(struct napi_struct *napi, int budget) 13059aa32835SJeff Kirsher { 13069aa32835SJeff Kirsher struct ibmveth_adapter *adapter = 13079aa32835SJeff Kirsher container_of(napi, struct ibmveth_adapter, napi); 13089aa32835SJeff Kirsher struct net_device *netdev = adapter->netdev; 13099aa32835SJeff Kirsher int frames_processed = 0; 13109aa32835SJeff Kirsher unsigned long lpar_rc; 13117b596738SThomas Falcon u16 mss = 0; 13129aa32835SJeff Kirsher 13139aa32835SJeff Kirsher restart_poll: 1314cb013ea1SEric W. Biederman while (frames_processed < budget) { 13159aa32835SJeff Kirsher if (!ibmveth_rxq_pending_buffer(adapter)) 13169aa32835SJeff Kirsher break; 13179aa32835SJeff Kirsher 13189aa32835SJeff Kirsher smp_rmb(); 13199aa32835SJeff Kirsher if (!ibmveth_rxq_buffer_valid(adapter)) { 13209aa32835SJeff Kirsher wmb(); /* suggested by larson1 */ 13219aa32835SJeff Kirsher adapter->rx_invalid_buffer++; 13229aa32835SJeff Kirsher netdev_dbg(netdev, "recycling invalid buffer\n"); 13239aa32835SJeff Kirsher ibmveth_rxq_recycle_buffer(adapter); 13249aa32835SJeff Kirsher } else { 13259aa32835SJeff Kirsher struct sk_buff *skb, *new_skb; 13269aa32835SJeff Kirsher int length = ibmveth_rxq_frame_length(adapter); 13279aa32835SJeff Kirsher int offset = ibmveth_rxq_frame_offset(adapter); 13289aa32835SJeff Kirsher int csum_good = ibmveth_rxq_csum_good(adapter); 13297b596738SThomas Falcon int lrg_pkt = ibmveth_rxq_large_packet(adapter); 13309aa32835SJeff Kirsher 13319aa32835SJeff Kirsher skb = ibmveth_rxq_get_buffer(adapter); 13329aa32835SJeff Kirsher 13337b596738SThomas Falcon /* if the large packet bit is set in the rx queue 13347b596738SThomas Falcon * descriptor, the mss will be written by PHYP eight 13357b596738SThomas Falcon * bytes from the start of the rx buffer, which is 13367b596738SThomas Falcon * skb->data at this stage 13377b596738SThomas Falcon */ 13387b596738SThomas Falcon if (lrg_pkt) { 13397b596738SThomas Falcon __be64 *rxmss = (__be64 *)(skb->data + 8); 13407b596738SThomas Falcon 13417b596738SThomas Falcon mss = (u16)be64_to_cpu(*rxmss); 13427b596738SThomas Falcon } 13437b596738SThomas Falcon 13449aa32835SJeff Kirsher new_skb = NULL; 13459aa32835SJeff Kirsher if (length < rx_copybreak) 13469aa32835SJeff Kirsher new_skb = netdev_alloc_skb(netdev, length); 13479aa32835SJeff Kirsher 13489aa32835SJeff Kirsher if (new_skb) { 13499aa32835SJeff Kirsher skb_copy_to_linear_data(new_skb, 13509aa32835SJeff Kirsher skb->data + offset, 13519aa32835SJeff Kirsher length); 13529aa32835SJeff Kirsher if (rx_flush) 13539aa32835SJeff Kirsher ibmveth_flush_buffer(skb->data, 13549aa32835SJeff Kirsher length + offset); 13558decf868SDavid S. Miller if (!ibmveth_rxq_recycle_buffer(adapter)) 13568decf868SDavid S. Miller kfree_skb(skb); 13579aa32835SJeff Kirsher skb = new_skb; 13589aa32835SJeff Kirsher } else { 13599aa32835SJeff Kirsher ibmveth_rxq_harvest_buffer(adapter); 13609aa32835SJeff Kirsher skb_reserve(skb, offset); 13619aa32835SJeff Kirsher } 13629aa32835SJeff Kirsher 13639aa32835SJeff Kirsher skb_put(skb, length); 13649aa32835SJeff Kirsher skb->protocol = eth_type_trans(skb, netdev); 13659aa32835SJeff Kirsher 13669c7e8bc5SThomas Falcon if (csum_good) { 13679aa32835SJeff Kirsher skb->ip_summed = CHECKSUM_UNNECESSARY; 136866aa0678SSivakumar Krishnasamy ibmveth_rx_csum_helper(skb, adapter); 13697b596738SThomas Falcon } 13707b596738SThomas Falcon 13717b596738SThomas Falcon if (length > netdev->mtu + ETH_HLEN) { 13727b596738SThomas Falcon ibmveth_rx_mss_helper(skb, mss, lrg_pkt); 13739c7e8bc5SThomas Falcon adapter->rx_large_packets++; 13749c7e8bc5SThomas Falcon } 13759aa32835SJeff Kirsher 137692ec8279SThomas Falcon napi_gro_receive(napi, skb); /* send it up */ 13779aa32835SJeff Kirsher 13789aa32835SJeff Kirsher netdev->stats.rx_packets++; 13799aa32835SJeff Kirsher netdev->stats.rx_bytes += length; 13809aa32835SJeff Kirsher frames_processed++; 13819aa32835SJeff Kirsher } 1382cb013ea1SEric W. Biederman } 13839aa32835SJeff Kirsher 13849aa32835SJeff Kirsher ibmveth_replenish_task(adapter); 13859aa32835SJeff Kirsher 13869aa32835SJeff Kirsher if (frames_processed < budget) { 13876ad20165SEric Dumazet napi_complete_done(napi, frames_processed); 13884736edc7SYongbae Park 13899aa32835SJeff Kirsher /* We think we are done - reenable interrupts, 13909aa32835SJeff Kirsher * then check once more to make sure we are done. 13919aa32835SJeff Kirsher */ 13929aa32835SJeff Kirsher lpar_rc = h_vio_signal(adapter->vdev->unit_address, 13939aa32835SJeff Kirsher VIO_IRQ_ENABLE); 13949aa32835SJeff Kirsher 13959aa32835SJeff Kirsher BUG_ON(lpar_rc != H_SUCCESS); 13969aa32835SJeff Kirsher 13979aa32835SJeff Kirsher if (ibmveth_rxq_pending_buffer(adapter) && 13989aa32835SJeff Kirsher napi_reschedule(napi)) { 13999aa32835SJeff Kirsher lpar_rc = h_vio_signal(adapter->vdev->unit_address, 14009aa32835SJeff Kirsher VIO_IRQ_DISABLE); 14019aa32835SJeff Kirsher goto restart_poll; 14029aa32835SJeff Kirsher } 14039aa32835SJeff Kirsher } 14049aa32835SJeff Kirsher 14059aa32835SJeff Kirsher return frames_processed; 14069aa32835SJeff Kirsher } 14079aa32835SJeff Kirsher 14089aa32835SJeff Kirsher static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) 14099aa32835SJeff Kirsher { 14109aa32835SJeff Kirsher struct net_device *netdev = dev_instance; 14119aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(netdev); 14129aa32835SJeff Kirsher unsigned long lpar_rc; 14139aa32835SJeff Kirsher 14149aa32835SJeff Kirsher if (napi_schedule_prep(&adapter->napi)) { 14159aa32835SJeff Kirsher lpar_rc = h_vio_signal(adapter->vdev->unit_address, 14169aa32835SJeff Kirsher VIO_IRQ_DISABLE); 14179aa32835SJeff Kirsher BUG_ON(lpar_rc != H_SUCCESS); 14189aa32835SJeff Kirsher __napi_schedule(&adapter->napi); 14199aa32835SJeff Kirsher } 14209aa32835SJeff Kirsher return IRQ_HANDLED; 14219aa32835SJeff Kirsher } 14229aa32835SJeff Kirsher 14239aa32835SJeff Kirsher static void ibmveth_set_multicast_list(struct net_device *netdev) 14249aa32835SJeff Kirsher { 14259aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(netdev); 14269aa32835SJeff Kirsher unsigned long lpar_rc; 14279aa32835SJeff Kirsher 14289aa32835SJeff Kirsher if ((netdev->flags & IFF_PROMISC) || 14299aa32835SJeff Kirsher (netdev_mc_count(netdev) > adapter->mcastFilterSize)) { 14309aa32835SJeff Kirsher lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 14319aa32835SJeff Kirsher IbmVethMcastEnableRecv | 14329aa32835SJeff Kirsher IbmVethMcastDisableFiltering, 14339aa32835SJeff Kirsher 0); 14349aa32835SJeff Kirsher if (lpar_rc != H_SUCCESS) { 14359aa32835SJeff Kirsher netdev_err(netdev, "h_multicast_ctrl rc=%ld when " 14369aa32835SJeff Kirsher "entering promisc mode\n", lpar_rc); 14379aa32835SJeff Kirsher } 14389aa32835SJeff Kirsher } else { 14399aa32835SJeff Kirsher struct netdev_hw_addr *ha; 14409aa32835SJeff Kirsher /* clear the filter table & disable filtering */ 14419aa32835SJeff Kirsher lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 14429aa32835SJeff Kirsher IbmVethMcastEnableRecv | 14439aa32835SJeff Kirsher IbmVethMcastDisableFiltering | 14449aa32835SJeff Kirsher IbmVethMcastClearFilterTable, 14459aa32835SJeff Kirsher 0); 14469aa32835SJeff Kirsher if (lpar_rc != H_SUCCESS) { 14479aa32835SJeff Kirsher netdev_err(netdev, "h_multicast_ctrl rc=%ld when " 14489aa32835SJeff Kirsher "attempting to clear filter table\n", 14499aa32835SJeff Kirsher lpar_rc); 14509aa32835SJeff Kirsher } 14519aa32835SJeff Kirsher /* add the addresses to the filter table */ 14529aa32835SJeff Kirsher netdev_for_each_mc_addr(ha, netdev) { 14539aa32835SJeff Kirsher /* add the multicast address to the filter table */ 1454d746ca95SAnton Blanchard u64 mcast_addr; 1455d746ca95SAnton Blanchard mcast_addr = ibmveth_encode_mac_addr(ha->addr); 14569aa32835SJeff Kirsher lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 14579aa32835SJeff Kirsher IbmVethMcastAddFilter, 14589aa32835SJeff Kirsher mcast_addr); 14599aa32835SJeff Kirsher if (lpar_rc != H_SUCCESS) { 14609aa32835SJeff Kirsher netdev_err(netdev, "h_multicast_ctrl rc=%ld " 14619aa32835SJeff Kirsher "when adding an entry to the filter " 14629aa32835SJeff Kirsher "table\n", lpar_rc); 14639aa32835SJeff Kirsher } 14649aa32835SJeff Kirsher } 14659aa32835SJeff Kirsher 14669aa32835SJeff Kirsher /* re-enable filtering */ 14679aa32835SJeff Kirsher lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 14689aa32835SJeff Kirsher IbmVethMcastEnableFiltering, 14699aa32835SJeff Kirsher 0); 14709aa32835SJeff Kirsher if (lpar_rc != H_SUCCESS) { 14719aa32835SJeff Kirsher netdev_err(netdev, "h_multicast_ctrl rc=%ld when " 14729aa32835SJeff Kirsher "enabling filtering\n", lpar_rc); 14739aa32835SJeff Kirsher } 14749aa32835SJeff Kirsher } 14759aa32835SJeff Kirsher } 14769aa32835SJeff Kirsher 14779aa32835SJeff Kirsher static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) 14789aa32835SJeff Kirsher { 14799aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(dev); 14809aa32835SJeff Kirsher struct vio_dev *viodev = adapter->vdev; 14819aa32835SJeff Kirsher int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; 14829aa32835SJeff Kirsher int i, rc; 14839aa32835SJeff Kirsher int need_restart = 0; 14849aa32835SJeff Kirsher 14859aa32835SJeff Kirsher for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) 14864fce1482SDavid Gibson if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) 14879aa32835SJeff Kirsher break; 14889aa32835SJeff Kirsher 14899aa32835SJeff Kirsher if (i == IBMVETH_NUM_BUFF_POOLS) 14909aa32835SJeff Kirsher return -EINVAL; 14919aa32835SJeff Kirsher 14929aa32835SJeff Kirsher /* Deactivate all the buffer pools so that the next loop can activate 14939aa32835SJeff Kirsher only the buffer pools necessary to hold the new MTU */ 14949aa32835SJeff Kirsher if (netif_running(adapter->netdev)) { 14959aa32835SJeff Kirsher need_restart = 1; 14969aa32835SJeff Kirsher adapter->pool_config = 1; 14979aa32835SJeff Kirsher ibmveth_close(adapter->netdev); 14989aa32835SJeff Kirsher adapter->pool_config = 0; 14999aa32835SJeff Kirsher } 15009aa32835SJeff Kirsher 15019aa32835SJeff Kirsher /* Look for an active buffer pool that can hold the new MTU */ 15029aa32835SJeff Kirsher for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 15039aa32835SJeff Kirsher adapter->rx_buff_pool[i].active = 1; 15049aa32835SJeff Kirsher 15054fce1482SDavid Gibson if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) { 15069aa32835SJeff Kirsher dev->mtu = new_mtu; 15079aa32835SJeff Kirsher vio_cmo_set_dev_desired(viodev, 15089aa32835SJeff Kirsher ibmveth_get_desired_dma 15099aa32835SJeff Kirsher (viodev)); 15109aa32835SJeff Kirsher if (need_restart) { 15119aa32835SJeff Kirsher return ibmveth_open(adapter->netdev); 15129aa32835SJeff Kirsher } 15139aa32835SJeff Kirsher return 0; 15149aa32835SJeff Kirsher } 15159aa32835SJeff Kirsher } 15169aa32835SJeff Kirsher 15179aa32835SJeff Kirsher if (need_restart && (rc = ibmveth_open(adapter->netdev))) 15189aa32835SJeff Kirsher return rc; 15199aa32835SJeff Kirsher 15209aa32835SJeff Kirsher return -EINVAL; 15219aa32835SJeff Kirsher } 15229aa32835SJeff Kirsher 15239aa32835SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 15249aa32835SJeff Kirsher static void ibmveth_poll_controller(struct net_device *dev) 15259aa32835SJeff Kirsher { 15269aa32835SJeff Kirsher ibmveth_replenish_task(netdev_priv(dev)); 15279aa32835SJeff Kirsher ibmveth_interrupt(dev->irq, dev); 15289aa32835SJeff Kirsher } 15299aa32835SJeff Kirsher #endif 15309aa32835SJeff Kirsher 15319aa32835SJeff Kirsher /** 15329aa32835SJeff Kirsher * ibmveth_get_desired_dma - Calculate IO memory desired by the driver 15339aa32835SJeff Kirsher * 15349aa32835SJeff Kirsher * @vdev: struct vio_dev for the device whose desired IO mem is to be returned 15359aa32835SJeff Kirsher * 15369aa32835SJeff Kirsher * Return value: 15379aa32835SJeff Kirsher * Number of bytes of IO data the driver will need to perform well. 15389aa32835SJeff Kirsher */ 15399aa32835SJeff Kirsher static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) 15409aa32835SJeff Kirsher { 15419aa32835SJeff Kirsher struct net_device *netdev = dev_get_drvdata(&vdev->dev); 15429aa32835SJeff Kirsher struct ibmveth_adapter *adapter; 1543d0847757SAlistair Popple struct iommu_table *tbl; 15449aa32835SJeff Kirsher unsigned long ret; 15459aa32835SJeff Kirsher int i; 15469aa32835SJeff Kirsher int rxqentries = 1; 15479aa32835SJeff Kirsher 1548d0847757SAlistair Popple tbl = get_iommu_table_base(&vdev->dev); 1549d0847757SAlistair Popple 15509aa32835SJeff Kirsher /* netdev inits at probe time along with the structures we need below*/ 15519aa32835SJeff Kirsher if (netdev == NULL) 1552d0847757SAlistair Popple return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl); 15539aa32835SJeff Kirsher 15549aa32835SJeff Kirsher adapter = netdev_priv(netdev); 15559aa32835SJeff Kirsher 15569aa32835SJeff Kirsher ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; 1557d0847757SAlistair Popple ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl); 15589aa32835SJeff Kirsher 15599aa32835SJeff Kirsher for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 15609aa32835SJeff Kirsher /* add the size of the active receive buffers */ 15619aa32835SJeff Kirsher if (adapter->rx_buff_pool[i].active) 15629aa32835SJeff Kirsher ret += 15639aa32835SJeff Kirsher adapter->rx_buff_pool[i].size * 15649aa32835SJeff Kirsher IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i]. 1565d0847757SAlistair Popple buff_size, tbl); 15669aa32835SJeff Kirsher rxqentries += adapter->rx_buff_pool[i].size; 15679aa32835SJeff Kirsher } 15689aa32835SJeff Kirsher /* add the size of the receive queue entries */ 1569d0847757SAlistair Popple ret += IOMMU_PAGE_ALIGN( 1570d0847757SAlistair Popple rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl); 15719aa32835SJeff Kirsher 15729aa32835SJeff Kirsher return ret; 15739aa32835SJeff Kirsher } 15749aa32835SJeff Kirsher 1575c77c761fSThomas Falcon static int ibmveth_set_mac_addr(struct net_device *dev, void *p) 1576c77c761fSThomas Falcon { 1577c77c761fSThomas Falcon struct ibmveth_adapter *adapter = netdev_priv(dev); 1578c77c761fSThomas Falcon struct sockaddr *addr = p; 1579c77c761fSThomas Falcon u64 mac_address; 1580c77c761fSThomas Falcon int rc; 1581c77c761fSThomas Falcon 1582c77c761fSThomas Falcon if (!is_valid_ether_addr(addr->sa_data)) 1583c77c761fSThomas Falcon return -EADDRNOTAVAIL; 1584c77c761fSThomas Falcon 1585c77c761fSThomas Falcon mac_address = ibmveth_encode_mac_addr(addr->sa_data); 1586c77c761fSThomas Falcon rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address); 1587c77c761fSThomas Falcon if (rc) { 1588c77c761fSThomas Falcon netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc); 1589c77c761fSThomas Falcon return rc; 1590c77c761fSThomas Falcon } 1591c77c761fSThomas Falcon 1592c77c761fSThomas Falcon ether_addr_copy(dev->dev_addr, addr->sa_data); 1593c77c761fSThomas Falcon 1594c77c761fSThomas Falcon return 0; 1595c77c761fSThomas Falcon } 1596c77c761fSThomas Falcon 15979aa32835SJeff Kirsher static const struct net_device_ops ibmveth_netdev_ops = { 15989aa32835SJeff Kirsher .ndo_open = ibmveth_open, 15999aa32835SJeff Kirsher .ndo_stop = ibmveth_close, 16009aa32835SJeff Kirsher .ndo_start_xmit = ibmveth_start_xmit, 1601afc4b13dSJiri Pirko .ndo_set_rx_mode = ibmveth_set_multicast_list, 16029aa32835SJeff Kirsher .ndo_do_ioctl = ibmveth_ioctl, 16039aa32835SJeff Kirsher .ndo_change_mtu = ibmveth_change_mtu, 16049aa32835SJeff Kirsher .ndo_fix_features = ibmveth_fix_features, 16059aa32835SJeff Kirsher .ndo_set_features = ibmveth_set_features, 16069aa32835SJeff Kirsher .ndo_validate_addr = eth_validate_addr, 1607c77c761fSThomas Falcon .ndo_set_mac_address = ibmveth_set_mac_addr, 16089aa32835SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 16099aa32835SJeff Kirsher .ndo_poll_controller = ibmveth_poll_controller, 16109aa32835SJeff Kirsher #endif 16119aa32835SJeff Kirsher }; 16129aa32835SJeff Kirsher 16131dd06ae8SGreg Kroah-Hartman static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) 16149aa32835SJeff Kirsher { 161513f85203SBenjamin Herrenschmidt int rc, i, mac_len; 16169aa32835SJeff Kirsher struct net_device *netdev; 16179aa32835SJeff Kirsher struct ibmveth_adapter *adapter; 16189aa32835SJeff Kirsher unsigned char *mac_addr_p; 16199aa32835SJeff Kirsher unsigned int *mcastFilterSize_p; 162007e6a97dSThomas Falcon long ret; 162107e6a97dSThomas Falcon unsigned long ret_attr; 16229aa32835SJeff Kirsher 16239aa32835SJeff Kirsher dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n", 16249aa32835SJeff Kirsher dev->unit_address); 16259aa32835SJeff Kirsher 16269aa32835SJeff Kirsher mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR, 162713f85203SBenjamin Herrenschmidt &mac_len); 16289aa32835SJeff Kirsher if (!mac_addr_p) { 16299aa32835SJeff Kirsher dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n"); 16309aa32835SJeff Kirsher return -EINVAL; 16319aa32835SJeff Kirsher } 163213f85203SBenjamin Herrenschmidt /* Workaround for old/broken pHyp */ 163313f85203SBenjamin Herrenschmidt if (mac_len == 8) 163413f85203SBenjamin Herrenschmidt mac_addr_p += 2; 163513f85203SBenjamin Herrenschmidt else if (mac_len != 6) { 163613f85203SBenjamin Herrenschmidt dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n", 163713f85203SBenjamin Herrenschmidt mac_len); 163813f85203SBenjamin Herrenschmidt return -EINVAL; 163913f85203SBenjamin Herrenschmidt } 16409aa32835SJeff Kirsher 16419aa32835SJeff Kirsher mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev, 16429aa32835SJeff Kirsher VETH_MCAST_FILTER_SIZE, NULL); 16439aa32835SJeff Kirsher if (!mcastFilterSize_p) { 16449aa32835SJeff Kirsher dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " 16459aa32835SJeff Kirsher "attribute\n"); 16469aa32835SJeff Kirsher return -EINVAL; 16479aa32835SJeff Kirsher } 16489aa32835SJeff Kirsher 16499aa32835SJeff Kirsher netdev = alloc_etherdev(sizeof(struct ibmveth_adapter)); 16509aa32835SJeff Kirsher 16519aa32835SJeff Kirsher if (!netdev) 16529aa32835SJeff Kirsher return -ENOMEM; 16539aa32835SJeff Kirsher 16549aa32835SJeff Kirsher adapter = netdev_priv(netdev); 16559aa32835SJeff Kirsher dev_set_drvdata(&dev->dev, netdev); 16569aa32835SJeff Kirsher 16579aa32835SJeff Kirsher adapter->vdev = dev; 16589aa32835SJeff Kirsher adapter->netdev = netdev; 16599aa32835SJeff Kirsher adapter->mcastFilterSize = *mcastFilterSize_p; 16609aa32835SJeff Kirsher adapter->pool_config = 0; 16619aa32835SJeff Kirsher 16629aa32835SJeff Kirsher netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); 16639aa32835SJeff Kirsher 16649aa32835SJeff Kirsher netdev->irq = dev->irq; 16659aa32835SJeff Kirsher netdev->netdev_ops = &ibmveth_netdev_ops; 16669aa32835SJeff Kirsher netdev->ethtool_ops = &netdev_ethtool_ops; 16679aa32835SJeff Kirsher SET_NETDEV_DEV(netdev, &dev->dev); 166823d28a85SThomas Huth netdev->hw_features = NETIF_F_SG; 166923d28a85SThomas Huth if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) { 167023d28a85SThomas Huth netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 167123d28a85SThomas Huth NETIF_F_RXCSUM; 167223d28a85SThomas Huth } 167307e6a97dSThomas Falcon 16749aa32835SJeff Kirsher netdev->features |= netdev->hw_features; 16759aa32835SJeff Kirsher 167607e6a97dSThomas Falcon ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); 167707e6a97dSThomas Falcon 167807e6a97dSThomas Falcon /* If running older firmware, TSO should not be enabled by default */ 167907e6a97dSThomas Falcon if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) && 168007e6a97dSThomas Falcon !old_large_send) { 168107e6a97dSThomas Falcon netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 168207e6a97dSThomas Falcon netdev->features |= netdev->hw_features; 168307e6a97dSThomas Falcon } else { 16848641dd85SThomas Falcon netdev->hw_features |= NETIF_F_TSO; 168507e6a97dSThomas Falcon } 16868641dd85SThomas Falcon 168766aa0678SSivakumar Krishnasamy adapter->is_active_trunk = false; 168866aa0678SSivakumar Krishnasamy if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK)) { 168966aa0678SSivakumar Krishnasamy adapter->is_active_trunk = true; 169066aa0678SSivakumar Krishnasamy netdev->hw_features |= NETIF_F_FRAGLIST; 169166aa0678SSivakumar Krishnasamy netdev->features |= NETIF_F_FRAGLIST; 169266aa0678SSivakumar Krishnasamy } 169366aa0678SSivakumar Krishnasamy 1694d894be57SJarod Wilson netdev->min_mtu = IBMVETH_MIN_MTU; 1695110447f8SStefan Richter netdev->max_mtu = ETH_MAX_MTU; 1696d894be57SJarod Wilson 1697d746ca95SAnton Blanchard memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); 16989aa32835SJeff Kirsher 1699cd7c7ec3SThomas Falcon if (firmware_has_feature(FW_FEATURE_CMO)) 1700cd7c7ec3SThomas Falcon memcpy(pool_count, pool_count_cmo, sizeof(pool_count)); 1701cd7c7ec3SThomas Falcon 17029aa32835SJeff Kirsher for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 17039aa32835SJeff Kirsher struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; 17049aa32835SJeff Kirsher int error; 17059aa32835SJeff Kirsher 17069aa32835SJeff Kirsher ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, 17079aa32835SJeff Kirsher pool_count[i], pool_size[i], 17089aa32835SJeff Kirsher pool_active[i]); 17099aa32835SJeff Kirsher error = kobject_init_and_add(kobj, &ktype_veth_pool, 17109aa32835SJeff Kirsher &dev->dev.kobj, "pool%d", i); 17119aa32835SJeff Kirsher if (!error) 17129aa32835SJeff Kirsher kobject_uevent(kobj, KOBJ_ADD); 17139aa32835SJeff Kirsher } 17149aa32835SJeff Kirsher 17159aa32835SJeff Kirsher netdev_dbg(netdev, "adapter @ 0x%p\n", adapter); 17169aa32835SJeff Kirsher netdev_dbg(netdev, "registering netdev...\n"); 17179aa32835SJeff Kirsher 17189aa32835SJeff Kirsher ibmveth_set_features(netdev, netdev->features); 17199aa32835SJeff Kirsher 17209aa32835SJeff Kirsher rc = register_netdev(netdev); 17219aa32835SJeff Kirsher 17229aa32835SJeff Kirsher if (rc) { 17239aa32835SJeff Kirsher netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc); 17249aa32835SJeff Kirsher free_netdev(netdev); 17259aa32835SJeff Kirsher return rc; 17269aa32835SJeff Kirsher } 17279aa32835SJeff Kirsher 17289aa32835SJeff Kirsher netdev_dbg(netdev, "registered\n"); 17299aa32835SJeff Kirsher 17309aa32835SJeff Kirsher return 0; 17319aa32835SJeff Kirsher } 17329aa32835SJeff Kirsher 1733e11787a2SBill Pemberton static int ibmveth_remove(struct vio_dev *dev) 17349aa32835SJeff Kirsher { 17359aa32835SJeff Kirsher struct net_device *netdev = dev_get_drvdata(&dev->dev); 17369aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(netdev); 17379aa32835SJeff Kirsher int i; 17389aa32835SJeff Kirsher 17399aa32835SJeff Kirsher for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) 17409aa32835SJeff Kirsher kobject_put(&adapter->rx_buff_pool[i].kobj); 17419aa32835SJeff Kirsher 17429aa32835SJeff Kirsher unregister_netdev(netdev); 17439aa32835SJeff Kirsher 17449aa32835SJeff Kirsher free_netdev(netdev); 17459aa32835SJeff Kirsher dev_set_drvdata(&dev->dev, NULL); 17469aa32835SJeff Kirsher 17479aa32835SJeff Kirsher return 0; 17489aa32835SJeff Kirsher } 17499aa32835SJeff Kirsher 17509aa32835SJeff Kirsher static struct attribute veth_active_attr; 17519aa32835SJeff Kirsher static struct attribute veth_num_attr; 17529aa32835SJeff Kirsher static struct attribute veth_size_attr; 17539aa32835SJeff Kirsher 17549aa32835SJeff Kirsher static ssize_t veth_pool_show(struct kobject *kobj, 17559aa32835SJeff Kirsher struct attribute *attr, char *buf) 17569aa32835SJeff Kirsher { 17579aa32835SJeff Kirsher struct ibmveth_buff_pool *pool = container_of(kobj, 17589aa32835SJeff Kirsher struct ibmveth_buff_pool, 17599aa32835SJeff Kirsher kobj); 17609aa32835SJeff Kirsher 17619aa32835SJeff Kirsher if (attr == &veth_active_attr) 17629aa32835SJeff Kirsher return sprintf(buf, "%d\n", pool->active); 17639aa32835SJeff Kirsher else if (attr == &veth_num_attr) 17649aa32835SJeff Kirsher return sprintf(buf, "%d\n", pool->size); 17659aa32835SJeff Kirsher else if (attr == &veth_size_attr) 17669aa32835SJeff Kirsher return sprintf(buf, "%d\n", pool->buff_size); 17679aa32835SJeff Kirsher return 0; 17689aa32835SJeff Kirsher } 17699aa32835SJeff Kirsher 17709aa32835SJeff Kirsher static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, 17719aa32835SJeff Kirsher const char *buf, size_t count) 17729aa32835SJeff Kirsher { 17739aa32835SJeff Kirsher struct ibmveth_buff_pool *pool = container_of(kobj, 17749aa32835SJeff Kirsher struct ibmveth_buff_pool, 17759aa32835SJeff Kirsher kobj); 17769aa32835SJeff Kirsher struct net_device *netdev = dev_get_drvdata( 17779aa32835SJeff Kirsher container_of(kobj->parent, struct device, kobj)); 17789aa32835SJeff Kirsher struct ibmveth_adapter *adapter = netdev_priv(netdev); 17799aa32835SJeff Kirsher long value = simple_strtol(buf, NULL, 10); 17809aa32835SJeff Kirsher long rc; 17819aa32835SJeff Kirsher 17829aa32835SJeff Kirsher if (attr == &veth_active_attr) { 17839aa32835SJeff Kirsher if (value && !pool->active) { 17849aa32835SJeff Kirsher if (netif_running(netdev)) { 17859aa32835SJeff Kirsher if (ibmveth_alloc_buffer_pool(pool)) { 17869aa32835SJeff Kirsher netdev_err(netdev, 17879aa32835SJeff Kirsher "unable to alloc pool\n"); 17889aa32835SJeff Kirsher return -ENOMEM; 17899aa32835SJeff Kirsher } 17909aa32835SJeff Kirsher pool->active = 1; 17919aa32835SJeff Kirsher adapter->pool_config = 1; 17929aa32835SJeff Kirsher ibmveth_close(netdev); 17939aa32835SJeff Kirsher adapter->pool_config = 0; 17949aa32835SJeff Kirsher if ((rc = ibmveth_open(netdev))) 17959aa32835SJeff Kirsher return rc; 17969aa32835SJeff Kirsher } else { 17979aa32835SJeff Kirsher pool->active = 1; 17989aa32835SJeff Kirsher } 17999aa32835SJeff Kirsher } else if (!value && pool->active) { 18009aa32835SJeff Kirsher int mtu = netdev->mtu + IBMVETH_BUFF_OH; 18019aa32835SJeff Kirsher int i; 18029aa32835SJeff Kirsher /* Make sure there is a buffer pool with buffers that 18039aa32835SJeff Kirsher can hold a packet of the size of the MTU */ 18049aa32835SJeff Kirsher for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 18059aa32835SJeff Kirsher if (pool == &adapter->rx_buff_pool[i]) 18069aa32835SJeff Kirsher continue; 18079aa32835SJeff Kirsher if (!adapter->rx_buff_pool[i].active) 18089aa32835SJeff Kirsher continue; 18099aa32835SJeff Kirsher if (mtu <= adapter->rx_buff_pool[i].buff_size) 18109aa32835SJeff Kirsher break; 18119aa32835SJeff Kirsher } 18129aa32835SJeff Kirsher 18139aa32835SJeff Kirsher if (i == IBMVETH_NUM_BUFF_POOLS) { 18149aa32835SJeff Kirsher netdev_err(netdev, "no active pool >= MTU\n"); 18159aa32835SJeff Kirsher return -EPERM; 18169aa32835SJeff Kirsher } 18179aa32835SJeff Kirsher 18189aa32835SJeff Kirsher if (netif_running(netdev)) { 18199aa32835SJeff Kirsher adapter->pool_config = 1; 18209aa32835SJeff Kirsher ibmveth_close(netdev); 18219aa32835SJeff Kirsher pool->active = 0; 18229aa32835SJeff Kirsher adapter->pool_config = 0; 18239aa32835SJeff Kirsher if ((rc = ibmveth_open(netdev))) 18249aa32835SJeff Kirsher return rc; 18259aa32835SJeff Kirsher } 18269aa32835SJeff Kirsher pool->active = 0; 18279aa32835SJeff Kirsher } 18289aa32835SJeff Kirsher } else if (attr == &veth_num_attr) { 18299aa32835SJeff Kirsher if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) { 18309aa32835SJeff Kirsher return -EINVAL; 18319aa32835SJeff Kirsher } else { 18329aa32835SJeff Kirsher if (netif_running(netdev)) { 18339aa32835SJeff Kirsher adapter->pool_config = 1; 18349aa32835SJeff Kirsher ibmveth_close(netdev); 18359aa32835SJeff Kirsher adapter->pool_config = 0; 18369aa32835SJeff Kirsher pool->size = value; 18379aa32835SJeff Kirsher if ((rc = ibmveth_open(netdev))) 18389aa32835SJeff Kirsher return rc; 18399aa32835SJeff Kirsher } else { 18409aa32835SJeff Kirsher pool->size = value; 18419aa32835SJeff Kirsher } 18429aa32835SJeff Kirsher } 18439aa32835SJeff Kirsher } else if (attr == &veth_size_attr) { 18449aa32835SJeff Kirsher if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) { 18459aa32835SJeff Kirsher return -EINVAL; 18469aa32835SJeff Kirsher } else { 18479aa32835SJeff Kirsher if (netif_running(netdev)) { 18489aa32835SJeff Kirsher adapter->pool_config = 1; 18499aa32835SJeff Kirsher ibmveth_close(netdev); 18509aa32835SJeff Kirsher adapter->pool_config = 0; 18519aa32835SJeff Kirsher pool->buff_size = value; 18529aa32835SJeff Kirsher if ((rc = ibmveth_open(netdev))) 18539aa32835SJeff Kirsher return rc; 18549aa32835SJeff Kirsher } else { 18559aa32835SJeff Kirsher pool->buff_size = value; 18569aa32835SJeff Kirsher } 18579aa32835SJeff Kirsher } 18589aa32835SJeff Kirsher } 18599aa32835SJeff Kirsher 18609aa32835SJeff Kirsher /* kick the interrupt handler to allocate/deallocate pools */ 18619aa32835SJeff Kirsher ibmveth_interrupt(netdev->irq, netdev); 18629aa32835SJeff Kirsher return count; 18639aa32835SJeff Kirsher } 18649aa32835SJeff Kirsher 18659aa32835SJeff Kirsher 18669aa32835SJeff Kirsher #define ATTR(_name, _mode) \ 18679aa32835SJeff Kirsher struct attribute veth_##_name##_attr = { \ 18689aa32835SJeff Kirsher .name = __stringify(_name), .mode = _mode, \ 18699aa32835SJeff Kirsher }; 18709aa32835SJeff Kirsher 18719aa32835SJeff Kirsher static ATTR(active, 0644); 18729aa32835SJeff Kirsher static ATTR(num, 0644); 18739aa32835SJeff Kirsher static ATTR(size, 0644); 18749aa32835SJeff Kirsher 18759aa32835SJeff Kirsher static struct attribute *veth_pool_attrs[] = { 18769aa32835SJeff Kirsher &veth_active_attr, 18779aa32835SJeff Kirsher &veth_num_attr, 18789aa32835SJeff Kirsher &veth_size_attr, 18799aa32835SJeff Kirsher NULL, 18809aa32835SJeff Kirsher }; 18819aa32835SJeff Kirsher 18829aa32835SJeff Kirsher static const struct sysfs_ops veth_pool_ops = { 18839aa32835SJeff Kirsher .show = veth_pool_show, 18849aa32835SJeff Kirsher .store = veth_pool_store, 18859aa32835SJeff Kirsher }; 18869aa32835SJeff Kirsher 18879aa32835SJeff Kirsher static struct kobj_type ktype_veth_pool = { 18889aa32835SJeff Kirsher .release = NULL, 18899aa32835SJeff Kirsher .sysfs_ops = &veth_pool_ops, 18909aa32835SJeff Kirsher .default_attrs = veth_pool_attrs, 18919aa32835SJeff Kirsher }; 18929aa32835SJeff Kirsher 18939aa32835SJeff Kirsher static int ibmveth_resume(struct device *dev) 18949aa32835SJeff Kirsher { 18959aa32835SJeff Kirsher struct net_device *netdev = dev_get_drvdata(dev); 18969aa32835SJeff Kirsher ibmveth_interrupt(netdev->irq, netdev); 18979aa32835SJeff Kirsher return 0; 18989aa32835SJeff Kirsher } 18999aa32835SJeff Kirsher 190071450804SArvind Yadav static const struct vio_device_id ibmveth_device_table[] = { 19019aa32835SJeff Kirsher { "network", "IBM,l-lan"}, 19029aa32835SJeff Kirsher { "", "" } 19039aa32835SJeff Kirsher }; 19049aa32835SJeff Kirsher MODULE_DEVICE_TABLE(vio, ibmveth_device_table); 19059aa32835SJeff Kirsher 1906eb60a73dSArvind Yadav static const struct dev_pm_ops ibmveth_pm_ops = { 19079aa32835SJeff Kirsher .resume = ibmveth_resume 19089aa32835SJeff Kirsher }; 19099aa32835SJeff Kirsher 19109aa32835SJeff Kirsher static struct vio_driver ibmveth_driver = { 19119aa32835SJeff Kirsher .id_table = ibmveth_device_table, 19129aa32835SJeff Kirsher .probe = ibmveth_probe, 19139aa32835SJeff Kirsher .remove = ibmveth_remove, 19149aa32835SJeff Kirsher .get_desired_dma = ibmveth_get_desired_dma, 19159aa32835SJeff Kirsher .name = ibmveth_driver_name, 19169aa32835SJeff Kirsher .pm = &ibmveth_pm_ops, 19179aa32835SJeff Kirsher }; 19189aa32835SJeff Kirsher 19199aa32835SJeff Kirsher static int __init ibmveth_module_init(void) 19209aa32835SJeff Kirsher { 19219aa32835SJeff Kirsher printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name, 19229aa32835SJeff Kirsher ibmveth_driver_string, ibmveth_driver_version); 19239aa32835SJeff Kirsher 19249aa32835SJeff Kirsher return vio_register_driver(&ibmveth_driver); 19259aa32835SJeff Kirsher } 19269aa32835SJeff Kirsher 19279aa32835SJeff Kirsher static void __exit ibmveth_module_exit(void) 19289aa32835SJeff Kirsher { 19299aa32835SJeff Kirsher vio_unregister_driver(&ibmveth_driver); 19309aa32835SJeff Kirsher } 19319aa32835SJeff Kirsher 19329aa32835SJeff Kirsher module_init(ibmveth_module_init); 19339aa32835SJeff Kirsher module_exit(ibmveth_module_exit); 1934