Lines Matching +full:pool +full:- +full:long

1 // SPDX-License-Identifier: GPL-2.0-or-later
17 #include <linux/dma-mapping.h>
42 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
81 #define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
104 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off); in ibmveth_rxq_flags()
115 return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle; in ibmveth_rxq_pending_buffer()
135 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length); in ibmveth_rxq_frame_length()
150 /* set up the initial settings for a buffer pool */
151 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, in ibmveth_init_buffer_pool() argument
155 pool->size = pool_size; in ibmveth_init_buffer_pool()
156 pool->index = pool_index; in ibmveth_init_buffer_pool()
157 pool->buff_size = buff_size; in ibmveth_init_buffer_pool()
158 pool->threshold = pool_size * 7 / 8; in ibmveth_init_buffer_pool()
159 pool->active = pool_active; in ibmveth_init_buffer_pool()
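
The init path matched above just records the pool geometry and derives a replenish threshold at 7/8 of capacity. A minimal user-space sketch of the same assignments; the struct is a simplified stand-in for the driver's struct ibmveth_buff_pool, not the kernel definition:

#include <stdint.h>
#include <stdio.h>

struct pool_model {
	uint32_t size;       /* number of buffers in the pool */
	uint32_t index;      /* pool number, encoded into correlators */
	uint32_t buff_size;  /* bytes per buffer */
	uint32_t threshold;  /* replenish when posted count drops below this */
	int active;
};

static void pool_init(struct pool_model *p, uint32_t idx, uint32_t size,
		      uint32_t buff_size, int active)
{
	p->size = size;
	p->index = idx;
	p->buff_size = buff_size;
	/* refill once fewer than 7/8 of the buffers remain posted */
	p->threshold = size * 7 / 8;
	p->active = active;
}

int main(void)
{
	struct pool_model p;

	pool_init(&p, 0, 512, 2048, 1);
	printf("threshold = %u\n", p.threshold);	/* prints 448 */
	return 0;
}
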
162 /* allocate and set up a buffer pool - called during open */
163 static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) in ibmveth_alloc_buffer_pool() argument
167 pool->free_map = kmalloc_array(pool->size, sizeof(u16), GFP_KERNEL); in ibmveth_alloc_buffer_pool()
169 if (!pool->free_map) in ibmveth_alloc_buffer_pool()
170 return -1; in ibmveth_alloc_buffer_pool()
172 pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL); in ibmveth_alloc_buffer_pool()
173 if (!pool->dma_addr) { in ibmveth_alloc_buffer_pool()
174 kfree(pool->free_map); in ibmveth_alloc_buffer_pool()
175 pool->free_map = NULL; in ibmveth_alloc_buffer_pool()
176 return -1; in ibmveth_alloc_buffer_pool()
179 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL); in ibmveth_alloc_buffer_pool()
181 if (!pool->skbuff) { in ibmveth_alloc_buffer_pool()
182 kfree(pool->dma_addr); in ibmveth_alloc_buffer_pool()
183 pool->dma_addr = NULL; in ibmveth_alloc_buffer_pool()
185 kfree(pool->free_map); in ibmveth_alloc_buffer_pool()
186 pool->free_map = NULL; in ibmveth_alloc_buffer_pool()
187 return -1; in ibmveth_alloc_buffer_pool()
190 for (i = 0; i < pool->size; ++i) in ibmveth_alloc_buffer_pool()
191 pool->free_map[i] = i; in ibmveth_alloc_buffer_pool()
193 atomic_set(&pool->available, 0); in ibmveth_alloc_buffer_pool()
194 pool->producer_index = 0; in ibmveth_alloc_buffer_pool()
195 pool->consumer_index = 0; in ibmveth_alloc_buffer_pool()
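
The allocation above is staged: three arrays in order, with each failure freeing whatever was already allocated. The same unwind expressed as a goto ladder in a user-space sketch (names are illustrative; the driver frees explicitly at each failure site instead):

#include <stdlib.h>
#include <stdint.h>

struct pool_model {
	uint32_t size;
	uint16_t *free_map;
	uint64_t *dma_addr;   /* stands in for dma_addr_t */
	void **skbuff;
};

static int pool_alloc(struct pool_model *p)
{
	uint32_t i;

	p->free_map = malloc(p->size * sizeof(*p->free_map));
	if (!p->free_map)
		return -1;

	p->dma_addr = calloc(p->size, sizeof(*p->dma_addr));
	if (!p->dma_addr)
		goto free_map;

	p->skbuff = calloc(p->size, sizeof(*p->skbuff));
	if (!p->skbuff)
		goto free_dma;

	/* every slot starts out free and maps to itself */
	for (i = 0; i < p->size; i++)
		p->free_map[i] = (uint16_t)i;
	return 0;

free_dma:
	free(p->dma_addr);
	p->dma_addr = NULL;
free_map:
	free(p->free_map);
	p->free_map = NULL;
	return -1;
}
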
200 static inline void ibmveth_flush_buffer(void *addr, unsigned long length) in ibmveth_flush_buffer()
202 unsigned long offset; in ibmveth_flush_buffer()
208 /* replenish the buffers for a pool. note that we don't need to
212 struct ibmveth_buff_pool *pool) in ibmveth_replenish_buffer_pool() argument
215 u32 remaining = pool->size - atomic_read(&pool->available); in ibmveth_replenish_buffer_pool()
217 unsigned long lpar_rc; in ibmveth_replenish_buffer_pool()
225 vdev = adapter->vdev; in ibmveth_replenish_buffer_pool()
226 dev = &vdev->dev; in ibmveth_replenish_buffer_pool()
230 batch = adapter->rx_buffers_per_hcall; in ibmveth_replenish_buffer_pool()
233 unsigned int free_index = pool->consumer_index; in ibmveth_replenish_buffer_pool()
237 index = pool->free_map[free_index]; in ibmveth_replenish_buffer_pool()
239 adapter->replenish_add_buff_failure++; in ibmveth_replenish_buffer_pool()
240 netdev_info(adapter->netdev, in ibmveth_replenish_buffer_pool()
243 schedule_work(&adapter->work); in ibmveth_replenish_buffer_pool()
247 if (!pool->skbuff[index]) { in ibmveth_replenish_buffer_pool()
250 skb = netdev_alloc_skb(adapter->netdev, in ibmveth_replenish_buffer_pool()
251 pool->buff_size); in ibmveth_replenish_buffer_pool()
253 adapter->replenish_no_mem++; in ibmveth_replenish_buffer_pool()
254 adapter->replenish_add_buff_failure++; in ibmveth_replenish_buffer_pool()
258 dma_addr = dma_map_single(dev, skb->data, in ibmveth_replenish_buffer_pool()
259 pool->buff_size, in ibmveth_replenish_buffer_pool()
263 adapter->replenish_add_buff_failure++; in ibmveth_replenish_buffer_pool()
267 pool->dma_addr[index] = dma_addr; in ibmveth_replenish_buffer_pool()
268 pool->skbuff[index] = skb; in ibmveth_replenish_buffer_pool()
270 /* re-use case */ in ibmveth_replenish_buffer_pool()
271 dma_addr = pool->dma_addr[index]; in ibmveth_replenish_buffer_pool()
277 len = adapter->netdev->mtu + IBMVETH_BUFF_OH; in ibmveth_replenish_buffer_pool()
278 len = min(pool->buff_size, len); in ibmveth_replenish_buffer_pool()
279 ibmveth_flush_buffer(pool->skbuff[index]->data, in ibmveth_replenish_buffer_pool()
284 pool->buff_size; in ibmveth_replenish_buffer_pool()
287 correlators[filled] = ((u64)pool->index << 32) | index; in ibmveth_replenish_buffer_pool()
288 *(u64 *)pool->skbuff[index]->data = correlators[filled]; in ibmveth_replenish_buffer_pool()
291 if (free_index >= pool->size) in ibmveth_replenish_buffer_pool()
300 lpar_rc = h_add_logical_lan_buffer(vdev->unit_address, in ibmveth_replenish_buffer_pool()
303 /* Multi-buffer hcall */ in ibmveth_replenish_buffer_pool()
304 lpar_rc = h_add_logical_lan_buffers(vdev->unit_address, in ibmveth_replenish_buffer_pool()
320 /* Only update pool state after hcall succeeds */ in ibmveth_replenish_buffer_pool()
322 free_index = pool->consumer_index; in ibmveth_replenish_buffer_pool()
323 pool->free_map[free_index] = IBM_VETH_INVALID_MAP; in ibmveth_replenish_buffer_pool()
325 pool->consumer_index++; in ibmveth_replenish_buffer_pool()
326 if (pool->consumer_index >= pool->size) in ibmveth_replenish_buffer_pool()
327 pool->consumer_index = 0; in ibmveth_replenish_buffer_pool()
331 adapter->replenish_add_buff_success += filled; in ibmveth_replenish_buffer_pool()
332 remaining -= filled; in ibmveth_replenish_buffer_pool()
341 dma_addr = pool->dma_addr[index]; in ibmveth_replenish_buffer_pool()
343 if (pool->skbuff[index]) { in ibmveth_replenish_buffer_pool()
347 pool->buff_size, in ibmveth_replenish_buffer_pool()
350 dev_kfree_skb_any(pool->skbuff[index]); in ibmveth_replenish_buffer_pool()
351 pool->skbuff[index] = NULL; in ibmveth_replenish_buffer_pool()
354 adapter->replenish_add_buff_failure += filled; in ibmveth_replenish_buffer_pool()
366 * with single-buffer case in ibmveth_replenish_buffer_pool()
368 netdev_info(adapter->netdev, in ibmveth_replenish_buffer_pool()
371 adapter->rx_buffers_per_hcall = 1; in ibmveth_replenish_buffer_pool()
372 netdev_info(adapter->netdev, in ibmveth_replenish_buffer_pool()
373 "Next rx replesh will fall back to single-buffer hcall\n"); in ibmveth_replenish_buffer_pool()
379 atomic_add(buffers_added, &(pool->available)); in ibmveth_replenish_buffer_pool()
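
The replenish path tags each posted buffer with a 64-bit correlator: pool index in the high 32 bits, buffer index in the low 32 bits, also written into the first 8 bytes of the buffer so the hypervisor can echo it back. A round-trip sketch in user space (helper names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint64_t make_correlator(uint32_t pool, uint32_t index)
{
	return ((uint64_t)pool << 32) | index;
}

static void split_correlator(uint64_t cor, uint32_t *pool, uint32_t *index)
{
	*pool = (uint32_t)(cor >> 32);
	*index = (uint32_t)(cor & 0xffffffffULL);
}

int main(void)
{
	uint32_t pool, index;

	split_correlator(make_correlator(3, 41), &pool, &index);
	assert(pool == 3 && index == 41);
	printf("pool=%u index=%u\n", pool, index);
	return 0;
}
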
389 __be64 *p = adapter->buffer_list_addr + 4096 - 8; in ibmveth_update_rx_no_buffer()
391 adapter->rx_no_buffer = be64_to_cpup(p); in ibmveth_update_rx_no_buffer()
399 adapter->replenish_task_cycles++; in ibmveth_replenish_task()
401 for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) { in ibmveth_replenish_task()
402 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i]; in ibmveth_replenish_task() local
404 if (pool->active && in ibmveth_replenish_task()
405 (atomic_read(&pool->available) < pool->threshold)) in ibmveth_replenish_task()
406 ibmveth_replenish_buffer_pool(adapter, pool); in ibmveth_replenish_task()
412 /* empty and free a buffer pool - also used to do cleanup in error paths */
414 struct ibmveth_buff_pool *pool) in ibmveth_free_buffer_pool() argument
418 kfree(pool->free_map); in ibmveth_free_buffer_pool()
419 pool->free_map = NULL; in ibmveth_free_buffer_pool()
421 if (pool->skbuff && pool->dma_addr) { in ibmveth_free_buffer_pool()
422 for (i = 0; i < pool->size; ++i) { in ibmveth_free_buffer_pool()
423 struct sk_buff *skb = pool->skbuff[i]; in ibmveth_free_buffer_pool()
425 dma_unmap_single(&adapter->vdev->dev, in ibmveth_free_buffer_pool()
426 pool->dma_addr[i], in ibmveth_free_buffer_pool()
427 pool->buff_size, in ibmveth_free_buffer_pool()
430 pool->skbuff[i] = NULL; in ibmveth_free_buffer_pool()
435 if (pool->dma_addr) { in ibmveth_free_buffer_pool()
436 kfree(pool->dma_addr); in ibmveth_free_buffer_pool()
437 pool->dma_addr = NULL; in ibmveth_free_buffer_pool()
440 if (pool->skbuff) { in ibmveth_free_buffer_pool()
441 kfree(pool->skbuff); in ibmveth_free_buffer_pool()
442 pool->skbuff = NULL; in ibmveth_free_buffer_pool()
447 * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
449 * @correlator: identifies pool and index
453 * * %0 - success
454 * * %-EINVAL - correlator maps to a pool or index that is out of range
455 * * %-EFAULT - pool and index map to null skb
460 unsigned int pool = correlator >> 32; in ibmveth_remove_buffer_from_pool() local
465 if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) || in ibmveth_remove_buffer_from_pool()
466 WARN_ON(index >= adapter->rx_buff_pool[pool].size)) { in ibmveth_remove_buffer_from_pool()
467 schedule_work(&adapter->work); in ibmveth_remove_buffer_from_pool()
468 return -EINVAL; in ibmveth_remove_buffer_from_pool()
471 skb = adapter->rx_buff_pool[pool].skbuff[index]; in ibmveth_remove_buffer_from_pool()
473 schedule_work(&adapter->work); in ibmveth_remove_buffer_from_pool()
474 return -EFAULT; in ibmveth_remove_buffer_from_pool()
485 adapter->rx_buff_pool[pool].skbuff[index] = NULL; in ibmveth_remove_buffer_from_pool()
487 dma_unmap_single(&adapter->vdev->dev, in ibmveth_remove_buffer_from_pool()
488 adapter->rx_buff_pool[pool].dma_addr[index], in ibmveth_remove_buffer_from_pool()
489 adapter->rx_buff_pool[pool].buff_size, in ibmveth_remove_buffer_from_pool()
493 free_index = adapter->rx_buff_pool[pool].producer_index; in ibmveth_remove_buffer_from_pool()
494 adapter->rx_buff_pool[pool].producer_index++; in ibmveth_remove_buffer_from_pool()
495 if (adapter->rx_buff_pool[pool].producer_index >= in ibmveth_remove_buffer_from_pool()
496 adapter->rx_buff_pool[pool].size) in ibmveth_remove_buffer_from_pool()
497 adapter->rx_buff_pool[pool].producer_index = 0; in ibmveth_remove_buffer_from_pool()
498 adapter->rx_buff_pool[pool].free_map[free_index] = index; in ibmveth_remove_buffer_from_pool()
502 atomic_dec(&(adapter->rx_buff_pool[pool].available)); in ibmveth_remove_buffer_from_pool()
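
As the remove path above shows, free_map acts as a ring of free slot indices: replenish consumes at consumer_index and a returned buffer is produced at producer_index, both wrapping at pool size, with IBM_VETH_INVALID_MAP marking consumed entries. A compact user-space model of that ring:

#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE   4
#define INVALID_MAP 0xffff

static uint16_t free_map[POOL_SIZE] = { 0, 1, 2, 3 };
static uint32_t producer_index, consumer_index;

static int take_free_slot(void)
{
	uint16_t idx = free_map[consumer_index];

	if (idx == INVALID_MAP)
		return -1;                 /* ring empty: nothing to post */
	free_map[consumer_index] = INVALID_MAP;
	if (++consumer_index >= POOL_SIZE)
		consumer_index = 0;
	return idx;
}

static void return_slot(uint16_t idx)
{
	free_map[producer_index] = idx;
	if (++producer_index >= POOL_SIZE)
		producer_index = 0;
}

int main(void)
{
	int slot = take_free_slot();

	printf("took slot %d\n", slot);
	return_slot((uint16_t)slot);
	return 0;
}
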
510 u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator; in ibmveth_rxq_get_buffer()
511 unsigned int pool = correlator >> 32; in ibmveth_rxq_get_buffer() local
514 if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) || in ibmveth_rxq_get_buffer()
515 WARN_ON(index >= adapter->rx_buff_pool[pool].size)) { in ibmveth_rxq_get_buffer()
516 schedule_work(&adapter->work); in ibmveth_rxq_get_buffer()
520 return adapter->rx_buff_pool[pool].skbuff[index]; in ibmveth_rxq_get_buffer()
524 * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
532 * * %0 - success
533 * * other - non-zero return from ibmveth_remove_buffer_from_pool
541 cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator; in ibmveth_rxq_harvest_buffer()
546 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { in ibmveth_rxq_harvest_buffer()
547 adapter->rx_queue.index = 0; in ibmveth_rxq_harvest_buffer()
548 adapter->rx_queue.toggle = !adapter->rx_queue.toggle; in ibmveth_rxq_harvest_buffer()
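
The receive queue uses a toggle bit for ownership: each full lap around the ring flips the driver's expected toggle, and an entry is pending only while its toggle matches (compare ibmveth_rxq_pending_buffer above). A small model of the wrap logic, with the entry toggles reduced to a plain array:

#include <stdint.h>
#include <stdio.h>

#define NUM_SLOTS 3

struct rxq_model {
	uint32_t index;
	int toggle;
	int entry_toggle[NUM_SLOTS]; /* stands in for the flags toggle bit */
};

static int entry_pending(const struct rxq_model *q)
{
	return q->entry_toggle[q->index] == q->toggle;
}

static void advance(struct rxq_model *q)
{
	if (++q->index == NUM_SLOTS) {
		q->index = 0;
		q->toggle = !q->toggle;   /* new lap, new expected toggle */
	}
}

int main(void)
{
	struct rxq_model q = { .toggle = 1, .entry_toggle = { 1, 1, 0 } };

	while (entry_pending(&q)) {
		printf("slot %u pending\n", q.index);
		advance(&q);
	}
	return 0;
}
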
556 dma_unmap_single(&adapter->vdev->dev, adapter->tx_ltb_dma[idx], in ibmveth_free_tx_ltb()
557 adapter->tx_ltb_size, DMA_TO_DEVICE); in ibmveth_free_tx_ltb()
558 kfree(adapter->tx_ltb_ptr[idx]); in ibmveth_free_tx_ltb()
559 adapter->tx_ltb_ptr[idx] = NULL; in ibmveth_free_tx_ltb()
564 adapter->tx_ltb_ptr[idx] = kzalloc(adapter->tx_ltb_size, in ibmveth_allocate_tx_ltb()
566 if (!adapter->tx_ltb_ptr[idx]) { in ibmveth_allocate_tx_ltb()
567 netdev_err(adapter->netdev, in ibmveth_allocate_tx_ltb()
568 "unable to allocate tx long term buffer\n"); in ibmveth_allocate_tx_ltb()
569 return -ENOMEM; in ibmveth_allocate_tx_ltb()
571 adapter->tx_ltb_dma[idx] = dma_map_single(&adapter->vdev->dev, in ibmveth_allocate_tx_ltb()
572 adapter->tx_ltb_ptr[idx], in ibmveth_allocate_tx_ltb()
573 adapter->tx_ltb_size, in ibmveth_allocate_tx_ltb()
575 if (dma_mapping_error(&adapter->vdev->dev, adapter->tx_ltb_dma[idx])) { in ibmveth_allocate_tx_ltb()
576 netdev_err(adapter->netdev, in ibmveth_allocate_tx_ltb()
577 "unable to DMA map tx long term buffer\n"); in ibmveth_allocate_tx_ltb()
578 kfree(adapter->tx_ltb_ptr[idx]); in ibmveth_allocate_tx_ltb()
579 adapter->tx_ltb_ptr[idx] = NULL; in ibmveth_allocate_tx_ltb()
580 return -ENOMEM; in ibmveth_allocate_tx_ltb()
597 rc = h_register_logical_lan(adapter->vdev->unit_address, in ibmveth_register_logical_lan()
598 adapter->buffer_list_dma, rxq_desc.desc, in ibmveth_register_logical_lan()
599 adapter->filter_list_dma, mac_address); in ibmveth_register_logical_lan()
603 rc = h_free_logical_lan(adapter->vdev->unit_address); in ibmveth_register_logical_lan()
618 unsigned long lpar_rc; in ibmveth_open()
626 napi_enable(&adapter->napi); in ibmveth_open()
629 rxq_entries += adapter->rx_buff_pool[i].size; in ibmveth_open()
631 rc = -ENOMEM; in ibmveth_open()
632 adapter->buffer_list_addr = (void *)get_zeroed_page(GFP_KERNEL); in ibmveth_open()
633 if (!adapter->buffer_list_addr) { in ibmveth_open()
638 adapter->filter_list_addr = (void *)get_zeroed_page(GFP_KERNEL); in ibmveth_open()
639 if (!adapter->filter_list_addr) { in ibmveth_open()
644 dev = &adapter->vdev->dev; in ibmveth_open()
646 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * in ibmveth_open()
648 adapter->rx_queue.queue_addr = in ibmveth_open()
649 dma_alloc_coherent(dev, adapter->rx_queue.queue_len, in ibmveth_open()
650 &adapter->rx_queue.queue_dma, GFP_KERNEL); in ibmveth_open()
651 if (!adapter->rx_queue.queue_addr) in ibmveth_open()
654 adapter->buffer_list_dma = dma_map_single(dev, in ibmveth_open()
655 adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); in ibmveth_open()
656 if (dma_mapping_error(dev, adapter->buffer_list_dma)) { in ibmveth_open()
661 adapter->filter_list_dma = dma_map_single(dev, in ibmveth_open()
662 adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); in ibmveth_open()
663 if (dma_mapping_error(dev, adapter->filter_list_dma)) { in ibmveth_open()
668 for (i = 0; i < netdev->real_num_tx_queues; i++) { in ibmveth_open()
673 adapter->rx_queue.index = 0; in ibmveth_open()
674 adapter->rx_queue.num_slots = rxq_entries; in ibmveth_open()
675 adapter->rx_queue.toggle = 1; in ibmveth_open()
677 mac_address = ether_addr_to_u64(netdev->dev_addr); in ibmveth_open()
680 adapter->rx_queue.queue_len; in ibmveth_open()
681 rxq_desc.fields.address = adapter->rx_queue.queue_dma; in ibmveth_open()
683 netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr); in ibmveth_open()
684 netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr); in ibmveth_open()
685 netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr); in ibmveth_open()
687 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); in ibmveth_open()
696 adapter->buffer_list_dma, in ibmveth_open()
697 adapter->filter_list_dma, in ibmveth_open()
700 rc = -ENONET; in ibmveth_open()
705 if (!adapter->rx_buff_pool[i].active) in ibmveth_open()
707 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) { in ibmveth_open()
708 netdev_err(netdev, "unable to alloc pool\n"); in ibmveth_open()
709 adapter->rx_buff_pool[i].active = 0; in ibmveth_open()
710 rc = -ENOMEM; in ibmveth_open()
715 netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq); in ibmveth_open()
716 rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, in ibmveth_open()
720 netdev->irq, rc); in ibmveth_open()
722 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); in ibmveth_open()
728 rc = -ENOMEM; in ibmveth_open()
731 ibmveth_interrupt(netdev->irq, netdev); in ibmveth_open()
740 while (--i >= 0) { in ibmveth_open()
741 if (adapter->rx_buff_pool[i].active) in ibmveth_open()
743 &adapter->rx_buff_pool[i]); in ibmveth_open()
746 dma_unmap_single(dev, adapter->filter_list_dma, 4096, in ibmveth_open()
750 while (--i >= 0) { in ibmveth_open()
755 dma_unmap_single(dev, adapter->buffer_list_dma, 4096, in ibmveth_open()
758 dma_free_coherent(dev, adapter->rx_queue.queue_len, in ibmveth_open()
759 adapter->rx_queue.queue_addr, in ibmveth_open()
760 adapter->rx_queue.queue_dma); in ibmveth_open()
762 free_page((unsigned long)adapter->filter_list_addr); in ibmveth_open()
764 free_page((unsigned long)adapter->buffer_list_addr); in ibmveth_open()
766 napi_disable(&adapter->napi); in ibmveth_open()
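
ibmveth_open unwinds its error paths in reverse order of setup through a goto ladder, as the matched fragments above suggest. The shape, reduced to a user-space skeleton with stubbed steps (names are illustrative, not the driver's labels):

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("setup %s\n", name);
	return fail ? -1 : 0;
}

static int open_model(void)
{
	if (step("buffer list", 0))
		goto out;
	if (step("filter list", 0))
		goto free_buffer_list;
	if (step("rx queue", 1))           /* simulate a failure here */
		goto free_filter_list;
	return 0;

free_filter_list:
	printf("undo filter list\n");
free_buffer_list:
	printf("undo buffer list\n");
out:
	return -1;
}

int main(void)
{
	return open_model() ? 1 : 0;
}
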
773 struct device *dev = &adapter->vdev->dev; in ibmveth_close()
774 long lpar_rc; in ibmveth_close()
779 napi_disable(&adapter->napi); in ibmveth_close()
783 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); in ibmveth_close()
786 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); in ibmveth_close()
794 free_irq(netdev->irq, netdev); in ibmveth_close()
798 dma_unmap_single(dev, adapter->buffer_list_dma, 4096, in ibmveth_close()
800 free_page((unsigned long)adapter->buffer_list_addr); in ibmveth_close()
802 dma_unmap_single(dev, adapter->filter_list_dma, 4096, in ibmveth_close()
804 free_page((unsigned long)adapter->filter_list_addr); in ibmveth_close()
806 dma_free_coherent(dev, adapter->rx_queue.queue_len, in ibmveth_close()
807 adapter->rx_queue.queue_addr, in ibmveth_close()
808 adapter->rx_queue.queue_dma); in ibmveth_close()
811 if (adapter->rx_buff_pool[i].active) in ibmveth_close()
813 &adapter->rx_buff_pool[i]); in ibmveth_close()
815 for (i = 0; i < netdev->real_num_tx_queues; i++) in ibmveth_close()
824 * ibmveth_reset - Handle scheduled reset work
838 struct net_device *netdev = adapter->netdev; in ibmveth_reset()
844 dev_close(adapter->netdev); in ibmveth_reset()
845 dev_open(adapter->netdev, NULL); in ibmveth_reset()
858 &adapter->speed, in ibmveth_set_link_ksettings()
859 &adapter->duplex); in ibmveth_set_link_ksettings()
867 cmd->base.speed = adapter->speed; in ibmveth_get_link_ksettings()
868 cmd->base.duplex = adapter->duplex; in ibmveth_get_link_ksettings()
869 cmd->base.port = PORT_OTHER; in ibmveth_get_link_ksettings()
878 adapter->speed = SPEED_1000; in ibmveth_init_link_settings()
879 adapter->duplex = DUPLEX_FULL; in ibmveth_init_link_settings()
885 strscpy(info->driver, ibmveth_driver_name, sizeof(info->driver)); in netdev_get_drvinfo()
886 strscpy(info->version, ibmveth_driver_version, sizeof(info->version)); in netdev_get_drvinfo()
910 unsigned long set_attr, clr_attr, ret_attr; in ibmveth_set_csum_offload()
911 unsigned long set_attr6, clr_attr6; in ibmveth_set_csum_offload()
912 long ret, ret4, ret6; in ibmveth_set_csum_offload()
934 ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); in ibmveth_set_csum_offload()
938 ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr, in ibmveth_set_csum_offload()
946 h_illan_attributes(adapter->vdev->unit_address, in ibmveth_set_csum_offload()
950 dev->features &= ~NETIF_F_IP_CSUM; in ibmveth_set_csum_offload()
953 adapter->fw_ipv4_csum_support = data; in ibmveth_set_csum_offload()
956 ret6 = h_illan_attributes(adapter->vdev->unit_address, in ibmveth_set_csum_offload()
964 h_illan_attributes(adapter->vdev->unit_address, in ibmveth_set_csum_offload()
968 dev->features &= ~NETIF_F_IPV6_CSUM; in ibmveth_set_csum_offload()
971 adapter->fw_ipv6_csum_support = data; in ibmveth_set_csum_offload()
974 adapter->rx_csum = data; in ibmveth_set_csum_offload()
976 rc1 = -EIO; in ibmveth_set_csum_offload()
978 rc1 = -EIO; in ibmveth_set_csum_offload()
993 unsigned long set_attr, clr_attr, ret_attr; in ibmveth_set_tso()
994 long ret1, ret2; in ibmveth_set_tso()
1011 ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); in ibmveth_set_tso()
1015 ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr, in ibmveth_set_tso()
1022 h_illan_attributes(adapter->vdev->unit_address, in ibmveth_set_tso()
1026 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); in ibmveth_set_tso()
1027 rc1 = -EIO; in ibmveth_set_tso()
1030 adapter->fw_large_send_support = data; in ibmveth_set_tso()
1031 adapter->large_send = data; in ibmveth_set_tso()
1038 dev->features &= ~NETIF_F_TSO6; in ibmveth_set_tso()
1041 adapter->large_send = data; in ibmveth_set_tso()
1058 if (rx_csum != adapter->rx_csum) { in ibmveth_set_features()
1060 if (rc1 && !adapter->rx_csum) in ibmveth_set_features()
1061 dev->features = in ibmveth_set_features()
1066 if (large_send != adapter->large_send) { in ibmveth_set_features()
1068 if (rc2 && !adapter->large_send) in ibmveth_set_features()
1069 dev->features = in ibmveth_set_features()
1093 return -EOPNOTSUPP; in ibmveth_get_sset_count()
1110 channels->max_tx = ibmveth_real_max_tx_queues(); in ibmveth_get_channels()
1111 channels->tx_count = netdev->real_num_tx_queues; in ibmveth_get_channels()
1113 channels->max_rx = netdev->real_num_rx_queues; in ibmveth_get_channels()
1114 channels->rx_count = netdev->real_num_rx_queues; in ibmveth_get_channels()
1121 unsigned int old = netdev->real_num_tx_queues, in ibmveth_set_channels()
1122 goal = channels->tx_count; in ibmveth_set_channels()
1128 if (!(netdev->flags & IFF_UP)) in ibmveth_set_channels()
1138 if (adapter->tx_ltb_ptr[i]) in ibmveth_set_channels()
1160 for (i = old; i > goal; i--) { in ibmveth_set_channels()
1161 if (adapter->tx_ltb_ptr[i - 1]) in ibmveth_set_channels()
1162 ibmveth_free_tx_ltb(adapter, i - 1); in ibmveth_set_channels()
1184 return -EOPNOTSUPP; in ibmveth_ioctl()
1188 unsigned long desc, unsigned long mss) in ibmveth_send()
1190 unsigned long correlator; in ibmveth_send()
1192 unsigned long ret; in ibmveth_send()
1201 ret = h_send_logical_lan(adapter->vdev->unit_address, desc, in ibmveth_send()
1203 adapter->fw_large_send_support); in ibmveth_send()
1204 } while ((ret == H_BUSY) && (retry_count--)); in ibmveth_send()
1207 netdev_err(adapter->netdev, "tx: h_send_logical_lan failed " in ibmveth_send()
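
The send path retries a busy hypervisor call a bounded number of times before giving up, as the do/while above shows. The loop shape with a stubbed hcall (the stub and the retry budget of 1024 are illustrative assumptions):

#include <stdio.h>

#define H_SUCCESS 0
#define H_BUSY    1

static int busy_left = 2;

static unsigned long fake_hcall(void)
{
	return busy_left-- > 0 ? H_BUSY : H_SUCCESS;
}

int main(void)
{
	unsigned long ret;
	int retry_count = 1024;

	do {
		ret = fake_hcall();
	} while (ret == H_BUSY && retry_count--);

	printf(ret == H_SUCCESS ? "sent\n" : "gave up\n");
	return 0;
}
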
1223 if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) { in ibmveth_is_packet_unsupported()
1225 netdev->stats.tx_dropped++; in ibmveth_is_packet_unsupported()
1226 ret = -EOPNOTSUPP; in ibmveth_is_packet_unsupported()
1239 unsigned long mss = 0; in ibmveth_start_xmit()
1244 if (skb->ip_summed == CHECKSUM_PARTIAL && in ibmveth_start_xmit()
1245 ((skb->protocol == htons(ETH_P_IP) && in ibmveth_start_xmit()
1246 ip_hdr(skb)->protocol != IPPROTO_TCP) || in ibmveth_start_xmit()
1247 (skb->protocol == htons(ETH_P_IPV6) && in ibmveth_start_xmit()
1248 ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) && in ibmveth_start_xmit()
1252 netdev->stats.tx_dropped++; in ibmveth_start_xmit()
1258 if (skb->ip_summed == CHECKSUM_PARTIAL) { in ibmveth_start_xmit()
1260 skb->csum_offset; in ibmveth_start_xmit()
1268 if (skb_is_gso(skb) && adapter->fw_large_send_support) in ibmveth_start_xmit()
1272 if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) { in ibmveth_start_xmit()
1273 if (adapter->fw_large_send_support) { in ibmveth_start_xmit()
1274 mss = (unsigned long)skb_shinfo(skb)->gso_size; in ibmveth_start_xmit()
1275 adapter->tx_large_packets++; in ibmveth_start_xmit()
1277 /* Put -1 in the IP checksum to tell phyp it in ibmveth_start_xmit()
1281 ip_hdr(skb)->check = 0xffff; in ibmveth_start_xmit()
1282 tcp_hdr(skb)->check = in ibmveth_start_xmit()
1283 cpu_to_be16(skb_shinfo(skb)->gso_size); in ibmveth_start_xmit()
1284 adapter->tx_large_packets++; in ibmveth_start_xmit()
1289 if (unlikely(skb->len > adapter->tx_ltb_size)) { in ibmveth_start_xmit()
1290 netdev_err(adapter->netdev, "tx: packet size (%u) exceeds ltb (%u)\n", in ibmveth_start_xmit()
1291 skb->len, adapter->tx_ltb_size); in ibmveth_start_xmit()
1292 netdev->stats.tx_dropped++; in ibmveth_start_xmit()
1295 memcpy(adapter->tx_ltb_ptr[queue_num], skb->data, skb_headlen(skb)); in ibmveth_start_xmit()
1298 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in ibmveth_start_xmit()
1299 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in ibmveth_start_xmit()
1301 memcpy(adapter->tx_ltb_ptr[queue_num] + total_bytes, in ibmveth_start_xmit()
1306 if (unlikely(total_bytes != skb->len)) { in ibmveth_start_xmit()
1307 netdev_err(adapter->netdev, "tx: incorrect packet len copied into ltb (%u != %u)\n", in ibmveth_start_xmit()
1308 skb->len, total_bytes); in ibmveth_start_xmit()
1309 netdev->stats.tx_dropped++; in ibmveth_start_xmit()
1312 desc.fields.flags_len = desc_flags | skb->len; in ibmveth_start_xmit()
1313 desc.fields.address = adapter->tx_ltb_dma[queue_num]; in ibmveth_start_xmit()
1318 adapter->tx_send_failed++; in ibmveth_start_xmit()
1319 netdev->stats.tx_dropped++; in ibmveth_start_xmit()
1321 netdev->stats.tx_packets++; in ibmveth_start_xmit()
1322 netdev->stats.tx_bytes += skb->len; in ibmveth_start_xmit()
1339 if (skb->protocol == htons(ETH_P_IP)) { in ibmveth_rx_mss_helper()
1340 struct iphdr *iph = (struct iphdr *)skb->data; in ibmveth_rx_mss_helper()
1342 if (iph->protocol == IPPROTO_TCP) { in ibmveth_rx_mss_helper()
1343 offset = iph->ihl * 4; in ibmveth_rx_mss_helper()
1344 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in ibmveth_rx_mss_helper()
1348 } else if (skb->protocol == htons(ETH_P_IPV6)) { in ibmveth_rx_mss_helper()
1349 struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data; in ibmveth_rx_mss_helper()
1351 if (iph6->nexthdr == IPPROTO_TCP) { in ibmveth_rx_mss_helper()
1353 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in ibmveth_rx_mss_helper()
1363 tcph = (struct tcphdr *)(skb->data + offset); in ibmveth_rx_mss_helper()
1365 skb_shinfo(skb)->gso_size = mss; in ibmveth_rx_mss_helper()
1367 skb_shinfo(skb)->gso_size = ntohs(tcph->check); in ibmveth_rx_mss_helper()
1368 tcph->check = 0; in ibmveth_rx_mss_helper()
1371 if (skb_shinfo(skb)->gso_size) { in ibmveth_rx_mss_helper()
1372 hdr_len = offset + tcph->doff * 4; in ibmveth_rx_mss_helper()
1373 skb_shinfo(skb)->gso_segs = in ibmveth_rx_mss_helper()
1374 DIV_ROUND_UP(skb->len - hdr_len, in ibmveth_rx_mss_helper()
1375 skb_shinfo(skb)->gso_size); in ibmveth_rx_mss_helper()
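
Once the MSS is recovered, the segment count is a ceiling division of the TCP payload by gso_size, exactly as the matched lines compute. A worked example with assumed sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int skb_len = 9014, hdr_len = 54, gso_size = 1448;

	printf("gso_segs = %u\n",
	       DIV_ROUND_UP(skb_len - hdr_len, gso_size));
	/* (9014 - 54 + 1447) / 1448 = 7 segments */
	return 0;
}
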
1389 skb_proto = be16_to_cpu(skb->protocol); in ibmveth_rx_csum_helper()
1392 iph = (struct iphdr *)skb->data; in ibmveth_rx_csum_helper()
1397 if (iph->check == 0xffff) { in ibmveth_rx_csum_helper()
1398 iph->check = 0; in ibmveth_rx_csum_helper()
1399 iph->check = ip_fast_csum((unsigned char *)iph, in ibmveth_rx_csum_helper()
1400 iph->ihl); in ibmveth_rx_csum_helper()
1403 iphlen = iph->ihl * 4; in ibmveth_rx_csum_helper()
1404 iph_proto = iph->protocol; in ibmveth_rx_csum_helper()
1406 iph6 = (struct ipv6hdr *)skb->data; in ibmveth_rx_csum_helper()
1408 iph_proto = iph6->nexthdr; in ibmveth_rx_csum_helper()
1423 * So, re-compute TCP pseudo header checksum. in ibmveth_rx_csum_helper()
1427 struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen); in ibmveth_rx_csum_helper()
1429 if (tcph->check == 0x0000) { in ibmveth_rx_csum_helper()
1431 tcphdrlen = skb->len - iphlen; in ibmveth_rx_csum_helper()
1433 tcph->check = in ibmveth_rx_csum_helper()
1434 ~csum_tcpudp_magic(iph->saddr, in ibmveth_rx_csum_helper()
1435 iph->daddr, tcphdrlen, iph_proto, 0); in ibmveth_rx_csum_helper()
1437 tcph->check = in ibmveth_rx_csum_helper()
1438 ~csum_ipv6_magic(&iph6->saddr, in ibmveth_rx_csum_helper()
1439 &iph6->daddr, tcphdrlen, iph_proto, 0); in ibmveth_rx_csum_helper()
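
The helper above reseeds a zeroed TCP checksum with the pseudo-header sum so checksum completion can finish it later. A user-space sketch of the IPv4 ones'-complement arithmetic involved; this is a simplified model, not the kernel's csum_tcpudp_magic:

#include <stdint.h>
#include <stdio.h>

static uint32_t add16(uint32_t sum, uint16_t v)
{
	sum += v;
	return (sum & 0xffff) + (sum >> 16);   /* end-around carry */
}

static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint16_t len, uint8_t proto)
{
	uint32_t sum = 0;

	sum = add16(sum, saddr >> 16);
	sum = add16(sum, saddr & 0xffff);
	sum = add16(sum, daddr >> 16);
	sum = add16(sum, daddr & 0xffff);
	sum = add16(sum, proto);
	sum = add16(sum, len);
	return (uint16_t)sum;   /* the partial sum; ~sum is the folded checksum */
}

int main(void)
{
	/* 192.168.0.1 -> 192.168.0.2, 40 bytes of TCP (proto 6) */
	printf("0x%04x\n", pseudo_hdr_sum(0xc0a80001, 0xc0a80002, 40, 6));
	return 0;
}
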
1452 struct net_device *netdev = adapter->netdev; in ibmveth_poll()
1454 unsigned long lpar_rc; in ibmveth_poll()
1465 adapter->rx_invalid_buffer++; in ibmveth_poll()
1484 * skb->data at this stage in ibmveth_poll()
1487 __be64 *rxmss = (__be64 *)(skb->data + 8); in ibmveth_poll()
1498 skb->data + offset, in ibmveth_poll()
1501 ibmveth_flush_buffer(skb->data, in ibmveth_poll()
1513 skb->protocol = eth_type_trans(skb, netdev); in ibmveth_poll()
1515 /* PHYP without PLSO support places a -1 in the ip in ibmveth_poll()
1518 if (skb->protocol == cpu_to_be16(ETH_P_IP)) { in ibmveth_poll()
1519 struct iphdr *iph = (struct iphdr *)skb->data; in ibmveth_poll()
1521 iph_check = iph->check; in ibmveth_poll()
1524 if ((length > netdev->mtu + ETH_HLEN) || in ibmveth_poll()
1527 adapter->rx_large_packets++; in ibmveth_poll()
1531 skb->ip_summed = CHECKSUM_UNNECESSARY; in ibmveth_poll()
1537 netdev->stats.rx_packets++; in ibmveth_poll()
1538 netdev->stats.rx_bytes += length; in ibmveth_poll()
1551 /* We think we are done - reenable interrupts, in ibmveth_poll()
1554 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE); in ibmveth_poll()
1556 schedule_work(&adapter->work); in ibmveth_poll()
1561 lpar_rc = h_vio_signal(adapter->vdev->unit_address, in ibmveth_poll()
1574 unsigned long lpar_rc; in ibmveth_interrupt()
1576 if (napi_schedule_prep(&adapter->napi)) { in ibmveth_interrupt()
1577 lpar_rc = h_vio_signal(adapter->vdev->unit_address, in ibmveth_interrupt()
1580 __napi_schedule(&adapter->napi); in ibmveth_interrupt()
1588 unsigned long lpar_rc; in ibmveth_set_multicast_list()
1590 if ((netdev->flags & IFF_PROMISC) || in ibmveth_set_multicast_list()
1591 (netdev_mc_count(netdev) > adapter->mcastFilterSize)) { in ibmveth_set_multicast_list()
1592 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, in ibmveth_set_multicast_list()
1603 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, in ibmveth_set_multicast_list()
1617 mcast_addr = ether_addr_to_u64(ha->addr); in ibmveth_set_multicast_list()
1618 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, in ibmveth_set_multicast_list()
1628 /* re-enable filtering */ in ibmveth_set_multicast_list()
1629 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, in ibmveth_set_multicast_list()
1642 struct vio_dev *viodev = adapter->vdev; in ibmveth_change_mtu()
1648 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) in ibmveth_change_mtu()
1652 return -EINVAL; in ibmveth_change_mtu()
1656 if (netif_running(adapter->netdev)) { in ibmveth_change_mtu()
1658 ibmveth_close(adapter->netdev); in ibmveth_change_mtu()
1661 /* Look for an active buffer pool that can hold the new MTU */ in ibmveth_change_mtu()
1663 adapter->rx_buff_pool[i].active = 1; in ibmveth_change_mtu()
1665 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) { in ibmveth_change_mtu()
1666 WRITE_ONCE(dev->mtu, new_mtu); in ibmveth_change_mtu()
1671 return ibmveth_open(adapter->netdev); in ibmveth_change_mtu()
1677 if (need_restart && (rc = ibmveth_open(adapter->netdev))) in ibmveth_change_mtu()
1680 return -EINVAL; in ibmveth_change_mtu()
1687 ibmveth_interrupt(dev->irq, dev); in ibmveth_poll_controller()
1692 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
1699 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) in ibmveth_get_desired_dma()
1701 struct net_device *netdev = dev_get_drvdata(&vdev->dev); in ibmveth_get_desired_dma()
1704 unsigned long ret; in ibmveth_get_desired_dma()
1708 tbl = get_iommu_table_base(&vdev->dev); in ibmveth_get_desired_dma()
1717 ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl); in ibmveth_get_desired_dma()
1723 if (adapter->rx_buff_pool[i].active) in ibmveth_get_desired_dma()
1725 adapter->rx_buff_pool[i].size * in ibmveth_get_desired_dma()
1726 IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i]. in ibmveth_get_desired_dma()
1728 rxqentries += adapter->rx_buff_pool[i].size; in ibmveth_get_desired_dma()
1744 if (!is_valid_ether_addr(addr->sa_data)) in ibmveth_set_mac_addr()
1745 return -EADDRNOTAVAIL; in ibmveth_set_mac_addr()
1747 mac_address = ether_addr_to_u64(addr->sa_data); in ibmveth_set_mac_addr()
1748 rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address); in ibmveth_set_mac_addr()
1750 netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc); in ibmveth_set_mac_addr()
1754 eth_hw_addr_set(dev, addr->sa_data); in ibmveth_set_mac_addr()
1782 long ret; in ibmveth_probe()
1783 unsigned long ret_attr; in ibmveth_probe()
1785 dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n", in ibmveth_probe()
1786 dev->unit_address); in ibmveth_probe()
1791 dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n"); in ibmveth_probe()
1792 return -EINVAL; in ibmveth_probe()
1798 dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n", in ibmveth_probe()
1800 return -EINVAL; in ibmveth_probe()
1807 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " in ibmveth_probe()
1809 return -EINVAL; in ibmveth_probe()
1814 return -ENOMEM; in ibmveth_probe()
1817 dev_set_drvdata(&dev->dev, netdev); in ibmveth_probe()
1819 adapter->vdev = dev; in ibmveth_probe()
1820 adapter->netdev = netdev; in ibmveth_probe()
1821 INIT_WORK(&adapter->work, ibmveth_reset); in ibmveth_probe()
1822 adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p); in ibmveth_probe()
1825 netif_napi_add_weight(netdev, &adapter->napi, ibmveth_poll, 16); in ibmveth_probe()
1827 netdev->irq = dev->irq; in ibmveth_probe()
1828 netdev->netdev_ops = &ibmveth_netdev_ops; in ibmveth_probe()
1829 netdev->ethtool_ops = &netdev_ethtool_ops; in ibmveth_probe()
1830 SET_NETDEV_DEV(netdev, &dev->dev); in ibmveth_probe()
1831 netdev->hw_features = NETIF_F_SG; in ibmveth_probe()
1832 if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) { in ibmveth_probe()
1833 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in ibmveth_probe()
1837 netdev->features |= netdev->hw_features; in ibmveth_probe()
1839 ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); in ibmveth_probe()
1844 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; in ibmveth_probe()
1845 netdev->features |= netdev->hw_features; in ibmveth_probe()
1847 netdev->hw_features |= NETIF_F_TSO; in ibmveth_probe()
1850 adapter->is_active_trunk = false; in ibmveth_probe()
1852 adapter->is_active_trunk = true; in ibmveth_probe()
1853 netdev->hw_features |= NETIF_F_FRAGLIST; in ibmveth_probe()
1854 netdev->features |= NETIF_F_FRAGLIST; in ibmveth_probe()
1859 adapter->rx_buffers_per_hcall = IBMVETH_MAX_RX_PER_HCALL; in ibmveth_probe()
1861 "RX Multi-buffer hcall supported by FW, batch set to %u\n", in ibmveth_probe()
1862 adapter->rx_buffers_per_hcall); in ibmveth_probe()
1864 adapter->rx_buffers_per_hcall = 1; in ibmveth_probe()
1866 "RX Single-buffer hcall mode, batch set to %u\n", in ibmveth_probe()
1867 adapter->rx_buffers_per_hcall); in ibmveth_probe()
1870 netdev->min_mtu = IBMVETH_MIN_MTU; in ibmveth_probe()
1871 netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH; in ibmveth_probe()
1879 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; in ibmveth_probe()
1882 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, in ibmveth_probe()
1886 &dev->dev.kobj, "pool%d", i); in ibmveth_probe()
1899 adapter->tx_ltb_size = PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE); in ibmveth_probe()
1901 adapter->tx_ltb_ptr[i] = NULL; in ibmveth_probe()
1906 ibmveth_set_features(netdev, netdev->features); in ibmveth_probe()
1923 struct net_device *netdev = dev_get_drvdata(&dev->dev); in ibmveth_remove()
1927 cancel_work_sync(&adapter->work); in ibmveth_remove()
1930 kobject_put(&adapter->rx_buff_pool[i].kobj); in ibmveth_remove()
1935 dev_set_drvdata(&dev->dev, NULL); in ibmveth_remove()
1945 struct ibmveth_buff_pool *pool = container_of(kobj, in veth_pool_show() local
1950 return sprintf(buf, "%d\n", pool->active); in veth_pool_show()
1952 return sprintf(buf, "%d\n", pool->size); in veth_pool_show()
1954 return sprintf(buf, "%d\n", pool->buff_size); in veth_pool_show()
1959 * veth_pool_store - sysfs store handler for pool attributes
1960 * @kobj: kobject embedded in pool
1965 * Stores new value in pool attribute. Verifies the range of the new value for
1966 * size and buff_size. Verifies that at least one pool remains available to
1967 * receive MTU-sized packets.
1973 * * %-EPERM - Not allowed to disable all MTU-sized buffer pools
1974 * * %-EINVAL - New pool size or buffer size is out of range
1975 * * count - Return count for success
1976 * * other - Return value from a failed ibmveth_open call
1981 struct ibmveth_buff_pool *pool = container_of(kobj, in veth_pool_store() local
1984 struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent)); in veth_pool_store()
1986 long value = simple_strtol(buf, NULL, 10); in veth_pool_store()
1994 long rc; in veth_pool_store()
1998 oldbuff_size = pool->buff_size; in veth_pool_store()
1999 oldactive = pool->active; in veth_pool_store()
2000 oldsize = pool->size; in veth_pool_store()
2011 int mtu = netdev->mtu + IBMVETH_BUFF_OH; in veth_pool_store()
2013 /* Make sure there is a buffer pool with buffers that in veth_pool_store()
2016 if (pool == &adapter->rx_buff_pool[i]) in veth_pool_store()
2018 if (!adapter->rx_buff_pool[i].active) in veth_pool_store()
2020 if (mtu <= adapter->rx_buff_pool[i].buff_size) in veth_pool_store()
2025 netdev_err(netdev, "no active pool >= MTU\n"); in veth_pool_store()
2026 rc = -EPERM; in veth_pool_store()
2035 rc = -EINVAL; in veth_pool_store()
2044 rc = -EINVAL; in veth_pool_store()
2057 pool->active = newactive; in veth_pool_store()
2058 pool->buff_size = newbuff_size; in veth_pool_store()
2059 pool->size = newsize; in veth_pool_store()
2064 pool->active = oldactive; in veth_pool_store()
2065 pool->buff_size = oldbuff_size; in veth_pool_store()
2066 pool->size = oldsize; in veth_pool_store()
2074 ibmveth_interrupt(netdev->irq, netdev); in veth_pool_store()
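
veth_pool_store snapshots the pool settings, applies the new values, and rolls back if reopening the device fails, as the old*/new* lines above show. The pattern reduced to a user-space sketch (reopen() is a stub standing in for ibmveth_open):

#include <stdio.h>

struct pool_model {
	unsigned int size, buff_size;
	int active;
};

static int reopen(void) { return -1; }   /* simulate the reopen failing */

static long apply(struct pool_model *p, unsigned int newsize,
		  unsigned int newbuff, int newactive)
{
	struct pool_model old = *p;        /* snapshot before mutating */
	long rc;

	p->size = newsize;
	p->buff_size = newbuff;
	p->active = newactive;

	rc = reopen();
	if (rc) {
		*p = old;                  /* restore on failure */
		printf("rolled back\n");
	}
	return rc;
}

int main(void)
{
	struct pool_model p = { 512, 2048, 1 };

	return apply(&p, 1024, 4096, 1) ? 1 : 0;
}
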
2114 ibmveth_interrupt(netdev->irq, netdev); in ibmveth_resume()
2119 { "network", "IBM,l-lan"},
2157 * ibmveth_reset_kunit - reset routine for running in KUnit environment
2172 * ibmveth_remove_buffer_from_pool_test - unit test for some of
2185 struct ibmveth_buff_pool *pool; in ibmveth_remove_buffer_from_pool_test() local
2190 INIT_WORK(&adapter->work, ibmveth_reset_kunit); in ibmveth_remove_buffer_from_pool_test()
2194 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, in ibmveth_remove_buffer_from_pool_test()
2198 pool = &adapter->rx_buff_pool[0]; in ibmveth_remove_buffer_from_pool_test()
2199 pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL); in ibmveth_remove_buffer_from_pool_test()
2200 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff); in ibmveth_remove_buffer_from_pool_test()
2203 KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false)); in ibmveth_remove_buffer_from_pool_test()
2204 KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true)); in ibmveth_remove_buffer_from_pool_test()
2206 correlator = ((u64)0 << 32) | adapter->rx_buff_pool[0].size; in ibmveth_remove_buffer_from_pool_test()
2207 KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false)); in ibmveth_remove_buffer_from_pool_test()
2208 KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true)); in ibmveth_remove_buffer_from_pool_test()
2211 pool->skbuff[0] = NULL; in ibmveth_remove_buffer_from_pool_test()
2212 KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, false)); in ibmveth_remove_buffer_from_pool_test()
2213 KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, true)); in ibmveth_remove_buffer_from_pool_test()
2215 flush_work(&adapter->work); in ibmveth_remove_buffer_from_pool_test()
2219 * ibmveth_rxq_get_buffer_test - unit test for ibmveth_rxq_get_buffer
2232 struct ibmveth_buff_pool *pool; in ibmveth_rxq_get_buffer_test() local
2237 INIT_WORK(&adapter->work, ibmveth_reset_kunit); in ibmveth_rxq_get_buffer_test()
2239 adapter->rx_queue.queue_len = 1; in ibmveth_rxq_get_buffer_test()
2240 adapter->rx_queue.index = 0; in ibmveth_rxq_get_buffer_test()
2241 adapter->rx_queue.queue_addr = kunit_kzalloc(test, sizeof(struct ibmveth_rx_q_entry), in ibmveth_rxq_get_buffer_test()
2243 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter->rx_queue.queue_addr); in ibmveth_rxq_get_buffer_test()
2247 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, in ibmveth_rxq_get_buffer_test()
2251 pool = &adapter->rx_buff_pool[0]; in ibmveth_rxq_get_buffer_test()
2252 pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL); in ibmveth_rxq_get_buffer_test()
2253 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff); in ibmveth_rxq_get_buffer_test()
2255 adapter->rx_queue.queue_addr[0].correlator = (u64)IBMVETH_NUM_BUFF_POOLS << 32 | 0; in ibmveth_rxq_get_buffer_test()
2258 adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | adapter->rx_buff_pool[0].size; in ibmveth_rxq_get_buffer_test()
2261 pool->skbuff[0] = skb; in ibmveth_rxq_get_buffer_test()
2262 adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | 0; in ibmveth_rxq_get_buffer_test()
2265 flush_work(&adapter->work); in ibmveth_rxq_get_buffer_test()
2275 .name = "ibmveth-kunit-test",