// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"

bool gve_tx_was_added_to_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	return block->tx != NULL;
}

void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	block->tx = NULL;
}

void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2,
					 num_online_cpus());
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_tx_ring *tx = &priv->tx[queue_idx];

	block->tx = tx;
	tx->ntfy_id = ntfy_idx;
	/* Map this queue's transmits (XPS) to one online CPU, derived from
	 * its notify block index.
	 */
	netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus),
			    queue_idx);
}

bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];

	return block->rx != NULL;
}

void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];

	block->rx = NULL;
}

void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_rx_ring *rx = &priv->rx[queue_idx];

	block->rx = rx;
	rx->ntfy_id = ntfy_idx;
}

/* Copy a received buffer into a new linear skb allocated from the NAPI
 * per-CPU cache.
 */
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
			    struct gve_rx_slot_page_info *page_info, u16 len)
{
	void *va = page_info->page_address + page_info->page_offset +
		page_info->pad;
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, len);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, len);
	skb_copy_to_linear_data_offset(skb, 0, va, len);
	skb->protocol = eth_type_trans(skb, dev);

	return skb;
}

/* Consume one reference from the page's bias; once the bias is exhausted,
 * replenish the bias and the real page refcount together.
 */
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
	page_info->pagecnt_bias--;
	if (page_info->pagecnt_bias == 0) {
		int pagecount = page_count(page_info->page);

		/* If we have run out of bias - set it back up to INT_MAX
		 * minus the existing refs.
		 */
		page_info->pagecnt_bias = INT_MAX - pagecount;

		/* Set pagecount back up to max. */
		page_ref_add(page_info->page, INT_MAX - pagecount);
	}
}

void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
		  int (*gve_poll)(struct napi_struct *, int))
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_add(priv->dev, &block->napi, gve_poll);
}

void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_del(&block->napi);
}
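
/* Usage sketch (illustrative only, not part of the upstream file): one
 * plausible way a queue bring-up path could combine the helpers above.
 * The function below is hypothetical; in the real driver this wiring is
 * done by gve_main.c with its own poll callbacks. Kept under #if 0 so it
 * is never compiled into the object.
 */
#if 0
static void gve_example_enable_tx_queue(struct gve_priv *priv, int queue_idx,
					int (*poll)(struct napi_struct *, int))
{
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);

	/* Register a NAPI context on the queue's notify block first... */
	gve_add_napi(priv, ntfy_idx, poll);

	/* ...then publish the ring so the poll path sees a live block->tx
	 * and gve_tx_was_added_to_block() reports true.
	 */
	gve_tx_add_to_block(priv, queue_idx);

	napi_enable(&priv->ntfy_blocks[ntfy_idx].napi);
}
#endif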