// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/gro_cells.h>
#include <net/hotdata.h>

struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
	local_lock_t		bh_lock;
};

/* Queue @skb on the current CPU's cell and kick its NAPI so the packet is
 * fed through GRO in softirq context. Falls back to netif_rx() when GRO
 * cannot be used, and drops when the per-cell backlog is already full.
 */
int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	bool have_bh_lock = false;
	struct gro_cell *cell;
	int res;

	rcu_read_lock();
	if (unlikely(!(dev->flags & IFF_UP)))
		goto drop;

	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
		res = netif_rx(skb);
		goto unlock;
	}

	local_lock_nested_bh(&gcells->cells->bh_lock);
	have_bh_lock = true;
	cell = this_cpu_ptr(gcells->cells);

	if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
drop:
		dev_core_stats_rx_dropped_inc(dev);
		kfree_skb(skb);
		res = NET_RX_DROP;
		goto unlock;
	}

	__skb_queue_tail(&cell->napi_skbs, skb);
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);

	res = NET_RX_SUCCESS;

unlock:
	/* Release the per-CPU lock on every exit path that acquired it,
	 * including the queue-full drop path above.
	 */
	if (have_bh_lock)
		local_unlock_nested_bh(&gcells->cells->bh_lock);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(gro_cells_receive);

/* called under BH context */
static int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	__local_lock_nested_bh(&cell->bh_lock);
	/* Feed queued skbs into GRO, up to the NAPI budget. */
	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	__local_unlock_nested_bh(&cell->bh_lock);
	return work_done;
}

int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		__skb_queue_head_init(&cell->napi_skbs);
		local_lock_init(&cell->bh_lock);

		/* This NAPI backs no real device queue; opt it out of busy polling. */
		set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);

		netif_napi_add(dev, &cell->napi, gro_cell_poll);
		napi_enable(&cell->napi);
	}
	return 0;
}
EXPORT_SYMBOL(gro_cells_init);

struct percpu_free_defer {
	struct rcu_head rcu;
	void __percpu	*ptr;
};

static void percpu_free_defer_callback(struct rcu_head *head)
{
	struct percpu_free_defer *defer;

	defer = container_of(head, struct percpu_free_defer, rcu);
	free_percpu(defer->ptr);
	kfree(defer);
}

void gro_cells_destroy(struct gro_cells *gcells)
{
	struct percpu_free_defer *defer;
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		napi_disable(&cell->napi);
		__netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
	/* We need to observe an rcu grace period before freeing ->cells,
	 * because netpoll could access dev->napi_list under rcu protection.
	 * Try hard using call_rcu() instead of synchronize_rcu(),
	 * because we might be called from cleanup_net(), and we
	 * definitely do not want to block this critical task.
	 */
	defer = kmalloc(sizeof(*defer), GFP_KERNEL | __GFP_NOWARN);
	if (likely(defer)) {
		defer->ptr = gcells->cells;
		call_rcu(&defer->rcu, percpu_free_defer_callback);
	} else {
		/* We do not hold RTNL at this point, synchronize_net()
		 * would not be able to expedite this sync.
		 */
		synchronize_rcu_expedited();
		free_percpu(gcells->cells);
	}
	gcells->cells = NULL;
}
EXPORT_SYMBOL(gro_cells_destroy);
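
/* Usage sketch (illustrative only, not part of this file): a caller such as
 * a tunnel driver typically embeds a struct gro_cells in its per-device
 * private data and wires up the three exported entry points roughly as
 * below. The foo_tunnel structure and foo_* functions are hypothetical
 * placeholders, not an API defined here.
 *
 *	struct foo_tunnel {
 *		struct net_device *dev;
 *		struct gro_cells gro_cells;
 *	};
 *
 *	static int foo_tunnel_dev_init(struct net_device *dev)
 *	{
 *		struct foo_tunnel *t = netdev_priv(dev);
 *
 *		t->dev = dev;
 *		return gro_cells_init(&t->gro_cells, dev);
 *	}
 *
 *	static int foo_tunnel_rcv(struct foo_tunnel *t, struct sk_buff *skb)
 *	{
 *		skb->dev = t->dev;
 *		return gro_cells_receive(&t->gro_cells, skb);
 *	}
 *
 *	static void foo_tunnel_dev_uninit(struct net_device *dev)
 *	{
 *		struct foo_tunnel *t = netdev_priv(dev);
 *
 *		gro_cells_destroy(&t->gro_cells);
 *	}
 */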