#ifndef _NET_GRO_CELLS_H
#define _NET_GRO_CELLS_H

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

/*
 * gro_cells: helper for virtual/tunnel devices to feed received packets
 * into GRO through a per-cpu NAPI context, instead of bypassing GRO via
 * netif_rx().
 */
struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
};

struct gro_cells {
	struct gro_cell __percpu	*cells;
};

static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct gro_cell *cell;
	struct net_device *dev = skb->dev;

	/* Fall back to netif_rx() if the cells are not set up, the skb is
	 * cloned, or the device does not have GRO enabled.
	 */
	if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
		netif_rx(skb);
		return;
	}

	cell = this_cpu_ptr(gcells->cells);

	/* Drop the packet if this cpu's queue already exceeds the backlog limit */
	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return;
	}

	/* We run in BH context */
	spin_lock(&cell->napi_skbs.lock);

	__skb_queue_tail(&cell->napi_skbs, skb);
	/* Schedule NAPI only on the empty -> non-empty transition */
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);

	spin_unlock(&cell->napi_skbs.lock);
}

/* called under BH context */
static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	spin_lock(&cell->napi_skbs.lock);
	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		/* Drop the queue lock while handing the skb to GRO */
		spin_unlock(&cell->napi_skbs.lock);
		napi_gro_receive(napi, skb);
		work_done++;
		spin_lock(&cell->napi_skbs.lock);
	}

	if (work_done < budget)
		napi_complete(napi);
	spin_unlock(&cell->napi_skbs.lock);
	return work_done;
}

static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		skb_queue_head_init(&cell->napi_skbs);
		netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
		napi_enable(&cell->napi);
	}
	return 0;
}

static inline void gro_cells_destroy(struct gro_cells *gcells)
{
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		netif_napi_del(&cell->napi);
		skb_queue_purge(&cell->napi_skbs);
	}
	free_percpu(gcells->cells);
	gcells->cells = NULL;
}

#endif
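
/*
 * Illustrative usage sketch, not part of the original header: a tunnel
 * driver would typically embed a struct gro_cells in its private data,
 * initialize it when the device is set up, feed received skbs through
 * gro_cells_receive() in BH context, and tear everything down on uninit.
 * All foo_* names below are hypothetical; the block is guarded out so it
 * is never compiled.
 */
#if 0
struct foo_tunnel {
	struct net_device	*dev;
	struct gro_cells	gro_cells;
};

static int foo_dev_init(struct net_device *dev)
{
	struct foo_tunnel *t = netdev_priv(dev);

	return gro_cells_init(&t->gro_cells, dev);
}

static void foo_dev_uninit(struct net_device *dev)
{
	struct foo_tunnel *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
}

/* rx path, runs in BH context; skb->dev must point at the tunnel device,
 * since gro_cells_receive() reads it to check NETIF_F_GRO.
 */
static void foo_tunnel_rcv(struct foo_tunnel *t, struct sk_buff *skb)
{
	skb->dev = t->dev;
	gro_cells_receive(&t->gro_cells, skb);
}
#endif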