// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/gro_cells.h>
#include <net/hotdata.h>

/* One per-CPU cell: a queue of skbs waiting to be GROed, plus the
 * NAPI context that drains it.
 */
struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
};

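/*
 * For reference, the public handle defined in <net/gro_cells.h> wraps
 * only the per-CPU pointer (quoted from the header, modulo version
 * drift):
 *
 *	struct gro_cells {
 *		struct gro_cell __percpu	*cells;
 *	};
 */
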
int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct gro_cell *cell;
	int res;

	rcu_read_lock();
	if (unlikely(!(dev->flags & IFF_UP)))
		goto drop;

	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
		res = netif_rx(skb);
		goto unlock;
	}

	cell = this_cpu_ptr(gcells->cells);

	if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
drop:
		dev_core_stats_rx_dropped_inc(dev);
		kfree_skb(skb);
		res = NET_RX_DROP;
		goto unlock;
	}

	/* Callers run with BH disabled, so gro_cell_poll() cannot run
	 * concurrently on this CPU and the lockless queue is safe.
	 * A non-empty queue means the NAPI is already scheduled, so we
	 * only schedule it on the empty -> non-empty transition.
	 */
	__skb_queue_tail(&cell->napi_skbs, skb);
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);

	res = NET_RX_SUCCESS;

unlock:
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(gro_cells_receive);

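/*
 * Usage sketch (illustrative only, not part of this file): a tunnel
 * driver typically embeds a struct gro_cells in its private data and
 * feeds decapsulated skbs through it, so they are GRO-aggregated by
 * the per-CPU cell's NAPI instead of going straight to netif_rx().
 * The names below (struct foo_tunnel, foo_rcv) are hypothetical:
 *
 *	struct foo_tunnel {
 *		struct net_device *dev;
 *		struct gro_cells gro_cells;
 *	};
 *
 *	static int foo_rcv(struct foo_tunnel *t, struct sk_buff *skb)
 *	{
 *		skb->dev = t->dev;
 *		return gro_cells_receive(&t->gro_cells, skb);
 *	}
 */
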
/* called under BH context */
static int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	/* NAPI contract: complete only if the queue was drained within
	 * budget; consuming the whole budget keeps the NAPI scheduled.
	 */
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		__skb_queue_head_init(&cell->napi_skbs);

		/* These NAPI instances are not tied to a hardware queue;
		 * keep them out of busy polling.
		 */
		set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);

		netif_napi_add(dev, &cell->napi, gro_cell_poll);
		napi_enable(&cell->napi);
	}
	return 0;
}
EXPORT_SYMBOL(gro_cells_init);

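/*
 * Lifecycle sketch (illustrative only): gro_cells_init() is typically
 * called from the device's ndo_init() once the net_device exists, and
 * must be paired with gro_cells_destroy() (see the sketch at the end
 * of this file). Hypothetical names as in the sketch above:
 *
 *	static int foo_dev_init(struct net_device *dev)
 *	{
 *		struct foo_tunnel *t = netdev_priv(dev);
 *
 *		t->dev = dev;
 *		return gro_cells_init(&t->gro_cells, dev);
 *	}
 */
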
/* Deferred free of the per-CPU cells: carries the pointer through an
 * RCU grace period (see gro_cells_destroy() below).
 */
struct percpu_free_defer {
	struct rcu_head rcu;
	void __percpu	*ptr;
};

static void percpu_free_defer_callback(struct rcu_head *head)
{
	struct percpu_free_defer *defer;

	defer = container_of(head, struct percpu_free_defer, rcu);
	free_percpu(defer->ptr);
	kfree(defer);
}

void gro_cells_destroy(struct gro_cells *gcells)
{
	struct percpu_free_defer *defer;
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		napi_disable(&cell->napi);
		__netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
	/* We need to observe an rcu grace period before freeing ->cells,
	 * because netpoll could access dev->napi_list under rcu protection.
	 * Try hard using call_rcu() instead of synchronize_rcu(),
	 * because we might be called from cleanup_net(), and we
	 * definitely do not want to block this critical task.
	 */
	defer = kmalloc(sizeof(*defer), GFP_KERNEL | __GFP_NOWARN);
	if (likely(defer)) {
		defer->ptr = gcells->cells;
		call_rcu(&defer->rcu, percpu_free_defer_callback);
	} else {
		/* We do not hold RTNL at this point, synchronize_net()
		 * would not be able to expedite this sync.
		 */
		synchronize_rcu_expedited();
		free_percpu(gcells->cells);
	}
	gcells->cells = NULL;
}
EXPORT_SYMBOL(gro_cells_destroy);
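
/*
 * Teardown sketch (illustrative only), completing the lifecycle shown
 * in the earlier sketches, typically from ndo_uninit():
 *
 *	static void foo_dev_uninit(struct net_device *dev)
 *	{
 *		struct foo_tunnel *t = netdev_priv(dev);
 *
 *		gro_cells_destroy(&t->gro_cells);
 *	}
 */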