xref: /linux/net/core/netdev_rx_queue.c (revision 26db4dbb747813b5946aff31485873f071a10332)
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/netdevice.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/memory_provider.h>

#include "page_pool_priv.h"

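/**
 * netdev_rx_queue_restart - restart an Rx queue with freshly allocated memory
 * @dev: network device
 * @rxq_idx: index of the Rx queue to restart
 *
 * Allocate new queue memory through the device's queue management ops, stop
 * the queue, bring it back up on the new memory and free the old memory.
 * If starting on the new memory fails, the queue is brought back up on the
 * old memory instead. Requires all queue_mgmt_ops callbacks to be
 * implemented and must be called with the rtnl lock held.
 *
 * Return: 0 on success, -EOPNOTSUPP if the device has no queue management
 * ops, -ENOMEM on allocation failure, or a negative errno from the driver
 * callbacks.
 */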
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
	void *new_mem, *old_mem;
	int err;

	if (!dev->queue_mgmt_ops || !dev->queue_mgmt_ops->ndo_queue_stop ||
	    !dev->queue_mgmt_ops->ndo_queue_mem_free ||
	    !dev->queue_mgmt_ops->ndo_queue_mem_alloc ||
	    !dev->queue_mgmt_ops->ndo_queue_start)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	new_mem = kvzalloc(dev->queue_mgmt_ops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem)
		return -ENOMEM;

	old_mem = kvzalloc(dev->queue_mgmt_ops->ndo_queue_mem_size, GFP_KERNEL);
	if (!old_mem) {
		err = -ENOMEM;
		goto err_free_new_mem;
	}

	err = dev->queue_mgmt_ops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
	if (err)
		goto err_free_old_mem;

	err = page_pool_check_memory_provider(dev, rxq);
	if (err)
		goto err_free_new_queue_mem;

	err = dev->queue_mgmt_ops->ndo_queue_stop(dev, old_mem, rxq_idx);
	if (err)
		goto err_free_new_queue_mem;

	err = dev->queue_mgmt_ops->ndo_queue_start(dev, new_mem, rxq_idx);
	if (err)
		goto err_start_queue;

	dev->queue_mgmt_ops->ndo_queue_mem_free(dev, old_mem);

	kvfree(old_mem);
	kvfree(new_mem);

	return 0;

err_start_queue:
	/* Restarting the queue with old_mem should be successful as we haven't
	 * changed any of the queue configuration, and there is not much we can
	 * do to recover from a failure here.
	 *
	 * WARN if we fail to recover the old rx queue, and at least free
	 * old_mem so we don't also leak that.
	 */
	if (dev->queue_mgmt_ops->ndo_queue_start(dev, old_mem, rxq_idx)) {
		WARN(1,
		     "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
		     rxq_idx);
		dev->queue_mgmt_ops->ndo_queue_mem_free(dev, old_mem);
	}

err_free_new_queue_mem:
	dev->queue_mgmt_ops->ndo_queue_mem_free(dev, new_mem);

err_free_old_mem:
	kvfree(old_mem);

err_free_new_mem:
	kvfree(new_mem);

	return err;
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
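
/*
 * Hypothetical sketch (not part of this file): a driver opts in to queue
 * restarts by filling in struct netdev_queue_mgmt_ops from
 * <net/netdev_queues.h> and pointing dev->queue_mgmt_ops at it. The "foo"
 * names and struct foo_rxq_mem are illustrative only; the callback set
 * mirrors what netdev_rx_queue_restart() above requires.
 *
 *	struct foo_rxq_mem {
 *		...			(per-queue rings, buffers, etc.)
 *	};
 *
 *	static const struct netdev_queue_mgmt_ops foo_queue_mgmt_ops = {
 *		.ndo_queue_mem_size	= sizeof(struct foo_rxq_mem),
 *		.ndo_queue_mem_alloc	= foo_rxq_mem_alloc,
 *		.ndo_queue_mem_free	= foo_rxq_mem_free,
 *		.ndo_queue_start	= foo_rxq_start,
 *		.ndo_queue_stop		= foo_rxq_stop,
 *	};
 *
 *	At probe time:  netdev->queue_mgmt_ops = &foo_queue_mgmt_ops;
 */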

static int __net_mp_open_rxq(struct net_device *dev, unsigned int ifq_idx,
			     struct pp_memory_provider_params *p)
{
	struct netdev_rx_queue *rxq;
	int ret;

	if (ifq_idx >= dev->real_num_rx_queues)
		return -EINVAL;
	ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues);

	rxq = __netif_get_rx_queue(dev, ifq_idx);
	if (rxq->mp_params.mp_ops)
		return -EEXIST;

	rxq->mp_params = *p;
	ret = netdev_rx_queue_restart(dev, ifq_idx);
	if (ret) {
		rxq->mp_params.mp_ops = NULL;
		rxq->mp_params.mp_priv = NULL;
	}
	return ret;
}

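/**
 * net_mp_open_rxq - install a page pool memory provider on an Rx queue
 * @dev: network device
 * @ifq_idx: index of the Rx queue to bind the provider to
 * @p: memory provider parameters to install
 *
 * Takes the rtnl lock, stores @p in the queue's mp_params and restarts the
 * queue so the provider takes effect; the parameters are rolled back if the
 * restart fails.
 *
 * Return: 0 on success, -EINVAL if @ifq_idx is out of range, -EEXIST if the
 * queue already has a provider, or the error from netdev_rx_queue_restart().
 */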
int net_mp_open_rxq(struct net_device *dev, unsigned int ifq_idx,
		    struct pp_memory_provider_params *p)
{
	int ret;

	rtnl_lock();
	ret = __net_mp_open_rxq(dev, ifq_idx, p);
	rtnl_unlock();
	return ret;
}

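/* rtnl-locked helper for net_mp_close_rxq(): verify that @old_p matches the
 * installed parameters, then clear them and restart the queue without the
 * memory provider.
 */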
static void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
				struct pp_memory_provider_params *old_p)
{
	struct netdev_rx_queue *rxq;

	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
		return;

	rxq = __netif_get_rx_queue(dev, ifq_idx);

	/* Callers holding a netdev ref may get here after we already
	 * went through shutdown via dev_memory_provider_uninstall().
	 */
	if (dev->reg_state > NETREG_REGISTERED &&
	    !rxq->mp_params.mp_ops)
		return;

	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;

	rxq->mp_params.mp_ops = NULL;
	rxq->mp_params.mp_priv = NULL;
	WARN_ON(netdev_rx_queue_restart(dev, ifq_idx));
}

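/**
 * net_mp_close_rxq - remove a page pool memory provider from an Rx queue
 * @dev: network device
 * @ifq_idx: index of the Rx queue the provider is bound to
 * @old_p: the memory provider parameters that were installed earlier
 *
 * Takes the rtnl lock, clears the queue's mp_params and restarts the queue
 * so it runs without the provider again.
 */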
void net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
		      struct pp_memory_provider_params *old_p)
{
	rtnl_lock();
	__net_mp_close_rxq(dev, ifq_idx, old_p);
	rtnl_unlock();
}
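
/*
 * Hypothetical usage sketch (not part of this file): a memory provider
 * binds to an Rx queue and later releases it with the same parameters.
 * "my_mp_ops" and "my_priv" are placeholder names.
 *
 *	struct pp_memory_provider_params p = {
 *		.mp_ops		= &my_mp_ops,
 *		.mp_priv	= my_priv,
 *	};
 *	int err;
 *
 *	err = net_mp_open_rxq(netdev, rxq_idx, &p);
 *	if (err)
 *		return err;
 *
 *	... traffic flows on the queue using the provider's memory ...
 *
 *	net_mp_close_rxq(netdev, rxq_idx, &p);
 */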