xref: /linux/net/core/netdev_rx_queue.c (revision 1cc3462159babb69c84c39cb1b4e262aef3ea325)
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/netdevice.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/memory_provider.h>

#include "page_pool_priv.h"

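/*
 * Illustrative sketch only (not taken from any specific driver): a driver
 * opts in to core-managed queue restarts by implementing
 * netdev_queue_mgmt_ops and pointing dev->queue_mgmt_ops at it. The "foo_*"
 * names below are hypothetical; the ops structure and its fields are
 * declared in include/net/netdev_queues.h.
 *
 *	static const struct netdev_queue_mgmt_ops foo_queue_mgmt_ops = {
 *		.ndo_queue_mem_size	= sizeof(struct foo_queue_mem),
 *		.ndo_queue_mem_alloc	= foo_queue_mem_alloc,
 *		.ndo_queue_mem_free	= foo_queue_mem_free,
 *		.ndo_queue_start	= foo_queue_start,
 *		.ndo_queue_stop		= foo_queue_stop,
 *	};
 *
 *	dev->queue_mgmt_ops = &foo_queue_mgmt_ops;
 */

/**
 * netdev_rx_queue_restart - restart an RX queue with its current configuration
 * @dev: net_device that owns the queue
 * @rxq_idx: index of the RX queue to restart
 *
 * Allocate fresh per-queue memory through the driver's queue management ops,
 * stop the old queue and start the new one in its place. If the device is
 * down there is no queue to stop or start, so the freshly allocated memory is
 * simply released again. Requires RTNL and takes the netdev instance lock.
 *
 * Return: 0 on success or a negative errno on failure.
 */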
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err;

	if (!qops || !qops->ndo_queue_stop || !qops->ndo_queue_mem_free ||
	    !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

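	/* new_mem and old_mem are opaque, driver-defined queue descriptors of
	 * qops->ndo_queue_mem_size bytes; the core only shuttles them between
	 * the queue management callbacks below.
	 */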
	new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem)
		return -ENOMEM;

	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!old_mem) {
		err = -ENOMEM;
		goto err_free_new_mem;
	}

	netdev_lock(dev);

	err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
	if (err)
		goto err_free_old_mem;

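	/* If a page pool memory provider is configured for this queue, verify
	 * that the driver's newly allocated queue memory actually uses it.
	 */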
	err = page_pool_check_memory_provider(dev, rxq);
	if (err)
		goto err_free_new_queue_mem;

	if (netif_running(dev)) {
		err = qops->ndo_queue_stop(dev, old_mem, rxq_idx);
		if (err)
			goto err_free_new_queue_mem;

		err = qops->ndo_queue_start(dev, new_mem, rxq_idx);
		if (err)
			goto err_start_queue;
	} else {
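		/* Device is down, so there is no queue to stop or start;
		 * route the freshly allocated memory through the "old"
		 * free path below.
		 */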
		swap(new_mem, old_mem);
	}

	qops->ndo_queue_mem_free(dev, old_mem);

	netdev_unlock(dev);

	kvfree(old_mem);
	kvfree(new_mem);

	return 0;

err_start_queue:
	/* Restarting the queue with old_mem should be successful as we haven't
	 * changed any of the queue configuration, and there is not much we can
	 * do to recover from a failure here.
	 *
	 * WARN if we fail to recover the old rx queue, and at least free
	 * old_mem so we don't also leak that.
	 */
	if (qops->ndo_queue_start(dev, old_mem, rxq_idx)) {
		WARN(1,
		     "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
		     rxq_idx);
		qops->ndo_queue_mem_free(dev, old_mem);
	}

err_free_new_queue_mem:
	qops->ndo_queue_mem_free(dev, new_mem);

err_free_old_mem:
	netdev_unlock(dev);
	kvfree(old_mem);

err_free_new_mem:
	kvfree(new_mem);

	return err;
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");

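/* Bind a page pool memory provider (e.g. devmem TCP or io_uring zero-copy RX)
 * to one RX queue, then restart the queue so the driver recreates it with the
 * provider attached. Called under RTNL.
 */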
static int __net_mp_open_rxq(struct net_device *dev, unsigned int ifq_idx,
			     struct pp_memory_provider_params *p)
{
	struct netdev_rx_queue *rxq;
	int ret;

	if (ifq_idx >= dev->real_num_rx_queues)
		return -EINVAL;
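	/* The index may come from an untrusted source; clamp it after the
	 * bounds check to prevent speculative out-of-bounds access.
	 */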
	ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues);

	rxq = __netif_get_rx_queue(dev, ifq_idx);
	if (rxq->mp_params.mp_ops)
		return -EEXIST;

	rxq->mp_params = *p;
	ret = netdev_rx_queue_restart(dev, ifq_idx);
	if (ret) {
		rxq->mp_params.mp_ops = NULL;
		rxq->mp_params.mp_priv = NULL;
	}
	return ret;
}

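/* RTNL-acquiring wrapper around __net_mp_open_rxq() for callers that do not
 * already hold the lock.
 */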
int net_mp_open_rxq(struct net_device *dev, unsigned int ifq_idx,
		    struct pp_memory_provider_params *p)
{
	int ret;

	rtnl_lock();
	ret = __net_mp_open_rxq(dev, ifq_idx, p);
	rtnl_unlock();
	return ret;
}

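/* Detach a memory provider from an RX queue: check that the expected provider
 * is still installed, clear it, and restart the queue so the driver rebuilds
 * it with regular page pool memory.
 */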
static void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
			       struct pp_memory_provider_params *old_p)
{
	struct netdev_rx_queue *rxq;

	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
		return;

	rxq = __netif_get_rx_queue(dev, ifq_idx);

	/* Callers holding a netdev ref may get here after we already
	 * went through shutdown via dev_memory_provider_uninstall().
	 */
	if (dev->reg_state > NETREG_REGISTERED &&
	    !rxq->mp_params.mp_ops)
		return;

	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;

	rxq->mp_params.mp_ops = NULL;
	rxq->mp_params.mp_priv = NULL;
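	/* Restart with mp_params cleared so the driver rebuilds the queue
	 * without the provider; there is no rollback path if this fails,
	 * hence the WARN_ON().
	 */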
	WARN_ON(netdev_rx_queue_restart(dev, ifq_idx));
}

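/* RTNL-acquiring wrapper around __net_mp_close_rxq(). */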
void net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
		      struct pp_memory_provider_params *old_p)
{
	rtnl_lock();
	__net_mp_close_rxq(dev, ifq_idx, old_p);
	rtnl_unlock();
}
160