xref: /linux/net/core/netdev_rx_queue.c (revision 37a93dd5c49b5fda807fd204edf2547c3493319c)
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/memory_provider.h>

#include "dev.h"
#include "page_pool_priv.h"

/* See also page_pool_is_unreadable() */
bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, idx);

	return !!rxq->mp_params.mp_ops;
}
EXPORT_SYMBOL(netif_rxq_has_unreadable_mp);

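/* Rebuild RX queue @rxq_idx, replacing its memory and applying @qcfg_new in
 * place of @qcfg_old.  The new memory is allocated and checked before the
 * old queue is stopped, so most failures leave the queue untouched.
 * Requires the driver to implement queue_mgmt_ops and the caller to hold
 * the netdev instance lock.
 */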
static int netdev_rx_queue_reconfig(struct net_device *dev,
				    unsigned int rxq_idx,
				    struct netdev_queue_config *qcfg_old,
				    struct netdev_queue_config *qcfg_new)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err;

	if (!qops || !qops->ndo_queue_stop || !qops->ndo_queue_mem_free ||
	    !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
		return -EOPNOTSUPP;

	netdev_assert_locked(dev);

	new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem)
		return -ENOMEM;

	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!old_mem) {
		err = -ENOMEM;
		goto err_free_new_mem;
	}

	err = qops->ndo_queue_mem_alloc(dev, qcfg_new, new_mem, rxq_idx);
	if (err)
		goto err_free_old_mem;

	err = page_pool_check_memory_provider(dev, rxq);
	if (err)
		goto err_free_new_queue_mem;

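	/* Device is up: stop the queue on its old memory, then restart it
	 * on the newly allocated memory.
	 */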
	if (netif_running(dev)) {
		err = qops->ndo_queue_stop(dev, old_mem, rxq_idx);
		if (err)
			goto err_free_new_queue_mem;

		err = qops->ndo_queue_start(dev, qcfg_new, new_mem, rxq_idx);
		if (err)
			goto err_start_queue;
	} else {
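		/* Device is down, so there is no queue to stop or restart.
		 * Swap the buffers so the freshly allocated memory is
		 * released by the common free path below.
		 */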
		swap(new_mem, old_mem);
	}

	qops->ndo_queue_mem_free(dev, old_mem);

	kvfree(old_mem);
	kvfree(new_mem);

	return 0;

err_start_queue:
	/* Restarting the queue with old_mem should be successful as we haven't
	 * changed any of the queue configuration, and there is not much we can
	 * do to recover from a failure here.
	 *
	 * WARN if we fail to recover the old rx queue, and at least free
	 * old_mem so we don't also leak that.
	 */
	if (qops->ndo_queue_start(dev, qcfg_old, old_mem, rxq_idx)) {
		WARN(1,
		     "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
		     rxq_idx);
		qops->ndo_queue_mem_free(dev, old_mem);
	}

err_free_new_queue_mem:
	qops->ndo_queue_mem_free(dev, new_mem);

err_free_old_mem:
	kvfree(old_mem);

err_free_new_mem:
	kvfree(new_mem);

	return err;
}

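/* Restart RX queue @rxq_idx without changing its configuration: the current
 * queue config is used as both the old and the new state.
 */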
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
	struct netdev_queue_config qcfg;

	netdev_queue_config(dev, rxq_idx, &qcfg);
	return netdev_rx_queue_reconfig(dev, rxq_idx, &qcfg, &qcfg);
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");

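/* Bind the memory provider described by @p to RX queue @rxq_idx and rebuild
 * the queue so it pulls buffers from the provider.  Expects the netdev
 * instance lock to be held; net_mp_open_rxq() is the locking wrapper.
 */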
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		      const struct pp_memory_provider_params *p,
		      struct netlink_ext_ack *extack)
{
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	struct netdev_queue_config qcfg[2];
	struct netdev_rx_queue *rxq;
	int ret;

	if (!netdev_need_ops_lock(dev))
		return -EOPNOTSUPP;

	if (rxq_idx >= dev->real_num_rx_queues) {
		NL_SET_ERR_MSG(extack, "rx queue index out of range");
		return -ERANGE;
	}
	rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);

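	/* Provider-backed buffers may be unreadable by the CPU, so they can
	 * only carry payload: header/data split must be enabled with no
	 * threshold, and XDP programs (which need readable frames) must not
	 * be attached.
	 */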
	if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
		NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
		return -EINVAL;
	}
	if (dev->cfg->hds_thresh) {
		NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
		return -EINVAL;
	}
	if (dev_xdp_prog_count(dev)) {
		NL_SET_ERR_MSG(extack, "unable to attach custom memory provider to device with XDP program attached");
		return -EEXIST;
	}
	if (p->rx_page_size && !(qops->supported_params & QCFG_RX_PAGE_SIZE)) {
		NL_SET_ERR_MSG(extack, "device does not support: rx_page_size");
		return -EOPNOTSUPP;
	}


	rxq = __netif_get_rx_queue(dev, rxq_idx);
	if (rxq->mp_params.mp_ops) {
		NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
		return -EEXIST;
	}
#ifdef CONFIG_XDP_SOCKETS
	if (rxq->pool) {
		NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
		return -EBUSY;
	}
#endif

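	/* Snapshot the current queue config, install the provider params,
	 * and validate the resulting config before rebuilding the queue.
	 * Undo the binding if validation or the reconfig fails.
	 */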
	netdev_queue_config(dev, rxq_idx, &qcfg[0]);
	rxq->mp_params = *p;
	ret = netdev_queue_config_validate(dev, rxq_idx, &qcfg[1], extack);
	if (ret)
		goto err_clear_mp;

	ret = netdev_rx_queue_reconfig(dev, rxq_idx, &qcfg[0], &qcfg[1]);
	if (ret)
		goto err_clear_mp;

	return 0;

err_clear_mp:
	memset(&rxq->mp_params, 0, sizeof(rxq->mp_params));
	return ret;
}

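/* Locked wrapper around __net_mp_open_rxq() for callers that do not already
 * hold the netdev instance lock.
 */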
int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		    struct pp_memory_provider_params *p)
{
	int ret;

	netdev_lock(dev);
	ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
	netdev_unlock(dev);
	return ret;
}

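/* Unbind the memory provider @old_p from RX queue @ifq_idx and rebuild the
 * queue on ordinary pages.  Expects the netdev instance lock to be held and
 * tolerates being called after the provider was already torn down on
 * unregister.
 */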
void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
			const struct pp_memory_provider_params *old_p)
{
	struct netdev_queue_config qcfg[2];
	struct netdev_rx_queue *rxq;
	int err;

	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
		return;

	rxq = __netif_get_rx_queue(dev, ifq_idx);

	/* Callers holding a netdev ref may get here after we already
	 * went through shutdown via dev_memory_provider_uninstall().
	 */
	if (dev->reg_state > NETREG_REGISTERED &&
	    !rxq->mp_params.mp_ops)
		return;

	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;

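	/* Capture the config with the provider still installed, clear the
	 * binding, capture the new config, and rebuild the queue.  Only
	 * -ENETDOWN is tolerated as a failure here.
	 */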
	netdev_queue_config(dev, ifq_idx, &qcfg[0]);
	memset(&rxq->mp_params, 0, sizeof(rxq->mp_params));
	netdev_queue_config(dev, ifq_idx, &qcfg[1]);

	err = netdev_rx_queue_reconfig(dev, ifq_idx, &qcfg[0], &qcfg[1]);
	WARN_ON(err && err != -ENETDOWN);
}

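/* Locked wrapper around __net_mp_close_rxq() for callers that do not already
 * hold the netdev instance lock.
 */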
void net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
		      struct pp_memory_provider_params *old_p)
{
	netdev_lock(dev);
	__net_mp_close_rxq(dev, ifq_idx, old_p);
	netdev_unlock(dev);
}