// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/memory_provider.h>

#include "page_pool_priv.h"

/* See also page_pool_is_unreadable() */
bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, idx);

	return !!rxq->mp_params.mp_ops;
}
EXPORT_SYMBOL(netif_rxq_has_unreadable_mp);
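
/* A minimal usage sketch (hypothetical caller, not part of this file):
 * a path that must be able to read packet payloads can bail out early
 * when a queue is backed by an unreadable memory provider, e.g.:
 *
 *	if (netif_rxq_has_unreadable_mp(dev, rxq_idx))
 *		return -EOPNOTSUPP;
 */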
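
/**
 * netdev_rx_queue_restart - restart an Rx queue on freshly allocated memory
 * @dev: netdev whose queue to restart; caller must hold the netdev lock
 * @rxq_idx: index of the Rx queue to restart
 *
 * Allocate new queue memory via the driver's queue management ops, stop
 * the old queue, and start it again on the new memory, rolling back to
 * the old memory if the new queue fails to start.
 *
 * Return: 0 on success, negative errno on failure.
 */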
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	struct netdev_queue_config qcfg;
	void *new_mem, *old_mem;
	int err;

	if (!qops || !qops->ndo_queue_stop || !qops->ndo_queue_mem_free ||
	    !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
		return -EOPNOTSUPP;

	if (WARN_ON_ONCE(qops->supported_params && !qops->ndo_default_qcfg))
		return -EINVAL;

	netdev_assert_locked(dev);

	memset(&qcfg, 0, sizeof(qcfg));
	if (qops->ndo_default_qcfg)
		qops->ndo_default_qcfg(dev, &qcfg);

	if (rxq->mp_params.rx_page_size) {
		if (!(qops->supported_params & QCFG_RX_PAGE_SIZE))
			return -EOPNOTSUPP;
		qcfg.rx_page_size = rxq->mp_params.rx_page_size;
	}

	new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem)
		return -ENOMEM;

	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!old_mem) {
		err = -ENOMEM;
		goto err_free_new_mem;
	}

	err = qops->ndo_queue_mem_alloc(dev, &qcfg, new_mem, rxq_idx);
	if (err)
		goto err_free_old_mem;

	err = page_pool_check_memory_provider(dev, rxq);
	if (err)
		goto err_free_new_queue_mem;

	if (netif_running(dev)) {
		err = qops->ndo_queue_stop(dev, old_mem, rxq_idx);
		if (err)
			goto err_free_new_queue_mem;

		err = qops->ndo_queue_start(dev, &qcfg, new_mem, rxq_idx);
		if (err)
			goto err_start_queue;
	} else {
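		/* Device is down, so there is no running queue to swap the
		 * new memory into; route the unused allocation through the
		 * old_mem free path below.
		 */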
		swap(new_mem, old_mem);
	}

	qops->ndo_queue_mem_free(dev, old_mem);

	kvfree(old_mem);
	kvfree(new_mem);

	rxq->qcfg = qcfg;
	return 0;

err_start_queue:
	/* Restarting the queue with old_mem should be successful as we haven't
	 * changed any of the queue configuration, and there is not much we can
	 * do to recover from a failure here.
	 *
	 * WARN if we fail to recover the old rx queue, and at least free
	 * old_mem so we don't also leak that.
	 */
	if (qops->ndo_queue_start(dev, &rxq->qcfg, old_mem, rxq_idx)) {
		WARN(1,
		     "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
		     rxq_idx);
		qops->ndo_queue_mem_free(dev, old_mem);
	}

err_free_new_queue_mem:
	qops->ndo_queue_mem_free(dev, new_mem);

err_free_old_mem:
	kvfree(old_mem);

err_free_new_mem:
	kvfree(new_mem);

	return err;
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
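
/* A minimal sketch of the driver-side contract that
 * netdev_rx_queue_restart() relies on, with hypothetical foo_* names
 * (real implementations live in individual drivers):
 *
 *	static const struct netdev_queue_mgmt_ops foo_queue_mgmt_ops = {
 *		.ndo_queue_mem_size	= sizeof(struct foo_queue_mem),
 *		.ndo_queue_mem_alloc	= foo_queue_mem_alloc,
 *		.ndo_queue_mem_free	= foo_queue_mem_free,
 *		.ndo_queue_start	= foo_queue_start,
 *		.ndo_queue_stop		= foo_queue_stop,
 *	};
 *
 * ndo_queue_mem_alloc() must stash every resource needed to start the
 * queue in the caller-provided ndo_queue_mem_size buffer, so that the
 * stop/start window above cannot fail on allocation. Drivers that set
 * supported_params (e.g. QCFG_RX_PAGE_SIZE) must also provide
 * ndo_default_qcfg() to fill in their defaults.
 */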

int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		      const struct pp_memory_provider_params *p,
		      struct netlink_ext_ack *extack)
{
	struct netdev_rx_queue *rxq;
	int ret;

	if (!netdev_need_ops_lock(dev))
		return -EOPNOTSUPP;

	if (rxq_idx >= dev->real_num_rx_queues) {
		NL_SET_ERR_MSG(extack, "rx queue index out of range");
		return -ERANGE;
	}
	rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);

	if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
		NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
		return -EINVAL;
	}
	if (dev->cfg->hds_thresh) {
		NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
		return -EINVAL;
	}
	if (dev_xdp_prog_count(dev)) {
		NL_SET_ERR_MSG(extack, "unable to attach custom memory provider to device with XDP program attached");
		return -EEXIST;
	}

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	if (rxq->mp_params.mp_ops) {
		NL_SET_ERR_MSG(extack, "designated queue already has a memory provider bound");
		return -EEXIST;
	}
#ifdef CONFIG_XDP_SOCKETS
	if (rxq->pool) {
		NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
		return -EBUSY;
	}
#endif

	rxq->mp_params = *p;
	ret = netdev_rx_queue_restart(dev, rxq_idx);
	if (ret)
		memset(&rxq->mp_params, 0, sizeof(rxq->mp_params));

	return ret;
}

int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		    struct pp_memory_provider_params *p)
{
	int ret;

	netdev_lock(dev);
	ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
	netdev_unlock(dev);
	return ret;
}
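
/* A minimal sketch of how a memory provider binds itself to a queue,
 * with hypothetical foo_* names (see the devmem TCP and io_uring zcrx
 * code for the real users):
 *
 *	struct pp_memory_provider_params p = {
 *		.mp_ops		= &foo_mp_ops,
 *		.mp_priv	= foo_binding,
 *	};
 *	int err = net_mp_open_rxq(dev, rxq_idx, &p);
 *
 * On success the queue has been restarted with the provider installed;
 * the matching teardown is net_mp_close_rxq() with the same params.
 */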

void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
			const struct pp_memory_provider_params *old_p)
{
	struct netdev_rx_queue *rxq;
	int err;

	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
		return;

	rxq = __netif_get_rx_queue(dev, ifq_idx);

	/* Callers holding a netdev ref may get here after we already
	 * went through shutdown via dev_memory_provider_uninstall().
	 */
	if (dev->reg_state > NETREG_REGISTERED &&
	    !rxq->mp_params.mp_ops)
		return;

	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;

	memset(&rxq->mp_params, 0, sizeof(rxq->mp_params));
	err = netdev_rx_queue_restart(dev, ifq_idx);
	WARN_ON(err && err != -ENETDOWN);
}

void net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
		      struct pp_memory_provider_params *old_p)
{
	netdev_lock(dev);
	__net_mp_close_rxq(dev, ifq_idx, old_p);
	netdev_unlock(dev);
}