xref: /linux/net/core/netdev_rx_queue.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/memory_provider.h>

#include "page_pool_priv.h"

/*
 * Report whether the rx queue has a memory provider bound, i.e. whether its
 * buffers may be unreadable by the kernel.  See also
 * page_pool_is_unreadable().
 */
bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, idx);

	return !!rxq->mp_params.mp_ops;
}
EXPORT_SYMBOL(netif_rxq_has_unreadable_mp);
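
/**
 * netdev_rx_queue_restart - restart an rx queue on freshly allocated memory
 * @dev: netdev owning the queue; the instance lock must be held
 * @rxq_idx: index of the rx queue to restart
 *
 * Allocate new driver-defined queue memory, stop the queue and start it again
 * on the new memory, releasing the old memory afterwards.  If the device is
 * not running, the fresh allocation is released again and the queue is left
 * untouched.  Requires the driver to implement the full set of queue
 * management ops.
 *
 * Return: 0 on success or a negative errno.
 */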
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err;

	if (!qops || !qops->ndo_queue_stop || !qops->ndo_queue_mem_free ||
	    !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
		return -EOPNOTSUPP;

	netdev_assert_locked(dev);
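
	/*
	 * new_mem and old_mem are scratch buffers of the driver-declared
	 * per-queue size: new_mem is filled by the driver with the memory
	 * backing the restarted queue, old_mem receives the state of the
	 * queue being stopped.
	 */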
	new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem)
		return -ENOMEM;

	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!old_mem) {
		err = -ENOMEM;
		goto err_free_new_mem;
	}

	err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
	if (err)
		goto err_free_old_mem;
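
	/*
	 * Before committing, check that if a memory provider is configured
	 * for this queue, the driver actually created a page pool using it
	 * for this queue; otherwise the provider would silently go unused.
	 */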
	err = page_pool_check_memory_provider(dev, rxq);
	if (err)
		goto err_free_new_queue_mem;

	if (netif_running(dev)) {
		err = qops->ndo_queue_stop(dev, old_mem, rxq_idx);
		if (err)
			goto err_free_new_queue_mem;

		err = qops->ndo_queue_start(dev, new_mem, rxq_idx);
		if (err)
			goto err_start_queue;
	} else {
		swap(new_mem, old_mem);
	}
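
	/*
	 * old_mem now holds either the retired queue state or, if the device
	 * was down, the never-used new allocation (see the swap above).  Let
	 * the driver release it, then drop both scratch buffers.
	 */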
	qops->ndo_queue_mem_free(dev, old_mem);

	kvfree(old_mem);
	kvfree(new_mem);

	return 0;

err_start_queue:
	/* Restarting the queue with old_mem should be successful as we haven't
	 * changed any of the queue configuration, and there is not much we can
	 * do to recover from a failure here.
	 *
	 * WARN if we fail to recover the old rx queue, and at least free
	 * old_mem so we don't also leak that.
	 */
	if (qops->ndo_queue_start(dev, old_mem, rxq_idx)) {
		WARN(1,
		     "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
		     rxq_idx);
		qops->ndo_queue_mem_free(dev, old_mem);
	}

err_free_new_queue_mem:
	qops->ndo_queue_mem_free(dev, new_mem);

err_free_old_mem:
	kvfree(old_mem);

err_free_new_mem:
	kvfree(new_mem);

	return err;
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
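
/*
 * Sketch (hypothetical driver code, not part of this file): the queue
 * management ops a driver has to provide before netdev_rx_queue_restart()
 * can do anything.  The "my_*" names are placeholders; the member names are
 * the ones checked at the top of netdev_rx_queue_restart().
 *
 *	static const struct netdev_queue_mgmt_ops my_queue_mgmt_ops = {
 *		.ndo_queue_mem_size	= sizeof(struct my_rxq_mem),
 *		.ndo_queue_mem_alloc	= my_rxq_mem_alloc,
 *		.ndo_queue_mem_free	= my_rxq_mem_free,
 *		.ndo_queue_start	= my_rxq_start,
 *		.ndo_queue_stop		= my_rxq_stop,
 *	};
 */

/**
 * __net_mp_open_rxq - bind a memory provider to an rx queue
 * @dev: netdev owning the queue; the instance lock must be held
 * @rxq_idx: index of the rx queue
 * @p: memory provider parameters to install
 * @extack: extended ack for error reporting, may be NULL
 *
 * Install @p on the queue and restart it so the driver picks the provider up.
 * Only devices using the per-instance ops lock are supported.  Fails unless
 * tcp-data-split is enabled with a zero hds-thresh, or if an XDP program is
 * attached, or if the queue is already claimed by another memory provider or
 * an AF_XDP socket.
 *
 * Return: 0 on success or a negative errno.
 */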
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		      const struct pp_memory_provider_params *p,
		      struct netlink_ext_ack *extack)
{
	struct netdev_rx_queue *rxq;
	int ret;

	if (!netdev_need_ops_lock(dev))
		return -EOPNOTSUPP;

	if (rxq_idx >= dev->real_num_rx_queues) {
		NL_SET_ERR_MSG(extack, "rx queue index out of range");
		return -ERANGE;
	}
	rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
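
	/*
	 * Memory providers supply the payload buffers, which may not be
	 * readable by the kernel, so the device must split headers into
	 * regular kernel memory for every packet: tcp-data-split enabled
	 * and no size threshold.
	 */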
	if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
		NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
		return -EINVAL;
	}
	if (dev->cfg->hds_thresh) {
		NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
		return -EINVAL;
	}
	if (dev_xdp_prog_count(dev)) {
		NL_SET_ERR_MSG(extack, "cannot bind a custom memory provider to a device with an XDP program attached");
		return -EEXIST;
	}

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	if (rxq->mp_params.mp_ops) {
		NL_SET_ERR_MSG(extack, "designated queue is already bound to a memory provider");
		return -EEXIST;
	}
#ifdef CONFIG_XDP_SOCKETS
	if (rxq->pool) {
		NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
		return -EBUSY;
	}
#endif
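
	/*
	 * Install the provider parameters before restarting the queue so the
	 * driver's queue memory allocation sees them; roll them back if the
	 * restart fails.
	 */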
	rxq->mp_params = *p;
	ret = netdev_rx_queue_restart(dev, rxq_idx);
	if (ret) {
		rxq->mp_params.mp_ops = NULL;
		rxq->mp_params.mp_priv = NULL;
	}
	return ret;
}
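
/* Locked wrapper around __net_mp_open_rxq() for callers that do not already
 * hold the netdev instance lock.
 */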
int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		    struct pp_memory_provider_params *p)
{
	int ret;

	netdev_lock(dev);
	ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
	netdev_unlock(dev);
	return ret;
}
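
/**
 * __net_mp_close_rxq - unbind a memory provider from an rx queue
 * @dev: netdev owning the queue; the instance lock must be held
 * @ifq_idx: index of the rx queue
 * @old_p: parameters that were installed on the queue
 *
 * Clear the queue's memory provider parameters and restart the queue.  If
 * @old_p does not match what is currently installed, WARN and do nothing.
 */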
void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
			const struct pp_memory_provider_params *old_p)
{
	struct netdev_rx_queue *rxq;
	int err;

	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
		return;

	rxq = __netif_get_rx_queue(dev, ifq_idx);

	/* Callers holding a netdev ref may get here after we already
	 * went through shutdown via dev_memory_provider_uninstall().
	 */
	if (dev->reg_state > NETREG_REGISTERED &&
	    !rxq->mp_params.mp_ops)
		return;

	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;

	rxq->mp_params.mp_ops = NULL;
	rxq->mp_params.mp_priv = NULL;
	err = netdev_rx_queue_restart(dev, ifq_idx);
	WARN_ON(err && err != -ENETDOWN);
}
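
/* Locked wrapper around __net_mp_close_rxq() for callers that do not already
 * hold the netdev instance lock.
 */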
void net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
		      struct pp_memory_provider_params *old_p)
{
	netdev_lock(dev);
	__net_mp_close_rxq(dev, ifq_idx, old_p);
	netdev_unlock(dev);
}
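
/*
 * Usage sketch (hypothetical, not part of this file): how a memory provider
 * implementation might bind itself to one rx queue and unbind again later.
 * "my_mp_ops" and "my_binding" are assumed placeholders for the provider's
 * ops table and private state.
 *
 *	struct pp_memory_provider_params p = {
 *		.mp_ops	 = &my_mp_ops,
 *		.mp_priv = my_binding,
 *	};
 *	int err;
 *
 *	err = net_mp_open_rxq(dev, rxq_idx, &p);
 *	if (err)
 *		return err;
 *
 *	(receive traffic; payload buffers now come from the provider)
 *
 *	net_mp_close_rxq(dev, rxq_idx, &p);
 */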