xref: /linux/net/core/netdev_rx_queue.c (revision 61f96e684edd28ca40555ec49ea1555df31ba619)
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/memory_provider.h>

#include "page_pool_priv.h"

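/* Restart the RX queue at index rxq_idx: allocate fresh per-queue memory
 * through the driver's queue management ops, stop the old queue instance and
 * start it again on the new memory, rolling back to the old memory if the new
 * queue cannot be started.  Only devices that provide the full set of queue
 * management callbacks support this.
 *
 * Illustrative sketch only (the "foo_*" names are hypothetical): a driver
 * opts in by populating all five callbacks, e.g.
 *
 *	static const struct netdev_queue_mgmt_ops foo_queue_mgmt_ops = {
 *		.ndo_queue_mem_size	= sizeof(struct foo_rx_queue_mem),
 *		.ndo_queue_mem_alloc	= foo_queue_mem_alloc,
 *		.ndo_queue_mem_free	= foo_queue_mem_free,
 *		.ndo_queue_start	= foo_queue_start,
 *		.ndo_queue_stop		= foo_queue_stop,
 *	};
 *	dev->queue_mgmt_ops = &foo_queue_mgmt_ops;
 */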
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err;

	if (!qops || !qops->ndo_queue_stop || !qops->ndo_queue_mem_free ||
	    !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
		return -EOPNOTSUPP;

	netdev_assert_locked(dev);

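	/* Allocate two scratch areas of the driver-declared per-queue memory
	 * size: new_mem will hold the newly allocated queue state, old_mem
	 * will receive the state of the queue being stopped.
	 */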
	new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem)
		return -ENOMEM;

	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!old_mem) {
		err = -ENOMEM;
		goto err_free_new_mem;
	}

	err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
	if (err)
		goto err_free_old_mem;

	err = page_pool_check_memory_provider(dev, rxq);
	if (err)
		goto err_free_new_queue_mem;

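	/* If the interface is running, swap the live queue over to the new
	 * memory.  If it is down there is nothing to stop or start; swap the
	 * buffers so the unused new allocation is released via old_mem below.
	 */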
	if (netif_running(dev)) {
		err = qops->ndo_queue_stop(dev, old_mem, rxq_idx);
		if (err)
			goto err_free_new_queue_mem;

		err = qops->ndo_queue_start(dev, new_mem, rxq_idx);
		if (err)
			goto err_start_queue;
	} else {
		swap(new_mem, old_mem);
	}

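	/* old_mem now holds whichever queue memory is no longer needed: the
	 * previous queue state after a restart, or the unused new allocation
	 * if the device was down.  Return it to the driver, then free both
	 * scratch buffers.
	 */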
	qops->ndo_queue_mem_free(dev, old_mem);

	kvfree(old_mem);
	kvfree(new_mem);

	return 0;

err_start_queue:
	/* Restarting the queue with old_mem should be successful as we haven't
	 * changed any of the queue configuration, and there is not much we can
	 * do to recover from a failure here.
	 *
	 * WARN if we fail to recover the old rx queue, and at least free
	 * old_mem so we don't also leak that.
	 */
	if (qops->ndo_queue_start(dev, old_mem, rxq_idx)) {
		WARN(1,
		     "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
		     rxq_idx);
		qops->ndo_queue_mem_free(dev, old_mem);
	}

err_free_new_queue_mem:
	qops->ndo_queue_mem_free(dev, new_mem);

err_free_old_mem:
	kvfree(old_mem);

err_free_new_mem:
	kvfree(new_mem);

	return err;
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");

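/* Bind a page pool memory provider to RX queue rxq_idx.  Validates that the
 * device can take a custom provider (tcp-data-split enabled, hds-thresh of
 * zero, no XDP program attached, queue not already bound to a provider or
 * used by AF_XDP), installs the given parameters on the queue and restarts
 * the queue so the driver picks up the new provider.  The parameters are
 * cleared again if the restart fails.  Callers must hold the netdev instance
 * lock; extack may be NULL.
 */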
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		      const struct pp_memory_provider_params *p,
		      struct netlink_ext_ack *extack)
{
	struct netdev_rx_queue *rxq;
	int ret;

	if (!netdev_need_ops_lock(dev))
		return -EOPNOTSUPP;

	if (rxq_idx >= dev->real_num_rx_queues) {
		NL_SET_ERR_MSG(extack, "rx queue index out of range");
		return -ERANGE;
	}
	rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);

	if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
		NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
		return -EINVAL;
	}
	if (dev->cfg->hds_thresh) {
		NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
		return -EINVAL;
	}
	if (dev_xdp_prog_count(dev)) {
		NL_SET_ERR_MSG(extack, "unable to attach custom memory provider to device with XDP program attached");
		return -EEXIST;
	}

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	if (rxq->mp_params.mp_ops) {
		NL_SET_ERR_MSG(extack, "designated queue already bound to a memory provider");
		return -EEXIST;
	}
#ifdef CONFIG_XDP_SOCKETS
	if (rxq->pool) {
		NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
		return -EBUSY;
	}
#endif

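	/* Install the provider parameters and restart the queue so the driver
	 * picks them up; undo the binding if the restart fails.
	 */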
	rxq->mp_params = *p;
	ret = netdev_rx_queue_restart(dev, rxq_idx);
	if (ret) {
		rxq->mp_params.mp_ops = NULL;
		rxq->mp_params.mp_priv = NULL;
	}
	return ret;
}

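/* Locked wrapper around __net_mp_open_rxq() for in-kernel users: takes and
 * releases the netdev instance lock around the bind.
 *
 * Illustrative sketch only ("my_mp_ops" and "my_priv" are hypothetical): a
 * memory provider would typically bind and later unbind a queue like
 *
 *	struct pp_memory_provider_params p = {
 *		.mp_ops  = &my_mp_ops,
 *		.mp_priv = my_priv,
 *	};
 *	err = net_mp_open_rxq(dev, rxq_idx, &p);
 *	...
 *	net_mp_close_rxq(dev, rxq_idx, &p);
 */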
int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		    struct pp_memory_provider_params *p)
{
	int ret;

	netdev_lock(dev);
	ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
	netdev_unlock(dev);
	return ret;
}

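/* Unbind a memory provider from RX queue ifq_idx: check that old_p matches
 * the provider currently installed, clear the queue's provider parameters
 * and restart the queue without them.  Safe to call after unregistration has
 * already torn the binding down.
 */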
void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
			const struct pp_memory_provider_params *old_p)
{
	struct netdev_rx_queue *rxq;
	int err;

	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
		return;

	rxq = __netif_get_rx_queue(dev, ifq_idx);

	/* Callers holding a netdev ref may get here after we already
	 * went through shutdown via dev_memory_provider_uninstall().
	 */
	if (dev->reg_state > NETREG_REGISTERED &&
	    !rxq->mp_params.mp_ops)
		return;

	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;

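	/* Drop the provider parameters and restart the queue without them.
	 * Any restart failure other than -ENETDOWN is unexpected here and
	 * triggers the warning below.
	 */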
	rxq->mp_params.mp_ops = NULL;
	rxq->mp_params.mp_priv = NULL;
	err = netdev_rx_queue_restart(dev, ifq_idx);
	WARN_ON(err && err != -ENETDOWN);
}

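/* Locked wrapper around __net_mp_close_rxq(): takes and releases the netdev
 * instance lock around the unbind.
 */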
void net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
		      struct pp_memory_provider_params *old_p)
{
	netdev_lock(dev);
	__net_mp_close_rxq(dev, ifq_idx, old_p);
	netdev_unlock(dev);
}