xref: /linux/net/core/netdev_rx_queue.c (revision c3b999cad7ec39a5487b20e8b6e737d2ab0c5393)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <linux/netdevice.h>
4 #include <net/netdev_lock.h>
5 #include <net/netdev_queues.h>
6 #include <net/netdev_rx_queue.h>
7 #include <net/page_pool/memory_provider.h>
8 
9 #include "page_pool_priv.h"
10 
/**
 * netdev_rx_queue_restart() - restart an RX queue on freshly allocated memory
 * @dev: device owning the queue (its instance lock must be held)
 * @rxq_idx: index of the RX queue to restart
 *
 * Allocates new per-queue memory through the device's queue management ops,
 * stops the queue (capturing its current memory), and restarts it on the new
 * memory, freeing the old. If the device is not running, no stop/start is
 * needed; the freshly allocated memory is simply released again.
 *
 * Return: 0 on success; -EOPNOTSUPP if the driver lacks any of the required
 * queue-management callbacks; -ENOMEM on scratch-buffer allocation failure;
 * otherwise a negative error from the driver callbacks or from
 * page_pool_check_memory_provider().
 */
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err;

	/* All four callbacks are required to perform a restart. */
	if (!qops || !qops->ndo_queue_stop || !qops->ndo_queue_mem_free ||
	    !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
		return -EOPNOTSUPP;

	netdev_assert_locked(dev);

	/* Two opaque scratch buffers of the driver-declared per-queue size:
	 * one to receive the new queue state, one to capture the old state
	 * from ndo_queue_stop().
	 */
	new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem)
		return -ENOMEM;

	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!old_mem) {
		err = -ENOMEM;
		goto err_free_new_mem;
	}

	err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
	if (err)
		goto err_free_old_mem;

	/* Validate the queue's memory-provider setup before tearing the
	 * running queue down.
	 */
	err = page_pool_check_memory_provider(dev, rxq);
	if (err)
		goto err_free_new_queue_mem;

	if (netif_running(dev)) {
		err = qops->ndo_queue_stop(dev, old_mem, rxq_idx);
		if (err)
			goto err_free_new_queue_mem;

		err = qops->ndo_queue_start(dev, new_mem, rxq_idx);
		if (err)
			goto err_start_queue;
	} else {
		/* Device is down: nothing to stop or start. Swap so the
		 * newly allocated queue memory is the one released below.
		 */
		swap(new_mem, old_mem);
	}

	/* Release the retired queue memory, then both scratch buffers. */
	qops->ndo_queue_mem_free(dev, old_mem);

	kvfree(old_mem);
	kvfree(new_mem);

	return 0;

err_start_queue:
	/* Restarting the queue with old_mem should be successful as we haven't
	 * changed any of the queue configuration, and there is not much we can
	 * do to recover from a failure here.
	 *
	 * WARN if we fail to recover the old rx queue, and at least free
	 * old_mem so we don't also leak that.
	 */
	if (qops->ndo_queue_start(dev, old_mem, rxq_idx)) {
		WARN(1,
		     "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
		     rxq_idx);
		qops->ndo_queue_mem_free(dev, old_mem);
	}

err_free_new_queue_mem:
	qops->ndo_queue_mem_free(dev, new_mem);

err_free_old_mem:
	kvfree(old_mem);

err_free_new_mem:
	kvfree(new_mem);

	return err;
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
88 
89 static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
90 			     struct pp_memory_provider_params *p)
91 {
92 	struct netdev_rx_queue *rxq;
93 	int ret;
94 
95 	if (ifq_idx >= dev->real_num_rx_queues)
96 		return -EINVAL;
97 	ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues);
98 
99 	rxq = __netif_get_rx_queue(dev, ifq_idx);
100 	if (rxq->mp_params.mp_ops)
101 		return -EEXIST;
102 
103 	rxq->mp_params = *p;
104 	ret = netdev_rx_queue_restart(dev, ifq_idx);
105 	if (ret) {
106 		rxq->mp_params.mp_ops = NULL;
107 		rxq->mp_params.mp_priv = NULL;
108 	}
109 	return ret;
110 }
111 
/* Locked wrapper: bind memory-provider params @p to RX queue @ifq_idx
 * under the netdev instance lock.
 */
int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
		    struct pp_memory_provider_params *p)
{
	int err;

	netdev_lock(dev);
	err = __net_mp_open_rxq(dev, ifq_idx, p);
	netdev_unlock(dev);

	return err;
}
122 
/* Unbind the memory provider described by @old_p from RX queue @ifq_idx and
 * restart the queue without it. The params must be cleared before the
 * restart so the queue comes back up provider-free. Caller holds the netdev
 * instance lock.
 */
static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
			      struct pp_memory_provider_params *old_p)
{
	struct netdev_rx_queue *rxq;

	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
		return;

	rxq = __netif_get_rx_queue(dev, ifq_idx);

	/* Callers holding a netdev ref may get here after we already
	 * went thru shutdown via dev_memory_provider_uninstall().
	 */
	if (dev->reg_state > NETREG_REGISTERED &&
	    !rxq->mp_params.mp_ops)
		return;

	/* Refuse to unbind a provider other than the one the caller
	 * claims to have installed.
	 */
	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;

	rxq->mp_params.mp_ops = NULL;
	rxq->mp_params.mp_priv = NULL;
	/* A restart failure here is unexpected and unrecoverable; warn. */
	WARN_ON(netdev_rx_queue_restart(dev, ifq_idx));
}
148 
/* Locked wrapper: unbind the memory provider @old_p from RX queue @ifq_idx
 * under the netdev instance lock.
 */
void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
		      struct pp_memory_provider_params *old_p)
{
	netdev_lock(dev);
	__net_mp_close_rxq(dev, ifq_idx, old_p);
	netdev_unlock(dev);
}
156