// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/netdevice.h>
#include <linux/nospec.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/memory_provider.h>

#include "page_pool_priv.h"

/**
 * netdev_rx_queue_restart - recreate an RX queue with its current configuration
 * @dev: netdev whose queue to restart; its instance lock must be held
 * @rxq_idx: index of the RX queue to restart
 *
 * Allocate fresh per-queue memory, stop the old queue, and start it again on
 * the new memory. On failure the queue is restarted on its old memory.
 *
 * Return: 0 on success, negative errno on failure.
 */
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err;

	if (!qops || !qops->ndo_queue_stop || !qops->ndo_queue_mem_free ||
	    !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
		return -EOPNOTSUPP;

	netdev_assert_locked(dev);

	new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem)
		return -ENOMEM;

	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!old_mem) {
		err = -ENOMEM;
		goto err_free_new_mem;
	}

	err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
	if (err)
		goto err_free_old_mem;

	err = page_pool_check_memory_provider(dev, rxq);
	if (err)
		goto err_free_new_queue_mem;

	if (netif_running(dev)) {
		err = qops->ndo_queue_stop(dev, old_mem, rxq_idx);
		if (err)
			goto err_free_new_queue_mem;

		err = qops->ndo_queue_start(dev, new_mem, rxq_idx);
		if (err)
			goto err_start_queue;
	} else {
		swap(new_mem, old_mem);
	}

	qops->ndo_queue_mem_free(dev, old_mem);

	kvfree(old_mem);
	kvfree(new_mem);

	return 0;

err_start_queue:
	/* Restarting the queue with old_mem should be successful as we haven't
	 * changed any of the queue configuration, and there is not much we can
	 * do to recover from a failure here.
	 *
	 * WARN if we fail to recover the old rx queue, and at least free
	 * old_mem so we don't also leak that.
	 */
	if (qops->ndo_queue_start(dev, old_mem, rxq_idx)) {
		WARN(1,
		     "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
		     rxq_idx);
		qops->ndo_queue_mem_free(dev, old_mem);
	}

err_free_new_queue_mem:
	qops->ndo_queue_mem_free(dev, new_mem);

err_free_old_mem:
	kvfree(old_mem);

err_free_new_mem:
	kvfree(new_mem);

	return err;
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
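
/*
 * A minimal sketch of the driver-side contract that
 * netdev_rx_queue_restart() drives above. This is not a driver in this
 * tree; the foo_* names are hypothetical:
 *
 *	struct foo_rxq_mem { ... };	// driver's per-queue resources
 *
 *	static const struct netdev_queue_mgmt_ops foo_queue_mgmt_ops = {
 *		.ndo_queue_mem_size	= sizeof(struct foo_rxq_mem),
 *		.ndo_queue_mem_alloc	= foo_queue_mem_alloc,
 *		.ndo_queue_mem_free	= foo_queue_mem_free,
 *		.ndo_queue_start	= foo_queue_start,
 *		.ndo_queue_stop		= foo_queue_stop,
 *	};
 *
 * ndo_queue_mem_alloc() fills a caller-provided buffer of
 * ndo_queue_mem_size bytes with new queue resources without touching the
 * live queue; ndo_queue_stop()/ndo_queue_start() swap the running queue
 * over to them; ndo_queue_mem_free() releases whichever set of resources
 * lost the swap.
 */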
static int __net_mp_open_rxq(struct net_device *dev, unsigned int ifq_idx,
			     struct pp_memory_provider_params *p)
{
	struct netdev_rx_queue *rxq;
	int ret;

	if (!netdev_need_ops_lock(dev))
		return -EOPNOTSUPP;

	if (ifq_idx >= dev->real_num_rx_queues)
		return -EINVAL;
	ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues);

	rxq = __netif_get_rx_queue(dev, ifq_idx);
	if (rxq->mp_params.mp_ops)
		return -EEXIST;

	rxq->mp_params = *p;
	ret = netdev_rx_queue_restart(dev, ifq_idx);
	if (ret) {
		rxq->mp_params.mp_ops = NULL;
		rxq->mp_params.mp_priv = NULL;
	}
	return ret;
}

/**
 * net_mp_open_rxq - install a page pool memory provider on an RX queue
 * @dev: netdev to configure
 * @ifq_idx: index of the RX queue
 * @p: provider parameters to install
 *
 * Takes the netdev instance lock, binds @p to the queue and restarts the
 * queue so the provider takes effect.
 *
 * Return: 0 on success, negative errno on failure.
 */
int net_mp_open_rxq(struct net_device *dev, unsigned int ifq_idx,
		    struct pp_memory_provider_params *p)
{
	int ret;

	netdev_lock(dev);
	ret = __net_mp_open_rxq(dev, ifq_idx, p);
	netdev_unlock(dev);
	return ret;
}

static void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
			       struct pp_memory_provider_params *old_p)
{
	struct netdev_rx_queue *rxq;

	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
		return;

	rxq = __netif_get_rx_queue(dev, ifq_idx);

	/* Callers holding a netdev ref may get here after we already
	 * went through shutdown via dev_memory_provider_uninstall().
	 */
	if (dev->reg_state > NETREG_REGISTERED &&
	    !rxq->mp_params.mp_ops)
		return;

	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;

	rxq->mp_params.mp_ops = NULL;
	rxq->mp_params.mp_priv = NULL;
	WARN_ON(netdev_rx_queue_restart(dev, ifq_idx));
}

/**
 * net_mp_close_rxq - remove a page pool memory provider from an RX queue
 * @dev: netdev to configure
 * @ifq_idx: index of the RX queue
 * @old_p: the parameters the provider was installed with, for sanity checks
 *
 * Takes the netdev instance lock, clears the queue's provider parameters
 * and restarts the queue. A mismatch between @old_p and the installed
 * parameters is a caller bug and only warns.
 */
void net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
		      struct pp_memory_provider_params *old_p)
{
	netdev_lock(dev);
	__net_mp_close_rxq(dev, ifq_idx, old_p);
	netdev_unlock(dev);
}
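
/*
 * A minimal usage sketch for the two entry points above (the my_* names
 * are hypothetical; in-tree memory provider implementations such as
 * io_uring zero-copy RX follow this pattern):
 *
 *	struct pp_memory_provider_params p = {
 *		.mp_ops		= &my_mp_ops,
 *		.mp_priv	= my_binding,
 *	};
 *
 *	err = net_mp_open_rxq(dev, rxq_idx, &p);
 *	if (err)
 *		return err;
 *	...
 *	net_mp_close_rxq(dev, rxq_idx, &p);	// pass the same params back
 */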