// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/memory_provider.h>

#include "page_pool_priv.h"

/**
 * netdev_rx_queue_restart() - recreate an RX queue's memory and restart it
 * @dev:     netdev whose queue is being restarted
 * @rxq_idx: index of the RX queue to restart
 *
 * Allocates fresh per-queue memory via the device's queue management ops,
 * stops the old queue, and starts it again on the new memory. If starting
 * the new queue fails, the old queue is restarted with its original memory.
 * The netdev instance lock must be held.
 *
 * Return: 0 on success, negative errno on failure.
 */
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err;

	if (!qops || !qops->ndo_queue_stop || !qops->ndo_queue_mem_free ||
	    !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
		return -EOPNOTSUPP;

	netdev_assert_locked(dev);

	new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem)
		return -ENOMEM;

	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!old_mem) {
		err = -ENOMEM;
		goto err_free_new_mem;
	}

	err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
	if (err)
		goto err_free_old_mem;

	err = page_pool_check_memory_provider(dev, rxq);
	if (err)
		goto err_free_new_queue_mem;

	if (netif_running(dev)) {
		err = qops->ndo_queue_stop(dev, old_mem, rxq_idx);
		if (err)
			goto err_free_new_queue_mem;

		err = qops->ndo_queue_start(dev, new_mem, rxq_idx);
		if (err)
			goto err_start_queue;
	} else {
		/* Device is down, so there is no running queue to swap out;
		 * swap the buffers so the common cleanup below frees the
		 * newly allocated queue memory.
		 */
		swap(new_mem, old_mem);
	}

	qops->ndo_queue_mem_free(dev, old_mem);

	kvfree(old_mem);
	kvfree(new_mem);

	return 0;

err_start_queue:
	/* Restarting the queue with old_mem should be successful as we haven't
	 * changed any of the queue configuration, and there is not much we can
	 * do to recover from a failure here.
	 *
	 * WARN if we fail to recover the old rx queue, and at least free
	 * old_mem so we don't also leak that.
	 */
	if (qops->ndo_queue_start(dev, old_mem, rxq_idx)) {
		WARN(1,
		     "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
		     rxq_idx);
		qops->ndo_queue_mem_free(dev, old_mem);
	}

err_free_new_queue_mem:
	qops->ndo_queue_mem_free(dev, new_mem);

err_free_old_mem:
	kvfree(old_mem);

err_free_new_mem:
	kvfree(new_mem);

	return err;
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
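
/**
 * __net_mp_open_rxq() - bind a memory provider to an RX queue and restart it
 * @dev:     netdev to configure
 * @rxq_idx: index of the RX queue to bind
 * @p:       memory provider parameters to install
 * @extack:  netlink extended ack for error messages (may be NULL)
 *
 * Checks that the queue can take a custom memory provider (index in range,
 * tcp-data-split enabled, hds-thresh zero, no XDP program attached, queue
 * not already bound to a provider or an AF_XDP pool), installs @p, and
 * restarts the queue. On restart failure the binding is rolled back.
 * The netdev instance lock must be held.
 *
 * Return: 0 on success, negative errno on failure.
 */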
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		      const struct pp_memory_provider_params *p,
		      struct netlink_ext_ack *extack)
{
	struct netdev_rx_queue *rxq;
	int ret;

	if (!netdev_need_ops_lock(dev))
		return -EOPNOTSUPP;

	if (rxq_idx >= dev->real_num_rx_queues) {
		NL_SET_ERR_MSG(extack, "rx queue index out of range");
		return -ERANGE;
	}
	rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);

	if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
		NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
		return -EINVAL;
	}
	if (dev->cfg->hds_thresh) {
		NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
		return -EINVAL;
	}
	if (dev_xdp_prog_count(dev)) {
		NL_SET_ERR_MSG(extack, "unable to attach custom memory provider to device with XDP program attached");
		return -EEXIST;
	}

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	if (rxq->mp_params.mp_ops) {
		NL_SET_ERR_MSG(extack, "designated queue already has a memory provider bound");
		return -EEXIST;
	}
#ifdef CONFIG_XDP_SOCKETS
	if (rxq->pool) {
		NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
		return -EBUSY;
	}
#endif

	rxq->mp_params = *p;
	ret = netdev_rx_queue_restart(dev, rxq_idx);
	if (ret) {
		rxq->mp_params.mp_ops = NULL;
		rxq->mp_params.mp_priv = NULL;
	}
	return ret;
}

int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		    struct pp_memory_provider_params *p)
{
	int ret;

	netdev_lock(dev);
	ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
	netdev_unlock(dev);
	return ret;
}

/* Unbind a memory provider from an RX queue and restart the queue. Expects
 * the netdev instance lock to be held; @old_p must match the currently
 * installed provider parameters.
 */
void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
			const struct pp_memory_provider_params *old_p)
{
	struct netdev_rx_queue *rxq;
	int err;

	if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
		return;

	rxq = __netif_get_rx_queue(dev, ifq_idx);

	/* Callers holding a netdev ref may get here after we already
	 * went through shutdown via dev_memory_provider_uninstall().
	 */
	if (dev->reg_state > NETREG_REGISTERED &&
	    !rxq->mp_params.mp_ops)
		return;

	if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
			 rxq->mp_params.mp_priv != old_p->mp_priv))
		return;

	rxq->mp_params.mp_ops = NULL;
	rxq->mp_params.mp_priv = NULL;
	err = netdev_rx_queue_restart(dev, ifq_idx);
	WARN_ON(err && err != -ENETDOWN);
}

void net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
		      struct pp_memory_provider_params *old_p)
{
	netdev_lock(dev);
	__net_mp_close_rxq(dev, ifq_idx, old_p);
	netdev_unlock(dev);
}
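
/* Illustrative sketch, not part of this file: the driver-side contract that
 * netdev_rx_queue_restart() relies on. A driver opts in by populating
 * struct netdev_queue_mgmt_ops and assigning it to dev->queue_mgmt_ops
 * before registration; the "foo" names below are hypothetical.
 *
 *	static const struct netdev_queue_mgmt_ops foo_queue_mgmt_ops = {
 *		.ndo_queue_mem_size	= sizeof(struct foo_rx_queue_mem),
 *		.ndo_queue_mem_alloc	= foo_queue_mem_alloc,
 *		.ndo_queue_mem_free	= foo_queue_mem_free,
 *		.ndo_queue_start	= foo_queue_start,
 *		.ndo_queue_stop		= foo_queue_stop,
 *	};
 *
 *	dev->queue_mgmt_ops = &foo_queue_mgmt_ops;
 */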