xref: /linux/net/core/netdev_queues.c (revision e2683c8868d03382da7e1ce8453b543a043066d1)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <net/netdev_queues.h>
4 #include <net/netdev_rx_queue.h>
5 #include <net/xdp_sock_drv.h>
6 
7 #include "dev.h"
8 
9 static struct device *
10 __netdev_queue_get_dma_dev(struct net_device *dev, unsigned int idx)
11 {
12 	const struct netdev_queue_mgmt_ops *queue_ops = dev->queue_mgmt_ops;
13 	struct device *dma_dev;
14 
15 	if (queue_ops && queue_ops->ndo_queue_get_dma_dev)
16 		dma_dev = queue_ops->ndo_queue_get_dma_dev(dev, idx);
17 	else
18 		dma_dev = dev->dev.parent;
19 
20 	return dma_dev && dma_dev->dma_mask ? dma_dev : NULL;
21 }
22 
/**
 * netdev_queue_get_dma_dev() - get dma device for zero-copy operations
 * @dev:	net_device
 * @idx:	queue index
 * @type:	queue type (RX or TX)
 *
 * Get dma device for zero-copy operations to be used for this queue. If
 * the queue is an RX queue leased from a physical queue, we retrieve the
 * physical queue's dma device. When the dma device is not available or
 * valid, the function will return NULL.
 *
 * Return: Device or NULL on error
 */
struct device *netdev_queue_get_dma_dev(struct net_device *dev,
					unsigned int idx,
					enum netdev_queue_type type)
{
	struct netdev_rx_queue *hw_rxq;
	struct device *dma_dev;

	/* Caller must already hold @dev's ops lock. */
	netdev_ops_assert_locked(dev);

	/* Only RX side supports queue leasing today. */
	if (type != NETDEV_QUEUE_TYPE_RX || !netif_rxq_is_leased(dev, idx))
		return __netdev_queue_get_dma_dev(dev, idx);
	/* Queue is leased but @dev is not the leasee side: no DMA device. */
	if (!netif_is_queue_leasee(dev))
		return NULL;

	/* Follow the lease back to the physical device's RX queue. */
	hw_rxq = __netif_get_rx_queue(dev, idx)->lease;

	/*
	 * Resolve the DMA device under the owning (physical) device's lock;
	 * @idx is remapped to the queue's index on that device.
	 */
	netdev_lock(hw_rxq->dev);
	idx = get_netdev_rx_queue_index(hw_rxq);
	dma_dev = __netdev_queue_get_dma_dev(hw_rxq->dev, idx);
	netdev_unlock(hw_rxq->dev);

	return dma_dev;
}
60 
61 bool netdev_can_create_queue(const struct net_device *dev,
62 			     struct netlink_ext_ack *extack)
63 {
64 	if (dev->dev.parent) {
65 		NL_SET_ERR_MSG(extack, "Device is not a virtual device");
66 		return false;
67 	}
68 	if (!dev->queue_mgmt_ops ||
69 	    !dev->queue_mgmt_ops->ndo_queue_create) {
70 		NL_SET_ERR_MSG(extack, "Device does not support queue creation");
71 		return false;
72 	}
73 	if (dev->real_num_rx_queues < 1 ||
74 	    dev->real_num_tx_queues < 1) {
75 		NL_SET_ERR_MSG(extack, "Device must have at least one real queue");
76 		return false;
77 	}
78 	return true;
79 }
80 
81 bool netdev_can_lease_queue(const struct net_device *dev,
82 			    struct netlink_ext_ack *extack)
83 {
84 	if (!dev->dev.parent) {
85 		NL_SET_ERR_MSG(extack, "Lease device is a virtual device");
86 		return false;
87 	}
88 	if (!netif_device_present(dev)) {
89 		NL_SET_ERR_MSG(extack, "Lease device has been removed from the system");
90 		return false;
91 	}
92 	if (!dev->queue_mgmt_ops) {
93 		NL_SET_ERR_MSG(extack, "Lease device does not support queue management operations");
94 		return false;
95 	}
96 	return true;
97 }
98 
99 bool netdev_queue_busy(struct net_device *dev, unsigned int idx,
100 		       enum netdev_queue_type type,
101 		       struct netlink_ext_ack *extack)
102 {
103 	if (xsk_get_pool_from_qid(dev, idx)) {
104 		NL_SET_ERR_MSG(extack, "Device queue in use by AF_XDP");
105 		return true;
106 	}
107 	if (type == NETDEV_QUEUE_TYPE_TX)
108 		return false;
109 	if (netif_rxq_is_leased(dev, idx)) {
110 		NL_SET_ERR_MSG(extack, "Device queue in use due to queue leasing");
111 		return true;
112 	}
113 	if (netif_rxq_has_mp(dev, idx)) {
114 		NL_SET_ERR_MSG(extack, "Device queue in use by memory provider");
115 		return true;
116 	}
117 	return false;
118 }
119