xref: /linux/include/net/pkt_sched.h (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <uapi/linux/pkt_sched.h>

#define DEFAULT_TX_QUEUE_LEN	1000
#define STAB_SIZE_LOG_MAX	30

struct qdisc_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

#define qdisc_priv(q)							\
	_Generic(q,							\
		 const struct Qdisc * : (const void *)&q->privdata,	\
		 struct Qdisc * : (void *)&q->privdata)
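
/* Usage sketch (illustrative only, not part of the original header; the
 * foo_* names are hypothetical): a qdisc whose Qdisc_ops::priv_size is
 * sizeof(its private state) reaches that state through qdisc_priv():
 *
 *	struct foo_sched_data {
 *		u32 limit;
 *	};
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		q->limit = qdisc_dev(sch)->tx_queue_len;
 *		return 0;
 *	}
 */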

/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth

   A normal IP packet is ~512 bytes, hence:

   0.5Kbyte / 1Mbyte/sec = 0.5msec, so we need a ~50usec timer for
   10Mbit ethernet.

   10msec resolution -> < 50Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not as bad as they look, though, because we can use an
   artificial clock, evaluated by integrating the network data flow
   at the most critical places.
 */

typedef u64	psched_time_t;

/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT			6
#define PSCHED_TICKS2NS(x)		((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)		((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC		PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT		0
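
/* Worked example (illustrative): with PSCHED_SHIFT == 6, one psched tick is
 * 2^6 = 64ns, so PSCHED_TICKS_PER_SEC evaluates to
 * 1000000000 >> 6 = 15625000 ticks per second, and both conversions reduce
 * to cheap shifts instead of 64 bit divides.
 */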

static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_get_ns());
}

struct qdisc_watchdog {
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);

void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
				      u64 delta_ns);

static inline void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd,
					      u64 expires)
{
	return qdisc_watchdog_schedule_range_ns(wd, expires, 0ULL);
}

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);

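/* Usage sketch (illustrative only; the foo_* names are hypothetical): a
 * rate-limiting qdisc typically calls qdisc_watchdog_init() from ->init(),
 * qdisc_watchdog_cancel() from ->reset()/->destroy(), and arms the watchdog
 * from ->dequeue() when the head packet may not leave yet:
 *
 *	struct foo_sched_data {
 *		struct qdisc_watchdog watchdog;
 *		u64 next_tx_ns;
 *	};
 *
 *	static struct sk_buff *foo_dequeue(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		if (q->next_tx_ns > ktime_get_ns()) {
 *			qdisc_watchdog_schedule_ns(&q->watchdog, q->next_tx_ns);
 *			return NULL;
 *		}
 *		return qdisc_dequeue_head(sch);
 *	}
 */
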
extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit,
			       struct netlink_ext_ack *extack);

int register_qdisc(struct Qdisc_ops *qops);
void unregister_qdisc(struct Qdisc_ops *qops);
#define NET_SCH_ALIAS_PREFIX "net-sch-"
#define MODULE_ALIAS_NET_SCH(id)	MODULE_ALIAS(NET_SCH_ALIAS_PREFIX id)
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
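
/* Usage sketch (illustrative only; "foo" is a hypothetical qdisc): a qdisc
 * module registers its Qdisc_ops at init time and advertises the module
 * alias that lets "tc qdisc add ... foo" autoload it:
 *
 *	static struct Qdisc_ops foo_qdisc_ops __read_mostly = {
 *		.id		= "foo",
 *		.priv_size	= sizeof(struct foo_sched_data),
 *		.enqueue	= foo_enqueue,
 *		.dequeue	= foo_dequeue,
 *		.peek		= qdisc_peek_dequeued,
 *		.init		= foo_init,
 *		.owner		= THIS_MODULE,
 *	};
 *	MODULE_ALIAS_NET_SCH("foo");
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return register_qdisc(&foo_qdisc_ops);
 *	}
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		unregister_qdisc(&foo_qdisc_ops);
 *	}
 *	module_init(foo_module_init);
 *	module_exit(foo_module_exit);
 */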

void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate);

void __qdisc_run(struct Qdisc *q);

static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {
		__qdisc_run(q);
		qdisc_run_end(q);
	}
}

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* Calculate the maximal size of a packet seen by the hard_start_xmit
   routine of this device.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return READ_ONCE(dev->mtu) + dev->hard_header_len;
}

static inline struct net *qdisc_net(struct Qdisc *q)
{
	return dev_net(q->dev_queue->dev);
}

struct tc_query_caps_base {
	enum tc_setup_type type;
	void *caps;
};
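
/* Illustrative sketch of how a driver might answer a capability query (the
 * foo_* names are hypothetical): the stack wraps the qdisc-specific caps
 * struct in tc_query_caps_base and passes it to ndo_setup_tc() with
 * TC_QUERY_CAPS:
 *
 *	static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
 *				void *type_data)
 *	{
 *		switch (type) {
 *		case TC_QUERY_CAPS: {
 *			struct tc_query_caps_base *base = type_data;
 *
 *			if (base->type == TC_SETUP_QDISC_TAPRIO) {
 *				struct tc_taprio_caps *caps = base->caps;
 *
 *				caps->supports_queue_max_sdu = true;
 *			}
 *			return 0;
 *		}
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */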

struct tc_cbs_qopt_offload {
	u8 enable;
	s32 queue;
	s32 hicredit;
	s32 locredit;
	s32 idleslope;
	s32 sendslope;
};
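
/* Illustrative sketch (hypothetical foo_* names): cbs hands this struct to
 * the driver through ndo_setup_tc(TC_SETUP_QDISC_CBS); enable == 0 asks the
 * driver to tear the shaper down again:
 *
 *	static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
 *				void *type_data)
 *	{
 *		struct tc_cbs_qopt_offload *cbs;
 *
 *		if (type != TC_SETUP_QDISC_CBS)
 *			return -EOPNOTSUPP;
 *
 *		cbs = type_data;
 *		if (!cbs->enable)
 *			return foo_disable_cbs(dev, cbs->queue);
 *
 *		return foo_program_cbs(dev, cbs->queue, cbs->idleslope,
 *				       cbs->sendslope, cbs->hicredit,
 *				       cbs->locredit);
 *	}
 */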

struct tc_etf_qopt_offload {
	u8 enable;
	s32 queue;
};

struct tc_mqprio_caps {
	bool validate_queue_counts:1;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	struct netlink_ext_ack *extack;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
	unsigned long preemptible_tcs;
};

struct tc_taprio_caps {
	bool supports_queue_max_sdu:1;
	bool gate_mask_per_txq:1;
	/* Device expects lower TXQ numbers to have higher priority than higher
	 * TXQs, regardless of their TC mapping. DO NOT USE FOR NEW DRIVERS,
	 * INSTEAD ENFORCE A PROPER TC:TXQ MAPPING COMING FROM USER SPACE.
	 */
	bool broken_mqprio:1;
};

enum tc_taprio_qopt_cmd {
	TAPRIO_CMD_REPLACE,
	TAPRIO_CMD_DESTROY,
	TAPRIO_CMD_STATS,
	TAPRIO_CMD_QUEUE_STATS,
};

/**
 * struct tc_taprio_qopt_stats - IEEE 802.1Qbv statistics
 * @window_drops: Frames that were dropped because they were too large to be
 *	transmitted in any of the allotted time windows (open gates) for their
 *	traffic class.
 * @tx_overruns: Frames still being transmitted by the MAC after the
 *	transmission gate associated with their traffic class has closed.
 *	Equivalent to `12.29.1.1.2 TransmissionOverrun` from 802.1Q-2018.
 */
struct tc_taprio_qopt_stats {
	u64 window_drops;
	u64 tx_overruns;
};

struct tc_taprio_qopt_queue_stats {
	int queue;
	struct tc_taprio_qopt_stats stats;
};

struct tc_taprio_sched_entry {
	u8 command; /* TC_TAPRIO_CMD_* */

	/* The gate_mask on the offload side refers to traffic classes */
	u32 gate_mask;
	u32 interval;
};

struct tc_taprio_qopt_offload {
	enum tc_taprio_qopt_cmd cmd;

	union {
		/* TAPRIO_CMD_STATS */
		struct tc_taprio_qopt_stats stats;
		/* TAPRIO_CMD_QUEUE_STATS */
		struct tc_taprio_qopt_queue_stats queue_stats;
		/* TAPRIO_CMD_REPLACE */
		struct {
			struct tc_mqprio_qopt_offload mqprio;
			struct netlink_ext_ack *extack;
			ktime_t base_time;
			u64 cycle_time;
			u64 cycle_time_extension;
			u32 max_sdu[TC_MAX_QUEUE];

			size_t num_entries;
			struct tc_taprio_sched_entry entries[];
		};
	};
};

#if IS_ENABLED(CONFIG_NET_SCH_TAPRIO)

/* Reference counting */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);

#else

/* Reference counting */
static inline struct tc_taprio_qopt_offload *
taprio_offload_get(struct tc_taprio_qopt_offload *offload)
{
	return NULL;
}

static inline void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
}

#endif
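
/* Usage sketch (illustrative, hypothetical foo_* names): a driver that
 * programs the gate control list asynchronously can keep the offload object
 * alive beyond the ndo_setup_tc() call by taking its own reference, and must
 * drop it with taprio_offload_free() once the hardware has been updated:
 *
 *	static int foo_replace_schedule(struct foo_priv *priv,
 *					struct tc_taprio_qopt_offload *offload)
 *	{
 *		priv->taprio = taprio_offload_get(offload);
 *		schedule_work(&priv->taprio_work);
 *		return 0;
 *	}
 *
 *	static void foo_taprio_work(struct work_struct *work)
 *	{
 *		struct foo_priv *priv = container_of(work, struct foo_priv,
 *						     taprio_work);
 *
 *		foo_program_gcl(priv, priv->taprio);
 *		taprio_offload_free(priv->taprio);
 *		priv->taprio = NULL;
 *	}
 */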

/* Ensure skb_mstamp_ns, which might have been populated with the txtime, is
 * not mistaken for a software timestamp, because this will otherwise prevent
 * the dispatch of hardware timestamps to the socket.
 */
static inline void skb_txtime_consumed(struct sk_buff *skb)
{
	skb->tstamp = ktime_set(0, 0);
}
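
/* Illustrative sketch (hypothetical foo_* names): a driver that launches the
 * frame at skb->tstamp (e.g. via a launch-time capable MAC) can clear the
 * field once the value has been consumed, so it is not later mistaken for a
 * software timestamp:
 *
 *	static void foo_xmit_launchtime(struct foo_ring *ring, struct sk_buff *skb)
 *	{
 *		ktime_t txtime = skb->tstamp;
 *
 *		skb_txtime_consumed(skb);
 *		foo_ring_set_launchtime(ring, txtime);
 *	}
 */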

static inline bool tc_qdisc_stats_dump(struct Qdisc *sch,
				       unsigned long cl,
				       struct qdisc_walker *arg)
{
	if (arg->count >= arg->skip && arg->fn(sch, cl, arg) < 0) {
		arg->stop = 1;
		return false;
	}

	arg->count++;
	return true;
}
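
/* Usage sketch (illustrative, hypothetical foo_* names): a classful qdisc's
 * ->walk() implementation lets tc_qdisc_stats_dump() handle the skip/count
 * bookkeeping while iterating its classes:
 *
 *	static void foo_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *		unsigned int i;
 *
 *		if (arg->stop)
 *			return;
 *
 *		for (i = 0; i < q->num_classes; i++) {
 *			if (!tc_qdisc_stats_dump(sch, i + 1, arg))
 *				return;
 *		}
 *	}
 */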

static inline void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}

static inline unsigned int qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (unlikely(skb == NULL)) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}
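
/* Usage sketch (illustrative, hypothetical names): hierarchical schedulers
 * use qdisc_peek_len() to learn how many bytes the next dequeue from a child
 * qdisc would transmit, e.g. to check it against the credit a class has
 * accumulated before actually dequeuing:
 *
 *	static bool foo_class_may_send(struct foo_class *cl)
 *	{
 *		unsigned int len = qdisc_peek_len(cl->qdisc);
 *
 *		return len > 0 && cl->credit >= (s64)len;
 *	}
 */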

#endif