/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <uapi/linux/pkt_sched.h>

#define DEFAULT_TX_QUEUE_LEN    1000

struct qdisc_walker {
        int stop;
        int skip;
        int count;
        int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

#define QDISC_ALIGNTO           64
#define QDISC_ALIGN(len)        (((len) + QDISC_ALIGNTO - 1) & ~(QDISC_ALIGNTO - 1))

/* A qdisc's private data lives immediately after the QDISC_ALIGN()ed
 * struct Qdisc itself.
 */
static inline void *qdisc_priv(struct Qdisc *q)
{
        return (char *)q + QDISC_ALIGN(sizeof(struct Qdisc));
}

/*
 * Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth.
 *
 * A normal IP packet is ~512 bytes, hence:
 *
 * 0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
 * 10 Mbit ethernet.
 *
 * 10 msec resolution -> < 50 Kbit/sec.
 *
 * The result: a [34]86 is not a good choice for a QoS router :-(
 *
 * Things are not so bad, though, because in the most critical places we
 * may use an artificial clock evaluated by integration of the network
 * data flow.
 */

typedef u64 psched_time_t;
typedef long psched_tdiff_t;

/* Avoid a 64-bit divide: one psched tick is 2^PSCHED_SHIFT (64) ns, so
 * both conversions are plain shifts.
 */
#define PSCHED_SHIFT            6
#define PSCHED_TICKS2NS(x)      ((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)      ((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC    PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT      0

static inline psched_time_t psched_get_time(void)
{
        return PSCHED_NS2TICKS(ktime_get_ns());
}

static inline psched_tdiff_t
psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
{
        return min(tv1 - tv2, bound);
}
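/*
 * Worked example of the tick<->ns conversion above (illustrative numbers,
 * not part of the original header): with PSCHED_SHIFT == 6, one psched
 * tick is 2^6 == 64 ns, so
 *
 *      PSCHED_TICKS_PER_SEC     == 1000000000 >> 6 == 15625000
 *      PSCHED_NS2TICKS(1000000) == 15625            (1 ms in ticks)
 *      PSCHED_TICKS2NS(15625)   == 1000000          (back to ns)
 *
 * Both directions are plain shifts, but PSCHED_NS2TICKS() truncates, so
 * up to 63 ns of precision is lost on each ns->tick conversion.
 */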
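/*
 * Minimal usage sketch for the watchdog API declared below, based on a
 * hypothetical qdisc ("foo"; the foo_* names and the next_tx field are
 * illustrative, not part of this header). A non-work-conserving qdisc
 * embeds a qdisc_watchdog in its private data, arms it from ->dequeue()
 * when the head packet is not yet eligible to be sent, and cancels it
 * when the qdisc is torn down:
 *
 *      struct foo_sched_data {
 *              struct qdisc_watchdog watchdog;
 *              u64 next_tx;    // ns timestamp of next eligible packet
 *      };
 *
 *      static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *                          struct netlink_ext_ack *extack)
 *      {
 *              struct foo_sched_data *q = qdisc_priv(sch);
 *
 *              qdisc_watchdog_init(&q->watchdog, sch);
 *              return 0;
 *      }
 *
 *      static struct sk_buff *foo_dequeue(struct Qdisc *sch)
 *      {
 *              struct foo_sched_data *q = qdisc_priv(sch);
 *
 *              if (ktime_get_ns() < q->next_tx) {
 *                      // Not ready yet: fire an hrtimer at next_tx,
 *                      // which reschedules the qdisc so ->dequeue()
 *                      // runs again.
 *                      qdisc_watchdog_schedule_ns(&q->watchdog,
 *                                                 q->next_tx);
 *                      return NULL;
 *              }
 *              return qdisc_dequeue_head(sch);
 *      }
 *
 *      static void foo_destroy(struct Qdisc *sch)
 *      {
 *              struct foo_sched_data *q = qdisc_priv(sch);
 *
 *              qdisc_watchdog_cancel(&q->watchdog);
 *      }
 *
 * In-tree qdiscs such as sch_tbf and sch_fq follow this pattern.
 */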
struct qdisc_watchdog {
        u64             last_expires;
        struct hrtimer  timer;
        struct Qdisc    *qdisc;
};

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
                                 clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);

void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
                                      u64 delta_ns);

static inline void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd,
                                              u64 expires)
{
        return qdisc_watchdog_schedule_range_ns(wd, expires, 0ULL);
}

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
                                           psched_time_t expires)
{
        qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);

extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
                               unsigned int limit,
                               struct netlink_ext_ack *extack);

int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);

void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
                                        struct nlattr *tab,
                                        struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                     struct net_device *dev, struct netdev_queue *txq,
                     spinlock_t *root_lock, bool validate);

void __qdisc_run(struct Qdisc *q);

static inline void qdisc_run(struct Qdisc *q)
{
        if (qdisc_run_begin(q)) {
                /* A NOLOCK qdisc must check 'state' under the qdisc seqlock
                 * to avoid racing with dev_qdisc_reset().
                 */
                if (!(q->flags & TCQ_F_NOLOCK) ||
                    likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
                        __qdisc_run(q);
                qdisc_run_end(q);
        }
}

static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
{
        /* Take extra care in case the skb arrived via the VLAN-accelerated
         * path: the original VLAN header has already been stripped, so use
         * skb->vlan_proto instead.
         */
        if (skb_vlan_tag_present(skb))
                return skb->vlan_proto;
        return skb->protocol;
}

/* Calculate the maximal packet size seen by the hard_start_xmit routine
 * of this device.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
        return dev->mtu + dev->hard_header_len;
}

static inline struct net *qdisc_net(struct Qdisc *q)
{
        return dev_net(q->dev_queue->dev);
}

struct tc_cbs_qopt_offload {
        u8 enable;
        s32 queue;
        s32 hicredit;
        s32 locredit;
        s32 idleslope;
        s32 sendslope;
};

struct tc_etf_qopt_offload {
        u8 enable;
        s32 queue;
};

struct tc_taprio_sched_entry {
        u8 command; /* TC_TAPRIO_CMD_* */

        /* The gate_mask on the offloading side refers to traffic classes */
        u32 gate_mask;
        u32 interval;
};

struct tc_taprio_qopt_offload {
        u8 enable;
        ktime_t base_time;
        u64 cycle_time;
        u64 cycle_time_extension;

        size_t num_entries;
        struct tc_taprio_sched_entry entries[];
};

/* Reference counting for the offload structure above */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
                                                  *offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);

#endif
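/*
 * Hypothetical driver-side sketch of the taprio reference counting
 * declared above (the foo_* names are illustrative, not from this
 * header): a driver whose TC_SETUP_QDISC_TAPRIO handler programs the
 * hardware asynchronously takes its own reference so the offload
 * structure outlives the ndo_setup_tc() call, and drops it when done:
 *
 *      static int foo_setup_taprio(struct foo_priv *priv,
 *                                  struct tc_taprio_qopt_offload *offload)
 *      {
 *              priv->taprio = taprio_offload_get(offload); // hold a ref
 *              schedule_work(&priv->taprio_work);          // program later
 *              return 0;
 *      }
 *
 *      static void foo_taprio_work(struct work_struct *work)
 *      {
 *              struct foo_priv *priv =
 *                      container_of(work, struct foo_priv, taprio_work);
 *
 *              foo_program_schedule(priv, priv->taprio);   // hypothetical
 *              taprio_offload_free(priv->taprio);          // drop our ref
 *      }
 */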