#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <uapi/linux/pkt_sched.h>

#define DEFAULT_TX_QUEUE_LEN	1000

/* Cookie passed to a qdisc's ->walk() callback while iterating over its
 * classes.
 */
struct qdisc_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

#define QDISC_ALIGNTO		64
#define QDISC_ALIGN(len)	(((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))

/* A qdisc's private data area follows struct Qdisc, aligned to
 * QDISC_ALIGNTO bytes.
 */
static inline void *qdisc_priv(struct Qdisc *q)
{
	return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
}

/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth

   Normal IP packet size ~ 512 bytes, hence:

   0.5Kbyte/1Mbyte/sec = 0.5msec, so we need a 50usec timer for
   10Mbit ethernet.

   10msec resolution -> < 50Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, though, because we can use an artificial
   clock, evaluated by integrating the network data flow, in the
   most critical places.
 */

typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT			6
#define PSCHED_TICKS2NS(x)		((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)		((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC		PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT		0

static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_get_ns());
}

static inline psched_tdiff_t
psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
{
	return min(tv1 - tv2, bound);
}

/* hrtimer-based watchdog used by shaping/rate-limiting qdiscs to restart
 * the transmit queue once the next packet becomes eligible to be sent.
 */
struct qdisc_watchdog {
	u64		last_expires;
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);

extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit);

int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);

void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock, bool validate);

void __qdisc_run(struct Qdisc *q);

static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q))
		__qdisc_run(q);
}

static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
{
	/* We need to take extra care in case the skb came via
	 * the vlan accelerated path. In that case, use skb->vlan_proto,
	 * as the original vlan header was already stripped.
	 */
	if (skb_vlan_tag_present(skb))
		return skb->vlan_proto;
	return skb->protocol;
}

/* Calculate the maximal size of a packet seen by the hard_start_xmit
 * routine of this device.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return dev->mtu + dev->hard_header_len;
}

static inline bool is_classid_clsact_ingress(u32 classid)
{
	/* This also returns true for the ingress qdisc */
	return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
	       TC_H_MIN(classid) != TC_H_MIN(TC_H_MIN_EGRESS);
}

static inline bool is_classid_clsact_egress(u32 classid)
{
	return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
	       TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_EGRESS);
}

static inline struct net *qdisc_net(struct Qdisc *q)
{
	return dev_net(q->dev_queue->dev);
}

#endif
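
/*
 * Worked example of the psched clock scale (illustrative only, derived from
 * the PSCHED_* macros above): with PSCHED_SHIFT == 6, one psched tick is
 * 2^6 = 64ns, so PSCHED_TICKS_PER_SEC = NSEC_PER_SEC >> 6 = 15625000, and
 * both PSCHED_TICKS2NS() and PSCHED_NS2TICKS() reduce to plain shifts
 * instead of 64 bit divisions on the fast path.
 */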
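
/*
 * Illustrative sketch (compiled out) of how a rate-limiting qdisc might use
 * the watchdog API declared above.  "struct example_sched", the next_tx_ns
 * bookkeeping and the callback names are assumptions made for this example
 * only; qdisc_priv() is from this header and qdisc_dequeue_head() from
 * <net/sch_generic.h>.
 */
#if 0
struct example_sched {
	struct qdisc_watchdog	watchdog;	/* re-arms TX when throttled */
	u64			next_tx_ns;	/* earliest time the next packet may leave */
};

static int example_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct example_sched *q = qdisc_priv(sch);	/* private area behind struct Qdisc */

	qdisc_watchdog_init(&q->watchdog, sch);
	q->next_tx_ns = 0;
	return 0;
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct example_sched *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();

	if (now < q->next_tx_ns) {
		/* Not eligible yet: arm the hrtimer so the queue is
		 * rescheduled once the next packet may be sent.
		 */
		qdisc_watchdog_schedule_ns(&q->watchdog, q->next_tx_ns);
		return NULL;
	}

	/* ... dequeue the head packet and advance next_tx_ns by its
	 * transmission time at the configured rate ...
	 */
	return qdisc_dequeue_head(sch);
}

static void example_destroy(struct Qdisc *sch)
{
	struct example_sched *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
}
#endif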