#ifndef MPLS_INTERNAL_H
#define MPLS_INTERNAL_H
#include <net/mpls.h>

struct mpls_entry_decoded {
        u32 label;
        u8 ttl;
        u8 tc;
        u8 bos;
};

struct mpls_pcpu_stats {
        struct mpls_link_stats stats;
        struct u64_stats_sync syncp;
};

struct mpls_dev {
        int input_enabled;
        struct net_device *dev;
        struct mpls_pcpu_stats __percpu *stats;

        struct ctl_table_header *sysctl;
        struct rcu_head rcu;
};

#if BITS_PER_LONG == 32

#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field)         \
        do {                                                            \
                __typeof__(*(mdev)->stats) *ptr =                      \
                        raw_cpu_ptr((mdev)->stats);                     \
                local_bh_disable();                                     \
                u64_stats_update_begin(&ptr->syncp);                    \
                ptr->stats.pkts_field++;                                \
                ptr->stats.bytes_field += (len);                        \
                u64_stats_update_end(&ptr->syncp);                      \
                local_bh_enable();                                      \
        } while (0)

#define MPLS_INC_STATS(mdev, field)                                     \
        do {                                                            \
                __typeof__(*(mdev)->stats) *ptr =                      \
                        raw_cpu_ptr((mdev)->stats);                     \
                local_bh_disable();                                     \
                u64_stats_update_begin(&ptr->syncp);                    \
                ptr->stats.field++;                                     \
                u64_stats_update_end(&ptr->syncp);                      \
                local_bh_enable();                                      \
        } while (0)

#else

#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field)         \
        do {                                                            \
                this_cpu_inc((mdev)->stats->stats.pkts_field);          \
                this_cpu_add((mdev)->stats->stats.bytes_field, (len));  \
        } while (0)

#define MPLS_INC_STATS(mdev, field)                     \
        this_cpu_inc((mdev)->stats->stats.field)

#endif

struct sk_buff;

#define LABEL_NOT_SPECIFIED (1 << 20)

/* This maximum ha length copied from the definition of struct neighbour */
#define VIA_ALEN_ALIGN sizeof(unsigned long)
#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, VIA_ALEN_ALIGN))

enum mpls_payload_type {
        MPT_UNSPEC, /* IPv4 or IPv6 */
        MPT_IPV4 = 4,
        MPT_IPV6 = 6,

        /* Other types not implemented:
         * - Pseudo-wire with or without control word (RFC4385)
         * - GAL (RFC5586)
         */
};

struct mpls_nh { /* next hop label forwarding entry */
        struct net_device __rcu *nh_dev;

        /* nh_flags is accessed under RCU in the packet path; it is
         * modified handling netdev events with rtnl lock held
         */
        unsigned int nh_flags;
        u8 nh_labels;
        u8 nh_via_alen;
        u8 nh_via_table;
        u8 nh_reserved1;

        u32 nh_label[0];
};

/* offset of via from beginning of mpls_nh */
#define MPLS_NH_VIA_OFF(num_labels)                                     \
        ALIGN(sizeof(struct mpls_nh) + (num_labels) * sizeof(u32),      \
              VIA_ALEN_ALIGN)

/* all nexthops within a route have the same size based on the
 * max number of labels and max via length across all nexthops
 */
#define MPLS_NH_SIZE(num_labels, max_via_alen)          \
        (MPLS_NH_VIA_OFF((num_labels)) +                \
         ALIGN((max_via_alen), VIA_ALEN_ALIGN))

enum mpls_ttl_propagation {
        MPLS_TTL_PROP_DEFAULT,
        MPLS_TTL_PROP_ENABLED,
        MPLS_TTL_PROP_DISABLED,
};

/* The route, nexthops and vias are stored together in the same memory
 * block:
 *
 * +----------------------+
 * | mpls_route           |
 * +----------------------+
 * | mpls_nh 0            |
 * +----------------------+
 * | alignment padding    |   4 bytes for odd number of labels
 * +----------------------+
 * | via[rt_max_alen] 0   |
 * +----------------------+
 * | alignment padding    |   via's aligned on sizeof(unsigned long)
 * +----------------------+
 * | ...                  |
 * +----------------------+
 * | mpls_nh n-1          |
 * +----------------------+
 * | via[rt_max_alen] n-1 |
 * +----------------------+
 */
struct mpls_route { /* next hop label forwarding entry */
        struct rcu_head rt_rcu;
        u8 rt_protocol;
        u8 rt_payload_type;
        u8 rt_max_alen;
        u8 rt_ttl_propagate;
        u8 rt_nhn;
        /* rt_nhn_alive is accessed under RCU in the packet path; it
         * is modified handling netdev events with rtnl lock held
         */
        u8 rt_nhn_alive;
        u8 rt_nh_size;
        u8 rt_via_offset;
        u8 rt_reserved1;
        struct mpls_nh rt_nh[0];
};

#define for_nexthops(rt) {                                              \
        int nhsel; struct mpls_nh *nh; u8 *__nh;                        \
        for (nhsel = 0, nh = (rt)->rt_nh, __nh = (u8 *)((rt)->rt_nh);   \
             nhsel < (rt)->rt_nhn;                                      \
             __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)

#define change_nexthops(rt) {                                           \
        int nhsel; struct mpls_nh *nh; u8 *__nh;                        \
        for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh),           \
                        __nh = (u8 *)((rt)->rt_nh);                     \
             nhsel < (rt)->rt_nhn;                                      \
             __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)

#define endfor_nexthops(rt) }

static inline struct mpls_shim_hdr mpls_entry_encode(u32 label, unsigned ttl, unsigned tc, bool bos)
{
        struct mpls_shim_hdr result;
        result.label_stack_entry =
                cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
                            (tc << MPLS_LS_TC_SHIFT) |
                            (bos ? (1 << MPLS_LS_S_SHIFT) : 0) |
                            (ttl << MPLS_LS_TTL_SHIFT));
        return result;
}

static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *hdr)
{
        struct mpls_entry_decoded result;
        unsigned entry = be32_to_cpu(hdr->label_stack_entry);

        result.label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
        result.ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
        result.tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
        result.bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;

        return result;
}

static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
{
        return rcu_dereference_rtnl(dev->mpls_ptr);
}

int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels,
                   const u32 label[]);
int nla_get_labels(const struct nlattr *nla, u8 max_labels, u8 *labels,
                   u32 label[]);
int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
                u8 via[]);
bool mpls_output_possible(const struct net_device *dev);
unsigned int mpls_dev_mtu(const struct net_device *dev);
bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
void mpls_stats_inc_outucastpkts(struct net_device *dev,
                                 const struct sk_buff *skb);

#endif /* MPLS_INTERNAL_H */
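/*
 * Illustrative sketch only, not part of the original header: a hedged
 * example of how mpls_entry_encode()/mpls_entry_decode() round-trip a
 * single label stack entry, plus a worked size calculation for the
 * nexthop layout macros. The function name and the label/TTL values
 * are hypothetical; the sizes assume a typical 64-bit build where
 * sizeof(struct mpls_nh) == 16.
 */
#if 0
static void mpls_entry_codec_example(void)
{
        /* Encode label 100 with TC 0, TTL 64, bottom-of-stack bit set. */
        struct mpls_shim_hdr hdr = mpls_entry_encode(100, 64, 0, true);

        /* Decode it back: each field is masked and shifted out of the
         * 32-bit big-endian label stack entry.
         */
        struct mpls_entry_decoded dec = mpls_entry_decode(&hdr);

        /* Expect: dec.label == 100, dec.ttl == 64, dec.tc == 0, dec.bos == 1 */

        /* Worked layout example (64-bit build assumed):
         *   MPLS_NH_VIA_OFF(1)        == ALIGN(16 + 4, 8) == 24
         *   MPLS_NH_SIZE(1, ETH_ALEN) == 24 + ALIGN(6, 8) == 32
         * i.e. an odd label count leaves 4 bytes of padding before the via,
         * matching the "alignment padding" rows in the mpls_route diagram.
         */
}
#endif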