/*
 * lwtunnel	Infrastructure for light weight tunnels like mpls
 *
 * Authors:	Roopa Prabhu, <roopa@cumulusnetworks.com>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/lwtunnel.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/err.h>

#include <net/lwtunnel.h>
#include <net/rtnetlink.h>
#include <net/ip6_fib.h>

struct lwtunnel_state *lwtunnel_state_alloc(int encap_len)
{
	struct lwtunnel_state *lws;

	lws = kzalloc(sizeof(*lws) + encap_len, GFP_ATOMIC);

	return lws;
}
EXPORT_SYMBOL(lwtunnel_state_alloc);

static const struct lwtunnel_encap_ops __rcu *
		lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly;

int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
			   unsigned int num)
{
	if (num > LWTUNNEL_ENCAP_MAX)
		return -ERANGE;

	return !cmpxchg((const struct lwtunnel_encap_ops **)
			&lwtun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(lwtunnel_encap_add_ops);

int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
			   unsigned int encap_type)
{
	int ret;

	if (encap_type == LWTUNNEL_ENCAP_NONE ||
	    encap_type > LWTUNNEL_ENCAP_MAX)
		return -ERANGE;

	ret = (cmpxchg((const struct lwtunnel_encap_ops **)
		       &lwtun_encaps[encap_type],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(lwtunnel_encap_del_ops);

int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
			 struct nlattr *encap, unsigned int family,
			 const void *cfg, struct lwtunnel_state **lws)
{
	const struct lwtunnel_encap_ops *ops;
	int ret = -EINVAL;

	if (encap_type == LWTUNNEL_ENCAP_NONE ||
	    encap_type > LWTUNNEL_ENCAP_MAX)
		return ret;

	ret = -EOPNOTSUPP;
	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[encap_type]);
	if (likely(ops && ops->build_state))
		ret = ops->build_state(dev, encap, family, cfg, lws);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(lwtunnel_build_state);

int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
{
	const struct lwtunnel_encap_ops *ops;
	struct nlattr *nest;
	int ret = -EINVAL;

	if (!lwtstate)
		return 0;

	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
		return 0;

	ret = -EOPNOTSUPP;
	nest = nla_nest_start(skb, RTA_ENCAP);
	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
	if (likely(ops && ops->fill_encap))
		ret = ops->fill_encap(skb, lwtstate);
	rcu_read_unlock();

	if (ret)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	ret = nla_put_u16(skb, RTA_ENCAP_TYPE, lwtstate->type);
	if (ret)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);

	return (ret == -EOPNOTSUPP ? 0 : ret);
}
EXPORT_SYMBOL(lwtunnel_fill_encap);

int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
{
	const struct lwtunnel_encap_ops *ops;
	int ret = 0;

	if (!lwtstate)
		return 0;

	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
		return 0;

	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
	if (likely(ops && ops->get_encap_size))
		ret = nla_total_size(ops->get_encap_size(lwtstate));
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(lwtunnel_get_encap_size);

int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	const struct lwtunnel_encap_ops *ops;
	int ret = 0;

	if (!a && !b)
		return 0;

	if (!a || !b)
		return 1;

	if (a->type != b->type)
		return 1;

	if (a->type == LWTUNNEL_ENCAP_NONE ||
	    a->type > LWTUNNEL_ENCAP_MAX)
		return 0;

	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[a->type]);
	if (likely(ops && ops->cmp_encap))
		ret = ops->cmp_encap(a, b);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(lwtunnel_cmp_encap);

int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct lwtunnel_encap_ops *ops;
	struct lwtunnel_state *lwtstate;
	int ret = -EINVAL;

	if (!dst)
		goto drop;
	lwtstate = dst->lwtstate;

	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
		return 0;

	ret = -EOPNOTSUPP;
	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
	if (likely(ops && ops->output))
		ret = ops->output(sk, skb);
	rcu_read_unlock();

	if (ret == -EOPNOTSUPP)
		goto drop;

	return ret;

drop:
	kfree_skb(skb);

	return ret;
}
EXPORT_SYMBOL(lwtunnel_output);

int lwtunnel_input(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct lwtunnel_encap_ops *ops;
	struct lwtunnel_state *lwtstate;
	int ret = -EINVAL;

	if (!dst)
		goto drop;
	lwtstate = dst->lwtstate;

	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
		return 0;

	ret = -EOPNOTSUPP;
	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
	if (likely(ops && ops->input))
		ret = ops->input(skb);
	rcu_read_unlock();

	if (ret == -EOPNOTSUPP)
		goto drop;

	return ret;

drop:
	kfree_skb(skb);

	return ret;
}
EXPORT_SYMBOL(lwtunnel_input);
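
/*
 * Usage sketch (illustrative only, not part of the upstream file): an encap
 * implementation fills in a struct lwtunnel_encap_ops for its LWTUNNEL_ENCAP_*
 * type and registers it with lwtunnel_encap_add_ops() at module init, then
 * unregisters it with lwtunnel_encap_del_ops() on exit.  The my_encap_*
 * callbacks below are hypothetical placeholders; the callback names mirror the
 * ops members this file dispatches to (build_state, fill_encap, get_encap_size,
 * cmp_encap, output, input).  See net/mpls/mpls_iptunnel.c for a real user.
 *
 *	static const struct lwtunnel_encap_ops my_encap_ops = {
 *		.build_state	= my_encap_build_state,
 *		.fill_encap	= my_encap_fill_encap,
 *		.get_encap_size	= my_encap_nlsize,
 *		.cmp_encap	= my_encap_cmp,
 *		.output		= my_encap_output,
 *		.input		= my_encap_input,
 *	};
 *
 *	static int __init my_encap_init(void)
 *	{
 *		return lwtunnel_encap_add_ops(&my_encap_ops, LWTUNNEL_ENCAP_MPLS);
 *	}
 *
 *	static void __exit my_encap_exit(void)
 *	{
 *		lwtunnel_encap_del_ops(&my_encap_ops, LWTUNNEL_ENCAP_MPLS);
 *	}
 */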