/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to
 *  Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/delay.h>	/* msleep() in caif_exit_net() */
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in list */
	struct mutex lock;
};

struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static int caif_net_id;

struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	BUG_ON(!net);
	caifn = net_generic(net, caif_net_id);
	BUG_ON(!caifn);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	BUG_ON(!net);
	caifn = net_generic(net, caif_net_id);
	BUG_ON(!caifn);
	return &caifn->caifdevs;
}

/*
 * Per-CPU reference counting on the device entry; the total count is the
 * sum over all possible CPUs (see caifd_refcnt_read()).
 */
static void caifd_put(struct caif_device_entry *e)
{
	irqsafe_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	irqsafe_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs;
	struct caif_device_entry *caifd;

	caifdevs = caif_device_list(dev_net(dev));
	BUG_ON(!caifdevs);

	caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}

static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;
	BUG_ON(!caifdevs);
	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

/* Push a CAIF packet out on the underlying network device */
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err;
	struct caif_device_entry *caifd =
	    container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
			!netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed upwards, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);
	return 0;
}

static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};

/* Forward flow on/off indications from the device into the CAIF stack */
static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}

/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *arg)
{
	struct net_device *dev = arg;
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	enum cfcnfg_phy_preference pref;
	enum cfcnfg_phy_type phy_type;
	struct cfcnfg *cfg;
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));

	if (dev->type != ARPHRD_CAIF)
		return 0;

	cfg = get_cfcnfg(dev_net(dev));
	if (cfg == NULL)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		caifd = caif_device_alloc(dev);
		if (!caifd)
			return 0;

		caifdev = netdev_priv(dev);
		caifdev->flowctrl = dev_flowctrl;

		caifd->layer.transmit = transmit;

		if (caifdev->use_frag)
			phy_type = CFPHYTYPE_FRAG;
		else
			phy_type = CFPHYTYPE_CAIF;

		switch (caifdev->link_select) {
		case CAIF_LINK_HIGH_BANDW:
			pref = CFPHYPREF_HIGH_BW;
			break;
		case CAIF_LINK_LOW_LATENCY:
			pref = CFPHYPREF_LOW_LAT;
			break;
		default:
			pref = CFPHYPREF_HIGH_BW;
			break;
		}
		strncpy(caifd->layer.name, dev->name,
			sizeof(caifd->layer.name) - 1);
		caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;

		mutex_lock(&caifdevs->lock);
		list_add_rcu(&caifd->list, &caifdevs->list);

		cfcnfg_add_phy_layer(cfg,
				     phy_type,
				     dev,
				     &caifd->layer,
				     pref,
				     caifdev->use_fcs,
				     caifdev->use_stx);
		mutex_unlock(&caifdevs->lock);
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are taken, simply ignore NETDEV_UNREGISTER and wait
		 * for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF Stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF Stack associated
		 * with the net-device un-registering is freed.
		 */

		if (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Re-enroll the device if the CAIF stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	BUG_ON(!caifn);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg) {
		pr_warn("can't create cfcnfg\n");
		return -ENOMEM;
	}

	return 0;
}

static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(net);
	struct cfcnfg *cfg;

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	cfg = get_cfcnfg(net);
	if (cfg == NULL) {
		mutex_unlock(&caifdevs->lock);
		rtnl_unlock();
		return;
	}

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
			(caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id   = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Initialize Caif devices list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_device(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}

static void __exit caif_device_exit(void)
{
	unregister_pernet_device(&caif_net_ops);
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
}

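/* Hook the init/exit routines into module load and unload */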
module_init(caif_device_init);
module_exit(caif_device_exit);