1 /* 2 * CAIF Interface registration. 3 * Copyright (C) ST-Ericsson AB 2010 4 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com 5 * License terms: GNU General Public License (GPL) version 2 6 * 7 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont 8 * and Sakari Ailus <sakari.ailus@nokia.com> 9 */ 10 11 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ 12 13 #include <linux/kernel.h> 14 #include <linux/if_arp.h> 15 #include <linux/net.h> 16 #include <linux/netdevice.h> 17 #include <linux/mutex.h> 18 #include <linux/module.h> 19 #include <linux/spinlock.h> 20 #include <net/netns/generic.h> 21 #include <net/net_namespace.h> 22 #include <net/pkt_sched.h> 23 #include <net/caif/caif_device.h> 24 #include <net/caif/caif_layer.h> 25 #include <net/caif/cfpkt.h> 26 #include <net/caif/cfcnfg.h> 27 #include <net/caif/cfserl.h> 28 29 MODULE_LICENSE("GPL"); 30 31 /* Used for local tracking of the CAIF net devices */ 32 struct caif_device_entry { 33 struct cflayer layer; 34 struct list_head list; 35 struct net_device *netdev; 36 int __percpu *pcpu_refcnt; 37 spinlock_t flow_lock; 38 struct sk_buff *xoff_skb; 39 void (*xoff_skb_dtor)(struct sk_buff *skb); 40 bool xoff; 41 }; 42 43 struct caif_device_entry_list { 44 struct list_head list; 45 /* Protects simulanous deletes in list */ 46 struct mutex lock; 47 }; 48 49 struct caif_net { 50 struct cfcnfg *cfg; 51 struct caif_device_entry_list caifdevs; 52 }; 53 54 static int caif_net_id; 55 static int q_high = 50; /* Percent */ 56 57 struct cfcnfg *get_cfcnfg(struct net *net) 58 { 59 struct caif_net *caifn; 60 caifn = net_generic(net, caif_net_id); 61 return caifn->cfg; 62 } 63 EXPORT_SYMBOL(get_cfcnfg); 64 65 static struct caif_device_entry_list *caif_device_list(struct net *net) 66 { 67 struct caif_net *caifn; 68 caifn = net_generic(net, caif_net_id); 69 return &caifn->caifdevs; 70 } 71 72 static void caifd_put(struct caif_device_entry *e) 73 { 74 this_cpu_dec(*e->pcpu_refcnt); 75 } 76 77 static void caifd_hold(struct caif_device_entry *e) 78 { 79 this_cpu_inc(*e->pcpu_refcnt); 80 } 81 82 static int caifd_refcnt_read(struct caif_device_entry *e) 83 { 84 int i, refcnt = 0; 85 for_each_possible_cpu(i) 86 refcnt += *per_cpu_ptr(e->pcpu_refcnt, i); 87 return refcnt; 88 } 89 90 /* Allocate new CAIF device. */ 91 static struct caif_device_entry *caif_device_alloc(struct net_device *dev) 92 { 93 struct caif_device_entry_list *caifdevs; 94 struct caif_device_entry *caifd; 95 96 caifdevs = caif_device_list(dev_net(dev)); 97 98 caifd = kzalloc(sizeof(*caifd), GFP_KERNEL); 99 if (!caifd) 100 return NULL; 101 caifd->pcpu_refcnt = alloc_percpu(int); 102 if (!caifd->pcpu_refcnt) { 103 kfree(caifd); 104 return NULL; 105 } 106 caifd->netdev = dev; 107 dev_hold(dev); 108 return caifd; 109 } 110 111 static struct caif_device_entry *caif_get(struct net_device *dev) 112 { 113 struct caif_device_entry_list *caifdevs = 114 caif_device_list(dev_net(dev)); 115 struct caif_device_entry *caifd; 116 117 list_for_each_entry_rcu(caifd, &caifdevs->list, list) { 118 if (caifd->netdev == dev) 119 return caifd; 120 } 121 return NULL; 122 } 123 124 void caif_flow_cb(struct sk_buff *skb) 125 { 126 struct caif_device_entry *caifd; 127 void (*dtor)(struct sk_buff *skb) = NULL; 128 bool send_xoff; 129 130 WARN_ON(skb->dev == NULL); 131 132 rcu_read_lock(); 133 caifd = caif_get(skb->dev); 134 caifd_hold(caifd); 135 rcu_read_unlock(); 136 137 spin_lock_bh(&caifd->flow_lock); 138 send_xoff = caifd->xoff; 139 caifd->xoff = 0; 140 dtor = caifd->xoff_skb_dtor; 141 142 if (WARN_ON(caifd->xoff_skb != skb)) 143 skb = NULL; 144 145 caifd->xoff_skb = NULL; 146 caifd->xoff_skb_dtor = NULL; 147 148 spin_unlock_bh(&caifd->flow_lock); 149 150 if (dtor && skb) 151 dtor(skb); 152 153 if (send_xoff) 154 caifd->layer.up-> 155 ctrlcmd(caifd->layer.up, 156 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND, 157 caifd->layer.id); 158 caifd_put(caifd); 159 } 160 161 static int transmit(struct cflayer *layer, struct cfpkt *pkt) 162 { 163 int err, high = 0, qlen = 0; 164 struct caif_device_entry *caifd = 165 container_of(layer, struct caif_device_entry, layer); 166 struct sk_buff *skb; 167 struct netdev_queue *txq; 168 169 rcu_read_lock_bh(); 170 171 skb = cfpkt_tonative(pkt); 172 skb->dev = caifd->netdev; 173 skb_reset_network_header(skb); 174 skb->protocol = htons(ETH_P_CAIF); 175 176 /* Check if we need to handle xoff */ 177 if (likely(caifd->netdev->tx_queue_len == 0)) 178 goto noxoff; 179 180 if (unlikely(caifd->xoff)) 181 goto noxoff; 182 183 if (likely(!netif_queue_stopped(caifd->netdev))) { 184 /* If we run with a TX queue, check if the queue is too long*/ 185 txq = netdev_get_tx_queue(skb->dev, 0); 186 qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc)); 187 188 if (likely(qlen == 0)) 189 goto noxoff; 190 191 high = (caifd->netdev->tx_queue_len * q_high) / 100; 192 if (likely(qlen < high)) 193 goto noxoff; 194 } 195 196 /* Hold lock while accessing xoff */ 197 spin_lock_bh(&caifd->flow_lock); 198 if (caifd->xoff) { 199 spin_unlock_bh(&caifd->flow_lock); 200 goto noxoff; 201 } 202 203 /* 204 * Handle flow off, we do this by temporary hi-jacking this 205 * skb's destructor function, and replace it with our own 206 * flow-on callback. The callback will set flow-on and call 207 * the original destructor. 208 */ 209 210 pr_debug("queue has stopped(%d) or is full (%d > %d)\n", 211 netif_queue_stopped(caifd->netdev), 212 qlen, high); 213 caifd->xoff = 1; 214 caifd->xoff_skb = skb; 215 caifd->xoff_skb_dtor = skb->destructor; 216 skb->destructor = caif_flow_cb; 217 spin_unlock_bh(&caifd->flow_lock); 218 219 caifd->layer.up->ctrlcmd(caifd->layer.up, 220 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND, 221 caifd->layer.id); 222 noxoff: 223 rcu_read_unlock_bh(); 224 225 err = dev_queue_xmit(skb); 226 if (err > 0) 227 err = -EIO; 228 229 return err; 230 } 231 232 /* 233 * Stuff received packets into the CAIF stack. 234 * On error, returns non-zero and releases the skb. 235 */ 236 static int receive(struct sk_buff *skb, struct net_device *dev, 237 struct packet_type *pkttype, struct net_device *orig_dev) 238 { 239 struct cfpkt *pkt; 240 struct caif_device_entry *caifd; 241 int err; 242 243 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb); 244 245 rcu_read_lock(); 246 caifd = caif_get(dev); 247 248 if (!caifd || !caifd->layer.up || !caifd->layer.up->receive || 249 !netif_oper_up(caifd->netdev)) { 250 rcu_read_unlock(); 251 kfree_skb(skb); 252 return NET_RX_DROP; 253 } 254 255 /* Hold reference to netdevice while using CAIF stack */ 256 caifd_hold(caifd); 257 rcu_read_unlock(); 258 259 err = caifd->layer.up->receive(caifd->layer.up, pkt); 260 261 /* For -EILSEQ the packet is not freed so so it now */ 262 if (err == -EILSEQ) 263 cfpkt_destroy(pkt); 264 265 /* Release reference to stack upwards */ 266 caifd_put(caifd); 267 268 if (err != 0) 269 err = NET_RX_DROP; 270 return err; 271 } 272 273 static struct packet_type caif_packet_type __read_mostly = { 274 .type = cpu_to_be16(ETH_P_CAIF), 275 .func = receive, 276 }; 277 278 static void dev_flowctrl(struct net_device *dev, int on) 279 { 280 struct caif_device_entry *caifd; 281 282 rcu_read_lock(); 283 284 caifd = caif_get(dev); 285 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) { 286 rcu_read_unlock(); 287 return; 288 } 289 290 caifd_hold(caifd); 291 rcu_read_unlock(); 292 293 caifd->layer.up->ctrlcmd(caifd->layer.up, 294 on ? 295 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND : 296 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND, 297 caifd->layer.id); 298 caifd_put(caifd); 299 } 300 301 void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, 302 struct cflayer *link_support, int head_room, 303 struct cflayer **layer, int (**rcv_func)( 304 struct sk_buff *, struct net_device *, 305 struct packet_type *, struct net_device *)) 306 { 307 struct caif_device_entry *caifd; 308 enum cfcnfg_phy_preference pref; 309 struct cfcnfg *cfg = get_cfcnfg(dev_net(dev)); 310 struct caif_device_entry_list *caifdevs; 311 312 caifdevs = caif_device_list(dev_net(dev)); 313 caifd = caif_device_alloc(dev); 314 if (!caifd) 315 return; 316 *layer = &caifd->layer; 317 spin_lock_init(&caifd->flow_lock); 318 319 switch (caifdev->link_select) { 320 case CAIF_LINK_HIGH_BANDW: 321 pref = CFPHYPREF_HIGH_BW; 322 break; 323 case CAIF_LINK_LOW_LATENCY: 324 pref = CFPHYPREF_LOW_LAT; 325 break; 326 default: 327 pref = CFPHYPREF_HIGH_BW; 328 break; 329 } 330 mutex_lock(&caifdevs->lock); 331 list_add_rcu(&caifd->list, &caifdevs->list); 332 333 strncpy(caifd->layer.name, dev->name, 334 sizeof(caifd->layer.name) - 1); 335 caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0; 336 caifd->layer.transmit = transmit; 337 cfcnfg_add_phy_layer(cfg, 338 dev, 339 &caifd->layer, 340 pref, 341 link_support, 342 caifdev->use_fcs, 343 head_room); 344 mutex_unlock(&caifdevs->lock); 345 if (rcv_func) 346 *rcv_func = receive; 347 } 348 EXPORT_SYMBOL(caif_enroll_dev); 349 350 /* notify Caif of device events */ 351 static int caif_device_notify(struct notifier_block *me, unsigned long what, 352 void *arg) 353 { 354 struct net_device *dev = arg; 355 struct caif_device_entry *caifd = NULL; 356 struct caif_dev_common *caifdev; 357 struct cfcnfg *cfg; 358 struct cflayer *layer, *link_support; 359 int head_room = 0; 360 struct caif_device_entry_list *caifdevs; 361 362 cfg = get_cfcnfg(dev_net(dev)); 363 caifdevs = caif_device_list(dev_net(dev)); 364 365 caifd = caif_get(dev); 366 if (caifd == NULL && dev->type != ARPHRD_CAIF) 367 return 0; 368 369 switch (what) { 370 case NETDEV_REGISTER: 371 if (caifd != NULL) 372 break; 373 374 caifdev = netdev_priv(dev); 375 376 link_support = NULL; 377 if (caifdev->use_frag) { 378 head_room = 1; 379 link_support = cfserl_create(dev->ifindex, 380 caifdev->use_stx); 381 if (!link_support) { 382 pr_warn("Out of memory\n"); 383 break; 384 } 385 } 386 caif_enroll_dev(dev, caifdev, link_support, head_room, 387 &layer, NULL); 388 caifdev->flowctrl = dev_flowctrl; 389 break; 390 391 case NETDEV_UP: 392 rcu_read_lock(); 393 394 caifd = caif_get(dev); 395 if (caifd == NULL) { 396 rcu_read_unlock(); 397 break; 398 } 399 400 caifd->xoff = 0; 401 cfcnfg_set_phy_state(cfg, &caifd->layer, true); 402 rcu_read_unlock(); 403 404 break; 405 406 case NETDEV_DOWN: 407 rcu_read_lock(); 408 409 caifd = caif_get(dev); 410 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) { 411 rcu_read_unlock(); 412 return -EINVAL; 413 } 414 415 cfcnfg_set_phy_state(cfg, &caifd->layer, false); 416 caifd_hold(caifd); 417 rcu_read_unlock(); 418 419 caifd->layer.up->ctrlcmd(caifd->layer.up, 420 _CAIF_CTRLCMD_PHYIF_DOWN_IND, 421 caifd->layer.id); 422 423 spin_lock_bh(&caifd->flow_lock); 424 425 /* 426 * Replace our xoff-destructor with original destructor. 427 * We trust that skb->destructor *always* is called before 428 * the skb reference is invalid. The hijacked SKB destructor 429 * takes the flow_lock so manipulating the skb->destructor here 430 * should be safe. 431 */ 432 if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL) 433 caifd->xoff_skb->destructor = caifd->xoff_skb_dtor; 434 435 caifd->xoff = 0; 436 caifd->xoff_skb_dtor = NULL; 437 caifd->xoff_skb = NULL; 438 439 spin_unlock_bh(&caifd->flow_lock); 440 caifd_put(caifd); 441 break; 442 443 case NETDEV_UNREGISTER: 444 mutex_lock(&caifdevs->lock); 445 446 caifd = caif_get(dev); 447 if (caifd == NULL) { 448 mutex_unlock(&caifdevs->lock); 449 break; 450 } 451 list_del_rcu(&caifd->list); 452 453 /* 454 * NETDEV_UNREGISTER is called repeatedly until all reference 455 * counts for the net-device are released. If references to 456 * caifd is taken, simply ignore NETDEV_UNREGISTER and wait for 457 * the next call to NETDEV_UNREGISTER. 458 * 459 * If any packets are in flight down the CAIF Stack, 460 * cfcnfg_del_phy_layer will return nonzero. 461 * If no packets are in flight, the CAIF Stack associated 462 * with the net-device un-registering is freed. 463 */ 464 465 if (caifd_refcnt_read(caifd) != 0 || 466 cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) { 467 468 pr_info("Wait for device inuse\n"); 469 /* Enrole device if CAIF Stack is still in use */ 470 list_add_rcu(&caifd->list, &caifdevs->list); 471 mutex_unlock(&caifdevs->lock); 472 break; 473 } 474 475 synchronize_rcu(); 476 dev_put(caifd->netdev); 477 free_percpu(caifd->pcpu_refcnt); 478 kfree(caifd); 479 480 mutex_unlock(&caifdevs->lock); 481 break; 482 } 483 return 0; 484 } 485 486 static struct notifier_block caif_device_notifier = { 487 .notifier_call = caif_device_notify, 488 .priority = 0, 489 }; 490 491 /* Per-namespace Caif devices handling */ 492 static int caif_init_net(struct net *net) 493 { 494 struct caif_net *caifn = net_generic(net, caif_net_id); 495 INIT_LIST_HEAD(&caifn->caifdevs.list); 496 mutex_init(&caifn->caifdevs.lock); 497 498 caifn->cfg = cfcnfg_create(); 499 if (!caifn->cfg) 500 return -ENOMEM; 501 502 return 0; 503 } 504 505 static void caif_exit_net(struct net *net) 506 { 507 struct caif_device_entry *caifd, *tmp; 508 struct caif_device_entry_list *caifdevs = 509 caif_device_list(net); 510 struct cfcnfg *cfg = get_cfcnfg(net); 511 512 rtnl_lock(); 513 mutex_lock(&caifdevs->lock); 514 515 list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) { 516 int i = 0; 517 list_del_rcu(&caifd->list); 518 cfcnfg_set_phy_state(cfg, &caifd->layer, false); 519 520 while (i < 10 && 521 (caifd_refcnt_read(caifd) != 0 || 522 cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) { 523 524 pr_info("Wait for device inuse\n"); 525 msleep(250); 526 i++; 527 } 528 synchronize_rcu(); 529 dev_put(caifd->netdev); 530 free_percpu(caifd->pcpu_refcnt); 531 kfree(caifd); 532 } 533 cfcnfg_remove(cfg); 534 535 mutex_unlock(&caifdevs->lock); 536 rtnl_unlock(); 537 } 538 539 static struct pernet_operations caif_net_ops = { 540 .init = caif_init_net, 541 .exit = caif_exit_net, 542 .id = &caif_net_id, 543 .size = sizeof(struct caif_net), 544 }; 545 546 /* Initialize Caif devices list */ 547 static int __init caif_device_init(void) 548 { 549 int result; 550 551 result = register_pernet_subsys(&caif_net_ops); 552 553 if (result) 554 return result; 555 556 register_netdevice_notifier(&caif_device_notifier); 557 dev_add_pack(&caif_packet_type); 558 559 return result; 560 } 561 562 static void __exit caif_device_exit(void) 563 { 564 unregister_pernet_subsys(&caif_net_ops); 565 unregister_netdevice_notifier(&caif_device_notifier); 566 dev_remove_pack(&caif_packet_type); 567 } 568 569 module_init(caif_device_init); 570 module_exit(caif_device_exit); 571