/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to
 *  Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/delay.h>	/* msleep(), used in caif_exit_net() */
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
	spinlock_t flow_lock;
	struct sk_buff *xoff_skb;
	void (*xoff_skb_dtor)(struct sk_buff *skb);
	bool xoff;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in list */
	struct mutex lock;
};

struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static int caif_net_id;
static int q_high = 50; /* Percent */

struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;

	caifn = net_generic(net, caif_net_id);
	if (!caifn)
		return NULL;
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;

	caifn = net_generic(net, caif_net_id);
	if (!caifn)
		return NULL;
	return &caifn->caifdevs;
}

static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}
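/*
 * Reference-counting note: caifd_hold()/caifd_put() only bump a per-CPU
 * counter and never free anything themselves. The entry is freed by the
 * NETDEV_UNREGISTER handler (or caif_exit_net()) once caifd_refcnt_read()
 * sums to zero across all possible CPUs. This keeps hold/put cheap on the
 * data path at the cost of an O(nr_possible_cpus) read at teardown.
 */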
/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs;
	struct caif_device_entry *caifd;

	caifdevs = caif_device_list(dev_net(dev));
	if (!caifdevs)
		return NULL;

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}

static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	if (!caifdevs)
		return NULL;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

static void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);
	/* The device may already have been unregistered; bail out then. */
	if (WARN_ON(caifd == NULL)) {
		rcu_read_unlock();
		return;
	}
	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = false;
	if (!WARN_ON(caifd->xoff_skb_dtor == NULL)) {
		WARN_ON(caifd->xoff_skb != skb);
		dtor = caifd->xoff_skb_dtor;
		caifd->xoff_skb = NULL;
		caifd->xoff_skb_dtor = NULL;
	}
	spin_unlock_bh(&caifd->flow_lock);

	if (dtor)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->
			ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				caifd->layer.id);
	caifd_put(caifd);
}
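/*
 * Downstream transmit hook, installed as caifd->layer.transmit by
 * caif_enroll_dev(). Flow control works by watching the qdisc backlog
 * of TX queue 0: once it exceeds q_high percent of dev->tx_queue_len,
 * the skb's destructor is hijacked so that caif_flow_cb() above runs
 * when the driver finally consumes that packet, at which point flow
 * is switched back on.
 */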
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0, qlen = 0;
	struct caif_device_entry *caifd =
	    container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->tx_queue_len == 0))
		goto noxoff;

	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		/* If we run with a TX queue, check if the queue is too long */
		txq = netdev_get_tx_queue(skb->dev, 0);
		qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));

		if (likely(qlen == 0))
			goto noxoff;

		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (likely(qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow off: we do this by temporarily hijacking this
	 * skb's destructor function, and replacing it with our own
	 * flow-on callback. The callback will set flow-on and call
	 * the original destructor.
	 */

	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
			netif_queue_stopped(caifd->netdev),
			qlen, high);
	caifd->xoff = true;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
			!netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}

static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};
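/*
 * Propagate a flow-on/flow-off event from the underlying link driver
 * up the CAIF stack. Drivers reach this through the flowctrl callback
 * installed in their caif_dev_common at NETDEV_REGISTER time.
 */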
static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}

void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		     struct cflayer *link_support, int head_room,
		     struct cflayer **layer,
		     int (**rcv_func)(struct sk_buff *, struct net_device *,
				      struct packet_type *,
				      struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;

	caifdevs = caif_device_list(dev_net(dev));
	if (!cfg || !caifdevs)
		return;
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strncpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name) - 1);
	caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
	caifd->layer.transmit = transmit;
	cfcnfg_add_phy_layer(cfg,
				dev,
				&caifd->layer,
				pref,
				link_support,
				caifdev->use_fcs,
				head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
}
EXPORT_SYMBOL(caif_enroll_dev);
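/*
 * A minimal sketch of how a CAIF link driver is expected to hook in.
 * The struct and field values below are illustrative only; the one hard
 * requirement implied by the notifier (which casts netdev_priv(dev) to
 * struct caif_dev_common) is that caif_dev_common sits first in the
 * driver's private data:
 *
 *	struct my_caif_priv {
 *		struct caif_dev_common common;	// must be first member
 *		// ... driver-specific state ...
 *	};
 *
 *	priv->common.link_select = CAIF_LINK_LOW_LATENCY;
 *	priv->common.use_frag = true;	// request cfserl link support
 *	dev->type = ARPHRD_CAIF;	// so caif_device_notify matches
 *	register_netdev(dev);		// fires NETDEV_REGISTER below
 *
 * caif_enroll_dev() is then invoked from the notifier (or directly by
 * drivers supplying their own link-support layer) to splice the device
 * into the CAIF stack.
 */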
/* Notify CAIF of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *arg)
{
	struct net_device *dev = arg;
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));
	if (!cfg || !caifdevs)
		return 0;

	caifd = caif_get(dev);
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
						     caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		caif_enroll_dev(dev, caifdev, link_support, head_room,
				&layer, NULL);
		caifdev->flowctrl = dev_flowctrl;
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		caifd->xoff = false;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with the original destructor.
		 * We trust that skb->destructor *always* is called before
		 * the skb reference is invalid. The hijacked SKB destructor
		 * takes the flow_lock, so manipulating skb->destructor here
		 * should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = false;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are taken, simply ignore NETDEV_UNREGISTER and wait
		 * for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF Stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF Stack associated
		 * with the net-device un-registering is freed.
		 */
		if (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Waiting while device is in use\n");
			/* Re-enroll device while the CAIF stack is in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace CAIF devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);

	if (WARN_ON(!caifn))
		return -EINVAL;

	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg)
		return -ENOMEM;

	return 0;
}

static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	if (!cfg || !caifdevs)
		return;

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;

		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
			(caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Waiting while device is in use\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id   = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Initialize CAIF devices list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_device(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}
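/*
 * Teardown-order note: unregistering the pernet subsystem first runs
 * caif_exit_net() for every remaining namespace, which drains and frees
 * all enrolled devices before the notifier and packet handler go away.
 */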
static void __exit caif_device_exit(void)
{
	unregister_pernet_device(&caif_net_ops);
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
}

module_init(caif_device_init);
module_exit(caif_device_exit);