/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 * License terms: GNU General Public License (GPL) version 2
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 * and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
        struct cflayer layer;
        struct list_head list;
        struct net_device *netdev;
        int __percpu *pcpu_refcnt;
        spinlock_t flow_lock;
        struct sk_buff *xoff_skb;
        void (*xoff_skb_dtor)(struct sk_buff *skb);
        bool xoff;
};

struct caif_device_entry_list {
        struct list_head list;
        /* Protects simultaneous deletes in the list */
        struct mutex lock;
};

struct caif_net {
        struct cfcnfg *cfg;
        struct caif_device_entry_list caifdevs;
};

static int caif_net_id;
static int q_high = 50; /* Percent */

struct cfcnfg *get_cfcnfg(struct net *net)
{
        struct caif_net *caifn;
        caifn = net_generic(net, caif_net_id);
        return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
        struct caif_net *caifn;
        caifn = net_generic(net, caif_net_id);
        return &caifn->caifdevs;
}
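
/*
 * Lightweight per-CPU reference counting for a caif_device_entry.
 * caifd_hold()/caifd_put() only touch the local CPU's counter, so they are
 * cheap in the packet path; caifd_refcnt_read() sums the counters over all
 * possible CPUs and is only consulted on the slow tear-down paths below.
 */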
static void caifd_put(struct caif_device_entry *e)
{
        this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
        this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
        int i, refcnt = 0;
        for_each_possible_cpu(i)
                refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
        return refcnt;
}

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
        struct caif_device_entry *caifd;

        caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
        if (!caifd)
                return NULL;
        caifd->pcpu_refcnt = alloc_percpu(int);
        if (!caifd->pcpu_refcnt) {
                kfree(caifd);
                return NULL;
        }
        caifd->netdev = dev;
        dev_hold(dev);
        return caifd;
}

static struct caif_device_entry *caif_get(struct net_device *dev)
{
        struct caif_device_entry_list *caifdevs =
                caif_device_list(dev_net(dev));
        struct caif_device_entry *caifd;

        list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
                if (caifd->netdev == dev)
                        return caifd;
        }
        return NULL;
}

static void caif_flow_cb(struct sk_buff *skb)
{
        struct caif_device_entry *caifd;
        void (*dtor)(struct sk_buff *skb) = NULL;
        bool send_xoff;

        WARN_ON(skb->dev == NULL);

        rcu_read_lock();
        caifd = caif_get(skb->dev);

        WARN_ON(caifd == NULL);
        if (caifd == NULL) {
                rcu_read_unlock();
                return;
        }

        caifd_hold(caifd);
        rcu_read_unlock();

        spin_lock_bh(&caifd->flow_lock);
        send_xoff = caifd->xoff;
        caifd->xoff = 0;
        dtor = caifd->xoff_skb_dtor;

        if (WARN_ON(caifd->xoff_skb != skb))
                skb = NULL;

        caifd->xoff_skb = NULL;
        caifd->xoff_skb_dtor = NULL;

        spin_unlock_bh(&caifd->flow_lock);

        if (dtor && skb)
                dtor(skb);

        if (send_xoff)
                caifd->layer.up->ctrlcmd(caifd->layer.up,
                                         _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
                                         caifd->layer.id);
        caifd_put(caifd);
}
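
/*
 * transmit() pushes a packet from the CAIF stack out on the net device.
 * When the device has a TX queue, the qdisc length is compared against
 * q_high percent of tx_queue_len; if the queue is stopped or too long,
 * a FLOW_OFF indication is sent up the stack and the skb destructor is
 * hijacked (see caif_flow_cb above) so that flow is re-enabled once the
 * device has consumed the packet.
 */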
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
        int err, high = 0, qlen = 0;
        struct caif_device_entry *caifd =
                container_of(layer, struct caif_device_entry, layer);
        struct sk_buff *skb;
        struct netdev_queue *txq;

        rcu_read_lock_bh();

        skb = cfpkt_tonative(pkt);
        skb->dev = caifd->netdev;
        skb_reset_network_header(skb);
        skb->protocol = htons(ETH_P_CAIF);

        /* Check if we need to handle xoff */
        if (likely(caifd->netdev->tx_queue_len == 0))
                goto noxoff;

        if (unlikely(caifd->xoff))
                goto noxoff;

        if (likely(!netif_queue_stopped(caifd->netdev))) {
                /* If we run with a TX queue, check if the queue is too long */
                txq = netdev_get_tx_queue(skb->dev, 0);
                qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));

                if (likely(qlen == 0))
                        goto noxoff;

                high = (caifd->netdev->tx_queue_len * q_high) / 100;
                if (likely(qlen < high))
                        goto noxoff;
        }

        /* Hold lock while accessing xoff */
        spin_lock_bh(&caifd->flow_lock);
        if (caifd->xoff) {
                spin_unlock_bh(&caifd->flow_lock);
                goto noxoff;
        }

        /*
         * Handle flow off: temporarily hijack this skb's destructor and
         * replace it with our own flow-on callback. The callback will set
         * flow-on and call the original destructor.
         */

        pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
                 netif_queue_stopped(caifd->netdev),
                 qlen, high);
        caifd->xoff = 1;
        caifd->xoff_skb = skb;
        caifd->xoff_skb_dtor = skb->destructor;
        skb->destructor = caif_flow_cb;
        spin_unlock_bh(&caifd->flow_lock);

        caifd->layer.up->ctrlcmd(caifd->layer.up,
                                 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
                                 caifd->layer.id);
noxoff:
        rcu_read_unlock_bh();

        err = dev_queue_xmit(skb);
        if (err > 0)
                err = -EIO;

        return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pkttype, struct net_device *orig_dev)
{
        struct cfpkt *pkt;
        struct caif_device_entry *caifd;
        int err;

        pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

        rcu_read_lock();
        caifd = caif_get(dev);

        if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
            !netif_oper_up(caifd->netdev)) {
                rcu_read_unlock();
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        /* Hold reference to netdevice while using CAIF stack */
        caifd_hold(caifd);
        rcu_read_unlock();

        err = caifd->layer.up->receive(caifd->layer.up, pkt);

        /* For -EILSEQ the packet is not freed, so free it now */
        if (err == -EILSEQ)
                cfpkt_destroy(pkt);

        /* Release reference to stack upwards */
        caifd_put(caifd);

        if (err != 0)
                err = NET_RX_DROP;
        return err;
}

static struct packet_type caif_packet_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_CAIF),
        .func = receive,
};
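
/*
 * Flow-control hook handed to the link-layer driver through
 * caif_dev_common.flowctrl (see NETDEV_REGISTER below); it forwards the
 * driver's flow on/off events up the CAIF stack.
 */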
static void dev_flowctrl(struct net_device *dev, int on)
{
        struct caif_device_entry *caifd;

        rcu_read_lock();

        caifd = caif_get(dev);
        if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
                rcu_read_unlock();
                return;
        }

        caifd_hold(caifd);
        rcu_read_unlock();

        caifd->layer.up->ctrlcmd(caifd->layer.up,
                                 on ?
                                 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
                                 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
                                 caifd->layer.id);
        caifd_put(caifd);
}

void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
                     struct cflayer *link_support, int head_room,
                     struct cflayer **layer,
                     int (**rcv_func)(struct sk_buff *, struct net_device *,
                                      struct packet_type *,
                                      struct net_device *))
{
        struct caif_device_entry *caifd;
        enum cfcnfg_phy_preference pref;
        struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
        struct caif_device_entry_list *caifdevs;

        caifdevs = caif_device_list(dev_net(dev));
        caifd = caif_device_alloc(dev);
        if (!caifd)
                return;
        *layer = &caifd->layer;
        spin_lock_init(&caifd->flow_lock);

        switch (caifdev->link_select) {
        case CAIF_LINK_HIGH_BANDW:
                pref = CFPHYPREF_HIGH_BW;
                break;
        case CAIF_LINK_LOW_LATENCY:
                pref = CFPHYPREF_LOW_LAT;
                break;
        default:
                pref = CFPHYPREF_HIGH_BW;
                break;
        }
        mutex_lock(&caifdevs->lock);
        list_add_rcu(&caifd->list, &caifdevs->list);

        strncpy(caifd->layer.name, dev->name,
                sizeof(caifd->layer.name) - 1);
        caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
        caifd->layer.transmit = transmit;
        cfcnfg_add_phy_layer(cfg,
                             dev,
                             &caifd->layer,
                             pref,
                             link_support,
                             caifdev->use_fcs,
                             head_room);
        mutex_unlock(&caifdevs->lock);
        if (rcv_func)
                *rcv_func = receive;
}
EXPORT_SYMBOL(caif_enroll_dev);
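
/*
 * A CAIF link-layer driver normally does not call caif_enroll_dev()
 * directly; it registers a net device of type ARPHRD_CAIF whose private
 * area starts with struct caif_dev_common, and the netdev notifier below
 * enrolls it on NETDEV_REGISTER. A minimal sketch of the driver side
 * (the names my_caif_priv/my_caif_setup are hypothetical; only the
 * caif_dev_common fields are taken from this file):
 *
 *      struct my_caif_priv {
 *              struct caif_dev_common common;  (must be first)
 *              ...
 *      };
 *
 *      static void my_caif_setup(struct net_device *dev)
 *      {
 *              struct my_caif_priv *priv = netdev_priv(dev);
 *
 *              dev->type = ARPHRD_CAIF;
 *              priv->common.link_select = CAIF_LINK_LOW_LATENCY;
 *              priv->common.use_frag = true;
 *              priv->common.use_stx = true;
 *              priv->common.use_fcs = true;
 *      }
 *
 * A nonzero dev->tx_queue_len enables the xoff handling in transmit().
 * After register_netdevice(), caif_device_notify() below adds serial-layer
 * framing when use_frag is set, enrolls the device, and stores dev_flowctrl
 * in the driver's flowctrl hook.
 */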
/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
                              void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct caif_device_entry *caifd = NULL;
        struct caif_dev_common *caifdev;
        struct cfcnfg *cfg;
        struct cflayer *layer, *link_support;
        int head_room = 0;
        struct caif_device_entry_list *caifdevs;

        cfg = get_cfcnfg(dev_net(dev));
        caifdevs = caif_device_list(dev_net(dev));

        caifd = caif_get(dev);
        if (caifd == NULL && dev->type != ARPHRD_CAIF)
                return 0;

        switch (what) {
        case NETDEV_REGISTER:
                if (caifd != NULL)
                        break;

                caifdev = netdev_priv(dev);

                link_support = NULL;
                if (caifdev->use_frag) {
                        head_room = 1;
                        link_support = cfserl_create(dev->ifindex,
                                                     caifdev->use_stx);
                        if (!link_support) {
                                pr_warn("Out of memory\n");
                                break;
                        }
                }
                caif_enroll_dev(dev, caifdev, link_support, head_room,
                                &layer, NULL);
                caifdev->flowctrl = dev_flowctrl;
                break;

        case NETDEV_UP:
                rcu_read_lock();

                caifd = caif_get(dev);
                if (caifd == NULL) {
                        rcu_read_unlock();
                        break;
                }

                caifd->xoff = 0;
                cfcnfg_set_phy_state(cfg, &caifd->layer, true);
                rcu_read_unlock();

                break;

        case NETDEV_DOWN:
                rcu_read_lock();

                caifd = caif_get(dev);
                if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
                        rcu_read_unlock();
                        return -EINVAL;
                }

                cfcnfg_set_phy_state(cfg, &caifd->layer, false);
                caifd_hold(caifd);
                rcu_read_unlock();

                caifd->layer.up->ctrlcmd(caifd->layer.up,
                                         _CAIF_CTRLCMD_PHYIF_DOWN_IND,
                                         caifd->layer.id);

                spin_lock_bh(&caifd->flow_lock);

                /*
                 * Replace our xoff-destructor with the original destructor.
                 * We trust that skb->destructor is *always* called before
                 * the skb reference becomes invalid. The hijacked SKB
                 * destructor takes the flow_lock, so manipulating the
                 * skb->destructor here should be safe.
                 */
                if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
                        caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

                caifd->xoff = 0;
                caifd->xoff_skb_dtor = NULL;
                caifd->xoff_skb = NULL;

                spin_unlock_bh(&caifd->flow_lock);
                caifd_put(caifd);
                break;

        case NETDEV_UNREGISTER:
                mutex_lock(&caifdevs->lock);

                caifd = caif_get(dev);
                if (caifd == NULL) {
                        mutex_unlock(&caifdevs->lock);
                        break;
                }
                list_del_rcu(&caifd->list);

                /*
                 * NETDEV_UNREGISTER is called repeatedly until all reference
                 * counts for the net-device are released. If references to
                 * caifd are held, simply ignore NETDEV_UNREGISTER and wait
                 * for the next call to NETDEV_UNREGISTER.
                 *
                 * If any packets are in flight down the CAIF Stack,
                 * cfcnfg_del_phy_layer will return nonzero.
                 * If no packets are in flight, the CAIF Stack associated
                 * with the net-device un-registering is freed.
                 */

                if (caifd_refcnt_read(caifd) != 0 ||
                    cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

                        pr_info("Wait for device inuse\n");
                        /* Enroll device if CAIF Stack is still in use */
                        list_add_rcu(&caifd->list, &caifdevs->list);
                        mutex_unlock(&caifdevs->lock);
                        break;
                }

                synchronize_rcu();
                dev_put(caifd->netdev);
                free_percpu(caifd->pcpu_refcnt);
                kfree(caifd);

                mutex_unlock(&caifdevs->lock);
                break;
        }
        return 0;
}

static struct notifier_block caif_device_notifier = {
        .notifier_call = caif_device_notify,
        .priority = 0,
};

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
        struct caif_net *caifn = net_generic(net, caif_net_id);
        INIT_LIST_HEAD(&caifn->caifdevs.list);
        mutex_init(&caifn->caifdevs.lock);

        caifn->cfg = cfcnfg_create();
        if (!caifn->cfg)
                return -ENOMEM;

        return 0;
}

static void caif_exit_net(struct net *net)
{
        struct caif_device_entry *caifd, *tmp;
        struct caif_device_entry_list *caifdevs =
                caif_device_list(net);
        struct cfcnfg *cfg = get_cfcnfg(net);

        rtnl_lock();
        mutex_lock(&caifdevs->lock);

        list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
                int i = 0;
                list_del_rcu(&caifd->list);
                cfcnfg_set_phy_state(cfg, &caifd->layer, false);

                while (i < 10 &&
                       (caifd_refcnt_read(caifd) != 0 ||
                        cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

                        pr_info("Wait for device inuse\n");
                        msleep(250);
                        i++;
                }
                synchronize_rcu();
                dev_put(caifd->netdev);
                free_percpu(caifd->pcpu_refcnt);
                kfree(caifd);
        }
        cfcnfg_remove(cfg);

        mutex_unlock(&caifdevs->lock);
        rtnl_unlock();
}
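
/*
 * Each network namespace gets its own struct caif_net (allocated by the
 * pernet framework, sized by .size below): caif_init_net() creates the
 * per-namespace CAIF configuration, and caif_exit_net() force-removes any
 * remaining devices before the namespace goes away.
 */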
static struct pernet_operations caif_net_ops = {
        .init = caif_init_net,
        .exit = caif_exit_net,
        .id   = &caif_net_id,
        .size = sizeof(struct caif_net),
};

/* Initialize Caif devices list */
static int __init caif_device_init(void)
{
        int result;

        result = register_pernet_subsys(&caif_net_ops);

        if (result)
                return result;

        register_netdevice_notifier(&caif_device_notifier);
        dev_add_pack(&caif_packet_type);

        return result;
}

static void __exit caif_device_exit(void)
{
        unregister_netdevice_notifier(&caif_device_notifier);
        dev_remove_pack(&caif_packet_type);
        unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);