/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to
 *  Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in list */
	struct mutex lock;
};

struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static int caif_net_id;

struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	BUG_ON(!net);
	caifn = net_generic(net, caif_net_id);
	BUG_ON(!caifn);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	BUG_ON(!net);
	caifn = net_generic(net, caif_net_id);
	BUG_ON(!caifn);
	return &caifn->caifdevs;
}

/* Drop a per-CPU reference to the device entry */
static void caifd_put(struct caif_device_entry *e)
{
	irqsafe_cpu_dec(*e->pcpu_refcnt);
}

/* Take a per-CPU reference to the device entry */
static void caifd_hold(struct caif_device_entry *e)
{
	irqsafe_cpu_inc(*e->pcpu_refcnt);
}

/* Sum the per-CPU reference counts */
static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs;
	struct caif_device_entry *caifd;

	caifdevs = caif_device_list(dev_net(dev));
	BUG_ON(!caifdevs);

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}

/* Look up the device entry for a net-device (called under RCU read lock) */
static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;
	BUG_ON(!caifdevs);
	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

/* Send a CAIF packet out on the underlying net-device */
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err;
	struct caif_device_entry *caifd =
	    container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
			!netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);
	return 0;
}

static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};

/* Propagate flow-control on/off notifications up the CAIF stack */
static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}

/* Notify CAIF of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *arg)
{
	struct net_device *dev = arg;
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	enum cfcnfg_phy_preference pref;
	enum cfcnfg_phy_type phy_type;
	struct cfcnfg *cfg;
	struct caif_device_entry_list *caifdevs;

	if (dev->type != ARPHRD_CAIF)
		return 0;

	cfg = get_cfcnfg(dev_net(dev));
	if (cfg == NULL)
		return 0;

	caifdevs = caif_device_list(dev_net(dev));

	switch (what) {
	case NETDEV_REGISTER:
		caifd = caif_device_alloc(dev);
		if (!caifd)
			return 0;

		caifdev = netdev_priv(dev);
		caifdev->flowctrl = dev_flowctrl;

		caifd->layer.transmit = transmit;

		if (caifdev->use_frag)
			phy_type = CFPHYTYPE_FRAG;
		else
			phy_type = CFPHYTYPE_CAIF;

		switch (caifdev->link_select) {
		case CAIF_LINK_HIGH_BANDW:
			pref = CFPHYPREF_HIGH_BW;
			break;
		case CAIF_LINK_LOW_LATENCY:
			pref = CFPHYPREF_LOW_LAT;
			break;
		default:
			pref = CFPHYPREF_HIGH_BW;
			break;
		}
		strncpy(caifd->layer.name, dev->name,
			sizeof(caifd->layer.name) - 1);
		caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;

		mutex_lock(&caifdevs->lock);
		list_add_rcu(&caifd->list, &caifdevs->list);

		cfcnfg_add_phy_layer(cfg,
				     phy_type,
				     dev,
				     &caifd->layer,
				     pref,
				     caifdev->use_fcs,
				     caifdev->use_stx);
		mutex_unlock(&caifdevs->lock);
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are taken, simply ignore NETDEV_UNREGISTER and wait
		 * for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF stack associated
		 * with the un-registering net-device is freed.
		 */

		if (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Enroll the device again; the CAIF stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace CAIF devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	BUG_ON(!caifn);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg) {
		pr_warn("can't create cfcnfg\n");
		return -ENOMEM;
	}

	return 0;
}

static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(net);
	struct cfcnfg *cfg;

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	cfg = get_cfcnfg(net);
	if (cfg == NULL) {
		mutex_unlock(&caifdevs->lock);
		rtnl_unlock();
		return;
	}

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
			(caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id   = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Initialize CAIF devices list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_device(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}

static void __exit caif_device_exit(void)
{
	unregister_pernet_device(&caif_net_ops);
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
}

module_init(caif_device_init);
module_exit(caif_device_exit);