1 /* 2 * Copyright (C) ST-Ericsson AB 2013 3 * Authors: Vicram Arv 4 * Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> 5 * Sjur Brendeland 6 * License terms: GNU General Public License (GPL) version 2 7 */ 8 #include <linux/module.h> 9 #include <linux/if_arp.h> 10 #include <linux/virtio.h> 11 #include <linux/vringh.h> 12 #include <linux/debugfs.h> 13 #include <linux/spinlock.h> 14 #include <linux/genalloc.h> 15 #include <linux/interrupt.h> 16 #include <linux/netdevice.h> 17 #include <linux/rtnetlink.h> 18 #include <linux/virtio_ids.h> 19 #include <linux/virtio_caif.h> 20 #include <linux/virtio_ring.h> 21 #include <linux/dma-mapping.h> 22 #include <net/caif/caif_dev.h> 23 #include <linux/virtio_config.h> 24 25 MODULE_LICENSE("GPL v2"); 26 MODULE_AUTHOR("Vicram Arv"); 27 MODULE_AUTHOR("Sjur Brendeland"); 28 MODULE_DESCRIPTION("Virtio CAIF Driver"); 29 30 /* NAPI schedule quota */ 31 #define CFV_DEFAULT_QUOTA 32 32 33 /* Defaults used if virtio config space is unavailable */ 34 #define CFV_DEF_MTU_SIZE 4096 35 #define CFV_DEF_HEADROOM 32 36 #define CFV_DEF_TAILROOM 32 37 38 /* Required IP header alignment */ 39 #define IP_HDR_ALIGN 4 40 41 /* struct cfv_napi_contxt - NAPI context info 42 * @riov: IOV holding data read from the ring. Note that riov may 43 * still hold data when cfv_rx_poll() returns. 44 * @head: Last descriptor ID we received from vringh_getdesc_kern. 45 * We use this to put descriptor back on the used ring. USHRT_MAX is 46 * used to indicate invalid head-id. 47 */ 48 struct cfv_napi_context { 49 struct vringh_kiov riov; 50 unsigned short head; 51 }; 52 53 /* struct cfv_stats - statistics for debugfs 54 * @rx_napi_complete: Number of NAPI completions (RX) 55 * @rx_napi_resched: Number of calls where the full quota was used (RX) 56 * @rx_nomem: Number of SKB alloc failures (RX) 57 * @rx_kicks: Number of RX kicks 58 * @tx_full_ring: Number times TX ring was full 59 * @tx_no_mem: Number of times TX went out of memory 60 * @tx_flow_on: Number of flow on (TX) 61 * @tx_kicks: Number of TX kicks 62 */ 63 struct cfv_stats { 64 u32 rx_napi_complete; 65 u32 rx_napi_resched; 66 u32 rx_nomem; 67 u32 rx_kicks; 68 u32 tx_full_ring; 69 u32 tx_no_mem; 70 u32 tx_flow_on; 71 u32 tx_kicks; 72 }; 73 74 /* struct cfv_info - Caif Virtio control structure 75 * @cfdev: caif common header 76 * @vdev: Associated virtio device 77 * @vr_rx: rx/downlink host vring 78 * @vq_tx: tx/uplink virtqueue 79 * @ndev: CAIF link layer device 80 * @watermark_tx: indicates number of free descriptors we need 81 * to reopen the tx-queues after overload. 82 * @tx_lock: protects vq_tx from concurrent use 83 * @tx_release_tasklet: Tasklet for freeing consumed TX buffers 84 * @napi: Napi context used in cfv_rx_poll() 85 * @ctx: Context data used in cfv_rx_poll() 86 * @tx_hr: transmit headroom 87 * @rx_hr: receive headroom 88 * @tx_tr: transmit tail room 89 * @rx_tr: receive tail room 90 * @mtu: transmit max size 91 * @mru: receive max size 92 * @allocsz: size of dma memory reserved for TX buffers 93 * @alloc_addr: virtual address to dma memory for TX buffers 94 * @alloc_dma: dma address to dma memory for TX buffers 95 * @genpool: Gen Pool used for allocating TX buffers 96 * @reserved_mem: Pointer to memory reserve allocated from genpool 97 * @reserved_size: Size of memory reserve allocated from genpool 98 * @stats: Statistics exposed in sysfs 99 * @debugfs: Debugfs dentry for statistic counters 100 */ 101 struct cfv_info { 102 struct caif_dev_common cfdev; 103 struct virtio_device *vdev; 104 struct vringh *vr_rx; 105 struct virtqueue *vq_tx; 106 struct net_device *ndev; 107 unsigned int watermark_tx; 108 /* Protect access to vq_tx */ 109 spinlock_t tx_lock; 110 struct tasklet_struct tx_release_tasklet; 111 struct napi_struct napi; 112 struct cfv_napi_context ctx; 113 u16 tx_hr; 114 u16 rx_hr; 115 u16 tx_tr; 116 u16 rx_tr; 117 u32 mtu; 118 u32 mru; 119 size_t allocsz; 120 void *alloc_addr; 121 dma_addr_t alloc_dma; 122 struct gen_pool *genpool; 123 unsigned long reserved_mem; 124 size_t reserved_size; 125 struct cfv_stats stats; 126 struct dentry *debugfs; 127 }; 128 129 /* struct buf_info - maintains transmit buffer data handle 130 * @size: size of transmit buffer 131 * @dma_handle: handle to allocated dma device memory area 132 * @vaddr: virtual address mapping to allocated memory area 133 */ 134 struct buf_info { 135 size_t size; 136 u8 *vaddr; 137 }; 138 139 /* Called from virtio device, in IRQ context */ 140 static void cfv_release_cb(struct virtqueue *vq_tx) 141 { 142 struct cfv_info *cfv = vq_tx->vdev->priv; 143 144 ++cfv->stats.tx_kicks; 145 tasklet_schedule(&cfv->tx_release_tasklet); 146 } 147 148 static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info) 149 { 150 if (!buf_info) 151 return; 152 gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr, 153 buf_info->size); 154 kfree(buf_info); 155 } 156 157 /* This is invoked whenever the remote processor completed processing 158 * a TX msg we just sent, and the buffer is put back to the used ring. 159 */ 160 static void cfv_release_used_buf(struct virtqueue *vq_tx) 161 { 162 struct cfv_info *cfv = vq_tx->vdev->priv; 163 unsigned long flags; 164 165 BUG_ON(vq_tx != cfv->vq_tx); 166 167 for (;;) { 168 unsigned int len; 169 struct buf_info *buf_info; 170 171 /* Get used buffer from used ring to recycle used descriptors */ 172 spin_lock_irqsave(&cfv->tx_lock, flags); 173 buf_info = virtqueue_get_buf(vq_tx, &len); 174 spin_unlock_irqrestore(&cfv->tx_lock, flags); 175 176 /* Stop looping if there are no more buffers to free */ 177 if (!buf_info) 178 break; 179 180 free_buf_info(cfv, buf_info); 181 182 /* watermark_tx indicates if we previously stopped the tx 183 * queues. If we have enough free stots in the virtio ring, 184 * re-establish memory reserved and open up tx queues. 185 */ 186 if (cfv->vq_tx->num_free <= cfv->watermark_tx) 187 continue; 188 189 /* Re-establish memory reserve */ 190 if (cfv->reserved_mem == 0 && cfv->genpool) 191 cfv->reserved_mem = 192 gen_pool_alloc(cfv->genpool, 193 cfv->reserved_size); 194 195 /* Open up the tx queues */ 196 if (cfv->reserved_mem) { 197 cfv->watermark_tx = 198 virtqueue_get_vring_size(cfv->vq_tx); 199 netif_tx_wake_all_queues(cfv->ndev); 200 /* Buffers are recycled in cfv_netdev_tx, so 201 * disable notifications when queues are opened. 202 */ 203 virtqueue_disable_cb(cfv->vq_tx); 204 ++cfv->stats.tx_flow_on; 205 } else { 206 /* if no memory reserve, wait for more free slots */ 207 WARN_ON(cfv->watermark_tx > 208 virtqueue_get_vring_size(cfv->vq_tx)); 209 cfv->watermark_tx += 210 virtqueue_get_vring_size(cfv->vq_tx) / 4; 211 } 212 } 213 } 214 215 /* Allocate a SKB and copy packet data to it */ 216 static struct sk_buff *cfv_alloc_and_copy_skb(int *err, 217 struct cfv_info *cfv, 218 u8 *frm, u32 frm_len) 219 { 220 struct sk_buff *skb; 221 u32 cfpkt_len, pad_len; 222 223 *err = 0; 224 /* Verify that packet size with down-link header and mtu size */ 225 if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) { 226 netdev_err(cfv->ndev, 227 "Invalid frmlen:%u mtu:%u hr:%d tr:%d\n", 228 frm_len, cfv->mru, cfv->rx_hr, 229 cfv->rx_tr); 230 *err = -EPROTO; 231 return NULL; 232 } 233 234 cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr); 235 pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1); 236 237 skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len); 238 if (!skb) { 239 *err = -ENOMEM; 240 return NULL; 241 } 242 243 skb_reserve(skb, cfv->rx_hr + pad_len); 244 245 memcpy(skb_put(skb, cfpkt_len), frm + cfv->rx_hr, cfpkt_len); 246 return skb; 247 } 248 249 /* Get packets from the host vring */ 250 static int cfv_rx_poll(struct napi_struct *napi, int quota) 251 { 252 struct cfv_info *cfv = container_of(napi, struct cfv_info, napi); 253 int rxcnt = 0; 254 int err = 0; 255 void *buf; 256 struct sk_buff *skb; 257 struct vringh_kiov *riov = &cfv->ctx.riov; 258 unsigned int skb_len; 259 260 again: 261 do { 262 skb = NULL; 263 264 /* Put the previous iovec back on the used ring and 265 * fetch a new iovec if we have processed all elements. 266 */ 267 if (riov->i == riov->used) { 268 if (cfv->ctx.head != USHRT_MAX) { 269 vringh_complete_kern(cfv->vr_rx, 270 cfv->ctx.head, 271 0); 272 cfv->ctx.head = USHRT_MAX; 273 } 274 275 err = vringh_getdesc_kern( 276 cfv->vr_rx, 277 riov, 278 NULL, 279 &cfv->ctx.head, 280 GFP_ATOMIC); 281 282 if (err <= 0) 283 goto exit; 284 } 285 286 buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base); 287 /* TODO: Add check on valid buffer address */ 288 289 skb = cfv_alloc_and_copy_skb(&err, cfv, buf, 290 riov->iov[riov->i].iov_len); 291 if (unlikely(err)) 292 goto exit; 293 294 /* Push received packet up the stack. */ 295 skb_len = skb->len; 296 skb->protocol = htons(ETH_P_CAIF); 297 skb_reset_mac_header(skb); 298 skb->dev = cfv->ndev; 299 err = netif_receive_skb(skb); 300 if (unlikely(err)) { 301 ++cfv->ndev->stats.rx_dropped; 302 } else { 303 ++cfv->ndev->stats.rx_packets; 304 cfv->ndev->stats.rx_bytes += skb_len; 305 } 306 307 ++riov->i; 308 ++rxcnt; 309 } while (rxcnt < quota); 310 311 ++cfv->stats.rx_napi_resched; 312 goto out; 313 314 exit: 315 switch (err) { 316 case 0: 317 ++cfv->stats.rx_napi_complete; 318 319 /* Really out of patckets? (stolen from virtio_net)*/ 320 napi_complete(napi); 321 if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) && 322 napi_schedule_prep(napi)) { 323 vringh_notify_disable_kern(cfv->vr_rx); 324 __napi_schedule(napi); 325 goto again; 326 } 327 break; 328 329 case -ENOMEM: 330 ++cfv->stats.rx_nomem; 331 dev_kfree_skb(skb); 332 /* Stop NAPI poll on OOM, we hope to be polled later */ 333 napi_complete(napi); 334 vringh_notify_enable_kern(cfv->vr_rx); 335 break; 336 337 default: 338 /* We're doomed, any modem fault is fatal */ 339 netdev_warn(cfv->ndev, "Bad ring, disable device\n"); 340 cfv->ndev->stats.rx_dropped = riov->used - riov->i; 341 napi_complete(napi); 342 vringh_notify_disable_kern(cfv->vr_rx); 343 netif_carrier_off(cfv->ndev); 344 break; 345 } 346 out: 347 if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0) 348 vringh_notify(cfv->vr_rx); 349 return rxcnt; 350 } 351 352 static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx) 353 { 354 struct cfv_info *cfv = vdev->priv; 355 356 ++cfv->stats.rx_kicks; 357 vringh_notify_disable_kern(cfv->vr_rx); 358 napi_schedule(&cfv->napi); 359 } 360 361 static void cfv_destroy_genpool(struct cfv_info *cfv) 362 { 363 if (cfv->alloc_addr) 364 dma_free_coherent(cfv->vdev->dev.parent->parent, 365 cfv->allocsz, cfv->alloc_addr, 366 cfv->alloc_dma); 367 368 if (!cfv->genpool) 369 return; 370 gen_pool_free(cfv->genpool, cfv->reserved_mem, 371 cfv->reserved_size); 372 gen_pool_destroy(cfv->genpool); 373 cfv->genpool = NULL; 374 } 375 376 static int cfv_create_genpool(struct cfv_info *cfv) 377 { 378 int err; 379 380 /* dma_alloc can only allocate whole pages, and we need a more 381 * fine graned allocation so we use genpool. We ask for space needed 382 * by IP and a full ring. If the dma allcoation fails we retry with a 383 * smaller allocation size. 384 */ 385 err = -ENOMEM; 386 cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) * 387 (ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10; 388 if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu) 389 return -EINVAL; 390 391 for (;;) { 392 if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) { 393 netdev_info(cfv->ndev, "Not enough device memory\n"); 394 return -ENOMEM; 395 } 396 397 cfv->alloc_addr = dma_alloc_coherent( 398 cfv->vdev->dev.parent->parent, 399 cfv->allocsz, &cfv->alloc_dma, 400 GFP_ATOMIC); 401 if (cfv->alloc_addr) 402 break; 403 404 cfv->allocsz = (cfv->allocsz * 3) >> 2; 405 } 406 407 netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n", 408 cfv->allocsz); 409 410 /* Allocate on 128 bytes boundaries (1 << 7)*/ 411 cfv->genpool = gen_pool_create(7, -1); 412 if (!cfv->genpool) 413 goto err; 414 415 err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr, 416 (phys_addr_t)virt_to_phys(cfv->alloc_addr), 417 cfv->allocsz, -1); 418 if (err) 419 goto err; 420 421 /* Reserve some memory for low memory situations. If we hit the roof 422 * in the memory pool, we stop TX flow and release the reserve. 423 */ 424 cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu; 425 cfv->reserved_mem = gen_pool_alloc(cfv->genpool, 426 cfv->reserved_size); 427 if (!cfv->reserved_mem) { 428 err = -ENOMEM; 429 goto err; 430 } 431 432 cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx); 433 return 0; 434 err: 435 cfv_destroy_genpool(cfv); 436 return err; 437 } 438 439 /* Enable the CAIF interface and allocate the memory-pool */ 440 static int cfv_netdev_open(struct net_device *netdev) 441 { 442 struct cfv_info *cfv = netdev_priv(netdev); 443 444 if (cfv_create_genpool(cfv)) 445 return -ENOMEM; 446 447 netif_carrier_on(netdev); 448 napi_enable(&cfv->napi); 449 450 /* Schedule NAPI to read any pending packets */ 451 napi_schedule(&cfv->napi); 452 return 0; 453 } 454 455 /* Disable the CAIF interface and free the memory-pool */ 456 static int cfv_netdev_close(struct net_device *netdev) 457 { 458 struct cfv_info *cfv = netdev_priv(netdev); 459 unsigned long flags; 460 struct buf_info *buf_info; 461 462 /* Disable interrupts, queues and NAPI polling */ 463 netif_carrier_off(netdev); 464 virtqueue_disable_cb(cfv->vq_tx); 465 vringh_notify_disable_kern(cfv->vr_rx); 466 napi_disable(&cfv->napi); 467 468 /* Release any TX buffers on both used and avilable rings */ 469 cfv_release_used_buf(cfv->vq_tx); 470 spin_lock_irqsave(&cfv->tx_lock, flags); 471 while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx))) 472 free_buf_info(cfv, buf_info); 473 spin_unlock_irqrestore(&cfv->tx_lock, flags); 474 475 /* Release all dma allocated memory and destroy the pool */ 476 cfv_destroy_genpool(cfv); 477 return 0; 478 } 479 480 /* Allocate a buffer in dma-memory and copy skb to it */ 481 static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv, 482 struct sk_buff *skb, 483 struct scatterlist *sg) 484 { 485 struct caif_payload_info *info = (void *)&skb->cb; 486 struct buf_info *buf_info = NULL; 487 u8 pad_len, hdr_ofs; 488 489 if (!cfv->genpool) 490 goto err; 491 492 if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) { 493 netdev_warn(cfv->ndev, "Invalid packet len (%d > %d)\n", 494 cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu); 495 goto err; 496 } 497 498 buf_info = kmalloc(sizeof(struct buf_info), GFP_ATOMIC); 499 if (unlikely(!buf_info)) 500 goto err; 501 502 /* Make the IP header aligned in tbe buffer */ 503 hdr_ofs = cfv->tx_hr + info->hdr_len; 504 pad_len = hdr_ofs & (IP_HDR_ALIGN - 1); 505 buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len; 506 507 /* allocate dma memory buffer */ 508 buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size); 509 if (unlikely(!buf_info->vaddr)) 510 goto err; 511 512 /* copy skbuf contents to send buffer */ 513 skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len); 514 sg_init_one(sg, buf_info->vaddr + pad_len, 515 skb->len + cfv->tx_hr + cfv->rx_hr); 516 517 return buf_info; 518 err: 519 kfree(buf_info); 520 return NULL; 521 } 522 523 /* Put the CAIF packet on the virtio ring and kick the receiver */ 524 static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev) 525 { 526 struct cfv_info *cfv = netdev_priv(netdev); 527 struct buf_info *buf_info; 528 struct scatterlist sg; 529 unsigned long flags; 530 bool flow_off = false; 531 int ret; 532 533 /* garbage collect released buffers */ 534 cfv_release_used_buf(cfv->vq_tx); 535 spin_lock_irqsave(&cfv->tx_lock, flags); 536 537 /* Flow-off check takes into account number of cpus to make sure 538 * virtqueue will not be overfilled in any possible smp conditions. 539 * 540 * Flow-on is triggered when sufficient buffers are freed 541 */ 542 if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) { 543 flow_off = true; 544 cfv->stats.tx_full_ring++; 545 } 546 547 /* If we run out of memory, we release the memory reserve and retry 548 * allocation. 549 */ 550 buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg); 551 if (unlikely(!buf_info)) { 552 cfv->stats.tx_no_mem++; 553 flow_off = true; 554 555 if (cfv->reserved_mem && cfv->genpool) { 556 gen_pool_free(cfv->genpool, cfv->reserved_mem, 557 cfv->reserved_size); 558 cfv->reserved_mem = 0; 559 buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg); 560 } 561 } 562 563 if (unlikely(flow_off)) { 564 /* Turn flow on when a 1/4 of the descriptors are released */ 565 cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4; 566 /* Enable notifications of recycled TX buffers */ 567 virtqueue_enable_cb(cfv->vq_tx); 568 netif_tx_stop_all_queues(netdev); 569 } 570 571 if (unlikely(!buf_info)) { 572 /* If the memory reserve does it's job, this shouldn't happen */ 573 netdev_warn(cfv->ndev, "Out of gen_pool memory\n"); 574 goto err; 575 } 576 577 ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC); 578 if (unlikely((ret < 0))) { 579 /* If flow control works, this shouldn't happen */ 580 netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n", 581 ret); 582 goto err; 583 } 584 585 /* update netdev statistics */ 586 cfv->ndev->stats.tx_packets++; 587 cfv->ndev->stats.tx_bytes += skb->len; 588 spin_unlock_irqrestore(&cfv->tx_lock, flags); 589 590 /* tell the remote processor it has a pending message to read */ 591 virtqueue_kick(cfv->vq_tx); 592 593 dev_kfree_skb(skb); 594 return NETDEV_TX_OK; 595 err: 596 spin_unlock_irqrestore(&cfv->tx_lock, flags); 597 cfv->ndev->stats.tx_dropped++; 598 free_buf_info(cfv, buf_info); 599 dev_kfree_skb(skb); 600 return NETDEV_TX_OK; 601 } 602 603 static void cfv_tx_release_tasklet(unsigned long drv) 604 { 605 struct cfv_info *cfv = (struct cfv_info *)drv; 606 cfv_release_used_buf(cfv->vq_tx); 607 } 608 609 static const struct net_device_ops cfv_netdev_ops = { 610 .ndo_open = cfv_netdev_open, 611 .ndo_stop = cfv_netdev_close, 612 .ndo_start_xmit = cfv_netdev_tx, 613 }; 614 615 static void cfv_netdev_setup(struct net_device *netdev) 616 { 617 netdev->netdev_ops = &cfv_netdev_ops; 618 netdev->type = ARPHRD_CAIF; 619 netdev->tx_queue_len = 100; 620 netdev->flags = IFF_POINTOPOINT | IFF_NOARP; 621 netdev->mtu = CFV_DEF_MTU_SIZE; 622 netdev->destructor = free_netdev; 623 } 624 625 /* Create debugfs counters for the device */ 626 static inline void debugfs_init(struct cfv_info *cfv) 627 { 628 cfv->debugfs = 629 debugfs_create_dir(netdev_name(cfv->ndev), NULL); 630 631 if (IS_ERR(cfv->debugfs)) 632 return; 633 634 debugfs_create_u32("rx-napi-complete", S_IRUSR, cfv->debugfs, 635 &cfv->stats.rx_napi_complete); 636 debugfs_create_u32("rx-napi-resched", S_IRUSR, cfv->debugfs, 637 &cfv->stats.rx_napi_resched); 638 debugfs_create_u32("rx-nomem", S_IRUSR, cfv->debugfs, 639 &cfv->stats.rx_nomem); 640 debugfs_create_u32("rx-kicks", S_IRUSR, cfv->debugfs, 641 &cfv->stats.rx_kicks); 642 debugfs_create_u32("tx-full-ring", S_IRUSR, cfv->debugfs, 643 &cfv->stats.tx_full_ring); 644 debugfs_create_u32("tx-no-mem", S_IRUSR, cfv->debugfs, 645 &cfv->stats.tx_no_mem); 646 debugfs_create_u32("tx-kicks", S_IRUSR, cfv->debugfs, 647 &cfv->stats.tx_kicks); 648 debugfs_create_u32("tx-flow-on", S_IRUSR, cfv->debugfs, 649 &cfv->stats.tx_flow_on); 650 } 651 652 /* Setup CAIF for the a virtio device */ 653 static int cfv_probe(struct virtio_device *vdev) 654 { 655 vq_callback_t *vq_cbs = cfv_release_cb; 656 vrh_callback_t *vrh_cbs = cfv_recv; 657 const char *names = "output"; 658 const char *cfv_netdev_name = "cfvrt"; 659 struct net_device *netdev; 660 struct cfv_info *cfv; 661 int err = -EINVAL; 662 663 netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name, 664 cfv_netdev_setup); 665 if (!netdev) 666 return -ENOMEM; 667 668 cfv = netdev_priv(netdev); 669 cfv->vdev = vdev; 670 cfv->ndev = netdev; 671 672 spin_lock_init(&cfv->tx_lock); 673 674 /* Get the RX virtio ring. This is a "host side vring". */ 675 err = -ENODEV; 676 if (!vdev->vringh_config || !vdev->vringh_config->find_vrhs) 677 goto err; 678 679 err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs); 680 if (err) 681 goto err; 682 683 /* Get the TX virtio ring. This is a "guest side vring". */ 684 err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names); 685 if (err) 686 goto err; 687 688 /* Get the CAIF configuration from virtio config space, if available */ 689 if (vdev->config->get) { 690 virtio_cread(vdev, struct virtio_caif_transf_config, headroom, 691 &cfv->tx_hr); 692 virtio_cread(vdev, struct virtio_caif_transf_config, headroom, 693 &cfv->rx_hr); 694 virtio_cread(vdev, struct virtio_caif_transf_config, tailroom, 695 &cfv->tx_tr); 696 virtio_cread(vdev, struct virtio_caif_transf_config, tailroom, 697 &cfv->rx_tr); 698 virtio_cread(vdev, struct virtio_caif_transf_config, mtu, 699 &cfv->mtu); 700 virtio_cread(vdev, struct virtio_caif_transf_config, mtu, 701 &cfv->mru); 702 } else { 703 cfv->tx_hr = CFV_DEF_HEADROOM; 704 cfv->rx_hr = CFV_DEF_HEADROOM; 705 cfv->tx_tr = CFV_DEF_TAILROOM; 706 cfv->rx_tr = CFV_DEF_TAILROOM; 707 cfv->mtu = CFV_DEF_MTU_SIZE; 708 cfv->mru = CFV_DEF_MTU_SIZE; 709 } 710 711 netdev->needed_headroom = cfv->tx_hr; 712 netdev->needed_tailroom = cfv->tx_tr; 713 714 /* Disable buffer release interrupts unless we have stopped TX queues */ 715 virtqueue_disable_cb(cfv->vq_tx); 716 717 netdev->mtu = cfv->mtu - cfv->tx_tr; 718 vdev->priv = cfv; 719 720 /* Initialize NAPI poll context data */ 721 vringh_kiov_init(&cfv->ctx.riov, NULL, 0); 722 cfv->ctx.head = USHRT_MAX; 723 netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA); 724 725 tasklet_init(&cfv->tx_release_tasklet, 726 cfv_tx_release_tasklet, 727 (unsigned long)cfv); 728 729 /* Carrier is off until netdevice is opened */ 730 netif_carrier_off(netdev); 731 732 /* register Netdev */ 733 err = register_netdev(netdev); 734 if (err) { 735 dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err); 736 goto err; 737 } 738 739 debugfs_init(cfv); 740 741 return 0; 742 err: 743 netdev_warn(cfv->ndev, "CAIF Virtio probe failed:%d\n", err); 744 745 if (cfv->vr_rx) 746 vdev->vringh_config->del_vrhs(cfv->vdev); 747 if (cfv->vdev) 748 vdev->config->del_vqs(cfv->vdev); 749 free_netdev(netdev); 750 return err; 751 } 752 753 static void cfv_remove(struct virtio_device *vdev) 754 { 755 struct cfv_info *cfv = vdev->priv; 756 757 rtnl_lock(); 758 dev_close(cfv->ndev); 759 rtnl_unlock(); 760 761 tasklet_kill(&cfv->tx_release_tasklet); 762 debugfs_remove_recursive(cfv->debugfs); 763 764 vringh_kiov_cleanup(&cfv->ctx.riov); 765 vdev->config->reset(vdev); 766 vdev->vringh_config->del_vrhs(cfv->vdev); 767 cfv->vr_rx = NULL; 768 vdev->config->del_vqs(cfv->vdev); 769 unregister_netdev(cfv->ndev); 770 } 771 772 static struct virtio_device_id id_table[] = { 773 { VIRTIO_ID_CAIF, VIRTIO_DEV_ANY_ID }, 774 { 0 }, 775 }; 776 777 static unsigned int features[] = { 778 }; 779 780 static struct virtio_driver caif_virtio_driver = { 781 .feature_table = features, 782 .feature_table_size = ARRAY_SIZE(features), 783 .driver.name = KBUILD_MODNAME, 784 .driver.owner = THIS_MODULE, 785 .id_table = id_table, 786 .probe = cfv_probe, 787 .remove = cfv_remove, 788 }; 789 790 module_virtio_driver(caif_virtio_driver); 791 MODULE_DEVICE_TABLE(virtio, id_table); 792