/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>

#include "usb_mon.h"

/*
 * Defined by USB 2.0 clause 9.3, table 9.2.
 */
#define SETUP_LEN 8

/* ioctl macros */
#define MON_IOC_MAGIC 0x92

#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)

#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#endif

/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right. Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE PAGE_SIZE
#define CHUNK_ALIGN(x)	(((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))

/*
 * The magic limit was calculated so that it allows the monitoring
 * application to pick data once in two ticks. This way, another application,
 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
 * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an
 * enormous overhead built into the bus protocol, so we need about 1000 KB.
 *
 * This is still too much for most cases, where we just snoop a few
 * descriptor fetches for enumeration. So, the default is a "reasonable"
 * amount for systems with HZ=250 and incomplete bus saturation.
 *
 * XXX What about multi-megabyte URBs which take minutes to transfer?
 */
#define BUFF_MAX	CHUNK_ALIGN(1200*1024)
#define BUFF_DFL	CHUNK_ALIGN(300*1024)
#define BUFF_MIN	CHUNK_ALIGN(8*1024)

/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {
	u64 id;				/* URB ID - from submission to callback */
	unsigned char type;		/* Same as in text API; extensible. */
	unsigned char xfer_type;	/* ISO, Intr, Control, Bulk */
	unsigned char epnum;		/* Endpoint number and transfer direction */
	unsigned char devnum;		/* Device address */
	unsigned short busnum;		/* Bus number */
	char flag_setup;
	char flag_data;
	s64 ts_sec;			/* gettimeofday */
	s32 ts_usec;			/* gettimeofday */
	int status;
	unsigned int len_urb;		/* Length of data (submitted or actual) */
	unsigned int len_cap;		/* Delivered length */
	unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
};

/* per file statistic */
struct mon_bin_stats {
	u32 queued;
	u32 dropped;
};

struct mon_bin_get {
	struct mon_bin_hdr __user *hdr;	/* Only 48 bytes, not 64. */
	void __user *data;
	size_t alloc;			/* Length of data (can be zero) */
};

struct mon_bin_mfetch {
	u32 __user *offvec;	/* Vector of events fetched */
	u32 nfetch;		/* Number of events to fetch (out: fetched) */
	u32 nflush;		/* Number of events to flush */
};

#ifdef CONFIG_COMPAT
struct mon_bin_get32 {
	u32 hdr32;
	u32 data32;
	u32 alloc32;
};

struct mon_bin_mfetch32 {
	u32 offvec32;
	u32 nfetch32;
	u32 nflush32;
};
#endif

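/*
 * Illustration only, not used by the driver: a userland reader would
 * typically open the character device and pull one event with
 * MON_IOCX_GET, roughly as follows (error handling omitted; the
 * "/dev/usbmon1" path is an assumption of the example, not something
 * this file creates):
 *
 *	struct mon_bin_hdr hdr;
 *	unsigned char data[4096];
 *	struct mon_bin_get getb = { &hdr, data, sizeof(data) };
 *	int fd = open("/dev/usbmon1", O_RDONLY);
 *	ioctl(fd, MON_IOCX_GET, &getb);    -- blocks until an event is ready
 *
 * On return, hdr describes the URB event and data holds up to
 * hdr.len_cap captured bytes.
 */
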
/* Having these two values same prevents wrapping of the mon_bin_hdr */
#define PKT_ALIGN	64
#define PKT_SIZE	64

/* max number of USB buses supported */
#define MON_BIN_MAX_MINOR 128

/*
 * The buffer: map of used pages.
 */
struct mon_pgmap {
	struct page *pg;
	unsigned char *ptr;	/* XXX just use page_to_virt everywhere? */
};

/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {
	/* The buffer: one per open. */
	spinlock_t b_lock;		/* Protect b_cnt, b_in */
	unsigned int b_size;		/* Current size of the buffer - bytes */
	unsigned int b_cnt;		/* Bytes used */
	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
	unsigned int b_read;		/* Amount of read data in curr. pkt. */
	struct mon_pgmap *b_vec;	/* The map array */
	wait_queue_head_t b_wait;	/* Wait for data here */

	struct mutex fetch_lock;	/* Protect b_read, b_out */
	int mmap_active;

	/* A list of these is needed for "bus 0". Some time later. */
	struct mon_reader r;

	/* Stats */
	unsigned int cnt_lost;
};

static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
    unsigned int offset)
{
	return (struct mon_bin_hdr *)
	    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
}

#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)

static unsigned char xfer_to_pipe[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

static struct class *mon_bin_class;
static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);

/*
 * This is a "chunked memcpy". It does not manipulate any counters.
 * But it returns the new offset for repeated application.
 */
unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		memcpy(buf, from, step_len);
		if ((off += step_len) >= this->b_size)
			off = 0;
		from += step_len;
		length -= step_len;
	}
	return off;
}

/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		if (copy_to_user(to, buf, step_len))
			return -EINVAL;
		if ((off += step_len) >= this->b_size)
			off = 0;
		to += step_len;
		length -= step_len;
	}
	return 0;
}

/*
 * Allocate an (aligned) area in the buffer.
 * This is called under b_lock.
 * Returns ~0 on failure.
 */
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	offset = rp->b_in;
	rp->b_cnt += size;
	if ((rp->b_in += size) >= rp->b_size)
		rp->b_in -= rp->b_size;
	return offset;
}

/*
 * This is the same thing as mon_buff_area_alloc, only it does not allow
 * buffers to wrap. This is needed by applications which pass references
 * into mmap-ed buffers up their stacks (libpcap can do that).
 *
 * Currently, we always have the header stuck with the data, although
 * it is not strictly speaking necessary.
 *
 * When a buffer would wrap, we place a filler packet to mark the space.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;
	unsigned int fill_size;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	if (rp->b_in + size > rp->b_size) {
		/*
		 * This would wrap. Find if we still have space after
		 * skipping to the end of the buffer. If we do, place
		 * a filler packet and allocate a new packet.
		 */
		fill_size = rp->b_size - rp->b_in;
		if (rp->b_cnt + size + fill_size > rp->b_size)
			return ~0;
		mon_buff_area_fill(rp, rp->b_in, fill_size);

		offset = 0;
		rp->b_in = size;
		rp->b_cnt += size + fill_size;
	} else if (rp->b_in + size == rp->b_size) {
		offset = rp->b_in;
		rp->b_in = 0;
		rp->b_cnt += size;
	} else {
		offset = rp->b_in;
		rp->b_in += size;
		rp->b_cnt += size;
	}
	return offset;
}

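/*
 * A worked example of the contiguous allocator above, for illustration
 * only (the numbers are made up): with b_size = 1024, b_in = 960 and a
 * 128-byte request, the record cannot be placed without wrapping. The
 * remaining 64 bytes at the end of the ring are consumed by a filler
 * record (type '@', written by mon_buff_area_fill), the real record is
 * placed at offset 0, and b_in becomes 128. Consumers of the mmap-ed
 * buffer are expected to skip '@' records.
 */
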
/*
 * Return a few (kilo-)bytes to the head of the buffer.
 * This is used if a DMA fetch fails.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if (rp->b_in < size)
		rp->b_in += rp->b_size;
	rp->b_in -= size;
}

/*
 * This has to be called under both b_lock and fetch_lock, because
 * it accesses both b_cnt and b_out.
 */
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if ((rp->b_out += size) >= rp->b_size)
		rp->b_out -= rp->b_size;
}

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{
	struct mon_bin_hdr *ep;

	ep = MON_OFF2HDR(rp, offset);
	memset(ep, 0, PKT_SIZE);
	ep->type = '@';
	ep->len_cap = size - PKT_SIZE;
}

static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{

	if (!usb_endpoint_xfer_control(&urb->ep->desc) || ev_type != 'S')
		return '-';

	if (urb->setup_packet == NULL)
		return 'Z';

	memcpy(setupb, urb->setup_packet, SETUP_LEN);
	return 0;
}

static char mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length)
{

	if (urb->dev->bus->uses_dma &&
	    (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
		return 0;
	}

	if (urb->transfer_buffer == NULL)
		return 'Z';

	mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
	return 0;
}

static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type, int status)
{
	const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
	unsigned long flags;
	struct timeval ts;
	unsigned int urb_length;
	unsigned int offset;
	unsigned int length;
	unsigned char dir;
	struct mon_bin_hdr *ep;
	char data_tag = 0;

	do_gettimeofday(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	/*
	 * Find the maximum allowable length, then allocate space.
	 */
	urb_length = (ev_type == 'S') ?
	    urb->transfer_buffer_length : urb->actual_length;
	length = urb_length;

	if (length >= rp->b_size/5)
		length = rp->b_size/5;

	if (usb_urb_dir_in(urb)) {
		if (ev_type == 'S') {
			length = 0;
			data_tag = '<';
		}
		/* Cannot rely on endpoint number in case of control ep.0 */
		dir = USB_DIR_IN;
	} else {
		if (ev_type == 'C') {
			length = 0;
			data_tag = '>';
		}
		dir = 0;
	}

	if (rp->mmap_active)
		offset = mon_buff_area_alloc_contiguous(rp, length + PKT_SIZE);
	else
		offset = mon_buff_area_alloc(rp, length + PKT_SIZE);
	if (offset == ~0) {
		rp->cnt_lost++;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);
	if ((offset += PKT_SIZE) >= rp->b_size)
		offset = 0;

	/*
	 * Fill the allocated area.
	 */
	memset(ep, 0, PKT_SIZE);
	ep->type = ev_type;
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
	ep->epnum = dir | usb_endpoint_num(epd);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_usec;
	ep->status = status;
	ep->len_urb = urb_length;
	ep->len_cap = length;

	ep->flag_setup = mon_bin_get_setup(ep->setup, urb, ev_type);
	if (length != 0) {
		ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
		if (ep->flag_data != 0) {	/* Yes, it's 0x00, not '0' */
			ep->len_cap = 0;
			mon_buff_area_shrink(rp, length);
		}
	} else {
		ep->flag_data = data_tag;
	}

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

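/*
 * Layout note (descriptive only): every event occupies a PKT_SIZE (64-byte)
 * header immediately followed by up to len_cap bytes of captured data, and
 * the whole record is rounded up to PKT_ALIGN. The data therefore starts at
 * (record offset + PKT_SIZE) modulo b_size, which is what mon_bin_event
 * above passes to mon_bin_get_data and what the readers below undo.
 */
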
static void mon_bin_submit(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'S', -EINPROGRESS);
}

static void mon_bin_complete(void *data, struct urb *urb, int status)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'C', status);
}

static void mon_bin_error(void *data, struct urb *urb, int error)
{
	struct mon_reader_bin *rp = data;
	unsigned long flags;
	unsigned int offset;
	struct mon_bin_hdr *ep;

	spin_lock_irqsave(&rp->b_lock, flags);

	offset = mon_buff_area_alloc(rp, PKT_SIZE);
	if (offset == ~0) {
		/* Not incrementing cnt_lost. Just because. */
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);

	memset(ep, 0, PKT_SIZE);
	ep->type = 'E';
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
	ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
	ep->epnum |= usb_endpoint_num(&urb->ep->desc);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->status = error;

	ep->flag_setup = '-';
	ep->flag_data = 'E';

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static int mon_bin_open(struct inode *inode, struct file *file)
{
	struct mon_bus *mbus;
	struct mon_reader_bin *rp;
	size_t size;
	int rc;

	lock_kernel();
	mutex_lock(&mon_lock);
	if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
		mutex_unlock(&mon_lock);
		unlock_kernel();
		return -ENODEV;
	}
	if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
		printk(KERN_ERR TAG ": consistency error on open\n");
		mutex_unlock(&mon_lock);
		unlock_kernel();
		return -ENODEV;
	}

	rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
	if (rp == NULL) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	spin_lock_init(&rp->b_lock);
	init_waitqueue_head(&rp->b_wait);
	mutex_init(&rp->fetch_lock);

	rp->b_size = BUFF_DFL;

	size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
	if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
		rc = -ENOMEM;
		goto err_allocvec;
	}

	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
		goto err_allocbuff;

	rp->r.m_bus = mbus;
	rp->r.r_data = rp;
	rp->r.rnf_submit = mon_bin_submit;
	rp->r.rnf_error = mon_bin_error;
	rp->r.rnf_complete = mon_bin_complete;

	mon_reader_add(mbus, &rp->r);

	file->private_data = rp;
	mutex_unlock(&mon_lock);
	unlock_kernel();
	return 0;

err_allocbuff:
	kfree(rp->b_vec);
err_allocvec:
	kfree(rp);
err_alloc:
	mutex_unlock(&mon_lock);
	unlock_kernel();
	return rc;
}

/*
 * Extract an event from buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, void __user *data, unsigned int nbytes)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	size_t step_len;
	unsigned int offset;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (copy_to_user(hdr, ep, sizeof(struct mon_bin_hdr))) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	step_len = min(ep->len_cap, nbytes);
	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size)
		offset = 0;

	if (copy_from_buf(rp, offset, data, step_len)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;

	mutex_unlock(&rp->fetch_lock);
	return 0;
}

static int mon_bin_release(struct inode *inode, struct file *file)
{
	struct mon_reader_bin *rp = file->private_data;
	struct mon_bus *mbus = rp->r.m_bus;

	mutex_lock(&mon_lock);

	if (mbus->nreaders <= 0) {
		printk(KERN_ERR TAG ": consistency error on close\n");
		mutex_unlock(&mon_lock);
		return 0;
	}
	mon_reader_del(mbus, &rp->r);

	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
	kfree(rp->b_vec);
	kfree(rp);

	mutex_unlock(&mon_lock);
	return 0;
}

static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int offset;
	size_t step_len;
	char *ptr;
	ssize_t done = 0;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (rp->b_read < sizeof(struct mon_bin_hdr)) {
		step_len = min(nbytes, sizeof(struct mon_bin_hdr) - rp->b_read);
		ptr = ((char *)ep) + rp->b_read;
		if (step_len && copy_to_user(buf, ptr, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	if (rp->b_read >= sizeof(struct mon_bin_hdr)) {
		step_len = ep->len_cap;
		step_len -= rp->b_read - sizeof(struct mon_bin_hdr);
		if (step_len > nbytes)
			step_len = nbytes;
		offset = rp->b_out + PKT_SIZE;
		offset += rp->b_read - sizeof(struct mon_bin_hdr);
		if (offset >= rp->b_size)
			offset -= rp->b_size;
		if (copy_from_buf(rp, offset, buf, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	/*
	 * Check if whole packet was read, and if so, jump to the next one.
	 */
	if (rp->b_read >= sizeof(struct mon_bin_hdr) + ep->len_cap) {
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
		spin_unlock_irqrestore(&rp->b_lock, flags);
		rp->b_read = 0;
	}

	mutex_unlock(&rp->fetch_lock);
	return done;
}

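/*
 * Descriptive note on mon_bin_read() above: a single call never crosses a
 * record boundary. It returns at most the remainder of the current record
 * (header first, then captured data, tracked by b_read), so a consumer that
 * wants whole records should keep issuing read() calls; the driver frees
 * the record and resets its position once the record is fully consumed.
 */
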
/*
 * Remove at most nevents from chunked buffer.
 * Returns the number of removed events.
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	int i;

	mutex_lock(&rp->fetch_lock);
	spin_lock_irqsave(&rp->b_lock, flags);
	for (i = 0; i < nevents; ++i) {
		if (MON_RING_EMPTY(rp))
			break;

		ep = MON_OFF2HDR(rp, rp->b_out);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;
	mutex_unlock(&rp->fetch_lock);
	return i;
}

/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		if (nevents >= max)
			break;

		ep = MON_OFF2HDR(rp, cur_out);
		if (put_user(cur_out, &vec[nevents])) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 * Count events. This is almost the same as the above mon_bin_fetch,
 * only we do not store offsets into user vector, and we have no limit.
 */
static int mon_bin_queued(struct mon_reader_bin *rp)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	mutex_lock(&rp->fetch_lock);

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		ep = MON_OFF2HDR(rp, cur_out);

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 */
static int mon_bin_ioctl(struct inode *inode, struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	// struct mon_bus* mbus = rp->r.m_bus;
	int ret = 0;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	switch (cmd) {

	case MON_IOCQ_URB_LEN:
		/*
		 * N.B. This only returns the size of data, without the header.
		 */
		spin_lock_irqsave(&rp->b_lock, flags);
		if (!MON_RING_EMPTY(rp)) {
			ep = MON_OFF2HDR(rp, rp->b_out);
			ret = ep->len_cap;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		break;

	case MON_IOCQ_RING_SIZE:
		ret = rp->b_size;
		break;

	case MON_IOCT_RING_SIZE:
		/*
		 * Changing the buffer size will flush its contents; the new
		 * buffer is allocated before releasing the old one to be sure
		 * the device will stay functional also in case of memory
		 * pressure.
		 */
		{
		int size;
		struct mon_pgmap *vec;

		if (arg < BUFF_MIN || arg > BUFF_MAX)
			return -EINVAL;

		size = CHUNK_ALIGN(arg);
		if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE),
		    GFP_KERNEL)) == NULL) {
			ret = -ENOMEM;
			break;
		}

		ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
		if (ret < 0) {
			kfree(vec);
			break;
		}

		mutex_lock(&rp->fetch_lock);
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
		kfree(rp->b_vec);
		rp->b_vec = vec;
		rp->b_size = size;
		rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		mutex_unlock(&rp->fetch_lock);
		}
		break;

	case MON_IOCH_MFLUSH:
		ret = mon_bin_flush(rp, arg);
		break;

	case MON_IOCX_GET:
		{
		struct mon_bin_get getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get)))
			return -EFAULT;

		if (getb.alloc > 0x10000000)	/* Want to cast to u32 */
			return -EINVAL;
		ret = mon_bin_get_event(file, rp,
		    getb.hdr, getb.data, (unsigned int)getb.alloc);
		}
		break;

	case MON_IOCX_MFETCH:
		{
		struct mon_bin_mfetch mfetch;
		struct mon_bin_mfetch __user *uptr;

		uptr = (struct mon_bin_mfetch __user *)arg;

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush) {
			ret = mon_bin_flush(rp, mfetch.nflush);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch))
			return -EFAULT;
		ret = 0;
		}
		break;

	case MON_IOCG_STATS: {
		struct mon_bin_stats __user *sp;
		unsigned int nevents;
		unsigned int ndropped;

		spin_lock_irqsave(&rp->b_lock, flags);
		ndropped = rp->cnt_lost;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		nevents = mon_bin_queued(rp);

		sp = (struct mon_bin_stats __user *)arg;
		if (put_user(ndropped, &sp->dropped))
			return -EFAULT;
		if (put_user(nevents, &sp->queued))
			return -EFAULT;

		}
		break;

	default:
		return -ENOTTY;
	}

	return ret;
}

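/*
 * Illustration only, not used by the driver: with the ring mapped into the
 * process (see mon_bin_mmap below), a reader typically loops on
 * MON_IOCX_MFETCH, which flushes the events it has finished with and
 * returns ring offsets of new ones. Roughly (error handling omitted; the
 * variable names are the example's own):
 *
 *	u32 offs[64];
 *	unsigned int ndone = 0;
 *	struct mon_bin_mfetch fetch;
 *
 *	for (;;) {
 *		fetch.offvec = offs;
 *		fetch.nfetch = 64;
 *		fetch.nflush = ndone;
 *		ioctl(fd, MON_IOCX_MFETCH, &fetch);  -- blocks for new events
 *		... process fetch.nfetch records at map + offs[i] ...
 *		ndone = fetch.nfetch;
 *	}
 *
 * Leftover events can be dropped with MON_IOCH_MFLUSH before closing.
 */
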
#ifdef CONFIG_COMPAT
static long mon_bin_compat_ioctl(struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	int ret;

	switch (cmd) {

	case MON_IOCX_GET32: {
		struct mon_bin_get32 getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get32)))
			return -EFAULT;

		ret = mon_bin_get_event(file, rp,
		    compat_ptr(getb.hdr32), compat_ptr(getb.data32),
		    getb.alloc32);
		if (ret < 0)
			return ret;
		}
		return 0;

	case MON_IOCX_MFETCH32:
		{
		struct mon_bin_mfetch32 mfetch;
		struct mon_bin_mfetch32 __user *uptr;

		uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush32) {
			ret = mon_bin_flush(rp, mfetch.nflush32);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush32))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
		    mfetch.nfetch32);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch32))
			return -EFAULT;
		}
		return 0;

	case MON_IOCG_STATS:
		return mon_bin_ioctl(NULL, file, cmd,
		    (unsigned long) compat_ptr(arg));

	case MON_IOCQ_URB_LEN:
	case MON_IOCQ_RING_SIZE:
	case MON_IOCT_RING_SIZE:
	case MON_IOCH_MFLUSH:
		return mon_bin_ioctl(NULL, file, cmd, arg);

	default:
		;
	}
	return -ENOTTY;
}
#endif /* CONFIG_COMPAT */

static unsigned int
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int mask = 0;
	unsigned long flags;

	if (file->f_mode & FMODE_READ)
		poll_wait(file, &rp->b_wait, wait);

	spin_lock_irqsave(&rp->b_lock, flags);
	if (!MON_RING_EMPTY(rp))
		mask |= POLLIN | POLLRDNORM;	/* readable */
	spin_unlock_irqrestore(&rp->b_lock, flags);
	return mask;
}

/*
 * open and close: just keep track of how many times the device is
 * mapped, to use the proper memory allocation function.
 */
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active++;
}

static void mon_bin_vma_close(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active--;
}

/*
 * Map ring pages to user space.
 */
static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	unsigned long offset, chunk_idx;
	struct page *pageptr;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rp->b_size)
		return VM_FAULT_SIGBUS;
	chunk_idx = offset / CHUNK_SIZE;
	pageptr = rp->b_vec[chunk_idx].pg;
	get_page(pageptr);
	vmf->page = pageptr;
	return 0;
}

static struct vm_operations_struct mon_bin_vm_ops = {
	.open =		mon_bin_vma_open,
	.close =	mon_bin_vma_close,
	.fault =	mon_bin_vma_fault,
};

static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* don't do anything here: "fault" will set up page table entries */
	vma->vm_ops = &mon_bin_vm_ops;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = filp->private_data;
	mon_bin_vma_open(vma);
	return 0;
}

static const struct file_operations mon_fops_binary = {
	.owner =	THIS_MODULE,
	.open =		mon_bin_open,
	.llseek =	no_llseek,
	.read =		mon_bin_read,
	/* .write =	mon_text_write, */
	.poll =		mon_bin_poll,
	.ioctl =	mon_bin_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	mon_bin_compat_ioctl,
#endif
	.release =	mon_bin_release,
	.mmap =		mon_bin_mmap,
};

static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
	DECLARE_WAITQUEUE(waita, current);
	unsigned long flags;

	add_wait_queue(&rp->b_wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&rp->b_lock, flags);
	while (MON_RING_EMPTY(rp)) {
		spin_unlock_irqrestore(&rp->b_lock, flags);

		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->b_wait, &waita);
			return -EWOULDBLOCK;	/* Same as EAGAIN in Linux */
		}
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->b_wait, &waita);
			return -EINTR;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&rp->b_lock, flags);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->b_wait, &waita);
	return 0;
}

static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{
	int n;
	unsigned long vaddr;

	for (n = 0; n < npages; n++) {
		vaddr = get_zeroed_page(GFP_KERNEL);
		if (vaddr == 0) {
			while (n-- != 0)
				free_page((unsigned long) map[n].ptr);
			return -ENOMEM;
		}
		map[n].ptr = (unsigned char *) vaddr;
		map[n].pg = virt_to_page(vaddr);
	}
	return 0;
}

static void mon_free_buff(struct mon_pgmap *map, int npages)
{
	int n;

	for (n = 0; n < npages; n++)
		free_page((unsigned long) map[n].ptr);
}

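/*
 * Descriptive note: mon_bin_add() below creates one "usbmon%d" character
 * device per monitored bus (the minor is the bus number; minor 0 is used
 * for the bus-less case, the "bus 0" reader mentioned earlier), up to
 * MON_BIN_MAX_MINOR nodes.
 */
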
int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
	struct device *dev;
	unsigned minor = ubus ? ubus->busnum : 0;

	if (minor >= MON_BIN_MAX_MINOR)
		return 0;

	dev = device_create(mon_bin_class, ubus ? ubus->controller : NULL,
	    MKDEV(MAJOR(mon_bin_dev0), minor), NULL,
	    "usbmon%d", minor);
	if (IS_ERR(dev))
		return 0;

	mbus->classdev = dev;
	return 1;
}

void mon_bin_del(struct mon_bus *mbus)
{
	device_destroy(mon_bin_class, mbus->classdev->devt);
}

int __init mon_bin_init(void)
{
	int rc;

	mon_bin_class = class_create(THIS_MODULE, "usbmon");
	if (IS_ERR(mon_bin_class)) {
		rc = PTR_ERR(mon_bin_class);
		goto err_class;
	}

	rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
	if (rc < 0)
		goto err_dev;

	cdev_init(&mon_bin_cdev, &mon_fops_binary);
	mon_bin_cdev.owner = THIS_MODULE;

	rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
	if (rc < 0)
		goto err_add;

	return 0;

err_add:
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
	class_destroy(mon_bin_class);
err_class:
	return rc;
}

void mon_bin_exit(void)
{
	cdev_del(&mon_bin_cdev);
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
	class_destroy(mon_bin_class);
}