/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>

#include "usb_mon.h"

/*
 * Defined by USB 2.0 clause 9.3, table 9.2.
 */
#define SETUP_LEN  8

/* ioctl macros */
#define MON_IOC_MAGIC 0x92

#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#endif

/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right.  Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Whoops, get_zeroed_page() returns a single page.  I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE	PAGE_SIZE
#define CHUNK_ALIGN(x)	(((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))

/*
 * The magic limit was calculated so that it allows the monitoring
 * application to pick data once in two ticks. This way, another application,
 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
 * If HZ is 100, a 480 Mbit/s bus drives 614 KB every jiffy.  USB has an
 * enormous overhead built into the bus protocol, so we need about 1000 KB.
 *
 * This is still too much for most cases, where we just snoop a few
 * descriptor fetches for enumeration.  So, the default is a "reasonable"
 * amount for systems with HZ=250 and incomplete bus saturation.
 *
 * XXX What about multi-megabyte URBs which take minutes to transfer?
 */
#define BUFF_MAX	CHUNK_ALIGN(1200*1024)
#define BUFF_DFL	CHUNK_ALIGN(300*1024)
#define BUFF_MIN	CHUNK_ALIGN(8*1024)

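/*
 * For illustration only (these numbers assume 4 KB pages, so CHUNK_SIZE is
 * 4096): CHUNK_ALIGN(8*1024) = 8192, CHUNK_ALIGN(300*1024) = 307200 and
 * CHUNK_ALIGN(1200*1024) = 1228800 are already page multiples, so the ring
 * spans 2, 75 or 300 chunks respectively.  On a 64 KB-page machine the same
 * macro rounds BUFF_MIN up to a single 64 KB chunk.
 */
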
/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {
	u64 id;			/* URB ID - from submission to callback */
	unsigned char type;	/* Same as in text API; extensible. */
	unsigned char xfer_type;	/* ISO, Intr, Control, Bulk */
	unsigned char epnum;	/* Endpoint number and transfer direction */
	unsigned char devnum;	/* Device address */
	unsigned short busnum;	/* Bus number */
	char flag_setup;
	char flag_data;
	s64 ts_sec;		/* gettimeofday */
	s32 ts_usec;		/* gettimeofday */
	int status;
	unsigned int len_urb;	/* Length of data (submitted or actual) */
	unsigned int len_cap;	/* Delivered length */
	unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
};

/* per file statistic */
struct mon_bin_stats {
	u32 queued;
	u32 dropped;
};

struct mon_bin_get {
	struct mon_bin_hdr __user *hdr;	/* Only 48 bytes, not 64. */
	void __user *data;
	size_t alloc;		/* Length of data (can be zero) */
};

struct mon_bin_mfetch {
	u32 __user *offvec;	/* Vector of events fetched */
	u32 nfetch;		/* Number of events to fetch (out: fetched) */
	u32 nflush;		/* Number of events to flush */
};

#ifdef CONFIG_COMPAT
struct mon_bin_get32 {
	u32 hdr32;
	u32 data32;
	u32 alloc32;
};

struct mon_bin_mfetch32 {
	u32 offvec32;
	u32 nfetch32;
	u32 nflush32;
};
#endif

/* Keeping these two values equal prevents wrapping of the mon_bin_hdr */
#define PKT_ALIGN	64
#define PKT_SIZE	64

/* Maximum number of USB buses supported */
#define MON_BIN_MAX_MINOR 128

/*
 * The buffer: map of used pages.
 */
struct mon_pgmap {
	struct page *pg;
	unsigned char *ptr;	/* XXX just use page_to_virt everywhere? */
};

/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {
	/* The buffer: one per open. */
	spinlock_t b_lock;		/* Protect b_cnt, b_in */
	unsigned int b_size;		/* Current size of the buffer - bytes */
	unsigned int b_cnt;		/* Bytes used */
	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
	unsigned int b_read;		/* Amount of read data in curr. pkt. */
	struct mon_pgmap *b_vec;	/* The map array */
	wait_queue_head_t b_wait;	/* Wait for data here */

	struct mutex fetch_lock;	/* Protect b_read, b_out */
	int mmap_active;

	/* A list of these is needed for "bus 0". Some time later. */
	struct mon_reader r;

	/* Stats */
	unsigned int cnt_lost;
};

static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
    unsigned int offset)
{
	return (struct mon_bin_hdr *)
	    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
}

#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)

static unsigned char xfer_to_pipe[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

static struct class *mon_bin_class;
static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);

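/*
 * A worked example of the chunked addressing used below (illustrative
 * numbers, assuming 4 KB chunks): copying 200 bytes to offset 4000 touches
 * two chunks.  The first pass copies 4096 - 4000 = 96 bytes to the tail of
 * b_vec[0], the second copies the remaining 104 bytes to the head of
 * b_vec[1], and the returned offset is 4200.
 */
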
/*
 * This is a "chunked memcpy". It does not manipulate any counters.
 * But it returns the new offset for repeated application.
 */
unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		memcpy(buf, from, step_len);
		if ((off += step_len) >= this->b_size)
			off = 0;
		from += step_len;
		length -= step_len;
	}
	return off;
}

/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		if (copy_to_user(to, buf, step_len))
			return -EINVAL;
		if ((off += step_len) >= this->b_size)
			off = 0;
		to += step_len;
		length -= step_len;
	}
	return 0;
}

/*
 * Allocate an (aligned) area in the buffer.
 * This is called under b_lock.
 * Returns ~0 on failure.
 */
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	offset = rp->b_in;
	rp->b_cnt += size;
	if ((rp->b_in += size) >= rp->b_size)
		rp->b_in -= rp->b_size;
	return offset;
}

/*
 * This is the same thing as mon_buff_area_alloc, only it does not allow
 * buffers to wrap. This is needed by applications which pass references
 * into mmap-ed buffers up their stacks (libpcap can do that).
 *
 * Currently, we always have the header stuck with the data, although
 * it is not strictly speaking necessary.
 *
 * When a buffer would wrap, we place a filler packet to mark the space.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;
	unsigned int fill_size;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	if (rp->b_in + size > rp->b_size) {
		/*
		 * This would wrap. Find if we still have space after
		 * skipping to the end of the buffer. If we do, place
		 * a filler packet and allocate a new packet.
		 */
		fill_size = rp->b_size - rp->b_in;
		if (rp->b_cnt + size + fill_size > rp->b_size)
			return ~0;
		mon_buff_area_fill(rp, rp->b_in, fill_size);

		offset = 0;
		rp->b_in = size;
		rp->b_cnt += size + fill_size;
	} else if (rp->b_in + size == rp->b_size) {
		offset = rp->b_in;
		rp->b_in = 0;
		rp->b_cnt += size;
	} else {
		offset = rp->b_in;
		rp->b_in += size;
		rp->b_cnt += size;
	}
	return offset;
}

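/*
 * Sizing example for the two allocators above (illustrative numbers):
 * with PKT_SIZE and PKT_ALIGN both 64, a 100-byte capture needs
 * 64 + 100 = 164 bytes, rounded up to 192.  If the ring is mmap-ed and only
 * 128 bytes remain before the end of the buffer, those 128 bytes become a
 * filler record (type '@', len_cap 64) and the 192-byte event is placed at
 * offset zero instead of wrapping.
 */
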
/*
 * Return a few (kilo-)bytes to the head of the buffer.
 * This is used if a DMA fetch fails.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if (rp->b_in < size)
		rp->b_in += rp->b_size;
	rp->b_in -= size;
}

/*
 * This has to be called under both b_lock and fetch_lock, because
 * it accesses both b_cnt and b_out.
 */
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if ((rp->b_out += size) >= rp->b_size)
		rp->b_out -= rp->b_size;
}

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{
	struct mon_bin_hdr *ep;

	ep = MON_OFF2HDR(rp, offset);
	memset(ep, 0, PKT_SIZE);
	ep->type = '@';
	ep->len_cap = size - PKT_SIZE;
}

static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{

	if (!usb_endpoint_xfer_control(&urb->ep->desc) || ev_type != 'S')
		return '-';

	if (urb->setup_packet == NULL)
		return 'Z';

	memcpy(setupb, urb->setup_packet, SETUP_LEN);
	return 0;
}

static char mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length)
{

	if (urb->dev->bus->uses_dma &&
	    (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
		return 0;
	}

	if (urb->transfer_buffer == NULL)
		return 'Z';

	mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
	return 0;
}

static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type, int status)
{
	const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
	unsigned long flags;
	struct timeval ts;
	unsigned int urb_length;
	unsigned int offset;
	unsigned int length;
	unsigned char dir;
	struct mon_bin_hdr *ep;
	char data_tag = 0;

	do_gettimeofday(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	/*
	 * Find the maximum allowable length, then allocate space.
	 */
	urb_length = (ev_type == 'S') ?
	    urb->transfer_buffer_length : urb->actual_length;
	length = urb_length;

	if (length >= rp->b_size/5)
		length = rp->b_size/5;

	if (usb_urb_dir_in(urb)) {
		if (ev_type == 'S') {
			length = 0;
			data_tag = '<';
		}
		/* Cannot rely on endpoint number in case of control ep.0 */
		dir = USB_DIR_IN;
	} else {
		if (ev_type == 'C') {
			length = 0;
			data_tag = '>';
		}
		dir = 0;
	}

	if (rp->mmap_active)
		offset = mon_buff_area_alloc_contiguous(rp, length + PKT_SIZE);
	else
		offset = mon_buff_area_alloc(rp, length + PKT_SIZE);
	if (offset == ~0) {
		rp->cnt_lost++;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);
	if ((offset += PKT_SIZE) >= rp->b_size)
		offset = 0;

	/*
	 * Fill the allocated area.
	 */
	memset(ep, 0, PKT_SIZE);
	ep->type = ev_type;
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
	ep->epnum = dir | usb_endpoint_num(epd);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_usec;
	ep->status = status;
	ep->len_urb = urb_length;
	ep->len_cap = length;

	ep->flag_setup = mon_bin_get_setup(ep->setup, urb, ev_type);
	if (length != 0) {
		ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
		if (ep->flag_data != 0) {	/* Yes, it's 0x00, not '0' */
			ep->len_cap = 0;
			mon_buff_area_shrink(rp, length);
		}
	} else {
		ep->flag_data = data_tag;
	}

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static void mon_bin_submit(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'S', -EINPROGRESS);
}

static void mon_bin_complete(void *data, struct urb *urb, int status)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'C', status);
}

static void mon_bin_error(void *data, struct urb *urb, int error)
{
	struct mon_reader_bin *rp = data;
	unsigned long flags;
	unsigned int offset;
	struct mon_bin_hdr *ep;

	spin_lock_irqsave(&rp->b_lock, flags);

	offset = mon_buff_area_alloc(rp, PKT_SIZE);
	if (offset == ~0) {
		/* Not incrementing cnt_lost. Just because. */
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);

	memset(ep, 0, PKT_SIZE);
	ep->type = 'E';
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
	ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
	ep->epnum |= usb_endpoint_num(&urb->ep->desc);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->status = error;

	ep->flag_setup = '-';
	ep->flag_data = 'E';

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static int mon_bin_open(struct inode *inode, struct file *file)
{
	struct mon_bus *mbus;
	struct mon_reader_bin *rp;
	size_t size;
	int rc;

	lock_kernel();
	mutex_lock(&mon_lock);
	if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
		mutex_unlock(&mon_lock);
		unlock_kernel();
		return -ENODEV;
	}
	if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
		printk(KERN_ERR TAG ": consistency error on open\n");
		mutex_unlock(&mon_lock);
		unlock_kernel();
		return -ENODEV;
	}

	rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
	if (rp == NULL) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	spin_lock_init(&rp->b_lock);
	init_waitqueue_head(&rp->b_wait);
	mutex_init(&rp->fetch_lock);

	rp->b_size = BUFF_DFL;

	size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
	if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
		rc = -ENOMEM;
		goto err_allocvec;
	}

	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
		goto err_allocbuff;

	rp->r.m_bus = mbus;
	rp->r.r_data = rp;
	rp->r.rnf_submit = mon_bin_submit;
	rp->r.rnf_error = mon_bin_error;
	rp->r.rnf_complete = mon_bin_complete;

	mon_reader_add(mbus, &rp->r);

	file->private_data = rp;
	mutex_unlock(&mon_lock);
	unlock_kernel();
	return 0;

err_allocbuff:
	kfree(rp->b_vec);
err_allocvec:
	kfree(rp);
err_alloc:
	mutex_unlock(&mon_lock);
	unlock_kernel();
	return rc;
}

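/*
 * A minimal user-space sketch of the MON_IOCX_GET path implemented below.
 * It is illustrative only: the device node name (/dev/usbmon0 here) depends
 * on the local udev setup, and the mirrored structures and ioctl number
 * must stay in sync with the definitions at the top of this file.
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	struct usbmon_packet {		// userland mirror of mon_bin_hdr
 *		uint64_t id;
 *		unsigned char type, xfer_type, epnum, devnum;
 *		unsigned short busnum;
 *		char flag_setup, flag_data;
 *		int64_t ts_sec;
 *		int32_t ts_usec;
 *		int status;
 *		unsigned int len_urb, len_cap;
 *		unsigned char setup[8];
 *	};
 *	struct usbmon_get {		// userland mirror of mon_bin_get
 *		struct usbmon_packet *hdr;
 *		void *data;
 *		size_t alloc;
 *	};
 *	#define USBMON_IOCX_GET _IOW(0x92, 6, struct usbmon_get)
 *
 *	int main(void)
 *	{
 *		struct usbmon_packet hdr;
 *		unsigned char data[4096];
 *		struct usbmon_get arg = { &hdr, data, sizeof(data) };
 *		int fd = open("/dev/usbmon0", O_RDONLY);
 *
 *		if (fd < 0 || ioctl(fd, USBMON_IOCX_GET, &arg) < 0)
 *			return 1;
 *		printf("bus %u dev %u ep 0x%02x type %c len %u\n",
 *		    hdr.busnum, hdr.devnum, hdr.epnum, hdr.type, hdr.len_cap);
 *		close(fd);
 *		return 0;
 *	}
 */
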
/*
 * Extract an event from buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, void __user *data, unsigned int nbytes)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	size_t step_len;
	unsigned int offset;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (copy_to_user(hdr, ep, sizeof(struct mon_bin_hdr))) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	step_len = min(ep->len_cap, nbytes);
	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size)
		offset = 0;

	if (copy_from_buf(rp, offset, data, step_len)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;

	mutex_unlock(&rp->fetch_lock);
	return 0;
}

static int mon_bin_release(struct inode *inode, struct file *file)
{
	struct mon_reader_bin *rp = file->private_data;
	struct mon_bus *mbus = rp->r.m_bus;

	mutex_lock(&mon_lock);

	if (mbus->nreaders <= 0) {
		printk(KERN_ERR TAG ": consistency error on close\n");
		mutex_unlock(&mon_lock);
		return 0;
	}
	mon_reader_del(mbus, &rp->r);

	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
	kfree(rp->b_vec);
	kfree(rp);

	mutex_unlock(&mon_lock);
	return 0;
}

static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int offset;
	size_t step_len;
	char *ptr;
	ssize_t done = 0;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (rp->b_read < sizeof(struct mon_bin_hdr)) {
		step_len = min(nbytes, sizeof(struct mon_bin_hdr) - rp->b_read);
		ptr = ((char *)ep) + rp->b_read;
		if (step_len && copy_to_user(buf, ptr, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	if (rp->b_read >= sizeof(struct mon_bin_hdr)) {
		step_len = ep->len_cap;
		step_len -= rp->b_read - sizeof(struct mon_bin_hdr);
		if (step_len > nbytes)
			step_len = nbytes;
		offset = rp->b_out + PKT_SIZE;
		offset += rp->b_read - sizeof(struct mon_bin_hdr);
		if (offset >= rp->b_size)
			offset -= rp->b_size;
		if (copy_from_buf(rp, offset, buf, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	/*
	 * Check if whole packet was read, and if so, jump to the next one.
	 */
	if (rp->b_read >= sizeof(struct mon_bin_hdr) + ep->len_cap) {
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
		spin_unlock_irqrestore(&rp->b_lock, flags);
		rp->b_read = 0;
	}

	mutex_unlock(&rp->fetch_lock);
	return done;
}

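/*
 * The read() method above can also be consumed with plain file I/O.  A
 * minimal user-space sketch (illustrative only; the node name is
 * udev-dependent and the 48-byte header is the mon_bin_hdr defined above,
 * with the captured payload following it directly in the byte stream):
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		unsigned char buf[48 + 4096];	// one header plus some payload
 *		int fd = open("/dev/usbmon0", O_RDONLY);
 *		ssize_t n;
 *
 *		while (fd >= 0 && (n = read(fd, buf, sizeof(buf))) > 0) {
 *			// buf[0..47] is one mon_bin_hdr, payload follows
 *			printf("event: %zd bytes\n", n);
 *		}
 *		return 0;
 *	}
 */
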
/*
 * Remove at most nevents from chunked buffer.
 * Returns the number of removed events.
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	int i;

	mutex_lock(&rp->fetch_lock);
	spin_lock_irqsave(&rp->b_lock, flags);
	for (i = 0; i < nevents; ++i) {
		if (MON_RING_EMPTY(rp))
			break;

		ep = MON_OFF2HDR(rp, rp->b_out);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;
	mutex_unlock(&rp->fetch_lock);
	return i;
}

/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		if (nevents >= max)
			break;

		ep = MON_OFF2HDR(rp, cur_out);
		if (put_user(cur_out, &vec[nevents])) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 * Count events. This is almost the same as the above mon_bin_fetch,
 * only we do not store offsets into user vector, and we have no limit.
 */
static int mon_bin_queued(struct mon_reader_bin *rp)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	mutex_lock(&rp->fetch_lock);

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		ep = MON_OFF2HDR(rp, cur_out);

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

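/*
 * The fetch/flush pair above is meant to be driven from user space together
 * with mmap().  A compressed, illustrative sketch (the node name is an
 * assumption; the mirrored structure and ioctl numbers must match the
 * definitions at the top of this file):
 *
 *	#include <stdint.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *
 *	struct usbmon_mfetch {		// userland mirror of mon_bin_mfetch
 *		uint32_t *offvec;	// out: byte offsets of fetched events
 *		uint32_t nfetch;	// in: slots in offvec, out: fetched
 *		uint32_t nflush;	// in: events to release before fetching
 *	};
 *	#define USBMON_IOCQ_RING_SIZE _IO(0x92, 5)
 *	#define USBMON_IOCH_MFLUSH _IO(0x92, 8)
 *	#define USBMON_IOCX_MFETCH _IOWR(0x92, 7, struct usbmon_mfetch)
 *
 *	int main(void)
 *	{
 *		uint32_t offs[32];
 *		struct usbmon_mfetch mf = { offs, 32, 0 };
 *		int fd = open("/dev/usbmon0", O_RDONLY);
 *		int ring_size;
 *		char *ring;
 *
 *		if (fd < 0)
 *			return 1;
 *		ring_size = ioctl(fd, USBMON_IOCQ_RING_SIZE, 0);
 *		ring = mmap(NULL, ring_size, PROT_READ, MAP_SHARED, fd, 0);
 *		if (ring == MAP_FAILED || ioctl(fd, USBMON_IOCX_MFETCH, &mf) < 0)
 *			return 1;
 *		// ring + offs[i] is a 64-byte header; captured data follows it
 *		ioctl(fd, USBMON_IOCH_MFLUSH, mf.nfetch);	// release them
 *		munmap(ring, ring_size);
 *		return 0;
 *	}
 */
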
/*
 */
static int mon_bin_ioctl(struct inode *inode, struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	// struct mon_bus* mbus = rp->r.m_bus;
	int ret = 0;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	switch (cmd) {

	case MON_IOCQ_URB_LEN:
		/*
		 * N.B. This only returns the size of data, without the header.
		 */
		spin_lock_irqsave(&rp->b_lock, flags);
		if (!MON_RING_EMPTY(rp)) {
			ep = MON_OFF2HDR(rp, rp->b_out);
			ret = ep->len_cap;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		break;

	case MON_IOCQ_RING_SIZE:
		ret = rp->b_size;
		break;

	case MON_IOCT_RING_SIZE:
		/*
		 * Changing the buffer size will flush its contents; the new
		 * buffer is allocated before releasing the old one to be sure
		 * the device will stay functional also in case of memory
		 * pressure.
		 */
		{
		int size;
		struct mon_pgmap *vec;

		if (arg < BUFF_MIN || arg > BUFF_MAX)
			return -EINVAL;

		size = CHUNK_ALIGN(arg);
		if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE),
		    GFP_KERNEL)) == NULL) {
			ret = -ENOMEM;
			break;
		}

		ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
		if (ret < 0) {
			kfree(vec);
			break;
		}

		mutex_lock(&rp->fetch_lock);
		spin_lock_irqsave(&rp->b_lock, flags);
		/* Free the old buffer using its own size, not the new one. */
		mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
		kfree(rp->b_vec);
		rp->b_vec = vec;
		rp->b_size = size;
		rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		mutex_unlock(&rp->fetch_lock);
		}
		break;

	case MON_IOCH_MFLUSH:
		ret = mon_bin_flush(rp, arg);
		break;

	case MON_IOCX_GET:
		{
		struct mon_bin_get getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get)))
			return -EFAULT;

		if (getb.alloc > 0x10000000)	/* Want to cast to u32 */
			return -EINVAL;
		ret = mon_bin_get_event(file, rp,
		    getb.hdr, getb.data, (unsigned int)getb.alloc);
		}
		break;

#ifdef CONFIG_COMPAT
	case MON_IOCX_GET32:
		{
		struct mon_bin_get32 getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get32)))
			return -EFAULT;

		ret = mon_bin_get_event(file, rp,
		    compat_ptr(getb.hdr32), compat_ptr(getb.data32),
		    getb.alloc32);
		}
		break;
#endif

	case MON_IOCX_MFETCH:
		{
		struct mon_bin_mfetch mfetch;
		struct mon_bin_mfetch __user *uptr;

		uptr = (struct mon_bin_mfetch __user *)arg;

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush) {
			ret = mon_bin_flush(rp, mfetch.nflush);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch))
			return -EFAULT;
		ret = 0;
		}
		break;

#ifdef CONFIG_COMPAT
	case MON_IOCX_MFETCH32:
		{
		struct mon_bin_mfetch32 mfetch;
		struct mon_bin_mfetch32 __user *uptr;

		uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush32) {
			ret = mon_bin_flush(rp, mfetch.nflush32);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush32))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
		    mfetch.nfetch32);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch32))
			return -EFAULT;
		ret = 0;
		}
		break;
#endif

	case MON_IOCG_STATS:
		{
		struct mon_bin_stats __user *sp;
		unsigned int nevents;
		unsigned int ndropped;

		spin_lock_irqsave(&rp->b_lock, flags);
		ndropped = rp->cnt_lost;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		nevents = mon_bin_queued(rp);

		sp = (struct mon_bin_stats __user *)arg;
		/* Report the saved count; cnt_lost was just reset above. */
		if (put_user(ndropped, &sp->dropped))
			return -EFAULT;
		if (put_user(nevents, &sp->queued))
			return -EFAULT;

		}
		break;

	default:
		return -ENOTTY;
	}

	return ret;
}

static unsigned int
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int mask = 0;
	unsigned long flags;

	if (file->f_mode & FMODE_READ)
		poll_wait(file, &rp->b_wait, wait);

	spin_lock_irqsave(&rp->b_lock, flags);
	if (!MON_RING_EMPTY(rp))
		mask |= POLLIN | POLLRDNORM;	/* readable */
	spin_unlock_irqrestore(&rp->b_lock, flags);
	return mask;
}

/*
 * open and close: just keep track of how many times the device is
 * mapped, to use the proper memory allocation function.
 */
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active++;
}

static void mon_bin_vma_close(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active--;
}

/*
 * Map ring pages to user space.
 */
static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	unsigned long offset, chunk_idx;
	struct page *pageptr;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rp->b_size)
		return VM_FAULT_SIGBUS;
	chunk_idx = offset / CHUNK_SIZE;
	pageptr = rp->b_vec[chunk_idx].pg;
	get_page(pageptr);
	vmf->page = pageptr;
	return 0;
}

static struct vm_operations_struct mon_bin_vm_ops = {
	.open =		mon_bin_vma_open,
	.close =	mon_bin_vma_close,
	.fault =	mon_bin_vma_fault,
};

static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* don't do anything here: "fault" will set up page table entries */
	vma->vm_ops = &mon_bin_vm_ops;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = filp->private_data;
	mon_bin_vma_open(vma);
	return 0;
}

static const struct file_operations mon_fops_binary = {
	.owner =	THIS_MODULE,
	.open =		mon_bin_open,
	.llseek =	no_llseek,
	.read =		mon_bin_read,
	/* .write =	mon_text_write, */
	.poll =		mon_bin_poll,
	.ioctl =	mon_bin_ioctl,
	.release =	mon_bin_release,
	.mmap =		mon_bin_mmap,
};

static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
	DECLARE_WAITQUEUE(waita, current);
	unsigned long flags;

	add_wait_queue(&rp->b_wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&rp->b_lock, flags);
	while (MON_RING_EMPTY(rp)) {
		spin_unlock_irqrestore(&rp->b_lock, flags);

		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->b_wait, &waita);
			return -EWOULDBLOCK;	/* Same as EAGAIN in Linux */
		}
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->b_wait, &waita);
			return -EINTR;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&rp->b_lock, flags);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->b_wait, &waita);
	return 0;
}

static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{
	int n;
	unsigned long vaddr;

	for (n = 0; n < npages; n++) {
		vaddr = get_zeroed_page(GFP_KERNEL);
		if (vaddr == 0) {
			while (n-- != 0)
				free_page((unsigned long) map[n].ptr);
			return -ENOMEM;
		}
		map[n].ptr = (unsigned char *) vaddr;
		map[n].pg = virt_to_page(vaddr);
	}
	return 0;
}

static void mon_free_buff(struct mon_pgmap *map, int npages)
{
	int n;

	for (n = 0; n < npages; n++)
		free_page((unsigned long) map[n].ptr);
}

int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
	struct device *dev;
	unsigned minor = ubus ? ubus->busnum : 0;

	if (minor >= MON_BIN_MAX_MINOR)
		return 0;

	dev = device_create(mon_bin_class, ubus ? ubus->controller : NULL,
	    MKDEV(MAJOR(mon_bin_dev0), minor), NULL,
	    "usbmon%d", minor);
	if (IS_ERR(dev))
		return 0;

	mbus->classdev = dev;
	return 1;
}

void mon_bin_del(struct mon_bus *mbus)
{
	device_destroy(mon_bin_class, mbus->classdev->devt);
}

int __init mon_bin_init(void)
{
	int rc;

	mon_bin_class = class_create(THIS_MODULE, "usbmon");
	if (IS_ERR(mon_bin_class)) {
		rc = PTR_ERR(mon_bin_class);
		goto err_class;
	}

	rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
	if (rc < 0)
		goto err_dev;

	cdev_init(&mon_bin_cdev, &mon_fops_binary);
	mon_bin_cdev.owner = THIS_MODULE;

	rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
	if (rc < 0)
		goto err_add;

	return 0;

err_add:
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
	class_destroy(mon_bin_class);
err_class:
	return rc;
}

void mon_bin_exit(void)
{
	cdev_del(&mon_bin_cdev);
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
	class_destroy(mon_bin_class);
}