/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "usb_mon.h"

/*
 * Defined by USB 2.0 clause 9.3, table 9.2.
 */
#define SETUP_LEN  8

/* ioctl macros */
#define MON_IOC_MAGIC 0x92

#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
/* #9 was MON_IOCT_SETAPI */
#define MON_IOCX_GETX _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get)

#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#define MON_IOCX_GETX32 _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32)
#endif

/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right. Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE PAGE_SIZE
#define CHUNK_ALIGN(x)	(((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))

/*
 * The magic limit was calculated so that it allows the monitoring
 * application to pick data once in two ticks. This way, another application,
 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
 * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an
 * enormous overhead built into the bus protocol, so we need about 1000 KB.
 *
 * This is still too much for most cases, where we just snoop a few
 * descriptor fetches for enumeration. So, the default is a "reasonable"
 * amount for systems with HZ=250 and incomplete bus saturation.
 *
 * XXX What about multi-megabyte URBs which take minutes to transfer?
 */
#define BUFF_MAX  CHUNK_ALIGN(1200*1024)
#define BUFF_DFL   CHUNK_ALIGN(300*1024)
#define BUFF_MIN     CHUNK_ALIGN(8*1024)

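/*
 * Worked example of the sizing above (illustrative only, not used by the
 * code): reading "480 mbit/s" as 480 * 2^20 bits per second gives
 * 480 * 2^20 / 8 = 62914560 bytes/s, i.e. 62914560 / 100 = 629145 bytes
 * (~614 KB) per jiffy at HZ=100; two ticks' worth is therefore ~1229 KB,
 * which is where the 1200 KB BUFF_MAX figure comes from once protocol
 * overhead is taken into account. CHUNK_ALIGN() only rounds that up to a
 * whole chunk: with 4 KB pages, 1200*1024 = 1228800 is already exactly
 * 300 pages; with 64 KB pages it rounds up to 19 pages (1245184 bytes).
 */
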
/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {
	u64 id;			/* URB ID - from submission to callback */
	unsigned char type;	/* Same as in text API; extensible. */
	unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
	unsigned char epnum;	/* Endpoint number and transfer direction */
	unsigned char devnum;	/* Device address */
	unsigned short busnum;	/* Bus number */
	char flag_setup;
	char flag_data;
	s64 ts_sec;		/* gettimeofday */
	s32 ts_usec;		/* gettimeofday */
	int status;
	unsigned int len_urb;	/* Length of data (submitted or actual) */
	unsigned int len_cap;	/* Delivered length */
	union {
		unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
		struct iso_rec {
			int error_count;
			int numdesc;
		} iso;
	} s;
	int interval;
	int start_frame;
	unsigned int xfer_flags;
	unsigned int ndesc;	/* Actual number of ISO descriptors */
};

/*
 * ISO vector, packed into the head of data stream.
 * This has to take 16 bytes to make sure that the end of buffer
 * wrap is not happening in the middle of a descriptor.
 */
struct mon_bin_isodesc {
	int          iso_status;
	unsigned int iso_off;
	unsigned int iso_len;
	u32 _pad;
};

/* per file statistic */
struct mon_bin_stats {
	u32 queued;
	u32 dropped;
};

struct mon_bin_get {
	struct mon_bin_hdr __user *hdr;	/* Can be 48 bytes or 64. */
	void __user *data;
	size_t alloc;		/* Length of data (can be zero) */
};

struct mon_bin_mfetch {
	u32 __user *offvec;	/* Vector of events fetched */
	u32 nfetch;		/* Number of events to fetch (out: fetched) */
	u32 nflush;		/* Number of events to flush */
};

#ifdef CONFIG_COMPAT
struct mon_bin_get32 {
	u32 hdr32;
	u32 data32;
	u32 alloc32;
};

struct mon_bin_mfetch32 {
	u32 offvec32;
	u32 nfetch32;
	u32 nflush32;
};
#endif

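/*
 * Illustrative userland sketch (not part of this driver): fetching a
 * single event with MON_IOCX_GETX. It assumes the structures and ioctl
 * numbers above have been mirrored into a userland header (without the
 * __user annotations) and that the node follows the usual "/dev/usbmonN"
 * naming, where N is the bus number and 0 means "all buses".
 *
 *	struct mon_bin_hdr hdr;
 *	unsigned char data[4096];
 *	struct mon_bin_get getb;
 *	int fd;
 *
 *	fd = open("/dev/usbmon0", O_RDONLY);
 *	getb.hdr = &hdr;
 *	getb.data = data;
 *	getb.alloc = sizeof(data);
 *	if (ioctl(fd, MON_IOCX_GETX, &getb) == 0) {
 *		hdr now holds the 64-byte header, and data[] holds up to
 *		hdr.len_cap bytes of captured payload.
 *	}
 */
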
/* Having these two values same prevents wrapping of the mon_bin_hdr */
#define PKT_ALIGN   64
#define PKT_SIZE    64

#define PKT_SZ_API0 48	/* API 0 (2.6.20) size */
#define PKT_SZ_API1 64	/* API 1 size: extra fields */

#define ISODESC_MAX   128	/* Same number as usbfs allows, 2048 bytes. */

/* max number of USB bus supported */
#define MON_BIN_MAX_MINOR 128

/*
 * The buffer: map of used pages.
 */
struct mon_pgmap {
	struct page *pg;
	unsigned char *ptr;	/* XXX just use page_to_virt everywhere? */
};

/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {
	/* The buffer: one per open. */
	spinlock_t b_lock;		/* Protect b_cnt, b_in */
	unsigned int b_size;		/* Current size of the buffer - bytes */
	unsigned int b_cnt;		/* Bytes used */
	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
	unsigned int b_read;		/* Amount of read data in curr. pkt. */
	struct mon_pgmap *b_vec;	/* The map array */
	wait_queue_head_t b_wait;	/* Wait for data here */

	struct mutex fetch_lock;	/* Protect b_read, b_out */
	int mmap_active;

	/* A list of these is needed for "bus 0". Some time later. */
	struct mon_reader r;

	/* Stats */
	unsigned int cnt_lost;
};

static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
    unsigned int offset)
{
	return (struct mon_bin_hdr *)
	    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
}

#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)

static unsigned char xfer_to_pipe[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

static struct class *mon_bin_class;
static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);

/*
 * This is a "chunked memcpy". It does not manipulate any counters.
 */
static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		memcpy(buf, from, step_len);
		if ((off += step_len) >= this->b_size)
			off = 0;
		from += step_len;
		length -= step_len;
	}
	return off;
}

/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		if (copy_to_user(to, buf, step_len))
			return -EINVAL;
		if ((off += step_len) >= this->b_size)
			off = 0;
		to += step_len;
		length -= step_len;
	}
	return 0;
}

/*
 * Allocate an (aligned) area in the buffer.
 * This is called under b_lock.
 * Returns ~0 on failure.
 */
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	offset = rp->b_in;
	rp->b_cnt += size;
	if ((rp->b_in += size) >= rp->b_size)
		rp->b_in -= rp->b_size;
	return offset;
}

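/*
 * Worked example (illustrative only): an event carrying 8 bytes of data
 * asks the allocator above for PKT_SIZE + 8 = 72 bytes, which is rounded
 * up to the next PKT_ALIGN multiple, so 128 bytes of the ring are consumed
 * and the next event again starts on a 64-byte boundary. Because
 * CHUNK_SIZE is a multiple of PKT_ALIGN, a header located with
 * MON_OFF2HDR() never straddles a chunk boundary.
 */
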
/*
 * This is the same thing as mon_buff_area_alloc, only it does not allow
 * buffers to wrap. This is needed by applications which pass references
 * into mmap-ed buffers up their stacks (libpcap can do that).
 *
 * Currently, we always have the header stuck with the data, although
 * it is not strictly speaking necessary.
 *
 * When a buffer would wrap, we place a filler packet to mark the space.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;
	unsigned int fill_size;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	if (rp->b_in + size > rp->b_size) {
		/*
		 * This would wrap. Find if we still have space after
		 * skipping to the end of the buffer. If we do, place
		 * a filler packet and allocate a new packet.
		 */
		fill_size = rp->b_size - rp->b_in;
		if (rp->b_cnt + size + fill_size > rp->b_size)
			return ~0;
		mon_buff_area_fill(rp, rp->b_in, fill_size);

		offset = 0;
		rp->b_in = size;
		rp->b_cnt += size + fill_size;
	} else if (rp->b_in + size == rp->b_size) {
		offset = rp->b_in;
		rp->b_in = 0;
		rp->b_cnt += size;
	} else {
		offset = rp->b_in;
		rp->b_in += size;
		rp->b_cnt += size;
	}
	return offset;
}

/*
 * Return a few (kilo-)bytes to the head of the buffer.
 * This is used if a data fetch fails.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{

	/* size &= ~(PKT_ALIGN-1);  -- we're called with aligned size */
	rp->b_cnt -= size;
	if (rp->b_in < size)
		rp->b_in += rp->b_size;
	rp->b_in -= size;
}

/*
 * This has to be called under both b_lock and fetch_lock, because
 * it accesses both b_cnt and b_out.
 */
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if ((rp->b_out += size) >= rp->b_size)
		rp->b_out -= rp->b_size;
}

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{
	struct mon_bin_hdr *ep;

	ep = MON_OFF2HDR(rp, offset);
	memset(ep, 0, PKT_SIZE);
	ep->type = '@';
	ep->len_cap = size - PKT_SIZE;
}

static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{

	if (urb->setup_packet == NULL)
		return 'Z';
	memcpy(setupb, urb->setup_packet, SETUP_LEN);
	return 0;
}

static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length,
    char *flag)
{
	int i;
	struct scatterlist *sg;
	unsigned int this_len;

	*flag = 0;
	if (urb->num_sgs == 0) {
		if (urb->transfer_buffer == NULL) {
			*flag = 'Z';
			return length;
		}
		mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
		length = 0;

	} else {
		/* If IOMMU coalescing occurred, we cannot trust sg_page */
		if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
			*flag = 'D';
			return length;
		}

		/* Copy up to the first non-addressable segment */
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			if (length == 0 || PageHighMem(sg_page(sg)))
				break;
			this_len = min_t(unsigned int, sg->length, length);
			offset = mon_copy_to_buff(rp, offset, sg_virt(sg),
			    this_len);
			length -= this_len;
		}
		if (i == 0)
			*flag = 'D';
	}

	return length;
}

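/*
 * Reader-side sketch (illustrative, not part of the driver): a consumer
 * walking an mmap-ed ring, or the offsets returned by MON_IOCX_MFETCH,
 * must treat the '@' records written by mon_buff_area_fill() as padding
 * that merely marks the unused tail of the buffer:
 *
 *	hdr = (struct mon_bin_hdr *) (ring + off);
 *	if (hdr->type == '@') {
 *		off = 0;	the filler covers the tail; wrap around
 *		continue;
 *	}
 */
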
static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc)
{
	struct mon_bin_isodesc *dp;
	struct usb_iso_packet_descriptor *fp;

	fp = urb->iso_frame_desc;
	while (ndesc-- != 0) {
		dp = (struct mon_bin_isodesc *)
		    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
		dp->iso_status = fp->status;
		dp->iso_off = fp->offset;
		dp->iso_len = (ev_type == 'S') ? fp->length : fp->actual_length;
		dp->_pad = 0;
		if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size)
			offset = 0;
		fp++;
	}
}

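/*
 * Resulting in-ring layout for an isochronous URB (illustrative summary):
 *
 *	+0		struct mon_bin_hdr (PKT_SIZE = 64 bytes)
 *	+64		ndesc * struct mon_bin_isodesc (16 bytes each)
 *	+64+16*ndesc	captured data, if any
 *
 * len_cap counts the descriptors plus the data. iso_off and iso_len are
 * taken verbatim from urb->iso_frame_desc[], so they refer to offsets
 * within the URB's transfer buffer.
 */
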
static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type, int status)
{
	const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
	struct timeval ts;
	unsigned long flags;
	unsigned int urb_length;
	unsigned int offset;
	unsigned int length;
	unsigned int delta;
	unsigned int ndesc, lendesc;
	unsigned char dir;
	struct mon_bin_hdr *ep;
	char data_tag = 0;

	do_gettimeofday(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	/*
	 * Find the maximum allowable length, then allocate space.
	 */
	if (usb_endpoint_xfer_isoc(epd)) {
		if (urb->number_of_packets < 0) {
			ndesc = 0;
		} else if (urb->number_of_packets >= ISODESC_MAX) {
			ndesc = ISODESC_MAX;
		} else {
			ndesc = urb->number_of_packets;
		}
	} else {
		ndesc = 0;
	}
	lendesc = ndesc*sizeof(struct mon_bin_isodesc);

	urb_length = (ev_type == 'S') ?
	    urb->transfer_buffer_length : urb->actual_length;
	length = urb_length;

	if (length >= rp->b_size/5)
		length = rp->b_size/5;

	if (usb_urb_dir_in(urb)) {
		if (ev_type == 'S') {
			length = 0;
			data_tag = '<';
		}
		/* Cannot rely on endpoint number in case of control ep.0 */
		dir = USB_DIR_IN;
	} else {
		if (ev_type == 'C') {
			length = 0;
			data_tag = '>';
		}
		dir = 0;
	}

	if (rp->mmap_active) {
		offset = mon_buff_area_alloc_contiguous(rp,
		    length + PKT_SIZE + lendesc);
	} else {
		offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc);
	}
	if (offset == ~0) {
		rp->cnt_lost++;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);
	if ((offset += PKT_SIZE) >= rp->b_size)
		offset = 0;

	/*
	 * Fill the allocated area.
	 */
	memset(ep, 0, PKT_SIZE);
	ep->type = ev_type;
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
	ep->epnum = dir | usb_endpoint_num(epd);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_usec;
	ep->status = status;
	ep->len_urb = urb_length;
	ep->len_cap = length + lendesc;
	ep->xfer_flags = urb->transfer_flags;

	if (usb_endpoint_xfer_int(epd)) {
		ep->interval = urb->interval;
	} else if (usb_endpoint_xfer_isoc(epd)) {
		ep->interval = urb->interval;
		ep->start_frame = urb->start_frame;
		ep->s.iso.error_count = urb->error_count;
		ep->s.iso.numdesc = urb->number_of_packets;
	}

	if (usb_endpoint_xfer_control(epd) && ev_type == 'S') {
		ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type);
	} else {
		ep->flag_setup = '-';
	}

	if (ndesc != 0) {
		ep->ndesc = ndesc;
		mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc);
		if ((offset += lendesc) >= rp->b_size)
			offset -= rp->b_size;
	}

	if (length != 0) {
		length = mon_bin_get_data(rp, offset, urb, length,
		    &ep->flag_data);
		if (length > 0) {
			delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
			ep->len_cap -= length;
			delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
			mon_buff_area_shrink(rp, delta);
		}
	} else {
		ep->flag_data = data_tag;
	}

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static void mon_bin_submit(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'S', -EINPROGRESS);
}

static void mon_bin_complete(void *data, struct urb *urb, int status)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'C', status);
}

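/*
 * Note for consumers (illustrative): one URB produces one 'S' record at
 * submission, then either a 'C' record at completion or an 'E' record if
 * the submission fails, and all of them carry the same id (the urb
 * pointer), so userland can pair a callback with its submission by
 * matching hdr.id, for example with a hash table keyed on that value.
 */
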
static void mon_bin_error(void *data, struct urb *urb, int error)
{
	struct mon_reader_bin *rp = data;
	struct timeval ts;
	unsigned long flags;
	unsigned int offset;
	struct mon_bin_hdr *ep;

	do_gettimeofday(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	offset = mon_buff_area_alloc(rp, PKT_SIZE);
	if (offset == ~0) {
		/* Not incrementing cnt_lost. Just because. */
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);

	memset(ep, 0, PKT_SIZE);
	ep->type = 'E';
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
	ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
	ep->epnum |= usb_endpoint_num(&urb->ep->desc);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_usec;
	ep->status = error;

	ep->flag_setup = '-';
	ep->flag_data = 'E';

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static int mon_bin_open(struct inode *inode, struct file *file)
{
	struct mon_bus *mbus;
	struct mon_reader_bin *rp;
	size_t size;
	int rc;

	lock_kernel();
	mutex_lock(&mon_lock);
	if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
		mutex_unlock(&mon_lock);
		unlock_kernel();
		return -ENODEV;
	}
	if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
		printk(KERN_ERR TAG ": consistency error on open\n");
		mutex_unlock(&mon_lock);
		unlock_kernel();
		return -ENODEV;
	}

	rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
	if (rp == NULL) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	spin_lock_init(&rp->b_lock);
	init_waitqueue_head(&rp->b_wait);
	mutex_init(&rp->fetch_lock);
	rp->b_size = BUFF_DFL;

	size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
	if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
		rc = -ENOMEM;
		goto err_allocvec;
	}

	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
		goto err_allocbuff;

	rp->r.m_bus = mbus;
	rp->r.r_data = rp;
	rp->r.rnf_submit = mon_bin_submit;
	rp->r.rnf_error = mon_bin_error;
	rp->r.rnf_complete = mon_bin_complete;

	mon_reader_add(mbus, &rp->r);

	file->private_data = rp;
	mutex_unlock(&mon_lock);
	unlock_kernel();
	return 0;

err_allocbuff:
	kfree(rp->b_vec);
err_allocvec:
	kfree(rp);
err_alloc:
	mutex_unlock(&mon_lock);
	unlock_kernel();
	return rc;
}

/*
 * Extract an event from buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, unsigned int hdrbytes,
    void __user *data, unsigned int nbytes)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	size_t step_len;
	unsigned int offset;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (copy_to_user(hdr, ep, hdrbytes)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	step_len = min(ep->len_cap, nbytes);
	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size)
		offset = 0;

	if (copy_from_buf(rp, offset, data, step_len)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;

	mutex_unlock(&rp->fetch_lock);
	return 0;
}

static int mon_bin_release(struct inode *inode, struct file *file)
{
	struct mon_reader_bin *rp = file->private_data;
	struct mon_bus *mbus = rp->r.m_bus;

	mutex_lock(&mon_lock);

	if (mbus->nreaders <= 0) {
		printk(KERN_ERR TAG ": consistency error on close\n");
		mutex_unlock(&mon_lock);
		return 0;
	}
	mon_reader_del(mbus, &rp->r);

	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
	kfree(rp->b_vec);
	kfree(rp);

	mutex_unlock(&mon_lock);
	return 0;
}

static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int hdrbytes = PKT_SZ_API0;
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int offset;
	size_t step_len;
	char *ptr;
	ssize_t done = 0;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (rp->b_read < hdrbytes) {
		step_len = min(nbytes, (size_t)(hdrbytes - rp->b_read));
		ptr = ((char *)ep) + rp->b_read;
		if (step_len && copy_to_user(buf, ptr, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	if (rp->b_read >= hdrbytes) {
		step_len = ep->len_cap;
		step_len -= rp->b_read - hdrbytes;
		if (step_len > nbytes)
			step_len = nbytes;
		offset = rp->b_out + PKT_SIZE;
		offset += rp->b_read - hdrbytes;
		if (offset >= rp->b_size)
			offset -= rp->b_size;
		if (copy_from_buf(rp, offset, buf, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	/*
	 * Check if whole packet was read, and if so, jump to the next one.
	 */
	if (rp->b_read >= hdrbytes + ep->len_cap) {
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
		spin_unlock_irqrestore(&rp->b_lock, flags);
		rp->b_read = 0;
	}

	mutex_unlock(&rp->fetch_lock);
	return done;
}

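/*
 * Illustrative read(2) usage (not part of the driver): the plain read
 * interface always delivers the 48-byte API 0 header, and a single call
 * returns at most one event; an event may also be consumed in several
 * short reads, since the ring only advances once header plus len_cap
 * bytes of the current event have been read.
 *
 *	char buf[48 + 4096];
 *	ssize_t n;
 *
 *	n = read(fd, buf, sizeof(buf));
 *	the first 48 bytes are the header, the rest up to len_cap data bytes
 */
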
/*
 * Remove at most nevents from chunked buffer.
 * Returns the number of removed events.
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	int i;

	mutex_lock(&rp->fetch_lock);
	spin_lock_irqsave(&rp->b_lock, flags);
	for (i = 0; i < nevents; ++i) {
		if (MON_RING_EMPTY(rp))
			break;

		ep = MON_OFF2HDR(rp, rp->b_out);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;
	mutex_unlock(&rp->fetch_lock);
	return i;
}

/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		if (nevents >= max)
			break;

		ep = MON_OFF2HDR(rp, cur_out);
		if (put_user(cur_out, &vec[nevents])) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 * Count events. This is almost the same as the above mon_bin_fetch,
 * only we do not store offsets into user vector, and we have no limit.
 */
static int mon_bin_queued(struct mon_reader_bin *rp)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	mutex_lock(&rp->fetch_lock);

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		ep = MON_OFF2HDR(rp, cur_out);

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

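/*
 * Illustrative mmap + MFETCH flow (not part of the driver): the
 * application maps the ring once and then loops, asking for ready event
 * offsets and flushing them after use. The vector size is an arbitrary
 * choice of the example.
 *
 *	u32 offs[64];
 *	struct mon_bin_mfetch fetch;
 *	void *ring;
 *	int i, n, size;
 *
 *	size = ioctl(fd, MON_IOCQ_RING_SIZE);
 *	ring = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
 *	n = 0;
 *	for (;;) {
 *		fetch.offvec = offs;
 *		fetch.nfetch = 64;
 *		fetch.nflush = n;	events consumed on the previous pass
 *		if (ioctl(fd, MON_IOCX_MFETCH, &fetch) < 0)
 *			break;
 *		n = fetch.nfetch;
 *		for (i = 0; i < n; i++)
 *			parse (struct mon_bin_hdr *) (ring + offs[i]),
 *			skipping '@' filler records
 *	}
 */
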
/*
 */
static int mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	// struct mon_bus* mbus = rp->r.m_bus;
	int ret = 0;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	switch (cmd) {

	case MON_IOCQ_URB_LEN:
		/*
		 * N.B. This only returns the size of data, without the header.
		 */
		spin_lock_irqsave(&rp->b_lock, flags);
		if (!MON_RING_EMPTY(rp)) {
			ep = MON_OFF2HDR(rp, rp->b_out);
			ret = ep->len_cap;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		break;

	case MON_IOCQ_RING_SIZE:
		ret = rp->b_size;
		break;

	case MON_IOCT_RING_SIZE:
		/*
		 * Changing the buffer size will flush its contents; the new
		 * buffer is allocated before releasing the old one to be sure
		 * the device will stay functional also in case of memory
		 * pressure.
		 */
		{
		int size;
		struct mon_pgmap *vec;

		if (arg < BUFF_MIN || arg > BUFF_MAX)
			return -EINVAL;

		size = CHUNK_ALIGN(arg);
		if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE),
		     GFP_KERNEL)) == NULL) {
			ret = -ENOMEM;
			break;
		}

		ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
		if (ret < 0) {
			kfree(vec);
			break;
		}

		mutex_lock(&rp->fetch_lock);
		spin_lock_irqsave(&rp->b_lock, flags);
		/* Free the old vector with the old page count, not the new. */
		mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
		kfree(rp->b_vec);
		rp->b_vec  = vec;
		rp->b_size = size;
		rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		mutex_unlock(&rp->fetch_lock);
		}
		break;

	case MON_IOCH_MFLUSH:
		ret = mon_bin_flush(rp, arg);
		break;

	case MON_IOCX_GET:
	case MON_IOCX_GETX:
		{
		struct mon_bin_get getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get)))
			return -EFAULT;

		if (getb.alloc > 0x10000000)	/* Want to cast to u32 */
			return -EINVAL;
		ret = mon_bin_get_event(file, rp, getb.hdr,
		    (cmd == MON_IOCX_GET)? PKT_SZ_API0: PKT_SZ_API1,
		    getb.data, (unsigned int)getb.alloc);
		}
		break;

	case MON_IOCX_MFETCH:
		{
		struct mon_bin_mfetch mfetch;
		struct mon_bin_mfetch __user *uptr;

		uptr = (struct mon_bin_mfetch __user *)arg;

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush) {
			ret = mon_bin_flush(rp, mfetch.nflush);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch))
			return -EFAULT;
		ret = 0;
		}
		break;

	case MON_IOCG_STATS: {
		struct mon_bin_stats __user *sp;
		unsigned int nevents;
		unsigned int ndropped;

		spin_lock_irqsave(&rp->b_lock, flags);
		ndropped = rp->cnt_lost;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		nevents = mon_bin_queued(rp);

		sp = (struct mon_bin_stats __user *)arg;
		/* Report the snapshot taken above; cnt_lost is already zero. */
		if (put_user(ndropped, &sp->dropped))
			return -EFAULT;
		if (put_user(nevents, &sp->queued))
			return -EFAULT;

		}
		break;

	default:
		return -ENOTTY;
	}

	return ret;
}

static long mon_bin_unlocked_ioctl(struct file *file, unsigned int cmd,
    unsigned long arg)
{
	int ret;

	lock_kernel();
	ret = mon_bin_ioctl(file, cmd, arg);
	unlock_kernel();

	return ret;
}

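/*
 * Illustrative control-path usage (not part of the driver): resizing the
 * ring before heavy traffic and checking the drop counter afterwards.
 *
 *	struct mon_bin_stats st;
 *
 *	if (ioctl(fd, MON_IOCT_RING_SIZE, 1200*1024) < 0)
 *		fall back to the default 300 KB ring
 *	...
 *	if (ioctl(fd, MON_IOCG_STATS, &st) == 0 && st.dropped != 0)
 *		the ring overflowed and events were lost
 */
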
#ifdef CONFIG_COMPAT
static long mon_bin_compat_ioctl(struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	int ret;

	switch (cmd) {

	case MON_IOCX_GET32:
	case MON_IOCX_GETX32:
		{
		struct mon_bin_get32 getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get32)))
			return -EFAULT;

		ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32),
		    (cmd == MON_IOCX_GET32)? PKT_SZ_API0: PKT_SZ_API1,
		    compat_ptr(getb.data32), getb.alloc32);
		if (ret < 0)
			return ret;
		}
		return 0;

	case MON_IOCX_MFETCH32:
		{
		struct mon_bin_mfetch32 mfetch;
		struct mon_bin_mfetch32 __user *uptr;

		uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush32) {
			ret = mon_bin_flush(rp, mfetch.nflush32);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush32))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
		    mfetch.nfetch32);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch32))
			return -EFAULT;
		}
		return 0;

	case MON_IOCG_STATS:
		return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg));

	case MON_IOCQ_URB_LEN:
	case MON_IOCQ_RING_SIZE:
	case MON_IOCT_RING_SIZE:
	case MON_IOCH_MFLUSH:
		return mon_bin_ioctl(file, cmd, arg);

	default:
		;
	}
	return -ENOTTY;
}
#endif /* CONFIG_COMPAT */

static unsigned int
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int mask = 0;
	unsigned long flags;

	if (file->f_mode & FMODE_READ)
		poll_wait(file, &rp->b_wait, wait);

	spin_lock_irqsave(&rp->b_lock, flags);
	if (!MON_RING_EMPTY(rp))
		mask |= POLLIN | POLLRDNORM;	/* readable */
	spin_unlock_irqrestore(&rp->b_lock, flags);
	return mask;
}

/*
 * open and close: just keep track of how many times the device is
 * mapped, to use the proper memory allocation function.
 */
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active++;
}

static void mon_bin_vma_close(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active--;
}

/*
 * Map ring pages to user space.
 */
static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	unsigned long offset, chunk_idx;
	struct page *pageptr;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rp->b_size)
		return VM_FAULT_SIGBUS;
	chunk_idx = offset / CHUNK_SIZE;
	pageptr = rp->b_vec[chunk_idx].pg;
	get_page(pageptr);
	vmf->page = pageptr;
	return 0;
}

static const struct vm_operations_struct mon_bin_vm_ops = {
	.open =     mon_bin_vma_open,
	.close =    mon_bin_vma_close,
	.fault =    mon_bin_vma_fault,
};

static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* don't do anything here: "fault" will set up page table entries */
	vma->vm_ops = &mon_bin_vm_ops;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = filp->private_data;
	mon_bin_vma_open(vma);
	return 0;
}

static const struct file_operations mon_fops_binary = {
	.owner =	THIS_MODULE,
	.open =		mon_bin_open,
	.llseek =	no_llseek,
	.read =		mon_bin_read,
	/* .write =	mon_text_write, */
	.poll =		mon_bin_poll,
	.unlocked_ioctl = mon_bin_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	mon_bin_compat_ioctl,
#endif
	.release =	mon_bin_release,
	.mmap =		mon_bin_mmap,
};

static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
	DECLARE_WAITQUEUE(waita, current);
	unsigned long flags;

	add_wait_queue(&rp->b_wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&rp->b_lock, flags);
	while (MON_RING_EMPTY(rp)) {
		spin_unlock_irqrestore(&rp->b_lock, flags);

		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->b_wait, &waita);
			return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
		}
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->b_wait, &waita);
			return -EINTR;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&rp->b_lock, flags);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->b_wait, &waita);
	return 0;
}

static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{
	int n;
	unsigned long vaddr;

	for (n = 0; n < npages; n++) {
		vaddr = get_zeroed_page(GFP_KERNEL);
		if (vaddr == 0) {
			while (n-- != 0)
				free_page((unsigned long) map[n].ptr);
			return -ENOMEM;
		}
		map[n].ptr = (unsigned char *) vaddr;
		map[n].pg = virt_to_page((void *) vaddr);
	}
	return 0;
}

static void mon_free_buff(struct mon_pgmap *map, int npages)
{
	int n;

	for (n = 0; n < npages; n++)
		free_page((unsigned long) map[n].ptr);
}

int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
	struct device *dev;
	unsigned minor = ubus? ubus->busnum: 0;

	if (minor >= MON_BIN_MAX_MINOR)
		return 0;

	dev = device_create(mon_bin_class, ubus ? ubus->controller : NULL,
			    MKDEV(MAJOR(mon_bin_dev0), minor), NULL,
			    "usbmon%d", minor);
	if (IS_ERR(dev))
		return 0;

	mbus->classdev = dev;
	return 1;
}

void mon_bin_del(struct mon_bus *mbus)
{
	device_destroy(mon_bin_class, mbus->classdev->devt);
}

int __init mon_bin_init(void)
{
	int rc;

	mon_bin_class = class_create(THIS_MODULE, "usbmon");
	if (IS_ERR(mon_bin_class)) {
		rc = PTR_ERR(mon_bin_class);
		goto err_class;
	}

	rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
	if (rc < 0)
		goto err_dev;

	cdev_init(&mon_bin_cdev, &mon_fops_binary);
	mon_bin_cdev.owner = THIS_MODULE;

	rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
	if (rc < 0)
		goto err_add;

	return 0;

err_add:
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
	class_destroy(mon_bin_class);
err_class:
	return rc;
}

void mon_bin_exit(void)
{
	cdev_del(&mon_bin_cdev);
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
	class_destroy(mon_bin_class);
}