// SPDX-License-Identifier: GPL-2.0
/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/export.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/time64.h>

#include <linux/uaccess.h>

#include "usb_mon.h"

/*
 * Defined by USB 2.0 clause 9.3, table 9.2.
 */
#define SETUP_LEN  8

/* ioctl macros */
#define MON_IOC_MAGIC 0x92

#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET   _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
/* #9 was MON_IOCT_SETAPI */
#define MON_IOCX_GETX   _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get)

#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#define MON_IOCX_GETX32   _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32)
#endif

/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right. Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE PAGE_SIZE
#define CHUNK_ALIGN(x)   (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))

/*
 * The magic limit was calculated so that it allows the monitoring
 * application to pick data once in two ticks. This way, another application,
 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
 * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an
 * enormous overhead built into the bus protocol, so we need about 1000 KB.
 *
 * This is still too much for most cases, where we just snoop a few
 * descriptor fetches for enumeration. So, the default is a "reasonable"
 * amount for systems with HZ=250 and incomplete bus saturation.
 *
 * XXX What about multi-megabyte URBs which take minutes to transfer?
 */
#define BUFF_MAX  CHUNK_ALIGN(1200*1024)
#define BUFF_DFL   CHUNK_ALIGN(300*1024)
#define BUFF_MIN     CHUNK_ALIGN(8*1024)

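/*
 * Arithmetic behind the 614 KB figure above (for reference): it works out
 * if 480 Mbit is taken as 480 * 2^20 bits, i.e. 480 * 2^20 / 8 bytes per
 * second, which is about 614 KiB per 10 ms jiffy when HZ=100.
 */
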
/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {
	u64 id;			/* URB ID - from submission to callback */
	unsigned char type;	/* Same as in text API; extensible. */
	unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
	unsigned char epnum;	/* Endpoint number and transfer direction */
	unsigned char devnum;	/* Device address */
	unsigned short busnum;	/* Bus number */
	char flag_setup;
	char flag_data;
	s64 ts_sec;		/* ktime_get_real_ts64 */
	s32 ts_usec;		/* ktime_get_real_ts64 */
	int status;
	unsigned int len_urb;	/* Length of data (submitted or actual) */
	unsigned int len_cap;	/* Delivered length */
	union {
		unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
		struct iso_rec {
			int error_count;
			int numdesc;
		} iso;
	} s;
	int interval;
	int start_frame;
	unsigned int xfer_flags;
	unsigned int ndesc;	/* Actual number of ISO descriptors */
};

/*
 * ISO vector, packed into the head of data stream.
 * This has to take 16 bytes to make sure that the end of buffer
 * wrap is not happening in the middle of a descriptor.
 */
struct mon_bin_isodesc {
	int          iso_status;
	unsigned int iso_off;
	unsigned int iso_len;
	u32 _pad;
};

/* per file statistic */
struct mon_bin_stats {
	u32 queued;
	u32 dropped;
};

struct mon_bin_get {
	struct mon_bin_hdr __user *hdr;	/* Can be 48 bytes or 64. */
	void __user *data;
	size_t alloc;		/* Length of data (can be zero) */
};

struct mon_bin_mfetch {
	u32 __user *offvec;	/* Vector of events fetched */
	u32 nfetch;		/* Number of events to fetch (out: fetched) */
	u32 nflush;		/* Number of events to flush */
};

#ifdef CONFIG_COMPAT
struct mon_bin_get32 {
	u32 hdr32;
	u32 data32;
	u32 alloc32;
};

struct mon_bin_mfetch32 {
	u32 offvec32;
	u32 nfetch32;
	u32 nflush32;
};
#endif

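/*
 * Illustrative userspace sketch (not part of this driver): a reader
 * typically opens /dev/usbmonN and pulls one event at a time with
 * MON_IOCX_GETX, passing an API 1 header plus a data buffer, e.g.
 *
 *	struct mon_bin_hdr hdr;
 *	unsigned char data[4096];
 *	struct mon_bin_get getb = {
 *		.hdr = &hdr, .data = data, .alloc = sizeof(data)
 *	};
 *	int rc = ioctl(fd, MON_IOCX_GETX, &getb);
 *
 * On success, hdr describes the URB and data holds at most alloc bytes
 * of captured payload. See Documentation/usb/usbmon.rst for the
 * authoritative description of the API.
 */
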
/* Having these two values same prevents wrapping of the mon_bin_hdr */
#define PKT_ALIGN   64
#define PKT_SIZE    64

#define PKT_SZ_API0 48	/* API 0 (2.6.20) size */
#define PKT_SZ_API1 64	/* API 1 size: extra fields */

#define ISODESC_MAX   128	/* Same number as usbfs allows, 2048 bytes. */

/* max number of USB buses supported */
#define MON_BIN_MAX_MINOR 128

/*
 * The buffer: map of used pages.
 */
struct mon_pgmap {
	struct page *pg;
	unsigned char *ptr;	/* XXX just use page_to_virt everywhere? */
};

/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {
	/* The buffer: one per open. */
	spinlock_t b_lock;		/* Protect b_cnt, b_in */
	unsigned int b_size;		/* Current size of the buffer - bytes */
	unsigned int b_cnt;		/* Bytes used */
	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
	unsigned int b_read;		/* Amount of read data in curr. pkt. */
	struct mon_pgmap *b_vec;	/* The map array */
	wait_queue_head_t b_wait;	/* Wait for data here */

	struct mutex fetch_lock;	/* Protect b_read, b_out */
	int mmap_active;

	/* A list of these is needed for "bus 0". Some time later. */
	struct mon_reader r;

	/* Stats */
	unsigned int cnt_lost;
};

static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
    unsigned int offset)
{
	return (struct mon_bin_hdr *)
	    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
}

#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)

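/*
 * Maps usb_endpoint_type() (USB_ENDPOINT_XFER_CONTROL/ISOC/BULK/INT)
 * to the PIPE_* value reported in mon_bin_hdr.xfer_type.
 */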
static unsigned char xfer_to_pipe[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

static const struct class mon_bin_class = {
	.name = "usbmon",
};

static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);

/*
 * This is a "chunked memcpy". It does not manipulate any counters.
 */
static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		memcpy(buf, from, step_len);
		if ((off += step_len) >= this->b_size) off = 0;
		from += step_len;
		length -= step_len;
	}
	return off;
}

/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		if (copy_to_user(to, buf, step_len))
			return -EINVAL;
		if ((off += step_len) >= this->b_size) off = 0;
		to += step_len;
		length -= step_len;
	}
	return 0;
}

/*
 * Allocate an (aligned) area in the buffer.
 * This is called under b_lock.
 * Returns ~0 on failure.
 */
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	offset = rp->b_in;
	rp->b_cnt += size;
	if ((rp->b_in += size) >= rp->b_size)
		rp->b_in -= rp->b_size;
	return offset;
}

/*
 * This is the same thing as mon_buff_area_alloc, only it does not allow
 * buffers to wrap. This is needed by applications which pass references
 * into mmap-ed buffers up their stacks (libpcap can do that).
 *
 * Currently, we always have the header stuck with the data, although
 * it is not strictly speaking necessary.
 *
 * When a buffer would wrap, we place a filler packet to mark the space.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;
	unsigned int fill_size;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	if (rp->b_in + size > rp->b_size) {
		/*
		 * This would wrap. Find if we still have space after
		 * skipping to the end of the buffer. If we do, place
		 * a filler packet and allocate a new packet.
		 */
		fill_size = rp->b_size - rp->b_in;
		if (rp->b_cnt + size + fill_size > rp->b_size)
			return ~0;
		mon_buff_area_fill(rp, rp->b_in, fill_size);

		offset = 0;
		rp->b_in = size;
		rp->b_cnt += size + fill_size;
	} else if (rp->b_in + size == rp->b_size) {
		offset = rp->b_in;
		rp->b_in = 0;
		rp->b_cnt += size;
	} else {
		offset = rp->b_in;
		rp->b_in += size;
		rp->b_cnt += size;
	}
	return offset;
}

/*
 * Return a few (kilo-)bytes to the head of the buffer.
 * This is used if a data fetch fails.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{

	/* size &= ~(PKT_ALIGN-1);  -- we're called with aligned size */
	rp->b_cnt -= size;
	if (rp->b_in < size)
		rp->b_in += rp->b_size;
	rp->b_in -= size;
}

/*
 * This has to be called under both b_lock and fetch_lock, because
 * it accesses both b_cnt and b_out.
 */
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if ((rp->b_out += size) >= rp->b_size)
		rp->b_out -= rp->b_size;
}

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{
	struct mon_bin_hdr *ep;

	ep = MON_OFF2HDR(rp, offset);
	memset(ep, 0, PKT_SIZE);
	ep->type = '@';
	ep->len_cap = size - PKT_SIZE;
}

static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{

	if (urb->setup_packet == NULL)
		return 'Z';
	memcpy(setupb, urb->setup_packet, SETUP_LEN);
	return 0;
}

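/*
 * Copy the URB data into the ring at the given offset.
 * Returns the number of bytes that could NOT be copied (the caller trims
 * len_cap and shrinks the allocated area accordingly); *flag is set to
 * 'Z' or 'D' when the data is not addressable from here.
 */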
static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length,
    char *flag)
{
	int i;
	struct scatterlist *sg;
	unsigned int this_len;

	*flag = 0;
	if (urb->num_sgs == 0) {
		if (urb->transfer_buffer == NULL) {
			*flag = 'Z';
			return length;
		}
		mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
		length = 0;

	} else {
		/* If IOMMU coalescing occurred, we cannot trust sg_page */
		if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
			*flag = 'D';
			return length;
		}

		/* Copy up to the first non-addressable segment */
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			if (length == 0 || PageHighMem(sg_page(sg)))
				break;
			this_len = min_t(unsigned int, sg->length, length);
			offset = mon_copy_to_buff(rp, offset, sg_virt(sg),
					this_len);
			length -= this_len;
		}
		if (i == 0)
			*flag = 'D';
	}

	return length;
}

/*
 * This is the look-ahead pass in case of 'C Zi', when actual_length cannot
 * be used to determine the length of the whole contiguous buffer.
 */
static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp,
    struct urb *urb, unsigned int ndesc)
{
	struct usb_iso_packet_descriptor *fp;
	unsigned int length;

	length = 0;
	fp = urb->iso_frame_desc;
	while (ndesc-- != 0) {
		if (fp->actual_length != 0) {
			if (fp->offset + fp->actual_length > length)
				length = fp->offset + fp->actual_length;
		}
		fp++;
	}
	return length;
}

static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc)
{
	struct mon_bin_isodesc *dp;
	struct usb_iso_packet_descriptor *fp;

	fp = urb->iso_frame_desc;
	while (ndesc-- != 0) {
		dp = (struct mon_bin_isodesc *)
		    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
		dp->iso_status = fp->status;
		dp->iso_off = fp->offset;
		dp->iso_len = (ev_type == 'S') ? fp->length : fp->actual_length;
		dp->_pad = 0;
		if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size)
			offset = 0;
		fp++;
	}
}

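/*
 * Record one event ('S' for submission, 'C' for completion) in this
 * reader's ring: a PKT_SIZE header, followed by len_cap bytes of ISO
 * descriptors and captured data.
 */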
static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type, int status)
{
	const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
	struct timespec64 ts;
	unsigned long flags;
	unsigned int urb_length;
	unsigned int offset;
	unsigned int length;
	unsigned int delta;
	unsigned int ndesc, lendesc;
	unsigned char dir;
	struct mon_bin_hdr *ep;
	char data_tag = 0;

	ktime_get_real_ts64(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	/*
	 * Find the maximum allowable length, then allocate space.
	 */
	urb_length = (ev_type == 'S') ?
	    urb->transfer_buffer_length : urb->actual_length;
	length = urb_length;

	if (usb_endpoint_xfer_isoc(epd)) {
		if (urb->number_of_packets < 0) {
			ndesc = 0;
		} else if (urb->number_of_packets >= ISODESC_MAX) {
			ndesc = ISODESC_MAX;
		} else {
			ndesc = urb->number_of_packets;
		}
		if (ev_type == 'C' && usb_urb_dir_in(urb))
			length = mon_bin_collate_isodesc(rp, urb, ndesc);
	} else {
		ndesc = 0;
	}
	lendesc = ndesc*sizeof(struct mon_bin_isodesc);

	/* not an issue unless there's a subtle bug in a HCD somewhere */
	if (length >= urb->transfer_buffer_length)
		length = urb->transfer_buffer_length;

	if (length >= rp->b_size/5)
		length = rp->b_size/5;

	if (usb_urb_dir_in(urb)) {
		if (ev_type == 'S') {
			length = 0;
			data_tag = '<';
		}
		/* Cannot rely on endpoint number in case of control ep.0 */
		dir = USB_DIR_IN;
	} else {
		if (ev_type == 'C') {
			length = 0;
			data_tag = '>';
		}
		dir = 0;
	}

	if (rp->mmap_active) {
		offset = mon_buff_area_alloc_contiguous(rp,
						 length + PKT_SIZE + lendesc);
	} else {
		offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc);
	}
	if (offset == ~0) {
		rp->cnt_lost++;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);
	if ((offset += PKT_SIZE) >= rp->b_size) offset = 0;

	/*
	 * Fill the allocated area.
	 */
	memset(ep, 0, PKT_SIZE);
	ep->type = ev_type;
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
	ep->epnum = dir | usb_endpoint_num(epd);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC;
	ep->status = status;
	ep->len_urb = urb_length;
	ep->len_cap = length + lendesc;
	ep->xfer_flags = urb->transfer_flags;

	if (usb_endpoint_xfer_int(epd)) {
		ep->interval = urb->interval;
	} else if (usb_endpoint_xfer_isoc(epd)) {
		ep->interval = urb->interval;
		ep->start_frame = urb->start_frame;
		ep->s.iso.error_count = urb->error_count;
		ep->s.iso.numdesc = urb->number_of_packets;
	}

	if (usb_endpoint_xfer_control(epd) && ev_type == 'S') {
		ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type);
	} else {
		ep->flag_setup = '-';
	}

	if (ndesc != 0) {
		ep->ndesc = ndesc;
		mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc);
		if ((offset += lendesc) >= rp->b_size)
			offset -= rp->b_size;
	}

	if (length != 0) {
		length = mon_bin_get_data(rp, offset, urb, length,
				&ep->flag_data);
		if (length > 0) {
			delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
			ep->len_cap -= length;
			delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
			mon_buff_area_shrink(rp, delta);
		}
	} else {
		ep->flag_data = data_tag;
	}

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static void mon_bin_submit(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'S', -EINPROGRESS);
}

static void mon_bin_complete(void *data, struct urb *urb, int status)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'C', status);
}

static void mon_bin_error(void *data, struct urb *urb, int error)
{
	struct mon_reader_bin *rp = data;
	struct timespec64 ts;
	unsigned long flags;
	unsigned int offset;
	struct mon_bin_hdr *ep;

	ktime_get_real_ts64(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	offset = mon_buff_area_alloc(rp, PKT_SIZE);
	if (offset == ~0) {
		/* Not incrementing cnt_lost. Just because. */
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);

	memset(ep, 0, PKT_SIZE);
	ep->type = 'E';
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
	ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
	ep->epnum |= usb_endpoint_num(&urb->ep->desc);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC;
	ep->status = error;

	ep->flag_setup = '-';
	ep->flag_data = 'E';

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static int mon_bin_open(struct inode *inode, struct file *file)
{
	struct mon_bus *mbus;
	struct mon_reader_bin *rp;
	size_t size;
	int rc;

	mutex_lock(&mon_lock);
	mbus = mon_bus_lookup(iminor(inode));
	if (mbus == NULL) {
		mutex_unlock(&mon_lock);
		return -ENODEV;
	}
	if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
		printk(KERN_ERR TAG ": consistency error on open\n");
		mutex_unlock(&mon_lock);
		return -ENODEV;
	}

	rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
	if (rp == NULL) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	spin_lock_init(&rp->b_lock);
	init_waitqueue_head(&rp->b_wait);
	mutex_init(&rp->fetch_lock);
	rp->b_size = BUFF_DFL;

	size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
	if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
		rc = -ENOMEM;
		goto err_allocvec;
	}

	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
		goto err_allocbuff;

	rp->r.m_bus = mbus;
	rp->r.r_data = rp;
	rp->r.rnf_submit = mon_bin_submit;
	rp->r.rnf_error = mon_bin_error;
	rp->r.rnf_complete = mon_bin_complete;

	mon_reader_add(mbus, &rp->r);

	file->private_data = rp;
	mutex_unlock(&mon_lock);
	return 0;

err_allocbuff:
	kfree(rp->b_vec);
err_allocvec:
	kfree(rp);
err_alloc:
	mutex_unlock(&mon_lock);
	return rc;
}

/*
 * Extract an event from buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, unsigned int hdrbytes,
    void __user *data, unsigned int nbytes)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	size_t step_len;
	unsigned int offset;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (copy_to_user(hdr, ep, hdrbytes)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	step_len = min(ep->len_cap, nbytes);
	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0;

	if (copy_from_buf(rp, offset, data, step_len)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;

	mutex_unlock(&rp->fetch_lock);
	return 0;
}

static int mon_bin_release(struct inode *inode, struct file *file)
{
	struct mon_reader_bin *rp = file->private_data;
	struct mon_bus* mbus = rp->r.m_bus;

	mutex_lock(&mon_lock);

	if (mbus->nreaders <= 0) {
		printk(KERN_ERR TAG ": consistency error on close\n");
		mutex_unlock(&mon_lock);
		return 0;
	}
	mon_reader_del(mbus, &rp->r);

	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
	kfree(rp->b_vec);
	kfree(rp);

	mutex_unlock(&mon_lock);
	return 0;
}

static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int hdrbytes = PKT_SZ_API0;
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int offset;
	size_t step_len;
	char *ptr;
	ssize_t done = 0;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (rp->b_read < hdrbytes) {
		step_len = min(nbytes, (size_t)(hdrbytes - rp->b_read));
		ptr = ((char *)ep) + rp->b_read;
		if (step_len && copy_to_user(buf, ptr, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	if (rp->b_read >= hdrbytes) {
		step_len = ep->len_cap;
		step_len -= rp->b_read - hdrbytes;
		if (step_len > nbytes)
			step_len = nbytes;
		offset = rp->b_out + PKT_SIZE;
		offset += rp->b_read - hdrbytes;
		if (offset >= rp->b_size)
			offset -= rp->b_size;
		if (copy_from_buf(rp, offset, buf, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	/*
	 * Check if whole packet was read, and if so, jump to the next one.
	 */
	if (rp->b_read >= hdrbytes + ep->len_cap) {
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
		spin_unlock_irqrestore(&rp->b_lock, flags);
		rp->b_read = 0;
	}

	mutex_unlock(&rp->fetch_lock);
	return done;
}

/*
 * Remove at most nevents from chunked buffer.
 * Returns the number of removed events.
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	int i;

	mutex_lock(&rp->fetch_lock);
	spin_lock_irqsave(&rp->b_lock, flags);
	for (i = 0; i < nevents; ++i) {
		if (MON_RING_EMPTY(rp))
			break;

		ep = MON_OFF2HDR(rp, rp->b_out);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;
	mutex_unlock(&rp->fetch_lock);
	return i;
}

/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		if (nevents >= max)
			break;

		ep = MON_OFF2HDR(rp, cur_out);
		if (put_user(cur_out, &vec[nevents])) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

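/*
 * Illustrative userspace flow for the mmap-based API (sketch only, not
 * part of this driver): the reader maps the ring once, then loops on
 * MON_IOCX_MFETCH. Every returned u32 is a byte offset of an event
 * header inside the mapping, and the same call flushes the events that
 * were consumed on the previous pass, e.g.
 *
 *	size_t ring_size = ioctl(fd, MON_IOCQ_RING_SIZE);
 *	char *ring = mmap(NULL, ring_size, PROT_READ, MAP_SHARED, fd, 0);
 *	uint32_t offs[32];
 *	struct mon_bin_mfetch fetch = {
 *		.offvec = offs, .nfetch = 32, .nflush = nconsumed
 *	};
 *	ioctl(fd, MON_IOCX_MFETCH, &fetch);
 *
 * Each offs[i] then points at a struct mon_bin_hdr within ring.
 * Documentation/usb/usbmon.rst describes this in detail.
 */
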
/*
 * Count events. This is almost the same as the above mon_bin_fetch,
 * only we do not store offsets into user vector, and we have no limit.
 */
static int mon_bin_queued(struct mon_reader_bin *rp)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	mutex_lock(&rp->fetch_lock);

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		ep = MON_OFF2HDR(rp, cur_out);

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 */
static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	// struct mon_bus* mbus = rp->r.m_bus;
	int ret = 0;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	switch (cmd) {

	case MON_IOCQ_URB_LEN:
		/*
		 * N.B. This only returns the size of data, without the header.
		 */
		spin_lock_irqsave(&rp->b_lock, flags);
		if (!MON_RING_EMPTY(rp)) {
			ep = MON_OFF2HDR(rp, rp->b_out);
			ret = ep->len_cap;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		break;

	case MON_IOCQ_RING_SIZE:
		mutex_lock(&rp->fetch_lock);
		ret = rp->b_size;
		mutex_unlock(&rp->fetch_lock);
		break;

	case MON_IOCT_RING_SIZE:
		/*
		 * Changing the buffer size will flush its contents; the new
		 * buffer is allocated before releasing the old one to be sure
		 * the device will stay functional also in case of memory
		 * pressure.
		 */
		{
		int size;
		struct mon_pgmap *vec;

		if (arg < BUFF_MIN || arg > BUFF_MAX)
			return -EINVAL;

		size = CHUNK_ALIGN(arg);
		vec = kcalloc(size / CHUNK_SIZE, sizeof(struct mon_pgmap),
			      GFP_KERNEL);
		if (vec == NULL) {
			ret = -ENOMEM;
			break;
		}

		ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
		if (ret < 0) {
			kfree(vec);
			break;
		}

		mutex_lock(&rp->fetch_lock);
		spin_lock_irqsave(&rp->b_lock, flags);
		if (rp->mmap_active) {
			mon_free_buff(vec, size/CHUNK_SIZE);
			kfree(vec);
			ret = -EBUSY;
		} else {
			mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
			kfree(rp->b_vec);
			rp->b_vec  = vec;
			rp->b_size = size;
			rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
			rp->cnt_lost = 0;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		mutex_unlock(&rp->fetch_lock);
		}
		break;

	case MON_IOCH_MFLUSH:
		ret = mon_bin_flush(rp, arg);
		break;

	case MON_IOCX_GET:
	case MON_IOCX_GETX:
		{
		struct mon_bin_get getb;

		if (copy_from_user(&getb, (void __user *)arg,
					    sizeof(struct mon_bin_get)))
			return -EFAULT;

		if (getb.alloc > 0x10000000)	/* Want to cast to u32 */
			return -EINVAL;
		ret = mon_bin_get_event(file, rp, getb.hdr,
		    (cmd == MON_IOCX_GET)? PKT_SZ_API0: PKT_SZ_API1,
		    getb.data, (unsigned int)getb.alloc);
		}
		break;

	case MON_IOCX_MFETCH:
		{
		struct mon_bin_mfetch mfetch;
		struct mon_bin_mfetch __user *uptr;

		uptr = (struct mon_bin_mfetch __user *)arg;

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush) {
			ret = mon_bin_flush(rp, mfetch.nflush);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch))
			return -EFAULT;
		ret = 0;
		}
		break;

	case MON_IOCG_STATS: {
		struct mon_bin_stats __user *sp;
		unsigned int nevents;
		unsigned int ndropped;

		spin_lock_irqsave(&rp->b_lock, flags);
		ndropped = rp->cnt_lost;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		nevents = mon_bin_queued(rp);

		sp = (struct mon_bin_stats __user *)arg;
		if (put_user(ndropped, &sp->dropped))
			return -EFAULT;
		if (put_user(nevents, &sp->queued))
			return -EFAULT;

		}
		break;

	default:
		return -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long mon_bin_compat_ioctl(struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	int ret;

	switch (cmd) {

	case MON_IOCX_GET32:
	case MON_IOCX_GETX32:
		{
		struct mon_bin_get32 getb;

		if (copy_from_user(&getb, (void __user *)arg,
					    sizeof(struct mon_bin_get32)))
			return -EFAULT;

		ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32),
		    (cmd == MON_IOCX_GET32)? PKT_SZ_API0: PKT_SZ_API1,
		    compat_ptr(getb.data32), getb.alloc32);
		if (ret < 0)
			return ret;
		}
		return 0;

	case MON_IOCX_MFETCH32:
		{
		struct mon_bin_mfetch32 mfetch;
		struct mon_bin_mfetch32 __user *uptr;

		uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush32) {
			ret = mon_bin_flush(rp, mfetch.nflush32);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush32))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
		    mfetch.nfetch32);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch32))
			return -EFAULT;
		}
		return 0;

	case MON_IOCG_STATS:
		return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg));

	case MON_IOCQ_URB_LEN:
	case MON_IOCQ_RING_SIZE:
	case MON_IOCT_RING_SIZE:
	case MON_IOCH_MFLUSH:
		return mon_bin_ioctl(file, cmd, arg);

	default:
		;
	}
	return -ENOTTY;
}
#endif /* CONFIG_COMPAT */

static __poll_t
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mon_reader_bin *rp = file->private_data;
	__poll_t mask = 0;
	unsigned long flags;

	if (file->f_mode & FMODE_READ)
		poll_wait(file, &rp->b_wait, wait);

	spin_lock_irqsave(&rp->b_lock, flags);
	if (!MON_RING_EMPTY(rp))
		mask |= EPOLLIN | EPOLLRDNORM;    /* readable */
	spin_unlock_irqrestore(&rp->b_lock, flags);
	return mask;
}

/*
 * open and close: just keep track of how many times the device is
 * mapped, to use the proper memory allocation function.
 */
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	unsigned long flags;

	spin_lock_irqsave(&rp->b_lock, flags);
	rp->mmap_active++;
	spin_unlock_irqrestore(&rp->b_lock, flags);
}

static void mon_bin_vma_close(struct vm_area_struct *vma)
{
	unsigned long flags;

	struct mon_reader_bin *rp = vma->vm_private_data;
	spin_lock_irqsave(&rp->b_lock, flags);
	rp->mmap_active--;
	spin_unlock_irqrestore(&rp->b_lock, flags);
}

/*
 * Map ring pages to user space.
 */
static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
{
	struct mon_reader_bin *rp = vmf->vma->vm_private_data;
	unsigned long offset, chunk_idx;
	struct page *pageptr;
	unsigned long flags;

	spin_lock_irqsave(&rp->b_lock, flags);
	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rp->b_size) {
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return VM_FAULT_SIGBUS;
	}
	chunk_idx = offset / CHUNK_SIZE;
	pageptr = rp->b_vec[chunk_idx].pg;
	get_page(pageptr);
	vmf->page = pageptr;
	spin_unlock_irqrestore(&rp->b_lock, flags);
	return 0;
}

static const struct vm_operations_struct mon_bin_vm_ops = {
	.open =     mon_bin_vma_open,
	.close =    mon_bin_vma_close,
	.fault =    mon_bin_vma_fault,
};

static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* don't do anything here: "fault" will set up page table entries */
	vma->vm_ops = &mon_bin_vm_ops;

	if (vma->vm_flags & VM_WRITE)
		return -EPERM;

	vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_MAYWRITE);
	vma->vm_private_data = filp->private_data;
	mon_bin_vma_open(vma);
	return 0;
}

static const struct file_operations mon_fops_binary = {
	.owner =	THIS_MODULE,
	.open =		mon_bin_open,
	.llseek =	no_llseek,
	.read =		mon_bin_read,
	/* .write =	mon_text_write, */
	.poll =		mon_bin_poll,
	.unlocked_ioctl = mon_bin_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	mon_bin_compat_ioctl,
#endif
	.release =	mon_bin_release,
	.mmap =		mon_bin_mmap,
};

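/*
 * Sleep until the ring is non-empty. This open-codes an interruptible
 * wait on b_wait so that O_NONBLOCK readers can bail out with
 * -EWOULDBLOCK instead of sleeping.
 */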
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
	DECLARE_WAITQUEUE(waita, current);
	unsigned long flags;

	add_wait_queue(&rp->b_wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&rp->b_lock, flags);
	while (MON_RING_EMPTY(rp)) {
		spin_unlock_irqrestore(&rp->b_lock, flags);

		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->b_wait, &waita);
			return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
		}
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->b_wait, &waita);
			return -EINTR;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&rp->b_lock, flags);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->b_wait, &waita);
	return 0;
}

static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{
	int n;
	unsigned long vaddr;

	for (n = 0; n < npages; n++) {
		vaddr = get_zeroed_page(GFP_KERNEL);
		if (vaddr == 0) {
			while (n-- != 0)
				free_page((unsigned long) map[n].ptr);
			return -ENOMEM;
		}
		map[n].ptr = (unsigned char *) vaddr;
		map[n].pg = virt_to_page((void *) vaddr);
	}
	return 0;
}

static void mon_free_buff(struct mon_pgmap *map, int npages)
{
	int n;

	for (n = 0; n < npages; n++)
		free_page((unsigned long) map[n].ptr);
}

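/*
 * Create the usbmon%d character device for a bus. A NULL ubus denotes
 * the pseudo "bus 0" (usbmon0), which captures traffic from all buses.
 */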
int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
	struct device *dev;
	unsigned minor = ubus? ubus->busnum: 0;

	if (minor >= MON_BIN_MAX_MINOR)
		return 0;

	dev = device_create(&mon_bin_class, ubus ? ubus->controller : NULL,
			    MKDEV(MAJOR(mon_bin_dev0), minor), NULL,
			    "usbmon%d", minor);
	if (IS_ERR(dev))
		return 0;

	mbus->classdev = dev;
	return 1;
}

void mon_bin_del(struct mon_bus *mbus)
{
	device_destroy(&mon_bin_class, mbus->classdev->devt);
}

int __init mon_bin_init(void)
{
	int rc;

	rc = class_register(&mon_bin_class);
	if (rc)
		goto err_class;

	rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
	if (rc < 0)
		goto err_dev;

	cdev_init(&mon_bin_cdev, &mon_fops_binary);
	mon_bin_cdev.owner = THIS_MODULE;

	rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
	if (rc < 0)
		goto err_add;

	return 0;

err_add:
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
	class_unregister(&mon_bin_class);
err_class:
	return rc;
}

void mon_bin_exit(void)
{
	cdev_del(&mon_bin_cdev);
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
	class_unregister(&mon_bin_class);
}