/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/irqreturn.h"
#include "linux/kd.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "chan.h"
#include "irq_kern.h"
#include "irq_user.h"
#include "kern_util.h"
#include "os.h"

#define LINE_BUFSIZE 4096

static irqreturn_t line_interrupt(int irq, void *data)
{
	struct chan *chan = data;
	struct line *line = chan->line;

	if (line)
		chan_interrupt(&line->chan_list, &line->task, line->tty, irq);
	return IRQ_HANDLED;
}

static void line_timer_cb(struct work_struct *work)
{
	struct line *line = container_of(work, struct line, task.work);

	if (!line->throttled)
		chan_interrupt(&line->chan_list, &line->task, line->tty,
			       line->driver->read_irq);
}

/*
 * Returns the free space inside the ring buffer of this line.
 *
 * Should be called while holding line->lock (this does not modify data).
 */
static int write_room(struct line *line)
{
	int n;

	if (line->buffer == NULL)
		return LINE_BUFSIZE - 1;

	/* This is for the case where the buffer is wrapped! */
	n = line->head - line->tail;

	if (n <= 0)
		n += LINE_BUFSIZE; /* The other case */
	return n - 1;
}

int line_write_room(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int room;

	spin_lock_irqsave(&line->lock, flags);
	room = write_room(line);
	spin_unlock_irqrestore(&line->lock, flags);

	return room;
}

int line_chars_in_buffer(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&line->lock, flags);
	/*
	 * write_room() subtracts 1 for the byte that is always kept free,
	 * so re-add it here.
	 */
	ret = LINE_BUFSIZE - (write_room(line) + 1);
	spin_unlock_irqrestore(&line->lock, flags);

	return ret;
}
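
/*
 * Transmit ring buffer layout used by buffer_data() and flush_buffer()
 * below: new data is appended at ->tail, flushed data is consumed from
 * ->head, and one byte is always left unused so that ->head == ->tail
 * unambiguously means "empty" rather than "full".
 */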

/*
 * This copies the content of buf into the circular buffer associated with
 * this line.
 * The return value is the number of characters actually copied, i.e. the ones
 * for which there was space: this function is not supposed to ever flush out
 * the circular buffer.
 *
 * Must be called while holding line->lock!
 */
static int buffer_data(struct line *line, const char *buf, int len)
{
	int end, room;

	if (line->buffer == NULL) {
		line->buffer = kmalloc(LINE_BUFSIZE, GFP_ATOMIC);
		if (line->buffer == NULL) {
			printk(KERN_ERR "buffer_data - atomic allocation "
			       "failed\n");
			return 0;
		}
		line->head = line->buffer;
		line->tail = line->buffer;
	}

	room = write_room(line);
	len = (len > room) ? room : len;

	end = line->buffer + LINE_BUFSIZE - line->tail;

	if (len < end) {
		memcpy(line->tail, buf, len);
		line->tail += len;
	} else {
		/* The circular buffer is wrapping */
		memcpy(line->tail, buf, end);
		buf += end;
		memcpy(line->buffer, buf, len - end);
		line->tail = line->buffer + len - end;
	}

	return len;
}

/*
 * Flushes the ring buffer to the output channels. That is, write_chan is
 * called, passing it line->head as buffer, and an appropriate count.
 *
 * On exit, returns 1 when the buffer is empty,
 * 0 when the buffer is not empty on exit,
 * and -errno when an error occurred.
 *
 * Must be called while holding line->lock!
 */
static int flush_buffer(struct line *line)
{
	int n, count;

	if ((line->buffer == NULL) || (line->head == line->tail))
		return 1;

	if (line->tail < line->head) {
		/* line->buffer + LINE_BUFSIZE is the end of the buffer! */
		count = line->buffer + LINE_BUFSIZE - line->head;

		n = write_chan(&line->chan_list, line->head, count,
			       line->driver->write_irq);
		if (n < 0)
			return n;
		if (n == count) {
			/*
			 * We have flushed from ->head to buffer end, now we
			 * must flush only from the beginning to ->tail.
			 */
			line->head = line->buffer;
		} else {
			line->head += n;
			return 0;
		}
	}

	count = line->tail - line->head;
	n = write_chan(&line->chan_list, line->head, count,
		       line->driver->write_irq);

	if (n < 0)
		return n;

	line->head += n;
	return line->head == line->tail;
}

void line_flush_buffer(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&line->lock, flags);
	flush_buffer(line);
	spin_unlock_irqrestore(&line->lock, flags);
}

/*
 * We map both ->flush_chars and ->put_char (which go together) onto
 * ->flush_buffer and ->write. Hope it's not that bad.
 */
void line_flush_chars(struct tty_struct *tty)
{
	line_flush_buffer(tty);
}

int line_put_char(struct tty_struct *tty, unsigned char ch)
{
	return line_write(tty, &ch, sizeof(ch));
}

int line_write(struct tty_struct *tty, const unsigned char *buf, int len)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int n, ret = 0;

	spin_lock_irqsave(&line->lock, flags);
	if (line->head != line->tail)
		ret = buffer_data(line, buf, len);
	else {
		n = write_chan(&line->chan_list, buf, len,
			       line->driver->write_irq);
		if (n < 0) {
			ret = n;
			goto out_up;
		}

		len -= n;
		ret += n;
		if (len > 0)
			ret += buffer_data(line, buf + n, len);
	}
out_up:
	spin_unlock_irqrestore(&line->lock, flags);
	return ret;
}

void line_set_termios(struct tty_struct *tty, struct ktermios *old)
{
	/* nothing */
}
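
/*
 * ioctls that we recognise but do not handle. line_ioctl() uses this
 * table only as a membership test so that these requests are not logged
 * as unknown; the level and name fields are not currently used.
 */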
static const struct {
	int cmd;
	char *level;
	char *name;
} tty_ioctls[] = {
	/* don't print these, they flood the log ... */
	{ TCGETS, NULL, "TCGETS" },
	{ TCSETS, NULL, "TCSETS" },
	{ TCSETSW, NULL, "TCSETSW" },
	{ TCFLSH, NULL, "TCFLSH" },
	{ TCSBRK, NULL, "TCSBRK" },

	/* general tty stuff */
	{ TCSETSF, KERN_DEBUG, "TCSETSF" },
	{ TCGETA, KERN_DEBUG, "TCGETA" },
	{ TIOCMGET, KERN_DEBUG, "TIOCMGET" },
	{ TCSBRKP, KERN_DEBUG, "TCSBRKP" },
	{ TIOCMSET, KERN_DEBUG, "TIOCMSET" },

	/* linux-specific ones */
	{ TIOCLINUX, KERN_INFO, "TIOCLINUX" },
	{ KDGKBMODE, KERN_INFO, "KDGKBMODE" },
	{ KDGKBTYPE, KERN_INFO, "KDGKBTYPE" },
	{ KDSIGACCEPT, KERN_INFO, "KDSIGACCEPT" },
};

int line_ioctl(struct tty_struct *tty, unsigned int cmd,
	       unsigned long arg)
{
	int ret;
	int i;

	ret = 0;
	switch (cmd) {
#ifdef TIOCGETP
	case TIOCGETP:
	case TIOCSETP:
	case TIOCSETN:
#endif
#ifdef TIOCGETC
	case TIOCGETC:
	case TIOCSETC:
#endif
#ifdef TIOCGLTC
	case TIOCGLTC:
	case TIOCSLTC:
#endif
	/*
	 * Note: these are out of date as we now have TCGETS2 etc, but this
	 * whole lot should probably go away.
	 */
	case TCGETS:
	case TCSETSF:
	case TCSETSW:
	case TCSETS:
	case TCGETA:
	case TCSETAF:
	case TCSETAW:
	case TCSETA:
	case TCXONC:
	case TCFLSH:
	case TIOCOUTQ:
	case TIOCINQ:
	case TIOCGLCKTRMIOS:
	case TIOCSLCKTRMIOS:
	case TIOCPKT:
	case TIOCGSOFTCAR:
	case TIOCSSOFTCAR:
		return -ENOIOCTLCMD;
#if 0
	case TCwhatever:
		/* do something */
		break;
#endif
	default:
		for (i = 0; i < ARRAY_SIZE(tty_ioctls); i++)
			if (cmd == tty_ioctls[i].cmd)
				break;
		if (i == ARRAY_SIZE(tty_ioctls)) {
			printk(KERN_ERR "%s: %s: unknown ioctl: 0x%x\n",
			       __func__, tty->name, cmd);
		}
		ret = -ENOIOCTLCMD;
		break;
	}
	return ret;
}

void line_throttle(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;

	deactivate_chan(&line->chan_list, line->driver->read_irq);
	line->throttled = 1;
}

void line_unthrottle(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;

	line->throttled = 0;
	chan_interrupt(&line->chan_list, &line->task, tty,
		       line->driver->read_irq);

	/*
	 * Maybe there is enough stuff pending that calling the interrupt
	 * throttles us again. In this case, line->throttled will be 1
	 * again and we shouldn't turn the interrupt back on.
	 */
	if (!line->throttled)
		reactivate_chan(&line->chan_list, line->driver->read_irq);
}

static irqreturn_t line_write_interrupt(int irq, void *data)
{
	struct chan *chan = data;
	struct line *line = chan->line;
	struct tty_struct *tty = line->tty;
	int err;

	/*
	 * Interrupts are disabled here because we registered the interrupt
	 * with IRQF_DISABLED (see line_setup_irq).
	 */

	spin_lock(&line->lock);
	err = flush_buffer(line);
	if (err == 0) {
		/* The buffer still holds data - don't wake up writers yet. */
		spin_unlock(&line->lock);
		return IRQ_NONE;
	} else if (err < 0) {
		line->head = line->buffer;
		line->tail = line->buffer;
	}
	spin_unlock(&line->lock);

	if (tty == NULL)
		return IRQ_NONE;

	tty_wakeup(tty);
	return IRQ_HANDLED;
}

int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
{
	const struct line_driver *driver = line->driver;
	int err = 0, flags = IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM;

	if (input)
		err = um_request_irq(driver->read_irq, fd, IRQ_READ,
				     line_interrupt, flags,
				     driver->read_irq_name, data);
	if (err)
		return err;
	if (output)
		err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
				     line_write_interrupt, flags,
				     driver->write_irq_name, data);
	line->have_irq = 1;
	return err;
}

/*
 * Normally, a driver like this can rely mostly on the tty layer
 * locking, particularly when it comes to the driver structure.
 * However, in this case, mconsole requests can come in "from the
 * side", and race with opens and closes.
 *
 * mconsole config requests will want to be sure the device isn't in
 * use, and get_config, open, and close will want a stable
 * configuration. The checking and modification of the configuration
 * is done under a spinlock. Checking whether the device is in use is
 * line->tty->count > 1, also under the spinlock.
 *
 * line->count serves to decide whether the device should be enabled or
 * disabled on the host. If it's equal to 0, then we are doing the
 * first open or last close. Otherwise, open and close just return.
 */

int line_open(struct line *lines, struct tty_struct *tty)
{
	struct line *line = &lines[tty->index];
	int err = -ENODEV;

	spin_lock(&line->count_lock);
	if (!line->valid)
		goto out_unlock;

	err = 0;
	if (line->count++)
		goto out_unlock;

	BUG_ON(tty->driver_data);
	tty->driver_data = line;
	line->tty = tty;

	spin_unlock(&line->count_lock);
	err = enable_chan(line);
	if (err) /* line_close() will be called by our caller */
		return err;

	INIT_DELAYED_WORK(&line->task, line_timer_cb);

	if (!line->sigio) {
		chan_enable_winch(&line->chan_list, tty);
		line->sigio = 1;
	}

	chan_window_size(&line->chan_list, &tty->winsize.ws_row,
			 &tty->winsize.ws_col);

	return 0;

out_unlock:
	spin_unlock(&line->count_lock);
	return err;
}

static void unregister_winch(struct tty_struct *tty);

void line_close(struct tty_struct *tty, struct file *filp)
{
	struct line *line = tty->driver_data;

	/*
	 * If line_open fails (and tty->driver_data is never set),
	 * tty_open will call line_close. So just return in this case.
	 */
	if (line == NULL)
		return;

	/* We ignore the error anyway! */
	flush_buffer(line);

	spin_lock(&line->count_lock);
	BUG_ON(!line->valid);

	if (--line->count)
		goto out_unlock;

	line->tty = NULL;
	tty->driver_data = NULL;

	spin_unlock(&line->count_lock);

	if (line->sigio) {
		unregister_winch(tty);
		line->sigio = 0;
	}

	return;

out_unlock:
	spin_unlock(&line->count_lock);
}

void close_lines(struct line *lines, int nlines)
{
	int i;

	for (i = 0; i < nlines; i++)
		close_chan(&lines[i].chan_list, 0);
}

static int setup_one_line(struct line *lines, int n, char *init, int init_prio,
			  char **error_out)
{
	struct line *line = &lines[n];
	int err = -EINVAL;

	spin_lock(&line->count_lock);

	if (line->count) {
		*error_out = "Device is already open";
		goto out;
	}

	if (line->init_pri <= init_prio) {
		line->init_pri = init_prio;
		if (!strcmp(init, "none"))
			line->valid = 0;
		else {
			line->init_str = init;
			line->valid = 1;
		}
	}
	err = 0;
out:
	spin_unlock(&line->count_lock);
	return err;
}

/*
 * Common setup code for both startup command line and mconsole initialization.
 * @lines contains the array (of size @num) to modify;
 * @init is the setup string;
 * @error_out is an error string in the case of failure.
 */

int line_setup(struct line *lines, unsigned int num, char *init,
	       char **error_out)
{
	int i, n, err;
	char *end;

	if (*init == '=') {
		/*
		 * We said con=/ssl= instead of con#=, so we are configuring
		 * all consoles at once.
		 */
		n = -1;
	} else {
		n = simple_strtoul(init, &end, 0);
		if (*end != '=') {
			*error_out = "Couldn't parse device number";
			return -EINVAL;
		}
		init = end;
	}
	init++;

	if (n >= (signed int) num) {
		*error_out = "Device number out of range";
		return -EINVAL;
	} else if (n >= 0) {
		err = setup_one_line(lines, n, init, INIT_ONE, error_out);
		if (err)
			return err;
	} else {
		for (i = 0; i < num; i++) {
			err = setup_one_line(lines, i, init, INIT_ALL,
					     error_out);
			if (err)
				return err;
		}
	}
	return n == -1 ? num : n;
}
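
/*
 * The helpers below back the mconsole "config", "get_config", and
 * "remove" requests for a line driver (the driver is registered with
 * mconsole in register_lines() below); they reuse line_setup() and the
 * channel parsing code.
 */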
int line_config(struct line *lines, unsigned int num, char *str,
		const struct chan_opts *opts, char **error_out)
{
	struct line *line;
	char *new;
	int n;

	if (*str == '=') {
		*error_out = "Can't configure all devices from mconsole";
		return -EINVAL;
	}

	new = kstrdup(str, GFP_KERNEL);
	if (new == NULL) {
		*error_out = "Failed to allocate memory";
		return -ENOMEM;
	}
	n = line_setup(lines, num, new, error_out);
	if (n < 0)
		return n;

	line = &lines[n];
	return parse_chan_pair(line->init_str, line, n, opts, error_out);
}

int line_get_config(char *name, struct line *lines, unsigned int num, char *str,
		    int size, char **error_out)
{
	struct line *line;
	char *end;
	int dev, n = 0;

	dev = simple_strtoul(name, &end, 0);
	if ((*end != '\0') || (end == name)) {
		*error_out = "line_get_config failed to parse device number";
		return 0;
	}

	if ((dev < 0) || (dev >= num)) {
		*error_out = "device number out of range";
		return 0;
	}

	line = &lines[dev];

	spin_lock(&line->count_lock);
	if (!line->valid)
		CONFIG_CHUNK(str, size, n, "none", 1);
	else if (line->tty == NULL)
		CONFIG_CHUNK(str, size, n, line->init_str, 1);
	else
		n = chan_config_string(&line->chan_list, str, size, error_out);
	spin_unlock(&line->count_lock);

	return n;
}

int line_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if ((*end != '\0') || (end == *str))
		return -1;

	*str = end;
	*start_out = n;
	*end_out = n;
	return n;
}

int line_remove(struct line *lines, unsigned int num, int n, char **error_out)
{
	int err;
	char config[sizeof("conxxxx=none\0")];

	sprintf(config, "%d=none", n);
	err = line_setup(lines, num, config, error_out);
	if (err >= 0)
		err = 0;
	return err;
}

struct tty_driver *register_lines(struct line_driver *line_driver,
				  const struct tty_operations *ops,
				  struct line *lines, int nlines)
{
	int i;
	struct tty_driver *driver = alloc_tty_driver(nlines);

	if (!driver)
		return NULL;

	driver->driver_name = line_driver->name;
	driver->name = line_driver->device_name;
	driver->major = line_driver->major;
	driver->minor_start = line_driver->minor_start;
	driver->type = line_driver->type;
	driver->subtype = line_driver->subtype;
	driver->flags = TTY_DRIVER_REAL_RAW;
	driver->init_termios = tty_std_termios;
	tty_set_operations(driver, ops);

	if (tty_register_driver(driver)) {
		printk(KERN_ERR "register_lines : can't register %s driver\n",
		       line_driver->name);
		put_tty_driver(driver);
		return NULL;
	}

	for (i = 0; i < nlines; i++) {
		if (!lines[i].valid)
			tty_unregister_device(driver, i);
	}

	mconsole_register_dev(&line_driver->mc);
	return driver;
}

static DEFINE_SPINLOCK(winch_handler_lock);
static LIST_HEAD(winch_handlers);

void lines_init(struct line *lines, int nlines, struct chan_opts *opts)
{
	struct line *line;
	char *error;
	int i;

	for (i = 0; i < nlines; i++) {
		line = &lines[i];
		INIT_LIST_HEAD(&line->chan_list);

		if (line->init_str == NULL)
			continue;

		line->init_str = kstrdup(line->init_str, GFP_KERNEL);
		if (line->init_str == NULL)
			printk(KERN_ERR "lines_init - kstrdup returned NULL\n");

		if (parse_chan_pair(line->init_str, line, i, opts, &error)) {
			printk(KERN_ERR "parse_chan_pair failed for "
			       "device %d : %s\n", i, error);
			line->valid = 0;
		}
	}
}
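
/*
 * SIGWINCH handling: each host terminal that can report window size
 * changes is tracked by a struct winch on the winch_handlers list.
 * winch_interrupt() consumes the notification byte from winch->fd,
 * updates the tty's window size and forwards SIGWINCH to the foreground
 * process group of the corresponding tty.
 */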
struct winch {
	struct list_head list;
	int fd;
	int tty_fd;
	int pid;
	struct tty_struct *tty;
	unsigned long stack;
	struct work_struct work;
};

static void __free_winch(struct work_struct *work)
{
	struct winch *winch = container_of(work, struct winch, work);
	free_irq(WINCH_IRQ, winch);

	if (winch->pid != -1)
		os_kill_process(winch->pid, 1);
	if (winch->stack != 0)
		free_stack(winch->stack, 0);
	kfree(winch);
}

static void free_winch(struct winch *winch)
{
	int fd = winch->fd;
	winch->fd = -1;
	if (fd != -1)
		os_close_file(fd);
	list_del(&winch->list);
	__free_winch(&winch->work);
}

static irqreturn_t winch_interrupt(int irq, void *data)
{
	struct winch *winch = data;
	struct tty_struct *tty;
	struct line *line;
	int fd = winch->fd;
	int err;
	char c;

	if (fd != -1) {
		err = generic_read(fd, &c, NULL);
		if (err < 0) {
			if (err != -EAGAIN) {
				winch->fd = -1;
				list_del(&winch->list);
				os_close_file(fd);
				printk(KERN_ERR "winch_interrupt : "
				       "read failed, errno = %d\n", -err);
				printk(KERN_ERR "fd %d is losing SIGWINCH "
				       "support\n", winch->tty_fd);
				INIT_WORK(&winch->work, __free_winch);
				schedule_work(&winch->work);
				return IRQ_HANDLED;
			}
			goto out;
		}
	}
	tty = winch->tty;
	if (tty != NULL) {
		line = tty->driver_data;
		if (line != NULL) {
			chan_window_size(&line->chan_list, &tty->winsize.ws_row,
					 &tty->winsize.ws_col);
			kill_pgrp(tty->pgrp, SIGWINCH, 1);
		}
	}
out:
	if (winch->fd != -1)
		reactivate_fd(winch->fd, WINCH_IRQ);
	return IRQ_HANDLED;
}

void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
			unsigned long stack)
{
	struct winch *winch;

	winch = kmalloc(sizeof(*winch), GFP_KERNEL);
	if (winch == NULL) {
		printk(KERN_ERR "register_winch_irq - kmalloc failed\n");
		goto cleanup;
	}

	*winch = ((struct winch) { .list = LIST_HEAD_INIT(winch->list),
				   .fd = fd,
				   .tty_fd = tty_fd,
				   .pid = pid,
				   .tty = tty,
				   .stack = stack });

	if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
			   IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			   "winch", winch) < 0) {
		printk(KERN_ERR "register_winch_irq - failed to register "
		       "IRQ\n");
		goto out_free;
	}

	spin_lock(&winch_handler_lock);
	list_add(&winch->list, &winch_handlers);
	spin_unlock(&winch_handler_lock);

	return;

out_free:
	kfree(winch);
cleanup:
	os_kill_process(pid, 1);
	os_close_file(fd);
	if (stack != 0)
		free_stack(stack, 0);
}

static void unregister_winch(struct tty_struct *tty)
{
	struct list_head *ele, *next;
	struct winch *winch;

	spin_lock(&winch_handler_lock);

	list_for_each_safe(ele, next, &winch_handlers) {
		winch = list_entry(ele, struct winch, list);
		if (winch->tty == tty) {
			free_winch(winch);
			break;
		}
	}
	spin_unlock(&winch_handler_lock);
}
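
/*
 * Runs at UML shutdown (registered via __uml_exitcall() below) and tears
 * down every winch handler that is still registered.
 */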
static void winch_cleanup(void)
{
	struct list_head *ele, *next;
	struct winch *winch;

	spin_lock(&winch_handler_lock);

	list_for_each_safe(ele, next, &winch_handlers) {
		winch = list_entry(ele, struct winch, list);
		free_winch(winch);
	}

	spin_unlock(&winch_handler_lock);
}
__uml_exitcall(winch_cleanup);

char *add_xterm_umid(char *base)
{
	char *umid, *title;
	int len;

	umid = get_umid();
	if (*umid == '\0')
		return base;

	len = strlen(base) + strlen(" ()") + strlen(umid) + 1;
	title = kmalloc(len, GFP_KERNEL);
	if (title == NULL) {
		printk(KERN_ERR "Failed to allocate buffer for xterm title\n");
		return base;
	}

	snprintf(title, len, "%s (%s)", base, umid);
	return title;
}