/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom make_request_fn function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>

#include <asm/uaccess.h>

#define DRIVER_NAME	"pktcdvd"

#if PACKET_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

#if PACKET_DEBUG > 1
#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define VPRINTK(fmt, args...)
#endif

#define MAX_SPEED 0xffff

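/*
 * ZONE() below maps a sector to the first sector of the fixed-size zone
 * (packet) that contains it, after applying the device's track offset.
 * For example, with a 32kB packet (pd->settings.size == 64 sectors) and
 * offset 0, sectors 0..63 all map to zone 0 and sectors 64..127 to zone 64.
 */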
#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
			 ~(sector_t)((pd)->settings.size - 1))

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;

static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);



/*
 * create and register a pktcdvd kernel object.
 */
static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
					const char* name,
					struct kobject* parent,
					struct kobj_type* ktype)
{
	struct pktcdvd_kobj *p;
	int error;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	if (error) {
		kobject_put(&p->kobj);
		return NULL;
	}
	kobject_uevent(&p->kobj, KOBJ_ADD);
	return p;
}
/*
 * remove a pktcdvd kernel object.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (p)
		kobject_put(&p->kobj);
}
/*
 * default release function for pktcdvd kernel objects.
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}


/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 **********************************************************/

#define DEF_ATTR(_obj,_name,_mode) \
	static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/
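/*
 * Example usage from userspace (pktcdvd0 shown; values are illustrative):
 *
 *   # cat /sys/class/pktcdvd/pktcdvd0/stat/kb_written
 *   # echo 1    > /sys/class/pktcdvd/pktcdvd0/stat/reset
 *   # echo 1000 > /sys/class/pktcdvd/pktcdvd0/write_queue/congestion_on
 */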

DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on", 0644);

static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};

static ssize_t kobj_pkt_show(struct kobject *kobj,
			struct attribute *attr, char *data)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int n = 0;
	int v;
	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);

	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);

	} else if (strcmp(attr->name, "kb_written") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);

	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);

	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);

	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);
	}
	return n;
}

static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}

static ssize_t kobj_pkt_store(struct kobject *kobj,
			struct attribute *attr,
			const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;

	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}

static const struct sysfs_ops kobj_pkt_ops = {
	.show = kobj_pkt_show,
	.store = kobj_pkt_store
};
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};

static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
					"%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
					&pd->dev->kobj,
					&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
					&pd->dev->kobj,
					&kobj_pkt_type_wqueue);
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd)
		device_unregister(pd->dev);
}


/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/
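/*
 * Example usage from userspace (device numbers are illustrative; 11:0 is
 * typically /dev/sr0, and the pkt device numbers are reported by device_map):
 *
 *   # echo "11:0"  > /sys/class/pktcdvd/add
 *   # cat            /sys/class/pktcdvd/device_map
 *   # echo "253:0" > /sys/class/pktcdvd/remove
 */
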
static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}
static ssize_t class_pktcdvd_show_map(struct class *c,
					struct class_attribute *attr,
					char *data)
{
	int n = 0;
	int idx;
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}

static ssize_t class_pktcdvd_store_add(struct class *c,
					struct class_attribute *attr,
					const char *buf,
					size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}

static ssize_t class_pktcdvd_store_remove(struct class *c,
					  struct class_attribute *attr,
					  const char *buf,
					  size_t count)
{
	unsigned int major, minor;
	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}

static struct class_attribute class_pktcdvd_attrs[] = {
	__ATTR(add, 0200, NULL, class_pktcdvd_store_add),
	__ATTR(remove, 0200, NULL, class_pktcdvd_store_remove),
	__ATTR(device_map, 0444, class_pktcdvd_show_map, NULL),
	__ATTR_NULL
};


static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_attrs = class_pktcdvd_attrs;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		printk(DRIVER_NAME": failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}

static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}

/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/

static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (IS_ERR(pd->dfs_d_root)) {
		pd->dfs_d_root = NULL;
		return;
	}
	pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
				pd->dfs_d_root, pd, &debug_fops);
	if (IS_ERR(pd->dfs_f_info)) {
		pd->dfs_f_info = NULL;
		return;
	}
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	if (pd->dfs_f_info)
		debugfs_remove(pd->dfs_f_info);
	pd->dfs_f_info = NULL;
	if (pd->dfs_d_root)
		debugfs_remove(pd->dfs_d_root);
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
	if (IS_ERR(pkt_debugfs_root)) {
		pkt_debugfs_root = NULL;
		return;
	}
}

static void pkt_debugfs_cleanup(void)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}

/* ----------------------------------------------------------*/


static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		VPRINTK(DRIVER_NAME": queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}

/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

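	/*
	 * Pre-allocate one single-segment bio per 2kB frame; pkt_gather_data()
	 * uses these to read any frames not supplied by the original writes.
	 */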
	for (i = 0; i < frames; i++) {
		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
		if (!bio)
			goto no_rd_bio;

		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}

/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}

static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}

static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}

/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_sector);
	return tmp;
}

/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}

/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     WRITE : READ, __GFP_WAIT);

	if (cgc->buflen) {
		if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
			goto out;
	}

	rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	if (cgc->quiet)
		rq->cmd_flags |= REQ_QUIET;

	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
	if (rq->errors)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}

/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct packet_command *cgc)
{
	static char *info[9] = { "No sense", "Recovered error", "Not ready",
				 "Medium error", "Hardware error", "Illegal request",
				 "Unit attention", "Data protect", "Blank check" };
	int i;
	struct request_sense *sense = cgc->sense;

	printk(DRIVER_NAME":");
	for (i = 0; i < CDROM_PACKET_SIZE; i++)
		printk(" %02x", cgc->cmd[i]);
	printk(" - ");

	if (sense == NULL) {
		printk("no sense\n");
		return;
	}

	printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);

	if (sense->sense_key > 8) {
		printk(" (INVALID)\n");
		return;
	}

	printk(" (%s)\n", info[sense->sense_key]);
}

/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}

/*
 * speeds are given in kB/s (1x CD-ROM is roughly 176 kB/s);
 * 0xffff selects the drive's maximum speed
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(&cgc);

	return ret;
}

/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}

/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{

	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_sector == pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK(DRIVER_NAME": write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK(DRIVER_NAME": read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads += bio->bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
}

/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
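/*
 * With a 32kB packet, for example, the write bio carries sixteen 2kB frames:
 * if the queue allows at least 16 segments, each frame can be its own
 * segment; if it only allows at least 8 (one per 4kB page, assuming 4kB
 * pages), the data is first copied into pkt->pages[] so frames sharing a
 * page merge into one segment; anything smaller cannot be supported.
 */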
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		printk(DRIVER_NAME": cdrom max_phys_segments too small\n");
		return -EIO;
	}
}

/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which
 *    is necessary for some scsi controllers.
 * b) The data can be used as cache to avoid read requests if we receive a
 *    new write request for the same zone.
 */
static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
{
	int f, p, offs;

	/* Copy all data to pkt->pages[] */
	p = 0;
	offs = 0;
	for (f = 0; f < pkt->frames; f++) {
		if (bvec[f].bv_page != pkt->pages[p]) {
			void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
			void *vto = page_address(pkt->pages[p]) + offs;
			memcpy(vto, vfrom, CD_FRAMESIZE);
			kunmap_atomic(vfrom);
			bvec[f].bv_page = pkt->pages[p];
			bvec[f].bv_offset = offs;
		} else {
			BUG_ON(bvec[f].bv_offset != offs);
		}
		offs += CD_FRAMESIZE;
		if (offs >= PAGE_SIZE) {
			offs = 0;
			p++;
		}
	}
}

static void pkt_end_io_read(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
		(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);

	if (err)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}

static void pkt_end_io_packet_write(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}

/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		VPRINTK("pkt_gather_data: zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_reset(bio);
		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio->bi_rw = READ;
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}

/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}

/*
 * recover a failed write, query for relocation if possible
 *
 * returns 1 if recovery is possible, or 0 if not
 *
 */
static int pkt_start_recovery(struct packet_data *pkt)
{
	/*
	 * FIXME. We need help from the file system to implement
	 * recovery handling.
	 */
	return 0;
#if 0
	struct request *rq = pkt->rq;
	struct pktcdvd_device *pd = rq->rq_disk->private_data;
	struct block_device *pkt_bdev;
	struct super_block *sb = NULL;
	unsigned long old_block, new_block;
	sector_t new_sector;

	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
	if (pkt_bdev) {
		sb = get_super(pkt_bdev);
		bdput(pkt_bdev);
	}

	if (!sb)
		return 0;

	if (!sb->s_op->relocate_blocks)
		goto out;

	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
		goto out;

	new_sector = new_block * (CD_FRAMESIZE >> 9);
	pkt->sector = new_sector;

	bio_reset(pkt->bio);
	pkt->bio->bi_bdev = pd->bdev;
	pkt->bio->bi_rw = REQ_WRITE;
	pkt->bio->bi_sector = new_sector;
	pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
	pkt->bio->bi_vcnt = pkt->frames;

	pkt->bio->bi_end_io = pkt_end_io_packet_write;
	pkt->bio->bi_private = pkt;

	drop_super(sb);
	return 1;

out:
	drop_super(sb);
	return 0;
#endif
}

static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}

/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	VPRINTK("handle_queue\n");

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		VPRINTK("handle_queue: no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = ZONE(bio->bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		VPRINTK("handle_queue: no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		VPRINTK("pkt_handle_queue: found zone=%llx\n",
			(unsigned long long)ZONE(bio->bi_sector, pd));
		if (ZONE(bio->bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	   below, wake up any waiters */
	wakeup = (pd->write_congestion_on > 0
			&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup) {
		clear_bdi_congested(&pd->disk->queue->backing_dev_info,
					BLK_RW_ASYNC);
	}

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}

/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int f;
	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;

	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_sector = pkt->sector;
	pkt->w_bio->bi_bdev = pd->bdev;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	/* XXX: locking? */
	for (f = 0; f < pkt->frames; f++) {
		bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
			BUG();
	}
	VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
		pkt_make_local_copy(pkt, bvec);
		pkt->cache_valid = 1;
	} else {
		pkt->cache_valid = 0;
	}

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	pkt->w_bio->bi_rw = WRITE;
	pkt_queue_bio(pd, pkt->w_bio);
}

static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
{
	struct bio *bio;

	if (!uptodate)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios)))
		bio_endio(bio, uptodate ? 0 : -EIO);
}

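/*
 * Packet state machine, driven by pkt_run_state_machine():
 *
 *   WAITING    -> READ_WAIT   when enough data has been collected for a
 *                             full packet or the wait timer has expired
 *   READ_WAIT  -> WRITE_WAIT  when all missing frames have been read in
 *   READ_WAIT  -> RECOVERY    if any of the gathering reads failed
 *   WRITE_WAIT -> FINISHED    when the packet write completed successfully
 *   WRITE_WAIT -> RECOVERY    if the packet write failed
 *   RECOVERY   -> FINISHED    recovery is not implemented, so the packet
 *                             is simply failed
 */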
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int uptodate;

	VPRINTK("run_state_machine: pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			if (pkt_start_recovery(pkt)) {
				pkt_start_write(pd, pkt);
			} else {
				VPRINTK("No recovery possible\n");
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			}
			break;

		case PACKET_FINISHED_STATE:
			uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
			pkt_finish_packet(pkt, uptodate);
			return;

		default:
			BUG();
			break;
		}
	}
}

static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	VPRINTK("pkt_handle_packets\n");

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, -20);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2], states[3],
					states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			VPRINTK("kcdrwd: sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			VPRINTK("kcdrwd: wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}

static void pkt_print_settings(struct pktcdvd_device *pd)
{
	printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
	printk("%u blocks, ", pd->settings.size >> 2);
	printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
}

static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret = -1;

	if ((ret = pkt_get_disc_info(pd, &di)))
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}

/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	if ((ret = pkt_mode_select(pd, &cgc))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}

/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
	case 0x1a: /* DVD+RW */
	case 0x12: /* DVD-RAM */
		/* The track is always writable on DVD+RW/DVD-RAM */
		return 1;
	default:
		break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}

/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
	case 0x0a: /* CD-RW */
	case 0xffff: /* MMC3 not supported */
		break;
	case 0x1a: /* DVD+RW */
	case 0x13: /* DVD-RW */
	case 0x12: /* DVD-RAM */
		return 1;
	default:
		VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile);
		return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		printk(DRIVER_NAME": Unknown disc. No track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		printk(DRIVER_NAME": Disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		printk(DRIVER_NAME": Can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}

static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	if ((ret = pkt_get_disc_info(pd, &di))) {
		printk("failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
		printk(DRIVER_NAME": failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		printk(DRIVER_NAME": can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		printk(DRIVER_NAME": detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		printk(DRIVER_NAME": packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
	case PACKET_MODE1:
		pd->settings.block_mode = PACKET_BLOCK_MODE1;
		break;
	case PACKET_MODE2:
		pd->settings.block_mode = PACKET_BLOCK_MODE2;
		break;
	default:
		printk(DRIVER_NAME": unknown data mode\n");
		return -EROFS;
	}
	return 0;
}

/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
						int set)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
		return ret;

	buf[pd->mode_offset + 10] |= (!!set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		printk(DRIVER_NAME": write caching control failed\n");
		pkt_dump_sense(&cgc);
	} else if (!ret && set)
		printk(DRIVER_NAME": enabled write caching on %s\n", pd->name);
	return ret;
}

static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = lockflag ? 1 : 0;
	return pkt_generic_packet(pd, &cgc);
}

/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
						unsigned *write_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sense = &sense;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(&cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
(contains the highest speed) 2020 */ 2021 int num_spdb = (cap_buf[30] << 8) + cap_buf[31]; 2022 if (num_spdb > 0) 2023 offset = 34; 2024 } 2025 2026 *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1]; 2027 return 0; 2028 } 2029 2030 /* These tables from cdrecord - I don't have orange book */ 2031 /* standard speed CD-RW (1-4x) */ 2032 static char clv_to_speed[16] = { 2033 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */ 2034 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 2035 }; 2036 /* high speed CD-RW (-10x) */ 2037 static char hs_clv_to_speed[16] = { 2038 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */ 2039 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 2040 }; 2041 /* ultra high speed CD-RW */ 2042 static char us_clv_to_speed[16] = { 2043 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */ 2044 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0 2045 }; 2046 2047 /* 2048 * reads the maximum media speed from ATIP 2049 */ 2050 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, 2051 unsigned *speed) 2052 { 2053 struct packet_command cgc; 2054 struct request_sense sense; 2055 unsigned char buf[64]; 2056 unsigned int size, st, sp; 2057 int ret; 2058 2059 init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ); 2060 cgc.sense = &sense; 2061 cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; 2062 cgc.cmd[1] = 2; 2063 cgc.cmd[2] = 4; /* READ ATIP */ 2064 cgc.cmd[8] = 2; 2065 ret = pkt_generic_packet(pd, &cgc); 2066 if (ret) { 2067 pkt_dump_sense(&cgc); 2068 return ret; 2069 } 2070 size = ((unsigned int) buf[0]<<8) + buf[1] + 2; 2071 if (size > sizeof(buf)) 2072 size = sizeof(buf); 2073 2074 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ); 2075 cgc.sense = &sense; 2076 cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; 2077 cgc.cmd[1] = 2; 2078 cgc.cmd[2] = 4; 2079 cgc.cmd[8] = size; 2080 ret = pkt_generic_packet(pd, &cgc); 2081 if (ret) { 2082 pkt_dump_sense(&cgc); 2083 return ret; 2084 } 2085 2086 if (!(buf[6] & 0x40)) { 2087 printk(DRIVER_NAME": Disc type is not CD-RW\n"); 2088 return 1; 2089 } 2090 if (!(buf[6] & 0x4)) { 2091 printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n"); 2092 return 1; 2093 } 2094 2095 st = (buf[6] >> 3) & 0x7; /* disc sub-type */ 2096 2097 sp = buf[16] & 0xf; /* max speed from ATIP A1 field */ 2098 2099 /* Info from cdrecord */ 2100 switch (st) { 2101 case 0: /* standard speed */ 2102 *speed = clv_to_speed[sp]; 2103 break; 2104 case 1: /* high speed */ 2105 *speed = hs_clv_to_speed[sp]; 2106 break; 2107 case 2: /* ultra high speed */ 2108 *speed = us_clv_to_speed[sp]; 2109 break; 2110 default: 2111 printk(DRIVER_NAME": Unknown disc sub-type %d\n",st); 2112 return 1; 2113 } 2114 if (*speed) { 2115 printk(DRIVER_NAME": Max. 
media speed: %d\n",*speed); 2116 return 0; 2117 } else { 2118 printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n",sp,st); 2119 return 1; 2120 } 2121 } 2122 2123 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) 2124 { 2125 struct packet_command cgc; 2126 struct request_sense sense; 2127 int ret; 2128 2129 VPRINTK(DRIVER_NAME": Performing OPC\n"); 2130 2131 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); 2132 cgc.sense = &sense; 2133 cgc.timeout = 60*HZ; 2134 cgc.cmd[0] = GPCMD_SEND_OPC; 2135 cgc.cmd[1] = 1; 2136 if ((ret = pkt_generic_packet(pd, &cgc))) 2137 pkt_dump_sense(&cgc); 2138 return ret; 2139 } 2140 2141 static int pkt_open_write(struct pktcdvd_device *pd) 2142 { 2143 int ret; 2144 unsigned int write_speed, media_write_speed, read_speed; 2145 2146 if ((ret = pkt_probe_settings(pd))) { 2147 VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name); 2148 return ret; 2149 } 2150 2151 if ((ret = pkt_set_write_settings(pd))) { 2152 DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name); 2153 return -EIO; 2154 } 2155 2156 pkt_write_caching(pd, USE_WCACHING); 2157 2158 if ((ret = pkt_get_max_speed(pd, &write_speed))) 2159 write_speed = 16 * 177; 2160 switch (pd->mmc3_profile) { 2161 case 0x13: /* DVD-RW */ 2162 case 0x1a: /* DVD+RW */ 2163 case 0x12: /* DVD-RAM */ 2164 DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed); 2165 break; 2166 default: 2167 if ((ret = pkt_media_speed(pd, &media_write_speed))) 2168 media_write_speed = 16; 2169 write_speed = min(write_speed, media_write_speed * 177); 2170 DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176); 2171 break; 2172 } 2173 read_speed = write_speed; 2174 2175 if ((ret = pkt_set_speed(pd, write_speed, read_speed))) { 2176 DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name); 2177 return -EIO; 2178 } 2179 pd->write_speed = write_speed; 2180 pd->read_speed = read_speed; 2181 2182 if ((ret = pkt_perform_opc(pd))) { 2183 DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name); 2184 } 2185 2186 return 0; 2187 } 2188 2189 /* 2190 * called at open time. 2191 */ 2192 static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) 2193 { 2194 int ret; 2195 long lba; 2196 struct request_queue *q; 2197 2198 /* 2199 * We need to re-open the cdrom device without O_NONBLOCK to be able 2200 * to read/write from/to it. It is already opened in O_NONBLOCK mode 2201 * so bdget() can't fail. 2202 */ 2203 bdget(pd->bdev->bd_dev); 2204 if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd))) 2205 goto out; 2206 2207 if ((ret = pkt_get_last_written(pd, &lba))) { 2208 printk(DRIVER_NAME": pkt_get_last_written failed\n"); 2209 goto out_putdev; 2210 } 2211 2212 set_capacity(pd->disk, lba << 2); 2213 set_capacity(pd->bdev->bd_disk, lba << 2); 2214 bd_set_size(pd->bdev, (loff_t)lba << 11); 2215 2216 q = bdev_get_queue(pd->bdev); 2217 if (write) { 2218 if ((ret = pkt_open_write(pd))) 2219 goto out_putdev; 2220 /* 2221 * Some CDRW drives can not handle writes larger than one packet, 2222 * even if the size is a multiple of the packet size. 
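* That is why, while the device is open for writing, blk_queue_max_hw_sectors() is clamped
* to pd->settings.size (a single packet, in 512-byte sectors) just below.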
2223 */ 2224 spin_lock_irq(q->queue_lock); 2225 blk_queue_max_hw_sectors(q, pd->settings.size); 2226 spin_unlock_irq(q->queue_lock); 2227 set_bit(PACKET_WRITABLE, &pd->flags); 2228 } else { 2229 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); 2230 clear_bit(PACKET_WRITABLE, &pd->flags); 2231 } 2232 2233 if ((ret = pkt_set_segment_merging(pd, q))) 2234 goto out_putdev; 2235 2236 if (write) { 2237 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { 2238 printk(DRIVER_NAME": not enough memory for buffers\n"); 2239 ret = -ENOMEM; 2240 goto out_putdev; 2241 } 2242 printk(DRIVER_NAME": %lukB available on disc\n", lba << 1); 2243 } 2244 2245 return 0; 2246 2247 out_putdev: 2248 blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); 2249 out: 2250 return ret; 2251 } 2252 2253 /* 2254 * called when the device is closed. makes sure that the device flushes 2255 * the internal cache before we close. 2256 */ 2257 static void pkt_release_dev(struct pktcdvd_device *pd, int flush) 2258 { 2259 if (flush && pkt_flush_cache(pd)) 2260 DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name); 2261 2262 pkt_lock_door(pd, 0); 2263 2264 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); 2265 blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); 2266 2267 pkt_shrink_pktlist(pd); 2268 } 2269 2270 static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor) 2271 { 2272 if (dev_minor >= MAX_WRITERS) 2273 return NULL; 2274 return pkt_devs[dev_minor]; 2275 } 2276 2277 static int pkt_open(struct block_device *bdev, fmode_t mode) 2278 { 2279 struct pktcdvd_device *pd = NULL; 2280 int ret; 2281 2282 VPRINTK(DRIVER_NAME": entering open\n"); 2283 2284 mutex_lock(&pktcdvd_mutex); 2285 mutex_lock(&ctl_mutex); 2286 pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev)); 2287 if (!pd) { 2288 ret = -ENODEV; 2289 goto out; 2290 } 2291 BUG_ON(pd->refcnt < 0); 2292 2293 pd->refcnt++; 2294 if (pd->refcnt > 1) { 2295 if ((mode & FMODE_WRITE) && 2296 !test_bit(PACKET_WRITABLE, &pd->flags)) { 2297 ret = -EBUSY; 2298 goto out_dec; 2299 } 2300 } else { 2301 ret = pkt_open_dev(pd, mode & FMODE_WRITE); 2302 if (ret) 2303 goto out_dec; 2304 /* 2305 * needed here as well, since ext2 (among others) may change 2306 * the blocksize at mount time 2307 */ 2308 set_blocksize(bdev, CD_FRAMESIZE); 2309 } 2310 2311 mutex_unlock(&ctl_mutex); 2312 mutex_unlock(&pktcdvd_mutex); 2313 return 0; 2314 2315 out_dec: 2316 pd->refcnt--; 2317 out: 2318 VPRINTK(DRIVER_NAME": failed open (%d)\n", ret); 2319 mutex_unlock(&ctl_mutex); 2320 mutex_unlock(&pktcdvd_mutex); 2321 return ret; 2322 } 2323 2324 static void pkt_close(struct gendisk *disk, fmode_t mode) 2325 { 2326 struct pktcdvd_device *pd = disk->private_data; 2327 2328 mutex_lock(&pktcdvd_mutex); 2329 mutex_lock(&ctl_mutex); 2330 pd->refcnt--; 2331 BUG_ON(pd->refcnt < 0); 2332 if (pd->refcnt == 0) { 2333 int flush = test_bit(PACKET_WRITABLE, &pd->flags); 2334 pkt_release_dev(pd, flush); 2335 } 2336 mutex_unlock(&ctl_mutex); 2337 mutex_unlock(&pktcdvd_mutex); 2338 } 2339 2340 2341 static void pkt_end_io_read_cloned(struct bio *bio, int err) 2342 { 2343 struct packet_stacked_data *psd = bio->bi_private; 2344 struct pktcdvd_device *pd = psd->pd; 2345 2346 bio_put(bio); 2347 bio_endio(psd->bio, err); 2348 mempool_free(psd, psd_pool); 2349 pkt_bio_finished(pd); 2350 } 2351 2352 static void pkt_make_request(struct request_queue *q, struct bio *bio) 2353 { 2354 struct pktcdvd_device *pd; 2355 char b[BDEVNAME_SIZE]; 2356 sector_t zone; 2357 struct packet_data *pkt; 2358 int was_empty, blocked_bio; 2359 struct pkt_rb_node 
*node; 2360 2361 pd = q->queuedata; 2362 if (!pd) { 2363 printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b)); 2364 goto end_io; 2365 } 2366 2367 /* 2368 * Clone READ bios so we can have our own bi_end_io callback. 2369 */ 2370 if (bio_data_dir(bio) == READ) { 2371 struct bio *cloned_bio = bio_clone(bio, GFP_NOIO); 2372 struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO); 2373 2374 psd->pd = pd; 2375 psd->bio = bio; 2376 cloned_bio->bi_bdev = pd->bdev; 2377 cloned_bio->bi_private = psd; 2378 cloned_bio->bi_end_io = pkt_end_io_read_cloned; 2379 pd->stats.secs_r += bio_sectors(bio); 2380 pkt_queue_bio(pd, cloned_bio); 2381 return; 2382 } 2383 2384 if (!test_bit(PACKET_WRITABLE, &pd->flags)) { 2385 printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n", 2386 pd->name, (unsigned long long)bio->bi_sector); 2387 goto end_io; 2388 } 2389 2390 if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) { 2391 printk(DRIVER_NAME": wrong bio size\n"); 2392 goto end_io; 2393 } 2394 2395 blk_queue_bounce(q, &bio); 2396 2397 zone = ZONE(bio->bi_sector, pd); 2398 VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n", 2399 (unsigned long long)bio->bi_sector, 2400 (unsigned long long)bio_end_sector(bio)); 2401 2402 /* Check if we have to split the bio */ 2403 { 2404 struct bio_pair *bp; 2405 sector_t last_zone; 2406 int first_sectors; 2407 2408 last_zone = ZONE(bio_end_sector(bio) - 1, pd); 2409 if (last_zone != zone) { 2410 BUG_ON(last_zone != zone + pd->settings.size); 2411 first_sectors = last_zone - bio->bi_sector; 2412 bp = bio_split(bio, first_sectors); 2413 BUG_ON(!bp); 2414 pkt_make_request(q, &bp->bio1); 2415 pkt_make_request(q, &bp->bio2); 2416 bio_pair_release(bp); 2417 return; 2418 } 2419 } 2420 2421 /* 2422 * If we find a matching packet in state WAITING or READ_WAIT, we can 2423 * just append this bio to that packet. 2424 */ 2425 spin_lock(&pd->cdrw.active_list_lock); 2426 blocked_bio = 0; 2427 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { 2428 if (pkt->sector == zone) { 2429 spin_lock(&pkt->lock); 2430 if ((pkt->state == PACKET_WAITING_STATE) || 2431 (pkt->state == PACKET_READ_WAIT_STATE)) { 2432 bio_list_add(&pkt->orig_bios, bio); 2433 pkt->write_size += bio->bi_size / CD_FRAMESIZE; 2434 if ((pkt->write_size >= pkt->frames) && 2435 (pkt->state == PACKET_WAITING_STATE)) { 2436 atomic_inc(&pkt->run_sm); 2437 wake_up(&pd->wqueue); 2438 } 2439 spin_unlock(&pkt->lock); 2440 spin_unlock(&pd->cdrw.active_list_lock); 2441 return; 2442 } else { 2443 blocked_bio = 1; 2444 } 2445 spin_unlock(&pkt->lock); 2446 } 2447 } 2448 spin_unlock(&pd->cdrw.active_list_lock); 2449 2450 /* 2451 * Test if there is enough room left in the bio work queue 2452 * (queue size >= congestion on mark). 2453 * If not, wait till the work queue size is below the congestion off mark. 2454 */ 2455 spin_lock(&pd->lock); 2456 if (pd->write_congestion_on > 0 2457 && pd->bio_queue_size >= pd->write_congestion_on) { 2458 set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC); 2459 do { 2460 spin_unlock(&pd->lock); 2461 congestion_wait(BLK_RW_ASYNC, HZ); 2462 spin_lock(&pd->lock); 2463 } while(pd->bio_queue_size > pd->write_congestion_off); 2464 } 2465 spin_unlock(&pd->lock); 2466 2467 /* 2468 * No matching packet found. Store the bio in the work queue. 
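* The bio is wrapped in a pkt_rb_node from pd->rb_pool and inserted into the pd->bio_queue
* rb-tree under pd->lock; the worker thread is then woken up, unconditionally if the queue
* was empty, or, as a performance optimization, when free packets are available and this bio
* is not blocked behind an active packet for its zone.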
2469 */ 2470 node = mempool_alloc(pd->rb_pool, GFP_NOIO); 2471 node->bio = bio; 2472 spin_lock(&pd->lock); 2473 BUG_ON(pd->bio_queue_size < 0); 2474 was_empty = (pd->bio_queue_size == 0); 2475 pkt_rbtree_insert(pd, node); 2476 spin_unlock(&pd->lock); 2477 2478 /* 2479 * Wake up the worker thread. 2480 */ 2481 atomic_set(&pd->scan_queue, 1); 2482 if (was_empty) { 2483 /* This wake_up is required for correct operation */ 2484 wake_up(&pd->wqueue); 2485 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) { 2486 /* 2487 * This wake up is not required for correct operation, 2488 * but improves performance in some cases. 2489 */ 2490 wake_up(&pd->wqueue); 2491 } 2492 return; 2493 end_io: 2494 bio_io_error(bio); 2495 } 2496 2497 2498 2499 static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd, 2500 struct bio_vec *bvec) 2501 { 2502 struct pktcdvd_device *pd = q->queuedata; 2503 sector_t zone = ZONE(bmd->bi_sector, pd); 2504 int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size; 2505 int remaining = (pd->settings.size << 9) - used; 2506 int remaining2; 2507 2508 /* 2509 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet 2510 * boundary, pkt_make_request() will split the bio. 2511 */ 2512 remaining2 = PAGE_SIZE - bmd->bi_size; 2513 remaining = max(remaining, remaining2); 2514 2515 BUG_ON(remaining < 0); 2516 return remaining; 2517 } 2518 2519 static void pkt_init_queue(struct pktcdvd_device *pd) 2520 { 2521 struct request_queue *q = pd->disk->queue; 2522 2523 blk_queue_make_request(q, pkt_make_request); 2524 blk_queue_logical_block_size(q, CD_FRAMESIZE); 2525 blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS); 2526 blk_queue_merge_bvec(q, pkt_merge_bvec); 2527 q->queuedata = pd; 2528 } 2529 2530 static int pkt_seq_show(struct seq_file *m, void *p) 2531 { 2532 struct pktcdvd_device *pd = m->private; 2533 char *msg; 2534 char bdev_buf[BDEVNAME_SIZE]; 2535 int states[PACKET_NUM_STATES]; 2536 2537 seq_printf(m, "Writer %s mapped to %s:\n", pd->name, 2538 bdevname(pd->bdev, bdev_buf)); 2539 2540 seq_printf(m, "\nSettings:\n"); 2541 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); 2542 2543 if (pd->settings.write_type == 0) 2544 msg = "Packet"; 2545 else 2546 msg = "Unknown"; 2547 seq_printf(m, "\twrite type:\t\t%s\n", msg); 2548 2549 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? 
"Fixed" : "Variable"); 2550 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss); 2551 2552 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode); 2553 2554 if (pd->settings.block_mode == PACKET_BLOCK_MODE1) 2555 msg = "Mode 1"; 2556 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2) 2557 msg = "Mode 2"; 2558 else 2559 msg = "Unknown"; 2560 seq_printf(m, "\tblock mode:\t\t%s\n", msg); 2561 2562 seq_printf(m, "\nStatistics:\n"); 2563 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started); 2564 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended); 2565 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1); 2566 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1); 2567 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1); 2568 2569 seq_printf(m, "\nMisc:\n"); 2570 seq_printf(m, "\treference count:\t%d\n", pd->refcnt); 2571 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags); 2572 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed); 2573 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed); 2574 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset); 2575 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset); 2576 2577 seq_printf(m, "\nQueue state:\n"); 2578 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size); 2579 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios)); 2580 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector); 2581 2582 pkt_count_states(pd, states); 2583 seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", 2584 states[0], states[1], states[2], states[3], states[4], states[5]); 2585 2586 seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n", 2587 pd->write_congestion_off, 2588 pd->write_congestion_on); 2589 return 0; 2590 } 2591 2592 static int pkt_seq_open(struct inode *inode, struct file *file) 2593 { 2594 return single_open(file, pkt_seq_show, PDE_DATA(inode)); 2595 } 2596 2597 static const struct file_operations pkt_proc_fops = { 2598 .open = pkt_seq_open, 2599 .read = seq_read, 2600 .llseek = seq_lseek, 2601 .release = single_release 2602 }; 2603 2604 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) 2605 { 2606 int i; 2607 int ret = 0; 2608 char b[BDEVNAME_SIZE]; 2609 struct block_device *bdev; 2610 2611 if (pd->pkt_dev == dev) { 2612 printk(DRIVER_NAME": Recursive setup not allowed\n"); 2613 return -EBUSY; 2614 } 2615 for (i = 0; i < MAX_WRITERS; i++) { 2616 struct pktcdvd_device *pd2 = pkt_devs[i]; 2617 if (!pd2) 2618 continue; 2619 if (pd2->bdev->bd_dev == dev) { 2620 printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b)); 2621 return -EBUSY; 2622 } 2623 if (pd2->pkt_dev == dev) { 2624 printk(DRIVER_NAME": Can't chain pktcdvd devices\n"); 2625 return -EBUSY; 2626 } 2627 } 2628 2629 bdev = bdget(dev); 2630 if (!bdev) 2631 return -ENOMEM; 2632 ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL); 2633 if (ret) 2634 return ret; 2635 2636 /* This is safe, since we have a reference from open(). 
*/ 2637 __module_get(THIS_MODULE); 2638 2639 pd->bdev = bdev; 2640 set_blocksize(bdev, CD_FRAMESIZE); 2641 2642 pkt_init_queue(pd); 2643 2644 atomic_set(&pd->cdrw.pending_bios, 0); 2645 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name); 2646 if (IS_ERR(pd->cdrw.thread)) { 2647 printk(DRIVER_NAME": can't start kernel thread\n"); 2648 ret = -ENOMEM; 2649 goto out_mem; 2650 } 2651 2652 proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd); 2653 DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b)); 2654 return 0; 2655 2656 out_mem: 2657 blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); 2658 /* This is safe: open() is still holding a reference. */ 2659 module_put(THIS_MODULE); 2660 return ret; 2661 } 2662 2663 static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) 2664 { 2665 struct pktcdvd_device *pd = bdev->bd_disk->private_data; 2666 int ret; 2667 2668 VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, 2669 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); 2670 2671 mutex_lock(&pktcdvd_mutex); 2672 switch (cmd) { 2673 case CDROMEJECT: 2674 /* 2675 * The door gets locked when the device is opened, so we 2676 * have to unlock it or else the eject command fails. 2677 */ 2678 if (pd->refcnt == 1) 2679 pkt_lock_door(pd, 0); 2680 /* fallthru */ 2681 /* 2682 * forward selected CDROM ioctls to CD-ROM, for UDF 2683 */ 2684 case CDROMMULTISESSION: 2685 case CDROMREADTOCENTRY: 2686 case CDROM_LAST_WRITTEN: 2687 case CDROM_SEND_PACKET: 2688 case SCSI_IOCTL_SEND_COMMAND: 2689 ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg); 2690 break; 2691 2692 default: 2693 VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd); 2694 ret = -ENOTTY; 2695 } 2696 mutex_unlock(&pktcdvd_mutex); 2697 2698 return ret; 2699 } 2700 2701 static unsigned int pkt_check_events(struct gendisk *disk, 2702 unsigned int clearing) 2703 { 2704 struct pktcdvd_device *pd = disk->private_data; 2705 struct gendisk *attached_disk; 2706 2707 if (!pd) 2708 return 0; 2709 if (!pd->bdev) 2710 return 0; 2711 attached_disk = pd->bdev->bd_disk; 2712 if (!attached_disk || !attached_disk->fops->check_events) 2713 return 0; 2714 return attached_disk->fops->check_events(attached_disk, clearing); 2715 } 2716 2717 static const struct block_device_operations pktcdvd_ops = { 2718 .owner = THIS_MODULE, 2719 .open = pkt_open, 2720 .release = pkt_close, 2721 .ioctl = pkt_ioctl, 2722 .check_events = pkt_check_events, 2723 }; 2724 2725 static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode) 2726 { 2727 return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name); 2728 } 2729 2730 /* 2731 * Set up mapping from pktcdvd device to CD-ROM device. 
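* User space reaches this through the misc control node registered further down
* ("pktcdvd/control") with the PACKET_CTRL_CMD ioctl. A minimal sketch of the SETUP call,
* assuming fd is an open control-node descriptor and dev the encoded dev_t of the CD writer
* (error handling omitted):
*
*   struct pkt_ctrl_command c = { .command = PKT_CTRL_CMD_SETUP, .dev = dev };
*   ioctl(fd, PACKET_CTRL_CMD, &c);      // handled by pkt_ctl_ioctl() below
*   // on success, c.pkt_dev holds the encoded dev_t of the new pktcdvd device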
2732 */ 2733 static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) 2734 { 2735 int idx; 2736 int ret = -ENOMEM; 2737 struct pktcdvd_device *pd; 2738 struct gendisk *disk; 2739 2740 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 2741 2742 for (idx = 0; idx < MAX_WRITERS; idx++) 2743 if (!pkt_devs[idx]) 2744 break; 2745 if (idx == MAX_WRITERS) { 2746 printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS); 2747 ret = -EBUSY; 2748 goto out_mutex; 2749 } 2750 2751 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL); 2752 if (!pd) 2753 goto out_mutex; 2754 2755 pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE, 2756 sizeof(struct pkt_rb_node)); 2757 if (!pd->rb_pool) 2758 goto out_mem; 2759 2760 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); 2761 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list); 2762 spin_lock_init(&pd->cdrw.active_list_lock); 2763 2764 spin_lock_init(&pd->lock); 2765 spin_lock_init(&pd->iosched.lock); 2766 bio_list_init(&pd->iosched.read_queue); 2767 bio_list_init(&pd->iosched.write_queue); 2768 sprintf(pd->name, DRIVER_NAME"%d", idx); 2769 init_waitqueue_head(&pd->wqueue); 2770 pd->bio_queue = RB_ROOT; 2771 2772 pd->write_congestion_on = write_congestion_on; 2773 pd->write_congestion_off = write_congestion_off; 2774 2775 disk = alloc_disk(1); 2776 if (!disk) 2777 goto out_mem; 2778 pd->disk = disk; 2779 disk->major = pktdev_major; 2780 disk->first_minor = idx; 2781 disk->fops = &pktcdvd_ops; 2782 disk->flags = GENHD_FL_REMOVABLE; 2783 strcpy(disk->disk_name, pd->name); 2784 disk->devnode = pktcdvd_devnode; 2785 disk->private_data = pd; 2786 disk->queue = blk_alloc_queue(GFP_KERNEL); 2787 if (!disk->queue) 2788 goto out_mem2; 2789 2790 pd->pkt_dev = MKDEV(pktdev_major, idx); 2791 ret = pkt_new_dev(pd, dev); 2792 if (ret) 2793 goto out_new_dev; 2794 2795 /* inherit events of the host device */ 2796 disk->events = pd->bdev->bd_disk->events; 2797 disk->async_events = pd->bdev->bd_disk->async_events; 2798 2799 add_disk(disk); 2800 2801 pkt_sysfs_dev_new(pd); 2802 pkt_debugfs_dev_new(pd); 2803 2804 pkt_devs[idx] = pd; 2805 if (pkt_dev) 2806 *pkt_dev = pd->pkt_dev; 2807 2808 mutex_unlock(&ctl_mutex); 2809 return 0; 2810 2811 out_new_dev: 2812 blk_cleanup_queue(disk->queue); 2813 out_mem2: 2814 put_disk(disk); 2815 out_mem: 2816 if (pd->rb_pool) 2817 mempool_destroy(pd->rb_pool); 2818 kfree(pd); 2819 out_mutex: 2820 mutex_unlock(&ctl_mutex); 2821 printk(DRIVER_NAME": setup of pktcdvd device failed\n"); 2822 return ret; 2823 } 2824 2825 /* 2826 * Tear down mapping from pktcdvd device to CD-ROM device. 
2827 */ 2828 static int pkt_remove_dev(dev_t pkt_dev) 2829 { 2830 struct pktcdvd_device *pd; 2831 int idx; 2832 int ret = 0; 2833 2834 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 2835 2836 for (idx = 0; idx < MAX_WRITERS; idx++) { 2837 pd = pkt_devs[idx]; 2838 if (pd && (pd->pkt_dev == pkt_dev)) 2839 break; 2840 } 2841 if (idx == MAX_WRITERS) { 2842 DPRINTK(DRIVER_NAME": dev not setup\n"); 2843 ret = -ENXIO; 2844 goto out; 2845 } 2846 2847 if (pd->refcnt > 0) { 2848 ret = -EBUSY; 2849 goto out; 2850 } 2851 if (!IS_ERR(pd->cdrw.thread)) 2852 kthread_stop(pd->cdrw.thread); 2853 2854 pkt_devs[idx] = NULL; 2855 2856 pkt_debugfs_dev_remove(pd); 2857 pkt_sysfs_dev_remove(pd); 2858 2859 blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY); 2860 2861 remove_proc_entry(pd->name, pkt_proc); 2862 DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name); 2863 2864 del_gendisk(pd->disk); 2865 blk_cleanup_queue(pd->disk->queue); 2866 put_disk(pd->disk); 2867 2868 mempool_destroy(pd->rb_pool); 2869 kfree(pd); 2870 2871 /* This is safe: open() is still holding a reference. */ 2872 module_put(THIS_MODULE); 2873 2874 out: 2875 mutex_unlock(&ctl_mutex); 2876 return ret; 2877 } 2878 2879 static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd) 2880 { 2881 struct pktcdvd_device *pd; 2882 2883 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 2884 2885 pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index); 2886 if (pd) { 2887 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev); 2888 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev); 2889 } else { 2890 ctrl_cmd->dev = 0; 2891 ctrl_cmd->pkt_dev = 0; 2892 } 2893 ctrl_cmd->num_devices = MAX_WRITERS; 2894 2895 mutex_unlock(&ctl_mutex); 2896 } 2897 2898 static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2899 { 2900 void __user *argp = (void __user *)arg; 2901 struct pkt_ctrl_command ctrl_cmd; 2902 int ret = 0; 2903 dev_t pkt_dev = 0; 2904 2905 if (cmd != PACKET_CTRL_CMD) 2906 return -ENOTTY; 2907 2908 if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command))) 2909 return -EFAULT; 2910 2911 switch (ctrl_cmd.command) { 2912 case PKT_CTRL_CMD_SETUP: 2913 if (!capable(CAP_SYS_ADMIN)) 2914 return -EPERM; 2915 ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev); 2916 ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev); 2917 break; 2918 case PKT_CTRL_CMD_TEARDOWN: 2919 if (!capable(CAP_SYS_ADMIN)) 2920 return -EPERM; 2921 ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev)); 2922 break; 2923 case PKT_CTRL_CMD_STATUS: 2924 pkt_get_status(&ctrl_cmd); 2925 break; 2926 default: 2927 return -ENOTTY; 2928 } 2929 2930 if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command))) 2931 return -EFAULT; 2932 return ret; 2933 } 2934 2935 #ifdef CONFIG_COMPAT 2936 static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2937 { 2938 return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); 2939 } 2940 #endif 2941 2942 static const struct file_operations pkt_ctl_fops = { 2943 .open = nonseekable_open, 2944 .unlocked_ioctl = pkt_ctl_ioctl, 2945 #ifdef CONFIG_COMPAT 2946 .compat_ioctl = pkt_ctl_compat_ioctl, 2947 #endif 2948 .owner = THIS_MODULE, 2949 .llseek = no_llseek, 2950 }; 2951 2952 static struct miscdevice pkt_misc = { 2953 .minor = MISC_DYNAMIC_MINOR, 2954 .name = DRIVER_NAME, 2955 .nodename = "pktcdvd/control", 2956 .fops = &pkt_ctl_fops 2957 }; 2958 2959 static int __init pkt_init(void) 2960 { 2961 int ret; 2962 2963 mutex_init(&ctl_mutex); 2964 2965 psd_pool = 
mempool_create_kmalloc_pool(PSD_POOL_SIZE, 2966 sizeof(struct packet_stacked_data)); 2967 if (!psd_pool) 2968 return -ENOMEM; 2969 2970 ret = register_blkdev(pktdev_major, DRIVER_NAME); 2971 if (ret < 0) { 2972 printk(DRIVER_NAME": Unable to register block device\n"); 2973 goto out2; 2974 } 2975 if (!pktdev_major) 2976 pktdev_major = ret; 2977 2978 ret = pkt_sysfs_init(); 2979 if (ret) 2980 goto out; 2981 2982 pkt_debugfs_init(); 2983 2984 ret = misc_register(&pkt_misc); 2985 if (ret) { 2986 printk(DRIVER_NAME": Unable to register misc device\n"); 2987 goto out_misc; 2988 } 2989 2990 pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL); 2991 2992 return 0; 2993 2994 out_misc: 2995 pkt_debugfs_cleanup(); 2996 pkt_sysfs_cleanup(); 2997 out: 2998 unregister_blkdev(pktdev_major, DRIVER_NAME); 2999 out2: 3000 mempool_destroy(psd_pool); 3001 return ret; 3002 } 3003 3004 static void __exit pkt_exit(void) 3005 { 3006 remove_proc_entry("driver/"DRIVER_NAME, NULL); 3007 misc_deregister(&pkt_misc); 3008 3009 pkt_debugfs_cleanup(); 3010 pkt_sysfs_cleanup(); 3011 3012 unregister_blkdev(pktdev_major, DRIVER_NAME); 3013 mempool_destroy(psd_pool); 3014 } 3015 3016 MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives"); 3017 MODULE_AUTHOR("Jens Axboe <axboe@suse.de>"); 3018 MODULE_LICENSE("GPL"); 3019 3020 module_init(pkt_init); 3021 module_exit(pkt_exit); 3022