1 /*
2 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
3 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
4 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
5 *
6 * May be copied or modified under the terms of the GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
10 * DVD-RAM devices.
11 *
12 * Theory of operation:
13 *
14 * At the lowest level, there is the standard driver for the CD/DVD device,
15 * such as drivers/scsi/sr.c. This driver can handle read and write requests,
16 * but it doesn't know anything about the special restrictions that apply to
17 * packet writing. One restriction is that write requests must be aligned to
18 * packet boundaries on the physical media, and the size of a write request
19 * must be equal to the packet size. Another restriction is that a
20 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
21 * command, if the previous command was a write.
22 *
23 * The purpose of the packet writing driver is to hide these restrictions from
24 * higher layers, such as file systems, and present a block device that can be
25 * randomly read and written using 2kB-sized blocks.
26 *
27 * The lowest layer in the packet writing driver is the packet I/O scheduler.
28 * Its data is defined by the struct packet_iosched and includes two bio
29 * queues with pending read and write requests. These queues are processed
30 * by the pkt_iosched_process_queue() function. The write requests in this
31 * queue are already properly aligned and sized. This layer is responsible for
32 * issuing the flush cache commands and scheduling the I/O in a good order.
33 *
34 * The next layer transforms unaligned write requests to aligned writes. This
35 * transformation requires reading missing pieces of data from the underlying
36 * block device, assembling the pieces to full packets and queuing them to the
37 * packet I/O scheduler.
38 *
39 * At the top layer there is a custom ->submit_bio function that forwards
40 * read requests directly to the iosched queue and puts write requests in the
41 * unaligned write queue. A kernel thread performs the necessary read
42 * gathering to convert the unaligned writes to aligned writes and then feeds
43 * them to the packet I/O scheduler.
44 *
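 * As a concrete example (packet sizes vary by media; 32 kB here is only an
 * illustration): a lone 4 kB write into an otherwise untouched 32 kB zone
 * causes the driver to read the missing 28 kB of that zone, assemble the
 * full packet, and submit it to the drive as a single aligned write.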
45 *************************************************************************/
46
47 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
48
49 #include <linux/backing-dev.h>
50 #include <linux/compat.h>
51 #include <linux/debugfs.h>
52 #include <linux/device.h>
53 #include <linux/errno.h>
54 #include <linux/file.h>
55 #include <linux/freezer.h>
56 #include <linux/kernel.h>
57 #include <linux/kthread.h>
58 #include <linux/miscdevice.h>
59 #include <linux/module.h>
60 #include <linux/mutex.h>
61 #include <linux/nospec.h>
62 #include <linux/pktcdvd.h>
63 #include <linux/proc_fs.h>
64 #include <linux/seq_file.h>
65 #include <linux/slab.h>
66 #include <linux/spinlock.h>
67 #include <linux/types.h>
68 #include <linux/uaccess.h>
69
70 #include <scsi/scsi.h>
71 #include <scsi/scsi_cmnd.h>
72 #include <scsi/scsi_ioctl.h>
73
74 #include <linux/unaligned.h>
75
76 #define DRIVER_NAME "pktcdvd"
77
78 #define MAX_SPEED 0xffff
79
80 static DEFINE_MUTEX(pktcdvd_mutex);
81 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
82 static struct proc_dir_entry *pkt_proc;
83 static int pktdev_major;
84 static int write_congestion_on = PKT_WRITE_CONGESTION_ON;
85 static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
86 static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
87 static mempool_t psd_pool;
88 static struct bio_set pkt_bio_set;
89
90 /* /sys/class/pktcdvd */
91 static struct class class_pktcdvd;
92 static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
93
94 /* forward declaration */
95 static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
96 static int pkt_remove_dev(dev_t pkt_dev);
97
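/*
 * Map a sector to the start of the packet-sized zone containing it.
 * pd->settings.size is the packet size in 512-byte sectors; the mask
 * relies on that size being a power of two.  E.g. with a 64-sector
 * (32 kB) packet and a zero offset, sector 300 maps to zone 256.
 */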
98 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
99 {
100 return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
101 }
102
103 /**********************************************************
104 * sysfs interface for pktcdvd
105 * by (C) 2006 Thomas Maier <balagi@justmail.de>
106
107 /sys/class/pktcdvd/pktcdvd[0-7]/
108 stat/reset
109 stat/packets_started
110 stat/packets_finished
111 stat/kb_written
112 stat/kb_read
113 stat/kb_read_gather
114 write_queue/size
115 write_queue/congestion_off
116 write_queue/congestion_on
117 **********************************************************/
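/*
 * Example usage (device name is illustrative):
 *   cat /sys/class/pktcdvd/pktcdvd0/stat/kb_written
 *   echo 1 > /sys/class/pktcdvd/pktcdvd0/stat/reset
 */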
118
119 static ssize_t packets_started_show(struct device *dev,
120 struct device_attribute *attr, char *buf)
121 {
122 struct pktcdvd_device *pd = dev_get_drvdata(dev);
123
124 return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
125 }
126 static DEVICE_ATTR_RO(packets_started);
127
128 static ssize_t packets_finished_show(struct device *dev,
129 struct device_attribute *attr, char *buf)
130 {
131 struct pktcdvd_device *pd = dev_get_drvdata(dev);
132
133 return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
134 }
135 static DEVICE_ATTR_RO(packets_finished);
136
137 static ssize_t kb_written_show(struct device *dev,
138 struct device_attribute *attr, char *buf)
139 {
140 struct pktcdvd_device *pd = dev_get_drvdata(dev);
141
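	/* statistics are kept in 512-byte sectors; shifting right by one yields kB */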
142 return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
143 }
144 static DEVICE_ATTR_RO(kb_written);
145
146 static ssize_t kb_read_show(struct device *dev,
147 struct device_attribute *attr, char *buf)
148 {
149 struct pktcdvd_device *pd = dev_get_drvdata(dev);
150
151 return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
152 }
153 static DEVICE_ATTR_RO(kb_read);
154
155 static ssize_t kb_read_gather_show(struct device *dev,
156 struct device_attribute *attr, char *buf)
157 {
158 struct pktcdvd_device *pd = dev_get_drvdata(dev);
159
160 return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
161 }
162 static DEVICE_ATTR_RO(kb_read_gather);
163
164 static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
165 const char *buf, size_t len)
166 {
167 struct pktcdvd_device *pd = dev_get_drvdata(dev);
168
169 if (len > 0) {
170 pd->stats.pkt_started = 0;
171 pd->stats.pkt_ended = 0;
172 pd->stats.secs_w = 0;
173 pd->stats.secs_rg = 0;
174 pd->stats.secs_r = 0;
175 }
176 return len;
177 }
178 static DEVICE_ATTR_WO(reset);
179
180 static struct attribute *pkt_stat_attrs[] = {
181 &dev_attr_packets_finished.attr,
182 &dev_attr_packets_started.attr,
183 &dev_attr_kb_read.attr,
184 &dev_attr_kb_written.attr,
185 &dev_attr_kb_read_gather.attr,
186 &dev_attr_reset.attr,
187 NULL,
188 };
189
190 static const struct attribute_group pkt_stat_group = {
191 .name = "stat",
192 .attrs = pkt_stat_attrs,
193 };
194
195 static ssize_t size_show(struct device *dev,
196 struct device_attribute *attr, char *buf)
197 {
198 struct pktcdvd_device *pd = dev_get_drvdata(dev);
199 int n;
200
201 spin_lock(&pd->lock);
202 n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
203 spin_unlock(&pd->lock);
204 return n;
205 }
206 static DEVICE_ATTR_RO(size);
207
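/*
 * Sanitize the congestion marks: a positive "on" mark is clamped to the
 * range [500, 1000000] and the "off" mark ends up between 100 and 100
 * below the "on" mark; a non-positive "on" mark disables congestion
 * handling by setting both marks to -1.
 */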
208 static void init_write_congestion_marks(int* lo, int* hi)
209 {
210 if (*hi > 0) {
211 *hi = max(*hi, 500);
212 *hi = min(*hi, 1000000);
213 if (*lo <= 0)
214 *lo = *hi - 100;
215 else {
216 *lo = min(*lo, *hi - 100);
217 *lo = max(*lo, 100);
218 }
219 } else {
220 *hi = -1;
221 *lo = -1;
222 }
223 }
224
225 static ssize_t congestion_off_show(struct device *dev,
226 struct device_attribute *attr, char *buf)
227 {
228 struct pktcdvd_device *pd = dev_get_drvdata(dev);
229 int n;
230
231 spin_lock(&pd->lock);
232 n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
233 spin_unlock(&pd->lock);
234 return n;
235 }
236
237 static ssize_t congestion_off_store(struct device *dev,
238 struct device_attribute *attr,
239 const char *buf, size_t len)
240 {
241 struct pktcdvd_device *pd = dev_get_drvdata(dev);
242 int val, ret;
243
244 ret = kstrtoint(buf, 10, &val);
245 if (ret)
246 return ret;
247
248 spin_lock(&pd->lock);
249 pd->write_congestion_off = val;
250 init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
251 spin_unlock(&pd->lock);
252 return len;
253 }
254 static DEVICE_ATTR_RW(congestion_off);
255
256 static ssize_t congestion_on_show(struct device *dev,
257 struct device_attribute *attr, char *buf)
258 {
259 struct pktcdvd_device *pd = dev_get_drvdata(dev);
260 int n;
261
262 spin_lock(&pd->lock);
263 n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
264 spin_unlock(&pd->lock);
265 return n;
266 }
267
268 static ssize_t congestion_on_store(struct device *dev,
269 struct device_attribute *attr,
270 const char *buf, size_t len)
271 {
272 struct pktcdvd_device *pd = dev_get_drvdata(dev);
273 int val, ret;
274
275 ret = kstrtoint(buf, 10, &val);
276 if (ret)
277 return ret;
278
279 spin_lock(&pd->lock);
280 pd->write_congestion_on = val;
281 init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
282 spin_unlock(&pd->lock);
283 return len;
284 }
285 static DEVICE_ATTR_RW(congestion_on);
286
287 static struct attribute *pkt_wq_attrs[] = {
288 &dev_attr_congestion_on.attr,
289 &dev_attr_congestion_off.attr,
290 &dev_attr_size.attr,
291 NULL,
292 };
293
294 static const struct attribute_group pkt_wq_group = {
295 .name = "write_queue",
296 .attrs = pkt_wq_attrs,
297 };
298
299 static const struct attribute_group *pkt_groups[] = {
300 &pkt_stat_group,
301 &pkt_wq_group,
302 NULL,
303 };
304
305 static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
306 {
307 if (class_is_registered(&class_pktcdvd)) {
308 pd->dev = device_create_with_groups(&class_pktcdvd, NULL,
309 MKDEV(0, 0), pd, pkt_groups,
310 "%s", pd->disk->disk_name);
311 if (IS_ERR(pd->dev))
312 pd->dev = NULL;
313 }
314 }
315
316 static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
317 {
318 if (class_is_registered(&class_pktcdvd))
319 device_unregister(pd->dev);
320 }
321
322
323 /********************************************************************
324 /sys/class/pktcdvd/
325 add map block device
326 remove unmap packet dev
327 device_map show mappings
328 *******************************************************************/
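/*
 * Example: map sr0 (11:0) and later tear the mapping down again; the
 * packet device's own major:minor can be read back from device_map:
 *   echo "11:0" > /sys/class/pktcdvd/add
 *   cat /sys/class/pktcdvd/device_map
 *   echo "<pkt_major>:<pkt_minor>" > /sys/class/pktcdvd/remove
 */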
329
330 static ssize_t device_map_show(const struct class *c, const struct class_attribute *attr,
331 char *data)
332 {
333 int n = 0;
334 int idx;
335 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
336 for (idx = 0; idx < MAX_WRITERS; idx++) {
337 struct pktcdvd_device *pd = pkt_devs[idx];
338 if (!pd)
339 continue;
340 n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
341 pd->disk->disk_name,
342 MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
343 MAJOR(file_bdev(pd->bdev_file)->bd_dev),
344 MINOR(file_bdev(pd->bdev_file)->bd_dev));
345 }
346 mutex_unlock(&ctl_mutex);
347 return n;
348 }
349 static CLASS_ATTR_RO(device_map);
350
351 static ssize_t add_store(const struct class *c, const struct class_attribute *attr,
352 const char *buf, size_t count)
353 {
354 unsigned int major, minor;
355
356 if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
357 /* pkt_setup_dev() expects caller to hold reference to self */
358 if (!try_module_get(THIS_MODULE))
359 return -ENODEV;
360
361 pkt_setup_dev(MKDEV(major, minor), NULL);
362
363 module_put(THIS_MODULE);
364
365 return count;
366 }
367
368 return -EINVAL;
369 }
370 static CLASS_ATTR_WO(add);
371
372 static ssize_t remove_store(const struct class *c, const struct class_attribute *attr,
373 const char *buf, size_t count)
374 {
375 unsigned int major, minor;
376 if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
377 pkt_remove_dev(MKDEV(major, minor));
378 return count;
379 }
380 return -EINVAL;
381 }
382 static CLASS_ATTR_WO(remove);
383
384 static struct attribute *class_pktcdvd_attrs[] = {
385 &class_attr_add.attr,
386 &class_attr_remove.attr,
387 &class_attr_device_map.attr,
388 NULL,
389 };
390 ATTRIBUTE_GROUPS(class_pktcdvd);
391
392 static struct class class_pktcdvd = {
393 .name = DRIVER_NAME,
394 .class_groups = class_pktcdvd_groups,
395 };
396
397 static int pkt_sysfs_init(void)
398 {
399 /*
400 * create control files in sysfs
401 * /sys/class/pktcdvd/...
402 */
403 return class_register(&class_pktcdvd);
404 }
405
406 static void pkt_sysfs_cleanup(void)
407 {
408 class_unregister(&class_pktcdvd);
409 }
410
411 /********************************************************************
412 entries in debugfs
413
414 /sys/kernel/debug/pktcdvd[0-7]/
415 info
416
417 *******************************************************************/
418
419 static void pkt_count_states(struct pktcdvd_device *pd, int *states)
420 {
421 struct packet_data *pkt;
422 int i;
423
424 for (i = 0; i < PACKET_NUM_STATES; i++)
425 states[i] = 0;
426
427 spin_lock(&pd->cdrw.active_list_lock);
428 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
429 states[pkt->state]++;
430 }
431 spin_unlock(&pd->cdrw.active_list_lock);
432 }
433
434 static int pkt_seq_show(struct seq_file *m, void *p)
435 {
436 struct pktcdvd_device *pd = m->private;
437 char *msg;
438 int states[PACKET_NUM_STATES];
439
440 seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name,
441 file_bdev(pd->bdev_file));
442
443 seq_printf(m, "\nSettings:\n");
444 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
445
446 if (pd->settings.write_type == 0)
447 msg = "Packet";
448 else
449 msg = "Unknown";
450 seq_printf(m, "\twrite type:\t\t%s\n", msg);
451
452 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
453 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
454
455 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
456
457 if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
458 msg = "Mode 1";
459 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
460 msg = "Mode 2";
461 else
462 msg = "Unknown";
463 seq_printf(m, "\tblock mode:\t\t%s\n", msg);
464
465 seq_printf(m, "\nStatistics:\n");
466 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
467 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
468 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
469 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
470 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
471
472 seq_printf(m, "\nMisc:\n");
473 seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
474 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
475 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
476 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
477 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
478 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
479
480 seq_printf(m, "\nQueue state:\n");
481 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
482 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
483 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", pd->current_sector);
484
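	/*
	 * i/ow/rw/ww/rec/fin below correspond, in order, to the IDLE,
	 * WAITING, READ_WAIT, WRITE_WAIT, RECOVERY and FINISHED packet
	 * states (see pkt_set_state()).
	 */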
485 pkt_count_states(pd, states);
486 seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
487 states[0], states[1], states[2], states[3], states[4], states[5]);
488
489 seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
490 pd->write_congestion_off,
491 pd->write_congestion_on);
492 return 0;
493 }
494 DEFINE_SHOW_ATTRIBUTE(pkt_seq);
495
496 static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
497 {
498 if (!pkt_debugfs_root)
499 return;
500 pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root);
501
502 pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root,
503 pd, &pkt_seq_fops);
504 }
505
506 static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
507 {
508 if (!pkt_debugfs_root)
509 return;
510 debugfs_remove(pd->dfs_f_info);
511 debugfs_remove(pd->dfs_d_root);
512 pd->dfs_f_info = NULL;
513 pd->dfs_d_root = NULL;
514 }
515
516 static void pkt_debugfs_init(void)
517 {
518 pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
519 }
520
521 static void pkt_debugfs_cleanup(void)
522 {
523 debugfs_remove(pkt_debugfs_root);
524 pkt_debugfs_root = NULL;
525 }
526
527 /* ----------------------------------------------------------*/
528
529
530 static void pkt_bio_finished(struct pktcdvd_device *pd)
531 {
532 struct device *ddev = disk_to_dev(pd->disk);
533
534 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
535 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
536 dev_dbg(ddev, "queue empty\n");
537 atomic_set(&pd->iosched.attention, 1);
538 wake_up(&pd->wqueue);
539 }
540 }
541
542 /*
543 * Allocate a packet_data struct
544 */
545 static struct packet_data *pkt_alloc_packet_data(int frames)
546 {
547 int i;
548 struct packet_data *pkt;
549
550 pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
551 if (!pkt)
552 goto no_pkt;
553
554 pkt->frames = frames;
555 pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
556 if (!pkt->w_bio)
557 goto no_bio;
558
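	/*
	 * Frames are CD_FRAMESIZE (2 kB) each and are packed FRAMES_PER_PAGE
	 * to a page, so frames / FRAMES_PER_PAGE pages back the packet payload.
	 */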
559 for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
560 pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
561 if (!pkt->pages[i])
562 goto no_page;
563 }
564
565 spin_lock_init(&pkt->lock);
566 bio_list_init(&pkt->orig_bios);
567
568 for (i = 0; i < frames; i++) {
569 pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
570 if (!pkt->r_bios[i])
571 goto no_rd_bio;
572 }
573
574 return pkt;
575
576 no_rd_bio:
577 for (i = 0; i < frames; i++)
578 kfree(pkt->r_bios[i]);
579 no_page:
580 for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
581 if (pkt->pages[i])
582 __free_page(pkt->pages[i]);
583 kfree(pkt->w_bio);
584 no_bio:
585 kfree(pkt);
586 no_pkt:
587 return NULL;
588 }
589
590 /*
591 * Free a packet_data struct
592 */
593 static void pkt_free_packet_data(struct packet_data *pkt)
594 {
595 int i;
596
597 for (i = 0; i < pkt->frames; i++)
598 kfree(pkt->r_bios[i]);
599 for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
600 __free_page(pkt->pages[i]);
601 kfree(pkt->w_bio);
602 kfree(pkt);
603 }
604
605 static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
606 {
607 struct packet_data *pkt, *next;
608
609 BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
610
611 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
612 pkt_free_packet_data(pkt);
613 }
614 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
615 }
616
617 static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
618 {
619 struct packet_data *pkt;
620
621 BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
622
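	/* settings.size is in 512-byte sectors; >> 2 gives the number of 2 kB frames per packet */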
623 while (nr_packets > 0) {
624 pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
625 if (!pkt) {
626 pkt_shrink_pktlist(pd);
627 return 0;
628 }
629 pkt->id = nr_packets;
630 pkt->pd = pd;
631 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
632 nr_packets--;
633 }
634 return 1;
635 }
636
637 static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
638 {
639 struct rb_node *n = rb_next(&node->rb_node);
640 if (!n)
641 return NULL;
642 return rb_entry(n, struct pkt_rb_node, rb_node);
643 }
644
645 static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
646 {
647 rb_erase(&node->rb_node, &pd->bio_queue);
648 mempool_free(node, &pd->rb_pool);
649 pd->bio_queue_size--;
650 BUG_ON(pd->bio_queue_size < 0);
651 }
652
653 /*
654 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
655 */
656 static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
657 {
658 struct rb_node *n = pd->bio_queue.rb_node;
659 struct rb_node *next;
660 struct pkt_rb_node *tmp;
661
662 if (!n) {
663 BUG_ON(pd->bio_queue_size > 0);
664 return NULL;
665 }
666
667 for (;;) {
668 tmp = rb_entry(n, struct pkt_rb_node, rb_node);
669 if (s <= tmp->bio->bi_iter.bi_sector)
670 next = n->rb_left;
671 else
672 next = n->rb_right;
673 if (!next)
674 break;
675 n = next;
676 }
677
678 if (s > tmp->bio->bi_iter.bi_sector) {
679 tmp = pkt_rbtree_next(tmp);
680 if (!tmp)
681 return NULL;
682 }
683 BUG_ON(s > tmp->bio->bi_iter.bi_sector);
684 return tmp;
685 }
686
687 /*
688 * Insert a node into the pd->bio_queue rb tree.
689 */
690 static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
691 {
692 struct rb_node **p = &pd->bio_queue.rb_node;
693 struct rb_node *parent = NULL;
694 sector_t s = node->bio->bi_iter.bi_sector;
695 struct pkt_rb_node *tmp;
696
697 while (*p) {
698 parent = *p;
699 tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
700 if (s < tmp->bio->bi_iter.bi_sector)
701 p = &(*p)->rb_left;
702 else
703 p = &(*p)->rb_right;
704 }
705 rb_link_node(&node->rb_node, parent, p);
706 rb_insert_color(&node->rb_node, &pd->bio_queue);
707 pd->bio_queue_size++;
708 }
709
710 /*
711 * Send a packet_command to the underlying block device and
712 * wait for completion.
713 */
714 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
715 {
716 struct request_queue *q = bdev_get_queue(file_bdev(pd->bdev_file));
717 struct scsi_cmnd *scmd;
718 struct request *rq;
719 int ret = 0;
720
721 rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
722 REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
723 if (IS_ERR(rq))
724 return PTR_ERR(rq);
725 scmd = blk_mq_rq_to_pdu(rq);
726
727 if (cgc->buflen) {
728 ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
729 GFP_NOIO);
730 if (ret)
731 goto out;
732 }
733
734 scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
735 memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);
736
737 rq->timeout = 60*HZ;
738 if (cgc->quiet)
739 rq->rq_flags |= RQF_QUIET;
740
741 blk_execute_rq(rq, false);
742 if (scmd->result)
743 ret = -EIO;
744 out:
745 blk_mq_free_request(rq);
746 return ret;
747 }
748
749 static const char *sense_key_string(__u8 index)
750 {
751 static const char * const info[] = {
752 "No sense", "Recovered error", "Not ready",
753 "Medium error", "Hardware error", "Illegal request",
754 "Unit attention", "Data protect", "Blank check",
755 };
756
757 return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
758 }
759
760 /*
761 * A generic sense dump / resolve mechanism should be implemented across
762 * all ATAPI + SCSI devices.
763 */
764 static void pkt_dump_sense(struct pktcdvd_device *pd,
765 struct packet_command *cgc)
766 {
767 struct device *ddev = disk_to_dev(pd->disk);
768 struct scsi_sense_hdr *sshdr = cgc->sshdr;
769
770 if (sshdr)
771 dev_err(ddev, "%*ph - sense %02x.%02x.%02x (%s)\n",
772 CDROM_PACKET_SIZE, cgc->cmd,
773 sshdr->sense_key, sshdr->asc, sshdr->ascq,
774 sense_key_string(sshdr->sense_key));
775 else
776 dev_err(ddev, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
777 }
778
779 /*
780 * flush the drive cache to media
781 */
782 static int pkt_flush_cache(struct pktcdvd_device *pd)
783 {
784 struct packet_command cgc;
785
786 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
787 cgc.cmd[0] = GPCMD_FLUSH_CACHE;
788 cgc.quiet = 1;
789
790 /*
791 * the IMMED bit -- we default to not setting it, although that
792 * would allow a much faster close, this is safer
793 */
794 #if 0
795 cgc.cmd[1] = 1 << 1;
796 #endif
797 return pkt_generic_packet(pd, &cgc);
798 }
799
800 /*
801 * speed is given as the normal factor, e.g. 4 for 4x
802 */
803 static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
804 unsigned write_speed, unsigned read_speed)
805 {
806 struct packet_command cgc;
807 struct scsi_sense_hdr sshdr;
808 int ret;
809
810 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
811 cgc.sshdr = &sshdr;
812 cgc.cmd[0] = GPCMD_SET_SPEED;
813 put_unaligned_be16(read_speed, &cgc.cmd[2]);
814 put_unaligned_be16(write_speed, &cgc.cmd[4]);
815
816 ret = pkt_generic_packet(pd, &cgc);
817 if (ret)
818 pkt_dump_sense(pd, &cgc);
819
820 return ret;
821 }
822
823 /*
824 * Queue a bio for processing by the low-level CD device. Must be called
825 * from process context.
826 */
827 static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
828 {
829 /*
830 * Some CDRW drives can not handle writes larger than one packet,
831 * even if the size is a multiple of the packet size.
832 */
833 bio->bi_opf |= REQ_NOMERGE;
834
835 spin_lock(&pd->iosched.lock);
836 if (bio_data_dir(bio) == READ)
837 bio_list_add(&pd->iosched.read_queue, bio);
838 else
839 bio_list_add(&pd->iosched.write_queue, bio);
840 spin_unlock(&pd->iosched.lock);
841
842 atomic_set(&pd->iosched.attention, 1);
843 wake_up(&pd->wqueue);
844 }
845
846 /*
847 * Process the queued read/write requests. This function handles special
848 * requirements for CDRW drives:
849 * - A cache flush command must be inserted before a read request if the
850 * previous request was a write.
851 * - Switching between reading and writing is slow, so don't do it more often
852 * than necessary.
853 * - Optimize for throughput at the expense of latency. This means that streaming
854 * writes will never be interrupted by a read, but if the drive has to seek
855 * before the next write, switch to reading instead if there are any pending
856 * read requests.
857 * - Set the read speed according to current usage pattern. When only reading
858 * from the device, it's best to use the highest possible read speed, but
859 * when switching often between reading and writing, it's better to have the
860 * same read and write speeds.
861 */
862 static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
863 {
864 struct device *ddev = disk_to_dev(pd->disk);
865
866 if (atomic_read(&pd->iosched.attention) == 0)
867 return;
868 atomic_set(&pd->iosched.attention, 0);
869
870 for (;;) {
871 struct bio *bio;
872 int reads_queued, writes_queued;
873
874 spin_lock(&pd->iosched.lock);
875 reads_queued = !bio_list_empty(&pd->iosched.read_queue);
876 writes_queued = !bio_list_empty(&pd->iosched.write_queue);
877 spin_unlock(&pd->iosched.lock);
878
879 if (!reads_queued && !writes_queued)
880 break;
881
882 if (pd->iosched.writing) {
883 int need_write_seek = 1;
884 spin_lock(&pd->iosched.lock);
885 bio = bio_list_peek(&pd->iosched.write_queue);
886 spin_unlock(&pd->iosched.lock);
887 if (bio && (bio->bi_iter.bi_sector ==
888 pd->iosched.last_write))
889 need_write_seek = 0;
890 if (need_write_seek && reads_queued) {
891 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
892 dev_dbg(ddev, "write, waiting\n");
893 break;
894 }
895 pkt_flush_cache(pd);
896 pd->iosched.writing = 0;
897 }
898 } else {
899 if (!reads_queued && writes_queued) {
900 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
901 dev_dbg(ddev, "read, waiting\n");
902 break;
903 }
904 pd->iosched.writing = 1;
905 }
906 }
907
908 spin_lock(&pd->iosched.lock);
909 if (pd->iosched.writing)
910 bio = bio_list_pop(&pd->iosched.write_queue);
911 else
912 bio = bio_list_pop(&pd->iosched.read_queue);
913 spin_unlock(&pd->iosched.lock);
914
915 if (!bio)
916 continue;
917
918 if (bio_data_dir(bio) == READ)
919 pd->iosched.successive_reads +=
920 bio->bi_iter.bi_size >> 10;
921 else {
922 pd->iosched.successive_reads = 0;
923 pd->iosched.last_write = bio_end_sector(bio);
924 }
925 if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
926 if (pd->read_speed == pd->write_speed) {
927 pd->read_speed = MAX_SPEED;
928 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
929 }
930 } else {
931 if (pd->read_speed != pd->write_speed) {
932 pd->read_speed = pd->write_speed;
933 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
934 }
935 }
936
937 atomic_inc(&pd->cdrw.pending_bios);
938 submit_bio_noacct(bio);
939 }
940 }
941
942 /*
943 * Special care is needed if the underlying block device has a small
944 * max_phys_segments value.
945 */
946 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
947 {
948 struct device *ddev = disk_to_dev(pd->disk);
949
950 if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) {
951 /*
952 * The cdrom device can handle one segment/frame
953 */
954 clear_bit(PACKET_MERGE_SEGS, &pd->flags);
955 return 0;
956 }
957
958 if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) {
959 /*
960 * We can handle this case at the expense of some extra memory
961 * copies during write operations
962 */
963 set_bit(PACKET_MERGE_SEGS, &pd->flags);
964 return 0;
965 }
966
967 dev_err(ddev, "cdrom max_phys_segments too small\n");
968 return -EIO;
969 }
970
971 static void pkt_end_io_read(struct bio *bio)
972 {
973 struct packet_data *pkt = bio->bi_private;
974 struct pktcdvd_device *pd = pkt->pd;
975 BUG_ON(!pd);
976
977 dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n",
978 bio, pkt->sector, bio->bi_iter.bi_sector, bio->bi_status);
979
980 if (bio->bi_status)
981 atomic_inc(&pkt->io_errors);
982 bio_uninit(bio);
983 if (atomic_dec_and_test(&pkt->io_wait)) {
984 atomic_inc(&pkt->run_sm);
985 wake_up(&pd->wqueue);
986 }
987 pkt_bio_finished(pd);
988 }
989
990 static void pkt_end_io_packet_write(struct bio *bio)
991 {
992 struct packet_data *pkt = bio->bi_private;
993 struct pktcdvd_device *pd = pkt->pd;
994 BUG_ON(!pd);
995
996 dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status);
997
998 pd->stats.pkt_ended++;
999
1000 bio_uninit(bio);
1001 pkt_bio_finished(pd);
1002 atomic_dec(&pkt->io_wait);
1003 atomic_inc(&pkt->run_sm);
1004 wake_up(&pd->wqueue);
1005 }
1006
1007 /*
1008 * Schedule reads for the holes in a packet
1009 */
1010 static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1011 {
1012 struct device *ddev = disk_to_dev(pd->disk);
1013 int frames_read = 0;
1014 struct bio *bio;
1015 int f;
1016 char written[PACKET_MAX_SIZE];
1017
1018 BUG_ON(bio_list_empty(&pkt->orig_bios));
1019
1020 atomic_set(&pkt->io_wait, 0);
1021 atomic_set(&pkt->io_errors, 0);
1022
1023 /*
1024 * Figure out which frames we need to read before we can write.
1025 */
1026 memset(written, 0, sizeof(written));
1027 spin_lock(&pkt->lock);
1028 bio_list_for_each(bio, &pkt->orig_bios) {
1029 int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
1030 (CD_FRAMESIZE >> 9);
1031 int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
1032 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
1033 BUG_ON(first_frame < 0);
1034 BUG_ON(first_frame + num_frames > pkt->frames);
1035 for (f = first_frame; f < first_frame + num_frames; f++)
1036 written[f] = 1;
1037 }
1038 spin_unlock(&pkt->lock);
1039
1040 if (pkt->cache_valid) {
1041 dev_dbg(ddev, "zone %llx cached\n", pkt->sector);
1042 goto out_account;
1043 }
1044
1045 /*
1046 * Schedule reads for missing parts of the packet.
1047 */
1048 for (f = 0; f < pkt->frames; f++) {
1049 int p, offset;
1050
1051 if (written[f])
1052 continue;
1053
1054 bio = pkt->r_bios[f];
1055 bio_init(bio, file_bdev(pd->bdev_file), bio->bi_inline_vecs, 1,
1056 REQ_OP_READ);
1057 bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
1058 bio->bi_end_io = pkt_end_io_read;
1059 bio->bi_private = pkt;
1060
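		/* locate the backing page and in-page offset for frame f */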
1061 p = (f * CD_FRAMESIZE) / PAGE_SIZE;
1062 offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
1063 dev_dbg(ddev, "Adding frame %d, page:%p offs:%d\n", f,
1064 pkt->pages[p], offset);
1065 if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
1066 BUG();
1067
1068 atomic_inc(&pkt->io_wait);
1069 pkt_queue_bio(pd, bio);
1070 frames_read++;
1071 }
1072
1073 out_account:
1074 dev_dbg(ddev, "need %d frames for zone %llx\n", frames_read, pkt->sector);
1075 pd->stats.pkt_started++;
1076 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
1077 }
1078
1079 /*
1080 * Find a packet matching zone, or the least recently used packet if
1081 * there is no match.
1082 */
1083 static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
1084 {
1085 struct packet_data *pkt;
1086
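	/*
	 * Prefer a free packet whose cached data already covers this zone;
	 * failing that, the tail of the free list is taken as the least
	 * recently used packet (pkt_put_packet_data() queues packets without
	 * valid cached data at the tail).
	 */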
1087 list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
1088 if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
1089 list_del_init(&pkt->list);
1090 if (pkt->sector != zone)
1091 pkt->cache_valid = 0;
1092 return pkt;
1093 }
1094 }
1095 BUG();
1096 return NULL;
1097 }
1098
1099 static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1100 {
1101 if (pkt->cache_valid) {
1102 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
1103 } else {
1104 list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
1105 }
1106 }
1107
1108 static inline void pkt_set_state(struct device *ddev, struct packet_data *pkt,
1109 enum packet_data_state state)
1110 {
1111 static const char *state_name[] = {
1112 "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
1113 };
1114 enum packet_data_state old_state = pkt->state;
1115
1116 dev_dbg(ddev, "pkt %2d : s=%6llx %s -> %s\n",
1117 pkt->id, pkt->sector, state_name[old_state], state_name[state]);
1118
1119 pkt->state = state;
1120 }
1121
1122 /*
1123 * Scan the work queue to see if we can start a new packet.
1124 * returns non-zero if any work was done.
1125 */
1126 static int pkt_handle_queue(struct pktcdvd_device *pd)
1127 {
1128 struct device *ddev = disk_to_dev(pd->disk);
1129 struct packet_data *pkt, *p;
1130 struct bio *bio = NULL;
1131 sector_t zone = 0; /* Suppress gcc warning */
1132 struct pkt_rb_node *node, *first_node;
1133 struct rb_node *n;
1134
1135 atomic_set(&pd->scan_queue, 0);
1136
1137 if (list_empty(&pd->cdrw.pkt_free_list)) {
1138 dev_dbg(ddev, "no pkt\n");
1139 return 0;
1140 }
1141
1142 /*
1143 * Try to find a zone we are not already working on.
1144 */
1145 spin_lock(&pd->lock);
1146 first_node = pkt_rbtree_find(pd, pd->current_sector);
1147 if (!first_node) {
1148 n = rb_first(&pd->bio_queue);
1149 if (n)
1150 first_node = rb_entry(n, struct pkt_rb_node, rb_node);
1151 }
1152 node = first_node;
1153 while (node) {
1154 bio = node->bio;
1155 zone = get_zone(bio->bi_iter.bi_sector, pd);
1156 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
1157 if (p->sector == zone) {
1158 bio = NULL;
1159 goto try_next_bio;
1160 }
1161 }
1162 break;
1163 try_next_bio:
1164 node = pkt_rbtree_next(node);
1165 if (!node) {
1166 n = rb_first(&pd->bio_queue);
1167 if (n)
1168 node = rb_entry(n, struct pkt_rb_node, rb_node);
1169 }
1170 if (node == first_node)
1171 node = NULL;
1172 }
1173 spin_unlock(&pd->lock);
1174 if (!bio) {
1175 dev_dbg(ddev, "no bio\n");
1176 return 0;
1177 }
1178
1179 pkt = pkt_get_packet_data(pd, zone);
1180
1181 pd->current_sector = zone + pd->settings.size;
1182 pkt->sector = zone;
1183 BUG_ON(pkt->frames != pd->settings.size >> 2);
1184 pkt->write_size = 0;
1185
1186 /*
1187 * Scan work queue for bios in the same zone and link them
1188 * to this packet.
1189 */
1190 spin_lock(&pd->lock);
1191 dev_dbg(ddev, "looking for zone %llx\n", zone);
1192 while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
1193 sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd);
1194
1195 bio = node->bio;
1196 dev_dbg(ddev, "found zone=%llx\n", tmp);
1197 if (tmp != zone)
1198 break;
1199 pkt_rbtree_erase(pd, node);
1200 spin_lock(&pkt->lock);
1201 bio_list_add(&pkt->orig_bios, bio);
1202 pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
1203 spin_unlock(&pkt->lock);
1204 }
1205 /* check write congestion marks, and if bio_queue_size is
1206 * below, wake up any waiters
1207 */
1208 if (pd->congested &&
1209 pd->bio_queue_size <= pd->write_congestion_off) {
1210 pd->congested = false;
1211 wake_up_var(&pd->congested);
1212 }
1213 spin_unlock(&pd->lock);
1214
1215 pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
1216 pkt_set_state(ddev, pkt, PACKET_WAITING_STATE);
1217 atomic_set(&pkt->run_sm, 1);
1218
1219 spin_lock(&pd->cdrw.active_list_lock);
1220 list_add(&pkt->list, &pd->cdrw.pkt_active_list);
1221 spin_unlock(&pd->cdrw.active_list_lock);
1222
1223 return 1;
1224 }
1225
1226 /**
1227 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
1228 * another
1229  * @dst: destination bio list
1230  * @src: source bio list
1231 *
1232 * Stops when it reaches the end of either the @src list or @dst list - that is,
1233 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
1234 * bios).
1235 */
1236 static void bio_list_copy_data(struct bio *dst, struct bio *src)
1237 {
1238 struct bvec_iter src_iter = src->bi_iter;
1239 struct bvec_iter dst_iter = dst->bi_iter;
1240
1241 while (1) {
1242 if (!src_iter.bi_size) {
1243 src = src->bi_next;
1244 if (!src)
1245 break;
1246
1247 src_iter = src->bi_iter;
1248 }
1249
1250 if (!dst_iter.bi_size) {
1251 dst = dst->bi_next;
1252 if (!dst)
1253 break;
1254
1255 dst_iter = dst->bi_iter;
1256 }
1257
1258 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1259 }
1260 }
1261
1262 /*
1263 * Assemble a bio to write one packet and queue the bio for processing
1264 * by the underlying block device.
1265 */
1266 static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
1267 {
1268 struct device *ddev = disk_to_dev(pd->disk);
1269 int f;
1270
1271 bio_init(pkt->w_bio, file_bdev(pd->bdev_file), pkt->w_bio->bi_inline_vecs,
1272 pkt->frames, REQ_OP_WRITE);
1273 pkt->w_bio->bi_iter.bi_sector = pkt->sector;
1274 pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
1275 pkt->w_bio->bi_private = pkt;
1276
1277 /* XXX: locking? */
1278 for (f = 0; f < pkt->frames; f++) {
1279 struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
1280 unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
1281
1282 if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
1283 BUG();
1284 }
1285 dev_dbg(ddev, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
1286
1287 /*
1288 * Fill-in bvec with data from orig_bios.
1289 */
1290 spin_lock(&pkt->lock);
1291 bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
1292
1293 pkt_set_state(ddev, pkt, PACKET_WRITE_WAIT_STATE);
1294 spin_unlock(&pkt->lock);
1295
1296 dev_dbg(ddev, "Writing %d frames for zone %llx\n", pkt->write_size, pkt->sector);
1297
1298 if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
1299 pkt->cache_valid = 1;
1300 else
1301 pkt->cache_valid = 0;
1302
1303 /* Start the write request */
1304 atomic_set(&pkt->io_wait, 1);
1305 pkt_queue_bio(pd, pkt->w_bio);
1306 }
1307
1308 static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
1309 {
1310 struct bio *bio;
1311
1312 if (status)
1313 pkt->cache_valid = 0;
1314
1315 /* Finish all bios corresponding to this packet */
1316 while ((bio = bio_list_pop(&pkt->orig_bios))) {
1317 bio->bi_status = status;
1318 bio_endio(bio);
1319 }
1320 }
1321
1322 static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
1323 {
1324 struct device *ddev = disk_to_dev(pd->disk);
1325
1326 dev_dbg(ddev, "pkt %d\n", pkt->id);
1327
1328 for (;;) {
1329 switch (pkt->state) {
1330 case PACKET_WAITING_STATE:
1331 if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
1332 return;
1333
1334 pkt->sleep_time = 0;
1335 pkt_gather_data(pd, pkt);
1336 pkt_set_state(ddev, pkt, PACKET_READ_WAIT_STATE);
1337 break;
1338
1339 case PACKET_READ_WAIT_STATE:
1340 if (atomic_read(&pkt->io_wait) > 0)
1341 return;
1342
1343 if (atomic_read(&pkt->io_errors) > 0) {
1344 pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
1345 } else {
1346 pkt_start_write(pd, pkt);
1347 }
1348 break;
1349
1350 case PACKET_WRITE_WAIT_STATE:
1351 if (atomic_read(&pkt->io_wait) > 0)
1352 return;
1353
1354 if (!pkt->w_bio->bi_status) {
1355 pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
1356 } else {
1357 pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
1358 }
1359 break;
1360
1361 case PACKET_RECOVERY_STATE:
1362 dev_dbg(ddev, "No recovery possible\n");
1363 pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
1364 break;
1365
1366 case PACKET_FINISHED_STATE:
1367 pkt_finish_packet(pkt, pkt->w_bio->bi_status);
1368 return;
1369
1370 default:
1371 BUG();
1372 break;
1373 }
1374 }
1375 }
1376
1377 static void pkt_handle_packets(struct pktcdvd_device *pd)
1378 {
1379 struct device *ddev = disk_to_dev(pd->disk);
1380 struct packet_data *pkt, *next;
1381
1382 /*
1383 * Run state machine for active packets
1384 */
1385 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1386 if (atomic_read(&pkt->run_sm) > 0) {
1387 atomic_set(&pkt->run_sm, 0);
1388 pkt_run_state_machine(pd, pkt);
1389 }
1390 }
1391
1392 /*
1393 * Move no longer active packets to the free list
1394 */
1395 spin_lock(&pd->cdrw.active_list_lock);
1396 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
1397 if (pkt->state == PACKET_FINISHED_STATE) {
1398 list_del(&pkt->list);
1399 pkt_put_packet_data(pd, pkt);
1400 pkt_set_state(ddev, pkt, PACKET_IDLE_STATE);
1401 atomic_set(&pd->scan_queue, 1);
1402 }
1403 }
1404 spin_unlock(&pd->cdrw.active_list_lock);
1405 }
1406
1407 /*
1408 * kcdrwd is woken up when writes have been queued for one of our
1409 * registered devices
1410 */
1411 static int kcdrwd(void *foobar)
1412 {
1413 struct pktcdvd_device *pd = foobar;
1414 struct device *ddev = disk_to_dev(pd->disk);
1415 struct packet_data *pkt;
1416 int states[PACKET_NUM_STATES];
1417 long min_sleep_time, residue;
1418
1419 set_user_nice(current, MIN_NICE);
1420 set_freezable();
1421
1422 for (;;) {
1423 DECLARE_WAITQUEUE(wait, current);
1424
1425 /*
1426 * Wait until there is something to do
1427 */
1428 add_wait_queue(&pd->wqueue, &wait);
1429 for (;;) {
1430 set_current_state(TASK_INTERRUPTIBLE);
1431
1432 /* Check if we need to run pkt_handle_queue */
1433 if (atomic_read(&pd->scan_queue) > 0)
1434 goto work_to_do;
1435
1436 /* Check if we need to run the state machine for some packet */
1437 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1438 if (atomic_read(&pkt->run_sm) > 0)
1439 goto work_to_do;
1440 }
1441
1442 /* Check if we need to process the iosched queues */
1443 if (atomic_read(&pd->iosched.attention) != 0)
1444 goto work_to_do;
1445
1446 /* Otherwise, go to sleep */
1447 pkt_count_states(pd, states);
1448 dev_dbg(ddev, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
1449 states[0], states[1], states[2], states[3], states[4], states[5]);
1450
1451 min_sleep_time = MAX_SCHEDULE_TIMEOUT;
1452 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1453 if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
1454 min_sleep_time = pkt->sleep_time;
1455 }
1456
1457 dev_dbg(ddev, "sleeping\n");
1458 residue = schedule_timeout(min_sleep_time);
1459 dev_dbg(ddev, "wake up\n");
1460
1461 /* make swsusp happy with our thread */
1462 try_to_freeze();
1463
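			/*
			 * Charge the time actually slept (min_sleep_time minus
			 * the residue returned by schedule_timeout) against each
			 * waiting packet's remaining sleep budget.
			 */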
1464 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1465 if (!pkt->sleep_time)
1466 continue;
1467 pkt->sleep_time -= min_sleep_time - residue;
1468 if (pkt->sleep_time <= 0) {
1469 pkt->sleep_time = 0;
1470 atomic_inc(&pkt->run_sm);
1471 }
1472 }
1473
1474 if (kthread_should_stop())
1475 break;
1476 }
1477 work_to_do:
1478 set_current_state(TASK_RUNNING);
1479 remove_wait_queue(&pd->wqueue, &wait);
1480
1481 if (kthread_should_stop())
1482 break;
1483
1484 /*
1485 * if pkt_handle_queue returns true, we can queue
1486 * another request.
1487 */
1488 while (pkt_handle_queue(pd))
1489 ;
1490
1491 /*
1492 * Handle packet state machine
1493 */
1494 pkt_handle_packets(pd);
1495
1496 /*
1497 * Handle iosched queues
1498 */
1499 pkt_iosched_process_queue(pd);
1500 }
1501
1502 return 0;
1503 }
1504
1505 static void pkt_print_settings(struct pktcdvd_device *pd)
1506 {
1507 dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n",
1508 pd->settings.fp ? "Fixed" : "Variable",
1509 pd->settings.size >> 2,
1510 pd->settings.block_mode == 8 ? '1' : '2');
1511 }
1512
1513 static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
1514 {
1515 memset(cgc->cmd, 0, sizeof(cgc->cmd));
1516
1517 cgc->cmd[0] = GPCMD_MODE_SENSE_10;
1518 cgc->cmd[2] = page_code | (page_control << 6);
1519 put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
1520 cgc->data_direction = CGC_DATA_READ;
1521 return pkt_generic_packet(pd, cgc);
1522 }
1523
1524 static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
1525 {
1526 memset(cgc->cmd, 0, sizeof(cgc->cmd));
1527 memset(cgc->buffer, 0, 2);
1528 cgc->cmd[0] = GPCMD_MODE_SELECT_10;
1529 cgc->cmd[1] = 0x10; /* PF */
1530 put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
1531 cgc->data_direction = CGC_DATA_WRITE;
1532 return pkt_generic_packet(pd, cgc);
1533 }
1534
1535 static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
1536 {
1537 struct packet_command cgc;
1538 int ret;
1539
1540 /* set up command and get the disc info */
1541 init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
1542 cgc.cmd[0] = GPCMD_READ_DISC_INFO;
1543 cgc.cmd[8] = cgc.buflen = 2;
1544 cgc.quiet = 1;
1545
1546 ret = pkt_generic_packet(pd, &cgc);
1547 if (ret)
1548 return ret;
1549
1550 /* not all drives have the same disc_info length, so requeue
1551 * packet with the length the drive tells us it can supply
1552 */
1553 cgc.buflen = be16_to_cpu(di->disc_information_length) +
1554 sizeof(di->disc_information_length);
1555
1556 if (cgc.buflen > sizeof(disc_information))
1557 cgc.buflen = sizeof(disc_information);
1558
1559 cgc.cmd[8] = cgc.buflen;
1560 return pkt_generic_packet(pd, &cgc);
1561 }
1562
1563 static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
1564 {
1565 struct packet_command cgc;
1566 int ret;
1567
1568 init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
1569 cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
1570 cgc.cmd[1] = type & 3;
1571 put_unaligned_be16(track, &cgc.cmd[4]);
1572 cgc.cmd[8] = 8;
1573 cgc.quiet = 1;
1574
1575 ret = pkt_generic_packet(pd, &cgc);
1576 if (ret)
1577 return ret;
1578
1579 cgc.buflen = be16_to_cpu(ti->track_information_length) +
1580 sizeof(ti->track_information_length);
1581
1582 if (cgc.buflen > sizeof(track_information))
1583 cgc.buflen = sizeof(track_information);
1584
1585 cgc.cmd[8] = cgc.buflen;
1586 return pkt_generic_packet(pd, &cgc);
1587 }
1588
1589 static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
1590 long *last_written)
1591 {
1592 disc_information di;
1593 track_information ti;
1594 __u32 last_track;
1595 int ret;
1596
1597 ret = pkt_get_disc_info(pd, &di);
1598 if (ret)
1599 return ret;
1600
1601 last_track = (di.last_track_msb << 8) | di.last_track_lsb;
1602 ret = pkt_get_track_info(pd, last_track, 1, &ti);
1603 if (ret)
1604 return ret;
1605
1606 /* if this track is blank, try the previous. */
1607 if (ti.blank) {
1608 last_track--;
1609 ret = pkt_get_track_info(pd, last_track, 1, &ti);
1610 if (ret)
1611 return ret;
1612 }
1613
1614 /* if last recorded field is valid, return it. */
1615 if (ti.lra_v) {
1616 *last_written = be32_to_cpu(ti.last_rec_address);
1617 } else {
1618 /* make it up instead */
1619 *last_written = be32_to_cpu(ti.track_start) +
1620 be32_to_cpu(ti.track_size);
1621 if (ti.free_blocks)
1622 *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
1623 }
1624 return 0;
1625 }
1626
1627 /*
1628 * write mode select package based on pd->settings
1629 */
1630 static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
1631 {
1632 struct device *ddev = disk_to_dev(pd->disk);
1633 struct packet_command cgc;
1634 struct scsi_sense_hdr sshdr;
1635 write_param_page *wp;
1636 char buffer[128];
1637 int ret, size;
1638
1639 /* doesn't apply to DVD+RW or DVD-RAM */
1640 if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
1641 return 0;
1642
1643 memset(buffer, 0, sizeof(buffer));
1644 init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
1645 cgc.sshdr = &sshdr;
1646 ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
1647 if (ret) {
1648 pkt_dump_sense(pd, &cgc);
1649 return ret;
1650 }
1651
1652 size = 2 + get_unaligned_be16(&buffer[0]);
1653 pd->mode_offset = get_unaligned_be16(&buffer[6]);
1654 if (size > sizeof(buffer))
1655 size = sizeof(buffer);
1656
1657 /*
1658 * now get it all
1659 */
1660 init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
1661 cgc.sshdr = &sshdr;
1662 ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
1663 if (ret) {
1664 pkt_dump_sense(pd, &cgc);
1665 return ret;
1666 }
1667
1668 /*
1669 * write page is offset header + block descriptor length
1670 */
1671 wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
1672
1673 wp->fp = pd->settings.fp;
1674 wp->track_mode = pd->settings.track_mode;
1675 wp->write_type = pd->settings.write_type;
1676 wp->data_block_type = pd->settings.block_mode;
1677
1678 wp->multi_session = 0;
1679
1680 #ifdef PACKET_USE_LS
1681 wp->link_size = 7;
1682 wp->ls_v = 1;
1683 #endif
1684
1685 if (wp->data_block_type == PACKET_BLOCK_MODE1) {
1686 wp->session_format = 0;
1687 wp->subhdr2 = 0x20;
1688 } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
1689 wp->session_format = 0x20;
1690 wp->subhdr2 = 8;
1691 #if 0
1692 wp->mcn[0] = 0x80;
1693 memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
1694 #endif
1695 } else {
1696 /*
1697 * paranoia
1698 */
1699 dev_err(ddev, "write mode wrong %d\n", wp->data_block_type);
1700 return 1;
1701 }
1702 wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
1703
1704 cgc.buflen = cgc.cmd[8] = size;
1705 ret = pkt_mode_select(pd, &cgc);
1706 if (ret) {
1707 pkt_dump_sense(pd, &cgc);
1708 return ret;
1709 }
1710
1711 pkt_print_settings(pd);
1712 return 0;
1713 }
1714
1715 /*
1716 * 1 -- we can write to this track, 0 -- we can't
1717 */
1718 static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
1719 {
1720 struct device *ddev = disk_to_dev(pd->disk);
1721
1722 switch (pd->mmc3_profile) {
1723 case 0x1a: /* DVD+RW */
1724 case 0x12: /* DVD-RAM */
1725 /* The track is always writable on DVD+RW/DVD-RAM */
1726 return 1;
1727 default:
1728 break;
1729 }
1730
1731 if (!ti->packet || !ti->fp)
1732 return 0;
1733
1734 /*
1735 * "good" settings as per Mt Fuji.
1736 */
1737 if (ti->rt == 0 && ti->blank == 0)
1738 return 1;
1739
1740 if (ti->rt == 0 && ti->blank == 1)
1741 return 1;
1742
1743 if (ti->rt == 1 && ti->blank == 0)
1744 return 1;
1745
1746 dev_err(ddev, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
1747 return 0;
1748 }
1749
1750 /*
1751 * 1 -- we can write to this disc, 0 -- we can't
1752 */
1753 static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
1754 {
1755 struct device *ddev = disk_to_dev(pd->disk);
1756
1757 switch (pd->mmc3_profile) {
1758 case 0x0a: /* CD-RW */
1759 case 0xffff: /* MMC3 not supported */
1760 break;
1761 case 0x1a: /* DVD+RW */
1762 case 0x13: /* DVD-RW */
1763 case 0x12: /* DVD-RAM */
1764 return 1;
1765 default:
1766 dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile);
1767 return 0;
1768 }
1769
1770 /*
1771 * for disc type 0xff we should probably reserve a new track.
1772 * but i'm not sure, should we leave this to user apps? probably.
1773 */
1774 if (di->disc_type == 0xff) {
1775 dev_notice(ddev, "unknown disc - no track?\n");
1776 return 0;
1777 }
1778
1779 if (di->disc_type != 0x20 && di->disc_type != 0) {
1780 dev_err(ddev, "wrong disc type (%x)\n", di->disc_type);
1781 return 0;
1782 }
1783
1784 if (di->erasable == 0) {
1785 dev_err(ddev, "disc not erasable\n");
1786 return 0;
1787 }
1788
1789 if (di->border_status == PACKET_SESSION_RESERVED) {
1790 dev_err(ddev, "can't write to last track (reserved)\n");
1791 return 0;
1792 }
1793
1794 return 1;
1795 }
1796
1797 static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
1798 {
1799 struct device *ddev = disk_to_dev(pd->disk);
1800 struct packet_command cgc;
1801 unsigned char buf[12];
1802 disc_information di;
1803 track_information ti;
1804 int ret, track;
1805
1806 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1807 cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
1808 cgc.cmd[8] = 8;
1809 ret = pkt_generic_packet(pd, &cgc);
1810 pd->mmc3_profile = ret ? 0xffff : get_unaligned_be16(&buf[6]);
1811
1812 memset(&di, 0, sizeof(disc_information));
1813 memset(&ti, 0, sizeof(track_information));
1814
1815 ret = pkt_get_disc_info(pd, &di);
1816 if (ret) {
1817 dev_err(ddev, "failed get_disc\n");
1818 return ret;
1819 }
1820
1821 if (!pkt_writable_disc(pd, &di))
1822 return -EROFS;
1823
1824 pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
1825
1826 track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
1827 ret = pkt_get_track_info(pd, track, 1, &ti);
1828 if (ret) {
1829 dev_err(ddev, "failed get_track\n");
1830 return ret;
1831 }
1832
1833 if (!pkt_writable_track(pd, &ti)) {
1834 dev_err(ddev, "can't write to this track\n");
1835 return -EROFS;
1836 }
1837
1838 /*
1839 * we keep packet size in 512 byte units, makes it easier to
1840 * deal with request calculations.
1841 */
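	/* e.g. a fixed_packet_size of 16 (2 kB blocks) becomes 64 sectors, i.e. a 32 kB packet */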
1842 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1843 if (pd->settings.size == 0) {
1844 dev_notice(ddev, "detected zero packet size!\n");
1845 return -ENXIO;
1846 }
1847 if (pd->settings.size > PACKET_MAX_SECTORS) {
1848 dev_err(ddev, "packet size is too big\n");
1849 return -EROFS;
1850 }
1851 pd->settings.fp = ti.fp;
1852 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
1853
1854 if (ti.nwa_v) {
1855 pd->nwa = be32_to_cpu(ti.next_writable);
1856 set_bit(PACKET_NWA_VALID, &pd->flags);
1857 }
1858
1859 /*
1860 * In theory we could use the LRA on -RW media as well and just zero
1861 * blocks that haven't been written yet, but in practice that
1862 * is a no-go. We'll use it for -R media, naturally.
1863 */
1864 if (ti.lra_v) {
1865 pd->lra = be32_to_cpu(ti.last_rec_address);
1866 set_bit(PACKET_LRA_VALID, &pd->flags);
1867 } else {
1868 pd->lra = 0xffffffff;
1869 set_bit(PACKET_LRA_VALID, &pd->flags);
1870 }
1871
1872 /*
1873 * fine for now
1874 */
1875 pd->settings.link_loss = 7;
1876 pd->settings.write_type = 0; /* packet */
1877 pd->settings.track_mode = ti.track_mode;
1878
1879 /*
1880 * mode1 or mode2 disc
1881 */
1882 switch (ti.data_mode) {
1883 case PACKET_MODE1:
1884 pd->settings.block_mode = PACKET_BLOCK_MODE1;
1885 break;
1886 case PACKET_MODE2:
1887 pd->settings.block_mode = PACKET_BLOCK_MODE2;
1888 break;
1889 default:
1890 dev_err(ddev, "unknown data mode\n");
1891 return -EROFS;
1892 }
1893 return 0;
1894 }
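
/*
 * A worked example of the unit conversions in pkt_probe_settings() above,
 * using made-up but representative numbers: a track reporting
 * fixed_packet_size == 32 (32 frames of 2 kB, i.e. 64 kB per packet)
 * gives settings.size == 32 << 2 == 128 sectors of 512 bytes, which is
 * exactly PACKET_MAX_SECTORS. With track_start == 0x1234 frames, the
 * start in 512-byte sectors is 0x48d0, so offset == 0x48d0 & 127 == 80,
 * i.e. the track begins 80 sectors into its packet-aligned zone.
 */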
1895
1896 /*
1897 * enable/disable write caching on drive
1898 */
1899 static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd)
1900 {
1901 struct device *ddev = disk_to_dev(pd->disk);
1902 struct packet_command cgc;
1903 struct scsi_sense_hdr sshdr;
1904 unsigned char buf[64];
1905 bool set = IS_ENABLED(CONFIG_CDROM_PKTCDVD_WCACHE);
1906 int ret;
1907
1908 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1909 cgc.sshdr = &sshdr;
1910 cgc.buflen = pd->mode_offset + 12;
1911
1912 /*
1913 * caching mode page might not be there, so quiet this command
1914 */
1915 cgc.quiet = 1;
1916
1917 ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
1918 if (ret)
1919 return ret;
1920
1921 /*
1922 * use drive write caching -- we need deferred error handling to be
1923 * able to successfully recover with this option (drive will return good
1924 * status as soon as the cdb is validated).
1925 */
1926 buf[pd->mode_offset + 10] |= (set << 2);
1927
1928 cgc.buflen = cgc.cmd[8] = 2 + get_unaligned_be16(&buf[0]);
1929 ret = pkt_mode_select(pd, &cgc);
1930 if (ret) {
1931 dev_err(ddev, "write caching control failed\n");
1932 pkt_dump_sense(pd, &cgc);
1933 } else if (set)
1934 dev_notice(ddev, "enabled write caching\n");
1935 return ret;
1936 }
1937
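/*
 * Lock or unlock the drive tray. PREVENT ALLOW MEDIUM REMOVAL carries
 * the "prevent" flag in bit 0 of CDB byte 4, so lockflag != 0 locks the
 * tray and lockflag == 0 unlocks it again.
 */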
1938 static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
1939 {
1940 struct packet_command cgc;
1941
1942 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1943 cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
1944 cgc.cmd[4] = lockflag ? 1 : 0;
1945 return pkt_generic_packet(pd, &cgc);
1946 }
1947
1948 /*
1949 * Returns drive maximum write speed
1950 */
1951 static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
1952 unsigned *write_speed)
1953 {
1954 struct packet_command cgc;
1955 struct scsi_sense_hdr sshdr;
1956 unsigned char buf[256+18];
1957 unsigned char *cap_buf;
1958 int ret, offset;
1959
1960 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
1961 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
1962 cgc.sshdr = &sshdr;
1963
1964 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1965 if (ret) {
1966 cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
1967 sizeof(struct mode_page_header);
1968 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1969 if (ret) {
1970 pkt_dump_sense(pd, &cgc);
1971 return ret;
1972 }
1973 }
1974
1975 offset = 20; /* Obsoleted field, used by older drives */
1976 if (cap_buf[1] >= 28)
1977 offset = 28; /* Current write speed selected */
1978 if (cap_buf[1] >= 30) {
1979 /* If the drive reports at least one "Logical Unit Write
1980 * Speed Performance Descriptor Block", use the information
1981 * in the first block, which contains the highest speed.
1982 */
1983 int num_spdb = get_unaligned_be16(&cap_buf[30]);
1984 if (num_spdb > 0)
1985 offset = 34;
1986 }
1987
1988 *write_speed = get_unaligned_be16(&cap_buf[offset]);
1989 return 0;
1990 }
1991
1992 /* These tables are from cdrecord - I don't have the Orange Book */
1993 /* standard speed CD-RW (1-4x) */
1994 static char clv_to_speed[16] = {
1995 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
1996 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1997 };
1998 /* high speed CD-RW (4-10x) */
1999 static char hs_clv_to_speed[16] = {
2000 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
2001 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2002 };
2003 /* ultra high speed CD-RW */
2004 static char us_clv_to_speed[16] = {
2005 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
2006 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
2007 };
2008
2009 /*
2010 * reads the maximum media speed from ATIP
2011 */
2012 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
2013 unsigned *speed)
2014 {
2015 struct device *ddev = disk_to_dev(pd->disk);
2016 struct packet_command cgc;
2017 struct scsi_sense_hdr sshdr;
2018 unsigned char buf[64];
2019 unsigned int size, st, sp;
2020 int ret;
2021
2022 init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
2023 cgc.sshdr = &sshdr;
2024 cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
2025 cgc.cmd[1] = 2;
2026 cgc.cmd[2] = 4; /* READ ATIP */
2027 cgc.cmd[8] = 2;
2028 ret = pkt_generic_packet(pd, &cgc);
2029 if (ret) {
2030 pkt_dump_sense(pd, &cgc);
2031 return ret;
2032 }
2033 size = 2 + get_unaligned_be16(&buf[0]);
2034 if (size > sizeof(buf))
2035 size = sizeof(buf);
2036
2037 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
2038 cgc.sshdr = &sshdr;
2039 cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
2040 cgc.cmd[1] = 2;
2041 cgc.cmd[2] = 4;
2042 cgc.cmd[8] = size;
2043 ret = pkt_generic_packet(pd, &cgc);
2044 if (ret) {
2045 pkt_dump_sense(pd, &cgc);
2046 return ret;
2047 }
2048
2049 if (!(buf[6] & 0x40)) {
2050 dev_notice(ddev, "disc type is not CD-RW\n");
2051 return 1;
2052 }
2053 if (!(buf[6] & 0x4)) {
2054 dev_notice(ddev, "A1 values on media are not valid, maybe not CD-RW?\n");
2055 return 1;
2056 }
2057
2058 st = (buf[6] >> 3) & 0x7; /* disc sub-type */
2059
2060 sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
2061
2062 /* Info from cdrecord */
2063 switch (st) {
2064 case 0: /* standard speed */
2065 *speed = clv_to_speed[sp];
2066 break;
2067 case 1: /* high speed */
2068 *speed = hs_clv_to_speed[sp];
2069 break;
2070 case 2: /* ultra high speed */
2071 *speed = us_clv_to_speed[sp];
2072 break;
2073 default:
2074 dev_notice(ddev, "unknown disc sub-type %d\n", st);
2075 return 1;
2076 }
2077 if (*speed) {
2078 dev_info(ddev, "maximum media speed: %d\n", *speed);
2079 return 0;
2080 } else {
2081 dev_notice(ddev, "unknown speed %d for sub-type %d\n", sp, st);
2082 return 1;
2083 }
2084 }
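
/*
 * For illustration (hypothetical values, not from a real drive): an
 * Ultra-Speed disc reports sub-type st == 2 in the ATIP, so an A1 speed
 * nibble of sp == 8 maps through us_clv_to_speed[8] above to 24, i.e.
 * the media is rated for 24x writing.
 */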
2085
2086 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
2087 {
2088 struct device *ddev = disk_to_dev(pd->disk);
2089 struct packet_command cgc;
2090 struct scsi_sense_hdr sshdr;
2091 int ret;
2092
2093 dev_dbg(ddev, "Performing OPC\n");
2094
2095 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
2096 cgc.sshdr = &sshdr;
2097 cgc.timeout = 60*HZ;
2098 cgc.cmd[0] = GPCMD_SEND_OPC;
2099 cgc.cmd[1] = 1;
2100 ret = pkt_generic_packet(pd, &cgc);
2101 if (ret)
2102 pkt_dump_sense(pd, &cgc);
2103 return ret;
2104 }
2105
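/*
 * Prepare the drive for packet writing: probe the disc/track settings,
 * program the write parameters, pick and set a write speed, and run
 * Optimum Power Calibration. Speeds are handled in kB/s; 1x CD is
 * roughly 176.4 kB/s, which is why the fallbacks below multiply an
 * "x" rating by 177 (e.g. the 16x default becomes 16 * 177 = 2832 kB/s).
 */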
2106 static int pkt_open_write(struct pktcdvd_device *pd)
2107 {
2108 struct device *ddev = disk_to_dev(pd->disk);
2109 int ret;
2110 unsigned int write_speed, media_write_speed, read_speed;
2111
2112 ret = pkt_probe_settings(pd);
2113 if (ret) {
2114 dev_dbg(ddev, "failed probe\n");
2115 return ret;
2116 }
2117
2118 ret = pkt_set_write_settings(pd);
2119 if (ret) {
2120 dev_notice(ddev, "failed saving write settings\n");
2121 return -EIO;
2122 }
2123
2124 pkt_write_caching(pd);
2125
2126 ret = pkt_get_max_speed(pd, &write_speed);
2127 if (ret)
2128 write_speed = 16 * 177;
2129 switch (pd->mmc3_profile) {
2130 case 0x13: /* DVD-RW */
2131 case 0x1a: /* DVD+RW */
2132 case 0x12: /* DVD-RAM */
2133 dev_notice(ddev, "write speed %ukB/s\n", write_speed);
2134 break;
2135 default:
2136 ret = pkt_media_speed(pd, &media_write_speed);
2137 if (ret)
2138 media_write_speed = 16;
2139 write_speed = min(write_speed, media_write_speed * 177);
2140 dev_notice(ddev, "write speed %ux\n", write_speed / 176);
2141 break;
2142 }
2143 read_speed = write_speed;
2144
2145 ret = pkt_set_speed(pd, write_speed, read_speed);
2146 if (ret) {
2147 dev_notice(ddev, "couldn't set write speed\n");
2148 return -EIO;
2149 }
2150 pd->write_speed = write_speed;
2151 pd->read_speed = read_speed;
2152
2153 ret = pkt_perform_opc(pd);
2154 if (ret)
2155 dev_notice(ddev, "Optimum Power Calibration failed\n");
2156
2157 return 0;
2158 }
2159
2160 /*
2161 * called at open time.
2162 */
2163 static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
2164 {
2165 struct device *ddev = disk_to_dev(pd->disk);
2166 int ret;
2167 long lba;
2168 struct request_queue *q;
2169 struct file *bdev_file;
2170
2171 /*
2172 * We need to re-open the cdrom device without O_NONBLOCK to be able
2173 * to read/write from/to it. It is already opened in O_NONBLOCK mode
2174 * so open should not fail.
2175 */
2176 bdev_file = bdev_file_open_by_dev(file_bdev(pd->bdev_file)->bd_dev,
2177 BLK_OPEN_READ, pd, NULL);
2178 if (IS_ERR(bdev_file)) {
2179 ret = PTR_ERR(bdev_file);
2180 goto out;
2181 }
2182 pd->f_open_bdev = bdev_file;
2183
2184 ret = pkt_get_last_written(pd, &lba);
2185 if (ret) {
2186 dev_err(ddev, "pkt_get_last_written failed\n");
2187 goto out_putdev;
2188 }
2189
2190 set_capacity(pd->disk, lba << 2);
2191 set_capacity_and_notify(file_bdev(pd->bdev_file)->bd_disk, lba << 2);
2192
2193 q = bdev_get_queue(file_bdev(pd->bdev_file));
2194 if (write) {
2195 ret = pkt_open_write(pd);
2196 if (ret)
2197 goto out_putdev;
2198 set_bit(PACKET_WRITABLE, &pd->flags);
2199 } else {
2200 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2201 clear_bit(PACKET_WRITABLE, &pd->flags);
2202 }
2203
2204 ret = pkt_set_segment_merging(pd, q);
2205 if (ret)
2206 goto out_putdev;
2207
2208 if (write) {
2209 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
2210 dev_err(ddev, "not enough memory for buffers\n");
2211 ret = -ENOMEM;
2212 goto out_putdev;
2213 }
2214 dev_info(ddev, "%lukB available on disc\n", lba << 1);
2215 }
2216 set_blocksize(bdev_file, CD_FRAMESIZE);
2217
2218 return 0;
2219
2220 out_putdev:
2221 fput(bdev_file);
2222 out:
2223 return ret;
2224 }
2225
2226 /*
2227 * Called when the device is closed. Makes sure that the drive flushes
2228 * its internal cache before we close.
2229 */
2230 static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2231 {
2232 struct device *ddev = disk_to_dev(pd->disk);
2233
2234 if (flush && pkt_flush_cache(pd))
2235 dev_notice(ddev, "not flushing cache\n");
2236
2237 pkt_lock_door(pd, 0);
2238
2239 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2240 fput(pd->f_open_bdev);
2241 pd->f_open_bdev = NULL;
2242
2243 pkt_shrink_pktlist(pd);
2244 }
2245
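/*
 * Look up a pktcdvd device by minor number. The array_index_nospec()
 * clamp prevents a mispredicted bounds check from being used to index
 * pkt_devs[] speculatively out of range (Spectre v1 hardening).
 */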
2246 static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
2247 {
2248 if (dev_minor >= MAX_WRITERS)
2249 return NULL;
2250
2251 dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
2252 return pkt_devs[dev_minor];
2253 }
2254
2255 static int pkt_open(struct gendisk *disk, blk_mode_t mode)
2256 {
2257 struct pktcdvd_device *pd = NULL;
2258 int ret;
2259
2260 mutex_lock(&pktcdvd_mutex);
2261 mutex_lock(&ctl_mutex);
2262 pd = pkt_find_dev_from_minor(disk->first_minor);
2263 if (!pd) {
2264 ret = -ENODEV;
2265 goto out;
2266 }
2267 BUG_ON(pd->refcnt < 0);
2268
2269 pd->refcnt++;
2270 if (pd->refcnt > 1) {
2271 if ((mode & BLK_OPEN_WRITE) &&
2272 !test_bit(PACKET_WRITABLE, &pd->flags)) {
2273 ret = -EBUSY;
2274 goto out_dec;
2275 }
2276 } else {
2277 ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE);
2278 if (ret)
2279 goto out_dec;
2280 }
2281 mutex_unlock(&ctl_mutex);
2282 mutex_unlock(&pktcdvd_mutex);
2283 return 0;
2284
2285 out_dec:
2286 pd->refcnt--;
2287 out:
2288 mutex_unlock(&ctl_mutex);
2289 mutex_unlock(&pktcdvd_mutex);
2290 return ret;
2291 }
2292
2293 static void pkt_release(struct gendisk *disk)
2294 {
2295 struct pktcdvd_device *pd = disk->private_data;
2296
2297 mutex_lock(&pktcdvd_mutex);
2298 mutex_lock(&ctl_mutex);
2299 pd->refcnt--;
2300 BUG_ON(pd->refcnt < 0);
2301 if (pd->refcnt == 0) {
2302 int flush = test_bit(PACKET_WRITABLE, &pd->flags);
2303 pkt_release_dev(pd, flush);
2304 }
2305 mutex_unlock(&ctl_mutex);
2306 mutex_unlock(&pktcdvd_mutex);
2307 }
2308
2309
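/*
 * Completion handler for the READ bios cloned in pkt_make_request_read():
 * copy the status back to the original bio, complete it, and account the
 * finished I/O on the pktcdvd device.
 */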
2310 static void pkt_end_io_read_cloned(struct bio *bio)
2311 {
2312 struct packet_stacked_data *psd = bio->bi_private;
2313 struct pktcdvd_device *pd = psd->pd;
2314
2315 psd->bio->bi_status = bio->bi_status;
2316 bio_put(bio);
2317 bio_endio(psd->bio);
2318 mempool_free(psd, &psd_pool);
2319 pkt_bio_finished(pd);
2320 }
2321
2322 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
2323 {
2324 struct bio *cloned_bio = bio_alloc_clone(file_bdev(pd->bdev_file), bio,
2325 GFP_NOIO, &pkt_bio_set);
2326 struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
2327
2328 psd->pd = pd;
2329 psd->bio = bio;
2330 cloned_bio->bi_private = psd;
2331 cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2332 pd->stats.secs_r += bio_sectors(bio);
2333 pkt_queue_bio(pd, cloned_bio);
2334 }
2335
2336 static void pkt_make_request_write(struct bio *bio)
2337 {
2338 struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
2339 sector_t zone;
2340 struct packet_data *pkt;
2341 int was_empty, blocked_bio;
2342 struct pkt_rb_node *node;
2343
2344 zone = get_zone(bio->bi_iter.bi_sector, pd);
2345
2346 /*
2347 * If we find a matching packet in state WAITING or READ_WAIT, we can
2348 * just append this bio to that packet.
2349 */
2350 spin_lock(&pd->cdrw.active_list_lock);
2351 blocked_bio = 0;
2352 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
2353 if (pkt->sector == zone) {
2354 spin_lock(&pkt->lock);
2355 if ((pkt->state == PACKET_WAITING_STATE) ||
2356 (pkt->state == PACKET_READ_WAIT_STATE)) {
2357 bio_list_add(&pkt->orig_bios, bio);
2358 pkt->write_size +=
2359 bio->bi_iter.bi_size / CD_FRAMESIZE;
2360 if ((pkt->write_size >= pkt->frames) &&
2361 (pkt->state == PACKET_WAITING_STATE)) {
2362 atomic_inc(&pkt->run_sm);
2363 wake_up(&pd->wqueue);
2364 }
2365 spin_unlock(&pkt->lock);
2366 spin_unlock(&pd->cdrw.active_list_lock);
2367 return;
2368 } else {
2369 blocked_bio = 1;
2370 }
2371 spin_unlock(&pkt->lock);
2372 }
2373 }
2374 spin_unlock(&pd->cdrw.active_list_lock);
2375
2376 /*
2377 * Test if there is enough room left in the bio work queue
2378 * (queue size >= congestion on mark).
2379 * If not, wait till the work queue size is below the congestion off mark.
2380 */
2381 spin_lock(&pd->lock);
2382 if (pd->write_congestion_on > 0
2383 && pd->bio_queue_size >= pd->write_congestion_on) {
2384 struct wait_bit_queue_entry wqe;
2385
2386 init_wait_var_entry(&wqe, &pd->congested, 0);
2387 for (;;) {
2388 prepare_to_wait_event(__var_waitqueue(&pd->congested),
2389 &wqe.wq_entry,
2390 TASK_UNINTERRUPTIBLE);
2391 if (pd->bio_queue_size <= pd->write_congestion_off)
2392 break;
2393 pd->congested = true;
2394 spin_unlock(&pd->lock);
2395 schedule();
2396 spin_lock(&pd->lock);
2397 }
2398 }
2399 spin_unlock(&pd->lock);
2400
2401 /*
2402 * No matching packet found. Store the bio in the work queue.
2403 */
2404 node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
2405 node->bio = bio;
2406 spin_lock(&pd->lock);
2407 BUG_ON(pd->bio_queue_size < 0);
2408 was_empty = (pd->bio_queue_size == 0);
2409 pkt_rbtree_insert(pd, node);
2410 spin_unlock(&pd->lock);
2411
2412 /*
2413 * Wake up the worker thread.
2414 */
2415 atomic_set(&pd->scan_queue, 1);
2416 if (was_empty) {
2417 /* This wake_up is required for correct operation */
2418 wake_up(&pd->wqueue);
2419 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
2420 /*
2421 * This wake up is not required for correct operation,
2422 * but improves performance in some cases.
2423 */
2424 wake_up(&pd->wqueue);
2425 }
2426 }
2427
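/*
 * ->submit_bio entry point. Reads are cloned and handed to the packet
 * I/O scheduler; writes are first split on zone (packet) boundaries.
 * As a sketch, assuming settings.size == 128 sectors and a zero track
 * offset: a write covering sectors 120..135 crosses the zone that starts
 * at 128, so it is split into 120..127 (chained) and 128..135, and each
 * piece is queued separately via pkt_make_request_write().
 */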
2428 static void pkt_submit_bio(struct bio *bio)
2429 {
2430 struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
2431 struct device *ddev = disk_to_dev(pd->disk);
2432 struct bio *split;
2433
2434 bio = bio_split_to_limits(bio);
2435 if (!bio)
2436 return;
2437
2438 dev_dbg(ddev, "start = %6llx stop = %6llx\n",
2439 bio->bi_iter.bi_sector, bio_end_sector(bio));
2440
2441 /*
2442 * Clone READ bios so we can have our own bi_end_io callback.
2443 */
2444 if (bio_data_dir(bio) == READ) {
2445 pkt_make_request_read(pd, bio);
2446 return;
2447 }
2448
2449 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2450 dev_notice(ddev, "WRITE for ro device (%llu)\n", bio->bi_iter.bi_sector);
2451 goto end_io;
2452 }
2453
2454 if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
2455 dev_err(ddev, "wrong bio size\n");
2456 goto end_io;
2457 }
2458
2459 do {
2460 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
2461 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
2462
2463 if (last_zone != zone) {
2464 BUG_ON(last_zone != zone + pd->settings.size);
2465
2466 split = bio_split(bio, last_zone -
2467 bio->bi_iter.bi_sector,
2468 GFP_NOIO, &pkt_bio_set);
2469 bio_chain(split, bio);
2470 } else {
2471 split = bio;
2472 }
2473
2474 pkt_make_request_write(split);
2475 } while (split != bio);
2476
2477 return;
2478 end_io:
2479 bio_io_error(bio);
2480 }
2481
2482 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2483 {
2484 struct device *ddev = disk_to_dev(pd->disk);
2485 int i;
2486 struct file *bdev_file;
2487 struct scsi_device *sdev;
2488
2489 if (pd->pkt_dev == dev) {
2490 dev_err(ddev, "recursive setup not allowed\n");
2491 return -EBUSY;
2492 }
2493 for (i = 0; i < MAX_WRITERS; i++) {
2494 struct pktcdvd_device *pd2 = pkt_devs[i];
2495 if (!pd2)
2496 continue;
2497 if (file_bdev(pd2->bdev_file)->bd_dev == dev) {
2498 dev_err(ddev, "%pg already setup\n",
2499 file_bdev(pd2->bdev_file));
2500 return -EBUSY;
2501 }
2502 if (pd2->pkt_dev == dev) {
2503 dev_err(ddev, "can't chain pktcdvd devices\n");
2504 return -EBUSY;
2505 }
2506 }
2507
2508 bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
2509 NULL, NULL);
2510 if (IS_ERR(bdev_file))
2511 return PTR_ERR(bdev_file);
2512 sdev = scsi_device_from_queue(file_bdev(bdev_file)->bd_disk->queue);
2513 if (!sdev) {
2514 fput(bdev_file);
2515 return -EINVAL;
2516 }
2517 put_device(&sdev->sdev_gendev);
2518
2519 /* This is safe, since we have a reference from open(). */
2520 __module_get(THIS_MODULE);
2521
2522 pd->bdev_file = bdev_file;
2523
2524 atomic_set(&pd->cdrw.pending_bios, 0);
2525 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name);
2526 if (IS_ERR(pd->cdrw.thread)) {
2527 dev_err(ddev, "can't start kernel thread\n");
2528 goto out_mem;
2529 }
2530
2531 proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
2532 dev_notice(ddev, "writer mapped to %pg\n", file_bdev(bdev_file));
2533 return 0;
2534
2535 out_mem:
2536 fput(bdev_file);
2537 /* This is safe: open() is still holding a reference. */
2538 module_put(THIS_MODULE);
2539 return -ENOMEM;
2540 }
2541
2542 static int pkt_ioctl(struct block_device *bdev, blk_mode_t mode,
2543 unsigned int cmd, unsigned long arg)
2544 {
2545 struct pktcdvd_device *pd = bdev->bd_disk->private_data;
2546 struct device *ddev = disk_to_dev(pd->disk);
2547 int ret;
2548
2549 dev_dbg(ddev, "cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2550
2551 mutex_lock(&pktcdvd_mutex);
2552 switch (cmd) {
2553 case CDROMEJECT:
2554 /*
2555 * The door gets locked when the device is opened, so we
2556 * have to unlock it or else the eject command fails.
2557 */
2558 if (pd->refcnt == 1)
2559 pkt_lock_door(pd, 0);
2560 fallthrough;
2561 /*
2562 * forward selected CDROM ioctls to the CD-ROM driver, for UDF
2563 */
2564 case CDROMMULTISESSION:
2565 case CDROMREADTOCENTRY:
2566 case CDROM_LAST_WRITTEN:
2567 case CDROM_SEND_PACKET:
2568 case SCSI_IOCTL_SEND_COMMAND:
2569 if (!bdev->bd_disk->fops->ioctl)
2570 ret = -ENOTTY;
2571 else
2572 ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
2573 break;
2574 default:
2575 dev_dbg(ddev, "Unknown ioctl (%x)\n", cmd);
2576 ret = -ENOTTY;
2577 }
2578 mutex_unlock(&pktcdvd_mutex);
2579
2580 return ret;
2581 }
2582
2583 static unsigned int pkt_check_events(struct gendisk *disk,
2584 unsigned int clearing)
2585 {
2586 struct pktcdvd_device *pd = disk->private_data;
2587 struct gendisk *attached_disk;
2588
2589 if (!pd)
2590 return 0;
2591 if (!pd->bdev_file)
2592 return 0;
2593 attached_disk = file_bdev(pd->bdev_file)->bd_disk;
2594 if (!attached_disk || !attached_disk->fops->check_events)
2595 return 0;
2596 return attached_disk->fops->check_events(attached_disk, clearing);
2597 }
2598
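/*
 * Name the device node below a pktcdvd/ subdirectory, so the disk shows
 * up as e.g. /dev/pktcdvd/pktcdvd0 rather than directly under /dev.
 */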
2599 static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
2600 {
2601 return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
2602 }
2603
2604 static const struct block_device_operations pktcdvd_ops = {
2605 .owner = THIS_MODULE,
2606 .submit_bio = pkt_submit_bio,
2607 .open = pkt_open,
2608 .release = pkt_release,
2609 .ioctl = pkt_ioctl,
2610 .compat_ioctl = blkdev_compat_ptr_ioctl,
2611 .check_events = pkt_check_events,
2612 .devnode = pkt_devnode,
2613 };
2614
2615 /*
2616 * Set up mapping from pktcdvd device to CD-ROM device.
2617 */
2618 static int pkt_setup_dev(dev_t dev, dev_t *pkt_dev)
2619 {
2620 struct queue_limits lim = {
2621 .max_hw_sectors = PACKET_MAX_SECTORS,
2622 .logical_block_size = CD_FRAMESIZE,
2623 .features = BLK_FEAT_ROTATIONAL,
2624 };
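
/*
 * These limits make the exported disk advertise 2 kB (CD_FRAMESIZE)
 * logical blocks and cap incoming requests at PACKET_MAX_SECTORS, the
 * largest packet size the driver supports; alignment to actual packet
 * boundaries is handled later by the bio splitting in pkt_submit_bio().
 */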
2625 int idx;
2626 int ret = -ENOMEM;
2627 struct pktcdvd_device *pd;
2628 struct gendisk *disk;
2629
2630 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2631
2632 for (idx = 0; idx < MAX_WRITERS; idx++)
2633 if (!pkt_devs[idx])
2634 break;
2635 if (idx == MAX_WRITERS) {
2636 pr_err("max %d writers supported\n", MAX_WRITERS);
2637 ret = -EBUSY;
2638 goto out_mutex;
2639 }
2640
2641 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
2642 if (!pd)
2643 goto out_mutex;
2644
2645 ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
2646 sizeof(struct pkt_rb_node));
2647 if (ret)
2648 goto out_mem;
2649
2650 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
2651 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
2652 spin_lock_init(&pd->cdrw.active_list_lock);
2653
2654 spin_lock_init(&pd->lock);
2655 spin_lock_init(&pd->iosched.lock);
2656 bio_list_init(&pd->iosched.read_queue);
2657 bio_list_init(&pd->iosched.write_queue);
2658 init_waitqueue_head(&pd->wqueue);
2659 pd->bio_queue = RB_ROOT;
2660
2661 pd->write_congestion_on = write_congestion_on;
2662 pd->write_congestion_off = write_congestion_off;
2663
2664 disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
2665 if (IS_ERR(disk)) {
2666 ret = PTR_ERR(disk);
2667 goto out_mem;
2668 }
2669 pd->disk = disk;
2670 disk->major = pktdev_major;
2671 disk->first_minor = idx;
2672 disk->minors = 1;
2673 disk->fops = &pktcdvd_ops;
2674 disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
2675 snprintf(disk->disk_name, sizeof(disk->disk_name), DRIVER_NAME"%d", idx);
2676 disk->private_data = pd;
2677
2678 pd->pkt_dev = MKDEV(pktdev_major, idx);
2679 ret = pkt_new_dev(pd, dev);
2680 if (ret)
2681 goto out_mem2;
2682
2683 /* inherit events of the host device */
2684 disk->events = file_bdev(pd->bdev_file)->bd_disk->events;
2685
2686 ret = add_disk(disk);
2687 if (ret)
2688 goto out_mem2;
2689
2690 pkt_sysfs_dev_new(pd);
2691 pkt_debugfs_dev_new(pd);
2692
2693 pkt_devs[idx] = pd;
2694 if (pkt_dev)
2695 *pkt_dev = pd->pkt_dev;
2696
2697 mutex_unlock(&ctl_mutex);
2698 return 0;
2699
2700 out_mem2:
2701 put_disk(disk);
2702 out_mem:
2703 mempool_exit(&pd->rb_pool);
2704 kfree(pd);
2705 out_mutex:
2706 mutex_unlock(&ctl_mutex);
2707 pr_err("setup of pktcdvd device failed\n");
2708 return ret;
2709 }
2710
2711 /*
2712 * Tear down mapping from pktcdvd device to CD-ROM device.
2713 */
2714 static int pkt_remove_dev(dev_t pkt_dev)
2715 {
2716 struct pktcdvd_device *pd;
2717 struct device *ddev;
2718 int idx;
2719 int ret = 0;
2720
2721 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2722
2723 for (idx = 0; idx < MAX_WRITERS; idx++) {
2724 pd = pkt_devs[idx];
2725 if (pd && (pd->pkt_dev == pkt_dev))
2726 break;
2727 }
2728 if (idx == MAX_WRITERS) {
2729 pr_debug("dev not setup\n");
2730 ret = -ENXIO;
2731 goto out;
2732 }
2733
2734 if (pd->refcnt > 0) {
2735 ret = -EBUSY;
2736 goto out;
2737 }
2738
2739 ddev = disk_to_dev(pd->disk);
2740
2741 if (!IS_ERR(pd->cdrw.thread))
2742 kthread_stop(pd->cdrw.thread);
2743
2744 pkt_devs[idx] = NULL;
2745
2746 pkt_debugfs_dev_remove(pd);
2747 pkt_sysfs_dev_remove(pd);
2748
2749 fput(pd->bdev_file);
2750
2751 remove_proc_entry(pd->disk->disk_name, pkt_proc);
2752 dev_notice(ddev, "writer unmapped\n");
2753
2754 del_gendisk(pd->disk);
2755 put_disk(pd->disk);
2756
2757 mempool_exit(&pd->rb_pool);
2758 kfree(pd);
2759
2760 /* This is safe: open() is still holding a reference. */
2761 module_put(THIS_MODULE);
2762
2763 out:
2764 mutex_unlock(&ctl_mutex);
2765 return ret;
2766 }
2767
2768 static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
2769 {
2770 struct pktcdvd_device *pd;
2771
2772 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2773
2774 pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
2775 if (pd) {
2776 ctrl_cmd->dev = new_encode_dev(file_bdev(pd->bdev_file)->bd_dev);
2777 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
2778 } else {
2779 ctrl_cmd->dev = 0;
2780 ctrl_cmd->pkt_dev = 0;
2781 }
2782 ctrl_cmd->num_devices = MAX_WRITERS;
2783
2784 mutex_unlock(&ctl_mutex);
2785 }
2786
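/*
 * Control ioctl for /dev/pktcdvd/control (the misc device registered
 * below). A rough user-space sketch, for illustration only -- in
 * practice pktsetup(8) from udftools drives this interface:
 *
 *	struct pkt_ctrl_command c = {
 *		.command = PKT_CTRL_CMD_SETUP,
 *		.dev = ...,	device number of the CD drive, new encoding
 *	};
 *	int fd = open("/dev/pktcdvd/control", O_RDONLY);
 *	ioctl(fd, PACKET_CTRL_CMD, &c);
 *	c.pkt_dev then holds the device number of the new pktcdvd node
 */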
2787 static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2788 {
2789 void __user *argp = (void __user *)arg;
2790 struct pkt_ctrl_command ctrl_cmd;
2791 int ret = 0;
2792 dev_t pkt_dev = 0;
2793
2794 if (cmd != PACKET_CTRL_CMD)
2795 return -ENOTTY;
2796
2797 if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
2798 return -EFAULT;
2799
2800 switch (ctrl_cmd.command) {
2801 case PKT_CTRL_CMD_SETUP:
2802 if (!capable(CAP_SYS_ADMIN))
2803 return -EPERM;
2804 ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
2805 ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
2806 break;
2807 case PKT_CTRL_CMD_TEARDOWN:
2808 if (!capable(CAP_SYS_ADMIN))
2809 return -EPERM;
2810 ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
2811 break;
2812 case PKT_CTRL_CMD_STATUS:
2813 pkt_get_status(&ctrl_cmd);
2814 break;
2815 default:
2816 return -ENOTTY;
2817 }
2818
2819 if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
2820 return -EFAULT;
2821 return ret;
2822 }
2823
2824 #ifdef CONFIG_COMPAT
2825 static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2826 {
2827 return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2828 }
2829 #endif
2830
2831 static const struct file_operations pkt_ctl_fops = {
2832 .open = nonseekable_open,
2833 .unlocked_ioctl = pkt_ctl_ioctl,
2834 #ifdef CONFIG_COMPAT
2835 .compat_ioctl = pkt_ctl_compat_ioctl,
2836 #endif
2837 .owner = THIS_MODULE,
2838 };
2839
2840 static struct miscdevice pkt_misc = {
2841 .minor = MISC_DYNAMIC_MINOR,
2842 .name = DRIVER_NAME,
2843 .nodename = "pktcdvd/control",
2844 .fops = &pkt_ctl_fops
2845 };
2846
2847 static int __init pkt_init(void)
2848 {
2849 int ret;
2850
2851 mutex_init(&ctl_mutex);
2852
2853 ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
2854 sizeof(struct packet_stacked_data));
2855 if (ret)
2856 return ret;
2857 ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
2858 if (ret) {
2859 mempool_exit(&psd_pool);
2860 return ret;
2861 }
2862
2863 ret = register_blkdev(pktdev_major, DRIVER_NAME);
2864 if (ret < 0) {
2865 pr_err("unable to register block device\n");
2866 goto out2;
2867 }
2868 if (!pktdev_major)
2869 pktdev_major = ret;
2870
2871 ret = pkt_sysfs_init();
2872 if (ret)
2873 goto out;
2874
2875 pkt_debugfs_init();
2876
2877 ret = misc_register(&pkt_misc);
2878 if (ret) {
2879 pr_err("unable to register misc device\n");
2880 goto out_misc;
2881 }
2882
2883 pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
2884
2885 return 0;
2886
2887 out_misc:
2888 pkt_debugfs_cleanup();
2889 pkt_sysfs_cleanup();
2890 out:
2891 unregister_blkdev(pktdev_major, DRIVER_NAME);
2892 out2:
2893 mempool_exit(&psd_pool);
2894 bioset_exit(&pkt_bio_set);
2895 return ret;
2896 }
2897
2898 static void __exit pkt_exit(void)
2899 {
2900 remove_proc_entry("driver/"DRIVER_NAME, NULL);
2901 misc_deregister(&pkt_misc);
2902
2903 pkt_debugfs_cleanup();
2904 pkt_sysfs_cleanup();
2905
2906 unregister_blkdev(pktdev_major, DRIVER_NAME);
2907 mempool_exit(&psd_pool);
2908 bioset_exit(&pkt_bio_set);
2909 }
2910
2911 MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
2912 MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
2913 MODULE_LICENSE("GPL");
2914
2915 module_init(pkt_init);
2916 module_exit(pkt_exit);
2917