1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3 * f_mass_storage.c -- Mass Storage USB Composite Function
4 *
5 * Copyright (C) 2003-2008 Alan Stern
6 * Copyright (C) 2009 Samsung Electronics
7 * Author: Michal Nazarewicz <mina86@mina86.com>
8 * All rights reserved.
9 */
10
11 /*
12 * The Mass Storage Function acts as a USB Mass Storage device,
13 * appearing to the host as a disk drive or as a CD-ROM drive. In
14 * addition to providing an example of a genuinely useful composite
15 * function for a USB device, it also illustrates a technique of
16 * double-buffering for increased throughput.
17 *
18 * For more information about MSF and in particular its module
19 * parameters and sysfs interface read the
20 * <Documentation/usb/mass-storage.rst> file.
21 */
22
23 /*
24 * MSF is configured by specifying a fsg_config structure. It has the
25 * following fields:
26 *
 * nluns		Number of LUNs the function has (anywhere from 1
28 * to FSG_MAX_LUNS).
29 * luns An array of LUN configuration values. This
30 * should be filled for each LUN that
31 * function will include (ie. for "nluns"
32 * LUNs). Each element of the array has
33 * the following fields:
34 * ->filename The path to the backing file for the LUN.
35 * Required if LUN is not marked as
36 * removable.
37 * ->ro Flag specifying access to the LUN shall be
38 * read-only. This is implied if CD-ROM
39 * emulation is enabled as well as when
40 * it was impossible to open "filename"
41 * in R/W mode.
42 * ->removable Flag specifying that LUN shall be indicated as
43 * being removable.
44 * ->cdrom Flag specifying that LUN shall be reported as
45 * being a CD-ROM.
46 * ->nofua Flag specifying that FUA flag in SCSI WRITE(10,12)
47 * commands for this LUN shall be ignored.
48 *
49 * vendor_name
50 * product_name
51 * release Information used as a reply to INQUIRY
52 * request. To use default set to NULL,
53 * NULL, 0xffff respectively. The first
54 * field should be 8 and the second 16
55 * characters or less.
56 *
57 * can_stall Set to permit function to halt bulk endpoints.
58 * Disabled on some USB devices known not
59 * to work correctly. You should set it
60 * to true.
61 *
62 * If "removable" is not set for a LUN then a backing file must be
63 * specified. If it is set, then NULL filename means the LUN's medium
64 * is not loaded (an empty string as "filename" in the fsg_config
65 * structure causes error). The CD-ROM emulation includes a single
66 * data track and no audio tracks; hence there need be only one
67 * backing file per LUN.
68 *
69 * This function is heavily based on "File-backed Storage Gadget" by
70 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
71 * Brownell. The driver's SCSI command interface was based on the
72 * "Information technology - Small Computer System Interface - 2"
73 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
74 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
75 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
76 * was based on the "Universal Serial Bus Mass Storage Class UFI
77 * Command Specification" document, Revision 1.0, December 14, 1998,
78 * available at
79 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
80 */
81
82 /*
83 * Driver Design
84 *
85 * The MSF is fairly straightforward. There is a main kernel
86 * thread that handles most of the work. Interrupt routines field
87 * callbacks from the controller driver: bulk- and interrupt-request
88 * completion notifications, endpoint-0 events, and disconnect events.
89 * Completion events are passed to the main thread by wakeup calls. Many
90 * ep0 requests are handled at interrupt time, but SetInterface,
91 * SetConfiguration, and device reset requests are forwarded to the
92 * thread in the form of "exceptions" using SIGUSR1 signals (since they
93 * should interrupt any ongoing file I/O operations).
94 *
95 * The thread's main routine implements the standard command/data/status
96 * parts of a SCSI interaction. It and its subroutines are full of tests
97 * for pending signals/exceptions -- all this polling is necessary since
98 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
99 * indication that the driver really wants to be running in userspace.)
100 * An important point is that so long as the thread is alive it keeps an
101 * open reference to the backing file. This will prevent unmounting
102 * the backing file's underlying filesystem and could cause problems
103 * during system shutdown, for example. To prevent such problems, the
104 * thread catches INT, TERM, and KILL signals and converts them into
105 * an EXIT exception.
106 *
107 * In normal operation the main thread is started during the gadget's
108 * fsg_bind() callback and stopped during fsg_unbind(). But it can
109 * also exit when it receives a signal, and there's no point leaving
110 * the gadget running when the thread is dead. As of this moment, MSF
 * provides no way to deregister the gadget when the thread dies -- maybe
 * a callback function is needed.
113 *
114 * To provide maximum throughput, the driver uses a circular pipeline of
115 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
116 * arbitrarily long; in practice the benefits don't justify having more
117 * than 2 stages (i.e., double buffering). But it helps to think of the
118 * pipeline as being a long one. Each buffer head contains a bulk-in and
119 * a bulk-out request pointer (since the buffer can be used for both
120 * output and input -- directions always are given from the host's
121 * point of view) as well as a pointer to the buffer and various state
122 * variables.
123 *
124 * Use of the pipeline follows a simple protocol. There is a variable
125 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
126 * At any time that buffer head may still be in use from an earlier
127 * request, so each buffer head has a state variable indicating whether
128 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
129 * buffer head to be EMPTY, filling the buffer either by file I/O or by
130 * USB I/O (during which the buffer head is BUSY), and marking the buffer
131 * head FULL when the I/O is complete. Then the buffer will be emptied
132 * (again possibly by USB I/O, during which it is marked BUSY) and
133 * finally marked EMPTY again (possibly by a completion routine).
134 *
135 * A module parameter tells the driver to avoid stalling the bulk
136 * endpoints wherever the transport specification allows. This is
137 * necessary for some UDCs like the SuperH, which cannot reliably clear a
138 * halt on a bulk endpoint. However, under certain circumstances the
139 * Bulk-only specification requires a stall. In such cases the driver
140 * will halt the endpoint and set a flag indicating that it should clear
141 * the halt in software during the next device reset. Hopefully this
142 * will permit everything to work correctly. Furthermore, although the
143 * specification allows the bulk-out endpoint to halt when the host sends
144 * too much data, implementing this would cause an unavoidable race.
145 * The driver will always use the "no-stall" approach for OUT transfers.
146 *
147 * One subtle point concerns sending status-stage responses for ep0
148 * requests. Some of these requests, such as device reset, can involve
149 * interrupting an ongoing file I/O operation, which might take an
150 * arbitrarily long time. During that delay the host might give up on
151 * the original ep0 request and issue a new one. When that happens the
152 * driver should not notify the host about completion of the original
153 * request, as the host will no longer be waiting for it. So the driver
154 * assigns to each ep0 request a unique tag, and it keeps track of the
155 * tag value of the request associated with a long-running exception
156 * (device-reset, interface-change, or configuration-change). When the
157 * exception handler is finished, the status-stage response is submitted
158 * only if the current ep0 request tag is equal to the exception request
159 * tag. Thus only the most recently received ep0 request will get a
160 * status-stage response.
161 *
162 * Warning: This driver source file is too long. It ought to be split up
163 * into a header file plus about 3 separate .c files, to handle the details
164 * of the Gadget, USB Mass Storage, and SCSI protocols.
165 */
166
167
168 /* #define VERBOSE_DEBUG */
169 /* #define DUMP_MSGS */
170
171 #include <linux/blkdev.h>
172 #include <linux/completion.h>
173 #include <linux/dcache.h>
174 #include <linux/delay.h>
175 #include <linux/device.h>
176 #include <linux/fcntl.h>
177 #include <linux/file.h>
178 #include <linux/fs.h>
179 #include <linux/kstrtox.h>
180 #include <linux/kthread.h>
181 #include <linux/sched/signal.h>
182 #include <linux/limits.h>
183 #include <linux/overflow.h>
184 #include <linux/pagemap.h>
185 #include <linux/rwsem.h>
186 #include <linux/slab.h>
187 #include <linux/spinlock.h>
188 #include <linux/string.h>
189 #include <linux/freezer.h>
190 #include <linux/module.h>
191 #include <linux/uaccess.h>
192 #include <linux/unaligned.h>
193
194 #include <linux/usb/ch9.h>
195 #include <linux/usb/gadget.h>
196 #include <linux/usb/composite.h>
197
198 #include <linux/nospec.h>
199
200 #include "configfs.h"
201
202
203 /*------------------------------------------------------------------------*/
204
205 #define FSG_DRIVER_DESC "Mass Storage Function"
206 #define FSG_DRIVER_VERSION "2009/09/11"
207
208 static const char fsg_string_interface[] = "Mass Storage";
209
210 #include "storage_common.h"
211 #include "f_mass_storage.h"
212
213 /* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
214 static struct usb_string fsg_strings[] = {
215 {FSG_STRING_INTERFACE, fsg_string_interface},
216 {}
217 };
218
219 static struct usb_gadget_strings fsg_stringtab = {
220 .language = 0x0409, /* en-us */
221 .strings = fsg_strings,
222 };
223
224 static struct usb_gadget_strings *fsg_strings_array[] = {
225 &fsg_stringtab,
226 NULL,
227 };
228
229 /*-------------------------------------------------------------------------*/
230
231 struct fsg_dev;
232 struct fsg_common;
233
234 /* Data shared by all the FSG instances. */
235 struct fsg_common {
236 struct usb_gadget *gadget;
237 struct usb_composite_dev *cdev;
238 struct fsg_dev *fsg;
239 wait_queue_head_t io_wait;
240 wait_queue_head_t fsg_wait;
241
242 /* filesem protects: backing files in use */
243 struct rw_semaphore filesem;
244
245 /* lock protects: state and thread_task */
246 spinlock_t lock;
247
248 struct usb_ep *ep0; /* Copy of gadget->ep0 */
249 struct usb_request *ep0req; /* Copy of cdev->req */
250 unsigned int ep0_req_tag;
251
252 struct fsg_buffhd *next_buffhd_to_fill;
253 struct fsg_buffhd *next_buffhd_to_drain;
254 struct fsg_buffhd *buffhds;
255 unsigned int fsg_num_buffers;
256
257 int cmnd_size;
258 u8 cmnd[MAX_COMMAND_SIZE];
259
260 unsigned int lun;
261 struct fsg_lun *luns[FSG_MAX_LUNS];
262 struct fsg_lun *curlun;
263
264 unsigned int bulk_out_maxpacket;
265 enum fsg_state state; /* For exception handling */
266 unsigned int exception_req_tag;
267 void *exception_arg;
268
269 enum data_direction data_dir;
270 u32 data_size;
271 u32 data_size_from_cmnd;
272 u32 tag;
273 u32 residue;
274 u32 usb_amount_left;
275
276 unsigned int can_stall:1;
277 unsigned int free_storage_on_release:1;
278 unsigned int phase_error:1;
279 unsigned int short_packet_received:1;
280 unsigned int bad_lun_okay:1;
281 unsigned int running:1;
282 unsigned int sysfs:1;
283
284 struct completion thread_notifier;
285 struct task_struct *thread_task;
286
287 /* Gadget's private data. */
288 void *private_data;
289
290 char inquiry_string[INQUIRY_STRING_LEN];
291 };
292
293 struct fsg_dev {
294 struct usb_function function;
295 struct usb_gadget *gadget; /* Copy of cdev->gadget */
296 struct fsg_common *common;
297
298 u16 interface_number;
299
300 unsigned int bulk_in_enabled:1;
301 unsigned int bulk_out_enabled:1;
302
303 unsigned long atomic_bitflags;
304 #define IGNORE_BULK_OUT 0
305
306 struct usb_ep *bulk_in;
307 struct usb_ep *bulk_out;
308 };
309
__fsg_is_set(struct fsg_common * common,const char * func,unsigned line)310 static inline int __fsg_is_set(struct fsg_common *common,
311 const char *func, unsigned line)
312 {
313 if (common->fsg)
314 return 1;
315 ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
316 WARN_ON(1);
317 return 0;
318 }
319
320 #define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
321
fsg_from_func(struct usb_function * f)322 static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
323 {
324 return container_of(f, struct fsg_dev, function);
325 }
326
exception_in_progress(struct fsg_common * common)327 static int exception_in_progress(struct fsg_common *common)
328 {
329 return common->state > FSG_STATE_NORMAL;
330 }
331
332 /* Make bulk-out requests be divisible by the maxpacket size */
set_bulk_out_req_length(struct fsg_common * common,struct fsg_buffhd * bh,unsigned int length)333 static void set_bulk_out_req_length(struct fsg_common *common,
334 struct fsg_buffhd *bh, unsigned int length)
335 {
336 unsigned int rem;
337
338 bh->bulk_out_intended_length = length;
339 rem = length % common->bulk_out_maxpacket;
340 if (rem > 0)
341 length += common->bulk_out_maxpacket - rem;
342 bh->outreq->length = length;
343 }
344
345
346 /*-------------------------------------------------------------------------*/
347
fsg_set_halt(struct fsg_dev * fsg,struct usb_ep * ep)348 static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
349 {
350 const char *name;
351
352 if (ep == fsg->bulk_in)
353 name = "bulk-in";
354 else if (ep == fsg->bulk_out)
355 name = "bulk-out";
356 else
357 name = ep->name;
358 DBG(fsg, "%s set halt\n", name);
359 return usb_ep_set_halt(ep);
360 }
361
362
363 /*-------------------------------------------------------------------------*/
364
365 /* These routines may be called in process context or in_irq */
366
__raise_exception(struct fsg_common * common,enum fsg_state new_state,void * arg)367 static void __raise_exception(struct fsg_common *common, enum fsg_state new_state,
368 void *arg)
369 {
370 unsigned long flags;
371
372 /*
373 * Do nothing if a higher-priority exception is already in progress.
374 * If a lower-or-equal priority exception is in progress, preempt it
375 * and notify the main thread by sending it a signal.
376 */
377 spin_lock_irqsave(&common->lock, flags);
378 if (common->state <= new_state) {
379 common->exception_req_tag = common->ep0_req_tag;
380 common->state = new_state;
381 common->exception_arg = arg;
382 if (common->thread_task)
383 send_sig_info(SIGUSR1, SEND_SIG_PRIV,
384 common->thread_task);
385 }
386 spin_unlock_irqrestore(&common->lock, flags);
387 }
388
raise_exception(struct fsg_common * common,enum fsg_state new_state)389 static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
390 {
391 __raise_exception(common, new_state, NULL);
392 }
393
394 /*-------------------------------------------------------------------------*/
395
ep0_queue(struct fsg_common * common)396 static int ep0_queue(struct fsg_common *common)
397 {
398 int rc;
399
400 rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
401 common->ep0->driver_data = common;
402 if (rc != 0 && rc != -ESHUTDOWN) {
403 /* We can't do much more than wait for a reset */
404 WARNING(common, "error in submission: %s --> %d\n",
405 common->ep0->name, rc);
406 }
407 return rc;
408 }
409
410
411 /*-------------------------------------------------------------------------*/
412
413 /* Completion handlers. These always run in_irq. */
414
bulk_in_complete(struct usb_ep * ep,struct usb_request * req)415 static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
416 {
417 struct fsg_common *common = ep->driver_data;
418 struct fsg_buffhd *bh = req->context;
419
420 if (req->status || req->actual != req->length)
421 DBG(common, "%s --> %d, %u/%u\n", __func__,
422 req->status, req->actual, req->length);
423 if (req->status == -ECONNRESET) /* Request was cancelled */
424 usb_ep_fifo_flush(ep);
425
426 /* Synchronize with the smp_load_acquire() in sleep_thread() */
427 smp_store_release(&bh->state, BUF_STATE_EMPTY);
428 wake_up(&common->io_wait);
429 }
430
bulk_out_complete(struct usb_ep * ep,struct usb_request * req)431 static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
432 {
433 struct fsg_common *common = ep->driver_data;
434 struct fsg_buffhd *bh = req->context;
435
436 dump_msg(common, "bulk-out", req->buf, req->actual);
437 if (req->status || req->actual != bh->bulk_out_intended_length)
438 DBG(common, "%s --> %d, %u/%u\n", __func__,
439 req->status, req->actual, bh->bulk_out_intended_length);
440 if (req->status == -ECONNRESET) /* Request was cancelled */
441 usb_ep_fifo_flush(ep);
442
443 /* Synchronize with the smp_load_acquire() in sleep_thread() */
444 smp_store_release(&bh->state, BUF_STATE_FULL);
445 wake_up(&common->io_wait);
446 }
447
_fsg_common_get_max_lun(struct fsg_common * common)448 static int _fsg_common_get_max_lun(struct fsg_common *common)
449 {
450 int i = ARRAY_SIZE(common->luns) - 1;
451
452 while (i >= 0 && !common->luns[i])
453 --i;
454
455 return i;
456 }
457
fsg_setup(struct usb_function * f,const struct usb_ctrlrequest * ctrl)458 static int fsg_setup(struct usb_function *f,
459 const struct usb_ctrlrequest *ctrl)
460 {
461 struct fsg_dev *fsg = fsg_from_func(f);
462 struct usb_request *req = fsg->common->ep0req;
463 u16 w_index = le16_to_cpu(ctrl->wIndex);
464 u16 w_value = le16_to_cpu(ctrl->wValue);
465 u16 w_length = le16_to_cpu(ctrl->wLength);
466
467 if (!fsg_is_set(fsg->common))
468 return -EOPNOTSUPP;
469
470 ++fsg->common->ep0_req_tag; /* Record arrival of a new request */
471 req->context = NULL;
472 req->length = 0;
473 dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
474
475 switch (ctrl->bRequest) {
476
477 case US_BULK_RESET_REQUEST:
478 if (ctrl->bRequestType !=
479 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
480 break;
481 if (w_index != fsg->interface_number || w_value != 0 ||
482 w_length != 0)
483 return -EDOM;
484
485 /*
486 * Raise an exception to stop the current operation
487 * and reinitialize our state.
488 */
489 DBG(fsg, "bulk reset request\n");
490 raise_exception(fsg->common, FSG_STATE_PROTOCOL_RESET);
491 return USB_GADGET_DELAYED_STATUS;
492
493 case US_BULK_GET_MAX_LUN:
494 if (ctrl->bRequestType !=
495 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
496 break;
497 if (w_index != fsg->interface_number || w_value != 0 ||
498 w_length != 1)
499 return -EDOM;
500 VDBG(fsg, "get max LUN\n");
501 *(u8 *)req->buf = _fsg_common_get_max_lun(fsg->common);
502
503 /* Respond with data/status */
504 req->length = min_t(u16, 1, w_length);
505 return ep0_queue(fsg->common);
506 }
507
508 VDBG(fsg,
509 "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
510 ctrl->bRequestType, ctrl->bRequest,
511 le16_to_cpu(ctrl->wValue), w_index, w_length);
512 return -EOPNOTSUPP;
513 }
514
515
516 /*-------------------------------------------------------------------------*/
517
518 /* All the following routines run in process context */
519
520 /* Use this for bulk or interrupt transfers, not ep0 */
start_transfer(struct fsg_dev * fsg,struct usb_ep * ep,struct usb_request * req)521 static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
522 struct usb_request *req)
523 {
524 int rc;
525
526 if (ep == fsg->bulk_in)
527 dump_msg(fsg, "bulk-in", req->buf, req->length);
528
529 rc = usb_ep_queue(ep, req, GFP_KERNEL);
530 if (rc) {
531
532 /* We can't do much more than wait for a reset */
533 req->status = rc;
534
535 /*
536 * Note: currently the net2280 driver fails zero-length
537 * submissions if DMA is enabled.
538 */
539 if (rc != -ESHUTDOWN &&
540 !(rc == -EOPNOTSUPP && req->length == 0))
541 WARNING(fsg, "error in submission: %s --> %d\n",
542 ep->name, rc);
543 }
544 return rc;
545 }
546
start_in_transfer(struct fsg_common * common,struct fsg_buffhd * bh)547 static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
548 {
549 int rc;
550
551 if (!fsg_is_set(common))
552 return false;
553 bh->state = BUF_STATE_SENDING;
554 rc = start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq);
555 if (rc) {
556 bh->state = BUF_STATE_EMPTY;
557 if (rc == -ESHUTDOWN) {
558 common->running = 0;
559 return false;
560 }
561 }
562 return true;
563 }
564
start_out_transfer(struct fsg_common * common,struct fsg_buffhd * bh)565 static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
566 {
567 int rc;
568
569 if (!fsg_is_set(common))
570 return false;
571 bh->state = BUF_STATE_RECEIVING;
572 rc = start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq);
573 if (rc) {
574 bh->state = BUF_STATE_FULL;
575 if (rc == -ESHUTDOWN) {
576 common->running = 0;
577 return false;
578 }
579 }
580 return true;
581 }
582
sleep_thread(struct fsg_common * common,bool can_freeze,struct fsg_buffhd * bh)583 static int sleep_thread(struct fsg_common *common, bool can_freeze,
584 struct fsg_buffhd *bh)
585 {
586 int rc;
587
588 /* Wait until a signal arrives or bh is no longer busy */
589 if (can_freeze)
590 /*
591 * synchronize with the smp_store_release(&bh->state) in
592 * bulk_in_complete() or bulk_out_complete()
593 */
594 rc = wait_event_freezable(common->io_wait,
595 bh && smp_load_acquire(&bh->state) >=
596 BUF_STATE_EMPTY);
597 else
598 rc = wait_event_interruptible(common->io_wait,
599 bh && smp_load_acquire(&bh->state) >=
600 BUF_STATE_EMPTY);
601 return rc ? -EINTR : 0;
602 }
603
604
605 /*-------------------------------------------------------------------------*/
606
do_read(struct fsg_common * common)607 static int do_read(struct fsg_common *common)
608 {
609 struct fsg_lun *curlun = common->curlun;
610 u64 lba;
611 struct fsg_buffhd *bh;
612 int rc;
613 u32 amount_left;
614 loff_t file_offset, file_offset_tmp;
615 unsigned int amount;
616 ssize_t nread;
617
618 /*
619 * Get the starting Logical Block Address and check that it's
620 * not too big.
621 */
622 if (common->cmnd[0] == READ_6)
623 lba = get_unaligned_be24(&common->cmnd[1]);
624 else {
625 if (common->cmnd[0] == READ_16)
626 lba = get_unaligned_be64(&common->cmnd[2]);
627 else /* READ_10 or READ_12 */
628 lba = get_unaligned_be32(&common->cmnd[2]);
629
630 /*
631 * We allow DPO (Disable Page Out = don't save data in the
632 * cache) and FUA (Force Unit Access = don't read from the
633 * cache), but we don't implement them.
634 */
635 if ((common->cmnd[1] & ~0x18) != 0) {
636 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
637 return -EINVAL;
638 }
639 }
640 if (lba >= curlun->num_sectors) {
641 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
642 return -EINVAL;
643 }
644 file_offset = ((loff_t) lba) << curlun->blkbits;
645
646 /* Carry out the file reads */
647 amount_left = common->data_size_from_cmnd;
648 if (unlikely(amount_left == 0))
649 return -EIO; /* No default reply */
650
651 for (;;) {
652 /*
653 * Figure out how much we need to read:
654 * Try to read the remaining amount.
655 * But don't read more than the buffer size.
656 * And don't try to read past the end of the file.
657 */
658 amount = min(amount_left, FSG_BUFLEN);
659 amount = min_t(loff_t, amount,
660 curlun->file_length - file_offset);
661
662 /* Wait for the next buffer to become available */
663 bh = common->next_buffhd_to_fill;
664 rc = sleep_thread(common, false, bh);
665 if (rc)
666 return rc;
667
668 /*
669 * If we were asked to read past the end of file,
670 * end with an empty buffer.
671 */
672 if (amount == 0) {
673 curlun->sense_data =
674 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
675 curlun->sense_data_info =
676 file_offset >> curlun->blkbits;
677 curlun->info_valid = 1;
678 bh->inreq->length = 0;
679 bh->state = BUF_STATE_FULL;
680 break;
681 }
682
683 /* Perform the read */
684 file_offset_tmp = file_offset;
685 nread = kernel_read(curlun->filp, bh->buf, amount,
686 &file_offset_tmp);
687 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
688 (unsigned long long)file_offset, (int)nread);
689 if (signal_pending(current))
690 return -EINTR;
691
692 if (nread < 0) {
693 LDBG(curlun, "error in file read: %d\n", (int)nread);
694 nread = 0;
695 } else if (nread < amount) {
696 LDBG(curlun, "partial file read: %d/%u\n",
697 (int)nread, amount);
698 nread = round_down(nread, curlun->blksize);
699 }
700 file_offset += nread;
701 amount_left -= nread;
702 common->residue -= nread;
703
704 /*
705 * Except at the end of the transfer, nread will be
706 * equal to the buffer size, which is divisible by the
707 * bulk-in maxpacket size.
708 */
709 bh->inreq->length = nread;
710 bh->state = BUF_STATE_FULL;
711
712 /* If an error occurred, report it and its position */
713 if (nread < amount) {
714 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
715 curlun->sense_data_info =
716 file_offset >> curlun->blkbits;
717 curlun->info_valid = 1;
718 break;
719 }
720
721 if (amount_left == 0)
722 break; /* No more left to read */
723
724 /* Send this buffer and go read some more */
725 bh->inreq->zero = 0;
726 if (!start_in_transfer(common, bh))
727 /* Don't know what to do if common->fsg is NULL */
728 return -EIO;
729 common->next_buffhd_to_fill = bh->next;
730 }
731
732 return -EIO; /* No default reply */
733 }
734
735
736 /*-------------------------------------------------------------------------*/
737
do_write(struct fsg_common * common)738 static int do_write(struct fsg_common *common)
739 {
740 struct fsg_lun *curlun = common->curlun;
741 u64 lba;
742 struct fsg_buffhd *bh;
743 int get_some_more;
744 u32 amount_left_to_req, amount_left_to_write;
745 loff_t usb_offset, file_offset, file_offset_tmp;
746 unsigned int amount;
747 ssize_t nwritten;
748 int rc;
749
750 if (curlun->ro) {
751 curlun->sense_data = SS_WRITE_PROTECTED;
752 return -EINVAL;
753 }
754 spin_lock(&curlun->filp->f_lock);
755 curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */
756 spin_unlock(&curlun->filp->f_lock);
757
758 /*
759 * Get the starting Logical Block Address and check that it's
760 * not too big
761 */
762 if (common->cmnd[0] == WRITE_6)
763 lba = get_unaligned_be24(&common->cmnd[1]);
764 else {
765 if (common->cmnd[0] == WRITE_16)
766 lba = get_unaligned_be64(&common->cmnd[2]);
767 else /* WRITE_10 or WRITE_12 */
768 lba = get_unaligned_be32(&common->cmnd[2]);
769
770 /*
771 * We allow DPO (Disable Page Out = don't save data in the
772 * cache) and FUA (Force Unit Access = write directly to the
773 * medium). We don't implement DPO; we implement FUA by
774 * performing synchronous output.
775 */
776 if (common->cmnd[1] & ~0x18) {
777 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
778 return -EINVAL;
779 }
780 if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */
781 spin_lock(&curlun->filp->f_lock);
782 curlun->filp->f_flags |= O_SYNC;
783 spin_unlock(&curlun->filp->f_lock);
784 }
785 }
786 if (lba >= curlun->num_sectors) {
787 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
788 return -EINVAL;
789 }
790
791 /* Carry out the file writes */
792 get_some_more = 1;
793 file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
794 amount_left_to_req = common->data_size_from_cmnd;
795 amount_left_to_write = common->data_size_from_cmnd;
796
797 while (amount_left_to_write > 0) {
798
799 /* Queue a request for more data from the host */
800 bh = common->next_buffhd_to_fill;
801 if (bh->state == BUF_STATE_EMPTY && get_some_more) {
802
803 /*
804 * Figure out how much we want to get:
805 * Try to get the remaining amount,
806 * but not more than the buffer size.
807 */
808 amount = min(amount_left_to_req, FSG_BUFLEN);
809
810 /* Beyond the end of the backing file? */
811 if (usb_offset >= curlun->file_length) {
812 get_some_more = 0;
813 curlun->sense_data =
814 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
815 curlun->sense_data_info =
816 usb_offset >> curlun->blkbits;
817 curlun->info_valid = 1;
818 continue;
819 }
820
821 /* Get the next buffer */
822 usb_offset += amount;
823 common->usb_amount_left -= amount;
824 amount_left_to_req -= amount;
825 if (amount_left_to_req == 0)
826 get_some_more = 0;
827
828 /*
829 * Except at the end of the transfer, amount will be
830 * equal to the buffer size, which is divisible by
831 * the bulk-out maxpacket size.
832 */
833 set_bulk_out_req_length(common, bh, amount);
834 if (!start_out_transfer(common, bh))
835 /* Dunno what to do if common->fsg is NULL */
836 return -EIO;
837 common->next_buffhd_to_fill = bh->next;
838 continue;
839 }
840
841 /* Write the received data to the backing file */
842 bh = common->next_buffhd_to_drain;
843 if (bh->state == BUF_STATE_EMPTY && !get_some_more)
844 break; /* We stopped early */
845
846 /* Wait for the data to be received */
847 rc = sleep_thread(common, false, bh);
848 if (rc)
849 return rc;
850
851 common->next_buffhd_to_drain = bh->next;
852 bh->state = BUF_STATE_EMPTY;
853
854 /* Did something go wrong with the transfer? */
855 if (bh->outreq->status != 0) {
856 curlun->sense_data = SS_COMMUNICATION_FAILURE;
857 curlun->sense_data_info =
858 file_offset >> curlun->blkbits;
859 curlun->info_valid = 1;
860 break;
861 }
862
863 amount = bh->outreq->actual;
864 if (curlun->file_length - file_offset < amount) {
865 LERROR(curlun, "write %u @ %llu beyond end %llu\n",
866 amount, (unsigned long long)file_offset,
867 (unsigned long long)curlun->file_length);
868 amount = curlun->file_length - file_offset;
869 }
870
871 /*
872 * Don't accept excess data. The spec doesn't say
873 * what to do in this case. We'll ignore the error.
874 */
875 amount = min(amount, bh->bulk_out_intended_length);
876
877 /* Don't write a partial block */
878 amount = round_down(amount, curlun->blksize);
879 if (amount == 0)
880 goto empty_write;
881
882 /* Perform the write */
883 file_offset_tmp = file_offset;
884 nwritten = kernel_write(curlun->filp, bh->buf, amount,
885 &file_offset_tmp);
886 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
887 (unsigned long long)file_offset, (int)nwritten);
888 if (signal_pending(current))
889 return -EINTR; /* Interrupted! */
890
891 if (nwritten < 0) {
892 LDBG(curlun, "error in file write: %d\n",
893 (int) nwritten);
894 nwritten = 0;
895 } else if (nwritten < amount) {
896 LDBG(curlun, "partial file write: %d/%u\n",
897 (int) nwritten, amount);
898 nwritten = round_down(nwritten, curlun->blksize);
899 }
900 file_offset += nwritten;
901 amount_left_to_write -= nwritten;
902 common->residue -= nwritten;
903
904 /* If an error occurred, report it and its position */
905 if (nwritten < amount) {
906 curlun->sense_data = SS_WRITE_ERROR;
907 curlun->sense_data_info =
908 file_offset >> curlun->blkbits;
909 curlun->info_valid = 1;
910 break;
911 }
912
913 empty_write:
914 /* Did the host decide to stop early? */
915 if (bh->outreq->actual < bh->bulk_out_intended_length) {
916 common->short_packet_received = 1;
917 break;
918 }
919 }
920
921 return -EIO; /* No default reply */
922 }
923
924
925 /*-------------------------------------------------------------------------*/
926
do_synchronize_cache(struct fsg_common * common)927 static int do_synchronize_cache(struct fsg_common *common)
928 {
929 struct fsg_lun *curlun = common->curlun;
930 int rc;
931
932 /* We ignore the requested LBA and write out all file's
933 * dirty data buffers. */
934 rc = fsg_lun_fsync_sub(curlun);
935 if (rc)
936 curlun->sense_data = SS_WRITE_ERROR;
937 return 0;
938 }
939
940
941 /*-------------------------------------------------------------------------*/
942
invalidate_sub(struct fsg_lun * curlun)943 static void invalidate_sub(struct fsg_lun *curlun)
944 {
945 struct file *filp = curlun->filp;
946 struct inode *inode = file_inode(filp);
947 unsigned long __maybe_unused rc;
948
949 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
950 VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
951 }
952
/*
 * VERIFY(10): read the requested blocks back from the backing file so
 * that unreadable sectors are reported through the sense data.
 * Returns 0 when done (medium errors are signalled via sense data),
 * -EINVAL for a bad CDB, -EIO for a zero-length request, or -EINTR
 * if a signal arrives while waiting.
 */
static int do_verify(struct fsg_common *common)
{
	struct fsg_lun *curlun = common->curlun;
	u32 lba;
	u32 verification_length;
	struct fsg_buffhd *bh = common->next_buffhd_to_fill;
	loff_t file_offset, file_offset_tmp;
	u32 amount_left;
	unsigned int amount;
	ssize_t nread;

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big.
	 */
	lba = get_unaligned_be32(&common->cmnd[2]);
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/*
	 * We allow DPO (Disable Page Out = don't save data in the
	 * cache) but we don't implement it.
	 */
	if (common->cmnd[1] & ~0x10) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	verification_length = get_unaligned_be16(&common->cmnd[7]);
	if (unlikely(verification_length == 0))
		return -EIO;		/* No default reply */

	/* Prepare to carry out the file verify */
	amount_left = verification_length << curlun->blkbits;
	file_offset = ((loff_t) lba) << curlun->blkbits;

	/* Write out all the dirty buffers before invalidating them */
	fsg_lun_fsync_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	/* Invalidate the cache so the reads below go to the medium */
	invalidate_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	/* Just try to read the requested blocks */
	while (amount_left > 0) {
		/*
		 * Figure out how much we need to read:
		 * Try to read the remaining amount, but not more than
		 * the buffer size.
		 * And don't try to read past the end of the file.
		 */
		amount = min(amount_left, FSG_BUFLEN);
		amount = min_t(loff_t, amount,
			       curlun->file_length - file_offset);
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info =
				file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = kernel_read(curlun->filp, bh->buf, amount,
				&file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
				(unsigned long long) file_offset,
				(int) nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
			LDBG(curlun, "error in file verify: %d\n", (int)nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file verify: %d/%u\n",
			     (int)nread, amount);
			/* Keep only whole blocks; partial blocks don't count */
			nread = round_down(nread, curlun->blksize);
		}
		if (nread == 0) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info =
				file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}
		file_offset += nread;
		amount_left -= nread;
	}
	return 0;
}
1050
1051
1052 /*-------------------------------------------------------------------------*/
1053
do_inquiry(struct fsg_common * common,struct fsg_buffhd * bh)1054 static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
1055 {
1056 struct fsg_lun *curlun = common->curlun;
1057 u8 *buf = (u8 *) bh->buf;
1058
1059 if (!curlun) { /* Unsupported LUNs are okay */
1060 common->bad_lun_okay = 1;
1061 memset(buf, 0, 36);
1062 buf[0] = TYPE_NO_LUN; /* Unsupported, no device-type */
1063 buf[4] = 31; /* Additional length */
1064 return 36;
1065 }
1066
1067 buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK;
1068 buf[1] = curlun->removable ? 0x80 : 0;
1069 buf[2] = 2; /* ANSI SCSI level 2 */
1070 buf[3] = 2; /* SCSI-2 INQUIRY data format */
1071 buf[4] = 31; /* Additional length */
1072 buf[5] = 0; /* No special options */
1073 buf[6] = 0;
1074 buf[7] = 0;
1075 if (curlun->inquiry_string[0])
1076 memcpy(buf + 8, curlun->inquiry_string,
1077 sizeof(curlun->inquiry_string));
1078 else
1079 memcpy(buf + 8, common->inquiry_string,
1080 sizeof(common->inquiry_string));
1081 return 36;
1082 }
1083
/*
 * REQUEST SENSE: report and then clear the current sense data for the
 * addressed LUN.  Builds an 18-byte fixed-format sense response in
 * bh->buf and returns its length.
 */
static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;
	u8	*buf = (u8 *) bh->buf;
	u32	sd, sdinfo;
	int	valid;

	/*
	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
	 *
	 * If a REQUEST SENSE command is received from an initiator
	 * with a pending unit attention condition (before the target
	 * generates the contingent allegiance condition), then the
	 * target shall either:
	 *   a) report any pending sense data and preserve the unit
	 *	attention condition on the logical unit, or,
	 *   b) report the unit attention condition, may discard any
	 *	pending sense data, and clear the unit attention
	 *	condition on the logical unit for that initiator.
	 *
	 * FSG normally uses option a); enable this code to use option b).
	 */
#if 0
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
	}
#endif

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
		sdinfo = 0;
		valid = 0;
	} else {
		/* Reading the sense data clears it (option a above) */
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
		valid = curlun->info_valid << 7;
		curlun->sense_data = SS_NO_SENSE;
		curlun->sense_data_info = 0;
		curlun->info_valid = 0;
	}

	/* 18-byte fixed-format sense data */
	memset(buf, 0, 18);
	buf[0] = valid | 0x70;			/* Valid, current error */
	buf[2] = SK(sd);
	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
	buf[7] = 18 - 8;			/* Additional sense length */
	buf[12] = ASC(sd);
	buf[13] = ASCQ(sd);
	return 18;
}
1136
do_read_capacity(struct fsg_common * common,struct fsg_buffhd * bh)1137 static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
1138 {
1139 struct fsg_lun *curlun = common->curlun;
1140 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1141 int pmi = common->cmnd[8];
1142 u8 *buf = (u8 *)bh->buf;
1143 u32 max_lba;
1144
1145 /* Check the PMI and LBA fields */
1146 if (pmi > 1 || (pmi == 0 && lba != 0)) {
1147 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1148 return -EINVAL;
1149 }
1150
1151 if (curlun->num_sectors < 0x100000000ULL)
1152 max_lba = curlun->num_sectors - 1;
1153 else
1154 max_lba = 0xffffffff;
1155 put_unaligned_be32(max_lba, &buf[0]); /* Max logical block */
1156 put_unaligned_be32(curlun->blksize, &buf[4]); /* Block length */
1157 return 8;
1158 }
1159
do_read_capacity_16(struct fsg_common * common,struct fsg_buffhd * bh)1160 static int do_read_capacity_16(struct fsg_common *common, struct fsg_buffhd *bh)
1161 {
1162 struct fsg_lun *curlun = common->curlun;
1163 u64 lba = get_unaligned_be64(&common->cmnd[2]);
1164 int pmi = common->cmnd[14];
1165 u8 *buf = (u8 *)bh->buf;
1166
1167 /* Check the PMI and LBA fields */
1168 if (pmi > 1 || (pmi == 0 && lba != 0)) {
1169 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1170 return -EINVAL;
1171 }
1172
1173 put_unaligned_be64(curlun->num_sectors - 1, &buf[0]);
1174 /* Max logical block */
1175 put_unaligned_be32(curlun->blksize, &buf[8]); /* Block length */
1176
1177 /* It is safe to keep other fields zeroed */
1178 memset(&buf[12], 0, 32 - 12);
1179 return 32;
1180 }
1181
do_read_header(struct fsg_common * common,struct fsg_buffhd * bh)1182 static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
1183 {
1184 struct fsg_lun *curlun = common->curlun;
1185 int msf = common->cmnd[1] & 0x02;
1186 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1187 u8 *buf = (u8 *)bh->buf;
1188
1189 if (common->cmnd[1] & ~0x02) { /* Mask away MSF */
1190 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1191 return -EINVAL;
1192 }
1193 if (lba >= curlun->num_sectors) {
1194 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1195 return -EINVAL;
1196 }
1197
1198 memset(buf, 0, 8);
1199 buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1200 store_cdrom_address(&buf[4], msf, lba);
1201 return 8;
1202 }
1203
/*
 * READ TOC/PMA/ATIP (CD-ROM emulation): build a table-of-contents
 * response describing the single emulated data track.  Formats 0
 * (formatted TOC), 1 (multi-session) and 2 (raw TOC) are supported;
 * anything else fails with INVALID FIELD IN CDB.
 */
static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;
	int		msf = common->cmnd[1] & 0x02;
	int		start_track = common->cmnd[6];
	u8		*buf = (u8 *)bh->buf;
	u8		format;
	int		i, len;

	format = common->cmnd[2] & 0xf;

	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
	    (start_track > 1 && format != 0x1)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/*
	 * Check if CDB is old style SFF-8020i
	 * i.e. format is in 2 MSBs of byte 9
	 * Mac OS-X host sends us this.
	 */
	if (format == 0)
		format = (common->cmnd[9] >> 6) & 0x3;

	switch (format) {
	case 0:	/* Formatted TOC */
	case 1:	/* Multi-session info */
		len = 4 + 2*8;		/* 4 byte header + 2 descriptors */
		memset(buf, 0, len);
		buf[1] = len - 2;	/* TOC Length excludes length field */
		buf[2] = 1;		/* First track number */
		buf[3] = 1;		/* Last track number */
		buf[5] = 0x16;		/* Data track, copying allowed */
		buf[6] = 0x01;		/* Only track is number 1 */
		store_cdrom_address(&buf[8], msf, 0);

		buf[13] = 0x16;		/* Lead-out track is data */
		buf[14] = 0xAA;		/* Lead-out track number */
		store_cdrom_address(&buf[16], msf, curlun->num_sectors);
		return len;

	case 2:
		/* Raw TOC */
		len = 4 + 3*11;		/* 4 byte header + 3 descriptors */
		memset(buf, 0, len);	/* Header + A0, A1 & A2 descriptors */
		buf[1] = len - 2;	/* TOC Length excludes length field */
		buf[2] = 1;		/* First complete session */
		buf[3] = 1;		/* Last complete session */

		/* buf now walks the three 11-byte track descriptors */
		buf += 4;
		/* fill in A0, A1 and A2 points */
		for (i = 0; i < 3; i++) {
			buf[0] = 1;	/* Session number */
			buf[1] = 0x16;	/* Data track, copying allowed */
			/* 2 - Track number 0 ->  TOC */
			buf[3] = 0xA0 + i; /* A0, A1, A2 point */
			/* 4, 5, 6 - Min, sec, frame is zero */
			buf[8] = 1;	/* Pmin: last track number */
			buf += 11;	/* go to next track descriptor */
		}
		buf -= 11;		/* go back to A2 descriptor */

		/* For A2, 7, 8, 9, 10 - zero, Pmin, Psec, Pframe of Lead out */
		store_cdrom_address(&buf[7], msf, curlun->num_sectors);
		return len;

	default:
		/* PMA, ATIP, CD-TEXT not supported/required */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
}
1277
/*
 * MODE SENSE(6)/MODE SENSE(10): build a mode-parameter response.
 * Only the Caching page (0x08) is supported; the header's WP bit
 * mirrors the LUN's read-only state.  Returns the response length
 * or -EINVAL with sense data set.
 */
static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;
	int		mscmnd = common->cmnd[0];
	u8		*buf = (u8 *) bh->buf;
	u8		*buf0 = buf;
	int		pc, page_code;
	int		changeable_values, all_pages;
	int		valid_page = 0;
	int		len, limit;

	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	pc = common->cmnd[2] >> 6;		/* Page control field */
	page_code = common->cmnd[2] & 0x3f;
	if (pc == 3) {				/* Saved values not supported */
		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
		return -EINVAL;
	}
	changeable_values = (pc == 1);
	all_pages = (page_code == 0x3f);

	/*
	 * Write the mode parameter header.  Fixed values are: default
	 * medium type, no cache control (DPOFUA), and no block descriptors.
	 * The only variable value is the WriteProtect bit.  We will fill in
	 * the mode data length later.
	 */
	memset(buf, 0, 8);
	if (mscmnd == MODE_SENSE) {
		buf[2] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
		buf += 4;
		limit = 255;
	} else {			/* MODE_SENSE_10 */
		buf[3] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
		buf += 8;
		limit = 65535;		/* Should really be FSG_BUFLEN */
	}

	/* No block descriptors */

	/*
	 * The mode pages, in numerical order.  The only page we support
	 * is the Caching page.
	 */
	if (page_code == 0x08 || all_pages) {
		valid_page = 1;
		buf[0] = 0x08;		/* Page code */
		buf[1] = 10;		/* Page length */
		memset(buf+2, 0, 10);	/* None of the fields are changeable */

		if (!changeable_values) {
			buf[2] = 0x04;	/* Write cache enable, */
					/* Read cache not disabled */
					/* No cache retention priorities */
			put_unaligned_be16(0xffff, &buf[4]);
					/* Don't disable prefetch */
					/* Minimum prefetch = 0 */
			put_unaligned_be16(0xffff, &buf[8]);
					/* Maximum prefetch */
			put_unaligned_be16(0xffff, &buf[10]);
					/* Maximum prefetch ceiling */
		}
		buf += 12;
	}

	/*
	 * Check that a valid page was requested and the mode data length
	 * isn't too long.
	 */
	len = buf - buf0;
	if (!valid_page || len > limit) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/* Store the mode data length (excludes the length field itself) */
	if (mscmnd == MODE_SENSE)
		buf0[0] = len - 1;
	else
		put_unaligned_be16(len - 2, buf0);
	return len;
}
1363
/*
 * START STOP UNIT: only meaningful for removable LUNs.  Start=1 just
 * verifies that a medium is loaded; Start=0 with LoEj=1 "ejects" the
 * medium by closing the backing file, unless the host has issued
 * PREVENT MEDIUM REMOVAL.
 */
static int do_start_stop(struct fsg_common *common)
{
	struct fsg_lun	*curlun = common->curlun;
	int		loej, start;

	if (!curlun) {
		return -EINVAL;
	} else if (!curlun->removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	} else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
		   (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	loej  = common->cmnd[4] & 0x02;
	start = common->cmnd[4] & 0x01;

	/*
	 * Our emulation doesn't support mounting; the medium is
	 * available for use as soon as it is loaded.
	 */
	if (start) {
		if (!fsg_lun_is_open(curlun)) {
			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
			return -EINVAL;
		}
		return 0;
	}

	/* Are we allowed to unload the media? */
	if (curlun->prevent_medium_removal) {
		LDBG(curlun, "unload attempt prevented\n");
		curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
		return -EINVAL;
	}

	if (!loej)
		return 0;

	/*
	 * Temporarily trade the shared hold on filesem for the exclusive
	 * one that closing the backing file requires, then restore it.
	 */
	up_read(&common->filesem);
	down_write(&common->filesem);
	fsg_lun_close(curlun);
	up_write(&common->filesem);
	down_read(&common->filesem);

	return 0;
}
1413
do_prevent_allow(struct fsg_common * common)1414 static int do_prevent_allow(struct fsg_common *common)
1415 {
1416 struct fsg_lun *curlun = common->curlun;
1417 int prevent;
1418
1419 if (!common->curlun) {
1420 return -EINVAL;
1421 } else if (!common->curlun->removable) {
1422 common->curlun->sense_data = SS_INVALID_COMMAND;
1423 return -EINVAL;
1424 }
1425
1426 prevent = common->cmnd[4] & 0x01;
1427 if ((common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */
1428 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1429 return -EINVAL;
1430 }
1431
1432 if (curlun->prevent_medium_removal && !prevent)
1433 fsg_lun_fsync_sub(curlun);
1434 curlun->prevent_medium_removal = prevent;
1435 return 0;
1436 }
1437
/*
 * READ FORMAT CAPACITIES: report a single Current/Maximum Capacity
 * Descriptor for the LUN.  Returns the 12-byte response length.
 */
static int do_read_format_capacities(struct fsg_common *common,
			struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;
	u8		*buf = (u8 *) bh->buf;

	/* Capacity List Header: 3 reserved bytes + list length */
	buf[0] = buf[1] = buf[2] = 0;
	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
	buf += 4;

	put_unaligned_be32(curlun->num_sectors, &buf[0]);
						/* Number of blocks */
	put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
	/*
	 * Descriptor byte 4 holds the descriptor-type code, not the
	 * block-length MSB; the block length occupies only bytes 5-7,
	 * so overwriting the top byte stored just above is intentional.
	 */
	buf[4] = 0x02;	/* Current capacity */
	return 12;
}
1454
do_mode_select(struct fsg_common * common,struct fsg_buffhd * bh)1455 static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
1456 {
1457 struct fsg_lun *curlun = common->curlun;
1458
1459 /* We don't support MODE SELECT */
1460 if (curlun)
1461 curlun->sense_data = SS_INVALID_COMMAND;
1462 return -EINVAL;
1463 }
1464
1465
1466 /*-------------------------------------------------------------------------*/
1467
halt_bulk_in_endpoint(struct fsg_dev * fsg)1468 static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
1469 {
1470 int rc;
1471
1472 rc = fsg_set_halt(fsg, fsg->bulk_in);
1473 if (rc == -EAGAIN)
1474 VDBG(fsg, "delayed bulk-in endpoint halt\n");
1475 while (rc != 0) {
1476 if (rc != -EAGAIN) {
1477 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
1478 rc = 0;
1479 break;
1480 }
1481
1482 /* Wait for a short time and then try again */
1483 if (msleep_interruptible(100) != 0)
1484 return -EINTR;
1485 rc = usb_ep_set_halt(fsg->bulk_in);
1486 }
1487 return rc;
1488 }
1489
wedge_bulk_in_endpoint(struct fsg_dev * fsg)1490 static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
1491 {
1492 int rc;
1493
1494 DBG(fsg, "bulk-in set wedge\n");
1495 rc = usb_ep_set_wedge(fsg->bulk_in);
1496 if (rc == -EAGAIN)
1497 VDBG(fsg, "delayed bulk-in endpoint wedge\n");
1498 while (rc != 0) {
1499 if (rc != -EAGAIN) {
1500 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
1501 rc = 0;
1502 break;
1503 }
1504
1505 /* Wait for a short time and then try again */
1506 if (msleep_interruptible(100) != 0)
1507 return -EINTR;
1508 rc = usb_ep_set_wedge(fsg->bulk_in);
1509 }
1510 return rc;
1511 }
1512
/*
 * Read and discard the excess bulk-out data the host is still sending.
 * Keeps the request pipeline busy by submitting new bulk-out requests
 * while draining filled buffers.  Returns 0 when everything has been
 * thrown away, or a negative errno on error/abort.
 */
static int throw_away_data(struct fsg_common *common)
{
	struct fsg_buffhd	*bh, *bh2;
	u32			amount;
	int			rc;

	for (bh = common->next_buffhd_to_drain;
	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
	     bh = common->next_buffhd_to_drain) {

		/* Try to submit another request if we need one */
		bh2 = common->next_buffhd_to_fill;
		if (bh2->state == BUF_STATE_EMPTY &&
				common->usb_amount_left > 0) {
			amount = min(common->usb_amount_left, FSG_BUFLEN);

			/*
			 * Except at the end of the transfer, amount will be
			 * equal to the buffer size, which is divisible by
			 * the bulk-out maxpacket size.
			 */
			set_bulk_out_req_length(common, bh2, amount);
			if (!start_out_transfer(common, bh2))
				/* Dunno what to do if common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh2->next;
			common->usb_amount_left -= amount;
			continue;
		}

		/* Wait for the data to be received */
		rc = sleep_thread(common, false, bh);
		if (rc)
			return rc;

		/* Throw away the data in a filled buffer */
		bh->state = BUF_STATE_EMPTY;
		common->next_buffhd_to_drain = bh->next;

		/* A short packet or an error ends everything */
		if (bh->outreq->actual < bh->bulk_out_intended_length ||
				bh->outreq->status != 0) {
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			return -EINTR;
		}
	}
	return 0;
}
1561
/*
 * Complete the data phase of the current command: for IN transfers,
 * send the final buffer (marking short packets or stalling as needed);
 * for OUT transfers, drain or abort any leftover data; for an unknown
 * direction, stall both pipes if possible.  Returns 0 on success or a
 * negative errno.
 */
static int finish_reply(struct fsg_common *common)
{
	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
	int			rc = 0;

	switch (common->data_dir) {
	case DATA_DIR_NONE:
		break;			/* Nothing to send */

	/*
	 * If we don't know whether the host wants to read or write,
	 * this must be CB or CBI with an unknown command.  We mustn't
	 * try to send or receive any data.  So stall both bulk pipes
	 * if we can and wait for a reset.
	 */
	case DATA_DIR_UNKNOWN:
		if (!common->can_stall) {
			/* Nothing */
		} else if (fsg_is_set(common)) {
			fsg_set_halt(common->fsg, common->fsg->bulk_out);
			rc = halt_bulk_in_endpoint(common->fsg);
		} else {
			/* Don't know what to do if common->fsg is NULL */
			rc = -EIO;
		}
		break;

	/* All but the last buffer of data must have already been sent */
	case DATA_DIR_TO_HOST:
		if (common->data_size == 0) {
			/* Nothing to send */

		/* Don't know what to do if common->fsg is NULL */
		} else if (!fsg_is_set(common)) {
			rc = -EIO;

		/* If there's no residue, simply send the last buffer */
		} else if (common->residue == 0) {
			bh->inreq->zero = 0;
			if (!start_in_transfer(common, bh))
				return -EIO;
			common->next_buffhd_to_fill = bh->next;

		/*
		 * For Bulk-only, mark the end of the data with a short
		 * packet.  If we are allowed to stall, halt the bulk-in
		 * endpoint.  (Note: This violates the Bulk-Only Transport
		 * specification, which requires us to pad the data if we
		 * don't halt the endpoint.  Presumably nobody will mind.)
		 */
		} else {
			bh->inreq->zero = 1;
			if (!start_in_transfer(common, bh))
				rc = -EIO;
			common->next_buffhd_to_fill = bh->next;
			if (common->can_stall)
				rc = halt_bulk_in_endpoint(common->fsg);
		}
		break;

	/*
	 * We have processed all we want from the data the host has sent.
	 * There may still be outstanding bulk-out requests.
	 */
	case DATA_DIR_FROM_HOST:
		if (common->residue == 0) {
			/* Nothing to receive */

		/* Did the host stop sending unexpectedly early? */
		} else if (common->short_packet_received) {
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;

		/*
		 * We haven't processed all the incoming data.  Even though
		 * we may be allowed to stall, doing so would cause a race.
		 * The controller may already have ACK'ed all the remaining
		 * bulk-out packets, in which case the host wouldn't see a
		 * STALL.  Not realizing the endpoint was halted, it wouldn't
		 * clear the halt -- leading to problems later on.
		 */
#if 0
		} else if (common->can_stall) {
			if (fsg_is_set(common))
				fsg_set_halt(common->fsg,
					     common->fsg->bulk_out);
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;
#endif

		/*
		 * We can't stall.  Read in the excess data and throw it
		 * all away.
		 */
		} else {
			rc = throw_away_data(common);
		}
		break;
	}
	return rc;
}
1663
/*
 * Build and queue the Bulk-Only Command Status Wrapper (CSW) for the
 * just-completed command.  The status byte and debug output are derived
 * from the current LUN's sense data and the phase_error flag.
 */
static void send_status(struct fsg_common *common)
{
	struct fsg_lun		*curlun = common->curlun;
	struct fsg_buffhd	*bh;
	struct bulk_cs_wrap	*csw;
	int			rc;
	u8			status = US_BULK_STAT_OK;
	u32			sd, sdinfo = 0;

	/* Wait for the next buffer to become available */
	bh = common->next_buffhd_to_fill;
	rc = sleep_thread(common, false, bh);
	if (rc)
		return;

	/* Pick the sense data to report in the debug message */
	if (curlun) {
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
	} else if (common->bad_lun_okay)
		sd = SS_NO_SENSE;
	else
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;

	if (common->phase_error) {
		DBG(common, "sending phase-error status\n");
		status = US_BULK_STAT_PHASE;
		sd = SS_INVALID_COMMAND;
	} else if (sd != SS_NO_SENSE) {
		DBG(common, "sending command-failure status\n");
		status = US_BULK_STAT_FAIL;
		VDBG(common, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
				"  info x%x\n",
				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
	}

	/* Store and send the Bulk-only CSW */
	csw = (void *)bh->buf;

	csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
	csw->Tag = common->tag;
	csw->Residue = cpu_to_le32(common->residue);
	csw->Status = status;

	bh->inreq->length = US_BULK_CS_WRAP_LEN;
	bh->inreq->zero = 0;
	if (!start_in_transfer(common, bh))
		/* Don't know what to do if common->fsg is NULL */
		return;

	common->next_buffhd_to_fill = bh->next;
	return;
}
1716
1717
1718 /*-------------------------------------------------------------------------*/
1719
1720 /*
1721 * Check whether the command is properly formed and whether its data size
1722 * and direction agree with the values we already have.
1723 */
static int check_command(struct fsg_common *common, int cmnd_size,
			 enum data_direction data_dir, unsigned int mask,
			 int needs_medium, const char *name)
{
	int i;
	unsigned int lun = common->cmnd[1] >> 5;	/* LUN bits in CDB byte 1 */
	static const char dirletter[4] = {'u', 'o', 'i', 'n'};
	char hdlen[20];
	struct fsg_lun *curlun;

	/* Format the host-side data size for the debug message below */
	hdlen[0] = 0;
	if (common->data_dir != DATA_DIR_UNKNOWN)
		sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
			common->data_size);
	VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
	     name, cmnd_size, dirletter[(int) data_dir],
	     common->data_size_from_cmnd, common->cmnd_size, hdlen);

	/*
	 * We can't reply at all until we know the correct data direction
	 * and size.
	 */
	if (common->data_size_from_cmnd == 0)
		data_dir = DATA_DIR_NONE;
	if (common->data_size < common->data_size_from_cmnd) {
		/*
		 * Host data size < Device data size is a phase error.
		 * Carry out the command, but only transfer as much as
		 * we are allowed.
		 */
		common->data_size_from_cmnd = common->data_size;
		common->phase_error = 1;
	}
	common->residue = common->data_size;
	common->usb_amount_left = common->data_size;

	/* Conflicting data directions is a phase error */
	if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) {
		common->phase_error = 1;
		return -EINVAL;
	}

	/* Verify the length of the command itself */
	if (cmnd_size != common->cmnd_size) {

		/*
		 * Special case workaround: There are plenty of buggy SCSI
		 * implementations. Many have issues with cbw->Length
		 * field passing a wrong command size. For those cases we
		 * always try to work around the problem by using the length
		 * sent by the host side provided it is at least as large
		 * as the correct command length.
		 * Examples of such cases would be MS-Windows, which issues
		 * REQUEST SENSE with cbw->Length == 12 where it should
		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
		 * REQUEST SENSE with cbw->Length == 10 where it should
		 * be 6 as well.
		 */
		if (cmnd_size <= common->cmnd_size) {
			DBG(common, "%s is buggy! Expected length %d "
			    "but we got %d\n", name,
			    cmnd_size, common->cmnd_size);
			cmnd_size = common->cmnd_size;
		} else {
			common->phase_error = 1;
			return -EINVAL;
		}
	}

	/* Check that the LUN values are consistent */
	if (common->lun != lun)
		DBG(common, "using LUN %u from CBW, not LUN %u from CDB\n",
		    common->lun, lun);

	/* Check the LUN */
	curlun = common->curlun;
	if (curlun) {
		/* Any command other than REQUEST SENSE clears old sense data */
		if (common->cmnd[0] != REQUEST_SENSE) {
			curlun->sense_data = SS_NO_SENSE;
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
	} else {
		common->bad_lun_okay = 0;

		/*
		 * INQUIRY and REQUEST SENSE commands are explicitly allowed
		 * to use unsupported LUNs; all others may not.
		 */
		if (common->cmnd[0] != INQUIRY &&
		    common->cmnd[0] != REQUEST_SENSE) {
			DBG(common, "unsupported LUN %u\n", common->lun);
			return -EINVAL;
		}
	}

	/*
	 * If a unit attention condition exists, only INQUIRY and
	 * REQUEST SENSE commands are allowed; anything else must fail.
	 */
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
	    common->cmnd[0] != INQUIRY &&
	    common->cmnd[0] != REQUEST_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
		return -EINVAL;
	}

	/* Check that only command bytes listed in the mask are non-zero */
	common->cmnd[1] &= 0x1f;			/* Mask away the LUN */
	for (i = 1; i < cmnd_size; ++i) {
		if (common->cmnd[i] && !(mask & (1 << i))) {
			if (curlun)
				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}

	/* If the medium isn't mounted and the command needs to access
	 * it, return an error. */
	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
		return -EINVAL;
	}

	return 0;
}
1851
1852 /* wrapper of check_command for data size in blocks handling */
check_command_size_in_blocks(struct fsg_common * common,int cmnd_size,enum data_direction data_dir,unsigned int mask,int needs_medium,const char * name)1853 static int check_command_size_in_blocks(struct fsg_common *common,
1854 int cmnd_size, enum data_direction data_dir,
1855 unsigned int mask, int needs_medium, const char *name)
1856 {
1857 if (common->curlun) {
1858 if (check_shl_overflow(common->data_size_from_cmnd,
1859 common->curlun->blkbits,
1860 &common->data_size_from_cmnd)) {
1861 common->phase_error = 1;
1862 return -EINVAL;
1863 }
1864 }
1865
1866 return check_command(common, cmnd_size, data_dir,
1867 mask, needs_medium, name);
1868 }
1869
do_scsi_command(struct fsg_common * common)1870 static int do_scsi_command(struct fsg_common *common)
1871 {
1872 struct fsg_buffhd *bh;
1873 int rc;
1874 int reply = -EINVAL;
1875 int i;
1876 static char unknown[16];
1877
1878 dump_cdb(common);
1879
1880 /* Wait for the next buffer to become available for data or status */
1881 bh = common->next_buffhd_to_fill;
1882 common->next_buffhd_to_drain = bh;
1883 rc = sleep_thread(common, false, bh);
1884 if (rc)
1885 return rc;
1886
1887 common->phase_error = 0;
1888 common->short_packet_received = 0;
1889
1890 down_read(&common->filesem); /* We're using the backing file */
1891 switch (common->cmnd[0]) {
1892
1893 case INQUIRY:
1894 common->data_size_from_cmnd = common->cmnd[4];
1895 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1896 (1<<4), 0,
1897 "INQUIRY");
1898 if (reply == 0)
1899 reply = do_inquiry(common, bh);
1900 break;
1901
1902 case MODE_SELECT:
1903 common->data_size_from_cmnd = common->cmnd[4];
1904 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
1905 (1<<1) | (1<<4), 0,
1906 "MODE SELECT(6)");
1907 if (reply == 0)
1908 reply = do_mode_select(common, bh);
1909 break;
1910
1911 case MODE_SELECT_10:
1912 common->data_size_from_cmnd =
1913 get_unaligned_be16(&common->cmnd[7]);
1914 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
1915 (1<<1) | (3<<7), 0,
1916 "MODE SELECT(10)");
1917 if (reply == 0)
1918 reply = do_mode_select(common, bh);
1919 break;
1920
1921 case MODE_SENSE:
1922 common->data_size_from_cmnd = common->cmnd[4];
1923 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1924 (1<<1) | (1<<2) | (1<<4), 0,
1925 "MODE SENSE(6)");
1926 if (reply == 0)
1927 reply = do_mode_sense(common, bh);
1928 break;
1929
1930 case MODE_SENSE_10:
1931 common->data_size_from_cmnd =
1932 get_unaligned_be16(&common->cmnd[7]);
1933 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1934 (1<<1) | (1<<2) | (3<<7), 0,
1935 "MODE SENSE(10)");
1936 if (reply == 0)
1937 reply = do_mode_sense(common, bh);
1938 break;
1939
1940 case ALLOW_MEDIUM_REMOVAL:
1941 common->data_size_from_cmnd = 0;
1942 reply = check_command(common, 6, DATA_DIR_NONE,
1943 (1<<4), 0,
1944 "PREVENT-ALLOW MEDIUM REMOVAL");
1945 if (reply == 0)
1946 reply = do_prevent_allow(common);
1947 break;
1948
1949 case READ_6:
1950 i = common->cmnd[4];
1951 common->data_size_from_cmnd = (i == 0) ? 256 : i;
1952 reply = check_command_size_in_blocks(common, 6,
1953 DATA_DIR_TO_HOST,
1954 (7<<1) | (1<<4), 1,
1955 "READ(6)");
1956 if (reply == 0)
1957 reply = do_read(common);
1958 break;
1959
1960 case READ_10:
1961 common->data_size_from_cmnd =
1962 get_unaligned_be16(&common->cmnd[7]);
1963 reply = check_command_size_in_blocks(common, 10,
1964 DATA_DIR_TO_HOST,
1965 (1<<1) | (0xf<<2) | (3<<7), 1,
1966 "READ(10)");
1967 if (reply == 0)
1968 reply = do_read(common);
1969 break;
1970
1971 case READ_12:
1972 common->data_size_from_cmnd =
1973 get_unaligned_be32(&common->cmnd[6]);
1974 reply = check_command_size_in_blocks(common, 12,
1975 DATA_DIR_TO_HOST,
1976 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1977 "READ(12)");
1978 if (reply == 0)
1979 reply = do_read(common);
1980 break;
1981
1982 case READ_16:
1983 common->data_size_from_cmnd =
1984 get_unaligned_be32(&common->cmnd[10]);
1985 reply = check_command_size_in_blocks(common, 16,
1986 DATA_DIR_TO_HOST,
1987 (1<<1) | (0xff<<2) | (0xf<<10), 1,
1988 "READ(16)");
1989 if (reply == 0)
1990 reply = do_read(common);
1991 break;
1992
1993 case READ_CAPACITY:
1994 common->data_size_from_cmnd = 8;
1995 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1996 (0xf<<2) | (1<<8), 1,
1997 "READ CAPACITY");
1998 if (reply == 0)
1999 reply = do_read_capacity(common, bh);
2000 break;
2001
2002 case READ_HEADER:
2003 if (!common->curlun || !common->curlun->cdrom)
2004 goto unknown_cmnd;
2005 common->data_size_from_cmnd =
2006 get_unaligned_be16(&common->cmnd[7]);
2007 reply = check_command(common, 10, DATA_DIR_TO_HOST,
2008 (3<<7) | (0x1f<<1), 1,
2009 "READ HEADER");
2010 if (reply == 0)
2011 reply = do_read_header(common, bh);
2012 break;
2013
2014 case READ_TOC:
2015 if (!common->curlun || !common->curlun->cdrom)
2016 goto unknown_cmnd;
2017 common->data_size_from_cmnd =
2018 get_unaligned_be16(&common->cmnd[7]);
2019 reply = check_command(common, 10, DATA_DIR_TO_HOST,
2020 (0xf<<6) | (3<<1), 1,
2021 "READ TOC");
2022 if (reply == 0)
2023 reply = do_read_toc(common, bh);
2024 break;
2025
2026 case READ_FORMAT_CAPACITIES:
2027 common->data_size_from_cmnd =
2028 get_unaligned_be16(&common->cmnd[7]);
2029 reply = check_command(common, 10, DATA_DIR_TO_HOST,
2030 (3<<7), 1,
2031 "READ FORMAT CAPACITIES");
2032 if (reply == 0)
2033 reply = do_read_format_capacities(common, bh);
2034 break;
2035
2036 case REQUEST_SENSE:
2037 common->data_size_from_cmnd = common->cmnd[4];
2038 reply = check_command(common, 6, DATA_DIR_TO_HOST,
2039 (1<<4), 0,
2040 "REQUEST SENSE");
2041 if (reply == 0)
2042 reply = do_request_sense(common, bh);
2043 break;
2044
2045 case SERVICE_ACTION_IN_16:
2046 switch (common->cmnd[1] & 0x1f) {
2047
2048 case SAI_READ_CAPACITY_16:
2049 common->data_size_from_cmnd =
2050 get_unaligned_be32(&common->cmnd[10]);
2051 reply = check_command(common, 16, DATA_DIR_TO_HOST,
2052 (1<<1) | (0xff<<2) | (0xf<<10) |
2053 (1<<14), 1,
2054 "READ CAPACITY(16)");
2055 if (reply == 0)
2056 reply = do_read_capacity_16(common, bh);
2057 break;
2058
2059 default:
2060 goto unknown_cmnd;
2061 }
2062 break;
2063
2064 case START_STOP:
2065 common->data_size_from_cmnd = 0;
2066 reply = check_command(common, 6, DATA_DIR_NONE,
2067 (1<<1) | (1<<4), 0,
2068 "START-STOP UNIT");
2069 if (reply == 0)
2070 reply = do_start_stop(common);
2071 break;
2072
2073 case SYNCHRONIZE_CACHE:
2074 common->data_size_from_cmnd = 0;
2075 reply = check_command(common, 10, DATA_DIR_NONE,
2076 (0xf<<2) | (3<<7), 1,
2077 "SYNCHRONIZE CACHE");
2078 if (reply == 0)
2079 reply = do_synchronize_cache(common);
2080 break;
2081
2082 case TEST_UNIT_READY:
2083 common->data_size_from_cmnd = 0;
2084 reply = check_command(common, 6, DATA_DIR_NONE,
2085 0, 1,
2086 "TEST UNIT READY");
2087 break;
2088
2089 /*
2090 * Although optional, this command is used by MS-Windows. We
2091 * support a minimal version: BytChk must be 0.
2092 */
2093 case VERIFY:
2094 common->data_size_from_cmnd = 0;
2095 reply = check_command(common, 10, DATA_DIR_NONE,
2096 (1<<1) | (0xf<<2) | (3<<7), 1,
2097 "VERIFY");
2098 if (reply == 0)
2099 reply = do_verify(common);
2100 break;
2101
2102 case WRITE_6:
2103 i = common->cmnd[4];
2104 common->data_size_from_cmnd = (i == 0) ? 256 : i;
2105 reply = check_command_size_in_blocks(common, 6,
2106 DATA_DIR_FROM_HOST,
2107 (7<<1) | (1<<4), 1,
2108 "WRITE(6)");
2109 if (reply == 0)
2110 reply = do_write(common);
2111 break;
2112
2113 case WRITE_10:
2114 common->data_size_from_cmnd =
2115 get_unaligned_be16(&common->cmnd[7]);
2116 reply = check_command_size_in_blocks(common, 10,
2117 DATA_DIR_FROM_HOST,
2118 (1<<1) | (0xf<<2) | (3<<7), 1,
2119 "WRITE(10)");
2120 if (reply == 0)
2121 reply = do_write(common);
2122 break;
2123
2124 case WRITE_12:
2125 common->data_size_from_cmnd =
2126 get_unaligned_be32(&common->cmnd[6]);
2127 reply = check_command_size_in_blocks(common, 12,
2128 DATA_DIR_FROM_HOST,
2129 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2130 "WRITE(12)");
2131 if (reply == 0)
2132 reply = do_write(common);
2133 break;
2134
2135 case WRITE_16:
2136 common->data_size_from_cmnd =
2137 get_unaligned_be32(&common->cmnd[10]);
2138 reply = check_command_size_in_blocks(common, 16,
2139 DATA_DIR_FROM_HOST,
2140 (1<<1) | (0xff<<2) | (0xf<<10), 1,
2141 "WRITE(16)");
2142 if (reply == 0)
2143 reply = do_write(common);
2144 break;
2145
2146 /*
2147 * Some mandatory commands that we recognize but don't implement.
2148 * They don't mean much in this setting. It's left as an exercise
2149 * for anyone interested to implement RESERVE and RELEASE in terms
2150 * of Posix locks.
2151 */
2152 case FORMAT_UNIT:
2153 case RELEASE_6:
2154 case RESERVE_6:
2155 case SEND_DIAGNOSTIC:
2156
2157 default:
2158 unknown_cmnd:
2159 common->data_size_from_cmnd = 0;
2160 sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
2161 reply = check_command(common, common->cmnd_size,
2162 DATA_DIR_UNKNOWN, ~0, 0, unknown);
2163 if (reply == 0) {
2164 common->curlun->sense_data = SS_INVALID_COMMAND;
2165 reply = -EINVAL;
2166 }
2167 break;
2168 }
2169 up_read(&common->filesem);
2170
2171 if (reply == -EINTR || signal_pending(current))
2172 return -EINTR;
2173
2174 /* Set up the single reply buffer for finish_reply() */
2175 if (reply == -EINVAL)
2176 reply = 0; /* Error reply length */
2177 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
2178 reply = min_t(u32, reply, common->data_size_from_cmnd);
2179 bh->inreq->length = reply;
2180 bh->state = BUF_STATE_FULL;
2181 common->residue -= reply;
2182 } /* Otherwise it's already set */
2183
2184 return 0;
2185 }
2186
2187
2188 /*-------------------------------------------------------------------------*/
2189
/*
 * Parse and validate a just-received Command Block Wrapper (Bulk-Only
 * Transport).  On success the command bytes, data direction, data size,
 * LUN and tag are stored in @common for the SCSI layer; any malformed
 * or non-meaningful CBW is rejected with -EINVAL.
 */
static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct usb_request	*req = bh->outreq;
	struct bulk_cb_wrap	*cbw = req->buf;
	struct fsg_common	*common = fsg->common;

	/* Was this a real packet?  Should it be ignored? */
	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
		return -EINVAL;

	/* Is the CBW valid? (exact 31-byte length and correct signature) */
	if (req->actual != US_BULK_CB_WRAP_LEN ||
			cbw->Signature != cpu_to_le32(
				US_BULK_CB_SIGN)) {
		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
				req->actual,
				le32_to_cpu(cbw->Signature));

		/*
		 * The Bulk-only spec says we MUST stall the IN endpoint
		 * (6.6.1), so it's unavoidable.  It also says we must
		 * retain this state until the next reset, but there's
		 * no way to tell the controller driver it should ignore
		 * Clear-Feature(HALT) requests.
		 *
		 * We aren't required to halt the OUT endpoint; instead
		 * we can simply accept and discard any data received
		 * until the next reset.
		 */
		wedge_bulk_in_endpoint(fsg);
		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
		return -EINVAL;
	}

	/* Is the CBW meaningful? (LUN in range, no reserved flags, sane CDB length) */
	if (cbw->Lun >= ARRAY_SIZE(common->luns) ||
			cbw->Flags & ~US_BULK_FLAG_IN || cbw->Length <= 0 ||
			cbw->Length > MAX_COMMAND_SIZE) {
		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
				"cmdlen %u\n",
				cbw->Lun, cbw->Flags, cbw->Length);

		/*
		 * We can do anything we want here, so let's stall the
		 * bulk pipes if we are allowed to.
		 */
		if (common->can_stall) {
			fsg_set_halt(fsg, fsg->bulk_out);
			halt_bulk_in_endpoint(fsg);
		}
		return -EINVAL;
	}

	/* Save the command for later */
	common->cmnd_size = cbw->Length;
	memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
	if (cbw->Flags & US_BULK_FLAG_IN)
		common->data_dir = DATA_DIR_TO_HOST;
	else
		common->data_dir = DATA_DIR_FROM_HOST;
	common->data_size = le32_to_cpu(cbw->DataTransferLength);
	/* A zero-length transfer overrides the direction bit. */
	if (common->data_size == 0)
		common->data_dir = DATA_DIR_NONE;
	common->lun = cbw->Lun;
	/* The range check above guarantees this index is valid, but the
	 * slot itself may still be empty (NULL curlun). */
	if (common->lun < ARRAY_SIZE(common->luns))
		common->curlun = common->luns[common->lun];
	else
		common->curlun = NULL;
	common->tag = cbw->Tag;
	return 0;
}
2261
/*
 * Queue a bulk-OUT read for the next CBW, wait for it to arrive and
 * hand it to received_cbw() for validation.  Returns 0 on success or a
 * negative errno (including -EINTR from sleep_thread()).
 */
static int get_next_command(struct fsg_common *common)
{
	struct fsg_buffhd *bh;
	int rc = 0;

	/* Wait for the next buffer to become available */
	bh = common->next_buffhd_to_fill;
	rc = sleep_thread(common, true, bh);
	if (rc)
		return rc;

	/* Queue a request to read a Bulk-only CBW */
	set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN);
	if (!start_out_transfer(common, bh))
		/* Don't know what to do if common->fsg is NULL */
		return -EIO;

	/*
	 * We will drain the buffer in software, which means we
	 * can reuse it for the next filling.  No need to advance
	 * next_buffhd_to_fill.
	 */

	/* Wait for the CBW to arrive */
	rc = sleep_thread(common, true, bh);
	if (rc)
		return rc;

	/* Parse the CBW unless the interface went away in the meantime. */
	rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
	/* Mark the buffer reusable regardless of the parse result. */
	bh->state = BUF_STATE_EMPTY;

	return rc;
}
2295
2296
2297 /*-------------------------------------------------------------------------*/
2298
alloc_request(struct fsg_common * common,struct usb_ep * ep,struct usb_request ** preq)2299 static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
2300 struct usb_request **preq)
2301 {
2302 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2303 if (*preq)
2304 return 0;
2305 ERROR(common, "can't allocate request for %s\n", ep->name);
2306 return -ENOMEM;
2307 }
2308
2309 /* Reset interface setting and re-init endpoint state (toggle etc). */
/*
 * Reset interface setting and re-init endpoint state (toggle etc).
 *
 * First tears down the current interface (frees requests, disables
 * endpoints, clears common->fsg and wakes fsg_wait waiters), then -- if
 * @new_fsg is non-NULL and no error has occurred -- brings up the new
 * one.  Any failure during bring-up jumps back to "reset:" so the
 * partially-configured state is torn down again before returning rc.
 */
static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
{
	struct fsg_dev *fsg;
	int i, rc = 0;

	if (common->running)
		DBG(common, "reset interface\n");

reset:
	/* Deallocate the requests */
	if (common->fsg) {
		fsg = common->fsg;

		for (i = 0; i < common->fsg_num_buffers; ++i) {
			struct fsg_buffhd *bh = &common->buffhds[i];

			if (bh->inreq) {
				usb_ep_free_request(fsg->bulk_in, bh->inreq);
				bh->inreq = NULL;
			}
			if (bh->outreq) {
				usb_ep_free_request(fsg->bulk_out, bh->outreq);
				bh->outreq = NULL;
			}
		}

		/* Disable the endpoints */
		if (fsg->bulk_in_enabled) {
			usb_ep_disable(fsg->bulk_in);
			fsg->bulk_in_enabled = 0;
		}
		if (fsg->bulk_out_enabled) {
			usb_ep_disable(fsg->bulk_out);
			fsg->bulk_out_enabled = 0;
		}

		/* Let anyone waiting in fsg_common_put() etc. proceed. */
		common->fsg = NULL;
		wake_up(&common->fsg_wait);
	}

	common->running = 0;
	/* Teardown-only call, or a bring-up attempt already failed. */
	if (!new_fsg || rc)
		return rc;

	common->fsg = new_fsg;
	fsg = common->fsg;

	/* Enable the endpoints */
	rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in);
	if (rc)
		goto reset;
	rc = usb_ep_enable(fsg->bulk_in);
	if (rc)
		goto reset;
	fsg->bulk_in->driver_data = common;
	fsg->bulk_in_enabled = 1;

	rc = config_ep_by_speed(common->gadget, &(fsg->function),
				fsg->bulk_out);
	if (rc)
		goto reset;
	rc = usb_ep_enable(fsg->bulk_out);
	if (rc)
		goto reset;
	fsg->bulk_out->driver_data = common;
	fsg->bulk_out_enabled = 1;
	common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc);
	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);

	/* Allocate the requests */
	for (i = 0; i < common->fsg_num_buffers; ++i) {
		struct fsg_buffhd *bh = &common->buffhds[i];

		rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
		if (rc)
			goto reset;
		rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
		if (rc)
			goto reset;
		/* IN and OUT requests share the buffhd's data buffer. */
		bh->inreq->buf = bh->outreq->buf = bh->buf;
		bh->inreq->context = bh->outreq->context = bh;
		bh->inreq->complete = bulk_in_complete;
		bh->outreq->complete = bulk_out_complete;
	}

	common->running = 1;
	/* Report a reset condition to the host on every LUN. */
	for (i = 0; i < ARRAY_SIZE(common->luns); ++i)
		if (common->luns[i])
			common->luns[i]->unit_attention_data =
				SS_RESET_OCCURRED;
	return rc;
}
2402
2403
2404 /****************************** ALT CONFIGS ******************************/
2405
fsg_set_alt(struct usb_function * f,unsigned intf,unsigned alt)2406 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2407 {
2408 struct fsg_dev *fsg = fsg_from_func(f);
2409
2410 __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg);
2411 return USB_GADGET_DELAYED_STATUS;
2412 }
2413
/*
 * Disable both bulk endpoints and notify the worker thread of the
 * configuration change.  The NULL exception argument tells
 * handle_exception()/do_set_interface() that there is no new interface,
 * i.e. tear everything down.
 */
static void fsg_disable(struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);

	/* Disable the endpoints */
	if (fsg->bulk_in_enabled) {
		usb_ep_disable(fsg->bulk_in);
		fsg->bulk_in_enabled = 0;
	}
	if (fsg->bulk_out_enabled) {
		usb_ep_disable(fsg->bulk_out);
		fsg->bulk_out_enabled = 0;
	}

	__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
}
2430
2431
2432 /*-------------------------------------------------------------------------*/
2433
/*
 * Central exception handler run by the main thread.  Drains pending
 * signals, cancels in-flight transfers, resets the buffer/SCSI state
 * under common->lock, and finally performs the action matching the
 * exception state that was raised (reset, config change, exit, ...).
 */
static void handle_exception(struct fsg_common *common)
{
	int i;
	struct fsg_buffhd *bh;
	enum fsg_state old_state;
	struct fsg_lun *curlun;
	unsigned int exception_req_tag;
	struct fsg_dev *new_fsg;

	/*
	 * Clear the existing signals.  Anything but SIGUSR1 is converted
	 * into a high-priority EXIT exception.
	 */
	for (;;) {
		int sig = kernel_dequeue_signal();
		if (!sig)
			break;
		if (sig != SIGUSR1) {
			spin_lock_irq(&common->lock);
			if (common->state < FSG_STATE_EXIT)
				DBG(common, "Main thread exiting on signal\n");
			common->state = FSG_STATE_EXIT;
			spin_unlock_irq(&common->lock);
		}
	}

	/* Cancel all the pending transfers */
	if (likely(common->fsg)) {
		for (i = 0; i < common->fsg_num_buffers; ++i) {
			bh = &common->buffhds[i];
			if (bh->state == BUF_STATE_SENDING)
				usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
			if (bh->state == BUF_STATE_RECEIVING)
				usb_ep_dequeue(common->fsg->bulk_out,
					       bh->outreq);

			/* Wait for a transfer to become idle */
			if (sleep_thread(common, false, bh))
				return;
		}

		/* Clear out the controller's fifos */
		if (common->fsg->bulk_in_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_in);
		if (common->fsg->bulk_out_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_out);
	}

	/*
	 * Reset the I/O buffer states and pointers, the SCSI
	 * state, and the exception.  Then invoke the handler.
	 */
	spin_lock_irq(&common->lock);

	for (i = 0; i < common->fsg_num_buffers; ++i) {
		bh = &common->buffhds[i];
		bh->state = BUF_STATE_EMPTY;
	}
	common->next_buffhd_to_fill = &common->buffhds[0];
	common->next_buffhd_to_drain = &common->buffhds[0];
	/* Snapshot the exception details before leaving NORMAL state. */
	exception_req_tag = common->exception_req_tag;
	new_fsg = common->exception_arg;
	old_state = common->state;
	common->state = FSG_STATE_NORMAL;

	/* A bulk-out abort keeps the per-LUN sense state; all other
	 * exceptions reset it. */
	if (old_state != FSG_STATE_ABORT_BULK_OUT) {
		for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
			curlun = common->luns[i];
			if (!curlun)
				continue;
			curlun->prevent_medium_removal = 0;
			curlun->sense_data = SS_NO_SENSE;
			curlun->unit_attention_data = SS_NO_SENSE;
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
	}
	spin_unlock_irq(&common->lock);

	/* Carry out any extra actions required for the exception */
	switch (old_state) {
	case FSG_STATE_NORMAL:
		break;

	case FSG_STATE_ABORT_BULK_OUT:
		send_status(common);
		break;

	case FSG_STATE_PROTOCOL_RESET:
		/*
		 * In case we were forced against our will to halt a
		 * bulk endpoint, clear the halt now.  (The SuperH UDC
		 * requires this.)
		 */
		if (!fsg_is_set(common))
			break;
		if (test_and_clear_bit(IGNORE_BULK_OUT,
				       &common->fsg->atomic_bitflags))
			usb_ep_clear_halt(common->fsg->bulk_in);

		/* Only complete ep0 if this reset is still the current
		 * control request. */
		if (common->ep0_req_tag == exception_req_tag)
			ep0_queue(common);	/* Complete the status stage */

		/*
		 * Technically this should go here, but it would only be
		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
		 * CONFIG_CHANGE cases.
		 */
		/* for (i = 0; i < common->ARRAY_SIZE(common->luns); ++i) */
		/*	if (common->luns[i]) */
		/*		common->luns[i]->unit_attention_data = */
		/*			SS_RESET_OCCURRED; */
		break;

	case FSG_STATE_CONFIG_CHANGE:
		do_set_interface(common, new_fsg);
		if (new_fsg)
			usb_composite_setup_continue(common->cdev);
		break;

	case FSG_STATE_EXIT:
		do_set_interface(common, NULL);		/* Free resources */
		spin_lock_irq(&common->lock);
		common->state = FSG_STATE_TERMINATED;	/* Stop the thread */
		spin_unlock_irq(&common->lock);
		break;

	case FSG_STATE_TERMINATED:
		break;
	}
}
2565
2566
2567 /*-------------------------------------------------------------------------*/
2568
/*
 * Main worker thread: loops running the CBW -> SCSI command -> reply ->
 * CSW pipeline until the state machine reaches FSG_STATE_TERMINATED,
 * handling exceptions (resets, config changes, signals) between steps.
 * On exit it closes all backing files and signals fsg_unbind().
 */
static int fsg_main_thread(void *common_)
{
	struct fsg_common	*common = common_;
	int			i;

	/*
	 * Allow the thread to be killed by a signal, but set the signal mask
	 * to block everything but INT, TERM, KILL, and USR1.
	 */
	allow_signal(SIGINT);
	allow_signal(SIGTERM);
	allow_signal(SIGKILL);
	allow_signal(SIGUSR1);

	/* Allow the thread to be frozen */
	set_freezable();

	/* The main loop */
	while (common->state != FSG_STATE_TERMINATED) {
		if (exception_in_progress(common) || signal_pending(current)) {
			handle_exception(common);
			continue;
		}

		/* Idle until do_set_interface() brings the interface up. */
		if (!common->running) {
			sleep_thread(common, true, NULL);
			continue;
		}

		/* Re-check for exceptions after every pipeline stage. */
		if (get_next_command(common) || exception_in_progress(common))
			continue;
		if (do_scsi_command(common) || exception_in_progress(common))
			continue;
		if (finish_reply(common) || exception_in_progress(common))
			continue;
		send_status(common);
	}

	spin_lock_irq(&common->lock);
	common->thread_task = NULL;
	spin_unlock_irq(&common->lock);

	/* Eject media from all LUNs */

	down_write(&common->filesem);
	for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
		struct fsg_lun *curlun = common->luns[i];

		if (curlun && fsg_lun_is_open(curlun))
			fsg_lun_close(curlun);
	}
	up_write(&common->filesem);

	/* Let fsg_unbind() know the thread has exited */
	kthread_complete_and_exit(&common->thread_notifier, 0);
}
2625
2626
2627 /*************************** DEVICE ATTRIBUTES ***************************/
2628
ro_show(struct device * dev,struct device_attribute * attr,char * buf)2629 static ssize_t ro_show(struct device *dev, struct device_attribute *attr, char *buf)
2630 {
2631 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2632
2633 return fsg_show_ro(curlun, buf);
2634 }
2635
nofua_show(struct device * dev,struct device_attribute * attr,char * buf)2636 static ssize_t nofua_show(struct device *dev, struct device_attribute *attr,
2637 char *buf)
2638 {
2639 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2640
2641 return fsg_show_nofua(curlun, buf);
2642 }
2643
file_show(struct device * dev,struct device_attribute * attr,char * buf)2644 static ssize_t file_show(struct device *dev, struct device_attribute *attr,
2645 char *buf)
2646 {
2647 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2648 struct rw_semaphore *filesem = dev_get_drvdata(dev);
2649
2650 return fsg_show_file(curlun, filesem, buf);
2651 }
2652
ro_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2653 static ssize_t ro_store(struct device *dev, struct device_attribute *attr,
2654 const char *buf, size_t count)
2655 {
2656 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2657 struct rw_semaphore *filesem = dev_get_drvdata(dev);
2658
2659 return fsg_store_ro(curlun, filesem, buf, count);
2660 }
2661
nofua_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2662 static ssize_t nofua_store(struct device *dev, struct device_attribute *attr,
2663 const char *buf, size_t count)
2664 {
2665 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2666
2667 return fsg_store_nofua(curlun, buf, count);
2668 }
2669
file_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2670 static ssize_t file_store(struct device *dev, struct device_attribute *attr,
2671 const char *buf, size_t count)
2672 {
2673 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2674 struct rw_semaphore *filesem = dev_get_drvdata(dev);
2675
2676 return fsg_store_file(curlun, filesem, buf, count);
2677 }
2678
forced_eject_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2679 static ssize_t forced_eject_store(struct device *dev,
2680 struct device_attribute *attr,
2681 const char *buf, size_t count)
2682 {
2683 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2684 struct rw_semaphore *filesem = dev_get_drvdata(dev);
2685
2686 return fsg_store_forced_eject(curlun, filesem, buf, count);
2687 }
2688
/* These two keep their DEVICE_ATTR_* default modes (see
 * fsg_lun_dev_is_visible(), which leaves attr->mode unchanged for them). */
static DEVICE_ATTR_RW(nofua);
static DEVICE_ATTR_WO(forced_eject);

/*
 * Mode of the ro and file attribute files will be overridden in
 * fsg_lun_dev_is_visible() depending on if this is a cdrom, or if it is a
 * removable device.
 */
static DEVICE_ATTR_RW(ro);
static DEVICE_ATTR_RW(file);
2699
2700 /****************************** FSG COMMON ******************************/
2701
/*
 * Release callback for LUN devices.  The fsg_lun itself is freed
 * explicitly by fsg_common_remove_lun()/fsg_common_release(), so
 * nothing needs to be done here.
 */
static void fsg_lun_release(struct device *dev)
{
	/* Nothing needs to be done */
}
2706
fsg_common_setup(struct fsg_common * common)2707 static struct fsg_common *fsg_common_setup(struct fsg_common *common)
2708 {
2709 if (!common) {
2710 common = kzalloc_obj(*common);
2711 if (!common)
2712 return ERR_PTR(-ENOMEM);
2713 common->free_storage_on_release = 1;
2714 } else {
2715 common->free_storage_on_release = 0;
2716 }
2717 init_rwsem(&common->filesem);
2718 spin_lock_init(&common->lock);
2719 init_completion(&common->thread_notifier);
2720 init_waitqueue_head(&common->io_wait);
2721 init_waitqueue_head(&common->fsg_wait);
2722 common->state = FSG_STATE_TERMINATED;
2723 memset(common->luns, 0, sizeof(common->luns));
2724
2725 return common;
2726 }
2727
/* Select whether LUNs get sysfs device nodes (see fsg_common_create_lun()). */
void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs)
{
	common->sysfs = sysfs;
}
EXPORT_SYMBOL_GPL(fsg_common_set_sysfs);
2733
_fsg_common_free_buffers(struct fsg_buffhd * buffhds,unsigned n)2734 static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n)
2735 {
2736 if (buffhds) {
2737 struct fsg_buffhd *bh = buffhds;
2738 while (n--) {
2739 kfree(bh->buf);
2740 ++bh;
2741 }
2742 kfree(buffhds);
2743 }
2744 }
2745
fsg_common_set_num_buffers(struct fsg_common * common,unsigned int n)2746 int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n)
2747 {
2748 struct fsg_buffhd *bh, *buffhds;
2749 int i;
2750
2751 buffhds = kzalloc_objs(*buffhds, n);
2752 if (!buffhds)
2753 return -ENOMEM;
2754
2755 /* Data buffers cyclic list */
2756 bh = buffhds;
2757 i = n;
2758 goto buffhds_first_it;
2759 do {
2760 bh->next = bh + 1;
2761 ++bh;
2762 buffhds_first_it:
2763 bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
2764 if (unlikely(!bh->buf))
2765 goto error_release;
2766 } while (--i);
2767 bh->next = buffhds;
2768
2769 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
2770 common->fsg_num_buffers = n;
2771 common->buffhds = buffhds;
2772
2773 return 0;
2774
2775 error_release:
2776 /*
2777 * "buf"s pointed to by heads after n - i are NULL
2778 * so releasing them won't hurt
2779 */
2780 _fsg_common_free_buffers(buffhds, n);
2781
2782 return -ENOMEM;
2783 }
2784 EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers);
2785
/*
 * Destroy a single LUN: unregister its device (if it ever was
 * registered, i.e. sysfs mode), close the backing file and free it.
 */
void fsg_common_remove_lun(struct fsg_lun *lun)
{
	if (device_is_registered(&lun->dev))
		device_unregister(&lun->dev);
	fsg_lun_close(lun);
	kfree(lun);
}
EXPORT_SYMBOL_GPL(fsg_common_remove_lun);
2794
_fsg_common_remove_luns(struct fsg_common * common,int n)2795 static void _fsg_common_remove_luns(struct fsg_common *common, int n)
2796 {
2797 int i;
2798
2799 for (i = 0; i < n; ++i)
2800 if (common->luns[i]) {
2801 fsg_common_remove_lun(common->luns[i]);
2802 common->luns[i] = NULL;
2803 }
2804 }
2805
/* Remove every LUN of @common. */
void fsg_common_remove_luns(struct fsg_common *common)
{
	_fsg_common_remove_luns(common, ARRAY_SIZE(common->luns));
}
EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
2811
/* Free the buffhd ring of @common and mark it absent. */
void fsg_common_free_buffers(struct fsg_common *common)
{
	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
	common->buffhds = NULL;
}
EXPORT_SYMBOL_GPL(fsg_common_free_buffers);
2818
/*
 * Bind @common to a composite device: record the gadget, ep0 and the
 * composite core's ep0 request, attach the function's string table and
 * decide whether bulk stalls may be used.  Returns 0 or the error from
 * usb_gstrings_attach().
 */
int fsg_common_set_cdev(struct fsg_common *common,
			struct usb_composite_dev *cdev, bool can_stall)
{
	struct usb_string *us;

	common->gadget = cdev->gadget;
	common->ep0 = cdev->gadget->ep0;
	common->ep0req = cdev->req;
	common->cdev = cdev;

	us = usb_gstrings_attach(cdev, fsg_strings_array,
				 ARRAY_SIZE(fsg_strings));
	if (IS_ERR(us))
		return PTR_ERR(us);

	fsg_intf_desc.iInterface = us[FSG_STRING_INTERFACE].id;

	/*
	 * Some peripheral controllers are known not to be able to
	 * halt bulk endpoints correctly.  If one of them is present,
	 * disable stalls.
	 */
	common->can_stall = can_stall &&
			gadget_is_stall_supported(common->gadget);

	return 0;
}
EXPORT_SYMBOL_GPL(fsg_common_set_cdev);
2847
/* sysfs attributes exposed for every LUN device; per-attribute modes
 * are adjusted by fsg_lun_dev_is_visible(). */
static struct attribute *fsg_lun_dev_attrs[] = {
	&dev_attr_ro.attr,
	&dev_attr_file.attr,
	&dev_attr_nofua.attr,
	&dev_attr_forced_eject.attr,
	NULL
};
2855
fsg_lun_dev_is_visible(struct kobject * kobj,struct attribute * attr,int idx)2856 static umode_t fsg_lun_dev_is_visible(struct kobject *kobj,
2857 struct attribute *attr, int idx)
2858 {
2859 struct device *dev = kobj_to_dev(kobj);
2860 struct fsg_lun *lun = fsg_lun_from_dev(dev);
2861
2862 if (attr == &dev_attr_ro.attr)
2863 return lun->cdrom ? S_IRUGO : (S_IWUSR | S_IRUGO);
2864 if (attr == &dev_attr_file.attr)
2865 return lun->removable ? (S_IWUSR | S_IRUGO) : S_IRUGO;
2866 return attr->mode;
2867 }
2868
/* Attribute group installed on each LUN device (lun->dev.groups). */
static const struct attribute_group fsg_lun_dev_group = {
	.attrs = fsg_lun_dev_attrs,
	.is_visible = fsg_lun_dev_is_visible,
};

/* NULL-terminated group list assigned in fsg_common_create_lun(). */
static const struct attribute_group *fsg_lun_dev_groups[] = {
	&fsg_lun_dev_group,
	NULL
};
2878
/*
 * Create LUN number @id for @common from configuration @cfg.
 *
 * @name is used as the device name; without sysfs support the pointer
 * is kept as-is (not copied -- the caller retains ownership), with
 * sysfs a LUN device is registered and the device's name is used.
 * A backing file, if given, is opened and its resolved path logged.
 *
 * Returns 0 on success; -ENODEV/-EBUSY/-EINVAL for bad id/slot/config,
 * -ENOMEM on allocation failure, or the error from fsg_lun_open().
 */
int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
			  unsigned int id, const char *name,
			  const char **name_pfx)
{
	struct fsg_lun *lun;
	char *pathbuf = NULL, *p = "(no medium)";
	int rc = -ENOMEM;

	if (id >= ARRAY_SIZE(common->luns))
		return -ENODEV;

	if (common->luns[id])
		return -EBUSY;

	/* A non-removable LUN must have a backing file from the start. */
	if (!cfg->filename && !cfg->removable) {
		pr_err("no file given for LUN%d\n", id);
		return -EINVAL;
	}

	lun = kzalloc_obj(*lun);
	if (!lun)
		return -ENOMEM;

	lun->name_pfx = name_pfx;

	lun->cdrom = !!cfg->cdrom;
	/* CD-ROM emulation forces read-only access. */
	lun->ro = cfg->cdrom || cfg->ro;
	lun->initially_ro = lun->ro;
	lun->removable = !!cfg->removable;

	if (!common->sysfs) {
		/* we DON'T own the name!*/
		lun->name = name;
	} else {
		lun->dev.release = fsg_lun_release;
		lun->dev.parent = &common->gadget->dev;
		lun->dev.groups = fsg_lun_dev_groups;
		/* The rw_semaphore is fetched back via dev_get_drvdata()
		 * in the sysfs store/show callbacks. */
		dev_set_drvdata(&lun->dev, &common->filesem);
		dev_set_name(&lun->dev, "%s", name);
		lun->name = dev_name(&lun->dev);

		rc = device_register(&lun->dev);
		if (rc) {
			pr_info("failed to register LUN%d: %d\n", id, rc);
			/* device_register() failure still needs a put. */
			put_device(&lun->dev);
			goto error_sysfs;
		}
	}

	common->luns[id] = lun;

	if (cfg->filename) {
		rc = fsg_lun_open(lun, cfg->filename);
		if (rc)
			goto error_lun;

		/* Resolve the backing file's path purely for the log line. */
		p = "(error)";
		pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
		if (pathbuf) {
			p = file_path(lun->filp, pathbuf, PATH_MAX);
			if (IS_ERR(p))
				p = "(error)";
		}
	}
	pr_info("LUN: %s%s%sfile: %s\n",
	      lun->removable ? "removable " : "",
	      lun->ro ? "read only " : "",
	      lun->cdrom ? "CD-ROM " : "",
	      p);
	kfree(pathbuf);

	return 0;

error_lun:
	if (device_is_registered(&lun->dev))
		device_unregister(&lun->dev);
	common->luns[id] = NULL;
error_sysfs:
	kfree(lun);
	return rc;
}
EXPORT_SYMBOL_GPL(fsg_common_create_lun);
2961
/*
 * Create cfg->nluns LUNs named "lun0", "lun1", ... for @common.  Any
 * partially created set is torn down again on failure.
 */
int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg)
{
	char buf[8]; /* "lun" + up to 4 decimal digits + NUL */
	int i, rc;

	fsg_common_remove_luns(common);

	for (i = 0; i < cfg->nluns; ++i) {
		snprintf(buf, sizeof(buf), "lun%d", i);
		rc = fsg_common_create_lun(common, &cfg->luns[i], i, buf, NULL);
		if (rc)
			goto fail;
	}

	pr_info("Number of LUNs=%d\n", cfg->nluns);

	return 0;

fail:
	/* Only the first i LUNs were created; remove just those. */
	_fsg_common_remove_luns(common, i);
	return rc;
}
EXPORT_SYMBOL_GPL(fsg_common_create_luns);
2985
fsg_common_set_inquiry_string(struct fsg_common * common,const char * vn,const char * pn)2986 void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
2987 const char *pn)
2988 {
2989 int i;
2990
2991 /* Prepare inquiryString */
2992 i = get_default_bcdDevice();
2993 snprintf(common->inquiry_string, sizeof(common->inquiry_string),
2994 "%-8s%-16s%04x", vn ?: "Linux",
2995 /* Assume product name dependent on the first LUN */
2996 pn ?: ((*common->luns)->cdrom
2997 ? "File-CD Gadget"
2998 : "File-Stor Gadget"),
2999 i);
3000 }
3001 EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
3002
/*
 * Final teardown of @common: stop the worker thread if it is still
 * alive, destroy every LUN, free the buffer ring, and free @common
 * itself if fsg_common_setup() allocated it.
 */
static void fsg_common_release(struct fsg_common *common)
{
	int i;

	/* If the thread isn't already dead, tell it to exit now */
	if (common->state != FSG_STATE_TERMINATED) {
		raise_exception(common, FSG_STATE_EXIT);
		wait_for_completion(&common->thread_notifier);
	}

	for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
		struct fsg_lun *lun = common->luns[i];
		if (!lun)
			continue;
		fsg_lun_close(lun);
		if (device_is_registered(&lun->dev))
			device_unregister(&lun->dev);
		kfree(lun);
	}

	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
	if (common->free_storage_on_release)
		kfree(common);
}
3027
3028
3029 /*-------------------------------------------------------------------------*/
3030
/*
 * Bind the function to a configuration: verify at least one LUN exists,
 * start the worker thread if it is not running yet, claim an interface
 * number and autoconfigure the bulk endpoints for FS/HS/SS(+).
 * Returns 0 on success or a negative errno.
 */
static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);
	struct fsg_common *common = fsg->common;
	struct usb_gadget *gadget = c->cdev->gadget;
	int i;
	struct usb_ep *ep;
	unsigned max_burst;
	int ret;
	struct fsg_opts *opts;

	/* Don't allow to bind if we don't have at least one LUN */
	ret = _fsg_common_get_max_lun(common);
	if (ret < 0) {
		pr_err("There should be at least one LUN.\n");
		return -EINVAL;
	}

	opts = fsg_opts_from_func_inst(f->fi);
	if (!opts->no_configfs) {
		/* Legacy (non-configfs) path: set cdev and default strings. */
		ret = fsg_common_set_cdev(fsg->common, c->cdev,
					  fsg->common->can_stall);
		if (ret)
			return ret;
		fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
	}

	/* Start the worker thread unless a previous bind already did. */
	if (!common->thread_task) {
		common->state = FSG_STATE_NORMAL;
		common->thread_task =
			kthread_run(fsg_main_thread, common, "file-storage");
		if (IS_ERR(common->thread_task)) {
			ret = PTR_ERR(common->thread_task);
			common->thread_task = NULL;
			common->state = FSG_STATE_TERMINATED;
			return ret;
		}
		DBG(common, "I/O thread pid: %d\n",
		    task_pid_nr(common->thread_task));
	}

	fsg->gadget = gadget;

	/* New interface */
	i = usb_interface_id(c, f);
	if (i < 0)
		goto fail;
	fsg_intf_desc.bInterfaceNumber = i;
	fsg->interface_number = i;

	/* Find all the endpoints we will use */
	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
	if (!ep)
		goto autoconf_fail;
	fsg->bulk_in = ep;

	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
	if (!ep)
		goto autoconf_fail;
	fsg->bulk_out = ep;

	/* Assume endpoint addresses are the same for both speeds */
	fsg_hs_bulk_in_desc.bEndpointAddress =
		fsg_fs_bulk_in_desc.bEndpointAddress;
	fsg_hs_bulk_out_desc.bEndpointAddress =
		fsg_fs_bulk_out_desc.bEndpointAddress;

	/* Calculate bMaxBurst, we know packet size is 1024 */
	max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15);

	fsg_ss_bulk_in_desc.bEndpointAddress =
		fsg_fs_bulk_in_desc.bEndpointAddress;
	fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;

	fsg_ss_bulk_out_desc.bEndpointAddress =
		fsg_fs_bulk_out_desc.bEndpointAddress;
	fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;

	ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function,
			fsg_ss_function, fsg_ss_function);
	if (ret)
		goto autoconf_fail;

	return 0;

autoconf_fail:
	ERROR(fsg, "unable to autoconfigure all endpoints\n");
	i = -ENOTSUPP;
fail:
	/* terminate the thread */
	if (fsg->common->state != FSG_STATE_TERMINATED) {
		raise_exception(fsg->common, FSG_STATE_EXIT);
		wait_for_completion(&fsg->common->thread_notifier);
	}
	return i;
}
3127
3128 /****************************** ALLOCATE FUNCTION *************************/
3129
fsg_unbind(struct usb_configuration * c,struct usb_function * f)3130 static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
3131 {
3132 struct fsg_dev *fsg = fsg_from_func(f);
3133 struct fsg_common *common = fsg->common;
3134
3135 DBG(fsg, "unbind\n");
3136 if (fsg->common->fsg == fsg) {
3137 __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
3138 /* FIXME: make interruptible or killable somehow? */
3139 wait_event(common->fsg_wait, common->fsg != fsg);
3140 }
3141
3142 usb_free_all_descriptors(&fsg->function);
3143 }
3144
/* Map a configfs item back to its fsg_lun_opts wrapper. */
static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct fsg_lun_opts, group);
}

/* Map a configfs item back to the function instance's fsg_opts. */
static inline struct fsg_opts *to_fsg_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct fsg_opts,
			    func_inst.group);
}
3155
fsg_lun_attr_release(struct config_item * item)3156 static void fsg_lun_attr_release(struct config_item *item)
3157 {
3158 struct fsg_lun_opts *lun_opts;
3159
3160 lun_opts = to_fsg_lun_opts(item);
3161 kfree(lun_opts);
3162 }
3163
3164 static const struct configfs_item_operations fsg_lun_item_ops = {
3165 .release = fsg_lun_attr_release,
3166 };
3167
fsg_lun_opts_file_show(struct config_item * item,char * page)3168 static ssize_t fsg_lun_opts_file_show(struct config_item *item, char *page)
3169 {
3170 struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
3171 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
3172
3173 return fsg_show_file(opts->lun, &fsg_opts->common->filesem, page);
3174 }
3175
fsg_lun_opts_file_store(struct config_item * item,const char * page,size_t len)3176 static ssize_t fsg_lun_opts_file_store(struct config_item *item,
3177 const char *page, size_t len)
3178 {
3179 struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
3180 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
3181
3182 return fsg_store_file(opts->lun, &fsg_opts->common->filesem, page, len);
3183 }
3184
3185 CONFIGFS_ATTR(fsg_lun_opts_, file);
3186
fsg_lun_opts_ro_show(struct config_item * item,char * page)3187 static ssize_t fsg_lun_opts_ro_show(struct config_item *item, char *page)
3188 {
3189 return fsg_show_ro(to_fsg_lun_opts(item)->lun, page);
3190 }
3191
fsg_lun_opts_ro_store(struct config_item * item,const char * page,size_t len)3192 static ssize_t fsg_lun_opts_ro_store(struct config_item *item,
3193 const char *page, size_t len)
3194 {
3195 struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
3196 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
3197
3198 return fsg_store_ro(opts->lun, &fsg_opts->common->filesem, page, len);
3199 }
3200
3201 CONFIGFS_ATTR(fsg_lun_opts_, ro);
3202
fsg_lun_opts_removable_show(struct config_item * item,char * page)3203 static ssize_t fsg_lun_opts_removable_show(struct config_item *item,
3204 char *page)
3205 {
3206 return fsg_show_removable(to_fsg_lun_opts(item)->lun, page);
3207 }
3208
fsg_lun_opts_removable_store(struct config_item * item,const char * page,size_t len)3209 static ssize_t fsg_lun_opts_removable_store(struct config_item *item,
3210 const char *page, size_t len)
3211 {
3212 return fsg_store_removable(to_fsg_lun_opts(item)->lun, page, len);
3213 }
3214
3215 CONFIGFS_ATTR(fsg_lun_opts_, removable);
3216
fsg_lun_opts_cdrom_show(struct config_item * item,char * page)3217 static ssize_t fsg_lun_opts_cdrom_show(struct config_item *item, char *page)
3218 {
3219 return fsg_show_cdrom(to_fsg_lun_opts(item)->lun, page);
3220 }
3221
fsg_lun_opts_cdrom_store(struct config_item * item,const char * page,size_t len)3222 static ssize_t fsg_lun_opts_cdrom_store(struct config_item *item,
3223 const char *page, size_t len)
3224 {
3225 struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
3226 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
3227
3228 return fsg_store_cdrom(opts->lun, &fsg_opts->common->filesem, page,
3229 len);
3230 }
3231
3232 CONFIGFS_ATTR(fsg_lun_opts_, cdrom);
3233
fsg_lun_opts_nofua_show(struct config_item * item,char * page)3234 static ssize_t fsg_lun_opts_nofua_show(struct config_item *item, char *page)
3235 {
3236 return fsg_show_nofua(to_fsg_lun_opts(item)->lun, page);
3237 }
3238
fsg_lun_opts_nofua_store(struct config_item * item,const char * page,size_t len)3239 static ssize_t fsg_lun_opts_nofua_store(struct config_item *item,
3240 const char *page, size_t len)
3241 {
3242 return fsg_store_nofua(to_fsg_lun_opts(item)->lun, page, len);
3243 }
3244
3245 CONFIGFS_ATTR(fsg_lun_opts_, nofua);
3246
fsg_lun_opts_inquiry_string_show(struct config_item * item,char * page)3247 static ssize_t fsg_lun_opts_inquiry_string_show(struct config_item *item,
3248 char *page)
3249 {
3250 return fsg_show_inquiry_string(to_fsg_lun_opts(item)->lun, page);
3251 }
3252
fsg_lun_opts_inquiry_string_store(struct config_item * item,const char * page,size_t len)3253 static ssize_t fsg_lun_opts_inquiry_string_store(struct config_item *item,
3254 const char *page, size_t len)
3255 {
3256 return fsg_store_inquiry_string(to_fsg_lun_opts(item)->lun, page, len);
3257 }
3258
3259 CONFIGFS_ATTR(fsg_lun_opts_, inquiry_string);
3260
fsg_lun_opts_forced_eject_store(struct config_item * item,const char * page,size_t len)3261 static ssize_t fsg_lun_opts_forced_eject_store(struct config_item *item,
3262 const char *page, size_t len)
3263 {
3264 struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
3265 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
3266
3267 return fsg_store_forced_eject(opts->lun, &fsg_opts->common->filesem,
3268 page, len);
3269 }
3270
3271 CONFIGFS_ATTR_WO(fsg_lun_opts_, forced_eject);
3272
/* Attributes exposed in each per-LUN configfs directory ("lun.N"). */
static struct configfs_attribute *fsg_lun_attrs[] = {
	&fsg_lun_opts_attr_file,
	&fsg_lun_opts_attr_ro,
	&fsg_lun_opts_attr_removable,
	&fsg_lun_opts_attr_cdrom,
	&fsg_lun_opts_attr_nofua,
	&fsg_lun_opts_attr_inquiry_string,
	&fsg_lun_opts_attr_forced_eject,
	NULL,
};

/* configfs item type for a single LUN directory. */
static const struct config_item_type fsg_lun_type = {
	.ct_item_ops	= &fsg_lun_item_ops,
	.ct_attrs	= fsg_lun_attrs,
	.ct_owner	= THIS_MODULE,
};
3289
/*
 * configfs make_group: create a LUN when userspace does mkdir "lun.N".
 * Parses N from the directory name, validates it, and creates the LUN
 * under the instance lock.  Fails with -EBUSY if the function is in use
 * or the slot is already occupied.
 */
static struct config_group *fsg_lun_make(struct config_group *group,
					 const char *name)
{
	struct fsg_lun_opts *opts;
	struct fsg_opts *fsg_opts;
	struct fsg_lun_config config;
	char *num_str;
	u8 num;
	int ret;

	/* The directory name must look like "lun.N". */
	num_str = strchr(name, '.');
	if (!num_str) {
		pr_err("Unable to locate . in LUN.NUMBER\n");
		return ERR_PTR(-EINVAL);
	}
	num_str++;

	ret = kstrtou8(num_str, 0, &num);
	if (ret)
		return ERR_PTR(ret);

	fsg_opts = to_fsg_opts(&group->cg_item);
	if (num >= FSG_MAX_LUNS)
		return ERR_PTR(-ERANGE);
	/* Clamp the index against speculative out-of-bounds access. */
	num = array_index_nospec(num, FSG_MAX_LUNS);

	mutex_lock(&fsg_opts->lock);
	if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
		/* Function bound or slot taken: refuse to create the LUN. */
		ret = -EBUSY;
		goto out;
	}

	opts = kzalloc_obj(*opts);
	if (!opts) {
		ret = -ENOMEM;
		goto out;
	}

	/* configfs-created LUNs start out as empty removable media. */
	memset(&config, 0, sizeof(config));
	config.removable = true;

	ret = fsg_common_create_lun(fsg_opts->common, &config, num, name,
				    (const char **)&group->cg_item.ci_name);
	if (ret) {
		kfree(opts);
		goto out;
	}
	opts->lun = fsg_opts->common->luns[num];
	opts->lun_id = num;
	mutex_unlock(&fsg_opts->lock);

	config_group_init_type_name(&opts->group, name, &fsg_lun_type);

	return &opts->group;
out:
	mutex_unlock(&fsg_opts->lock);
	return ERR_PTR(ret);
}
3348
/*
 * configfs drop_item: remove a LUN when userspace does rmdir "lun.N".
 * If the function is currently bound, the whole gadget is unregistered
 * first so the LUN can be removed safely.
 */
static void fsg_lun_drop(struct config_group *group, struct config_item *item)
{
	struct fsg_lun_opts *lun_opts;
	struct fsg_opts *fsg_opts;

	lun_opts = to_fsg_lun_opts(item);
	fsg_opts = to_fsg_opts(&group->cg_item);

	mutex_lock(&fsg_opts->lock);
	if (fsg_opts->refcnt) {
		struct config_item *gadget;

		/* Function in use: tear the owning gadget down first. */
		gadget = group->cg_item.ci_parent->ci_parent;
		unregister_gadget_item(gadget);
	}

	fsg_common_remove_lun(lun_opts->lun);
	fsg_opts->common->luns[lun_opts->lun_id] = NULL;
	lun_opts->lun_id = 0;
	mutex_unlock(&fsg_opts->lock);

	config_item_put(item);
}
3372
fsg_attr_release(struct config_item * item)3373 static void fsg_attr_release(struct config_item *item)
3374 {
3375 struct fsg_opts *opts = to_fsg_opts(item);
3376
3377 usb_put_function_instance(&opts->func_inst);
3378 }
3379
3380 static const struct configfs_item_operations fsg_item_ops = {
3381 .release = fsg_attr_release,
3382 };
3383
fsg_opts_stall_show(struct config_item * item,char * page)3384 static ssize_t fsg_opts_stall_show(struct config_item *item, char *page)
3385 {
3386 struct fsg_opts *opts = to_fsg_opts(item);
3387 int result;
3388
3389 mutex_lock(&opts->lock);
3390 result = sprintf(page, "%d", opts->common->can_stall);
3391 mutex_unlock(&opts->lock);
3392
3393 return result;
3394 }
3395
fsg_opts_stall_store(struct config_item * item,const char * page,size_t len)3396 static ssize_t fsg_opts_stall_store(struct config_item *item, const char *page,
3397 size_t len)
3398 {
3399 struct fsg_opts *opts = to_fsg_opts(item);
3400 int ret;
3401 bool stall;
3402
3403 mutex_lock(&opts->lock);
3404
3405 if (opts->refcnt) {
3406 mutex_unlock(&opts->lock);
3407 return -EBUSY;
3408 }
3409
3410 ret = kstrtobool(page, &stall);
3411 if (!ret) {
3412 opts->common->can_stall = stall;
3413 ret = len;
3414 }
3415
3416 mutex_unlock(&opts->lock);
3417
3418 return ret;
3419 }
3420
3421 CONFIGFS_ATTR(fsg_opts_, stall);
3422
3423 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
fsg_opts_num_buffers_show(struct config_item * item,char * page)3424 static ssize_t fsg_opts_num_buffers_show(struct config_item *item, char *page)
3425 {
3426 struct fsg_opts *opts = to_fsg_opts(item);
3427 int result;
3428
3429 mutex_lock(&opts->lock);
3430 result = sprintf(page, "%d", opts->common->fsg_num_buffers);
3431 mutex_unlock(&opts->lock);
3432
3433 return result;
3434 }
3435
fsg_opts_num_buffers_store(struct config_item * item,const char * page,size_t len)3436 static ssize_t fsg_opts_num_buffers_store(struct config_item *item,
3437 const char *page, size_t len)
3438 {
3439 struct fsg_opts *opts = to_fsg_opts(item);
3440 int ret;
3441 u8 num;
3442
3443 mutex_lock(&opts->lock);
3444 if (opts->refcnt) {
3445 ret = -EBUSY;
3446 goto end;
3447 }
3448 ret = kstrtou8(page, 0, &num);
3449 if (ret)
3450 goto end;
3451
3452 ret = fsg_common_set_num_buffers(opts->common, num);
3453 if (ret)
3454 goto end;
3455 ret = len;
3456
3457 end:
3458 mutex_unlock(&opts->lock);
3459 return ret;
3460 }
3461
3462 CONFIGFS_ATTR(fsg_opts_, num_buffers);
3463 #endif
3464
/* Attributes exposed in the function's top-level configfs directory. */
static struct configfs_attribute *fsg_attrs[] = {
	&fsg_opts_attr_stall,
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	&fsg_opts_attr_num_buffers,
#endif
	NULL,
};

/* mkdir/rmdir of "lun.N" subdirectories creates/removes LUNs. */
static const struct configfs_group_operations fsg_group_ops = {
	.make_group	= fsg_lun_make,
	.drop_item	= fsg_lun_drop,
};

/* configfs item type for the mass-storage function instance. */
static const struct config_item_type fsg_func_type = {
	.ct_item_ops	= &fsg_item_ops,
	.ct_group_ops	= &fsg_group_ops,
	.ct_attrs	= fsg_attrs,
	.ct_owner	= THIS_MODULE,
};
3484
fsg_free_inst(struct usb_function_instance * fi)3485 static void fsg_free_inst(struct usb_function_instance *fi)
3486 {
3487 struct fsg_opts *opts;
3488
3489 opts = fsg_opts_from_func_inst(fi);
3490 fsg_common_release(opts->common);
3491 kfree(opts);
3492 }
3493
/*
 * Allocate a function instance: set up the common state, the I/O buffer
 * pool, a default "lun.0" (removable, no backing file yet) and the
 * configfs groups.  Returns the instance or ERR_PTR on failure, with all
 * partially-created resources unwound.
 */
static struct usb_function_instance *fsg_alloc_inst(void)
{
	struct fsg_opts *opts;
	struct fsg_lun_config config;
	int rc;

	opts = kzalloc_obj(*opts);
	if (!opts)
		return ERR_PTR(-ENOMEM);
	mutex_init(&opts->lock);
	opts->func_inst.free_func_inst = fsg_free_inst;
	opts->common = fsg_common_setup(opts->common);
	if (IS_ERR(opts->common)) {
		rc = PTR_ERR(opts->common);
		goto release_opts;
	}

	rc = fsg_common_set_num_buffers(opts->common,
					CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS);
	if (rc)
		goto release_common;

	pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");

	/* The default LUN is removable with no backing file attached yet. */
	memset(&config, 0, sizeof(config));
	config.removable = true;
	rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0",
				   (const char **)&opts->func_inst.group.cg_item.ci_name);
	if (rc)
		goto release_buffers;

	opts->lun0.lun = opts->common->luns[0];
	opts->lun0.lun_id = 0;

	config_group_init_type_name(&opts->func_inst.group, "", &fsg_func_type);

	config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type);
	configfs_add_default_group(&opts->lun0.group, &opts->func_inst.group);

	return &opts->func_inst;

	/* Error unwinding, innermost resource first. */
release_buffers:
	fsg_common_free_buffers(opts->common);
release_common:
	kfree(opts->common);
release_opts:
	kfree(opts);
	return ERR_PTR(rc);
}
3543
fsg_free(struct usb_function * f)3544 static void fsg_free(struct usb_function *f)
3545 {
3546 struct fsg_dev *fsg;
3547 struct fsg_opts *opts;
3548
3549 fsg = container_of(f, struct fsg_dev, function);
3550 opts = container_of(f->fi, struct fsg_opts, func_inst);
3551
3552 mutex_lock(&opts->lock);
3553 opts->refcnt--;
3554 mutex_unlock(&opts->lock);
3555
3556 kfree(fsg);
3557 }
3558
fsg_alloc(struct usb_function_instance * fi)3559 static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
3560 {
3561 struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
3562 struct fsg_common *common = opts->common;
3563 struct fsg_dev *fsg;
3564
3565 fsg = kzalloc_obj(*fsg);
3566 if (unlikely(!fsg))
3567 return ERR_PTR(-ENOMEM);
3568
3569 mutex_lock(&opts->lock);
3570 opts->refcnt++;
3571 mutex_unlock(&opts->lock);
3572
3573 fsg->function.name = FSG_DRIVER_DESC;
3574 fsg->function.bind = fsg_bind;
3575 fsg->function.unbind = fsg_unbind;
3576 fsg->function.setup = fsg_setup;
3577 fsg->function.set_alt = fsg_set_alt;
3578 fsg->function.disable = fsg_disable;
3579 fsg->function.free_func = fsg_free;
3580
3581 fsg->common = common;
3582
3583 return &fsg->function;
3584 }
3585
/* Register the "mass_storage" function with the composite framework. */
DECLARE_USB_FUNCTION_INIT(mass_storage, fsg_alloc_inst, fsg_alloc);
MODULE_DESCRIPTION("Mass Storage USB Composite Function");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Nazarewicz");
3590
3591 /************************* Module parameters *************************/
3592
3593
fsg_config_from_params(struct fsg_config * cfg,const struct fsg_module_parameters * params,unsigned int fsg_num_buffers)3594 void fsg_config_from_params(struct fsg_config *cfg,
3595 const struct fsg_module_parameters *params,
3596 unsigned int fsg_num_buffers)
3597 {
3598 struct fsg_lun_config *lun;
3599 unsigned i;
3600
3601 /* Configure LUNs */
3602 cfg->nluns =
3603 min(params->luns ?: (params->file_count ?: 1u),
3604 (unsigned)FSG_MAX_LUNS);
3605 for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
3606 lun->ro = !!params->ro[i];
3607 lun->cdrom = !!params->cdrom[i];
3608 lun->removable = !!params->removable[i];
3609 lun->filename =
3610 params->file_count > i && params->file[i][0]
3611 ? params->file[i]
3612 : NULL;
3613 }
3614
3615 /* Let MSF use defaults */
3616 cfg->vendor_name = NULL;
3617 cfg->product_name = NULL;
3618
3619 cfg->ops = NULL;
3620 cfg->private_data = NULL;
3621
3622 /* Finalise */
3623 cfg->can_stall = params->stall;
3624 cfg->fsg_num_buffers = fsg_num_buffers;
3625 }
3626 EXPORT_SYMBOL_GPL(fsg_config_from_params);
3627