// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
 * f_mass_storage.c -- Mass Storage USB Composite Function
 *
 * Copyright (C) 2003-2008 Alan Stern
 * Copyright (C) 2009 Samsung Electronics
 * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
 * All rights reserved.
 */

/*
 * The Mass Storage Function acts as a USB Mass Storage device,
 * appearing to the host as a disk drive or as a CD-ROM drive.  In
 * addition to providing an example of a genuinely useful composite
 * function for a USB device, it also illustrates a technique of
 * double-buffering for increased throughput.
 *
 * The function supports multiple logical units (LUNs).  Backing storage
 * for each LUN is provided by a regular file or a block device.
 * Access for each LUN can be limited to read-only.  Moreover, the
 * function can indicate that a LUN is removable and/or a CD-ROM.  (The
 * latter implies read-only access.)
 *
 * MSF is configured by specifying an fsg_config structure.  It has the
 * following fields:
 *
 *	nluns		Number of LUNs the function has (anywhere from 1
 *			to FSG_MAX_LUNS, which is 8).
 *	luns		An array of LUN configuration values.  This
 *			should be filled for each LUN that
 *			the function will include (i.e. for "nluns"
 *			LUNs).  Each element of the array has
 *			the following fields:
 *	->filename	The path to the backing file for the LUN.
 *			Required if the LUN is not marked as
 *			removable.
 *	->ro		Flag specifying that access to the LUN shall be
 *			read-only.  This is implied if CD-ROM
 *			emulation is enabled, as well as when
 *			it was impossible to open "filename"
 *			in R/W mode.
 *	->removable	Flag specifying that the LUN shall be indicated as
 *			being removable.
 *	->cdrom		Flag specifying that the LUN shall be reported as
 *			being a CD-ROM.
 *
 *	lun_name_format	A printf-like format for names of the LUN
 *			devices.  This determines how the
 *			directory in sysfs will be named.
 *			Unless you are using several MSFs in
 *			a single gadget (as opposed to a single
 *			MSF in many configurations) you may
 *			leave it as NULL (in which case
 *			"lun%d" will be used).  In the format
 *			you can use "%d" to index LUNs for
 *			MSFs with more than one LUN.  (Beware
 *			that there is only one integer given
 *			as an argument for the format and
 *			specifying an invalid format may cause
 *			unspecified behaviour.)
 *	thread_name	Name of the kernel thread process used by the
 *			MSF.  You can safely set it to NULL
 *			(in which case the default "file-storage"
 *			will be used).
 *
 *	vendor_name
 *	product_name
 *	release		Information used as a reply to the INQUIRY
 *			request.  To use the defaults set to NULL,
 *			NULL, 0xffff respectively.  The first
 *			field should be 8 and the second 16
 *			characters or less.
 *
 *	can_stall	Set to permit the function to halt bulk endpoints.
 *			Disabled on some USB devices known not
 *			to work correctly.  You should set it
 *			to true.
 *
 * If "removable" is not set for a LUN then a backing file must be
 * specified.  If it is set, then a NULL filename means the LUN's medium
 * is not loaded (an empty string as "filename" in the fsg_config
 * structure causes an error).  The CD-ROM emulation includes a single
 * data track and no audio tracks; hence there need be only one
 * backing file per LUN.  Note also that the CD-ROM block length is
 * set to 512 rather than the more common value 2048.
 *
 *
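 * As a minimal sketch of the above (illustrative only; the backing file
 * name is made up and the board glue that passes the structure to the
 * function is not shown), a gadget with a single writable, removable
 * LUN could fill in the structure roughly like this:
 *
 *	static struct fsg_config cfg = {
 *		.nluns = 1,
 *		.luns[0] = {
 *			.filename  = "backing.img",
 *			.ro        = 0,
 *			.removable = 1,
 *			.cdrom     = 0,
 *		},
 *		.vendor_name  = NULL,
 *		.product_name = NULL,
 *		.can_stall    = 1,
 *	};
 *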
 * MSF includes support for module parameters.  If a gadget using it
 * decides to use them, the following module parameters will be
 * available:
 *
 *	file=filename[,filename...]
 *			Names of the files or block devices used for
 *			backing storage.
 *	ro=b[,b...]	Default false, boolean for read-only access.
 *	removable=b[,b...]
 *			Default true, boolean for removable media.
 *	cdrom=b[,b...]	Default false, boolean for whether to emulate
 *			a CD-ROM drive.
 *	luns=N		Default N = number of filenames, number of
 *			LUNs to support.
 *	stall		Default determined according to the type of
 *			USB device controller (usually true),
 *			boolean to permit the driver to halt
 *			bulk endpoints.
 *
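 * For illustration only (whether these parameters exist at all, and with
 * what prefix, depends on the gadget built on top of this function), an
 * invocation using the syntax above could look like:
 *
 *	file=disk0.img,disk1.img ro=1,0 luns=2 stall=0
 *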
 * The module parameters may be prefixed with some string.  You need
 * to consult the gadget's documentation or source to verify whether it
 * is using those module parameters and, if it does, what the prefixes
 * are (look for the FSG_MODULE_PARAMETERS() macro usage; what's inside
 * it is the prefix).
 *
 *
 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
 * needed.  The memory requirement amounts to two 16K buffers, size
 * configurable by a parameter.  Support is included for both
 * full-speed and high-speed operation.
 *
 * Note that the driver is slightly non-portable in that it assumes a
 * single memory/DMA buffer will be usable for bulk-in, bulk-out, and
 * interrupt-in endpoints.  With most device controllers this isn't an
 * issue, but there may be some with hardware restrictions that prevent
 * a buffer from being used by more than one endpoint.
 *
 *
 * The pathnames of the backing files and the ro settings are
 * available in the attribute files "file" and "ro" in the lun<n> (or
 * to be more precise, in a directory whose name comes from the
 * "lun_name_format" option!) subdirectory of the gadget's sysfs
 * directory.  If the "removable" option is set, writing to these
 * files will simulate ejecting/loading the medium (writing an empty
 * line means eject) and adjusting a write-enable tab.  Changes to the
 * ro setting are not allowed when the medium is loaded or if CD-ROM
 * emulation is being used.
 *
 * When a LUN receives an "eject" SCSI request (Start/Stop Unit),
 * if the LUN is removable, the backing file is released to simulate
 * ejection.
 *
 *
 * This function is heavily based on "File-backed Storage Gadget" by
 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
 * Brownell.  The driver's SCSI command interface was based on the
 * "Information technology - Small Computer System Interface - 2"
 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
 * was based on the "Universal Serial Bus Mass Storage Class UFI
 * Command Specification" document, Revision 1.0, December 14, 1998,
 * available at
 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
 */

/*
 * Driver Design
 *
 * The MSF is fairly straightforward.  There is a main kernel
 * thread that handles most of the work.  Interrupt routines field
 * callbacks from the controller driver: bulk- and interrupt-request
 * completion notifications, endpoint-0 events, and disconnect events.
 * Completion events are passed to the main thread by wakeup calls.  Many
 * ep0 requests are handled at interrupt time, but SetInterface,
 * SetConfiguration, and device reset requests are forwarded to the
 * thread in the form of "exceptions" using SIGUSR1 signals (since they
 * should interrupt any ongoing file I/O operations).
 *
 * The thread's main routine implements the standard command/data/status
 * parts of a SCSI interaction.  It and its subroutines are full of tests
 * for pending signals/exceptions -- all this polling is necessary since
 * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
 * indication that the driver really wants to be running in userspace.)
 * An important point is that so long as the thread is alive it keeps an
 * open reference to the backing file.  This will prevent unmounting
 * the backing file's underlying filesystem and could cause problems
 * during system shutdown, for example.  To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
 *
 * In normal operation the main thread is started during the gadget's
 * fsg_bind() callback and stopped during fsg_unbind().  But it can
 * also exit when it receives a signal, and there's no point leaving
 * the gadget running when the thread is dead.  As of this moment, MSF
 * provides no way to deregister the gadget when the thread dies -- maybe
 * a callback function is needed.
 *
 * To provide maximum throughput, the driver uses a circular pipeline of
 * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
 * arbitrarily long; in practice the benefits don't justify having more
 * than 2 stages (i.e., double buffering).  But it helps to think of the
 * pipeline as being a long one.  Each buffer head contains a bulk-in and
 * a bulk-out request pointer (since the buffer can be used for both
 * output and input -- directions always are given from the host's
 * point of view) as well as a pointer to the buffer and various state
 * variables.
 *
 * Use of the pipeline follows a simple protocol.  There is a variable
 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
 * At any time that buffer head may still be in use from an earlier
 * request, so each buffer head has a state variable indicating whether
 * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
 * buffer head to be EMPTY, filling the buffer either by file I/O or by
 * USB I/O (during which the buffer head is BUSY), and marking the buffer
 * head FULL when the I/O is complete.  Then the buffer will be emptied
 * (again possibly by USB I/O, during which it is marked BUSY) and
 * finally marked EMPTY again (possibly by a completion routine).
 *
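 * As a concrete sketch of that protocol (state names as used in this
 * file; the exact sequence depends on the command being handled), one
 * buffer head serving a READ goes through roughly:
 *
 *	BUF_STATE_EMPTY   the worker claims next_buffhd_to_fill
 *	BUF_STATE_FULL    after the backing-storage read has filled bh->buf
 *	BUF_STATE_BUSY    while start_transfer() has the bulk-in request
 *	                  queued on the controller
 *	BUF_STATE_EMPTY   again, set by bulk_in_complete(), so the head
 *	                  can be reused
 *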
 * A module parameter tells the driver to avoid stalling the bulk
 * endpoints wherever the transport specification allows.  This is
 * necessary for some UDCs like the SuperH, which cannot reliably clear a
 * halt on a bulk endpoint.  However, under certain circumstances the
 * Bulk-only specification requires a stall.  In such cases the driver
 * will halt the endpoint and set a flag indicating that it should clear
 * the halt in software during the next device reset.  Hopefully this
 * will permit everything to work correctly.  Furthermore, although the
 * specification allows the bulk-out endpoint to halt when the host sends
 * too much data, implementing this would cause an unavoidable race.
 * The driver will always use the "no-stall" approach for OUT transfers.
 *
 * One subtle point concerns sending status-stage responses for ep0
 * requests.  Some of these requests, such as device reset, can involve
 * interrupting an ongoing file I/O operation, which might take an
 * arbitrarily long time.  During that delay the host might give up on
 * the original ep0 request and issue a new one.  When that happens the
 * driver should not notify the host about completion of the original
 * request, as the host will no longer be waiting for it.  So the driver
 * assigns to each ep0 request a unique tag, and it keeps track of the
 * tag value of the request associated with a long-running exception
 * (device-reset, interface-change, or configuration-change).  When the
 * exception handler is finished, the status-stage response is submitted
 * only if the current ep0 request tag is equal to the exception request
 * tag.  Thus only the most recently received ep0 request will get a
 * status-stage response.
 *
 * Warning: This driver source file is too long.  It ought to be split up
 * into a header file plus about 3 separate .c files, to handle the details
 * of the Gadget, USB Mass Storage, and SCSI protocols.
 */

/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */

#include <config.h>
#include <hexdump.h>
#include <log.h>
#include <malloc.h>
#include <common.h>
#include <console.h>
#include <g_dnl.h>
#include <dm/devres.h>

#include <linux/err.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <usb_mass_storage.h>

#include <asm/unaligned.h>
#include <linux/bitops.h>
#include <linux/usb/composite.h>
#include <linux/bitmap.h>

/*------------------------------------------------------------------------*/

#define FSG_DRIVER_DESC		"Mass Storage Function"
#define FSG_DRIVER_VERSION	"2012/06/5"

static const char fsg_string_interface[] = "Mass Storage";

#define FSG_NO_INTR_EP		1
#define FSG_NO_DEVICE_STRINGS	1
#define FSG_NO_OTG		1

#include "storage_common.c"

/*-------------------------------------------------------------------------*/

#define GFP_ATOMIC ((gfp_t) 0)
#define PAGE_CACHE_SHIFT	12
#define PAGE_CACHE_SIZE		(1 << PAGE_CACHE_SHIFT)
#define kthread_create(...)	__builtin_return_address(0)
#define wait_for_completion(...) do {} while (0)

struct kref {int x; };
struct completion {int x; };

struct fsg_dev;
struct fsg_common;

/* Data shared by all the FSG instances. */
struct fsg_common {
	struct usb_gadget	*gadget;
	struct fsg_dev		*fsg, *new_fsg;

	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
	struct usb_request	*ep0req;	/* Copy of cdev->req */
	unsigned int		ep0_req_tag;

	struct fsg_buffhd	*next_buffhd_to_fill;
	struct fsg_buffhd	*next_buffhd_to_drain;
	struct fsg_buffhd	buffhds[FSG_NUM_BUFFERS];

	int			cmnd_size;
	u8			cmnd[MAX_COMMAND_SIZE];

	unsigned int		nluns;
	unsigned int		lun;
	struct fsg_lun		luns[FSG_MAX_LUNS];

	unsigned int		bulk_out_maxpacket;
	enum fsg_state		state;		/* For exception handling */
	unsigned int		exception_req_tag;

	enum data_direction	data_dir;
	u32			data_size;
	u32			data_size_from_cmnd;
	u32			tag;
	u32			residue;
	u32			usb_amount_left;

	unsigned int		can_stall:1;
	unsigned int		free_storage_on_release:1;
	unsigned int		phase_error:1;
	unsigned int		short_packet_received:1;
	unsigned int		bad_lun_okay:1;
	unsigned int		running:1;

	int			thread_wakeup_needed;
	struct completion	thread_notifier;
	struct task_struct	*thread_task;

	/* Callback functions. */
	const struct fsg_operations	*ops;
	/* Gadget's private data. */
	void			*private_data;

	const char *vendor_name;		/*  8 characters or less */
	const char *product_name;		/* 16 characters or less */
	u16 release;

	/* Vendor (8 chars), product (16 chars), release (4
	 * hexadecimal digits) and NUL byte */
	char inquiry_string[8 + 16 + 4 + 1];

	struct kref		ref;
};

struct fsg_config {
	unsigned nluns;
	struct fsg_lun_config {
		const char *filename;
		char ro;
		char removable;
		char cdrom;
		char nofua;
	} luns[FSG_MAX_LUNS];

	/* Callback functions. */
	const struct fsg_operations	*ops;
	/* Gadget's private data. */
	void			*private_data;

	const char *vendor_name;		/*  8 characters or less */
	const char *product_name;		/* 16 characters or less */

	char			can_stall;
};

struct fsg_dev {
	struct usb_function	function;
	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
	struct fsg_common	*common;

	u16			interface_number;

	unsigned int		bulk_in_enabled:1;
	unsigned int		bulk_out_enabled:1;

	unsigned long		atomic_bitflags;
#define IGNORE_BULK_OUT		0

	struct usb_ep		*bulk_in;
	struct usb_ep		*bulk_out;
};


static inline int __fsg_is_set(struct fsg_common *common,
			       const char *func, unsigned line)
{
	if (common->fsg)
		return 1;
	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
#ifdef __UBOOT__
	assert_noisy(false);
#else
	WARN_ON(1);
#endif
	return 0;
}

#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))


static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
{
	return container_of(f, struct fsg_dev, function);
}


typedef void (*fsg_routine_t)(struct fsg_dev *);

static int exception_in_progress(struct fsg_common *common)
{
	return common->state > FSG_STATE_IDLE;
}

/* Make bulk-out requests be divisible by the maxpacket size */
static void set_bulk_out_req_length(struct fsg_common *common,
		struct fsg_buffhd *bh, unsigned int length)
{
	unsigned int	rem;

	bh->bulk_out_intended_length = length;
	rem = length % common->bulk_out_maxpacket;
	if (rem > 0)
		length += common->bulk_out_maxpacket - rem;
	bh->outreq->length = length;
}
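
/*
 * Example (numbers purely illustrative): with bulk_out_maxpacket == 512,
 * an intended length of 700 bytes is padded up to 1024 so that the
 * queued request is always a whole number of max-size packets; the
 * original intent is kept in bh->bulk_out_intended_length.
 */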

/*-------------------------------------------------------------------------*/

static struct ums *ums;
static int ums_count;
static struct fsg_common *the_fsg_common;

static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
	const char	*name;

	if (ep == fsg->bulk_in)
		name = "bulk-in";
	else if (ep == fsg->bulk_out)
		name = "bulk-out";
	else
		name = ep->name;
	DBG(fsg, "%s set halt\n", name);
	return usb_ep_set_halt(ep);
}

/*-------------------------------------------------------------------------*/

/* These routines may be called in process context or in_irq */

/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common)
{
	common->thread_wakeup_needed = 1;
}

static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
{
	/* Do nothing if a higher-priority exception is already in progress.
	 * If a lower-or-equal priority exception is in progress, preempt it
	 * and notify the main thread by sending it a signal. */
	if (common->state <= new_state) {
		common->exception_req_tag = common->ep0_req_tag;
		common->state = new_state;
		common->thread_wakeup_needed = 1;
	}
}

/*-------------------------------------------------------------------------*/

static int ep0_queue(struct fsg_common *common)
{
	int	rc;

	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
	common->ep0->driver_data = common;
	if (rc != 0 && rc != -ESHUTDOWN) {
		/* We can't do much more than wait for a reset */
		WARNING(common, "error in submission: %s --> %d\n",
			common->ep0->name, rc);
	}
	return rc;
}

/*-------------------------------------------------------------------------*/

/* Bulk and interrupt endpoint completion handlers.
 * These always run in_irq. */

static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	if (req->status || req->actual != req->length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	bh->inreq_busy = 0;
	bh->state = BUF_STATE_EMPTY;
	wakeup_thread(common);
}

static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	dump_msg(common, "bulk-out", req->buf, req->actual);
	if (req->status || req->actual != bh->bulk_out_intended_length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual,
		    bh->bulk_out_intended_length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	bh->outreq_busy = 0;
	bh->state = BUF_STATE_FULL;
	wakeup_thread(common);
}

/*-------------------------------------------------------------------------*/

/* Ep0 class-specific handlers.  These always run in_irq. */

static int fsg_setup(struct usb_function *f,
		     const struct usb_ctrlrequest *ctrl)
{
	struct fsg_dev		*fsg = fsg_from_func(f);
	struct usb_request	*req = fsg->common->ep0req;
	u16			w_index = get_unaligned_le16(&ctrl->wIndex);
	u16			w_value = get_unaligned_le16(&ctrl->wValue);
	u16			w_length = get_unaligned_le16(&ctrl->wLength);

	if (!fsg_is_set(fsg->common))
		return -EOPNOTSUPP;

	switch (ctrl->bRequest) {

	case USB_BULK_RESET_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0)
			return -EDOM;

		/* Raise an exception to stop the current operation
		 * and reinitialize our state. */
		DBG(fsg, "bulk reset request\n");
		raise_exception(fsg->common, FSG_STATE_RESET);
		return DELAYED_STATUS;

	case USB_BULK_GET_MAX_LUN_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0)
			return -EDOM;
		VDBG(fsg, "get max LUN\n");
		*(u8 *) req->buf = fsg->common->nluns - 1;

		/* Respond with data/status */
		req->length = min((u16)1, w_length);
		return ep0_queue(fsg->common);
	}

	VDBG(fsg,
	     "unknown class-specific control req "
	     "%02x.%02x v%04x i%04x l%u\n",
	     ctrl->bRequestType, ctrl->bRequest,
	     get_unaligned_le16(&ctrl->wValue), w_index, w_length);
	return -EOPNOTSUPP;
}

/*-------------------------------------------------------------------------*/

/* All the following routines run in process context */

/* Use this for bulk or interrupt transfers, not ep0 */
static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
			   struct usb_request *req, int *pbusy,
			   enum fsg_buffer_state *state)
{
	int	rc;

	if (ep == fsg->bulk_in)
		dump_msg(fsg, "bulk-in", req->buf, req->length);

	*pbusy = 1;
	*state = BUF_STATE_BUSY;
	rc = usb_ep_queue(ep, req, GFP_KERNEL);
	if (rc != 0) {
		*pbusy = 0;
		*state = BUF_STATE_EMPTY;

		/* We can't do much more than wait for a reset */

		/* Note: currently the net2280 driver fails zero-length
		 * submissions if DMA is enabled. */
		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
						req->length == 0))
			WARNING(fsg, "error in submission: %s --> %d\n",
				ep->name, rc);
	}
}

#define START_TRANSFER_OR(common, ep_name, req, pbusy, state)		\
	if (fsg_is_set(common))						\
		start_transfer((common)->fsg, (common)->fsg->ep_name,	\
			       req, pbusy, state);			\
	else

#define START_TRANSFER(common, ep_name, req, pbusy, state)		\
	START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0
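
/*
 * Usage note (illustrative, mirroring how the macro is used later in this
 * file): because START_TRANSFER_OR() ends in a dangling "else", the
 * statement written immediately after it becomes the error path taken
 * when common->fsg is NULL, e.g.
 *
 *	START_TRANSFER_OR(common, bulk_in, bh->inreq,
 *			  &bh->inreq_busy, &bh->state)
 *		return -EIO;
 */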

static void busy_indicator(void)
{
	static int state;

	switch (state) {
	case 0:
		puts("\r|"); break;
	case 1:
		puts("\r/"); break;
	case 2:
		puts("\r-"); break;
	case 3:
		puts("\r\\"); break;
	case 4:
		puts("\r|"); break;
	case 5:
		puts("\r/"); break;
	case 6:
		puts("\r-"); break;
	case 7:
		puts("\r\\"); break;
	default:
		state = 0;
	}
	if (state++ == 8)
		state = 0;
}

static int sleep_thread(struct fsg_common *common)
{
	int	rc = 0;
	int i = 0, k = 0;

	/* Wait until a signal arrives or we are woken up */
	for (;;) {
		if (common->thread_wakeup_needed)
			break;

		if (++i == 20000) {
			busy_indicator();
			i = 0;
			k++;
		}

		if (k == 10) {
			/* Handle CTRL+C */
			if (ctrlc())
				return -EPIPE;

			/* Check cable connection */
			if (!g_dnl_board_usb_cable_connected())
				return -EIO;

			k = 0;
		}

		usb_gadget_handle_interrupts(0);
	}
	common->thread_wakeup_needed = 0;
	return rc;
}

/*-------------------------------------------------------------------------*/

static int do_read(struct fsg_common *common)
{
	struct fsg_lun		*curlun = &common->luns[common->lun];
	u32			lba;
	struct fsg_buffhd	*bh;
	int			rc;
	u32			amount_left;
	loff_t			file_offset;
	unsigned int		amount;
	unsigned int		partial_page;
	ssize_t			nread;

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (common->cmnd[0] == SC_READ_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = don't read from the
		 * cache), but we don't implement them. */
		if ((common->cmnd[1] & ~0x18) != 0) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}
	file_offset = ((loff_t) lba) << 9;

	/* Carry out the file reads */
	amount_left = common->data_size_from_cmnd;
	if (unlikely(amount_left == 0))
		return -EIO;		/* No default reply */

	for (;;) {

		/* Figure out how much we need to read:
		 * Try to read the remaining amount.
		 * But don't read more than the buffer size.
		 * And don't try to read past the end of the file.
		 * Finally, if we're not at a page boundary, don't read past
		 * the next page.
		 * If this means reading 0 then we were asked to read past
		 * the end of file. */
		amount = min(amount_left, FSG_BUFLEN);
		partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
		if (partial_page > 0)
			amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
					     partial_page);

		/* Wait for the next buffer to become available */
		bh = common->next_buffhd_to_fill;
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(common);
			if (rc)
				return rc;
		}

		/* If we were asked to read past the end of file,
		 * end with an empty buffer. */
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->info_valid = 1;
			bh->inreq->length = 0;
			bh->state = BUF_STATE_FULL;
			break;
		}

		/* Perform the read */
		rc = ums[common->lun].read_sector(&ums[common->lun],
				      file_offset / SECTOR_SIZE,
				      amount / SECTOR_SIZE,
				      (char __user *)bh->buf);
		if (!rc)
			return -EIO;

		nread = rc * SECTOR_SIZE;

		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long) file_offset,
		      (int) nread);

		if (nread < 0) {
			LDBG(curlun, "error in file read: %d\n",
			     (int) nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file read: %d/%u\n",
			     (int) nread, amount);
			nread -= (nread & 511);	/* Round down to a block */
		}
		file_offset  += nread;
		amount_left  -= nread;
		common->residue -= nread;
		bh->inreq->length = nread;
		bh->state = BUF_STATE_FULL;

		/* If an error occurred, report it and its position */
		if (nread < amount) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->info_valid = 1;
			break;
		}

		if (amount_left == 0)
			break;		/* No more left to read */

		/* Send this buffer and go read some more */
		bh->inreq->zero = 0;
		START_TRANSFER_OR(common, bulk_in, bh->inreq,
				  &bh->inreq_busy, &bh->state)
			/* Don't know what to do if
			 * common->fsg is NULL */
			return -EIO;
		common->next_buffhd_to_fill = bh->next;
	}

	return -EIO;		/* No default reply */
}

/*-------------------------------------------------------------------------*/

static int do_write(struct fsg_common *common)
{
	struct fsg_lun		*curlun = &common->luns[common->lun];
	u32			lba;
	struct fsg_buffhd	*bh;
	int			get_some_more;
	u32			amount_left_to_req, amount_left_to_write;
	loff_t			usb_offset, file_offset;
	unsigned int		amount;
	unsigned int		partial_page;
	ssize_t			nwritten;
	int			rc;

	if (curlun->ro) {
		curlun->sense_data = SS_WRITE_PROTECTED;
		return -EINVAL;
	}

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (common->cmnd[0] == SC_WRITE_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = write directly to the
		 * medium).  We don't implement DPO; we implement FUA by
		 * performing synchronous output. */
		if (common->cmnd[1] & ~0x18) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* Carry out the file writes */
	get_some_more = 1;
	file_offset = usb_offset = ((loff_t) lba) << 9;
	amount_left_to_req = common->data_size_from_cmnd;
	amount_left_to_write = common->data_size_from_cmnd;

	while (amount_left_to_write > 0) {

		/* Queue a request for more data from the host */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && get_some_more) {

			/* Figure out how much we want to get:
			 * Try to get the remaining amount.
			 * But don't get more than the buffer size.
			 * And don't try to go past the end of the file.
			 * If we're not at a page boundary,
			 * don't go past the next page.
			 * If this means getting 0, then we were asked
			 * to write past the end of file.
			 * Finally, round down to a block boundary. */
			amount = min(amount_left_to_req, FSG_BUFLEN);
			partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
			if (partial_page > 0)
				amount = min(amount,
					     (unsigned int) PAGE_CACHE_SIZE - partial_page);

			if (amount == 0) {
				get_some_more = 0;
				curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
				curlun->info_valid = 1;
				continue;
			}
			amount -= (amount & 511);
			if (amount == 0) {

				/* Why were we asked to transfer a
				 * partial block? */
				get_some_more = 0;
				continue;
			}

			/* Get the next buffer */
			usb_offset += amount;
			common->usb_amount_left -= amount;
			amount_left_to_req -= amount;
			if (amount_left_to_req == 0)
				get_some_more = 0;

			/* amount is always divisible by 512, hence by
			 * the bulk-out maxpacket size */
			bh->outreq->length = amount;
			bh->bulk_out_intended_length = amount;
			bh->outreq->short_not_ok = 1;
			START_TRANSFER_OR(common, bulk_out, bh->outreq,
					  &bh->outreq_busy, &bh->state)
				/* Don't know what to do if
				 * common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			continue;
		}

		/* Write the received data to the backing file */
		bh = common->next_buffhd_to_drain;
		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
			break;			/* We stopped early */
		if (bh->state == BUF_STATE_FULL) {
			common->next_buffhd_to_drain = bh->next;
			bh->state = BUF_STATE_EMPTY;

			/* Did something go wrong with the transfer? */
			if (bh->outreq->status != 0) {
				curlun->sense_data = SS_COMMUNICATION_FAILURE;
				curlun->info_valid = 1;
				break;
			}

			amount = bh->outreq->actual;

			/* Perform the write */
			rc = ums[common->lun].write_sector(&ums[common->lun],
					       file_offset / SECTOR_SIZE,
					       amount / SECTOR_SIZE,
					       (char __user *)bh->buf);
			if (!rc)
				return -EIO;
			nwritten = rc * SECTOR_SIZE;

			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
			      (unsigned long long) file_offset,
			      (int) nwritten);

			if (nwritten < 0) {
				LDBG(curlun, "error in file write: %d\n",
				     (int) nwritten);
				nwritten = 0;
			} else if (nwritten < amount) {
				LDBG(curlun, "partial file write: %d/%u\n",
				     (int) nwritten, amount);
				nwritten -= (nwritten & 511);
				/* Round down to a block */
			}
			file_offset += nwritten;
			amount_left_to_write -= nwritten;
			common->residue -= nwritten;

			/* If an error occurred, report it and its position */
			if (nwritten < amount) {
				printf("nwritten:%zd amount:%u\n", nwritten,
				       amount);
				curlun->sense_data = SS_WRITE_ERROR;
				curlun->info_valid = 1;
				break;
			}

			/* Did the host decide to stop early? */
			if (bh->outreq->actual != bh->outreq->length) {
				common->short_packet_received = 1;
				break;
			}
			continue;
		}

		/* Wait for something to happen */
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	return -EIO;		/* No default reply */
}

/*-------------------------------------------------------------------------*/

static int do_synchronize_cache(struct fsg_common *common)
{
	return 0;
}

/*-------------------------------------------------------------------------*/

static int do_verify(struct fsg_common *common)
{
	struct fsg_lun		*curlun = &common->luns[common->lun];
	u32			lba;
	u32			verification_length;
	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
	loff_t			file_offset;
	u32			amount_left;
	unsigned int		amount;
	ssize_t			nread;
	int			rc;

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	lba = get_unaligned_be32(&common->cmnd[2]);
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* We allow DPO (Disable Page Out = don't save data in the
	 * cache) but we don't implement it. */
	if (common->cmnd[1] & ~0x10) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	verification_length = get_unaligned_be16(&common->cmnd[7]);
	if (unlikely(verification_length == 0))
		return -EIO;		/* No default reply */

	/* Prepare to carry out the file verify */
	amount_left = verification_length << 9;
	file_offset = ((loff_t) lba) << 9;

	/* Write out all the dirty buffers before invalidating them */

	/* Just try to read the requested blocks */
	while (amount_left > 0) {

		/* Figure out how much we need to read:
		 * Try to read the remaining amount, but not more than
		 * the buffer size.
		 * And don't try to read past the end of the file.
		 * If this means reading 0 then we were asked to read
		 * past the end of file. */
		amount = min(amount_left, FSG_BUFLEN);
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->info_valid = 1;
			break;
		}

		/* Perform the read */
		rc = ums[common->lun].read_sector(&ums[common->lun],
				      file_offset / SECTOR_SIZE,
				      amount / SECTOR_SIZE,
				      (char __user *)bh->buf);
		if (!rc)
			return -EIO;
		nread = rc * SECTOR_SIZE;

		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long) file_offset,
		      (int) nread);
		if (nread < 0) {
			LDBG(curlun, "error in file verify: %d\n",
			     (int) nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file verify: %d/%u\n",
			     (int) nread, amount);
			nread -= (nread & 511);	/* Round down to a sector */
		}
		if (nread == 0) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->info_valid = 1;
			break;
		}
		file_offset += nread;
		amount_left -= nread;
	}
	return 0;
}

/*-------------------------------------------------------------------------*/

static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	static const char vendor_id[] = "Linux   ";
	u8	*buf = (u8 *) bh->buf;

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		memset(buf, 0, 36);
		buf[0] = 0x7f;		/* Unsupported, no device-type */
		buf[4] = 31;		/* Additional length */
		return 36;
	}

	memset(buf, 0, 8);
	buf[0] = TYPE_DISK;
	buf[1] = curlun->removable ? 0x80 : 0;
	buf[2] = 2;		/* ANSI SCSI level 2 */
	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
	buf[4] = 31;		/* Additional length */
	/* No special options */
	sprintf((char *) (buf + 8), "%-8s%-16s%04x", (char *) vendor_id,
		ums[common->lun].name, (u16) 0xffff);

	return 36;
}


static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	u8		*buf = (u8 *) bh->buf;
	u32		sd, sdinfo = 0;
	int		valid;

	/*
	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
	 *
	 * If a REQUEST SENSE command is received from an initiator
	 * with a pending unit attention condition (before the target
	 * generates the contingent allegiance condition), then the
	 * target shall either:
	 *   a) report any pending sense data and preserve the unit
	 *	attention condition on the logical unit, or,
	 *   b) report the unit attention condition, may discard any
	 *	pending sense data, and clear the unit attention
	 *	condition on the logical unit for that initiator.
	 *
	 * FSG normally uses option a); enable this code to use option b).
	 */
#if 0
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
	}
#endif

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
		sdinfo = 0;
		valid = 0;
	} else {
		sd = curlun->sense_data;
		valid = curlun->info_valid << 7;
		curlun->sense_data = SS_NO_SENSE;
		curlun->info_valid = 0;
	}

	memset(buf, 0, 18);
	buf[0] = valid | 0x70;			/* Valid, current error */
	buf[2] = SK(sd);
	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
	buf[7] = 18 - 8;			/* Additional sense length */
	buf[12] = ASC(sd);
	buf[13] = ASCQ(sd);
	return 18;
}
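
/*
 * For reference, a sketch of the 18-byte fixed-format sense data built
 * by do_request_sense() (offsets are the buf[] indices used above):
 *
 *	buf[0]      0x70 | valid   current error, fixed format
 *	buf[2]      SK(sd)         sense key
 *	buf[3..6]   sdinfo         information field, big-endian
 *	buf[7]      18 - 8         additional sense length
 *	buf[12]     ASC(sd)        additional sense code
 *	buf[13]     ASCQ(sd)       additional sense code qualifier
 */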
1164
1165static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
1166{
1167 struct fsg_lun *curlun = &common->luns[common->lun];
1168 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1169 int pmi = common->cmnd[8];
1170 u8 *buf = (u8 *) bh->buf;
1171
1172 /* Check the PMI and LBA fields */
1173 if (pmi > 1 || (pmi == 0 && lba != 0)) {
1174 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1175 return -EINVAL;
1176 }
1177
1178 put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
1179 /* Max logical block */
1180 put_unaligned_be32(512, &buf[4]); /* Block length */
1181 return 8;
1182}
1183
1184static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
1185{
1186 struct fsg_lun *curlun = &common->luns[common->lun];
1187 int msf = common->cmnd[1] & 0x02;
1188 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1189 u8 *buf = (u8 *) bh->buf;
1190
1191 if (common->cmnd[1] & ~0x02) { /* Mask away MSF */
1192 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1193 return -EINVAL;
1194 }
1195 if (lba >= curlun->num_sectors) {
1196 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1197 return -EINVAL;
1198 }
1199
1200 memset(buf, 0, 8);
1201 buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1202 store_cdrom_address(&buf[4], msf, lba);
1203 return 8;
1204}
1205
1206
1207static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
1208{
1209 struct fsg_lun *curlun = &common->luns[common->lun];
1210 int msf = common->cmnd[1] & 0x02;
1211 int start_track = common->cmnd[6];
1212 u8 *buf = (u8 *) bh->buf;
1213
1214 if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
1215 start_track > 1) {
1216 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1217 return -EINVAL;
1218 }
1219
1220 memset(buf, 0, 20);
1221 buf[1] = (20-2); /* TOC data length */
1222 buf[2] = 1; /* First track number */
1223 buf[3] = 1; /* Last track number */
1224 buf[5] = 0x16; /* Data track, copying allowed */
1225 buf[6] = 0x01; /* Only track is number 1 */
1226 store_cdrom_address(&buf[8], msf, 0);
1227
1228 buf[13] = 0x16; /* Lead-out track is data */
1229 buf[14] = 0xAA; /* Lead-out track number */
1230 store_cdrom_address(&buf[16], msf, curlun->num_sectors);
1231
1232 return 20;
1233}
1234
1235static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1236{
1237 struct fsg_lun *curlun = &common->luns[common->lun];
1238 int mscmnd = common->cmnd[0];
1239 u8 *buf = (u8 *) bh->buf;
1240 u8 *buf0 = buf;
1241 int pc, page_code;
1242 int changeable_values, all_pages;
1243 int valid_page = 0;
1244 int len, limit;
1245
1246 if ((common->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */
1247 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1248 return -EINVAL;
1249 }
1250 pc = common->cmnd[2] >> 6;
1251 page_code = common->cmnd[2] & 0x3f;
1252 if (pc == 3) {
1253 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1254 return -EINVAL;
1255 }
1256 changeable_values = (pc == 1);
1257 all_pages = (page_code == 0x3f);
1258
1259 /* Write the mode parameter header. Fixed values are: default
1260 * medium type, no cache control (DPOFUA), and no block descriptors.
1261 * The only variable value is the WriteProtect bit. We will fill in
1262 * the mode data length later. */
1263 memset(buf, 0, 8);
1264 if (mscmnd == SC_MODE_SENSE_6) {
1265 buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1266 buf += 4;
1267 limit = 255;
1268 } else { /* SC_MODE_SENSE_10 */
1269 buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1270 buf += 8;
1271 limit = 65535; /* Should really be FSG_BUFLEN */
1272 }
1273
1274 /* No block descriptors */
1275
1276 /* The mode pages, in numerical order. The only page we support
1277 * is the Caching page. */
1278 if (page_code == 0x08 || all_pages) {
1279 valid_page = 1;
1280 buf[0] = 0x08; /* Page code */
1281 buf[1] = 10; /* Page length */
1282 memset(buf+2, 0, 10); /* None of the fields are changeable */
1283
1284 if (!changeable_values) {
1285 buf[2] = 0x04; /* Write cache enable, */
1286 /* Read cache not disabled */
1287 /* No cache retention priorities */
1288 put_unaligned_be16(0xffff, &buf[4]);
1289 /* Don't disable prefetch */
1290 /* Minimum prefetch = 0 */
1291 put_unaligned_be16(0xffff, &buf[8]);
1292 /* Maximum prefetch */
1293 put_unaligned_be16(0xffff, &buf[10]);
1294 /* Maximum prefetch ceiling */
1295 }
1296 buf += 12;
1297 }
1298
1299 /* Check that a valid page was requested and the mode data length
1300 * isn't too long. */
1301 len = buf - buf0;
1302 if (!valid_page || len > limit) {
1303 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1304 return -EINVAL;
1305 }
1306
1307 /* Store the mode data length */
1308 if (mscmnd == SC_MODE_SENSE_6)
1309 buf0[0] = len - 1;
1310 else
1311 put_unaligned_be16(len - 2, buf0);
1312 return len;
1313}
1314
1315
1316static int do_start_stop(struct fsg_common *common)
1317{
1318 struct fsg_lun *curlun = &common->luns[common->lun];
1319
1320 if (!curlun) {
1321 return -EINVAL;
1322 } else if (!curlun->removable) {
1323 curlun->sense_data = SS_INVALID_COMMAND;
1324 return -EINVAL;
1325 }
1326
1327 return 0;
1328}
1329
1330static int do_prevent_allow(struct fsg_common *common)
1331{
1332 struct fsg_lun *curlun = &common->luns[common->lun];
1333 int prevent;
1334
1335 if (!curlun->removable) {
1336 curlun->sense_data = SS_INVALID_COMMAND;
1337 return -EINVAL;
1338 }
1339
1340 prevent = common->cmnd[4] & 0x01;
1341 if ((common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */
1342 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1343 return -EINVAL;
1344 }
1345
1346 if (curlun->prevent_medium_removal && !prevent)
1347 fsg_lun_fsync_sub(curlun);
1348 curlun->prevent_medium_removal = prevent;
1349 return 0;
1350}
1351
1352
1353static int do_read_format_capacities(struct fsg_common *common,
1354 struct fsg_buffhd *bh)
1355{
1356 struct fsg_lun *curlun = &common->luns[common->lun];
1357 u8 *buf = (u8 *) bh->buf;
1358
1359 buf[0] = buf[1] = buf[2] = 0;
1360 buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */
1361 buf += 4;
1362
1363 put_unaligned_be32(curlun->num_sectors, &buf[0]);
1364 /* Number of blocks */
1365 put_unaligned_be32(512, &buf[4]); /* Block length */
1366 buf[4] = 0x02; /* Current capacity */
1367 return 12;
1368}
1369
1370
1371static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
1372{
1373 struct fsg_lun *curlun = &common->luns[common->lun];
1374
1375 /* We don't support MODE SELECT */
1376 if (curlun)
1377 curlun->sense_data = SS_INVALID_COMMAND;
1378 return -EINVAL;
1379}
1380
1381
1382/*-------------------------------------------------------------------------*/
1383
1384static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
1385{
1386 int rc;
1387
1388 rc = fsg_set_halt(fsg, fsg->bulk_in);
1389 if (rc == -EAGAIN)
1390 VDBG(fsg, "delayed bulk-in endpoint halt\n");
1391 while (rc != 0) {
1392 if (rc != -EAGAIN) {
1393 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
1394 rc = 0;
1395 break;
1396 }
1397
1398 rc = usb_ep_set_halt(fsg->bulk_in);
1399 }
1400 return rc;
1401}
1402
1403static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
1404{
1405 int rc;
1406
1407 DBG(fsg, "bulk-in set wedge\n");
1408 rc = 0; /* usb_ep_set_wedge(fsg->bulk_in); */
1409 if (rc == -EAGAIN)
1410 VDBG(fsg, "delayed bulk-in endpoint wedge\n");
1411 while (rc != 0) {
1412 if (rc != -EAGAIN) {
1413 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
1414 rc = 0;
1415 break;
1416 }
1417 }
1418 return rc;
1419}
1420
1421static int pad_with_zeros(struct fsg_dev *fsg)
1422{
1423 struct fsg_buffhd *bh = fsg->common->next_buffhd_to_fill;
1424 u32 nkeep = bh->inreq->length;
1425 u32 nsend;
1426 int rc;
1427
1428 bh->state = BUF_STATE_EMPTY; /* For the first iteration */
1429 fsg->common->usb_amount_left = nkeep + fsg->common->residue;
1430 while (fsg->common->usb_amount_left > 0) {
1431
1432 /* Wait for the next buffer to be free */
1433 while (bh->state != BUF_STATE_EMPTY) {
1434 rc = sleep_thread(fsg->common);
1435 if (rc)
1436 return rc;
1437 }
1438
1439 nsend = min(fsg->common->usb_amount_left, FSG_BUFLEN);
1440 memset(bh->buf + nkeep, 0, nsend - nkeep);
1441 bh->inreq->length = nsend;
1442 bh->inreq->zero = 0;
1443 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1444 &bh->inreq_busy, &bh->state);
1445 bh = fsg->common->next_buffhd_to_fill = bh->next;
1446 fsg->common->usb_amount_left -= nsend;
1447 nkeep = 0;
1448 }
1449 return 0;
1450}
1451
1452static int throw_away_data(struct fsg_common *common)
1453{
1454 struct fsg_buffhd *bh;
1455 u32 amount;
1456 int rc;
1457
1458 for (bh = common->next_buffhd_to_drain;
1459 bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
1460 bh = common->next_buffhd_to_drain) {
1461
1462 /* Throw away the data in a filled buffer */
1463 if (bh->state == BUF_STATE_FULL) {
1464 bh->state = BUF_STATE_EMPTY;
1465 common->next_buffhd_to_drain = bh->next;
1466
1467 /* A short packet or an error ends everything */
1468 if (bh->outreq->actual != bh->outreq->length ||
1469 bh->outreq->status != 0) {
1470 raise_exception(common,
1471 FSG_STATE_ABORT_BULK_OUT);
1472 return -EINTR;
1473 }
1474 continue;
1475 }
1476
1477 /* Try to submit another request if we need one */
1478 bh = common->next_buffhd_to_fill;
1479 if (bh->state == BUF_STATE_EMPTY
1480 && common->usb_amount_left > 0) {
1481 amount = min(common->usb_amount_left, FSG_BUFLEN);
1482
1483 /* amount is always divisible by 512, hence by
1484 * the bulk-out maxpacket size */
1485 bh->outreq->length = amount;
1486 bh->bulk_out_intended_length = amount;
1487 bh->outreq->short_not_ok = 1;
1488 START_TRANSFER_OR(common, bulk_out, bh->outreq,
1489 &bh->outreq_busy, &bh->state)
1490 /* Don't know what to do if
1491 * common->fsg is NULL */
1492 return -EIO;
1493 common->next_buffhd_to_fill = bh->next;
1494 common->usb_amount_left -= amount;
1495 continue;
1496 }
1497
1498 /* Otherwise wait for something to happen */
1499 rc = sleep_thread(common);
1500 if (rc)
1501 return rc;
1502 }
1503 return 0;
1504}
1505
1506
1507static int finish_reply(struct fsg_common *common)
1508{
1509 struct fsg_buffhd *bh = common->next_buffhd_to_fill;
1510 int rc = 0;
1511
1512 switch (common->data_dir) {
1513 case DATA_DIR_NONE:
1514 break; /* Nothing to send */
1515
1516 /* If we don't know whether the host wants to read or write,
1517 * this must be CB or CBI with an unknown command. We mustn't
1518 * try to send or receive any data. So stall both bulk pipes
1519 * if we can and wait for a reset. */
1520 case DATA_DIR_UNKNOWN:
1521 if (!common->can_stall) {
1522 /* Nothing */
1523 } else if (fsg_is_set(common)) {
1524 fsg_set_halt(common->fsg, common->fsg->bulk_out);
1525 rc = halt_bulk_in_endpoint(common->fsg);
1526 } else {
1527 /* Don't know what to do if common->fsg is NULL */
1528 rc = -EIO;
1529 }
1530 break;
1531
1532 /* All but the last buffer of data must have already been sent */
1533 case DATA_DIR_TO_HOST:
1534 if (common->data_size == 0) {
1535 /* Nothing to send */
1536
1537 /* If there's no residue, simply send the last buffer */
1538 } else if (common->residue == 0) {
1539 bh->inreq->zero = 0;
1540 START_TRANSFER_OR(common, bulk_in, bh->inreq,
1541 &bh->inreq_busy, &bh->state)
1542 return -EIO;
1543 common->next_buffhd_to_fill = bh->next;
1544
1545 /* For Bulk-only, if we're allowed to stall then send the
1546 * short packet and halt the bulk-in endpoint. If we can't
1547 * stall, pad out the remaining data with 0's. */
1548 } else if (common->can_stall) {
1549 bh->inreq->zero = 1;
1550 START_TRANSFER_OR(common, bulk_in, bh->inreq,
1551 &bh->inreq_busy, &bh->state)
1552 /* Don't know what to do if
1553 * common->fsg is NULL */
1554 rc = -EIO;
1555 common->next_buffhd_to_fill = bh->next;
1556 if (common->fsg)
1557 rc = halt_bulk_in_endpoint(common->fsg);
1558 } else if (fsg_is_set(common)) {
1559 rc = pad_with_zeros(common->fsg);
1560 } else {
1561 /* Don't know what to do if common->fsg is NULL */
1562 rc = -EIO;
1563 }
1564 break;
1565
1566 /* We have processed all we want from the data the host has sent.
1567 * There may still be outstanding bulk-out requests. */
1568 case DATA_DIR_FROM_HOST:
1569 if (common->residue == 0) {
1570 /* Nothing to receive */
1571
1572 /* Did the host stop sending unexpectedly early? */
1573 } else if (common->short_packet_received) {
1574 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1575 rc = -EINTR;
1576
1577 /* We haven't processed all the incoming data. Even though
1578 * we may be allowed to stall, doing so would cause a race.
1579 * The controller may already have ACK'ed all the remaining
1580 * bulk-out packets, in which case the host wouldn't see a
1581 * STALL. Not realizing the endpoint was halted, it wouldn't
1582 * clear the halt -- leading to problems later on. */
1583#if 0
1584 } else if (common->can_stall) {
1585 if (fsg_is_set(common))
1586 fsg_set_halt(common->fsg,
1587 common->fsg->bulk_out);
1588 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1589 rc = -EINTR;
1590#endif
1591
1592 /* We can't stall. Read in the excess data and throw it
1593 * all away. */
1594 } else {
1595 rc = throw_away_data(common);
1596 }
1597 break;
1598 }
1599 return rc;
1600}
1601
1602
1603static int send_status(struct fsg_common *common)
1604{
1605 struct fsg_lun *curlun = &common->luns[common->lun];
1606 struct fsg_buffhd *bh;
1607 struct bulk_cs_wrap *csw;
1608 int rc;
1609 u8 status = USB_STATUS_PASS;
1610 u32 sd, sdinfo = 0;
1611
1612 /* Wait for the next buffer to become available */
1613 bh = common->next_buffhd_to_fill;
1614 while (bh->state != BUF_STATE_EMPTY) {
1615 rc = sleep_thread(common);
1616 if (rc)
1617 return rc;
1618 }
1619
1620 if (curlun)
1621 sd = curlun->sense_data;
1622 else if (common->bad_lun_okay)
1623 sd = SS_NO_SENSE;
1624 else
1625 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1626
1627 if (common->phase_error) {
1628 DBG(common, "sending phase-error status\n");
1629 status = USB_STATUS_PHASE_ERROR;
1630 sd = SS_INVALID_COMMAND;
1631 } else if (sd != SS_NO_SENSE) {
1632 DBG(common, "sending command-failure status\n");
1633 status = USB_STATUS_FAIL;
1634 VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1635 " info x%x\n",
1636 SK(sd), ASC(sd), ASCQ(sd), sdinfo);
1637 }
1638
1639 /* Store and send the Bulk-only CSW */
1640 csw = (void *)bh->buf;
1641
1642 csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
1643 csw->Tag = common->tag;
1644 csw->Residue = cpu_to_le32(common->residue);
1645 csw->Status = status;
1646
1647 bh->inreq->length = USB_BULK_CS_WRAP_LEN;
1648 bh->inreq->zero = 0;
1649 START_TRANSFER_OR(common, bulk_in, bh->inreq,
1650 &bh->inreq_busy, &bh->state)
1651 /* Don't know what to do if common->fsg is NULL */
1652 return -EIO;
1653
1654 common->next_buffhd_to_fill = bh->next;
1655 return 0;
1656}
1657
1658
1659/*-------------------------------------------------------------------------*/
1660
1661/* Check whether the command is properly formed and whether its data size
1662 * and direction agree with the values we already have. */
1663static int check_command(struct fsg_common *common, int cmnd_size,
1664 enum data_direction data_dir, unsigned int mask,
1665 int needs_medium, const char *name)
1666{
1667 int i;
1668 int lun = common->cmnd[1] >> 5;
1669 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
1670 char hdlen[20];
1671 struct fsg_lun *curlun;
1672
1673 hdlen[0] = 0;
1674 if (common->data_dir != DATA_DIR_UNKNOWN)
1675 sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
1676 common->data_size);
1677 VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1678 name, cmnd_size, dirletter[(int) data_dir],
1679 common->data_size_from_cmnd, common->cmnd_size, hdlen);
1680
1681 /* We can't reply at all until we know the correct data direction
1682 * and size. */
1683 if (common->data_size_from_cmnd == 0)
1684 data_dir = DATA_DIR_NONE;
1685 if (common->data_size < common->data_size_from_cmnd) {
1686 /* Host data size < Device data size is a phase error.
1687 * Carry out the command, but only transfer as much as
1688 * we are allowed. */
1689 common->data_size_from_cmnd = common->data_size;
1690 common->phase_error = 1;
1691 }
1692 common->residue = common->data_size;
1693 common->usb_amount_left = common->data_size;
1694
1695 /* Conflicting data directions is a phase error */
1696 if (common->data_dir != data_dir
1697 && common->data_size_from_cmnd > 0) {
1698 common->phase_error = 1;
1699 return -EINVAL;
1700 }
1701
1702 /* Verify the length of the command itself */
1703 if (cmnd_size != common->cmnd_size) {
1704
1705 /* Special case workaround: There are plenty of buggy SCSI
1706		 * implementations. Many have issues with the cbw->Length
1707		 * field reporting a wrong command size. For those cases we
1708 * always try to work around the problem by using the length
1709 * sent by the host side provided it is at least as large
1710 * as the correct command length.
1711 * Examples of such cases would be MS-Windows, which issues
1712 * REQUEST SENSE with cbw->Length == 12 where it should
1713		 * be 6, and the Xbox 360 issuing INQUIRY, TEST UNIT READY and
1714 * REQUEST SENSE with cbw->Length == 10 where it should
1715 * be 6 as well.
1716 */
1717 if (cmnd_size <= common->cmnd_size) {
1718 DBG(common, "%s is buggy! Expected length %d "
1719 "but we got %d\n", name,
1720 cmnd_size, common->cmnd_size);
1721 cmnd_size = common->cmnd_size;
1722 } else {
1723 common->phase_error = 1;
1724 return -EINVAL;
1725 }
1726 }
1727
1728 /* Check that the LUN values are consistent */
1729 if (common->lun != lun)
1730 DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
1731 common->lun, lun);
1732
1733 /* Check the LUN */
1734	if (common->lun < common->nluns) {
1735		curlun = &common->luns[common->lun];
1736 if (common->cmnd[0] != SC_REQUEST_SENSE) {
1737 curlun->sense_data = SS_NO_SENSE;
1738 curlun->info_valid = 0;
1739 }
1740 } else {
1741 curlun = NULL;
1742 common->bad_lun_okay = 0;
1743
1744 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
1745 * to use unsupported LUNs; all others may not. */
1746 if (common->cmnd[0] != SC_INQUIRY &&
1747 common->cmnd[0] != SC_REQUEST_SENSE) {
1748 DBG(common, "unsupported LUN %d\n", common->lun);
1749 return -EINVAL;
1750 }
1751 }
1752#if 0
1753 /* If a unit attention condition exists, only INQUIRY and
1754 * REQUEST SENSE commands are allowed; anything else must fail. */
1755 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
1756 common->cmnd[0] != SC_INQUIRY &&
1757 common->cmnd[0] != SC_REQUEST_SENSE) {
1758 curlun->sense_data = curlun->unit_attention_data;
1759 curlun->unit_attention_data = SS_NO_SENSE;
1760 return -EINVAL;
1761 }
1762#endif
1763 /* Check that only command bytes listed in the mask are non-zero */
1764 common->cmnd[1] &= 0x1f; /* Mask away the LUN */
1765 for (i = 1; i < cmnd_size; ++i) {
1766 if (common->cmnd[i] && !(mask & (1 << i))) {
1767 if (curlun)
1768 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1769 return -EINVAL;
1770 }
1771 }
1772
1773 return 0;
1774}
1775
1776
1777static int do_scsi_command(struct fsg_common *common)
1778{
1779 struct fsg_buffhd *bh;
1780 int rc;
1781 int reply = -EINVAL;
1782 int i;
1783 static char unknown[16];
1784 struct fsg_lun *curlun = &common->luns[common->lun];
1785
1786 dump_cdb(common);
1787
1788 /* Wait for the next buffer to become available for data or status */
1789 bh = common->next_buffhd_to_fill;
1790 common->next_buffhd_to_drain = bh;
1791 while (bh->state != BUF_STATE_EMPTY) {
1792 rc = sleep_thread(common);
1793 if (rc)
1794 return rc;
1795 }
1796 common->phase_error = 0;
1797 common->short_packet_received = 0;
1798
1799 down_read(&common->filesem); /* We're using the backing file */
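	/* For each opcode: derive the expected data length from the CDB,
	 * then let check_command() verify the CDB length, the data
	 * direction and the mask of CDB bytes allowed to be non-zero
	 * before running the handler. */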
1800 switch (common->cmnd[0]) {
1801
1802 case SC_INQUIRY:
1803 common->data_size_from_cmnd = common->cmnd[4];
1804 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1805 (1<<4), 0,
1806 "INQUIRY");
1807 if (reply == 0)
1808 reply = do_inquiry(common, bh);
1809 break;
1810
1811 case SC_MODE_SELECT_6:
1812 common->data_size_from_cmnd = common->cmnd[4];
1813 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
1814 (1<<1) | (1<<4), 0,
1815 "MODE SELECT(6)");
1816 if (reply == 0)
1817 reply = do_mode_select(common, bh);
1818 break;
1819
1820 case SC_MODE_SELECT_10:
1821 common->data_size_from_cmnd =
1822 get_unaligned_be16(&common->cmnd[7]);
1823 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
1824 (1<<1) | (3<<7), 0,
1825 "MODE SELECT(10)");
1826 if (reply == 0)
1827 reply = do_mode_select(common, bh);
1828 break;
1829
1830 case SC_MODE_SENSE_6:
1831 common->data_size_from_cmnd = common->cmnd[4];
1832 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1833 (1<<1) | (1<<2) | (1<<4), 0,
1834 "MODE SENSE(6)");
1835 if (reply == 0)
1836 reply = do_mode_sense(common, bh);
1837 break;
1838
1839 case SC_MODE_SENSE_10:
1840 common->data_size_from_cmnd =
1841 get_unaligned_be16(&common->cmnd[7]);
1842 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1843 (1<<1) | (1<<2) | (3<<7), 0,
1844 "MODE SENSE(10)");
1845 if (reply == 0)
1846 reply = do_mode_sense(common, bh);
1847 break;
1848
1849 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
1850 common->data_size_from_cmnd = 0;
1851 reply = check_command(common, 6, DATA_DIR_NONE,
1852 (1<<4), 0,
1853 "PREVENT-ALLOW MEDIUM REMOVAL");
1854 if (reply == 0)
1855 reply = do_prevent_allow(common);
1856 break;
1857
1858 case SC_READ_6:
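		/* READ(6): a transfer length byte of 0 means 256 blocks;
		 * << 9 converts 512-byte blocks to a byte count. */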
1859 i = common->cmnd[4];
1860 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1861 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1862 (7<<1) | (1<<4), 1,
1863 "READ(6)");
1864 if (reply == 0)
1865 reply = do_read(common);
1866 break;
1867
1868 case SC_READ_10:
1869 common->data_size_from_cmnd =
1870 get_unaligned_be16(&common->cmnd[7]) << 9;
1871 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1872 (1<<1) | (0xf<<2) | (3<<7), 1,
1873 "READ(10)");
1874 if (reply == 0)
1875 reply = do_read(common);
1876 break;
1877
1878 case SC_READ_12:
1879 common->data_size_from_cmnd =
1880 get_unaligned_be32(&common->cmnd[6]) << 9;
1881 reply = check_command(common, 12, DATA_DIR_TO_HOST,
1882 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1883 "READ(12)");
1884 if (reply == 0)
1885 reply = do_read(common);
1886 break;
1887
1888 case SC_READ_CAPACITY:
1889 common->data_size_from_cmnd = 8;
1890 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1891 (0xf<<2) | (1<<8), 1,
1892 "READ CAPACITY");
1893 if (reply == 0)
1894 reply = do_read_capacity(common, bh);
1895 break;
1896
1897 case SC_READ_HEADER:
1898 if (!common->luns[common->lun].cdrom)
1899 goto unknown_cmnd;
1900 common->data_size_from_cmnd =
1901 get_unaligned_be16(&common->cmnd[7]);
1902 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1903 (3<<7) | (0x1f<<1), 1,
1904 "READ HEADER");
1905 if (reply == 0)
1906 reply = do_read_header(common, bh);
1907 break;
1908
1909 case SC_READ_TOC:
1910 if (!common->luns[common->lun].cdrom)
1911 goto unknown_cmnd;
1912 common->data_size_from_cmnd =
1913 get_unaligned_be16(&common->cmnd[7]);
1914 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1915 (7<<6) | (1<<1), 1,
1916 "READ TOC");
1917 if (reply == 0)
1918 reply = do_read_toc(common, bh);
1919 break;
1920
1921 case SC_READ_FORMAT_CAPACITIES:
1922 common->data_size_from_cmnd =
1923 get_unaligned_be16(&common->cmnd[7]);
1924 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1925 (3<<7), 1,
1926 "READ FORMAT CAPACITIES");
1927 if (reply == 0)
1928 reply = do_read_format_capacities(common, bh);
1929 break;
1930
1931 case SC_REQUEST_SENSE:
1932 common->data_size_from_cmnd = common->cmnd[4];
1933 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1934 (1<<4), 0,
1935 "REQUEST SENSE");
1936 if (reply == 0)
1937 reply = do_request_sense(common, bh);
1938 break;
1939
1940 case SC_START_STOP_UNIT:
1941 common->data_size_from_cmnd = 0;
1942 reply = check_command(common, 6, DATA_DIR_NONE,
1943 (1<<1) | (1<<4), 0,
1944 "START-STOP UNIT");
1945 if (reply == 0)
1946 reply = do_start_stop(common);
1947 break;
1948
1949 case SC_SYNCHRONIZE_CACHE:
1950 common->data_size_from_cmnd = 0;
1951 reply = check_command(common, 10, DATA_DIR_NONE,
1952 (0xf<<2) | (3<<7), 1,
1953 "SYNCHRONIZE CACHE");
1954 if (reply == 0)
1955 reply = do_synchronize_cache(common);
1956 break;
1957
1958 case SC_TEST_UNIT_READY:
1959 common->data_size_from_cmnd = 0;
1960 reply = check_command(common, 6, DATA_DIR_NONE,
1961 0, 1,
1962 "TEST UNIT READY");
1963 break;
1964
1965 /* Although optional, this command is used by MS-Windows. We
1966 * support a minimal version: BytChk must be 0. */
1967 case SC_VERIFY:
1968 common->data_size_from_cmnd = 0;
1969 reply = check_command(common, 10, DATA_DIR_NONE,
1970 (1<<1) | (0xf<<2) | (3<<7), 1,
1971 "VERIFY");
1972 if (reply == 0)
1973 reply = do_verify(common);
1974 break;
1975
1976 case SC_WRITE_6:
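		/* WRITE(6) uses the same convention as READ(6): length 0
		 * means 256 blocks, << 9 scales 512-byte blocks to bytes. */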
1977 i = common->cmnd[4];
1978 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1979 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
1980 (7<<1) | (1<<4), 1,
1981 "WRITE(6)");
1982 if (reply == 0)
1983 reply = do_write(common);
1984 break;
1985
1986 case SC_WRITE_10:
1987 common->data_size_from_cmnd =
1988 get_unaligned_be16(&common->cmnd[7]) << 9;
1989 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
1990 (1<<1) | (0xf<<2) | (3<<7), 1,
1991 "WRITE(10)");
1992 if (reply == 0)
1993 reply = do_write(common);
1994 break;
1995
1996 case SC_WRITE_12:
1997 common->data_size_from_cmnd =
1998 get_unaligned_be32(&common->cmnd[6]) << 9;
1999 reply = check_command(common, 12, DATA_DIR_FROM_HOST,
2000 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2001 "WRITE(12)");
2002 if (reply == 0)
2003 reply = do_write(common);
2004 break;
2005
2006 /* Some mandatory commands that we recognize but don't implement.
2007 * They don't mean much in this setting. It's left as an exercise
2008 * for anyone interested to implement RESERVE and RELEASE in terms
2009 * of Posix locks. */
2010 case SC_FORMAT_UNIT:
2011 case SC_RELEASE:
2012 case SC_RESERVE:
2013 case SC_SEND_DIAGNOSTIC:
2014 /* Fall through */
2015
2016 default:
2017unknown_cmnd:
2018 common->data_size_from_cmnd = 0;
2019 sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
2020 reply = check_command(common, common->cmnd_size,
2021 DATA_DIR_UNKNOWN, 0xff, 0, unknown);
2022 if (reply == 0) {
2023 curlun->sense_data = SS_INVALID_COMMAND;
2024 reply = -EINVAL;
2025 }
2026 break;
2027 }
2028 up_read(&common->filesem);
2029
2030 if (reply == -EINTR)
2031 return -EINTR;
2032
2033 /* Set up the single reply buffer for finish_reply() */
2034 if (reply == -EINVAL)
2035 reply = 0; /* Error reply length */
2036 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
2037 reply = min((u32) reply, common->data_size_from_cmnd);
2038 bh->inreq->length = reply;
2039 bh->state = BUF_STATE_FULL;
2040 common->residue -= reply;
2041 } /* Otherwise it's already set */
2042
2043 return 0;
2044}
2045
2046/*-------------------------------------------------------------------------*/
2047
2048static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2049{
2050 struct usb_request *req = bh->outreq;
2051 struct fsg_bulk_cb_wrap *cbw = req->buf;
2052 struct fsg_common *common = fsg->common;
2053
2054 /* Was this a real packet? Should it be ignored? */
2055 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2056 return -EINVAL;
2057
2058 /* Is the CBW valid? */
2059 if (req->actual != USB_BULK_CB_WRAP_LEN ||
2060 cbw->Signature != cpu_to_le32(
2061 USB_BULK_CB_SIG)) {
2062 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2063 req->actual,
2064 le32_to_cpu(cbw->Signature));
2065
2066 /* The Bulk-only spec says we MUST stall the IN endpoint
2067 * (6.6.1), so it's unavoidable. It also says we must
2068 * retain this state until the next reset, but there's
2069 * no way to tell the controller driver it should ignore
2070 * Clear-Feature(HALT) requests.
2071 *
2072 * We aren't required to halt the OUT endpoint; instead
2073 * we can simply accept and discard any data received
2074 * until the next reset. */
2075 wedge_bulk_in_endpoint(fsg);
2076		generic_set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2077		return -EINVAL;
2078 }
2079
2080 /* Is the CBW meaningful? */
2081 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2082 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2083 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2084 "cmdlen %u\n",
2085 cbw->Lun, cbw->Flags, cbw->Length);
2086
2087 /* We can do anything we want here, so let's stall the
2088 * bulk pipes if we are allowed to. */
2089 if (common->can_stall) {
2090 fsg_set_halt(fsg, fsg->bulk_out);
2091 halt_bulk_in_endpoint(fsg);
2092 }
2093 return -EINVAL;
2094 }
2095
2096 /* Save the command for later */
2097 common->cmnd_size = cbw->Length;
2098 memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
2099 if (cbw->Flags & USB_BULK_IN_FLAG)
2100 common->data_dir = DATA_DIR_TO_HOST;
2101 else
2102 common->data_dir = DATA_DIR_FROM_HOST;
2103 common->data_size = le32_to_cpu(cbw->DataTransferLength);
2104 if (common->data_size == 0)
2105 common->data_dir = DATA_DIR_NONE;
2106 common->lun = cbw->Lun;
2107 common->tag = cbw->Tag;
2108 return 0;
2109}
2110
2111
2112static int get_next_command(struct fsg_common *common)
2113{
2114 struct fsg_buffhd *bh;
2115 int rc = 0;
2116
2117 /* Wait for the next buffer to become available */
2118 bh = common->next_buffhd_to_fill;
2119 while (bh->state != BUF_STATE_EMPTY) {
2120 rc = sleep_thread(common);
2121 if (rc)
2122 return rc;
2123 }
2124
2125 /* Queue a request to read a Bulk-only CBW */
2126 set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
2127 bh->outreq->short_not_ok = 1;
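	/* short_not_ok makes the UDC flag a short OUT packet as an error
	 * instead of silently completing a truncated CBW. */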
2128 START_TRANSFER_OR(common, bulk_out, bh->outreq,
2129 &bh->outreq_busy, &bh->state)
2130 /* Don't know what to do if common->fsg is NULL */
2131 return -EIO;
2132
2133 /* We will drain the buffer in software, which means we
2134 * can reuse it for the next filling. No need to advance
2135 * next_buffhd_to_fill. */
2136
2137 /* Wait for the CBW to arrive */
2138 while (bh->state != BUF_STATE_FULL) {
2139 rc = sleep_thread(common);
2140 if (rc)
2141 return rc;
2142 }
2143
2144 rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
2145 bh->state = BUF_STATE_EMPTY;
2146
2147 return rc;
2148}
2149
2150
2151/*-------------------------------------------------------------------------*/
2152
2153static int enable_endpoint(struct fsg_common *common, struct usb_ep *ep,
2154 const struct usb_endpoint_descriptor *d)
2155{
2156 int rc;
2157
2158 ep->driver_data = common;
2159 rc = usb_ep_enable(ep, d);
2160 if (rc)
2161 ERROR(common, "can't enable %s, result %d\n", ep->name, rc);
2162 return rc;
2163}
2164
2165static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
2166 struct usb_request **preq)
2167{
2168 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2169 if (*preq)
2170 return 0;
2171 ERROR(common, "can't allocate request for %s\n", ep->name);
2172 return -ENOMEM;
2173}
2174
2175/* Reset interface setting and re-init endpoint state (toggle etc). */
2176static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
2177{
2178 const struct usb_endpoint_descriptor *d;
2179 struct fsg_dev *fsg;
2180 int i, rc = 0;
2181
2182 if (common->running)
2183 DBG(common, "reset interface\n");
2184
2185reset:
2186 /* Deallocate the requests */
2187 if (common->fsg) {
2188 fsg = common->fsg;
2189
2190 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2191 struct fsg_buffhd *bh = &common->buffhds[i];
2192
2193 if (bh->inreq) {
2194 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2195 bh->inreq = NULL;
2196 }
2197 if (bh->outreq) {
2198 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2199 bh->outreq = NULL;
2200 }
2201 }
2202
2203 /* Disable the endpoints */
2204 if (fsg->bulk_in_enabled) {
2205 usb_ep_disable(fsg->bulk_in);
2206 fsg->bulk_in_enabled = 0;
2207 }
2208 if (fsg->bulk_out_enabled) {
2209 usb_ep_disable(fsg->bulk_out);
2210 fsg->bulk_out_enabled = 0;
2211 }
2212
2213 common->fsg = NULL;
2214 /* wake_up(&common->fsg_wait); */
2215 }
2216
2217 common->running = 0;
2218 if (!new_fsg || rc)
2219 return rc;
2220
2221 common->fsg = new_fsg;
2222 fsg = common->fsg;
2223
2224 /* Enable the endpoints */
2225 d = fsg_ep_desc(common->gadget,
2226 &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
2227 rc = enable_endpoint(common, fsg->bulk_in, d);
2228 if (rc)
2229 goto reset;
2230 fsg->bulk_in_enabled = 1;
2231
2232 d = fsg_ep_desc(common->gadget,
2233 &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
2234 rc = enable_endpoint(common, fsg->bulk_out, d);
2235 if (rc)
2236 goto reset;
2237 fsg->bulk_out_enabled = 1;
2238	common->bulk_out_maxpacket =
2239			le16_to_cpu(get_unaligned(&d->wMaxPacketSize));
2240	generic_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2241
2242 /* Allocate the requests */
2243 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2244 struct fsg_buffhd *bh = &common->buffhds[i];
2245
2246 rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
2247 if (rc)
2248 goto reset;
2249 rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
2250 if (rc)
2251 goto reset;
2252 bh->inreq->buf = bh->outreq->buf = bh->buf;
2253 bh->inreq->context = bh->outreq->context = bh;
2254 bh->inreq->complete = bulk_in_complete;
2255 bh->outreq->complete = bulk_out_complete;
2256 }
2257
2258 common->running = 1;
2259
2260 return rc;
2261}
2262
2263
2264/****************************** ALT CONFIGS ******************************/
2265
2266
2267static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2268{
2269 struct fsg_dev *fsg = fsg_from_func(f);
2270 fsg->common->new_fsg = fsg;
2271 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2272 return 0;
2273}
2274
2275static void fsg_disable(struct usb_function *f)
2276{
2277 struct fsg_dev *fsg = fsg_from_func(f);
2278 fsg->common->new_fsg = NULL;
2279 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2280}
2281
2282/*-------------------------------------------------------------------------*/
2283
2284static void handle_exception(struct fsg_common *common)
2285{
2286 int i;
2287 struct fsg_buffhd *bh;
2288 enum fsg_state old_state;
2289 struct fsg_lun *curlun;
2290 unsigned int exception_req_tag;
2291
2292 /* Cancel all the pending transfers */
2293 if (common->fsg) {
2294 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2295 bh = &common->buffhds[i];
2296 if (bh->inreq_busy)
2297 usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
2298 if (bh->outreq_busy)
2299 usb_ep_dequeue(common->fsg->bulk_out,
2300 bh->outreq);
2301 }
2302
2303 /* Wait until everything is idle */
2304 for (;;) {
2305 int num_active = 0;
2306 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2307 bh = &common->buffhds[i];
2308 num_active += bh->inreq_busy + bh->outreq_busy;
2309 }
2310 if (num_active == 0)
2311 break;
2312 if (sleep_thread(common))
2313 return;
2314 }
2315
2316 /* Clear out the controller's fifos */
2317 if (common->fsg->bulk_in_enabled)
2318 usb_ep_fifo_flush(common->fsg->bulk_in);
2319 if (common->fsg->bulk_out_enabled)
2320 usb_ep_fifo_flush(common->fsg->bulk_out);
2321 }
2322
2323 /* Reset the I/O buffer states and pointers, the SCSI
2324 * state, and the exception. Then invoke the handler. */
2325
2326 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2327 bh = &common->buffhds[i];
2328 bh->state = BUF_STATE_EMPTY;
2329 }
2330 common->next_buffhd_to_fill = &common->buffhds[0];
2331 common->next_buffhd_to_drain = &common->buffhds[0];
2332 exception_req_tag = common->exception_req_tag;
2333 old_state = common->state;
2334
2335 if (old_state == FSG_STATE_ABORT_BULK_OUT)
2336 common->state = FSG_STATE_STATUS_PHASE;
2337 else {
2338 for (i = 0; i < common->nluns; ++i) {
2339 curlun = &common->luns[i];
2340 curlun->sense_data = SS_NO_SENSE;
2341 curlun->info_valid = 0;
2342 }
2343 common->state = FSG_STATE_IDLE;
2344 }
2345
2346 /* Carry out any extra actions required for the exception */
2347 switch (old_state) {
2348 case FSG_STATE_ABORT_BULK_OUT:
2349 send_status(common);
2350
2351 if (common->state == FSG_STATE_STATUS_PHASE)
2352 common->state = FSG_STATE_IDLE;
2353 break;
2354
2355 case FSG_STATE_RESET:
2356 /* In case we were forced against our will to halt a
2357 * bulk endpoint, clear the halt now. (The SuperH UDC
2358 * requires this.) */
2359 if (!fsg_is_set(common))
2360 break;
2361 if (test_and_clear_bit(IGNORE_BULK_OUT,
2362 &common->fsg->atomic_bitflags))
2363 usb_ep_clear_halt(common->fsg->bulk_in);
2364
2365 if (common->ep0_req_tag == exception_req_tag)
2366 ep0_queue(common); /* Complete the status stage */
2367
2368 break;
2369
2370 case FSG_STATE_CONFIG_CHANGE:
2371 do_set_interface(common, common->new_fsg);
2372 break;
2373
2374 case FSG_STATE_EXIT:
2375 case FSG_STATE_TERMINATED:
2376 do_set_interface(common, NULL); /* Free resources */
2377 common->state = FSG_STATE_TERMINATED; /* Stop the thread */
2378 break;
2379
2380 case FSG_STATE_INTERFACE_CHANGE:
2381 case FSG_STATE_DISCONNECT:
2382 case FSG_STATE_COMMAND_PHASE:
2383 case FSG_STATE_DATA_PHASE:
2384 case FSG_STATE_STATUS_PHASE:
2385 case FSG_STATE_IDLE:
2386 break;
2387 }
2388}
2389
2390/*-------------------------------------------------------------------------*/
2391
2392int fsg_main_thread(void *common_)
2393{
2394	int ret;
2395	struct fsg_common *common = the_fsg_common;
2396 /* The main loop */
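	/* Note: this "loop" is do { } while (0), i.e. at most one command
	 * cycle per call; the surrounding U-Boot code is expected to call
	 * fsg_main_thread() repeatedly, unlike the kernel's endless thread. */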
2397 do {
2398 if (exception_in_progress(common)) {
2399 handle_exception(common);
2400 continue;
2401 }
2402
2403 if (!common->running) {
2404			ret = sleep_thread(common);
2405 if (ret)
2406 return ret;
2407
2408			continue;
2409 }
2410
2411		ret = get_next_command(common);
2412 if (ret)
2413 return ret;
2414
2415 if (!exception_in_progress(common))
2416 common->state = FSG_STATE_DATA_PHASE;
2417
2418 if (do_scsi_command(common) || finish_reply(common))
2419 continue;
2420
2421 if (!exception_in_progress(common))
2422 common->state = FSG_STATE_STATUS_PHASE;
2423
2424 if (send_status(common))
2425 continue;
2426
2427 if (!exception_in_progress(common))
2428 common->state = FSG_STATE_IDLE;
2429 } while (0);
2430
2431 common->thread_task = NULL;
2432
2433 return 0;
2434}
2435
2436static void fsg_common_release(struct kref *ref);
2437
2438static struct fsg_common *fsg_common_init(struct fsg_common *common,
2439 struct usb_composite_dev *cdev)
2440{
2441 struct usb_gadget *gadget = cdev->gadget;
2442 struct fsg_buffhd *bh;
2443 struct fsg_lun *curlun;
2444 int nluns, i, rc;
2445
2446 /* Find out how many LUNs there should be */
2447	nluns = ums_count;
2448	if (nluns < 1 || nluns > FSG_MAX_LUNS) {
2449 printf("invalid number of LUNs: %u\n", nluns);
2450 return ERR_PTR(-EINVAL);
2451 }
2452
2453 /* Allocate? */
2454 if (!common) {
2455		common = calloc(sizeof(*common), 1);
2456		if (!common)
2457 return ERR_PTR(-ENOMEM);
2458 common->free_storage_on_release = 1;
2459 } else {
2460		memset(common, 0, sizeof(*common));
2461		common->free_storage_on_release = 0;
2462 }
2463
2464 common->ops = NULL;
2465 common->private_data = NULL;
2466
2467 common->gadget = gadget;
2468 common->ep0 = gadget->ep0;
2469 common->ep0req = cdev->req;
2470
2471 /* Maybe allocate device-global string IDs, and patch descriptors */
2472 if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
2473 rc = usb_string_id(cdev);
2474 if (unlikely(rc < 0))
2475 goto error_release;
2476 fsg_strings[FSG_STRING_INTERFACE].id = rc;
2477 fsg_intf_desc.iInterface = rc;
2478 }
2479
2480	/* Create the LUNs and open their backing files. */
2482 curlun = calloc(nluns, sizeof *curlun);
2483 if (!curlun) {
2484 rc = -ENOMEM;
2485 goto error_release;
2486 }
2487 common->nluns = nluns;
2488
2489 for (i = 0; i < nluns; i++) {
2490 common->luns[i].removable = 1;
2491
2492		rc = fsg_lun_open(&common->luns[i], ums[i].num_sectors, "");
2493		if (rc)
2494 goto error_luns;
2495 }
2496 common->lun = 0;
2497
2498 /* Data buffers cyclic list */
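	/* The goto below jumps into the middle of the do-while so that the
	 * link-and-advance step runs between allocations rather than before
	 * the first one; the final bh->next assignment closes the ring. */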
2499 bh = common->buffhds;
2500
2501 i = FSG_NUM_BUFFERS;
2502 goto buffhds_first_it;
2503 do {
2504 bh->next = bh + 1;
2505 ++bh;
2506buffhds_first_it:
2507 bh->inreq_busy = 0;
2508 bh->outreq_busy = 0;
2509		bh->buf = memalign(CONFIG_SYS_CACHELINE_SIZE, FSG_BUFLEN);
2510		if (unlikely(!bh->buf)) {
2511 rc = -ENOMEM;
2512 goto error_release;
2513 }
2514 } while (--i);
2515 bh->next = common->buffhds;
2516
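	/* Standard INQUIRY layout: 8-byte vendor id, 16-byte product id and
	 * 4-byte revision, hence the field widths below. */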
2517 snprintf(common->inquiry_string, sizeof common->inquiry_string,
2518 "%-8s%-16s%04x",
2519 "Linux ",
2520 "File-Store Gadget",
2521 0xffff);
2522
2523 /* Some peripheral controllers are known not to be able to
2524 * halt bulk endpoints correctly. If one of them is present,
2525 * disable stalls.
2526 */
2527
2528 /* Tell the thread to start working */
2529 common->thread_task =
2530 kthread_create(fsg_main_thread, common,
2531 OR(cfg->thread_name, "file-storage"));
2532 if (IS_ERR(common->thread_task)) {
2533 rc = PTR_ERR(common->thread_task);
2534 goto error_release;
2535 }
2536
2537#undef OR
2538 /* Information */
2539 INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
2540 INFO(common, "Number of LUNs=%d\n", common->nluns);
2541
2542 return common;
2543
2544error_luns:
2545 common->nluns = i + 1;
2546error_release:
2547 common->state = FSG_STATE_TERMINATED; /* The thread is dead */
2548	/* Call fsg_common_release() directly; ref might not be
2549	 * initialised */
2550 fsg_common_release(&common->ref);
2551 return ERR_PTR(rc);
2552}
2553
2554static void fsg_common_release(struct kref *ref)
2555{
2556 struct fsg_common *common = container_of(ref, struct fsg_common, ref);
2557
2558 /* If the thread isn't already dead, tell it to exit now */
2559 if (common->state != FSG_STATE_TERMINATED) {
2560 raise_exception(common, FSG_STATE_EXIT);
2561 wait_for_completion(&common->thread_notifier);
2562 }
2563
2564 if (likely(common->luns)) {
2565 struct fsg_lun *lun = common->luns;
2566 unsigned i = common->nluns;
2567
2568 /* In error recovery common->nluns may be zero. */
2569 for (; i; --i, ++lun)
2570 fsg_lun_close(lun);
2571
2572 kfree(common->luns);
2573 }
2574
2575 {
2576 struct fsg_buffhd *bh = common->buffhds;
2577 unsigned i = FSG_NUM_BUFFERS;
2578 do {
2579 kfree(bh->buf);
2580 } while (++bh, --i);
2581 }
2582
2583 if (common->free_storage_on_release)
2584 kfree(common);
2585}
2586
2587
2588/*-------------------------------------------------------------------------*/
2589
2590/**
2591 * usb_copy_descriptors - copy a vector of USB descriptors
2592 * @src: null-terminated vector to copy
2593 * Context: initialization code, which may sleep
2594 *
2595 * This makes a copy of a vector of USB descriptors. Its primary use
2596 * is to support usb_function objects which can have multiple copies,
2597 * each needing different descriptors. Functions may have static
2598 * tables of descriptors, which are used as templates and customized
2599 * with identifiers (for interfaces, strings, endpoints, and more)
2600 * as needed by a given function instance.
2601 */
2602struct usb_descriptor_header **
2603usb_copy_descriptors(struct usb_descriptor_header **src)
2604{
2605 struct usb_descriptor_header **tmp;
2606 unsigned bytes;
2607 unsigned n_desc;
2608 void *mem;
2609 struct usb_descriptor_header **ret;
2610
2611 /* count descriptors and their sizes; then add vector size */
2612 for (bytes = 0, n_desc = 0, tmp = src; *tmp; tmp++, n_desc++)
2613 bytes += (*tmp)->bLength;
2614 bytes += (n_desc + 1) * sizeof(*tmp);
2615
2616	mem = memalign(CONFIG_SYS_CACHELINE_SIZE, bytes);
2617	if (!mem)
2618 return NULL;
2619
2620 /* fill in pointers starting at "tmp",
2621 * to descriptors copied starting at "mem";
2622 * and return "ret"
2623 */
2624 tmp = mem;
2625 ret = mem;
2626 mem += (n_desc + 1) * sizeof(*tmp);
2627 while (*src) {
2628 memcpy(mem, *src, (*src)->bLength);
2629 *tmp = mem;
2630 tmp++;
2631 mem += (*src)->bLength;
2632 src++;
2633 }
2634 *tmp = NULL;
2635
2636 return ret;
2637}
2638
2639static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2640{
2641 struct fsg_dev *fsg = fsg_from_func(f);
2642
2643 DBG(fsg, "unbind\n");
2644 if (fsg->common->fsg == fsg) {
2645 fsg->common->new_fsg = NULL;
2646 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2647 }
2648
2649 free(fsg->function.descriptors);
2650 free(fsg->function.hs_descriptors);
2651 kfree(fsg);
2652}
2653
2654static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
2655{
2656 struct fsg_dev *fsg = fsg_from_func(f);
2657 struct usb_gadget *gadget = c->cdev->gadget;
2658 int i;
2659 struct usb_ep *ep;
2660 fsg->gadget = gadget;
2661
2662 /* New interface */
2663 i = usb_interface_id(c, f);
2664 if (i < 0)
2665 return i;
2666 fsg_intf_desc.bInterfaceNumber = i;
2667 fsg->interface_number = i;
2668
2669 /* Find all the endpoints we will use */
2670 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
2671 if (!ep)
2672 goto autoconf_fail;
2673 ep->driver_data = fsg->common; /* claim the endpoint */
2674 fsg->bulk_in = ep;
2675
2676 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
2677 if (!ep)
2678 goto autoconf_fail;
2679 ep->driver_data = fsg->common; /* claim the endpoint */
2680 fsg->bulk_out = ep;
2681
2682 /* Copy descriptors */
2683 f->descriptors = usb_copy_descriptors(fsg_fs_function);
2684 if (unlikely(!f->descriptors))
2685 return -ENOMEM;
2686
2687 if (gadget_is_dualspeed(gadget)) {
2688 /* Assume endpoint addresses are the same for both speeds */
2689 fsg_hs_bulk_in_desc.bEndpointAddress =
2690 fsg_fs_bulk_in_desc.bEndpointAddress;
2691 fsg_hs_bulk_out_desc.bEndpointAddress =
2692 fsg_fs_bulk_out_desc.bEndpointAddress;
2693 f->hs_descriptors = usb_copy_descriptors(fsg_hs_function);
2694 if (unlikely(!f->hs_descriptors)) {
2695 free(f->descriptors);
2696 return -ENOMEM;
2697 }
2698 }
2699 return 0;
2700
2701autoconf_fail:
2702 ERROR(fsg, "unable to autoconfigure all endpoints\n");
2703 return -ENOTSUPP;
2704}
2705
2706
2707/****************************** ADD FUNCTION ******************************/
2708
2709static struct usb_gadget_strings *fsg_strings_array[] = {
2710 &fsg_stringtab,
2711 NULL,
2712};
2713
2714static int fsg_bind_config(struct usb_composite_dev *cdev,
2715 struct usb_configuration *c,
2716 struct fsg_common *common)
2717{
2718 struct fsg_dev *fsg;
2719 int rc;
2720
2721 fsg = calloc(1, sizeof *fsg);
2722 if (!fsg)
2723 return -ENOMEM;
2724 fsg->function.name = FSG_DRIVER_DESC;
2725 fsg->function.strings = fsg_strings_array;
2726 fsg->function.bind = fsg_bind;
2727 fsg->function.unbind = fsg_unbind;
2728 fsg->function.setup = fsg_setup;
2729 fsg->function.set_alt = fsg_set_alt;
2730 fsg->function.disable = fsg_disable;
2731
2732 fsg->common = common;
2733 common->fsg = fsg;
2734	/* Our caller holds a reference to the common structure, so we
2735	 * don't have to worry about it being freed until we return
2736	 * from this function. So instead of incrementing the counter now
2737	 * and decrementing it in error recovery, we increment it only when
2738	 * the call to usb_add_function() is successful. */
2739
2740 rc = usb_add_function(c, &fsg->function);
2741
2742 if (rc)
2743 kfree(fsg);
2744
2745 return rc;
2746}
2747
2748int fsg_add(struct usb_configuration *c)
2749{
2750 struct fsg_common *fsg_common;
2751
2752	fsg_common = fsg_common_init(NULL, c->cdev);
	/* fsg_common_init() may return ERR_PTR(); don't dereference it */
	if (IS_ERR(fsg_common))
		return PTR_ERR(fsg_common);
2753
2754 fsg_common->vendor_name = 0;
2755 fsg_common->product_name = 0;
2756 fsg_common->release = 0xffff;
2757
2758 fsg_common->ops = NULL;
2759 fsg_common->private_data = NULL;
2760
2761 the_fsg_common = fsg_common;
2762
2763 return fsg_bind_config(c->cdev, c, fsg_common);
2764}
2765
2766int fsg_init(struct ums *ums_devs, int count)
2767{
2768	ums = ums_devs;
2769	ums_count = count;
2770
2771 return 0;
2772}
2773
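/* Register fsg_add() as the bind callback for the "usb_dnl_ums" download
 * gadget; U-Boot's ums support binds the mass-storage function through it. */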
2774DECLARE_GADGET_BIND_CALLBACK(usb_dnl_ums, fsg_add);