// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
 * f_mass_storage.c -- Mass Storage USB Composite Function
 *
 * Copyright (C) 2003-2008 Alan Stern
 * Copyright (C) 2009 Samsung Electronics
 * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
 * All rights reserved.
 */

/*
 * The Mass Storage Function acts as a USB Mass Storage device,
 * appearing to the host as a disk drive or as a CD-ROM drive. In
 * addition to providing an example of a genuinely useful composite
 * function for a USB device, it also illustrates a technique of
 * double-buffering for increased throughput.
 *
 * The function supports multiple logical units (LUNs). Backing storage
 * for each LUN is provided by a regular file or a block device.
 * Access for each LUN can be limited to read-only. Moreover, the
 * function can indicate that a LUN is removable and/or a CD-ROM. (The
 * latter implies read-only access.)
 *
 * MSF is configured by specifying an fsg_config structure. It has the
 * following fields:
 *
 *	nluns		Number of LUNs the function has (anywhere from 1
 *				to FSG_MAX_LUNS, which is 8).
 *	luns		An array of LUN configuration values. This
 *				should be filled for each LUN that the
 *				function will include (i.e. for "nluns"
 *				LUNs). Each element of the array has
 *				the following fields:
 *	->filename	The path to the backing file for the LUN.
 *				Required if the LUN is not marked as
 *				removable.
 *	->ro		Flag specifying that access to the LUN shall be
 *				read-only. This is implied if CD-ROM
 *				emulation is enabled, as well as when
 *				it was impossible to open "filename"
 *				in R/W mode.
 *	->removable	Flag specifying that the LUN shall be indicated
 *				as being removable.
 *	->cdrom		Flag specifying that the LUN shall be reported
 *				as being a CD-ROM.
 *
 *	lun_name_format	A printf-like format for names of the LUN
 *				devices. This determines how the
 *				directory in sysfs will be named.
 *				Unless you are using several MSFs in
 *				a single gadget (as opposed to a single
 *				MSF in many configurations) you may
 *				leave it as NULL (in which case
 *				"lun%d" will be used). In the format
 *				you can use "%d" to index LUNs for
 *				MSFs with more than one LUN. (Beware
 *				that there is only one integer given
 *				as an argument for the format and
 *				specifying an invalid format may cause
 *				unspecified behaviour.)
 *	thread_name	Name of the kernel thread process used by the
 *				MSF. You can safely set it to NULL
 *				(in which case the default
 *				"file-storage" will be used).
 *
 *	vendor_name
 *	product_name
 *	release		Information used as a reply to the INQUIRY
 *				request. To use the defaults set them to
 *				NULL, NULL, 0xffff respectively. The first
 *				field should be 8 and the second 16
 *				characters or less.
 *
 *	can_stall	Set to permit the function to halt bulk endpoints.
 *				Disabled on some USB devices known not
 *				to work correctly. You should set it
 *				to true.
 *
 * If "removable" is not set for a LUN then a backing file must be
 * specified. If it is set, then a NULL filename means the LUN's medium
 * is not loaded (an empty string as "filename" in the fsg_config
 * structure causes an error). The CD-ROM emulation includes a single
 * data track and no audio tracks; hence there need be only one
 * backing file per LUN. Note also that the CD-ROM block length is
 * set to 512 rather than the more common value 2048.
 *
 *
 * MSF includes support for module parameters. If the gadget using it
 * decides to use them, the following module parameters will be
 * available:
 *
 *	file=filename[,filename...]
 *			Names of the files or block devices used for
 *				backing storage.
 *	ro=b[,b...]	Default false, boolean for read-only access.
 *	removable=b[,b...]
 *			Default true, boolean for removable media.
 *	cdrom=b[,b...]	Default false, boolean for whether to emulate
 *				a CD-ROM drive.
 *	luns=N		Default N = number of filenames, number of
 *				LUNs to support.
 *	stall		Default determined according to the type of
 *				USB device controller (usually true),
 *				boolean to permit the driver to halt
 *				bulk endpoints.
 *
 * The module parameters may be prefixed with some string. You need
 * to consult the gadget's documentation or source to verify whether it
 * is using those module parameters and, if it does, what the prefixes
 * are (look for the FSG_MODULE_PARAMETERS() macro usage; what's inside
 * it is the prefix).
 *
 *
 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
 * needed. The memory requirement amounts to two 16K buffers, size
 * configurable by a parameter. Support is included for both
 * full-speed and high-speed operation.
 *
 * Note that the driver is slightly non-portable in that it assumes a
 * single memory/DMA buffer will be usable for bulk-in, bulk-out, and
 * interrupt-in endpoints. With most device controllers this isn't an
 * issue, but there may be some with hardware restrictions that prevent
 * a buffer from being used by more than one endpoint.
 *
 *
 * The pathnames of the backing files and the ro settings are
 * available in the attribute files "file" and "ro" in the lun<n> (or,
 * to be more precise, in a directory whose name comes from the
 * "lun_name_format" option!) subdirectory of the gadget's sysfs
 * directory. If the "removable" option is set, writing to these
 * files will simulate ejecting/loading the medium (writing an empty
 * line means eject) and adjusting a write-enable tab. Changes to the
 * ro setting are not allowed when the medium is loaded or if CD-ROM
 * emulation is being used.
 *
 * When a LUN receives an "eject" SCSI request (Start/Stop Unit),
 * if the LUN is removable, the backing file is released to simulate
 * ejection.
 *
 *
 * This function is heavily based on "File-backed Storage Gadget" by
 * Alan Stern, which in turn is heavily based on "Gadget Zero" by David
 * Brownell. The driver's SCSI command interface was based on the
 * "Information technology - Small Computer System Interface - 2"
 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
 * was based on the "Universal Serial Bus Mass Storage Class UFI
 * Command Specification" document, Revision 1.0, December 14, 1998,
 * available at
 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
 */

/*
 * Driver Design
 *
 * The MSF is fairly straightforward. There is a main kernel
 * thread that handles most of the work. Interrupt routines field
 * callbacks from the controller driver: bulk- and interrupt-request
 * completion notifications, endpoint-0 events, and disconnect events.
 * Completion events are passed to the main thread by wakeup calls. Many
 * ep0 requests are handled at interrupt time, but SetInterface,
 * SetConfiguration, and device reset requests are forwarded to the
 * thread in the form of "exceptions" using SIGUSR1 signals (since they
 * should interrupt any ongoing file I/O operations).
 *
 * The thread's main routine implements the standard command/data/status
 * parts of a SCSI interaction. It and its subroutines are full of tests
 * for pending signals/exceptions -- all this polling is necessary since
 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
 * indication that the driver really wants to be running in userspace.)
 * An important point is that so long as the thread is alive it keeps an
 * open reference to the backing file. This will prevent unmounting
 * the backing file's underlying filesystem and could cause problems
 * during system shutdown, for example. To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
 *
 * In normal operation the main thread is started during the gadget's
 * fsg_bind() callback and stopped during fsg_unbind(). But it can
 * also exit when it receives a signal, and there's no point leaving
 * the gadget running when the thread is dead. As of this moment, MSF
 * provides no way to deregister the gadget when the thread dies -- maybe
 * a callback function is needed.
 *
 * To provide maximum throughput, the driver uses a circular pipeline of
 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
 * arbitrarily long; in practice the benefits don't justify having more
 * than 2 stages (i.e., double buffering). But it helps to think of the
 * pipeline as being a long one. Each buffer head contains a bulk-in and
 * a bulk-out request pointer (since the buffer can be used for both
 * output and input -- directions always are given from the host's
 * point of view) as well as a pointer to the buffer and various state
 * variables.
 *
 * Use of the pipeline follows a simple protocol. There is a variable
 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
 * At any time that buffer head may still be in use from an earlier
 * request, so each buffer head has a state variable indicating whether
 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
 * buffer head to be EMPTY, filling the buffer either by file I/O or by
 * USB I/O (during which the buffer head is BUSY), and marking the buffer
 * head FULL when the I/O is complete. Then the buffer will be emptied
 * (again possibly by USB I/O, during which it is marked BUSY) and
 * finally marked EMPTY again (possibly by a completion routine). A
 * sketch of this cycle follows this comment.
 *
 * A module parameter tells the driver to avoid stalling the bulk
 * endpoints wherever the transport specification allows. This is
 * necessary for some UDCs like the SuperH, which cannot reliably clear a
 * halt on a bulk endpoint. However, under certain circumstances the
 * Bulk-only specification requires a stall. In such cases the driver
 * will halt the endpoint and set a flag indicating that it should clear
 * the halt in software during the next device reset. Hopefully this
 * will permit everything to work correctly. Furthermore, although the
 * specification allows the bulk-out endpoint to halt when the host sends
 * too much data, implementing this would cause an unavoidable race.
 * The driver will always use the "no-stall" approach for OUT transfers.
 *
 * One subtle point concerns sending status-stage responses for ep0
 * requests. Some of these requests, such as device reset, can involve
 * interrupting an ongoing file I/O operation, which might take an
 * arbitrarily long time. During that delay the host might give up on
 * the original ep0 request and issue a new one. When that happens the
 * driver should not notify the host about completion of the original
 * request, as the host will no longer be waiting for it. So the driver
 * assigns to each ep0 request a unique tag, and it keeps track of the
 * tag value of the request associated with a long-running exception
 * (device-reset, interface-change, or configuration-change). When the
 * exception handler is finished, the status-stage response is submitted
 * only if the current ep0 request tag is equal to the exception request
 * tag. Thus only the most recently received ep0 request will get a
 * status-stage response.
 *
 * Warning: This driver source file is too long. It ought to be split up
 * into a header file plus about 3 separate .c files, to handle the details
 * of the Gadget, USB Mass Storage, and SCSI protocols.
 */
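
/*
 * Illustrative sketch only (kept out of the build): the buffer-head cycle
 * described above, reduced to its bare shape. The helper name
 * example_fill_one_buffhd() is hypothetical; the real producers and
 * consumers are do_read(), do_write() and the bulk completion handlers
 * further down in this file.
 */
#if 0
static int example_fill_one_buffhd(struct fsg_common *common)
{
	struct fsg_buffhd *bh = common->next_buffhd_to_fill;
	int rc;

	/* 1. Wait until this stage of the circular pipeline is EMPTY. */
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	/* 2. While the buffer is being filled (file I/O or a queued USB
	 *    request) it is marked BUSY ... */
	bh->state = BUF_STATE_BUSY;

	/* 3. ... and FULL once the data is in place, so the drain side
	 *    (next_buffhd_to_drain) may empty it and mark it EMPTY again. */
	bh->state = BUF_STATE_FULL;

	/* 4. Advance to the next stage of the ring. */
	common->next_buffhd_to_fill = bh->next;
	return 0;
}
#endif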

/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */

#include <config.h>
#include <div64.h>
#include <hexdump.h>
#include <log.h>
#include <malloc.h>
#include <common.h>
#include <console.h>
#include <g_dnl.h>
#include <dm/devres.h>
#include <linux/bug.h>

#include <linux/err.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <usb_mass_storage.h>

#include <asm/unaligned.h>
#include <linux/bitops.h>
#include <linux/usb/composite.h>
#include <linux/bitmap.h>

/*------------------------------------------------------------------------*/

#define FSG_DRIVER_DESC		"Mass Storage Function"
#define FSG_DRIVER_VERSION	"2012/06/5"

static const char fsg_string_interface[] = "Mass Storage";

#define FSG_NO_INTR_EP		1
#define FSG_NO_DEVICE_STRINGS	1
#define FSG_NO_OTG		1

#include "storage_common.c"

/*-------------------------------------------------------------------------*/

#define GFP_ATOMIC ((gfp_t) 0)
#define PAGE_CACHE_SHIFT	12
#define PAGE_CACHE_SIZE		(1 << PAGE_CACHE_SHIFT)
#define kthread_create(...)	__builtin_return_address(0)
#define wait_for_completion(...) do {} while (0)

struct kref {int x; };
struct completion {int x; };

struct fsg_dev;
struct fsg_common;

/* Data shared by all the FSG instances. */
struct fsg_common {
	struct usb_gadget *gadget;
	struct fsg_dev *fsg, *new_fsg;

	struct usb_ep *ep0;		/* Copy of gadget->ep0 */
	struct usb_request *ep0req;	/* Copy of cdev->req */
	unsigned int ep0_req_tag;

	struct fsg_buffhd *next_buffhd_to_fill;
	struct fsg_buffhd *next_buffhd_to_drain;
	struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];

	int cmnd_size;
	u8 cmnd[MAX_COMMAND_SIZE];

	unsigned int nluns;
	unsigned int lun;
	struct fsg_lun luns[FSG_MAX_LUNS];

	unsigned int bulk_out_maxpacket;
	enum fsg_state state;		/* For exception handling */
	unsigned int exception_req_tag;

	enum data_direction data_dir;
	u32 data_size;
	u32 data_size_from_cmnd;
	u32 tag;
	u32 residue;
	u32 usb_amount_left;

	unsigned int can_stall:1;
	unsigned int free_storage_on_release:1;
	unsigned int phase_error:1;
	unsigned int short_packet_received:1;
	unsigned int bad_lun_okay:1;
	unsigned int running:1;
	unsigned int eject:1;

	int thread_wakeup_needed;
	struct completion thread_notifier;
	struct task_struct *thread_task;

	/* Callback functions. */
	const struct fsg_operations *ops;
	/* Gadget's private data. */
	void *private_data;

	const char *vendor_name;	/*  8 characters or less */
	const char *product_name;	/* 16 characters or less */
	u16 release;

	/* Vendor (8 chars), product (16 chars), release (4
	 * hexadecimal digits) and NUL byte */
	char inquiry_string[8 + 16 + 4 + 1];

	struct kref ref;
};

struct fsg_config {
	unsigned nluns;
	struct fsg_lun_config {
		const char *filename;
		char ro;
		char removable;
		char cdrom;
		char nofua;
	} luns[FSG_MAX_LUNS];

	/* Callback functions. */
	const struct fsg_operations *ops;
	/* Gadget's private data. */
	void *private_data;

	const char *vendor_name;	/*  8 characters or less */
	const char *product_name;	/* 16 characters or less */

	char can_stall;
};
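
/*
 * Minimal sketch (not compiled in) of how a gadget might fill in a
 * single-LUN fsg_config before handing it over to the MSF. The backing
 * path and the helper name are hypothetical placeholders.
 */
#if 0
static void example_fill_config(struct fsg_config *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->nluns = 1;
	cfg->luns[0].filename = "/dev/example-backing-store"; /* hypothetical */
	cfg->luns[0].ro = 0;
	cfg->luns[0].removable = 1;
	cfg->luns[0].cdrom = 0;

	cfg->vendor_name = "UBoot";	/*  8 characters or less */
	cfg->product_name = "UMS disk";	/* 16 characters or less */

	cfg->can_stall = 0;
	cfg->ops = NULL;		/* or a struct fsg_operations */
	cfg->private_data = NULL;
}
#endif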

struct fsg_dev {
	struct usb_function function;
	struct usb_gadget *gadget;	/* Copy of cdev->gadget */
	struct fsg_common *common;

	u16 interface_number;

	unsigned int bulk_in_enabled:1;
	unsigned int bulk_out_enabled:1;

	unsigned long atomic_bitflags;
#define IGNORE_BULK_OUT 0

	struct usb_ep *bulk_in;
	struct usb_ep *bulk_out;
};


static inline int __fsg_is_set(struct fsg_common *common,
			       const char *func, unsigned line)
{
	if (common->fsg)
		return 1;
	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
#ifdef __UBOOT__
	assert_noisy(false);
#else
	WARN_ON(1);
#endif
	return 0;
}

#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))


static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
{
	return container_of(f, struct fsg_dev, function);
}


typedef void (*fsg_routine_t)(struct fsg_dev *);

static int exception_in_progress(struct fsg_common *common)
{
	return common->state > FSG_STATE_IDLE;
}

/* Make bulk-out requests be divisible by the maxpacket size */
static void set_bulk_out_req_length(struct fsg_common *common,
				    struct fsg_buffhd *bh, unsigned int length)
{
	unsigned int rem;

	bh->bulk_out_intended_length = length;
	rem = length % common->bulk_out_maxpacket;
	if (rem > 0)
		length += common->bulk_out_maxpacket - rem;
	bh->outreq->length = length;
}
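
/*
 * Example: with bulk_out_maxpacket == 512, a requested length of 1000 gives
 * rem == 488, so the queued request is padded up to 1024 bytes while
 * bh->bulk_out_intended_length still records the 1000 bytes actually wanted.
 */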

/*-------------------------------------------------------------------------*/

static struct ums *ums;
static int ums_count;
static struct fsg_common *the_fsg_common;
static struct udevice *udcdev;

static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
	const char *name;

	if (ep == fsg->bulk_in)
		name = "bulk-in";
	else if (ep == fsg->bulk_out)
		name = "bulk-out";
	else
		name = ep->name;
	DBG(fsg, "%s set halt\n", name);
	return usb_ep_set_halt(ep);
}

/*-------------------------------------------------------------------------*/

/* These routines may be called in process context or in_irq */

/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common)
{
	common->thread_wakeup_needed = 1;
}

static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
{
	/* Do nothing if a higher-priority exception is already in progress.
	 * If a lower-or-equal priority exception is in progress, preempt it
	 * and notify the main thread by sending it a signal. */
	if (common->state <= new_state) {
		common->exception_req_tag = common->ep0_req_tag;
		common->state = new_state;
		common->thread_wakeup_needed = 1;
	}
}

/*-------------------------------------------------------------------------*/

static int ep0_queue(struct fsg_common *common)
{
	int rc;

	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
	common->ep0->driver_data = common;
	if (rc != 0 && rc != -ESHUTDOWN) {
		/* We can't do much more than wait for a reset */
		WARNING(common, "error in submission: %s --> %d\n",
			common->ep0->name, rc);
	}
	return rc;
}

/*-------------------------------------------------------------------------*/

/* Bulk and interrupt endpoint completion handlers.
 * These always run in_irq. */

static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common *common = ep->driver_data;
	struct fsg_buffhd *bh = req->context;

	if (req->status || req->actual != req->length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	bh->inreq_busy = 0;
	bh->state = BUF_STATE_EMPTY;
	wakeup_thread(common);
}

static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common *common = ep->driver_data;
	struct fsg_buffhd *bh = req->context;

	dump_msg(common, "bulk-out", req->buf, req->actual);
	if (req->status || req->actual != bh->bulk_out_intended_length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual,
		    bh->bulk_out_intended_length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	bh->outreq_busy = 0;
	bh->state = BUF_STATE_FULL;
	wakeup_thread(common);
}

/*-------------------------------------------------------------------------*/

/* Ep0 class-specific handlers. These always run in_irq. */

static int fsg_setup(struct usb_function *f,
		     const struct usb_ctrlrequest *ctrl)
{
	struct fsg_dev *fsg = fsg_from_func(f);
	struct usb_request *req = fsg->common->ep0req;
	u16 w_index = get_unaligned_le16(&ctrl->wIndex);
	u16 w_value = get_unaligned_le16(&ctrl->wValue);
	u16 w_length = get_unaligned_le16(&ctrl->wLength);

	if (!fsg_is_set(fsg->common))
		return -EOPNOTSUPP;

	switch (ctrl->bRequest) {

	case USB_BULK_RESET_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0)
			return -EDOM;

		/* Raise an exception to stop the current operation
		 * and reinitialize our state. */
		DBG(fsg, "bulk reset request\n");
		raise_exception(fsg->common, FSG_STATE_RESET);
		return DELAYED_STATUS;

	case USB_BULK_GET_MAX_LUN_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0)
			return -EDOM;
		VDBG(fsg, "get max LUN\n");
		*(u8 *)req->buf = fsg->common->nluns - 1;

		/* Respond with data/status */
		req->length = min((u16)1, w_length);
		return ep0_queue(fsg->common);
	}

	VDBG(fsg,
	     "unknown class-specific control req "
	     "%02x.%02x v%04x i%04x l%u\n",
	     ctrl->bRequestType, ctrl->bRequest,
	     get_unaligned_le16(&ctrl->wValue), w_index, w_length);
	return -EOPNOTSUPP;
}
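
/*
 * For reference, the two class-specific requests handled above look like
 * this on the wire (values from the USB Mass Storage Bulk-Only Transport
 * specification):
 *
 *	request				bmRequestType	bRequest  wValue  wIndex     wLength
 *	Bulk-Only Mass Storage Reset	0x21		0xFF	  0	  interface  0
 *	Get Max LUN			0xA1		0xFE	  0	  interface  1
 *
 * Get Max LUN returns a single byte holding the highest LUN number, which
 * is why the handler above replies with nluns - 1.
 */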

/*-------------------------------------------------------------------------*/

/* All the following routines run in process context */

/* Use this for bulk or interrupt transfers, not ep0 */
static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
			   struct usb_request *req, int *pbusy,
			   enum fsg_buffer_state *state)
{
	int rc;

	if (ep == fsg->bulk_in)
		dump_msg(fsg, "bulk-in", req->buf, req->length);

	*pbusy = 1;
	*state = BUF_STATE_BUSY;
	rc = usb_ep_queue(ep, req, GFP_KERNEL);
	if (rc != 0) {
		*pbusy = 0;
		*state = BUF_STATE_EMPTY;

		/* We can't do much more than wait for a reset */

		/* Note: currently the net2280 driver fails zero-length
		 * submissions if DMA is enabled. */
		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
					  req->length == 0))
			WARNING(fsg, "error in submission: %s --> %d\n",
				ep->name, rc);
	}
}

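/*
 * START_TRANSFER_OR() deliberately ends with a dangling "else": the
 * statement written right after the macro invocation becomes the error path
 * taken when common->fsg is NULL, while START_TRANSFER() plugs that slot
 * with a harmless "(void)0". Callers of START_TRANSFER_OR() therefore
 * follow it with the statement to run on failure (see do_read(), do_write()
 * and send_status() below).
 */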
#define START_TRANSFER_OR(common, ep_name, req, pbusy, state) \
	if (fsg_is_set(common)) \
		start_transfer((common)->fsg, (common)->fsg->ep_name, \
			       req, pbusy, state); \
	else

#define START_TRANSFER(common, ep_name, req, pbusy, state) \
	START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0

static void busy_indicator(void)
{
	static int state;

	switch (state) {
	case 0:
		puts("\r|"); break;
	case 1:
		puts("\r/"); break;
	case 2:
		puts("\r-"); break;
	case 3:
		puts("\r\\"); break;
	case 4:
		puts("\r|"); break;
	case 5:
		puts("\r/"); break;
	case 6:
		puts("\r-"); break;
	case 7:
		puts("\r\\"); break;
	default:
		state = 0;
	}
	if (state++ == 8)
		state = 0;
}

static int sleep_thread(struct fsg_common *common)
{
	int rc = 0;
	int i = 0, k = 0;

	/* Wait until a signal arrives or we are woken up */
	for (;;) {
		if (common->thread_wakeup_needed)
			break;

		if (++i == 20000) {
			busy_indicator();
			i = 0;
			k++;
		}

		if (k == 10) {
			/* Handle START-STOP UNIT */
			if (common->eject)
				return -EPIPE;

			/* Handle CTRL+C */
			if (ctrlc())
				return -EPIPE;

			/* Check cable connection */
			if (!g_dnl_board_usb_cable_connected())
				return -EIO;

			k = 0;
		}

		dm_usb_gadget_handle_interrupts(udcdev);
	}
	common->thread_wakeup_needed = 0;
	return rc;
}

/*-------------------------------------------------------------------------*/

static int do_read(struct fsg_common *common)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	u32 lba;
	struct fsg_buffhd *bh;
	int rc;
	u32 amount_left;
	loff_t file_offset;
	unsigned int amount;
	unsigned int partial_page;
	ssize_t nread;

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (common->cmnd[0] == SC_READ_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = don't read from the
		 * cache), but we don't implement them. */
		if ((common->cmnd[1] & ~0x18) != 0) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}
	file_offset = ((loff_t)lba) << curlun->blkbits;

	/* Carry out the file reads */
	amount_left = common->data_size_from_cmnd;
	if (unlikely(amount_left == 0)) {
		return -EIO;		/* No default reply */
	}

	for (;;) {

		/* Figure out how much we need to read:
		 * Try to read the remaining amount.
		 * But don't read more than the buffer size.
		 * And don't try to read past the end of the file.
		 * Finally, if we're not at a page boundary, don't read past
		 * the next page.
		 * If this means reading 0 then we were asked to read past
		 * the end of file. */
		amount = min(amount_left, FSG_BUFLEN);
		partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
		if (partial_page > 0)
			amount = min(amount, (unsigned int)PAGE_CACHE_SIZE -
					     partial_page);

		/* Wait for the next buffer to become available */
		bh = common->next_buffhd_to_fill;
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(common);
			if (rc)
				return rc;
		}

		/* If we were asked to read past the end of file,
		 * end with an empty buffer. */
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->info_valid = 1;
			bh->inreq->length = 0;
			bh->state = BUF_STATE_FULL;
			break;
		}

		/* Perform the read */
		rc = ums[common->lun].read_sector(&ums[common->lun],
				lldiv(file_offset, curlun->blksize),
				lldiv(amount, curlun->blksize),
				(char __user *)bh->buf);
		if (!rc)
			return -EIO;

		nread = rc * curlun->blksize;

		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long)file_offset,
		      (int)nread);

		if (nread < 0) {
			LDBG(curlun, "error in file read: %d\n",
			     (int)nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file read: %d/%u\n",
			     (int)nread, amount);
			nread -= (nread & (curlun->blksize - 1));
			/* Round down to a block */
		}
		file_offset += nread;
		amount_left -= nread;
		common->residue -= nread;
		bh->inreq->length = nread;
		bh->state = BUF_STATE_FULL;

		/* If an error occurred, report it and its position */
		if (nread < amount) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->info_valid = 1;
			break;
		}

		if (amount_left == 0)
			break;		/* No more left to read */

		/* Send this buffer and go read some more */
		bh->inreq->zero = 0;
		START_TRANSFER_OR(common, bulk_in, bh->inreq,
				  &bh->inreq_busy, &bh->state)
			/* Don't know what to do if
			 * common->fsg is NULL */
			return -EIO;
		common->next_buffhd_to_fill = bh->next;
	}

	return -EIO;		/* No default reply */
}
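
/*
 * For reference, the READ(10) CDB fields consumed above (READ(6) instead
 * packs the LBA into bytes 1..3):
 *
 *	byte 0		opcode (0x28)
 *	byte 1		flags; only DPO/FUA (mask 0x18) are tolerated
 *	bytes 2..5	logical block address, big-endian
 *	bytes 7..8	transfer length in blocks, big-endian
 *
 * The transfer length has already been folded into data_size_from_cmnd by
 * the command dispatcher, so only the LBA is re-parsed here.
 */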

/*-------------------------------------------------------------------------*/

static int do_write(struct fsg_common *common)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	u32 lba;
	struct fsg_buffhd *bh;
	int get_some_more;
	u32 amount_left_to_req, amount_left_to_write;
	loff_t usb_offset, file_offset;
	unsigned int amount;
	unsigned int partial_page;
	ssize_t nwritten;
	int rc;

	if (curlun->ro) {
		curlun->sense_data = SS_WRITE_PROTECTED;
		return -EINVAL;
	}

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (common->cmnd[0] == SC_WRITE_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = write directly to the
		 * medium). We don't implement DPO; we implement FUA by
		 * performing synchronous output. */
		if (common->cmnd[1] & ~0x18) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* Carry out the file writes */
	get_some_more = 1;
	file_offset = usb_offset = ((loff_t)lba) << curlun->blkbits;
	amount_left_to_req = common->data_size_from_cmnd;
	amount_left_to_write = common->data_size_from_cmnd;

	while (amount_left_to_write > 0) {

		/* Queue a request for more data from the host */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && get_some_more) {

			/* Figure out how much we want to get:
			 * Try to get the remaining amount.
			 * But don't get more than the buffer size.
			 * And don't try to go past the end of the file.
			 * If we're not at a page boundary,
			 * don't go past the next page.
			 * If this means getting 0, then we were asked
			 * to write past the end of file.
			 * Finally, round down to a block boundary. */
			amount = min(amount_left_to_req, FSG_BUFLEN);
			partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
			if (partial_page > 0)
				amount = min(amount,
					     (unsigned int)PAGE_CACHE_SIZE - partial_page);

			if (amount == 0) {
				get_some_more = 0;
				curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
				curlun->info_valid = 1;
				continue;
			}
			amount -= (amount & (curlun->blksize - 1));
			if (amount == 0) {

				/* Why were we asked to transfer a
				 * partial block? */
				get_some_more = 0;
				continue;
			}

			/* Get the next buffer */
			usb_offset += amount;
			common->usb_amount_left -= amount;
			amount_left_to_req -= amount;
			if (amount_left_to_req == 0)
				get_some_more = 0;

			/* amount is always divisible by 512, hence by
			 * the bulk-out maxpacket size */
			bh->outreq->length = amount;
			bh->bulk_out_intended_length = amount;
			bh->outreq->short_not_ok = 1;
			START_TRANSFER_OR(common, bulk_out, bh->outreq,
					  &bh->outreq_busy, &bh->state)
				/* Don't know what to do if
				 * common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			continue;
		}

		/* Write the received data to the backing file */
		bh = common->next_buffhd_to_drain;
		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
			break;			/* We stopped early */
		if (bh->state == BUF_STATE_FULL) {
			common->next_buffhd_to_drain = bh->next;
			bh->state = BUF_STATE_EMPTY;

			/* Did something go wrong with the transfer? */
			if (bh->outreq->status != 0) {
				curlun->sense_data = SS_COMMUNICATION_FAILURE;
				curlun->info_valid = 1;
				break;
			}

			amount = bh->outreq->actual;

			/* Perform the write */
			rc = ums[common->lun].write_sector(&ums[common->lun],
					lldiv(file_offset, curlun->blksize),
					lldiv(amount, curlun->blksize),
					(char __user *)bh->buf);
			if (!rc)
				return -EIO;
			nwritten = rc * curlun->blksize;

			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
			      (unsigned long long)file_offset,
			      (int)nwritten);

			if (nwritten < 0) {
				LDBG(curlun, "error in file write: %d\n",
				     (int)nwritten);
				nwritten = 0;
			} else if (nwritten < amount) {
				LDBG(curlun, "partial file write: %d/%u\n",
				     (int)nwritten, amount);
				nwritten -= (nwritten & (curlun->blksize - 1));
				/* Round down to a block */
			}
			file_offset += nwritten;
			amount_left_to_write -= nwritten;
			common->residue -= nwritten;

			/* If an error occurred, report it and its position */
			if (nwritten < amount) {
				printf("nwritten:%zd amount:%u\n", nwritten,
				       amount);
				curlun->sense_data = SS_WRITE_ERROR;
				curlun->info_valid = 1;
				break;
			}

			/* Did the host decide to stop early? */
			if (bh->outreq->actual != bh->outreq->length) {
				common->short_packet_received = 1;
				break;
			}
			continue;
		}

		/* Wait for something to happen */
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	return -EIO;		/* No default reply */
}

/*-------------------------------------------------------------------------*/

static int do_synchronize_cache(struct fsg_common *common)
{
	return 0;
}

/*-------------------------------------------------------------------------*/

static int do_verify(struct fsg_common *common)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	u32 lba;
	u32 verification_length;
	struct fsg_buffhd *bh = common->next_buffhd_to_fill;
	loff_t file_offset;
	u32 amount_left;
	unsigned int amount;
	ssize_t nread;
	int rc;

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	lba = get_unaligned_be32(&common->cmnd[2]);
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* We allow DPO (Disable Page Out = don't save data in the
	 * cache) but we don't implement it. */
	if (common->cmnd[1] & ~0x10) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	verification_length = get_unaligned_be16(&common->cmnd[7]);
	if (unlikely(verification_length == 0))
		return -EIO;		/* No default reply */

	/* Prepare to carry out the file verify */
	amount_left = verification_length << curlun->blkbits;
	file_offset = ((loff_t)lba) << curlun->blkbits;

	/* Write out all the dirty buffers before invalidating them */

	/* Just try to read the requested blocks */
	while (amount_left > 0) {

		/* Figure out how much we need to read:
		 * Try to read the remaining amount, but not more than
		 * the buffer size.
		 * And don't try to read past the end of the file.
		 * If this means reading 0 then we were asked to read
		 * past the end of file. */
		amount = min(amount_left, FSG_BUFLEN);
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->info_valid = 1;
			break;
		}

		/* Perform the read */
		rc = ums[common->lun].read_sector(&ums[common->lun],
				lldiv(file_offset, curlun->blksize),
				lldiv(amount, curlun->blksize),
				(char __user *)bh->buf);
		if (!rc)
			return -EIO;
		nread = rc * curlun->blksize;

		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long)file_offset,
		      (int)nread);
		if (nread < 0) {
			LDBG(curlun, "error in file verify: %d\n",
			     (int)nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file verify: %d/%u\n",
			     (int)nread, amount);
			nread -= (nread & (curlun->blksize - 1));
			/* Round down to a sector */
		}
		if (nread == 0) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->info_valid = 1;
			break;
		}
		file_offset += nread;
		amount_left -= nread;
	}
	return 0;
}

/*-------------------------------------------------------------------------*/

static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	static const char vendor_id[] = "Linux ";
	u8 *buf = (u8 *)bh->buf;

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		memset(buf, 0, 36);
		buf[0] = 0x7f;		/* Unsupported, no device-type */
		buf[4] = 31;		/* Additional length */
		return 36;
	}

	memset(buf, 0, 8);
	buf[0] = TYPE_DISK;
	buf[1] = curlun->removable ? 0x80 : 0;
	buf[2] = 2;		/* ANSI SCSI level 2 */
	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
	buf[4] = 31;		/* Additional length */
	/* No special options */
	sprintf((char *)(buf + 8), "%-8s%-16s%04x", (char *)vendor_id,
		ums[common->lun].name, (u16)0xffff);

	return 36;
}

static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	u8 *buf = (u8 *)bh->buf;
	u32 sd, sdinfo = 0;
	int valid;

	/*
	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
	 *
	 * If a REQUEST SENSE command is received from an initiator
	 * with a pending unit attention condition (before the target
	 * generates the contingent allegiance condition), then the
	 * target shall either:
	 *   a) report any pending sense data and preserve the unit
	 *	attention condition on the logical unit, or,
	 *   b) report the unit attention condition, may discard any
	 *	pending sense data, and clear the unit attention
	 *	condition on the logical unit for that initiator.
	 *
	 * FSG normally uses option a); enable this code to use option b).
	 */
#if 0
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
	}
#endif

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
		valid = 0;
	} else {
		sd = curlun->sense_data;
		valid = curlun->info_valid << 7;
		curlun->sense_data = SS_NO_SENSE;
		curlun->info_valid = 0;
	}

	memset(buf, 0, 18);
	buf[0] = valid | 0x70;			/* Valid, current error */
	buf[2] = SK(sd);
	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
	buf[7] = 18 - 8;			/* Additional sense length */
	buf[12] = ASC(sd);
	buf[13] = ASCQ(sd);
	return 18;
}
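
/*
 * Layout of the 18-byte fixed-format sense data built above:
 *
 *	byte 0		0x70 (current error) ORed with the valid bit
 *	byte 2		sense key			SK(sd)
 *	bytes 3..6	information field, big-endian	(sdinfo)
 *	byte 7		additional sense length		(18 - 8 = 10)
 *	byte 12		additional sense code		ASC(sd)
 *	byte 13		additional sense code qualifier	ASCQ(sd)
 */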

static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	u32 lba = get_unaligned_be32(&common->cmnd[2]);
	int pmi = common->cmnd[8];
	u8 *buf = (u8 *)bh->buf;

	/* Check the PMI and LBA fields */
	if (pmi > 1 || (pmi == 0 && lba != 0)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
						/* Max logical block */
	put_unaligned_be32(curlun->blksize, &buf[4]);	/* Block length */
	return 8;
}
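
/*
 * The resulting 8-byte READ CAPACITY(10) response is simply the address of
 * the last logical block (num_sectors - 1) followed by the block length in
 * bytes, both big-endian.
 */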

static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	int msf = common->cmnd[1] & 0x02;
	u32 lba = get_unaligned_be32(&common->cmnd[2]);
	u8 *buf = (u8 *)bh->buf;

	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	memset(buf, 0, 8);
	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
	store_cdrom_address(&buf[4], msf, lba);
	return 8;
}


static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	int msf = common->cmnd[1] & 0x02;
	int start_track = common->cmnd[6];
	u8 *buf = (u8 *)bh->buf;

	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
	    start_track > 1) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	memset(buf, 0, 20);
	buf[1] = (20 - 2);	/* TOC data length */
	buf[2] = 1;		/* First track number */
	buf[3] = 1;		/* Last track number */
	buf[5] = 0x16;		/* Data track, copying allowed */
	buf[6] = 0x01;		/* Only track is number 1 */
	store_cdrom_address(&buf[8], msf, 0);

	buf[13] = 0x16;		/* Lead-out track is data */
	buf[14] = 0xAA;		/* Lead-out track number */
	store_cdrom_address(&buf[16], msf, curlun->num_sectors);

	return 20;
}

static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	int mscmnd = common->cmnd[0];
	u8 *buf = (u8 *)bh->buf;
	u8 *buf0 = buf;
	int pc, page_code;
	int changeable_values, all_pages;
	int valid_page = 0;
	int len, limit;

	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	pc = common->cmnd[2] >> 6;
	page_code = common->cmnd[2] & 0x3f;
	if (pc == 3) {
		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
		return -EINVAL;
	}
	changeable_values = (pc == 1);
	all_pages = (page_code == 0x3f);

	/* Write the mode parameter header. Fixed values are: default
	 * medium type, no cache control (DPOFUA), and no block descriptors.
	 * The only variable value is the WriteProtect bit. We will fill in
	 * the mode data length later. */
	memset(buf, 0, 8);
	if (mscmnd == SC_MODE_SENSE_6) {
		buf[2] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
		buf += 4;
		limit = 255;
	} else {			/* SC_MODE_SENSE_10 */
		buf[3] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
		buf += 8;
		limit = 65535;		/* Should really be FSG_BUFLEN */
	}

	/* No block descriptors */

	/* The mode pages, in numerical order. The only page we support
	 * is the Caching page. */
	if (page_code == 0x08 || all_pages) {
		valid_page = 1;
		buf[0] = 0x08;		/* Page code */
		buf[1] = 10;		/* Page length */
		memset(buf + 2, 0, 10);	/* None of the fields are changeable */

		if (!changeable_values) {
			buf[2] = 0x04;	/* Write cache enable, */
					/* Read cache not disabled */
					/* No cache retention priorities */
			put_unaligned_be16(0xffff, &buf[4]);
					/* Don't disable prefetch */
					/* Minimum prefetch = 0 */
			put_unaligned_be16(0xffff, &buf[8]);
					/* Maximum prefetch */
			put_unaligned_be16(0xffff, &buf[10]);
					/* Maximum prefetch ceiling */
		}
		buf += 12;
	}

	/* Check that a valid page was requested and the mode data length
	 * isn't too long. */
	len = buf - buf0;
	if (!valid_page || len > limit) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/* Store the mode data length */
	if (mscmnd == SC_MODE_SENSE_6)
		buf0[0] = len - 1;
	else
		put_unaligned_be16(len - 2, buf0);
	return len;
}
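
/*
 * Worked example: a MODE SENSE(6) asking for the Caching page gets the
 * 4-byte header plus the 12-byte page built above, len == 16, so buf0[0]
 * (mode data length, which excludes itself) is set to 15. MODE SENSE(10)
 * uses an 8-byte header and stores len - 2 as a big-endian 16-bit length.
 */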


static int do_start_stop(struct fsg_common *common)
{
	struct fsg_lun *curlun = &common->luns[common->lun];

	if (!curlun) {
		return -EINVAL;
	} else if (!curlun->removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	}

	common->eject = 1;

	return 0;
}

static int do_prevent_allow(struct fsg_common *common)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	int prevent;

	if (!curlun->removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	}

	prevent = common->cmnd[4] & 0x01;
	if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	if (curlun->prevent_medium_removal && !prevent)
		fsg_lun_fsync_sub(curlun);
	curlun->prevent_medium_removal = prevent;
	return 0;
}


static int do_read_format_capacities(struct fsg_common *common,
				     struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	u8 *buf = (u8 *)bh->buf;

	buf[0] = buf[1] = buf[2] = 0;
	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
	buf += 4;

	put_unaligned_be32(curlun->num_sectors, &buf[0]);
						/* Number of blocks */
	put_unaligned_be32(curlun->blksize, &buf[4]);	/* Block length */
	buf[4] = 0x02;				/* Current capacity */
	return 12;
}


static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = &common->luns[common->lun];

	/* We don't support MODE SELECT */
	if (curlun)
		curlun->sense_data = SS_INVALID_COMMAND;
	return -EINVAL;
}


/*-------------------------------------------------------------------------*/

static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int rc;

	rc = fsg_set_halt(fsg, fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint halt\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
			rc = 0;
			break;
		}

		rc = usb_ep_set_halt(fsg->bulk_in);
	}
	return rc;
}

static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int rc;

	DBG(fsg, "bulk-in set wedge\n");
	rc = 0;		/* usb_ep_set_wedge(fsg->bulk_in); */
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
			rc = 0;
			break;
		}
	}
	return rc;
}

static int pad_with_zeros(struct fsg_dev *fsg)
{
	struct fsg_buffhd *bh = fsg->common->next_buffhd_to_fill;
	u32 nkeep = bh->inreq->length;
	u32 nsend;
	int rc;

	bh->state = BUF_STATE_EMPTY;		/* For the first iteration */
	fsg->common->usb_amount_left = nkeep + fsg->common->residue;
	while (fsg->common->usb_amount_left > 0) {

		/* Wait for the next buffer to be free */
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(fsg->common);
			if (rc)
				return rc;
		}

		nsend = min(fsg->common->usb_amount_left, FSG_BUFLEN);
		memset(bh->buf + nkeep, 0, nsend - nkeep);
		bh->inreq->length = nsend;
		bh->inreq->zero = 0;
		start_transfer(fsg, fsg->bulk_in, bh->inreq,
			       &bh->inreq_busy, &bh->state);
		bh = fsg->common->next_buffhd_to_fill = bh->next;
		fsg->common->usb_amount_left -= nsend;
		nkeep = 0;
	}
	return 0;
}

static int throw_away_data(struct fsg_common *common)
{
	struct fsg_buffhd *bh;
	u32 amount;
	int rc;

	for (bh = common->next_buffhd_to_drain;
	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
	     bh = common->next_buffhd_to_drain) {

		/* Throw away the data in a filled buffer */
		if (bh->state == BUF_STATE_FULL) {
			bh->state = BUF_STATE_EMPTY;
			common->next_buffhd_to_drain = bh->next;

			/* A short packet or an error ends everything */
			if (bh->outreq->actual != bh->outreq->length ||
			    bh->outreq->status != 0) {
				raise_exception(common,
						FSG_STATE_ABORT_BULK_OUT);
				return -EINTR;
			}
			continue;
		}

		/* Try to submit another request if we need one */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY
		    && common->usb_amount_left > 0) {
			amount = min(common->usb_amount_left, FSG_BUFLEN);

			/* amount is always divisible by 512, hence by
			 * the bulk-out maxpacket size */
			bh->outreq->length = amount;
			bh->bulk_out_intended_length = amount;
			bh->outreq->short_not_ok = 1;
			START_TRANSFER_OR(common, bulk_out, bh->outreq,
					  &bh->outreq_busy, &bh->state)
				/* Don't know what to do if
				 * common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			common->usb_amount_left -= amount;
			continue;
		}

		/* Otherwise wait for something to happen */
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}
	return 0;
}


static int finish_reply(struct fsg_common *common)
{
	struct fsg_buffhd *bh = common->next_buffhd_to_fill;
	int rc = 0;

	switch (common->data_dir) {
	case DATA_DIR_NONE:
		break;			/* Nothing to send */

	/* If we don't know whether the host wants to read or write,
	 * this must be CB or CBI with an unknown command. We mustn't
	 * try to send or receive any data. So stall both bulk pipes
	 * if we can and wait for a reset. */
	case DATA_DIR_UNKNOWN:
		if (!common->can_stall) {
			/* Nothing */
		} else if (fsg_is_set(common)) {
			fsg_set_halt(common->fsg, common->fsg->bulk_out);
			rc = halt_bulk_in_endpoint(common->fsg);
		} else {
			/* Don't know what to do if common->fsg is NULL */
			rc = -EIO;
		}
		break;

	/* All but the last buffer of data must have already been sent */
	case DATA_DIR_TO_HOST:
		if (common->data_size == 0) {
			/* Nothing to send */

		/* If there's no residue, simply send the last buffer */
		} else if (common->residue == 0) {
			bh->inreq->zero = 0;
			START_TRANSFER_OR(common, bulk_in, bh->inreq,
					  &bh->inreq_busy, &bh->state)
				return -EIO;
			common->next_buffhd_to_fill = bh->next;

		/* For Bulk-only, if we're allowed to stall then send the
		 * short packet and halt the bulk-in endpoint. If we can't
		 * stall, pad out the remaining data with 0's. */
		} else if (common->can_stall) {
			bh->inreq->zero = 1;
			START_TRANSFER_OR(common, bulk_in, bh->inreq,
					  &bh->inreq_busy, &bh->state)
				/* Don't know what to do if
				 * common->fsg is NULL */
				rc = -EIO;
			common->next_buffhd_to_fill = bh->next;
			if (common->fsg)
				rc = halt_bulk_in_endpoint(common->fsg);
		} else if (fsg_is_set(common)) {
			rc = pad_with_zeros(common->fsg);
		} else {
			/* Don't know what to do if common->fsg is NULL */
			rc = -EIO;
		}
		break;

	/* We have processed all we want from the data the host has sent.
	 * There may still be outstanding bulk-out requests. */
	case DATA_DIR_FROM_HOST:
		if (common->residue == 0) {
			/* Nothing to receive */

		/* Did the host stop sending unexpectedly early? */
		} else if (common->short_packet_received) {
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;

		/* We haven't processed all the incoming data. Even though
		 * we may be allowed to stall, doing so would cause a race.
		 * The controller may already have ACK'ed all the remaining
		 * bulk-out packets, in which case the host wouldn't see a
		 * STALL. Not realizing the endpoint was halted, it wouldn't
		 * clear the halt -- leading to problems later on. */
#if 0
		} else if (common->can_stall) {
			if (fsg_is_set(common))
				fsg_set_halt(common->fsg,
					     common->fsg->bulk_out);
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;
#endif

		/* We can't stall. Read in the excess data and throw it
		 * all away. */
		} else {
			rc = throw_away_data(common);
		}
		break;
	}
	return rc;
}


static int send_status(struct fsg_common *common)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	struct fsg_buffhd *bh;
	struct bulk_cs_wrap *csw;
	int rc;
	u8 status = USB_STATUS_PASS;
	u32 sd, sdinfo = 0;

	/* Wait for the next buffer to become available */
	bh = common->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	if (curlun)
		sd = curlun->sense_data;
	else if (common->bad_lun_okay)
		sd = SS_NO_SENSE;
	else
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;

	if (common->phase_error) {
		DBG(common, "sending phase-error status\n");
		status = USB_STATUS_PHASE_ERROR;
		sd = SS_INVALID_COMMAND;
	} else if (sd != SS_NO_SENSE) {
		DBG(common, "sending command-failure status\n");
		status = USB_STATUS_FAIL;
		VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
		     " info x%x\n",
		     SK(sd), ASC(sd), ASCQ(sd), sdinfo);
	}

	/* Store and send the Bulk-only CSW */
	csw = (void *)bh->buf;

	csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
	csw->Tag = common->tag;
	csw->Residue = cpu_to_le32(common->residue);
	csw->Status = status;

	bh->inreq->length = USB_BULK_CS_WRAP_LEN;
	bh->inreq->zero = 0;
	START_TRANSFER_OR(common, bulk_in, bh->inreq,
			  &bh->inreq_busy, &bh->state)
		/* Don't know what to do if common->fsg is NULL */
		return -EIO;

	common->next_buffhd_to_fill = bh->next;
	return 0;
}
1667
1668
1669/*-------------------------------------------------------------------------*/
1670
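/* In check_command() below, bit i of "mask" marks CDB byte i as allowed to
 * be non-zero for the given command; "needs_medium" appears unused in this
 * port and is kept for parity with the original Linux driver. */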
/* Check whether the command is properly formed and whether its data size
 * and direction agree with the values we already have. */
static int check_command(struct fsg_common *common, int cmnd_size,
			 enum data_direction data_dir, unsigned int mask,
			 int needs_medium, const char *name)
{
	int i;
	int lun = common->cmnd[1] >> 5;
	static const char dirletter[4] = {'u', 'o', 'i', 'n'};
	char hdlen[20];
	struct fsg_lun *curlun;

	hdlen[0] = 0;
	if (common->data_dir != DATA_DIR_UNKNOWN)
		sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
			common->data_size);
	VDBG(common, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
	     name, cmnd_size, dirletter[(int) data_dir],
	     common->data_size_from_cmnd, common->cmnd_size, hdlen);

	/* We can't reply at all until we know the correct data direction
	 * and size. */
	if (common->data_size_from_cmnd == 0)
		data_dir = DATA_DIR_NONE;
	if (common->data_size < common->data_size_from_cmnd) {
		/* Host data size < Device data size is a phase error.
		 * Carry out the command, but only transfer as much as
		 * we are allowed. */
		common->data_size_from_cmnd = common->data_size;
		common->phase_error = 1;
	}
	common->residue = common->data_size;
	common->usb_amount_left = common->data_size;

	/* Conflicting data directions is a phase error */
	if (common->data_dir != data_dir
	    && common->data_size_from_cmnd > 0) {
		common->phase_error = 1;
		return -EINVAL;
	}

	/* Verify the length of the command itself */
	if (cmnd_size != common->cmnd_size) {

		/* Special case workaround: There are plenty of buggy SCSI
		 * implementations. Many have issues with cbw->Length
		 * field passing a wrong command size. For those cases we
		 * always try to work around the problem by using the length
		 * sent by the host side provided it is at least as large
		 * as the correct command length.
		 * Examples of such cases would be MS-Windows, which issues
		 * REQUEST SENSE with cbw->Length == 12 where it should
		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
		 * REQUEST SENSE with cbw->Length == 10 where it should
		 * be 6 as well.
		 */
		if (cmnd_size <= common->cmnd_size) {
			DBG(common, "%s is buggy! Expected length %d "
			    "but we got %d\n", name,
			    cmnd_size, common->cmnd_size);
			cmnd_size = common->cmnd_size;
		} else {
			common->phase_error = 1;
			return -EINVAL;
		}
	}

	/* Check that the LUN values are consistent */
	if (common->lun != lun)
		DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
		    common->lun, lun);

	/* Check the LUN */
	if (common->lun < common->nluns) {
		curlun = &common->luns[common->lun];
		if (common->cmnd[0] != SC_REQUEST_SENSE) {
			curlun->sense_data = SS_NO_SENSE;
			curlun->info_valid = 0;
		}
	} else {
		curlun = NULL;
		common->bad_lun_okay = 0;

		/* INQUIRY and REQUEST SENSE commands are explicitly allowed
		 * to use unsupported LUNs; all others may not. */
		if (common->cmnd[0] != SC_INQUIRY &&
		    common->cmnd[0] != SC_REQUEST_SENSE) {
			DBG(common, "unsupported LUN %d\n", common->lun);
			return -EINVAL;
		}
	}
#if 0
	/* If a unit attention condition exists, only INQUIRY and
	 * REQUEST SENSE commands are allowed; anything else must fail. */
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
	    common->cmnd[0] != SC_INQUIRY &&
	    common->cmnd[0] != SC_REQUEST_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
		return -EINVAL;
	}
#endif
	/* Check that only command bytes listed in the mask are non-zero */
	common->cmnd[1] &= 0x1f;		/* Mask away the LUN */
	for (i = 1; i < cmnd_size; ++i) {
		if (common->cmnd[i] && !(mask & (1 << i))) {
			if (curlun)
				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}

	return 0;
}

/* Wrapper around check_command() for commands whose transfer size is given
 * in blocks rather than bytes. */
static int check_command_size_in_blocks(struct fsg_common *common,
		int cmnd_size, enum data_direction data_dir,
		unsigned int mask, int needs_medium, const char *name)
{
	common->data_size_from_cmnd <<= common->luns[common->lun].blkbits;
	return check_command(common, cmnd_size, data_dir,
			     mask, needs_medium, name);
}

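/*
 * Parse the CDB stored in common->cmnd and dispatch it to the matching
 * do_*() handler. Each case sets data_size_from_cmnd and runs
 * check_command() (or the *_size_in_blocks variant for READ/WRITE) before
 * doing any work; unknown opcodes fall through to the "unknown_cmnd" path,
 * which reports SS_INVALID_COMMAND in the sense data.
 */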
static int do_scsi_command(struct fsg_common *common)
{
	struct fsg_buffhd *bh;
	int rc;
	int reply = -EINVAL;
	int i;
	static char unknown[16];
	struct fsg_lun *curlun = &common->luns[common->lun];

	dump_cdb(common);

	/* Wait for the next buffer to become available for data or status */
	bh = common->next_buffhd_to_fill;
	common->next_buffhd_to_drain = bh;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}
	common->phase_error = 0;
	common->short_packet_received = 0;

	down_read(&common->filesem);	/* We're using the backing file */
	switch (common->cmnd[0]) {

	case SC_INQUIRY:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (1<<4), 0,
				      "INQUIRY");
		if (reply == 0)
			reply = do_inquiry(common, bh);
		break;

	case SC_MODE_SELECT_6:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
				      (1<<1) | (1<<4), 0,
				      "MODE SELECT(6)");
		if (reply == 0)
			reply = do_mode_select(common, bh);
		break;

	case SC_MODE_SELECT_10:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_FROM_HOST,
				      (1<<1) | (3<<7), 0,
				      "MODE SELECT(10)");
		if (reply == 0)
			reply = do_mode_select(common, bh);
		break;

	case SC_MODE_SENSE_6:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (1<<1) | (1<<2) | (1<<4), 0,
				      "MODE SENSE(6)");
		if (reply == 0)
			reply = do_mode_sense(common, bh);
		break;

	case SC_MODE_SENSE_10:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (1<<1) | (1<<2) | (3<<7), 0,
				      "MODE SENSE(10)");
		if (reply == 0)
			reply = do_mode_sense(common, bh);
		break;

	case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      (1<<4), 0,
				      "PREVENT-ALLOW MEDIUM REMOVAL");
		if (reply == 0)
			reply = do_prevent_allow(common);
		break;

	case SC_READ_6:
		i = common->cmnd[4];
		common->data_size_from_cmnd = (i == 0 ? 256 : i);
		reply = check_command_size_in_blocks(common, 6, DATA_DIR_TO_HOST,
				      (7<<1) | (1<<4), 1,
				      "READ(6)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case SC_READ_10:
		common->data_size_from_cmnd =
				get_unaligned_be16(&common->cmnd[7]);
		reply = check_command_size_in_blocks(common, 10, DATA_DIR_TO_HOST,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "READ(10)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case SC_READ_12:
		common->data_size_from_cmnd =
				get_unaligned_be32(&common->cmnd[6]);
		reply = check_command_size_in_blocks(common, 12, DATA_DIR_TO_HOST,
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "READ(12)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case SC_READ_CAPACITY:
		common->data_size_from_cmnd = 8;
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (0xf<<2) | (1<<8), 1,
				      "READ CAPACITY");
		if (reply == 0)
			reply = do_read_capacity(common, bh);
		break;

	case SC_READ_HEADER:
		if (!common->luns[common->lun].cdrom)
			goto unknown_cmnd;
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (3<<7) | (0x1f<<1), 1,
				      "READ HEADER");
		if (reply == 0)
			reply = do_read_header(common, bh);
		break;

	case SC_READ_TOC:
		if (!common->luns[common->lun].cdrom)
			goto unknown_cmnd;
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (7<<6) | (1<<1), 1,
				      "READ TOC");
		if (reply == 0)
			reply = do_read_toc(common, bh);
		break;

	case SC_READ_FORMAT_CAPACITIES:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (3<<7), 1,
				      "READ FORMAT CAPACITIES");
		if (reply == 0)
			reply = do_read_format_capacities(common, bh);
		break;

	case SC_REQUEST_SENSE:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (1<<4), 0,
				      "REQUEST SENSE");
		if (reply == 0)
			reply = do_request_sense(common, bh);
		break;

	case SC_START_STOP_UNIT:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      (1<<1) | (1<<4), 0,
				      "START-STOP UNIT");
		if (reply == 0)
			reply = do_start_stop(common);
		break;

	case SC_SYNCHRONIZE_CACHE:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
				      (0xf<<2) | (3<<7), 1,
				      "SYNCHRONIZE CACHE");
		if (reply == 0)
			reply = do_synchronize_cache(common);
		break;

	case SC_TEST_UNIT_READY:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      0, 1,
				      "TEST UNIT READY");
		break;

	/* Although optional, this command is used by MS-Windows. We
	 * support a minimal version: BytChk must be 0. */
	case SC_VERIFY:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "VERIFY");
		if (reply == 0)
			reply = do_verify(common);
		break;

	case SC_WRITE_6:
		i = common->cmnd[4];
		common->data_size_from_cmnd = (i == 0 ? 256 : i);
		reply = check_command_size_in_blocks(common, 6, DATA_DIR_FROM_HOST,
				      (7<<1) | (1<<4), 1,
				      "WRITE(6)");
		if (reply == 0)
			reply = do_write(common);
		break;

	case SC_WRITE_10:
		common->data_size_from_cmnd =
				get_unaligned_be16(&common->cmnd[7]);
		reply = check_command_size_in_blocks(common, 10, DATA_DIR_FROM_HOST,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "WRITE(10)");
		if (reply == 0)
			reply = do_write(common);
		break;

	case SC_WRITE_12:
		common->data_size_from_cmnd =
				get_unaligned_be32(&common->cmnd[6]);
		reply = check_command_size_in_blocks(common, 12, DATA_DIR_FROM_HOST,
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "WRITE(12)");
		if (reply == 0)
			reply = do_write(common);
		break;

	/* Some mandatory commands that we recognize but don't implement.
	 * They don't mean much in this setting. It's left as an exercise
	 * for anyone interested to implement RESERVE and RELEASE in terms
	 * of Posix locks. */
	case SC_FORMAT_UNIT:
	case SC_RELEASE:
	case SC_RESERVE:
	case SC_SEND_DIAGNOSTIC:
		/* Fall through */

	default:
unknown_cmnd:
		common->data_size_from_cmnd = 0;
		sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
		reply = check_command(common, common->cmnd_size,
				      DATA_DIR_UNKNOWN, 0xff, 0, unknown);
		if (reply == 0) {
			curlun->sense_data = SS_INVALID_COMMAND;
			reply = -EINVAL;
		}
		break;
	}
	up_read(&common->filesem);

	if (reply == -EINTR)
		return -EINTR;

	/* Set up the single reply buffer for finish_reply() */
	if (reply == -EINVAL)
		reply = 0;		/* Error reply length */
	if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
		reply = min((u32) reply, common->data_size_from_cmnd);
		bh->inreq->length = reply;
		bh->state = BUF_STATE_FULL;
		common->residue -= reply;
	}				/* Otherwise it's already set */

	return 0;
}

/*-------------------------------------------------------------------------*/

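/*
 * Validate a freshly received Command Block Wrapper (CBW) and copy the
 * command, LUN, tag, expected transfer length and direction into the
 * common state. A malformed CBW wedges the bulk-in endpoint and makes us
 * ignore bulk-out traffic until the next reset, as required by the
 * Bulk-Only Transport specification.
 */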
static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct usb_request *req = bh->outreq;
	struct fsg_bulk_cb_wrap *cbw = req->buf;
	struct fsg_common *common = fsg->common;

	/* Was this a real packet?  Should it be ignored? */
	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
		return -EINVAL;

	/* Is the CBW valid? */
	if (req->actual != USB_BULK_CB_WRAP_LEN ||
			cbw->Signature != cpu_to_le32(
				USB_BULK_CB_SIG)) {
		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
				req->actual,
				le32_to_cpu(cbw->Signature));

		/* The Bulk-only spec says we MUST stall the IN endpoint
		 * (6.6.1), so it's unavoidable. It also says we must
		 * retain this state until the next reset, but there's
		 * no way to tell the controller driver it should ignore
		 * Clear-Feature(HALT) requests.
		 *
		 * We aren't required to halt the OUT endpoint; instead
		 * we can simply accept and discard any data received
		 * until the next reset. */
		wedge_bulk_in_endpoint(fsg);
		generic_set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
		return -EINVAL;
	}

	/* Is the CBW meaningful? */
	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
				"cmdlen %u\n",
				cbw->Lun, cbw->Flags, cbw->Length);

		/* We can do anything we want here, so let's stall the
		 * bulk pipes if we are allowed to. */
		if (common->can_stall) {
			fsg_set_halt(fsg, fsg->bulk_out);
			halt_bulk_in_endpoint(fsg);
		}
		return -EINVAL;
	}

	/* Save the command for later */
	common->cmnd_size = cbw->Length;
	memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
	if (cbw->Flags & USB_BULK_IN_FLAG)
		common->data_dir = DATA_DIR_TO_HOST;
	else
		common->data_dir = DATA_DIR_FROM_HOST;
	common->data_size = le32_to_cpu(cbw->DataTransferLength);
	if (common->data_size == 0)
		common->data_dir = DATA_DIR_NONE;
	common->lun = cbw->Lun;
	common->tag = cbw->Tag;
	return 0;
}

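/*
 * Queue a bulk-out request for the next CBW, wait for it to arrive and
 * hand it to received_cbw(). The same buffer head is reused, so
 * next_buffhd_to_fill is not advanced here.
 */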
static int get_next_command(struct fsg_common *common)
{
	struct fsg_buffhd *bh;
	int rc = 0;

	/* Wait for the next buffer to become available */
	bh = common->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	/* Queue a request to read a Bulk-only CBW */
	set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
	bh->outreq->short_not_ok = 1;
	START_TRANSFER_OR(common, bulk_out, bh->outreq,
			  &bh->outreq_busy, &bh->state)
		/* Don't know what to do if common->fsg is NULL */
		return -EIO;

	/* We will drain the buffer in software, which means we
	 * can reuse it for the next filling. No need to advance
	 * next_buffhd_to_fill. */

	/* Wait for the CBW to arrive */
	while (bh->state != BUF_STATE_FULL) {
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
	bh->state = BUF_STATE_EMPTY;

	return rc;
}


/*-------------------------------------------------------------------------*/

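/*
 * Helpers for do_set_interface(): enable one endpoint with the descriptor
 * chosen for the current speed, and allocate a usb_request for it.
 */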
static int enable_endpoint(struct fsg_common *common, struct usb_ep *ep,
			   const struct usb_endpoint_descriptor *d)
{
	int rc;

	ep->driver_data = common;
	rc = usb_ep_enable(ep, d);
	if (rc)
		ERROR(common, "can't enable %s, result %d\n", ep->name, rc);
	return rc;
}

static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
			 struct usb_request **preq)
{
	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (*preq)
		return 0;
	ERROR(common, "can't allocate request for %s\n", ep->name);
	return -ENOMEM;
}

/* Reset interface setting and re-init endpoint state (toggle etc). */
static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
{
	const struct usb_endpoint_descriptor *d;
	struct fsg_dev *fsg;
	int i, rc = 0;

	if (common->running)
		DBG(common, "reset interface\n");

reset:
	/* Deallocate the requests */
	if (common->fsg) {
		fsg = common->fsg;

		for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
			struct fsg_buffhd *bh = &common->buffhds[i];

			if (bh->inreq) {
				usb_ep_free_request(fsg->bulk_in, bh->inreq);
				bh->inreq = NULL;
			}
			if (bh->outreq) {
				usb_ep_free_request(fsg->bulk_out, bh->outreq);
				bh->outreq = NULL;
			}
		}

		/* Disable the endpoints */
		if (fsg->bulk_in_enabled) {
			usb_ep_disable(fsg->bulk_in);
			fsg->bulk_in_enabled = 0;
		}
		if (fsg->bulk_out_enabled) {
			usb_ep_disable(fsg->bulk_out);
			fsg->bulk_out_enabled = 0;
		}

		common->fsg = NULL;
		/* wake_up(&common->fsg_wait); */
	}

	common->running = 0;
	if (!new_fsg || rc)
		return rc;

	common->fsg = new_fsg;
	fsg = common->fsg;

	/* Enable the endpoints */
	d = fsg_ep_desc(common->gadget,
			&fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
	rc = enable_endpoint(common, fsg->bulk_in, d);
	if (rc)
		goto reset;
	fsg->bulk_in_enabled = 1;

	d = fsg_ep_desc(common->gadget,
			&fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
	rc = enable_endpoint(common, fsg->bulk_out, d);
	if (rc)
		goto reset;
	fsg->bulk_out_enabled = 1;
	common->bulk_out_maxpacket =
			le16_to_cpu(get_unaligned(&d->wMaxPacketSize));
	generic_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);

	/* Allocate the requests */
	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
		struct fsg_buffhd *bh = &common->buffhds[i];

		rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
		if (rc)
			goto reset;
		rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
		if (rc)
			goto reset;
		bh->inreq->buf = bh->outreq->buf = bh->buf;
		bh->inreq->context = bh->outreq->context = bh;
		bh->inreq->complete = bulk_in_complete;
		bh->outreq->complete = bulk_out_complete;
	}

	common->running = 1;

	return rc;
}


/****************************** ALT CONFIGS ******************************/


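/*
 * set_alt() and disable() only record the new fsg (or NULL) and raise a
 * CONFIG_CHANGE exception; the actual endpoint reconfiguration happens in
 * handle_exception() via do_set_interface().
 */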
static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct fsg_dev *fsg = fsg_from_func(f);
	fsg->common->new_fsg = fsg;
	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
	return 0;
}

static void fsg_disable(struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);
	fsg->common->new_fsg = NULL;
	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
}

/*-------------------------------------------------------------------------*/

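/*
 * Deal with a raised exception: cancel and drain all outstanding transfers,
 * flush the FIFOs, reset the buffer heads and per-LUN sense data, and then
 * perform the state-specific action (abort status, reset, config change,
 * or shutdown).
 */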
static void handle_exception(struct fsg_common *common)
{
	int i;
	struct fsg_buffhd *bh;
	enum fsg_state old_state;
	struct fsg_lun *curlun;
	unsigned int exception_req_tag;

	/* Cancel all the pending transfers */
	if (common->fsg) {
		for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
			bh = &common->buffhds[i];
			if (bh->inreq_busy)
				usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
			if (bh->outreq_busy)
				usb_ep_dequeue(common->fsg->bulk_out,
					       bh->outreq);
		}

		/* Wait until everything is idle */
		for (;;) {
			int num_active = 0;
			for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
				bh = &common->buffhds[i];
				num_active += bh->inreq_busy + bh->outreq_busy;
			}
			if (num_active == 0)
				break;
			if (sleep_thread(common))
				return;
		}

		/* Clear out the controller's fifos */
		if (common->fsg->bulk_in_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_in);
		if (common->fsg->bulk_out_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_out);
	}

	/* Reset the I/O buffer states and pointers, the SCSI
	 * state, and the exception. Then invoke the handler. */

	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
		bh = &common->buffhds[i];
		bh->state = BUF_STATE_EMPTY;
	}
	common->next_buffhd_to_fill = &common->buffhds[0];
	common->next_buffhd_to_drain = &common->buffhds[0];
	exception_req_tag = common->exception_req_tag;
	old_state = common->state;

	if (old_state == FSG_STATE_ABORT_BULK_OUT)
		common->state = FSG_STATE_STATUS_PHASE;
	else {
		for (i = 0; i < common->nluns; ++i) {
			curlun = &common->luns[i];
			curlun->sense_data = SS_NO_SENSE;
			curlun->info_valid = 0;
		}
		common->state = FSG_STATE_IDLE;
	}

	/* Carry out any extra actions required for the exception */
	switch (old_state) {
	case FSG_STATE_ABORT_BULK_OUT:
		send_status(common);

		if (common->state == FSG_STATE_STATUS_PHASE)
			common->state = FSG_STATE_IDLE;
		break;

	case FSG_STATE_RESET:
		/* In case we were forced against our will to halt a
		 * bulk endpoint, clear the halt now. (The SuperH UDC
		 * requires this.) */
		if (!fsg_is_set(common))
			break;
		if (test_and_clear_bit(IGNORE_BULK_OUT,
				       &common->fsg->atomic_bitflags))
			usb_ep_clear_halt(common->fsg->bulk_in);

		if (common->ep0_req_tag == exception_req_tag)
			ep0_queue(common);	/* Complete the status stage */

		break;

	case FSG_STATE_CONFIG_CHANGE:
		do_set_interface(common, common->new_fsg);
		break;

	case FSG_STATE_EXIT:
	case FSG_STATE_TERMINATED:
		do_set_interface(common, NULL);		/* Free resources */
		common->state = FSG_STATE_TERMINATED;	/* Stop the thread */
		break;

	case FSG_STATE_INTERFACE_CHANGE:
	case FSG_STATE_DISCONNECT:
	case FSG_STATE_COMMAND_PHASE:
	case FSG_STATE_DATA_PHASE:
	case FSG_STATE_STATUS_PHASE:
	case FSG_STATE_IDLE:
		break;
	}
}

/*-------------------------------------------------------------------------*/

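/*
 * Main command loop. Note the do { ... } while (0): each call runs at most
 * one exception or one command/data/status cycle and then returns, so the
 * caller (in U-Boot, presumably the ums command's polling loop) is expected
 * to invoke this function repeatedly.
 */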
int fsg_main_thread(void *common_)
{
	int ret;
	struct fsg_common *common = the_fsg_common;
	/* The main loop */
	do {
		if (exception_in_progress(common)) {
			handle_exception(common);
			continue;
		}

		if (!common->running) {
			ret = sleep_thread(common);
			if (ret)
				return ret;

			continue;
		}

		ret = get_next_command(common);
		if (ret)
			return ret;

		if (!exception_in_progress(common))
			common->state = FSG_STATE_DATA_PHASE;

		if (do_scsi_command(common) || finish_reply(common))
			continue;

		if (!exception_in_progress(common))
			common->state = FSG_STATE_STATUS_PHASE;

		if (send_status(common))
			continue;

		if (!exception_in_progress(common))
			common->state = FSG_STATE_IDLE;
	} while (0);

	common->thread_task = NULL;

	return 0;
}

static void fsg_common_release(struct kref *ref);

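/*
 * Allocate (or reset) the shared fsg_common state: one LUN per registered
 * UMS device, the cyclic list of FSG_NUM_BUFFERS data buffers, the INQUIRY
 * string and the worker thread. Returns an ERR_PTR() on failure, after
 * releasing whatever was already set up.
 */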
static struct fsg_common *fsg_common_init(struct fsg_common *common,
					  struct usb_composite_dev *cdev)
{
	struct usb_gadget *gadget = cdev->gadget;
	struct fsg_buffhd *bh;
	struct fsg_lun *curlun;
	int nluns, i, rc;

	/* Find out how many LUNs there should be */
	nluns = ums_count;
	if (nluns < 1 || nluns > FSG_MAX_LUNS) {
		printf("invalid number of LUNs: %u\n", nluns);
		return ERR_PTR(-EINVAL);
	}

	/* Allocate? */
	if (!common) {
		common = calloc(sizeof(*common), 1);
		if (!common)
			return ERR_PTR(-ENOMEM);
		common->free_storage_on_release = 1;
	} else {
		memset(common, 0, sizeof(*common));
		common->free_storage_on_release = 0;
	}

	common->ops = NULL;
	common->private_data = NULL;

	common->gadget = gadget;
	common->ep0 = gadget->ep0;
	common->ep0req = cdev->req;

	/* Maybe allocate device-global string IDs, and patch descriptors */
	if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
		rc = usb_string_id(cdev);
		if (unlikely(rc < 0))
			goto error_release;
		fsg_strings[FSG_STRING_INTERFACE].id = rc;
		fsg_intf_desc.iInterface = rc;
	}

	/* Create the LUNs, open their backing files, and register the
	 * LUN devices in sysfs. */
	curlun = calloc(nluns, sizeof *curlun);
	if (!curlun) {
		rc = -ENOMEM;
		goto error_release;
	}
	common->nluns = nluns;

	for (i = 0; i < nluns; i++) {
		common->luns[i].removable = 1;

		rc = fsg_lun_open(&common->luns[i], ums[i].num_sectors, ums->block_dev.blksz, "");
		if (rc)
			goto error_luns;
	}
	common->lun = 0;

	/* Data buffers cyclic list */
	bh = common->buffhds;

	i = FSG_NUM_BUFFERS;
	goto buffhds_first_it;
	do {
		bh->next = bh + 1;
		++bh;
buffhds_first_it:
		bh->inreq_busy = 0;
		bh->outreq_busy = 0;
		bh->buf = memalign(CONFIG_SYS_CACHELINE_SIZE, FSG_BUFLEN);
		if (unlikely(!bh->buf)) {
			rc = -ENOMEM;
			goto error_release;
		}
	} while (--i);
	bh->next = common->buffhds;

	snprintf(common->inquiry_string, sizeof common->inquiry_string,
		 "%-8s%-16s%04x",
		 "Linux ",
		 "File-Store Gadget",
		 0xffff);

	/* Some peripheral controllers are known not to be able to
	 * halt bulk endpoints correctly. If one of them is present,
	 * disable stalls.
	 */

	/* Tell the thread to start working */
	common->thread_task =
		kthread_create(fsg_main_thread, common,
			       OR(cfg->thread_name, "file-storage"));
	if (IS_ERR(common->thread_task)) {
		rc = PTR_ERR(common->thread_task);
		goto error_release;
	}

#undef OR
	/* Information */
	INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
	INFO(common, "Number of LUNs=%d\n", common->nluns);

	return common;

error_luns:
	common->nluns = i + 1;
error_release:
	common->state = FSG_STATE_TERMINATED;	/* The thread is dead */
	/* Call fsg_common_release() directly; ref might not be
	 * initialised */
	fsg_common_release(&common->ref);
	return ERR_PTR(rc);
}

static void fsg_common_release(struct kref *ref)
{
	struct fsg_common *common = container_of(ref, struct fsg_common, ref);

	/* If the thread isn't already dead, tell it to exit now */
	if (common->state != FSG_STATE_TERMINATED) {
		raise_exception(common, FSG_STATE_EXIT);
		wait_for_completion(&common->thread_notifier);
	}

	if (likely(common->luns)) {
		struct fsg_lun *lun = common->luns;
		unsigned i = common->nluns;

		/* In error recovery common->nluns may be zero. */
		for (; i; --i, ++lun)
			fsg_lun_close(lun);

		kfree(common->luns);
	}

	{
		struct fsg_buffhd *bh = common->buffhds;
		unsigned i = FSG_NUM_BUFFERS;
		do {
			kfree(bh->buf);
		} while (++bh, --i);
	}

	if (common->free_storage_on_release)
		kfree(common);
}


/*-------------------------------------------------------------------------*/

/**
 * usb_copy_descriptors - copy a vector of USB descriptors
 * @src: null-terminated vector to copy
 * Context: initialization code, which may sleep
 *
 * This makes a copy of a vector of USB descriptors. Its primary use
 * is to support usb_function objects which can have multiple copies,
 * each needing different descriptors. Functions may have static
 * tables of descriptors, which are used as templates and customized
 * with identifiers (for interfaces, strings, endpoints, and more)
 * as needed by a given function instance.
 */
struct usb_descriptor_header **
usb_copy_descriptors(struct usb_descriptor_header **src)
{
	struct usb_descriptor_header **tmp;
	unsigned bytes;
	unsigned n_desc;
	void *mem;
	struct usb_descriptor_header **ret;

	/* count descriptors and their sizes; then add vector size */
	for (bytes = 0, n_desc = 0, tmp = src; *tmp; tmp++, n_desc++)
		bytes += (*tmp)->bLength;
	bytes += (n_desc + 1) * sizeof(*tmp);

	mem = memalign(CONFIG_SYS_CACHELINE_SIZE, bytes);
	if (!mem)
		return NULL;

	/* fill in pointers starting at "tmp",
	 * to descriptors copied starting at "mem";
	 * and return "ret"
	 */
	tmp = mem;
	ret = mem;
	mem += (n_desc + 1) * sizeof(*tmp);
	while (*src) {
		memcpy(mem, *src, (*src)->bLength);
		*tmp = mem;
		tmp++;
		mem += (*src)->bLength;
		src++;
	}
	*tmp = NULL;

	return ret;
}

static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);

	DBG(fsg, "unbind\n");
	if (fsg->common->fsg == fsg) {
		fsg->common->new_fsg = NULL;
		raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
	}

	free(fsg->function.descriptors);
	free(fsg->function.hs_descriptors);
	kfree(fsg);
}

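/*
 * bind(): claim an interface number and the two bulk endpoints, then make
 * per-instance copies of the full-speed (and, on dual-speed gadgets,
 * high-speed) descriptor tables.
 */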
static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);
	struct usb_gadget *gadget = c->cdev->gadget;
	int i;
	struct usb_ep *ep;
	fsg->gadget = gadget;

	/* New interface */
	i = usb_interface_id(c, f);
	if (i < 0)
		return i;
	fsg_intf_desc.bInterfaceNumber = i;
	fsg->interface_number = i;

	/* Find all the endpoints we will use */
	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
	if (!ep)
		goto autoconf_fail;
	ep->driver_data = fsg->common;	/* claim the endpoint */
	fsg->bulk_in = ep;

	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
	if (!ep)
		goto autoconf_fail;
	ep->driver_data = fsg->common;	/* claim the endpoint */
	fsg->bulk_out = ep;

	/* Copy descriptors */
	f->descriptors = usb_copy_descriptors(fsg_fs_function);
	if (unlikely(!f->descriptors))
		return -ENOMEM;

	if (gadget_is_dualspeed(gadget)) {
		/* Assume endpoint addresses are the same for both speeds */
		fsg_hs_bulk_in_desc.bEndpointAddress =
			fsg_fs_bulk_in_desc.bEndpointAddress;
		fsg_hs_bulk_out_desc.bEndpointAddress =
			fsg_fs_bulk_out_desc.bEndpointAddress;
		f->hs_descriptors = usb_copy_descriptors(fsg_hs_function);
		if (unlikely(!f->hs_descriptors)) {
			free(f->descriptors);
			return -ENOMEM;
		}
	}
	return 0;

autoconf_fail:
	ERROR(fsg, "unable to autoconfigure all endpoints\n");
	return -ENOTSUPP;
}


/****************************** ADD FUNCTION ******************************/

static struct usb_gadget_strings *fsg_strings_array[] = {
	&fsg_stringtab,
	NULL,
};

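/*
 * Allocate a struct fsg_dev, wire up the usb_function callbacks and add the
 * function to the given configuration. The fsg_dev is freed again if
 * usb_add_function() fails.
 */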
static int fsg_bind_config(struct usb_composite_dev *cdev,
			   struct usb_configuration *c,
			   struct fsg_common *common)
{
	struct fsg_dev *fsg;
	int rc;

	fsg = calloc(1, sizeof *fsg);
	if (!fsg)
		return -ENOMEM;
	fsg->function.name = FSG_DRIVER_DESC;
	fsg->function.strings = fsg_strings_array;
	fsg->function.bind = fsg_bind;
	fsg->function.unbind = fsg_unbind;
	fsg->function.setup = fsg_setup;
	fsg->function.set_alt = fsg_set_alt;
	fsg->function.disable = fsg_disable;

	fsg->common = common;
	common->fsg = fsg;
	/* Our caller holds a reference to the common structure, so we
	 * don't have to worry about it being freed until we return
	 * from this function. So instead of incrementing the counter
	 * now and decrementing it in error recovery, we increment it
	 * only when the call to usb_add_function() was successful. */

	rc = usb_add_function(c, &fsg->function);

	if (rc)
		kfree(fsg);

	return rc;
}

int fsg_add(struct usb_configuration *c)
{
	struct fsg_common *fsg_common;

	fsg_common = fsg_common_init(NULL, c->cdev);

	fsg_common->vendor_name = 0;
	fsg_common->product_name = 0;
	fsg_common->release = 0xffff;

	fsg_common->ops = NULL;
	fsg_common->private_data = NULL;

	the_fsg_common = fsg_common;

	return fsg_bind_config(c->cdev, c, fsg_common);
}

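/*
 * Record the array of UMS devices, its length and the UDC device so that
 * fsg_common_init() can create one LUN per entry when the gadget is bound.
 */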
int fsg_init(struct ums *ums_devs, int count, struct udevice *udc)
{
	ums = ums_devs;
	ums_count = count;
	udcdev = udc;

	return 0;
}

DECLARE_GADGET_BIND_CALLBACK(usb_dnl_ums, fsg_add);
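/*
 * Typical usage, as a minimal sketch only (the real wiring lives outside
 * this file, e.g. in the ums command, and helper names such as
 * g_dnl_register() or dm_usb_gadget_handle_interrupts() are assumptions
 * that may differ from the actual caller):
 *
 *	ret = fsg_init(ums_array, ums_count, udc_dev);
 *	if (!ret)
 *		ret = g_dnl_register("usb_dnl_ums");
 *	while (!ret && !ctrlc()) {
 *		dm_usb_gadget_handle_interrupts(udc_dev);
 *		if (fsg_main_thread(NULL))
 *			break;
 *	}
 *	g_dnl_unregister();
 */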