// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
 * f_mass_storage.c -- Mass Storage USB Composite Function
 *
 * Copyright (C) 2003-2008 Alan Stern
 * Copyright (C) 2009 Samsung Electronics
 * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
 * All rights reserved.
 */

/*
 * The Mass Storage Function acts as a USB Mass Storage device,
 * appearing to the host as a disk drive or as a CD-ROM drive. In
 * addition to providing an example of a genuinely useful composite
 * function for a USB device, it also illustrates a technique of
 * double-buffering for increased throughput.
 *
 * The function supports multiple logical units (LUNs). Backing storage
 * for each LUN is provided by a regular file or a block device.
 * Access for each LUN can be limited to read-only. Moreover, the
 * function can indicate that a LUN is removable and/or a CD-ROM. (The
 * latter implies read-only access.)
 *
 * MSF is configured by specifying a fsg_config structure. It has the
 * following fields:
 *
 *	nluns		Number of LUNs the function has (anywhere from 1
 *				to FSG_MAX_LUNS, which is 8).
 *	luns		An array of LUN configuration values. This
 *				should be filled for each LUN that the
 *				function will include (i.e. for "nluns"
 *				LUNs). Each element of the array has
 *				the following fields:
 *	->filename	The path to the backing file for the LUN.
 *				Required if the LUN is not marked as
 *				removable.
 *	->ro		Flag specifying that access to the LUN shall be
 *				read-only. This is implied if CD-ROM
 *				emulation is enabled, as well as when
 *				it was impossible to open "filename"
 *				in R/W mode.
 *	->removable	Flag specifying that the LUN shall be indicated as
 *				being removable.
 *	->cdrom		Flag specifying that the LUN shall be reported as
 *				being a CD-ROM.
 *
 *	lun_name_format	A printf-like format for names of the LUN
 *				devices. This determines how the
 *				directory in sysfs will be named.
 *				Unless you are using several MSFs in
 *				a single gadget (as opposed to a single
 *				MSF in many configurations) you may
 *				leave it as NULL (in which case
 *				"lun%d" will be used). In the format
 *				you can use "%d" to index LUNs for
 *				MSFs with more than one LUN. (Beware
 *				that there is only one integer given
 *				as an argument for the format and
 *				specifying an invalid format may cause
 *				unspecified behaviour.)
 *	thread_name	Name of the kernel thread process used by the
 *				MSF. You can safely set it to NULL
 *				(in which case the default "file-storage"
 *				will be used).
 *
 *	vendor_name
 *	product_name
 *	release		Information used as a reply to the INQUIRY
 *				request. To use the defaults, set to NULL,
 *				NULL and 0xffff respectively. The first
 *				field should be 8 and the second 16
 *				characters or less.
 *
 *	can_stall	Set to permit the function to halt bulk endpoints.
 *				Disabled on some USB devices known not
 *				to work correctly. You should set it
 *				to true.
 *
 * (A minimal illustrative fsg_config example appears after this comment.)
 *
 * If "removable" is not set for a LUN then a backing file must be
 * specified. If it is set, then a NULL filename means the LUN's medium
 * is not loaded (an empty string as "filename" in the fsg_config
 * structure causes an error). The CD-ROM emulation includes a single
 * data track and no audio tracks; hence there need be only one
 * backing file per LUN. Note also that the CD-ROM block length is
 * set to 512 rather than the more common value 2048.
 *
 *
 * MSF includes support for module parameters. If the gadget using it
 * decides to use them, the following module parameters will be
 * available:
 *
 *	file=filename[,filename...]
 *			Names of the files or block devices used for
 *				backing storage.
 *	ro=b[,b...]	Default false, boolean for read-only access.
 *	removable=b[,b...]
 *			Default true, boolean for removable media.
 *	cdrom=b[,b...]	Default false, boolean for whether to emulate
 *				a CD-ROM drive.
 *	luns=N		Default N = number of filenames, number of
 *				LUNs to support.
 *	stall		Default determined according to the type of
 *				USB device controller (usually true),
 *				boolean to permit the driver to halt
 *				bulk endpoints.
 *
 * The module parameters may be prefixed with some string. You need
 * to consult the gadget's documentation or source to verify whether it
 * is using those module parameters and, if it does, what the prefixes
 * are (look for the FSG_MODULE_PARAMETERS() macro usage; what's inside
 * it is the prefix).
 *
 *
 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
 * needed. The memory requirement amounts to two 16K buffers, size
 * configurable by a parameter. Support is included for both
 * full-speed and high-speed operation.
 *
 * Note that the driver is slightly non-portable in that it assumes a
 * single memory/DMA buffer will be usable for bulk-in, bulk-out, and
 * interrupt-in endpoints. With most device controllers this isn't an
 * issue, but there may be some with hardware restrictions that prevent
 * a buffer from being used by more than one endpoint.
 *
 *
 * The pathnames of the backing files and the ro settings are
 * available in the attribute files "file" and "ro" in the lun<n> (or
 * to be more precise in a directory whose name comes from the
 * "lun_name_format" option!) subdirectory of the gadget's sysfs
 * directory. If the "removable" option is set, writing to these
 * files will simulate ejecting/loading the medium (writing an empty
 * line means eject) and adjusting a write-enable tab. Changes to the
 * ro setting are not allowed when the medium is loaded or if CD-ROM
 * emulation is being used.
 *
 * When a LUN receives an "eject" SCSI request (Start/Stop Unit),
 * if the LUN is removable, the backing file is released to simulate
 * ejection.
 *
 *
 * This function is heavily based on "File-backed Storage Gadget" by
 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
 * Brownell. The driver's SCSI command interface was based on the
 * "Information technology - Small Computer System Interface - 2"
 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
 * was based on the "Universal Serial Bus Mass Storage Class UFI
 * Command Specification" document, Revision 1.0, December 14, 1998,
 * available at
 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
 */
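
/*
 * Illustrative sketch only, kept out of the build: a minimal fsg_config
 * for one writable, removable LUN backed by a block device.  The backing
 * path is a placeholder, field names follow struct fsg_config defined
 * further below, and the NULL strings fall back to the built-in defaults.
 */
#if 0
static struct fsg_config example_cfg = {
	.nluns = 1,
	.luns[0] = {
		.filename  = "/dev/example-blk",	/* placeholder */
		.ro        = 0,
		.removable = 1,
		.cdrom     = 0,
	},
	.vendor_name  = NULL,	/* use the default vendor string */
	.product_name = NULL,	/* use the default product string */
	.can_stall    = 1,	/* allow halting bulk endpoints */
};
#endif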

/*
 * Driver Design
 *
 * The MSF is fairly straightforward. There is a main kernel
 * thread that handles most of the work. Interrupt routines field
 * callbacks from the controller driver: bulk- and interrupt-request
 * completion notifications, endpoint-0 events, and disconnect events.
 * Completion events are passed to the main thread by wakeup calls. Many
 * ep0 requests are handled at interrupt time, but SetInterface,
 * SetConfiguration, and device reset requests are forwarded to the
 * thread in the form of "exceptions" using SIGUSR1 signals (since they
 * should interrupt any ongoing file I/O operations).
 *
 * The thread's main routine implements the standard command/data/status
 * parts of a SCSI interaction. It and its subroutines are full of tests
 * for pending signals/exceptions -- all this polling is necessary since
 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
 * indication that the driver really wants to be running in userspace.)
 * An important point is that so long as the thread is alive it keeps an
 * open reference to the backing file. This will prevent unmounting
 * the backing file's underlying filesystem and could cause problems
 * during system shutdown, for example. To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
 *
 * In normal operation the main thread is started during the gadget's
 * fsg_bind() callback and stopped during fsg_unbind(). But it can
 * also exit when it receives a signal, and there's no point leaving
 * the gadget running when the thread is dead. As of this moment, MSF
 * provides no way to deregister the gadget when the thread dies -- maybe
 * a callback function is needed.
 *
 * To provide maximum throughput, the driver uses a circular pipeline of
 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
 * arbitrarily long; in practice the benefits don't justify having more
 * than 2 stages (i.e., double buffering). But it helps to think of the
 * pipeline as being a long one. Each buffer head contains a bulk-in and
 * a bulk-out request pointer (since the buffer can be used for both
 * output and input -- directions are always given from the host's
 * point of view) as well as a pointer to the buffer and various state
 * variables.
 *
 * Use of the pipeline follows a simple protocol. There is a variable
 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
 * At any time that buffer head may still be in use from an earlier
 * request, so each buffer head has a state variable indicating whether
 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
 * buffer head to be EMPTY, filling the buffer either by file I/O or by
 * USB I/O (during which the buffer head is BUSY), and marking the buffer
 * head FULL when the I/O is complete. Then the buffer will be emptied
 * (again possibly by USB I/O, during which it is marked BUSY) and
 * finally marked EMPTY again (possibly by a completion routine).
 * A short sketch of this cycle follows this comment.
 *
 * A module parameter tells the driver to avoid stalling the bulk
 * endpoints wherever the transport specification allows. This is
 * necessary for some UDCs like the SuperH, which cannot reliably clear a
 * halt on a bulk endpoint. However, under certain circumstances the
 * Bulk-only specification requires a stall. In such cases the driver
 * will halt the endpoint and set a flag indicating that it should clear
 * the halt in software during the next device reset. Hopefully this
 * will permit everything to work correctly. Furthermore, although the
 * specification allows the bulk-out endpoint to halt when the host sends
 * too much data, implementing this would cause an unavoidable race.
 * The driver will always use the "no-stall" approach for OUT transfers.
 *
 * One subtle point concerns sending status-stage responses for ep0
 * requests. Some of these requests, such as device reset, can involve
 * interrupting an ongoing file I/O operation, which might take an
 * arbitrarily long time. During that delay the host might give up on
 * the original ep0 request and issue a new one. When that happens the
 * driver should not notify the host about completion of the original
 * request, as the host will no longer be waiting for it. So the driver
 * assigns to each ep0 request a unique tag, and it keeps track of the
 * tag value of the request associated with a long-running exception
 * (device-reset, interface-change, or configuration-change). When the
 * exception handler is finished, the status-stage response is submitted
 * only if the current ep0 request tag is equal to the exception request
 * tag. Thus only the most recently received ep0 request will get a
 * status-stage response.
 *
 * Warning: This driver source file is too long. It ought to be split up
 * into a header file plus about 3 separate .c files, to handle the details
 * of the Gadget, USB Mass Storage, and SCSI protocols.
 */
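
/*
 * Illustrative sketch only, kept out of the build: the producer half of
 * the buffer-head protocol described above, as used by do_read() below.
 * fill_buffhd() is a hypothetical helper standing in for the file or USB
 * I/O that actually fills the buffer.
 */
#if 0
	bh = common->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {	/* wait for the consumer */
		if (sleep_thread(common))
			return;			/* exception raised */
	}
	bh->state = BUF_STATE_BUSY;		/* I/O in flight */
	fill_buffhd(bh);			/* hypothetical helper */
	bh->state = BUF_STATE_FULL;		/* hand it to the drain side */
	common->next_buffhd_to_fill = bh->next;
#endif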

/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */

#include <config.h>
#include <div64.h>
#include <hexdump.h>
#include <log.h>
#include <malloc.h>
#include <console.h>
#include <g_dnl.h>
#include <dm/devres.h>
#include <linux/bug.h>

#include <linux/err.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <usb_mass_storage.h>

#include <asm/unaligned.h>
#include <linux/bitops.h>
#include <linux/usb/composite.h>
#include <linux/bitmap.h>

/*------------------------------------------------------------------------*/

#define FSG_DRIVER_DESC		"Mass Storage Function"
#define FSG_DRIVER_VERSION	"2012/06/5"

static const char fsg_string_interface[] = "Mass Storage";

#define FSG_NO_INTR_EP 1
#define FSG_NO_DEVICE_STRINGS    1
#define FSG_NO_OTG               1

#include "storage_common.c"

/*-------------------------------------------------------------------------*/

#define GFP_ATOMIC ((gfp_t) 0)
#define PAGE_CACHE_SHIFT	12
#define PAGE_CACHE_SIZE		(1 << PAGE_CACHE_SHIFT)
#define kthread_create(...)	__builtin_return_address(0)
#define wait_for_completion(...) do {} while (0)

struct kref {int x; };
struct completion {int x; };

struct fsg_dev;
struct fsg_common;

/* Data shared by all the FSG instances. */
struct fsg_common {
	struct usb_gadget	*gadget;
	struct fsg_dev		*fsg, *new_fsg;

	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
	struct usb_request	*ep0req;	/* Copy of cdev->req */
	unsigned int		ep0_req_tag;

	struct fsg_buffhd	*next_buffhd_to_fill;
	struct fsg_buffhd	*next_buffhd_to_drain;
	struct fsg_buffhd	buffhds[FSG_NUM_BUFFERS];

	int			cmnd_size;
	u8			cmnd[MAX_COMMAND_SIZE];

	unsigned int		nluns;
	unsigned int		lun;
	struct fsg_lun		luns[FSG_MAX_LUNS];

	unsigned int		bulk_out_maxpacket;
	enum fsg_state		state;		/* For exception handling */
	unsigned int		exception_req_tag;

	enum data_direction	data_dir;
	u32			data_size;
	u32			data_size_from_cmnd;
	u32			tag;
	u32			residue;
	u32			usb_amount_left;

	unsigned int		can_stall:1;
	unsigned int		free_storage_on_release:1;
	unsigned int		phase_error:1;
	unsigned int		short_packet_received:1;
	unsigned int		bad_lun_okay:1;
	unsigned int		running:1;
	unsigned int		eject:1;

	int			thread_wakeup_needed;
	struct completion	thread_notifier;
	struct task_struct	*thread_task;

	/* Callback functions. */
	const struct fsg_operations	*ops;
	/* Gadget's private data. */
	void			*private_data;

	const char *vendor_name;		/*  8 characters or less */
	const char *product_name;		/* 16 characters or less */
	u16 release;

	/* Vendor (8 chars), product (16 chars), release (4
	 * hexadecimal digits) and NUL byte */
	char inquiry_string[8 + 16 + 4 + 1];

	struct kref		ref;
};

struct fsg_config {
	unsigned nluns;
	struct fsg_lun_config {
		const char *filename;
		char ro;
		char removable;
		char cdrom;
		char nofua;
	} luns[FSG_MAX_LUNS];

	/* Callback functions. */
	const struct fsg_operations	*ops;
	/* Gadget's private data. */
	void			*private_data;

	const char *vendor_name;		/*  8 characters or less */
	const char *product_name;		/* 16 characters or less */

	char			can_stall;
};

struct fsg_dev {
	struct usb_function	function;
	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
	struct fsg_common	*common;

	u16			interface_number;

	unsigned int		bulk_in_enabled:1;
	unsigned int		bulk_out_enabled:1;

	unsigned long		atomic_bitflags;
#define IGNORE_BULK_OUT		0

	struct usb_ep		*bulk_in;
	struct usb_ep		*bulk_out;
};

static inline int __fsg_is_set(struct fsg_common *common,
			       const char *func, unsigned line)
{
	if (common->fsg)
		return 1;
	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
#ifdef __UBOOT__
	assert_noisy(false);
#else
	WARN_ON(1);
#endif
	return 0;
}

#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))

static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
{
	return container_of(f, struct fsg_dev, function);
}

typedef void (*fsg_routine_t)(struct fsg_dev *);

static int exception_in_progress(struct fsg_common *common)
{
	return common->state > FSG_STATE_IDLE;
}

/* Make bulk-out requests be divisible by the maxpacket size */
static void set_bulk_out_req_length(struct fsg_common *common,
				    struct fsg_buffhd *bh, unsigned int length)
{
	unsigned int	rem;

	bh->bulk_out_intended_length = length;
	rem = length % common->bulk_out_maxpacket;
	if (rem > 0)
		length += common->bulk_out_maxpacket - rem;
	bh->outreq->length = length;
}
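
/*
 * Worked example for the helper above (numbers are illustrative only):
 * with bulk_out_maxpacket == 512, a requested length of 1000 is rounded
 * up to 1024 so the host can always send full packets, while the intended
 * length (1000) is preserved in bh->bulk_out_intended_length.
 */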

/*-------------------------------------------------------------------------*/

static struct ums *ums;
static int ums_count;
static struct fsg_common *the_fsg_common;
static struct udevice *udcdev;

static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
	const char	*name;

	if (ep == fsg->bulk_in)
		name = "bulk-in";
	else if (ep == fsg->bulk_out)
		name = "bulk-out";
	else
		name = ep->name;
	DBG(fsg, "%s set halt\n", name);
	return usb_ep_set_halt(ep);
}

/*-------------------------------------------------------------------------*/

/* These routines may be called in process context or in_irq */

/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common)
{
	common->thread_wakeup_needed = 1;
}

static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
{
	/* Do nothing if a higher-priority exception is already in progress.
	 * If a lower-or-equal priority exception is in progress, preempt it
	 * and notify the main thread by sending it a signal. */
	if (common->state <= new_state) {
		common->exception_req_tag = common->ep0_req_tag;
		common->state = new_state;
		common->thread_wakeup_needed = 1;
	}
}

/*-------------------------------------------------------------------------*/

static int ep0_queue(struct fsg_common *common)
{
	int	rc;

	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
	common->ep0->driver_data = common;
	if (rc != 0 && rc != -ESHUTDOWN) {
		/* We can't do much more than wait for a reset */
		WARNING(common, "error in submission: %s --> %d\n",
			common->ep0->name, rc);
	}
	return rc;
}

/*-------------------------------------------------------------------------*/

/* Bulk and interrupt endpoint completion handlers.
 * These always run in_irq. */

static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	if (req->status || req->actual != req->length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	bh->inreq_busy = 0;
	bh->state = BUF_STATE_EMPTY;
	wakeup_thread(common);
}

static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	dump_msg(common, "bulk-out", req->buf, req->actual);
	if (req->status || req->actual != bh->bulk_out_intended_length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual,
		    bh->bulk_out_intended_length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	bh->outreq_busy = 0;
	bh->state = BUF_STATE_FULL;
	wakeup_thread(common);
}

/*-------------------------------------------------------------------------*/

/* Ep0 class-specific handlers. These always run in_irq. */

static int fsg_setup(struct usb_function *f,
		     const struct usb_ctrlrequest *ctrl)
{
	struct fsg_dev		*fsg = fsg_from_func(f);
	struct usb_request	*req = fsg->common->ep0req;
	u16			w_index = get_unaligned_le16(&ctrl->wIndex);
	u16			w_value = get_unaligned_le16(&ctrl->wValue);
	u16			w_length = get_unaligned_le16(&ctrl->wLength);

	if (!fsg_is_set(fsg->common))
		return -EOPNOTSUPP;

	switch (ctrl->bRequest) {

	case USB_BULK_RESET_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0)
			return -EDOM;

		/* Raise an exception to stop the current operation
		 * and reinitialize our state. */
		DBG(fsg, "bulk reset request\n");
		raise_exception(fsg->common, FSG_STATE_RESET);
		return DELAYED_STATUS;

	case USB_BULK_GET_MAX_LUN_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0)
			return -EDOM;
		VDBG(fsg, "get max LUN\n");
		*(u8 *)req->buf = fsg->common->nluns - 1;

		/* Respond with data/status */
		req->length = min((u16)1, w_length);
		return ep0_queue(fsg->common);
	}

	VDBG(fsg,
	     "unknown class-specific control req "
	     "%02x.%02x v%04x i%04x l%u\n",
	     ctrl->bRequestType, ctrl->bRequest,
	     get_unaligned_le16(&ctrl->wValue), w_index, w_length);
	return -EOPNOTSUPP;
}

/*-------------------------------------------------------------------------*/

/* All the following routines run in process context */

/* Use this for bulk or interrupt transfers, not ep0 */
static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
			   struct usb_request *req, int *pbusy,
			   enum fsg_buffer_state *state)
{
	int	rc;

	if (ep == fsg->bulk_in)
		dump_msg(fsg, "bulk-in", req->buf, req->length);

	*pbusy = 1;
	*state = BUF_STATE_BUSY;
	rc = usb_ep_queue(ep, req, GFP_KERNEL);
	if (rc != 0) {
		*pbusy = 0;
		*state = BUF_STATE_EMPTY;

		/* We can't do much more than wait for a reset */

		/* Note: currently the net2280 driver fails zero-length
		 * submissions if DMA is enabled. */
		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
					  req->length == 0))
			WARNING(fsg, "error in submission: %s --> %d\n",
				ep->name, rc);
	}
}

#define START_TRANSFER_OR(common, ep_name, req, pbusy, state)		\
	if (fsg_is_set(common))						\
		start_transfer((common)->fsg, (common)->fsg->ep_name,	\
			       req, pbusy, state);			\
	else

#define START_TRANSFER(common, ep_name, req, pbusy, state)		\
	START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0

static void busy_indicator(void)
{
	static int state;

	switch (state) {
	case 0:
		puts("\r|"); break;
	case 1:
		puts("\r/"); break;
	case 2:
		puts("\r-"); break;
	case 3:
		puts("\r\\"); break;
	case 4:
		puts("\r|"); break;
	case 5:
		puts("\r/"); break;
	case 6:
		puts("\r-"); break;
	case 7:
		puts("\r\\"); break;
	default:
		state = 0;
	}
	if (state++ == 8)
		state = 0;
}

static int sleep_thread(struct fsg_common *common)
{
	int rc = 0;
	int i = 0, k = 0;

	/* Wait until a signal arrives or we are woken up */
	for (;;) {
		if (common->thread_wakeup_needed)
			break;

		if (++i == 20000) {
			busy_indicator();
			i = 0;
			k++;
		}

		if (k == 10) {
			/* Handle START-STOP UNIT */
			if (common->eject)
				return -EPIPE;

			/* Handle CTRL+C */
			if (ctrlc())
				return -EPIPE;

			/* Check cable connection */
			if (!g_dnl_board_usb_cable_connected())
				return -EIO;

			k = 0;
		}

		dm_usb_gadget_handle_interrupts(udcdev);
	}
	common->thread_wakeup_needed = 0;
	return rc;
}

/*-------------------------------------------------------------------------*/

static int do_read(struct fsg_common *common)
{
	struct fsg_lun		*curlun = &common->luns[common->lun];
	u32			lba;
	struct fsg_buffhd	*bh;
	int			rc;
	u32			amount_left;
	loff_t			file_offset;
	unsigned int		amount;
	unsigned int		partial_page;
	ssize_t			nread;

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (common->cmnd[0] == SC_READ_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = don't read from the
		 * cache), but we don't implement them. */
		if ((common->cmnd[1] & ~0x18) != 0) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}
	file_offset = ((loff_t)lba) << curlun->blkbits;

	/* Carry out the file reads */
	amount_left = common->data_size_from_cmnd;
	if (unlikely(amount_left == 0)) {
		return -EIO;		/* No default reply */
	}

	for (;;) {

		/* Figure out how much we need to read:
		 * Try to read the remaining amount.
		 * But don't read more than the buffer size.
		 * And don't try to read past the end of the file.
		 * Finally, if we're not at a page boundary, don't read past
		 * the next page.
		 * If this means reading 0 then we were asked to read past
		 * the end of file. */
		amount = min(amount_left, FSG_BUFLEN);
		partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
		if (partial_page > 0)
			amount = min(amount, (unsigned int)PAGE_CACHE_SIZE -
				     partial_page);

		/* Wait for the next buffer to become available */
		bh = common->next_buffhd_to_fill;
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(common);
			if (rc)
				return rc;
		}

		/* If we were asked to read past the end of file,
		 * end with an empty buffer. */
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->info_valid = 1;
			bh->inreq->length = 0;
			bh->state = BUF_STATE_FULL;
			break;
		}

		/* Perform the read */
		rc = ums[common->lun].read_sector(&ums[common->lun],
				lldiv(file_offset, curlun->blksize),
				lldiv(amount, curlun->blksize),
				(char __user *)bh->buf);
		if (!rc)
			return -EIO;

		nread = rc * curlun->blksize;

		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long)file_offset,
		      (int)nread);

		if (nread < 0) {
			LDBG(curlun, "error in file read: %d\n",
			     (int)nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file read: %d/%u\n",
			     (int)nread, amount);
			nread -= (nread & (curlun->blksize - 1)); /* Round down to a block */
		}
		file_offset += nread;
		amount_left -= nread;
		common->residue -= nread;
		bh->inreq->length = nread;
		bh->state = BUF_STATE_FULL;

		/* If an error occurred, report it and its position */
		if (nread < amount) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->info_valid = 1;
			break;
		}

		if (amount_left == 0)
			break;		/* No more left to read */

		/* Send this buffer and go read some more */
		bh->inreq->zero = 0;
		START_TRANSFER_OR(common, bulk_in, bh->inreq,
				  &bh->inreq_busy, &bh->state)
			/* Don't know what to do if
			 * common->fsg is NULL */
			return -EIO;
		common->next_buffhd_to_fill = bh->next;
	}

	return -EIO;		/* No default reply */
}

/*-------------------------------------------------------------------------*/

static int do_write(struct fsg_common *common)
{
	struct fsg_lun		*curlun = &common->luns[common->lun];
	u32			lba;
	struct fsg_buffhd	*bh;
	int			get_some_more;
	u32			amount_left_to_req, amount_left_to_write;
	loff_t			usb_offset, file_offset;
	unsigned int		amount;
	unsigned int		partial_page;
	ssize_t			nwritten;
	int			rc;

	if (curlun->ro) {
		curlun->sense_data = SS_WRITE_PROTECTED;
		return -EINVAL;
	}

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (common->cmnd[0] == SC_WRITE_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = write directly to the
		 * medium). We don't implement DPO; we implement FUA by
		 * performing synchronous output. */
		if (common->cmnd[1] & ~0x18) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* Carry out the file writes */
	get_some_more = 1;
	file_offset = usb_offset = ((loff_t)lba) << curlun->blkbits;
	amount_left_to_req = common->data_size_from_cmnd;
	amount_left_to_write = common->data_size_from_cmnd;

	while (amount_left_to_write > 0) {

		/* Queue a request for more data from the host */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && get_some_more) {

			/* Figure out how much we want to get:
			 * Try to get the remaining amount.
			 * But don't get more than the buffer size.
			 * And don't try to go past the end of the file.
			 * If we're not at a page boundary,
			 * don't go past the next page.
			 * If this means getting 0, then we were asked
			 * to write past the end of file.
			 * Finally, round down to a block boundary. */
			amount = min(amount_left_to_req, FSG_BUFLEN);
			partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
			if (partial_page > 0)
				amount = min(amount,
					     (unsigned int)PAGE_CACHE_SIZE - partial_page);

			if (amount == 0) {
				get_some_more = 0;
				curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
				curlun->info_valid = 1;
				continue;
			}
			amount -= (amount & (curlun->blksize - 1));
			if (amount == 0) {

				/* Why were we asked to transfer a
				 * partial block? */
				get_some_more = 0;
				continue;
			}

			/* Get the next buffer */
			usb_offset += amount;
			common->usb_amount_left -= amount;
			amount_left_to_req -= amount;
			if (amount_left_to_req == 0)
				get_some_more = 0;

			/* amount is always divisible by 512, hence by
			 * the bulk-out maxpacket size */
			bh->outreq->length = amount;
			bh->bulk_out_intended_length = amount;
			bh->outreq->short_not_ok = 1;
			START_TRANSFER_OR(common, bulk_out, bh->outreq,
					  &bh->outreq_busy, &bh->state)
				/* Don't know what to do if
				 * common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			continue;
		}

		/* Write the received data to the backing file */
		bh = common->next_buffhd_to_drain;
		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
			break;			/* We stopped early */
		if (bh->state == BUF_STATE_FULL) {
			common->next_buffhd_to_drain = bh->next;
			bh->state = BUF_STATE_EMPTY;

			/* Did something go wrong with the transfer? */
			if (bh->outreq->status != 0) {
				curlun->sense_data = SS_COMMUNICATION_FAILURE;
				curlun->info_valid = 1;
				break;
			}

			amount = bh->outreq->actual;

			/* Perform the write */
			rc = ums[common->lun].write_sector(&ums[common->lun],
					lldiv(file_offset, curlun->blksize),
					lldiv(amount, curlun->blksize),
					(char __user *)bh->buf);
			if (!rc)
				return -EIO;
			nwritten = rc * curlun->blksize;

			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
			      (unsigned long long)file_offset,
			      (int)nwritten);

			if (nwritten < 0) {
				LDBG(curlun, "error in file write: %d\n",
				     (int)nwritten);
				nwritten = 0;
			} else if (nwritten < amount) {
				LDBG(curlun, "partial file write: %d/%u\n",
				     (int)nwritten, amount);
				nwritten -= (nwritten & (curlun->blksize - 1));
						/* Round down to a block */
			}
			file_offset += nwritten;
			amount_left_to_write -= nwritten;
			common->residue -= nwritten;

			/* If an error occurred, report it and its position */
			if (nwritten < amount) {
				printf("nwritten:%zd amount:%u\n", nwritten,
				       amount);
				curlun->sense_data = SS_WRITE_ERROR;
				curlun->info_valid = 1;
				break;
			}

			/* Did the host decide to stop early? */
			if (bh->outreq->actual != bh->outreq->length) {
				common->short_packet_received = 1;
				break;
			}
			continue;
		}

		/* Wait for something to happen */
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	return -EIO;		/* No default reply */
}

/*-------------------------------------------------------------------------*/

static int do_synchronize_cache(struct fsg_common *common)
{
	return 0;
}

/*-------------------------------------------------------------------------*/

static int do_verify(struct fsg_common *common)
{
	struct fsg_lun		*curlun = &common->luns[common->lun];
	u32			lba;
	u32			verification_length;
	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
	loff_t			file_offset;
	u32			amount_left;
	unsigned int		amount;
	ssize_t			nread;
	int			rc;

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	lba = get_unaligned_be32(&common->cmnd[2]);
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* We allow DPO (Disable Page Out = don't save data in the
	 * cache) but we don't implement it. */
	if (common->cmnd[1] & ~0x10) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	verification_length = get_unaligned_be16(&common->cmnd[7]);
	if (unlikely(verification_length == 0))
		return -EIO;		/* No default reply */

	/* Prepare to carry out the file verify */
	amount_left = verification_length << curlun->blkbits;
	file_offset = ((loff_t)lba) << curlun->blkbits;

	/* Write out all the dirty buffers before invalidating them */

	/* Just try to read the requested blocks */
	while (amount_left > 0) {

		/* Figure out how much we need to read:
		 * Try to read the remaining amount, but not more than
		 * the buffer size.
		 * And don't try to read past the end of the file.
		 * If this means reading 0 then we were asked to read
		 * past the end of file. */
		amount = min(amount_left, FSG_BUFLEN);
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->info_valid = 1;
			break;
		}

		/* Perform the read */
		rc = ums[common->lun].read_sector(&ums[common->lun],
				lldiv(file_offset, curlun->blksize),
				lldiv(amount, curlun->blksize),
				(char __user *)bh->buf);
		if (!rc)
			return -EIO;
		nread = rc * curlun->blksize;

		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long)file_offset,
		      (int)nread);
		if (nread < 0) {
			LDBG(curlun, "error in file verify: %d\n",
			     (int)nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file verify: %d/%u\n",
			     (int)nread, amount);
			nread -= (nread & (curlun->blksize - 1));	/* Round down to a sector */
		}
		if (nread == 0) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->info_valid = 1;
			break;
		}
		file_offset += nread;
		amount_left -= nread;
	}
	return 0;
}

/*-------------------------------------------------------------------------*/

static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun *curlun = &common->luns[common->lun];
	static const char vendor_id[] = "Linux   ";
	u8	*buf = (u8 *)bh->buf;

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		memset(buf, 0, 36);
		buf[0] = 0x7f;		/* Unsupported, no device-type */
		buf[4] = 31;		/* Additional length */
		return 36;
	}

	memset(buf, 0, 8);
	buf[0] = TYPE_DISK;
	buf[1] = curlun->removable ? 0x80 : 0;
	buf[2] = 2;		/* ANSI SCSI level 2 */
	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
	buf[4] = 31;		/* Additional length */
	/* No special options */
	sprintf((char *)(buf + 8), "%-8s%-16s%04x", (char *)vendor_id,
		ums[common->lun].name, (u16)0xffff);

	return 36;
}
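
/*
 * For reference, the 36-byte standard INQUIRY response built above
 * (SCSI-2 layout): byte 0 peripheral device type, byte 1 bit 7 RMB
 * (removable), bytes 2-3 version/response format, byte 4 additional
 * length (31), bytes 8-15 vendor id, bytes 16-31 product id,
 * bytes 32-35 product revision.
 */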

static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	u8	*buf = (u8 *)bh->buf;
	u32	sd, sdinfo = 0;
	int	valid;

	/*
	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
	 *
	 * If a REQUEST SENSE command is received from an initiator
	 * with a pending unit attention condition (before the target
	 * generates the contingent allegiance condition), then the
	 * target shall either:
	 *   a) report any pending sense data and preserve the unit
	 *	attention condition on the logical unit, or,
	 *   b) report the unit attention condition, may discard any
	 *	pending sense data, and clear the unit attention
	 *	condition on the logical unit for that initiator.
	 *
	 * FSG normally uses option a); enable this code to use option b).
	 */
#if 0
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
	}
#endif

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
		valid = 0;
	} else {
		sd = curlun->sense_data;
		valid = curlun->info_valid << 7;
		curlun->sense_data = SS_NO_SENSE;
		curlun->info_valid = 0;
	}

	memset(buf, 0, 18);
	buf[0] = valid | 0x70;			/* Valid, current error */
	buf[2] = SK(sd);
	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
	buf[7] = 18 - 8;			/* Additional sense length */
	buf[12] = ASC(sd);
	buf[13] = ASCQ(sd);
	return 18;
}

static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	u32		lba = get_unaligned_be32(&common->cmnd[2]);
	int		pmi = common->cmnd[8];
	u8		*buf = (u8 *)bh->buf;

	/* Check the PMI and LBA fields */
	if (pmi > 1 || (pmi == 0 && lba != 0)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
						/* Max logical block */
	put_unaligned_be32(curlun->blksize, &buf[4]);	/* Block length */
	return 8;
}
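
/*
 * For reference, the 8-byte READ CAPACITY (10) response built above:
 * the LBA of the last block (num_sectors - 1) followed by the block
 * length in bytes, both big-endian.
 */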

static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	int		msf = common->cmnd[1] & 0x02;
	u32		lba = get_unaligned_be32(&common->cmnd[2]);
	u8		*buf = (u8 *)bh->buf;

	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	memset(buf, 0, 8);
	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
	store_cdrom_address(&buf[4], msf, lba);
	return 8;
}

static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	int		msf = common->cmnd[1] & 0x02;
	int		start_track = common->cmnd[6];
	u8		*buf = (u8 *)bh->buf;

	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
	    start_track > 1) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	memset(buf, 0, 20);
	buf[1] = (20 - 2);		/* TOC data length */
	buf[2] = 1;			/* First track number */
	buf[3] = 1;			/* Last track number */
	buf[5] = 0x16;			/* Data track, copying allowed */
	buf[6] = 0x01;			/* Only track is number 1 */
	store_cdrom_address(&buf[8], msf, 0);

	buf[13] = 0x16;			/* Lead-out track is data */
	buf[14] = 0xAA;			/* Lead-out track number */
	store_cdrom_address(&buf[16], msf, curlun->num_sectors);

	return 20;
}

static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	int		mscmnd = common->cmnd[0];
	u8		*buf = (u8 *)bh->buf;
	u8		*buf0 = buf;
	int		pc, page_code;
	int		changeable_values, all_pages;
	int		valid_page = 0;
	int		len, limit;

	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	pc = common->cmnd[2] >> 6;
	page_code = common->cmnd[2] & 0x3f;
	if (pc == 3) {
		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
		return -EINVAL;
	}
	changeable_values = (pc == 1);
	all_pages = (page_code == 0x3f);

	/* Write the mode parameter header. Fixed values are: default
	 * medium type, no cache control (DPOFUA), and no block descriptors.
	 * The only variable value is the WriteProtect bit. We will fill in
	 * the mode data length later. */
	memset(buf, 0, 8);
	if (mscmnd == SC_MODE_SENSE_6) {
		buf[2] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
		buf += 4;
		limit = 255;
	} else {			/* SC_MODE_SENSE_10 */
		buf[3] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
		buf += 8;
		limit = 65535;		/* Should really be FSG_BUFLEN */
	}

	/* No block descriptors */

	/* The mode pages, in numerical order. The only page we support
	 * is the Caching page. */
	if (page_code == 0x08 || all_pages) {
		valid_page = 1;
		buf[0] = 0x08;		/* Page code */
		buf[1] = 10;		/* Page length */
		memset(buf + 2, 0, 10);	/* None of the fields are changeable */

		if (!changeable_values) {
			buf[2] = 0x04;	/* Write cache enable, */
					/* Read cache not disabled */
					/* No cache retention priorities */
			put_unaligned_be16(0xffff, &buf[4]);
					/* Don't disable prefetch */
					/* Minimum prefetch = 0 */
			put_unaligned_be16(0xffff, &buf[8]);
					/* Maximum prefetch */
			put_unaligned_be16(0xffff, &buf[10]);
					/* Maximum prefetch ceiling */
		}
		buf += 12;
	}

	/* Check that a valid page was requested and the mode data length
	 * isn't too long. */
	len = buf - buf0;
	if (!valid_page || len > limit) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/* Store the mode data length */
	if (mscmnd == SC_MODE_SENSE_6)
		buf0[0] = len - 1;
	else
		put_unaligned_be16(len - 2, buf0);
	return len;
}

static int do_start_stop(struct fsg_common *common)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];

	if (!curlun) {
		return -EINVAL;
	} else if (!curlun->removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	}

	common->eject = 1;

	return 0;
}

static int do_prevent_allow(struct fsg_common *common)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	int		prevent;

	if (!curlun->removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	}

	prevent = common->cmnd[4] & 0x01;
	if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	if (curlun->prevent_medium_removal && !prevent)
		fsg_lun_fsync_sub(curlun);
	curlun->prevent_medium_removal = prevent;
	return 0;
}

static int do_read_format_capacities(struct fsg_common *common,
				     struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	u8		*buf = (u8 *)bh->buf;

	buf[0] = buf[1] = buf[2] = 0;
	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
	buf += 4;

	put_unaligned_be32(curlun->num_sectors, &buf[0]);
						/* Number of blocks */
	put_unaligned_be32(curlun->blksize, &buf[4]);	/* Block length */
	buf[4] = 0x02;				/* Current capacity */
	return 12;
}

static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];

	/* We don't support MODE SELECT */
	if (curlun)
		curlun->sense_data = SS_INVALID_COMMAND;
	return -EINVAL;
}

/*-------------------------------------------------------------------------*/

static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	rc;

	rc = fsg_set_halt(fsg, fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint halt\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
			rc = 0;
			break;
		}

		rc = usb_ep_set_halt(fsg->bulk_in);
	}
	return rc;
}

static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	rc;

	DBG(fsg, "bulk-in set wedge\n");
	rc = 0; /* usb_ep_set_wedge(fsg->bulk_in); */
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
			rc = 0;
			break;
		}
	}
	return rc;
}

static int pad_with_zeros(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh = fsg->common->next_buffhd_to_fill;
	u32			nkeep = bh->inreq->length;
	u32			nsend;
	int			rc;

	bh->state = BUF_STATE_EMPTY;		/* For the first iteration */
	fsg->common->usb_amount_left = nkeep + fsg->common->residue;
	while (fsg->common->usb_amount_left > 0) {

		/* Wait for the next buffer to be free */
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(fsg->common);
			if (rc)
				return rc;
		}

		nsend = min(fsg->common->usb_amount_left, FSG_BUFLEN);
		memset(bh->buf + nkeep, 0, nsend - nkeep);
		bh->inreq->length = nsend;
		bh->inreq->zero = 0;
		start_transfer(fsg, fsg->bulk_in, bh->inreq,
			       &bh->inreq_busy, &bh->state);
		bh = fsg->common->next_buffhd_to_fill = bh->next;
		fsg->common->usb_amount_left -= nsend;
		nkeep = 0;
	}
	return 0;
}

static int throw_away_data(struct fsg_common *common)
{
	struct fsg_buffhd	*bh;
	u32			amount;
	int			rc;

	for (bh = common->next_buffhd_to_drain;
	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
	     bh = common->next_buffhd_to_drain) {

		/* Throw away the data in a filled buffer */
		if (bh->state == BUF_STATE_FULL) {
			bh->state = BUF_STATE_EMPTY;
			common->next_buffhd_to_drain = bh->next;

			/* A short packet or an error ends everything */
			if (bh->outreq->actual != bh->outreq->length ||
			    bh->outreq->status != 0) {
				raise_exception(common,
						FSG_STATE_ABORT_BULK_OUT);
				return -EINTR;
			}
			continue;
		}

		/* Try to submit another request if we need one */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY
		    && common->usb_amount_left > 0) {
			amount = min(common->usb_amount_left, FSG_BUFLEN);

			/* amount is always divisible by 512, hence by
			 * the bulk-out maxpacket size */
			bh->outreq->length = amount;
			bh->bulk_out_intended_length = amount;
			bh->outreq->short_not_ok = 1;
			START_TRANSFER_OR(common, bulk_out, bh->outreq,
					  &bh->outreq_busy, &bh->state)
				/* Don't know what to do if
				 * common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			common->usb_amount_left -= amount;
			continue;
		}

		/* Otherwise wait for something to happen */
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}
	return 0;
}

static int finish_reply(struct fsg_common *common)
{
	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
	int			rc = 0;

	switch (common->data_dir) {
	case DATA_DIR_NONE:
		break;			/* Nothing to send */

	/* If we don't know whether the host wants to read or write,
	 * this must be CB or CBI with an unknown command. We mustn't
	 * try to send or receive any data. So stall both bulk pipes
	 * if we can and wait for a reset. */
	case DATA_DIR_UNKNOWN:
		if (!common->can_stall) {
			/* Nothing */
		} else if (fsg_is_set(common)) {
			fsg_set_halt(common->fsg, common->fsg->bulk_out);
			rc = halt_bulk_in_endpoint(common->fsg);
		} else {
			/* Don't know what to do if common->fsg is NULL */
			rc = -EIO;
		}
		break;

	/* All but the last buffer of data must have already been sent */
	case DATA_DIR_TO_HOST:
		if (common->data_size == 0) {
			/* Nothing to send */

		/* If there's no residue, simply send the last buffer */
		} else if (common->residue == 0) {
			bh->inreq->zero = 0;
			START_TRANSFER_OR(common, bulk_in, bh->inreq,
					  &bh->inreq_busy, &bh->state)
				return -EIO;
			common->next_buffhd_to_fill = bh->next;

		/* For Bulk-only, if we're allowed to stall then send the
		 * short packet and halt the bulk-in endpoint. If we can't
		 * stall, pad out the remaining data with 0's. */
		} else if (common->can_stall) {
			bh->inreq->zero = 1;
			START_TRANSFER_OR(common, bulk_in, bh->inreq,
					  &bh->inreq_busy, &bh->state)
				/* Don't know what to do if
				 * common->fsg is NULL */
				rc = -EIO;
			common->next_buffhd_to_fill = bh->next;
			if (common->fsg)
				rc = halt_bulk_in_endpoint(common->fsg);
		} else if (fsg_is_set(common)) {
			rc = pad_with_zeros(common->fsg);
		} else {
			/* Don't know what to do if common->fsg is NULL */
			rc = -EIO;
		}
		break;

	/* We have processed all we want from the data the host has sent.
	 * There may still be outstanding bulk-out requests. */
	case DATA_DIR_FROM_HOST:
		if (common->residue == 0) {
			/* Nothing to receive */

		/* Did the host stop sending unexpectedly early? */
		} else if (common->short_packet_received) {
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;

		/* We haven't processed all the incoming data. Even though
		 * we may be allowed to stall, doing so would cause a race.
		 * The controller may already have ACK'ed all the remaining
		 * bulk-out packets, in which case the host wouldn't see a
		 * STALL. Not realizing the endpoint was halted, it wouldn't
		 * clear the halt -- leading to problems later on. */
#if 0
		} else if (common->can_stall) {
			if (fsg_is_set(common))
				fsg_set_halt(common->fsg,
					     common->fsg->bulk_out);
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;
#endif

		/* We can't stall. Read in the excess data and throw it
		 * all away. */
		} else {
			rc = throw_away_data(common);
		}
		break;
	}
	return rc;
}

static int send_status(struct fsg_common *common)
{
	struct fsg_lun		*curlun = &common->luns[common->lun];
	struct fsg_buffhd	*bh;
	struct bulk_cs_wrap	*csw;
	int			rc;
	u8			status = USB_STATUS_PASS;
	u32			sd, sdinfo = 0;

	/* Wait for the next buffer to become available */
	bh = common->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	if (curlun)
		sd = curlun->sense_data;
	else if (common->bad_lun_okay)
		sd = SS_NO_SENSE;
	else
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;

	if (common->phase_error) {
		DBG(common, "sending phase-error status\n");
		status = USB_STATUS_PHASE_ERROR;
		sd = SS_INVALID_COMMAND;
	} else if (sd != SS_NO_SENSE) {
		DBG(common, "sending command-failure status\n");
		status = USB_STATUS_FAIL;
		VDBG(common, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
		     "  info x%x\n",
		     SK(sd), ASC(sd), ASCQ(sd), sdinfo);
	}

	/* Store and send the Bulk-only CSW */
	csw = (void *)bh->buf;

	csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
	csw->Tag = common->tag;
	csw->Residue = cpu_to_le32(common->residue);
	csw->Status = status;

	bh->inreq->length = USB_BULK_CS_WRAP_LEN;
	bh->inreq->zero = 0;
	START_TRANSFER_OR(common, bulk_in, bh->inreq,
			  &bh->inreq_busy, &bh->state)
		/* Don't know what to do if common->fsg is NULL */
		return -EIO;

	common->next_buffhd_to_fill = bh->next;
	return 0;
}
1655
Piotr Wilczek91637d72013-03-05 12:10:16 +01001656/*-------------------------------------------------------------------------*/
1657
1658/* Check whether the command is properly formed and whether its data size
1659 * and direction agree with the values we already have. */
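/* The "mask" argument marks which CDB bytes other than byte 0 may be
 * non-zero: bit i set means byte i is allowed.  For example the READ(10)
 * caller below passes (1<<1) | (0xf<<2) | (3<<7), permitting the flags
 * byte 1, the four logical-block-address bytes 2-5 and the two
 * transfer-length bytes 7-8; any other non-zero byte makes the command
 * fail with SS_INVALID_FIELD_IN_CDB.
 */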
1660static int check_command(struct fsg_common *common, int cmnd_size,
1661 enum data_direction data_dir, unsigned int mask,
1662 int needs_medium, const char *name)
1663{
1664 int i;
1665 int lun = common->cmnd[1] >> 5;
1666 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
1667 char hdlen[20];
1668 struct fsg_lun *curlun;
1669
1670 hdlen[0] = 0;
1671 if (common->data_dir != DATA_DIR_UNKNOWN)
1672 sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
1673 common->data_size);
1674 VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1675 name, cmnd_size, dirletter[(int) data_dir],
1676 common->data_size_from_cmnd, common->cmnd_size, hdlen);
1677
1678 /* We can't reply at all until we know the correct data direction
1679 * and size. */
1680 if (common->data_size_from_cmnd == 0)
1681 data_dir = DATA_DIR_NONE;
1682 if (common->data_size < common->data_size_from_cmnd) {
1683 /* Host data size < Device data size is a phase error.
1684 * Carry out the command, but only transfer as much as
1685 * we are allowed. */
1686 common->data_size_from_cmnd = common->data_size;
1687 common->phase_error = 1;
1688 }
1689 common->residue = common->data_size;
1690 common->usb_amount_left = common->data_size;
1691
1692	/* Conflicting data directions are a phase error */
1693 if (common->data_dir != data_dir
1694 && common->data_size_from_cmnd > 0) {
1695 common->phase_error = 1;
1696 return -EINVAL;
1697 }
1698
1699 /* Verify the length of the command itself */
1700 if (cmnd_size != common->cmnd_size) {
1701
1702 /* Special case workaround: There are plenty of buggy SCSI
1703		/* Special case workaround: There are plenty of buggy SCSI
1704		 * implementations.  Many pass a wrong command size in the
1705		 * cbw->Length field.  For those cases we always try to work
1706		 * around the problem by using the length sent by the host
1707		 * side, provided it is at least as large as the correct
1708		 * command length.
1709		 * Examples of such cases are MS-Windows, which issues
1710		 * REQUEST SENSE with cbw->Length == 12 where it should
1711		 * be 6, and the Xbox 360, which issues INQUIRY, TEST UNIT
1712		 * READY and REQUEST SENSE with cbw->Length == 10 where it
1713		 * should be 6 as well.
1714		 */
1715 DBG(common, "%s is buggy! Expected length %d "
1716 "but we got %d\n", name,
1717 cmnd_size, common->cmnd_size);
1718 cmnd_size = common->cmnd_size;
1719 } else {
1720 common->phase_error = 1;
1721 return -EINVAL;
1722 }
1723 }
1724
1725 /* Check that the LUN values are consistent */
1726 if (common->lun != lun)
1727 DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
1728 common->lun, lun);
1729
1730 /* Check the LUN */
Heinrich Schuchardtd7a0fc82018-03-18 13:12:14 +01001731 if (common->lun < common->nluns) {
Piotr Wilczek91637d72013-03-05 12:10:16 +01001732 curlun = &common->luns[common->lun];
1733 if (common->cmnd[0] != SC_REQUEST_SENSE) {
1734 curlun->sense_data = SS_NO_SENSE;
1735 curlun->info_valid = 0;
1736 }
1737 } else {
1738 curlun = NULL;
1739 common->bad_lun_okay = 0;
1740
1741 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
1742		 * to use unsupported LUNs; all others are not. */
1743 if (common->cmnd[0] != SC_INQUIRY &&
1744 common->cmnd[0] != SC_REQUEST_SENSE) {
1745 DBG(common, "unsupported LUN %d\n", common->lun);
1746 return -EINVAL;
1747 }
1748 }
1749#if 0
1750 /* If a unit attention condition exists, only INQUIRY and
1751 * REQUEST SENSE commands are allowed; anything else must fail. */
1752 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
1753 common->cmnd[0] != SC_INQUIRY &&
1754 common->cmnd[0] != SC_REQUEST_SENSE) {
1755 curlun->sense_data = curlun->unit_attention_data;
1756 curlun->unit_attention_data = SS_NO_SENSE;
1757 return -EINVAL;
1758 }
1759#endif
1760 /* Check that only command bytes listed in the mask are non-zero */
1761 common->cmnd[1] &= 0x1f; /* Mask away the LUN */
1762 for (i = 1; i < cmnd_size; ++i) {
1763 if (common->cmnd[i] && !(mask & (1 << i))) {
1764 if (curlun)
1765 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1766 return -EINVAL;
1767 }
1768 }
1769
1770 return 0;
1771}
1772
Caleb Connolly00e9e0c2024-03-20 14:30:50 +00001773/* check_command() wrapper that converts the expected data size from blocks to bytes */
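/* For example, assuming a LUN with 512-byte blocks (blkbits == 9), a
 * READ(10) asking for 8 blocks enters with data_size_from_cmnd == 8 and
 * leaves this wrapper with 8 << 9 == 4096 bytes, which check_command()
 * then compares against the CBW's DataTransferLength.
 */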
1774static int check_command_size_in_blocks(struct fsg_common *common,
1775 int cmnd_size, enum data_direction data_dir,
1776 unsigned int mask, int needs_medium, const char *name)
1777{
1778 common->data_size_from_cmnd <<= common->luns[common->lun].blkbits;
1779 return check_command(common, cmnd_size, data_dir,
1780 mask, needs_medium, name);
1781}
1782
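/* do_scsi_command() below dispatches one CDB: it derives the expected
 * data length from the command bytes, validates the command with
 * check_command(), then runs the matching do_*() handler.  For
 * host-bound data a non-negative handler return is the number of bytes
 * placed in the buffer; it is clamped to data_size_from_cmnd and
 * subtracted from the residue at the end of the function, while -EINVAL
 * is turned into a zero-length reply with the sense data left for a
 * later REQUEST SENSE.
 */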
Piotr Wilczek91637d72013-03-05 12:10:16 +01001783static int do_scsi_command(struct fsg_common *common)
1784{
1785 struct fsg_buffhd *bh;
1786 int rc;
1787 int reply = -EINVAL;
1788 int i;
1789 static char unknown[16];
1790 struct fsg_lun *curlun = &common->luns[common->lun];
1791
1792 dump_cdb(common);
1793
1794 /* Wait for the next buffer to become available for data or status */
1795 bh = common->next_buffhd_to_fill;
1796 common->next_buffhd_to_drain = bh;
1797 while (bh->state != BUF_STATE_EMPTY) {
1798 rc = sleep_thread(common);
1799 if (rc)
1800 return rc;
1801 }
1802 common->phase_error = 0;
1803 common->short_packet_received = 0;
1804
1805 down_read(&common->filesem); /* We're using the backing file */
1806 switch (common->cmnd[0]) {
1807
1808 case SC_INQUIRY:
1809 common->data_size_from_cmnd = common->cmnd[4];
1810 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1811 (1<<4), 0,
1812 "INQUIRY");
1813 if (reply == 0)
1814 reply = do_inquiry(common, bh);
1815 break;
1816
1817 case SC_MODE_SELECT_6:
1818 common->data_size_from_cmnd = common->cmnd[4];
1819 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
1820 (1<<1) | (1<<4), 0,
1821 "MODE SELECT(6)");
1822 if (reply == 0)
1823 reply = do_mode_select(common, bh);
1824 break;
1825
1826 case SC_MODE_SELECT_10:
1827 common->data_size_from_cmnd =
1828 get_unaligned_be16(&common->cmnd[7]);
1829 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
1830 (1<<1) | (3<<7), 0,
1831 "MODE SELECT(10)");
1832 if (reply == 0)
1833 reply = do_mode_select(common, bh);
1834 break;
1835
1836 case SC_MODE_SENSE_6:
1837 common->data_size_from_cmnd = common->cmnd[4];
1838 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1839 (1<<1) | (1<<2) | (1<<4), 0,
1840 "MODE SENSE(6)");
1841 if (reply == 0)
1842 reply = do_mode_sense(common, bh);
1843 break;
1844
1845 case SC_MODE_SENSE_10:
1846 common->data_size_from_cmnd =
1847 get_unaligned_be16(&common->cmnd[7]);
1848 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1849 (1<<1) | (1<<2) | (3<<7), 0,
1850 "MODE SENSE(10)");
1851 if (reply == 0)
1852 reply = do_mode_sense(common, bh);
1853 break;
1854
1855 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
1856 common->data_size_from_cmnd = 0;
1857 reply = check_command(common, 6, DATA_DIR_NONE,
1858 (1<<4), 0,
1859 "PREVENT-ALLOW MEDIUM REMOVAL");
1860 if (reply == 0)
1861 reply = do_prevent_allow(common);
1862 break;
1863
1864 case SC_READ_6:
1865 i = common->cmnd[4];
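		/* Per the SCSI spec, a READ(6)/WRITE(6) transfer length of
		 * zero means 256 blocks, hence the special case below. */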
Caleb Connolly00e9e0c2024-03-20 14:30:50 +00001866 common->data_size_from_cmnd = (i == 0 ? 256 : i);
1867 reply = check_command_size_in_blocks(common, 6, DATA_DIR_TO_HOST,
1868 (7<<1) | (1<<4), 1,
1869 "READ(6)");
Piotr Wilczek91637d72013-03-05 12:10:16 +01001870 if (reply == 0)
1871 reply = do_read(common);
1872 break;
1873
1874 case SC_READ_10:
1875 common->data_size_from_cmnd =
Caleb Connolly00e9e0c2024-03-20 14:30:50 +00001876 get_unaligned_be16(&common->cmnd[7]);
1877 reply = check_command_size_in_blocks(common, 10, DATA_DIR_TO_HOST,
1878 (1<<1) | (0xf<<2) | (3<<7), 1,
1879 "READ(10)");
Piotr Wilczek91637d72013-03-05 12:10:16 +01001880 if (reply == 0)
1881 reply = do_read(common);
1882 break;
1883
1884 case SC_READ_12:
1885 common->data_size_from_cmnd =
Caleb Connolly00e9e0c2024-03-20 14:30:50 +00001886 get_unaligned_be32(&common->cmnd[6]);
1887 reply = check_command_size_in_blocks(common, 12, DATA_DIR_TO_HOST,
1888 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1889 "READ(12)");
Piotr Wilczek91637d72013-03-05 12:10:16 +01001890 if (reply == 0)
1891 reply = do_read(common);
1892 break;
1893
1894 case SC_READ_CAPACITY:
1895 common->data_size_from_cmnd = 8;
1896 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1897 (0xf<<2) | (1<<8), 1,
1898 "READ CAPACITY");
1899 if (reply == 0)
1900 reply = do_read_capacity(common, bh);
1901 break;
1902
1903 case SC_READ_HEADER:
1904 if (!common->luns[common->lun].cdrom)
1905 goto unknown_cmnd;
1906 common->data_size_from_cmnd =
1907 get_unaligned_be16(&common->cmnd[7]);
1908 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1909 (3<<7) | (0x1f<<1), 1,
1910 "READ HEADER");
1911 if (reply == 0)
1912 reply = do_read_header(common, bh);
1913 break;
1914
1915 case SC_READ_TOC:
1916 if (!common->luns[common->lun].cdrom)
1917 goto unknown_cmnd;
1918 common->data_size_from_cmnd =
1919 get_unaligned_be16(&common->cmnd[7]);
1920 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1921 (7<<6) | (1<<1), 1,
1922 "READ TOC");
1923 if (reply == 0)
1924 reply = do_read_toc(common, bh);
1925 break;
1926
1927 case SC_READ_FORMAT_CAPACITIES:
1928 common->data_size_from_cmnd =
1929 get_unaligned_be16(&common->cmnd[7]);
1930 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1931 (3<<7), 1,
1932 "READ FORMAT CAPACITIES");
1933 if (reply == 0)
1934 reply = do_read_format_capacities(common, bh);
1935 break;
1936
1937 case SC_REQUEST_SENSE:
1938 common->data_size_from_cmnd = common->cmnd[4];
1939 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1940 (1<<4), 0,
1941 "REQUEST SENSE");
1942 if (reply == 0)
1943 reply = do_request_sense(common, bh);
1944 break;
1945
1946 case SC_START_STOP_UNIT:
1947 common->data_size_from_cmnd = 0;
1948 reply = check_command(common, 6, DATA_DIR_NONE,
1949 (1<<1) | (1<<4), 0,
1950 "START-STOP UNIT");
1951 if (reply == 0)
1952 reply = do_start_stop(common);
1953 break;
1954
1955 case SC_SYNCHRONIZE_CACHE:
1956 common->data_size_from_cmnd = 0;
1957 reply = check_command(common, 10, DATA_DIR_NONE,
1958 (0xf<<2) | (3<<7), 1,
1959 "SYNCHRONIZE CACHE");
1960 if (reply == 0)
1961 reply = do_synchronize_cache(common);
1962 break;
1963
1964 case SC_TEST_UNIT_READY:
1965 common->data_size_from_cmnd = 0;
1966 reply = check_command(common, 6, DATA_DIR_NONE,
1967 0, 1,
1968 "TEST UNIT READY");
1969 break;
1970
1971 /* Although optional, this command is used by MS-Windows. We
1972 * support a minimal version: BytChk must be 0. */
1973 case SC_VERIFY:
1974 common->data_size_from_cmnd = 0;
1975 reply = check_command(common, 10, DATA_DIR_NONE,
1976 (1<<1) | (0xf<<2) | (3<<7), 1,
1977 "VERIFY");
1978 if (reply == 0)
1979 reply = do_verify(common);
1980 break;
1981
1982 case SC_WRITE_6:
1983 i = common->cmnd[4];
Caleb Connolly00e9e0c2024-03-20 14:30:50 +00001984 common->data_size_from_cmnd = (i == 0 ? 256 : i);
1985 reply = check_command_size_in_blocks(common, 6, DATA_DIR_FROM_HOST,
1986 (7<<1) | (1<<4), 1,
1987 "WRITE(6)");
Piotr Wilczek91637d72013-03-05 12:10:16 +01001988 if (reply == 0)
1989 reply = do_write(common);
1990 break;
1991
1992 case SC_WRITE_10:
1993 common->data_size_from_cmnd =
Caleb Connolly00e9e0c2024-03-20 14:30:50 +00001994 get_unaligned_be16(&common->cmnd[7]);
1995 reply = check_command_size_in_blocks(common, 10, DATA_DIR_FROM_HOST,
1996 (1<<1) | (0xf<<2) | (3<<7), 1,
1997 "WRITE(10)");
Piotr Wilczek91637d72013-03-05 12:10:16 +01001998 if (reply == 0)
1999 reply = do_write(common);
2000 break;
2001
2002 case SC_WRITE_12:
2003 common->data_size_from_cmnd =
Caleb Connolly00e9e0c2024-03-20 14:30:50 +00002004 get_unaligned_be32(&common->cmnd[6]);
2005 reply = check_command_size_in_blocks(common, 12, DATA_DIR_FROM_HOST,
2006 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2007 "WRITE(12)");
Piotr Wilczek91637d72013-03-05 12:10:16 +01002008 if (reply == 0)
2009 reply = do_write(common);
2010 break;
2011
2012 /* Some mandatory commands that we recognize but don't implement.
2013 * They don't mean much in this setting. It's left as an exercise
2014 * for anyone interested to implement RESERVE and RELEASE in terms
2015 * of Posix locks. */
2016 case SC_FORMAT_UNIT:
2017 case SC_RELEASE:
2018 case SC_RESERVE:
2019 case SC_SEND_DIAGNOSTIC:
2020 /* Fall through */
2021
2022 default:
2023unknown_cmnd:
2024 common->data_size_from_cmnd = 0;
2025 sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
2026 reply = check_command(common, common->cmnd_size,
2027 DATA_DIR_UNKNOWN, 0xff, 0, unknown);
2028 if (reply == 0) {
2029 curlun->sense_data = SS_INVALID_COMMAND;
2030 reply = -EINVAL;
2031 }
2032 break;
2033 }
2034 up_read(&common->filesem);
2035
2036 if (reply == -EINTR)
2037 return -EINTR;
2038
2039 /* Set up the single reply buffer for finish_reply() */
2040 if (reply == -EINVAL)
2041 reply = 0; /* Error reply length */
2042 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
2043 reply = min((u32) reply, common->data_size_from_cmnd);
2044 bh->inreq->length = reply;
2045 bh->state = BUF_STATE_FULL;
2046 common->residue -= reply;
2047 } /* Otherwise it's already set */
2048
2049 return 0;
2050}
2051
2052/*-------------------------------------------------------------------------*/
2053
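/* received_cbw() below validates a Bulk-Only Transport Command Block
 * Wrapper.  Per the BOT specification a CBW is exactly 31 bytes
 * (USB_BULK_CB_WRAP_LEN): dCBWSignature ('USBC', 0x43425355 LE),
 * dCBWTag, dCBWDataTransferLength, bmCBWFlags (bit 7 set means
 * device-to-host), bCBWLUN, bCBWCBLength (1..16) and the 16-byte CDB
 * area.  An invalid CBW (wrong length or signature) wedges the bulk-in
 * endpoint as required by section 6.6.1 of the spec; a well-formed but
 * non-meaningful CBW only stalls the pipes when stalling is allowed.
 */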
2054static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2055{
2056 struct usb_request *req = bh->outreq;
2057 struct fsg_bulk_cb_wrap *cbw = req->buf;
2058 struct fsg_common *common = fsg->common;
2059
2060 /* Was this a real packet? Should it be ignored? */
2061 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2062 return -EINVAL;
2063
2064 /* Is the CBW valid? */
2065 if (req->actual != USB_BULK_CB_WRAP_LEN ||
2066 cbw->Signature != cpu_to_le32(
2067 USB_BULK_CB_SIG)) {
2068 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2069 req->actual,
2070 le32_to_cpu(cbw->Signature));
2071
2072 /* The Bulk-only spec says we MUST stall the IN endpoint
2073 * (6.6.1), so it's unavoidable. It also says we must
2074 * retain this state until the next reset, but there's
2075 * no way to tell the controller driver it should ignore
2076 * Clear-Feature(HALT) requests.
2077 *
2078 * We aren't required to halt the OUT endpoint; instead
2079 * we can simply accept and discard any data received
2080 * until the next reset. */
2081 wedge_bulk_in_endpoint(fsg);
Bryan O'Donoghue56312512018-04-30 15:56:09 +01002082 generic_set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
Piotr Wilczek91637d72013-03-05 12:10:16 +01002083 return -EINVAL;
2084 }
2085
2086 /* Is the CBW meaningful? */
2087 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2088 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2089 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2090 "cmdlen %u\n",
2091 cbw->Lun, cbw->Flags, cbw->Length);
2092
2093 /* We can do anything we want here, so let's stall the
2094 * bulk pipes if we are allowed to. */
2095 if (common->can_stall) {
2096 fsg_set_halt(fsg, fsg->bulk_out);
2097 halt_bulk_in_endpoint(fsg);
2098 }
2099 return -EINVAL;
2100 }
2101
2102 /* Save the command for later */
2103 common->cmnd_size = cbw->Length;
2104 memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
2105 if (cbw->Flags & USB_BULK_IN_FLAG)
2106 common->data_dir = DATA_DIR_TO_HOST;
2107 else
2108 common->data_dir = DATA_DIR_FROM_HOST;
2109 common->data_size = le32_to_cpu(cbw->DataTransferLength);
2110 if (common->data_size == 0)
2111 common->data_dir = DATA_DIR_NONE;
2112 common->lun = cbw->Lun;
2113 common->tag = cbw->Tag;
2114 return 0;
2115}
2116
Piotr Wilczek91637d72013-03-05 12:10:16 +01002117static int get_next_command(struct fsg_common *common)
2118{
2119 struct fsg_buffhd *bh;
2120 int rc = 0;
2121
2122 /* Wait for the next buffer to become available */
2123 bh = common->next_buffhd_to_fill;
2124 while (bh->state != BUF_STATE_EMPTY) {
2125 rc = sleep_thread(common);
2126 if (rc)
2127 return rc;
2128 }
2129
2130 /* Queue a request to read a Bulk-only CBW */
2131 set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
2132 bh->outreq->short_not_ok = 1;
2133 START_TRANSFER_OR(common, bulk_out, bh->outreq,
2134 &bh->outreq_busy, &bh->state)
2135 /* Don't know what to do if common->fsg is NULL */
2136 return -EIO;
2137
2138 /* We will drain the buffer in software, which means we
2139 * can reuse it for the next filling. No need to advance
2140 * next_buffhd_to_fill. */
2141
2142 /* Wait for the CBW to arrive */
2143 while (bh->state != BUF_STATE_FULL) {
2144 rc = sleep_thread(common);
2145 if (rc)
2146 return rc;
2147 }
2148
2149 rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
2150 bh->state = BUF_STATE_EMPTY;
2151
2152 return rc;
2153}
2154
Piotr Wilczek91637d72013-03-05 12:10:16 +01002155/*-------------------------------------------------------------------------*/
2156
2157static int enable_endpoint(struct fsg_common *common, struct usb_ep *ep,
2158 const struct usb_endpoint_descriptor *d)
2159{
2160 int rc;
2161
2162 ep->driver_data = common;
2163 rc = usb_ep_enable(ep, d);
2164 if (rc)
2165 ERROR(common, "can't enable %s, result %d\n", ep->name, rc);
2166 return rc;
2167}
2168
2169static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
2170 struct usb_request **preq)
2171{
2172 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2173 if (*preq)
2174 return 0;
2175 ERROR(common, "can't allocate request for %s\n", ep->name);
2176 return -ENOMEM;
2177}
2178
2179/* Reset interface setting and re-init endpoint state (toggle etc). */
2180static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
2181{
2182 const struct usb_endpoint_descriptor *d;
2183 struct fsg_dev *fsg;
2184 int i, rc = 0;
2185
2186 if (common->running)
2187 DBG(common, "reset interface\n");
2188
2189reset:
2190 /* Deallocate the requests */
2191 if (common->fsg) {
2192 fsg = common->fsg;
2193
2194 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2195 struct fsg_buffhd *bh = &common->buffhds[i];
2196
2197 if (bh->inreq) {
2198 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2199 bh->inreq = NULL;
2200 }
2201 if (bh->outreq) {
2202 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2203 bh->outreq = NULL;
2204 }
2205 }
2206
2207 /* Disable the endpoints */
2208 if (fsg->bulk_in_enabled) {
2209 usb_ep_disable(fsg->bulk_in);
2210 fsg->bulk_in_enabled = 0;
2211 }
2212 if (fsg->bulk_out_enabled) {
2213 usb_ep_disable(fsg->bulk_out);
2214 fsg->bulk_out_enabled = 0;
2215 }
2216
2217 common->fsg = NULL;
2218 /* wake_up(&common->fsg_wait); */
2219 }
2220
2221 common->running = 0;
2222 if (!new_fsg || rc)
2223 return rc;
2224
2225 common->fsg = new_fsg;
2226 fsg = common->fsg;
2227
2228 /* Enable the endpoints */
2229 d = fsg_ep_desc(common->gadget,
2230 &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
2231 rc = enable_endpoint(common, fsg->bulk_in, d);
2232 if (rc)
2233 goto reset;
2234 fsg->bulk_in_enabled = 1;
2235
2236 d = fsg_ep_desc(common->gadget,
2237 &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
2238 rc = enable_endpoint(common, fsg->bulk_out, d);
2239 if (rc)
2240 goto reset;
2241 fsg->bulk_out_enabled = 1;
Vivek Gautam1d62db82013-05-13 15:53:38 +05302242 common->bulk_out_maxpacket =
2243 le16_to_cpu(get_unaligned(&d->wMaxPacketSize));
Bryan O'Donoghue56312512018-04-30 15:56:09 +01002244 generic_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
Piotr Wilczek91637d72013-03-05 12:10:16 +01002245
2246 /* Allocate the requests */
2247 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2248 struct fsg_buffhd *bh = &common->buffhds[i];
2249
2250 rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
2251 if (rc)
2252 goto reset;
2253 rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
2254 if (rc)
2255 goto reset;
2256 bh->inreq->buf = bh->outreq->buf = bh->buf;
2257 bh->inreq->context = bh->outreq->context = bh;
2258 bh->inreq->complete = bulk_in_complete;
2259 bh->outreq->complete = bulk_out_complete;
2260 }
2261
2262 common->running = 1;
2263
2264 return rc;
2265}
2266
Piotr Wilczek91637d72013-03-05 12:10:16 +01002267/****************************** ALT CONFIGS ******************************/
2268
Piotr Wilczek91637d72013-03-05 12:10:16 +01002269static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2270{
2271 struct fsg_dev *fsg = fsg_from_func(f);
2272 fsg->common->new_fsg = fsg;
2273 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2274 return 0;
2275}
2276
2277static void fsg_disable(struct usb_function *f)
2278{
2279 struct fsg_dev *fsg = fsg_from_func(f);
2280 fsg->common->new_fsg = NULL;
2281 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2282}
2283
2284/*-------------------------------------------------------------------------*/
2285
2286static void handle_exception(struct fsg_common *common)
2287{
2288 int i;
2289 struct fsg_buffhd *bh;
2290 enum fsg_state old_state;
2291 struct fsg_lun *curlun;
2292 unsigned int exception_req_tag;
2293
2294 /* Cancel all the pending transfers */
2295 if (common->fsg) {
2296 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2297 bh = &common->buffhds[i];
2298 if (bh->inreq_busy)
2299 usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
2300 if (bh->outreq_busy)
2301 usb_ep_dequeue(common->fsg->bulk_out,
2302 bh->outreq);
2303 }
2304
2305 /* Wait until everything is idle */
2306 for (;;) {
2307 int num_active = 0;
2308 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2309 bh = &common->buffhds[i];
2310 num_active += bh->inreq_busy + bh->outreq_busy;
2311 }
2312 if (num_active == 0)
2313 break;
2314 if (sleep_thread(common))
2315 return;
2316 }
2317
2318 /* Clear out the controller's fifos */
2319 if (common->fsg->bulk_in_enabled)
2320 usb_ep_fifo_flush(common->fsg->bulk_in);
2321 if (common->fsg->bulk_out_enabled)
2322 usb_ep_fifo_flush(common->fsg->bulk_out);
2323 }
2324
2325 /* Reset the I/O buffer states and pointers, the SCSI
2326 * state, and the exception. Then invoke the handler. */
2327
2328 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2329 bh = &common->buffhds[i];
2330 bh->state = BUF_STATE_EMPTY;
2331 }
2332 common->next_buffhd_to_fill = &common->buffhds[0];
2333 common->next_buffhd_to_drain = &common->buffhds[0];
2334 exception_req_tag = common->exception_req_tag;
2335 old_state = common->state;
2336
2337 if (old_state == FSG_STATE_ABORT_BULK_OUT)
2338 common->state = FSG_STATE_STATUS_PHASE;
2339 else {
2340 for (i = 0; i < common->nluns; ++i) {
2341 curlun = &common->luns[i];
2342 curlun->sense_data = SS_NO_SENSE;
2343 curlun->info_valid = 0;
2344 }
2345 common->state = FSG_STATE_IDLE;
2346 }
2347
2348 /* Carry out any extra actions required for the exception */
2349 switch (old_state) {
2350 case FSG_STATE_ABORT_BULK_OUT:
2351 send_status(common);
2352
2353 if (common->state == FSG_STATE_STATUS_PHASE)
2354 common->state = FSG_STATE_IDLE;
2355 break;
2356
2357 case FSG_STATE_RESET:
2358 /* In case we were forced against our will to halt a
2359 * bulk endpoint, clear the halt now. (The SuperH UDC
2360 * requires this.) */
2361 if (!fsg_is_set(common))
2362 break;
2363 if (test_and_clear_bit(IGNORE_BULK_OUT,
2364 &common->fsg->atomic_bitflags))
2365 usb_ep_clear_halt(common->fsg->bulk_in);
2366
2367 if (common->ep0_req_tag == exception_req_tag)
2368 ep0_queue(common); /* Complete the status stage */
2369
2370 break;
2371
2372 case FSG_STATE_CONFIG_CHANGE:
2373 do_set_interface(common, common->new_fsg);
2374 break;
2375
2376 case FSG_STATE_EXIT:
2377 case FSG_STATE_TERMINATED:
2378 do_set_interface(common, NULL); /* Free resources */
2379 common->state = FSG_STATE_TERMINATED; /* Stop the thread */
2380 break;
2381
2382 case FSG_STATE_INTERFACE_CHANGE:
2383 case FSG_STATE_DISCONNECT:
2384 case FSG_STATE_COMMAND_PHASE:
2385 case FSG_STATE_DATA_PHASE:
2386 case FSG_STATE_STATUS_PHASE:
2387 case FSG_STATE_IDLE:
2388 break;
2389 }
2390}
2391
2392/*-------------------------------------------------------------------------*/
2393
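/* fsg_main_thread() runs a single pass of the Bulk-Only state machine
 * per call (note the do { } while (0) loop): handle any pending
 * exception, fetch a CBW with get_next_command(), execute it through
 * do_scsi_command() and finish_reply(), then report the outcome with
 * send_status().  The caller is therefore expected to invoke it
 * repeatedly for as long as the storage is exported.
 */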
2394int fsg_main_thread(void *common_)
2395{
Przemyslaw Marczak06ef7cc2013-10-23 14:30:46 +02002396 int ret;
Piotr Wilczek91637d72013-03-05 12:10:16 +01002397 struct fsg_common *common = the_fsg_common;
2398 /* The main loop */
2399 do {
2400 if (exception_in_progress(common)) {
2401 handle_exception(common);
2402 continue;
2403 }
2404
2405 if (!common->running) {
Przemyslaw Marczak06ef7cc2013-10-23 14:30:46 +02002406 ret = sleep_thread(common);
2407 if (ret)
2408 return ret;
2409
Piotr Wilczek91637d72013-03-05 12:10:16 +01002410 continue;
2411 }
2412
Przemyslaw Marczak06ef7cc2013-10-23 14:30:46 +02002413 ret = get_next_command(common);
2414 if (ret)
2415 return ret;
Piotr Wilczek91637d72013-03-05 12:10:16 +01002416
2417 if (!exception_in_progress(common))
2418 common->state = FSG_STATE_DATA_PHASE;
2419
2420 if (do_scsi_command(common) || finish_reply(common))
2421 continue;
2422
2423 if (!exception_in_progress(common))
2424 common->state = FSG_STATE_STATUS_PHASE;
2425
2426 if (send_status(common))
2427 continue;
2428
2429 if (!exception_in_progress(common))
2430 common->state = FSG_STATE_IDLE;
2431 } while (0);
2432
2433 common->thread_task = NULL;
2434
2435 return 0;
2436}
2437
2438static void fsg_common_release(struct kref *ref);
2439
2440static struct fsg_common *fsg_common_init(struct fsg_common *common,
2441 struct usb_composite_dev *cdev)
2442{
2443 struct usb_gadget *gadget = cdev->gadget;
2444 struct fsg_buffhd *bh;
2445 struct fsg_lun *curlun;
2446 int nluns, i, rc;
2447
2448 /* Find out how many LUNs there should be */
Stephen Warren9e7d5882015-12-07 11:38:50 -07002449 nluns = ums_count;
Piotr Wilczek91637d72013-03-05 12:10:16 +01002450 if (nluns < 1 || nluns > FSG_MAX_LUNS) {
2451 printf("invalid number of LUNs: %u\n", nluns);
2452 return ERR_PTR(-EINVAL);
2453 }
2454
2455 /* Allocate? */
2456 if (!common) {
Jeroen Hofstee6931bff2014-06-09 15:28:59 +02002457 common = calloc(sizeof(*common), 1);
Piotr Wilczek91637d72013-03-05 12:10:16 +01002458 if (!common)
2459 return ERR_PTR(-ENOMEM);
2460 common->free_storage_on_release = 1;
2461 } else {
Jeroen Hofstee6931bff2014-06-09 15:28:59 +02002462 memset(common, 0, sizeof(*common));
Piotr Wilczek91637d72013-03-05 12:10:16 +01002463 common->free_storage_on_release = 0;
2464 }
2465
2466 common->ops = NULL;
2467 common->private_data = NULL;
2468
2469 common->gadget = gadget;
2470 common->ep0 = gadget->ep0;
2471 common->ep0req = cdev->req;
2472
2473 /* Maybe allocate device-global string IDs, and patch descriptors */
2474 if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
2475 rc = usb_string_id(cdev);
2476 if (unlikely(rc < 0))
2477 goto error_release;
2478 fsg_strings[FSG_STRING_INTERFACE].id = rc;
2479 fsg_intf_desc.iInterface = rc;
2480 }
2481
2482	/* Create the LUNs and open the backing storage for each of
2483	 * them. */
2484 curlun = calloc(nluns, sizeof *curlun);
2485 if (!curlun) {
2486 rc = -ENOMEM;
2487 goto error_release;
2488 }
2489 common->nluns = nluns;
2490
2491 for (i = 0; i < nluns; i++) {
2492 common->luns[i].removable = 1;
2493
Caleb Connolly00e9e0c2024-03-20 14:30:50 +00002494		rc = fsg_lun_open(&common->luns[i], ums[i].num_sectors, ums[i].block_dev.blksz, "");
Piotr Wilczek91637d72013-03-05 12:10:16 +01002495 if (rc)
2496 goto error_luns;
2497 }
2498 common->lun = 0;
2499
2500 /* Data buffers cyclic list */
2501 bh = common->buffhds;
2502
2503 i = FSG_NUM_BUFFERS;
2504 goto buffhds_first_it;
2505 do {
2506 bh->next = bh + 1;
2507 ++bh;
2508buffhds_first_it:
2509 bh->inreq_busy = 0;
2510 bh->outreq_busy = 0;
Lukasz Majewski05751132014-02-05 10:10:41 +01002511 bh->buf = memalign(CONFIG_SYS_CACHELINE_SIZE, FSG_BUFLEN);
Piotr Wilczek91637d72013-03-05 12:10:16 +01002512 if (unlikely(!bh->buf)) {
2513 rc = -ENOMEM;
2514 goto error_release;
2515 }
2516 } while (--i);
2517 bh->next = common->buffhds;
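	/* The buffer heads now form a circular list of FSG_NUM_BUFFERS
	 * entries; next_buffhd_to_fill and next_buffhd_to_drain walk the
	 * ring independently, which is what provides the double-buffering
	 * described at the top of this file.
	 */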
2518
2519 snprintf(common->inquiry_string, sizeof common->inquiry_string,
2520 "%-8s%-16s%04x",
2521 "Linux ",
2522 "File-Store Gadget",
2523 0xffff);
2524
2525 /* Some peripheral controllers are known not to be able to
2526 * halt bulk endpoints correctly. If one of them is present,
2527 * disable stalls.
2528 */
2529
2530 /* Tell the thread to start working */
2531 common->thread_task =
2532 kthread_create(fsg_main_thread, common,
2533 OR(cfg->thread_name, "file-storage"));
2534 if (IS_ERR(common->thread_task)) {
2535 rc = PTR_ERR(common->thread_task);
2536 goto error_release;
2537 }
2538
2539#undef OR
2540 /* Information */
2541 INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
2542 INFO(common, "Number of LUNs=%d\n", common->nluns);
2543
2544 return common;
2545
2546error_luns:
2547 common->nluns = i + 1;
2548error_release:
2549 common->state = FSG_STATE_TERMINATED; /* The thread is dead */
2550	/* Call fsg_common_release() directly; the ref might not be
2551	 * initialised */
2552 fsg_common_release(&common->ref);
2553 return ERR_PTR(rc);
2554}
2555
2556static void fsg_common_release(struct kref *ref)
2557{
2558 struct fsg_common *common = container_of(ref, struct fsg_common, ref);
2559
2560 /* If the thread isn't already dead, tell it to exit now */
2561 if (common->state != FSG_STATE_TERMINATED) {
2562 raise_exception(common, FSG_STATE_EXIT);
2563 wait_for_completion(&common->thread_notifier);
2564 }
2565
2566 if (likely(common->luns)) {
2567 struct fsg_lun *lun = common->luns;
2568 unsigned i = common->nluns;
2569
2570 /* In error recovery common->nluns may be zero. */
2571 for (; i; --i, ++lun)
2572 fsg_lun_close(lun);
2573
2574 kfree(common->luns);
2575 }
2576
2577 {
2578 struct fsg_buffhd *bh = common->buffhds;
2579 unsigned i = FSG_NUM_BUFFERS;
2580 do {
2581 kfree(bh->buf);
2582 } while (++bh, --i);
2583 }
2584
2585 if (common->free_storage_on_release)
2586 kfree(common);
2587}
2588
Piotr Wilczek91637d72013-03-05 12:10:16 +01002589/*-------------------------------------------------------------------------*/
2590
2591/**
2592 * usb_copy_descriptors - copy a vector of USB descriptors
2593 * @src: null-terminated vector to copy
2594 * Context: initialization code, which may sleep
2595 *
2596 * This makes a copy of a vector of USB descriptors. Its primary use
2597 * is to support usb_function objects which can have multiple copies,
2598 * each needing different descriptors. Functions may have static
2599 * tables of descriptors, which are used as templates and customized
2600 * with identifiers (for interfaces, strings, endpoints, and more)
2601 * as needed by a given function instance.
2602 */
2603struct usb_descriptor_header **
2604usb_copy_descriptors(struct usb_descriptor_header **src)
2605{
2606 struct usb_descriptor_header **tmp;
2607 unsigned bytes;
2608 unsigned n_desc;
2609 void *mem;
2610 struct usb_descriptor_header **ret;
2611
2612 /* count descriptors and their sizes; then add vector size */
2613 for (bytes = 0, n_desc = 0, tmp = src; *tmp; tmp++, n_desc++)
2614 bytes += (*tmp)->bLength;
2615 bytes += (n_desc + 1) * sizeof(*tmp);
2616
Lukasz Majewski05751132014-02-05 10:10:41 +01002617 mem = memalign(CONFIG_SYS_CACHELINE_SIZE, bytes);
Piotr Wilczek91637d72013-03-05 12:10:16 +01002618 if (!mem)
2619 return NULL;
2620
2621 /* fill in pointers starting at "tmp",
2622 * to descriptors copied starting at "mem";
2623 * and return "ret"
2624 */
2625 tmp = mem;
2626 ret = mem;
2627 mem += (n_desc + 1) * sizeof(*tmp);
2628 while (*src) {
2629 memcpy(mem, *src, (*src)->bLength);
2630 *tmp = mem;
2631 tmp++;
2632 mem += (*src)->bLength;
2633 src++;
2634 }
2635 *tmp = NULL;
2636
2637 return ret;
2638}
2639
Piotr Wilczek91637d72013-03-05 12:10:16 +01002640static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2641{
2642 struct fsg_dev *fsg = fsg_from_func(f);
2643
2644 DBG(fsg, "unbind\n");
2645 if (fsg->common->fsg == fsg) {
2646 fsg->common->new_fsg = NULL;
2647 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2648 }
2649
2650 free(fsg->function.descriptors);
2651 free(fsg->function.hs_descriptors);
2652 kfree(fsg);
2653}
2654
2655static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
2656{
2657 struct fsg_dev *fsg = fsg_from_func(f);
2658 struct usb_gadget *gadget = c->cdev->gadget;
2659 int i;
2660 struct usb_ep *ep;
2661 fsg->gadget = gadget;
2662
2663 /* New interface */
2664 i = usb_interface_id(c, f);
2665 if (i < 0)
2666 return i;
2667 fsg_intf_desc.bInterfaceNumber = i;
2668 fsg->interface_number = i;
2669
2670 /* Find all the endpoints we will use */
2671 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
2672 if (!ep)
2673 goto autoconf_fail;
2674 ep->driver_data = fsg->common; /* claim the endpoint */
2675 fsg->bulk_in = ep;
2676
2677 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
2678 if (!ep)
2679 goto autoconf_fail;
2680 ep->driver_data = fsg->common; /* claim the endpoint */
2681 fsg->bulk_out = ep;
2682
2683 /* Copy descriptors */
2684 f->descriptors = usb_copy_descriptors(fsg_fs_function);
2685 if (unlikely(!f->descriptors))
2686 return -ENOMEM;
2687
2688 if (gadget_is_dualspeed(gadget)) {
2689 /* Assume endpoint addresses are the same for both speeds */
2690 fsg_hs_bulk_in_desc.bEndpointAddress =
2691 fsg_fs_bulk_in_desc.bEndpointAddress;
2692 fsg_hs_bulk_out_desc.bEndpointAddress =
2693 fsg_fs_bulk_out_desc.bEndpointAddress;
2694 f->hs_descriptors = usb_copy_descriptors(fsg_hs_function);
2695 if (unlikely(!f->hs_descriptors)) {
2696 free(f->descriptors);
2697 return -ENOMEM;
2698 }
2699 }
2700 return 0;
2701
2702autoconf_fail:
2703 ERROR(fsg, "unable to autoconfigure all endpoints\n");
2704 return -ENOTSUPP;
2705}
2706
Piotr Wilczek91637d72013-03-05 12:10:16 +01002707/****************************** ADD FUNCTION ******************************/
2708
2709static struct usb_gadget_strings *fsg_strings_array[] = {
2710 &fsg_stringtab,
2711 NULL,
2712};
2713
2714static int fsg_bind_config(struct usb_composite_dev *cdev,
2715 struct usb_configuration *c,
2716 struct fsg_common *common)
2717{
2718 struct fsg_dev *fsg;
2719 int rc;
2720
2721 fsg = calloc(1, sizeof *fsg);
2722 if (!fsg)
2723 return -ENOMEM;
2724 fsg->function.name = FSG_DRIVER_DESC;
2725 fsg->function.strings = fsg_strings_array;
2726 fsg->function.bind = fsg_bind;
2727 fsg->function.unbind = fsg_unbind;
2728 fsg->function.setup = fsg_setup;
2729 fsg->function.set_alt = fsg_set_alt;
2730 fsg->function.disable = fsg_disable;
2731
2732 fsg->common = common;
2733 common->fsg = fsg;
2734	/* Our caller holds a reference to the common structure, so we
2735	 * don't have to worry about it being freed until we return
2736	 * from this function.  So instead of incrementing the counter
2737	 * now and decrementing it in error recovery, we increment it
2738	 * only when the call to usb_add_function() is successful. */
2739
2740 rc = usb_add_function(c, &fsg->function);
2741
2742 if (rc)
2743 kfree(fsg);
2744
2745 return rc;
2746}
2747
2748int fsg_add(struct usb_configuration *c)
2749{
2750 struct fsg_common *fsg_common;
2751
2752	fsg_common = fsg_common_init(NULL, c->cdev);
	if (IS_ERR(fsg_common))
		return PTR_ERR(fsg_common);
2753
2754 fsg_common->vendor_name = 0;
2755 fsg_common->product_name = 0;
2756 fsg_common->release = 0xffff;
2757
2758 fsg_common->ops = NULL;
2759 fsg_common->private_data = NULL;
2760
2761 the_fsg_common = fsg_common;
2762
2763 return fsg_bind_config(c->cdev, c, fsg_common);
2764}
2765
Marek Vasut7786e702023-09-01 11:49:54 +02002766int fsg_init(struct ums *ums_devs, int count, struct udevice *udc)
Piotr Wilczek91637d72013-03-05 12:10:16 +01002767{
Stephen Warren9e7d5882015-12-07 11:38:50 -07002768 ums = ums_devs;
2769 ums_count = count;
Marek Vasut7786e702023-09-01 11:49:54 +02002770 udcdev = udc;
Piotr Wilczek91637d72013-03-05 12:10:16 +01002771
2772 return 0;
2773}
Mateusz Zalega69cb0bb2014-04-28 21:13:28 +02002774
2775DECLARE_GADGET_BIND_CALLBACK(usb_dnl_ums, fsg_add);
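/* A rough usage sketch, not part of this file: a caller (for example
 * U-Boot's "ums" command) is expected to describe each exported block
 * device in a struct ums, hand the array to fsg_init(), and register
 * the gadget so that the "usb_dnl_ums" bind callback declared above
 * pulls in fsg_add().  Everything below other than fsg_init() and
 * fsg_main_thread() is illustrative and may not match the real caller:
 *
 *	struct ums ums_dev = { ... };	// filled in from a blk_desc
 *
 *	fsg_init(&ums_dev, 1, udc_dev);
 *	g_dnl_register("usb_dnl_ums");	// binds via the callback above
 *	while (!should_stop()) {	// e.g. until Ctrl-C
 *		handle_gadget_interrupts();	// poll the UDC
 *		if (fsg_main_thread(NULL))	// one BOT pass per call
 *			break;
 *	}
 *	g_dnl_unregister();
 */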