openwrt.git: target/linux/goldfish/patches-2.6.30/0101-android_usb-Composite-USB-gadget-driver-for-android.patch
1 From 03e39e8f663c896dac11e87d96e8cb0292520e36 Mon Sep 17 00:00:00 2001
2 From: Mike Lockwood <lockwood@android.com>
3 Date: Tue, 2 Dec 2008 22:01:33 -0500
4 Subject: [PATCH 101/134] android_usb: Composite USB gadget driver for android.
5
6 Signed-off-by: Mike Lockwood <lockwood@android.com>
7
8 USB: android gadget: add remote wakeup attribute to android function
9
10 Add a remote wakeup attribute to the configuration descriptor of the
11 android function to advertise remote wakeup capability to the host
12
13 Acked-by: Allam, Suresh Reddy <sallam@qualcomm.com>
14
15 Signed-off-by: Mike Lockwood <lockwood@android.com>
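
For reference, the advertisement in android_bind() only claims the capability
when the underlying controller driver actually implements the wakeup op; the
change reduces to:

	if (gadget->ops->wakeup)
		android_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;

With that bit set, the host sees remote wakeup (bit 5 of bmAttributes)
advertised in the configuration descriptor.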
16
17 usb gadget: link fixes for android composite gadget
18
19 Signed-off-by: Mike Lockwood <lockwood@android.com>
20
21 usb gadget: Fix null pointer errors in android composite driver
22
23 Signed-off-by: Mike Lockwood <lockwood@android.com>
24 ---
25 drivers/usb/gadget/Kconfig | 8 +
26 drivers/usb/gadget/Makefile | 2 +
27 drivers/usb/gadget/android.c | 345 +++++
28 drivers/usb/gadget/f_adb.c | 668 ++++++++
29 drivers/usb/gadget/f_adb.h | 25 +
30 drivers/usb/gadget/f_mass_storage.c | 2906 +++++++++++++++++++++++++++++++++++
31 drivers/usb/gadget/f_mass_storage.h | 52 +
32 include/linux/usb/android.h | 40 +
33 8 files changed, 4046 insertions(+), 0 deletions(-)
34 create mode 100644 drivers/usb/gadget/android.c
35 create mode 100644 drivers/usb/gadget/f_adb.c
36 create mode 100644 drivers/usb/gadget/f_adb.h
37 create mode 100644 drivers/usb/gadget/f_mass_storage.c
38 create mode 100644 drivers/usb/gadget/f_mass_storage.h
39 create mode 100644 include/linux/usb/android.h
40
41 --- a/drivers/usb/gadget/Kconfig
42 +++ b/drivers/usb/gadget/Kconfig
43 @@ -700,6 +700,14 @@ config USB_G_PRINTER
44 For more information, see Documentation/usb/gadget_printer.txt
45 which includes sample code for accessing the device file.
46
47 +config USB_ANDROID
48 + tristate "Android Gadget"
49 + help
50 + The Android gadget provides mass storage and adb transport.
51 +
52 + Say "y" to link the driver statically, or "m" to build a
53 + dynamically linked module called "g_android".
54 +
55 config USB_CDC_COMPOSITE
56 tristate "CDC Composite Device (Ethernet and ACM)"
57 depends on NET
58 --- a/drivers/usb/gadget/Makefile
59 +++ b/drivers/usb/gadget/Makefile
60 @@ -33,6 +33,7 @@ gadgetfs-objs := inode.o
61 g_file_storage-objs := file_storage.o
62 g_printer-objs := printer.o
63 g_cdc-objs := cdc2.o
64 +g_android-objs := android.o f_adb.o f_mass_storage.o
65
66 obj-$(CONFIG_USB_ZERO) += g_zero.o
67 obj-$(CONFIG_USB_ETH) += g_ether.o
68 @@ -42,4 +43,5 @@ obj-$(CONFIG_USB_G_SERIAL) += g_serial.o
69 obj-$(CONFIG_USB_G_PRINTER) += g_printer.o
70 obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o
71 obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
72 +obj-$(CONFIG_USB_ANDROID) += g_android.o
73
74 --- /dev/null
75 +++ b/drivers/usb/gadget/android.c
76 @@ -0,0 +1,345 @@
77 +/*
78 + * Gadget Driver for Android
79 + *
80 + * Copyright (C) 2008 Google, Inc.
81 + * Author: Mike Lockwood <lockwood@android.com>
82 + *
83 + * This software is licensed under the terms of the GNU General Public
84 + * License version 2, as published by the Free Software Foundation, and
85 + * may be copied, distributed, and modified under those terms.
86 + *
87 + * This program is distributed in the hope that it will be useful,
88 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
89 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
90 + * GNU General Public License for more details.
91 + *
92 + */
93 +
94 +/* #define DEBUG */
95 +/* #define VERBOSE_DEBUG */
96 +
97 +#include <linux/init.h>
98 +#include <linux/module.h>
99 +#include <linux/fs.h>
100 +
101 +#include <linux/delay.h>
102 +#include <linux/kernel.h>
103 +#include <linux/utsname.h>
104 +#include <linux/miscdevice.h>
105 +#include <linux/platform_device.h>
106 +
107 +#include <linux/usb/android.h>
108 +#include <linux/usb/ch9.h>
109 +#include <linux/usb/composite.h>
110 +#include <linux/usb/gadget.h>
111 +
112 +#include "f_mass_storage.h"
113 +#include "f_adb.h"
114 +
115 +#include "gadget_chips.h"
116 +
117 +/*
118 + * Kbuild is not very cooperative with respect to linking separately
119 + * compiled library objects into one module. So for now we won't use
120 + * separate compilation ... ensuring init/exit sections work to shrink
121 + * the runtime footprint, and giving us at least some parts of what
122 + * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
123 + */
124 +#include "usbstring.c"
125 +#include "config.c"
126 +#include "epautoconf.c"
127 +#include "composite.c"
128 +
129 +MODULE_AUTHOR("Mike Lockwood");
130 +MODULE_DESCRIPTION("Android Composite USB Driver");
131 +MODULE_LICENSE("GPL");
132 +MODULE_VERSION("1.0");
133 +
134 +static const char longname[] = "Gadget Android";
135 +
136 +/* Default vendor and product IDs, overridden by platform data */
137 +#define VENDOR_ID 0x18D1
138 +#define PRODUCT_ID 0x0001
139 +#define ADB_PRODUCT_ID 0x0002
140 +
141 +struct android_dev {
142 + struct usb_gadget *gadget;
143 + struct usb_composite_dev *cdev;
144 +
145 + int product_id;
146 + int adb_product_id;
147 + int version;
148 +
149 + int adb_enabled;
150 + int nluns;
151 +};
152 +
153 +static atomic_t adb_enable_excl;
154 +static struct android_dev *_android_dev;
155 +
156 +/* string IDs are assigned dynamically */
157 +
158 +#define STRING_MANUFACTURER_IDX 0
159 +#define STRING_PRODUCT_IDX 1
160 +#define STRING_SERIAL_IDX 2
161 +
162 +/* String Table */
163 +static struct usb_string strings_dev[] = {
164 + /* These dummy values should be overridden by platform data */
165 + [STRING_MANUFACTURER_IDX].s = "Android",
166 + [STRING_PRODUCT_IDX].s = "Android",
167 + [STRING_SERIAL_IDX].s = "0123456789ABCDEF",
168 + { } /* end of list */
169 +};
170 +
171 +static struct usb_gadget_strings stringtab_dev = {
172 + .language = 0x0409, /* en-us */
173 + .strings = strings_dev,
174 +};
175 +
176 +static struct usb_gadget_strings *dev_strings[] = {
177 + &stringtab_dev,
178 + NULL,
179 +};
180 +
181 +static struct usb_device_descriptor device_desc = {
182 + .bLength = sizeof(device_desc),
183 + .bDescriptorType = USB_DT_DEVICE,
184 + .bcdUSB = __constant_cpu_to_le16(0x0200),
185 + .bDeviceClass = USB_CLASS_PER_INTERFACE,
186 + .idVendor = __constant_cpu_to_le16(VENDOR_ID),
187 + .idProduct = __constant_cpu_to_le16(PRODUCT_ID),
188 + .bcdDevice = __constant_cpu_to_le16(0xffff),
189 + .bNumConfigurations = 1,
190 +};
191 +
192 +static int __init android_bind_config(struct usb_configuration *c)
193 +{
194 + struct android_dev *dev = _android_dev;
195 + int ret;
196 + printk(KERN_DEBUG "android_bind_config\n");
197 +
198 + ret = mass_storage_function_add(dev->cdev, c, dev->nluns);
199 + if (ret)
200 + return ret;
201 + return adb_function_add(dev->cdev, c);
202 +}
203 +
204 +static struct usb_configuration android_config_driver = {
205 + .label = "android",
206 + .bind = android_bind_config,
207 + .bConfigurationValue = 1,
208 + .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
209 + .bMaxPower = 0x80, /* 250ma */
210 +};
211 +
212 +static int __init android_bind(struct usb_composite_dev *cdev)
213 +{
214 + struct android_dev *dev = _android_dev;
215 + struct usb_gadget *gadget = cdev->gadget;
216 + int gcnum;
217 + int id;
218 + int ret;
219 +
220 + printk(KERN_INFO "android_bind\n");
221 +
222 + /* Allocate string descriptor numbers ... note that string
223 + * contents can be overridden by the composite_dev glue.
224 + */
225 + id = usb_string_id(cdev);
226 + if (id < 0)
227 + return id;
228 + strings_dev[STRING_MANUFACTURER_IDX].id = id;
229 + device_desc.iManufacturer = id;
230 +
231 + id = usb_string_id(cdev);
232 + if (id < 0)
233 + return id;
234 + strings_dev[STRING_PRODUCT_IDX].id = id;
235 + device_desc.iProduct = id;
236 +
237 + id = usb_string_id(cdev);
238 + if (id < 0)
239 + return id;
240 + strings_dev[STRING_SERIAL_IDX].id = id;
241 + device_desc.iSerialNumber = id;
242 +
243 + if (gadget->ops->wakeup)
244 + android_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
245 +
246 + /* register our configuration */
247 + ret = usb_add_config(cdev, &android_config_driver);
248 + if (ret) {
249 + printk(KERN_ERR "usb_add_config failed\n");
250 + return ret;
251 + }
252 +
253 + gcnum = usb_gadget_controller_number(gadget);
254 + if (gcnum >= 0)
255 + device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
256 + else {
257 + /* gadget zero is so simple (for now, no altsettings) that
258 + * it SHOULD NOT have problems with bulk-capable hardware.
 259 + * so just warn about unrecognized controllers -- don't panic.
260 + *
261 + * things like configuration and altsetting numbering
262 + * can need hardware-specific attention though.
263 + */
264 + pr_warning("%s: controller '%s' not recognized\n",
265 + longname, gadget->name);
266 + device_desc.bcdDevice = __constant_cpu_to_le16(0x9999);
267 + }
268 +
269 + usb_gadget_set_selfpowered(gadget);
270 + dev->cdev = cdev;
271 +
272 + return 0;
273 +}
274 +
275 +static struct usb_composite_driver android_usb_driver = {
276 + .name = "android_usb",
277 + .dev = &device_desc,
278 + .strings = dev_strings,
279 + .bind = android_bind,
280 +};
281 +
282 +static void enable_adb(struct android_dev *dev, int enable)
283 +{
284 + if (enable != dev->adb_enabled) {
285 + dev->adb_enabled = enable;
286 + adb_function_enable(enable);
287 +
288 + /* set product ID to the appropriate value */
289 + if (enable)
290 + device_desc.idProduct =
291 + __constant_cpu_to_le16(dev->adb_product_id);
292 + else
293 + device_desc.idProduct =
294 + __constant_cpu_to_le16(dev->product_id);
295 + if (dev->cdev)
296 + dev->cdev->desc.idProduct = device_desc.idProduct;
297 +
298 + /* force reenumeration */
299 + if (dev->cdev && dev->cdev->gadget &&
300 + dev->cdev->gadget->speed != USB_SPEED_UNKNOWN) {
301 + usb_gadget_disconnect(dev->cdev->gadget);
302 + msleep(10);
303 + usb_gadget_connect(dev->cdev->gadget);
304 + }
305 + }
306 +}
307 +
308 +static int adb_enable_open(struct inode *ip, struct file *fp)
309 +{
310 + if (atomic_inc_return(&adb_enable_excl) != 1) {
311 + atomic_dec(&adb_enable_excl);
312 + return -EBUSY;
313 + }
314 +
315 + printk(KERN_INFO "enabling adb\n");
316 + enable_adb(_android_dev, 1);
317 +
318 + return 0;
319 +}
320 +
321 +static int adb_enable_release(struct inode *ip, struct file *fp)
322 +{
323 + printk(KERN_INFO "disabling adb\n");
324 + enable_adb(_android_dev, 0);
325 + atomic_dec(&adb_enable_excl);
326 + return 0;
327 +}
328 +
329 +static struct file_operations adb_enable_fops = {
330 + .owner = THIS_MODULE,
331 + .open = adb_enable_open,
332 + .release = adb_enable_release,
333 +};
334 +
335 +static struct miscdevice adb_enable_device = {
336 + .minor = MISC_DYNAMIC_MINOR,
337 + .name = "android_adb_enable",
338 + .fops = &adb_enable_fops,
339 +};
340 +
341 +static int __init android_probe(struct platform_device *pdev)
342 +{
343 + struct android_usb_platform_data *pdata = pdev->dev.platform_data;
344 + struct android_dev *dev = _android_dev;
345 +
346 + printk(KERN_INFO "android_probe pdata: %p\n", pdata);
347 +
348 + if (pdata) {
349 + if (pdata->vendor_id)
350 + device_desc.idVendor =
351 + __constant_cpu_to_le16(pdata->vendor_id);
352 + if (pdata->product_id) {
353 + dev->product_id = pdata->product_id;
354 + device_desc.idProduct =
355 + __constant_cpu_to_le16(pdata->product_id);
356 + }
357 + if (pdata->adb_product_id)
358 + dev->adb_product_id = pdata->adb_product_id;
359 + if (pdata->version)
360 + dev->version = pdata->version;
361 +
362 + if (pdata->product_name)
363 + strings_dev[STRING_PRODUCT_IDX].s = pdata->product_name;
364 + if (pdata->manufacturer_name)
365 + strings_dev[STRING_MANUFACTURER_IDX].s =
366 + pdata->manufacturer_name;
367 + if (pdata->serial_number)
368 + strings_dev[STRING_SERIAL_IDX].s = pdata->serial_number;
369 + dev->nluns = pdata->nluns;
370 + }
371 +
372 + return 0;
373 +}
374 +
375 +static struct platform_driver android_platform_driver = {
376 + .driver = { .name = "android_usb", },
377 + .probe = android_probe,
378 +};
379 +
380 +static int __init init(void)
381 +{
382 + struct android_dev *dev;
383 + int ret;
384 +
385 + printk(KERN_INFO "android init\n");
386 +
387 + dev = kzalloc(sizeof(*dev), GFP_KERNEL);
388 + if (!dev)
389 + return -ENOMEM;
390 +
391 + /* set default values, which should be overridden by platform data */
392 + dev->product_id = PRODUCT_ID;
393 + dev->adb_product_id = ADB_PRODUCT_ID;
394 + _android_dev = dev;
395 +
396 + ret = platform_driver_register(&android_platform_driver);
397 + if (ret)
398 + return ret;
399 + ret = misc_register(&adb_enable_device);
400 + if (ret) {
401 + platform_driver_unregister(&android_platform_driver);
402 + return ret;
403 + }
404 + ret = usb_composite_register(&android_usb_driver);
405 + if (ret) {
406 + misc_deregister(&adb_enable_device);
407 + platform_driver_unregister(&android_platform_driver);
408 + }
409 + return ret;
410 +}
411 +module_init(init);
412 +
413 +static void __exit cleanup(void)
414 +{
415 + usb_composite_unregister(&android_usb_driver);
416 + misc_deregister(&adb_enable_device);
417 + platform_driver_unregister(&android_platform_driver);
418 + kfree(_android_dev);
419 + _android_dev = NULL;
420 +}
421 +module_exit(cleanup);
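/*
 * Illustration only, not part of this patch: a board file hands the driver
 * its IDs and strings through android_usb_platform_data, which android_probe()
 * above reads field by field.  The struct itself is declared in
 * include/linux/usb/android.h (added by this patch but not shown in this
 * excerpt); the names and values below are hypothetical examples.
 */
#include <linux/platform_device.h>
#include <linux/usb/android.h>

static struct android_usb_platform_data example_android_pdata = {
	.vendor_id		= 0x18d1,
	.product_id		= 0x0001,
	.adb_product_id		= 0x0002,
	.product_name		= "Example Product",
	.manufacturer_name	= "Example Manufacturer",
	.serial_number		= "0123456789ABCDEF",
	.nluns			= 1,
};

static struct platform_device example_android_usb_device = {
	.name	= "android_usb",	/* must match android_platform_driver */
	.id	= -1,
	.dev	= { .platform_data = &example_android_pdata },
};

/* registered from board init code with platform_device_register() */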
422 --- /dev/null
423 +++ b/drivers/usb/gadget/f_adb.c
424 @@ -0,0 +1,668 @@
425 +/*
426 + * Gadget Driver for Android ADB
427 + *
428 + * Copyright (C) 2008 Google, Inc.
429 + * Author: Mike Lockwood <lockwood@android.com>
430 + *
431 + * This software is licensed under the terms of the GNU General Public
432 + * License version 2, as published by the Free Software Foundation, and
433 + * may be copied, distributed, and modified under those terms.
434 + *
435 + * This program is distributed in the hope that it will be useful,
436 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
437 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
438 + * GNU General Public License for more details.
439 + *
440 + */
441 +
442 +/* #define DEBUG */
443 +/* #define VERBOSE_DEBUG */
444 +
445 +#include <linux/module.h>
446 +#include <linux/init.h>
447 +#include <linux/poll.h>
448 +#include <linux/delay.h>
449 +#include <linux/wait.h>
450 +#include <linux/err.h>
451 +#include <linux/interrupt.h>
452 +
453 +#include <linux/types.h>
454 +#include <linux/device.h>
455 +#include <linux/miscdevice.h>
456 +
457 +#include <linux/usb/ch9.h>
458 +#include <linux/usb/composite.h>
459 +#include <linux/usb/gadget.h>
460 +
461 +#include "f_adb.h"
462 +
463 +#define BULK_BUFFER_SIZE 4096
464 +
465 +/* number of rx and tx requests to allocate */
466 +#define RX_REQ_MAX 4
467 +#define TX_REQ_MAX 4
468 +
469 +static const char shortname[] = "android_adb";
470 +
471 +struct adb_dev {
472 + struct usb_function function;
473 + struct usb_composite_dev *cdev;
474 + spinlock_t lock;
475 +
476 + struct usb_ep *ep_in;
477 + struct usb_ep *ep_out;
478 +
479 + int online;
480 + int error;
481 +
482 + atomic_t read_excl;
483 + atomic_t write_excl;
484 + atomic_t open_excl;
485 +
486 + struct list_head tx_idle;
487 + struct list_head rx_idle;
488 + struct list_head rx_done;
489 +
490 + wait_queue_head_t read_wq;
491 + wait_queue_head_t write_wq;
492 +
493 + /* the request we're currently reading from */
494 + struct usb_request *read_req;
495 + unsigned char *read_buf;
496 + unsigned read_count;
497 +};
498 +
499 +static struct usb_interface_descriptor adb_interface_desc = {
500 + .bLength = USB_DT_INTERFACE_SIZE,
501 + .bDescriptorType = USB_DT_INTERFACE,
502 + .bInterfaceNumber = 0,
503 + .bNumEndpoints = 2,
504 + .bInterfaceClass = 0xFF,
505 + .bInterfaceSubClass = 0x42,
506 + .bInterfaceProtocol = 1,
507 +};
508 +
509 +static struct usb_endpoint_descriptor adb_highspeed_in_desc = {
510 + .bLength = USB_DT_ENDPOINT_SIZE,
511 + .bDescriptorType = USB_DT_ENDPOINT,
512 + .bEndpointAddress = USB_DIR_IN,
513 + .bmAttributes = USB_ENDPOINT_XFER_BULK,
514 + .wMaxPacketSize = __constant_cpu_to_le16(512),
515 +};
516 +
517 +static struct usb_endpoint_descriptor adb_highspeed_out_desc = {
518 + .bLength = USB_DT_ENDPOINT_SIZE,
519 + .bDescriptorType = USB_DT_ENDPOINT,
520 + .bEndpointAddress = USB_DIR_OUT,
521 + .bmAttributes = USB_ENDPOINT_XFER_BULK,
522 + .wMaxPacketSize = __constant_cpu_to_le16(512),
523 +};
524 +
525 +static struct usb_endpoint_descriptor adb_fullspeed_in_desc = {
526 + .bLength = USB_DT_ENDPOINT_SIZE,
527 + .bDescriptorType = USB_DT_ENDPOINT,
528 + .bEndpointAddress = USB_DIR_IN,
529 + .bmAttributes = USB_ENDPOINT_XFER_BULK,
530 +};
531 +
532 +static struct usb_endpoint_descriptor adb_fullspeed_out_desc = {
533 + .bLength = USB_DT_ENDPOINT_SIZE,
534 + .bDescriptorType = USB_DT_ENDPOINT,
535 + .bEndpointAddress = USB_DIR_OUT,
536 + .bmAttributes = USB_ENDPOINT_XFER_BULK,
537 +};
538 +
539 +static struct usb_descriptor_header *fs_adb_descs[] = {
540 + (struct usb_descriptor_header *) &adb_interface_desc,
541 + (struct usb_descriptor_header *) &adb_fullspeed_in_desc,
542 + (struct usb_descriptor_header *) &adb_fullspeed_out_desc,
543 + NULL,
544 +};
545 +
546 +static struct usb_descriptor_header *hs_adb_descs[] = {
547 + (struct usb_descriptor_header *) &adb_interface_desc,
548 + (struct usb_descriptor_header *) &adb_highspeed_in_desc,
549 + (struct usb_descriptor_header *) &adb_highspeed_out_desc,
550 + NULL,
551 +};
552 +
553 +/* used when adb function is disabled */
554 +static struct usb_descriptor_header *null_adb_descs[] = {
555 + NULL,
556 +};
557 +
558 +
559 +/* temporary variable used between adb_open() and adb_gadget_bind() */
560 +static struct adb_dev *_adb_dev;
561 +
562 +static inline struct adb_dev *func_to_dev(struct usb_function *f)
563 +{
564 + return container_of(f, struct adb_dev, function);
565 +}
566 +
567 +
568 +static struct usb_request *adb_request_new(struct usb_ep *ep, int buffer_size)
569 +{
570 + struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
571 + if (!req)
572 + return NULL;
573 +
574 + /* now allocate buffers for the requests */
575 + req->buf = kmalloc(buffer_size, GFP_KERNEL);
576 + if (!req->buf) {
577 + usb_ep_free_request(ep, req);
578 + return NULL;
579 + }
580 +
581 + return req;
582 +}
583 +
584 +static void adb_request_free(struct usb_request *req, struct usb_ep *ep)
585 +{
586 + if (req) {
587 + kfree(req->buf);
588 + usb_ep_free_request(ep, req);
589 + }
590 +}
591 +
592 +static inline int _lock(atomic_t *excl)
593 +{
594 + if (atomic_inc_return(excl) == 1) {
595 + return 0;
596 + } else {
597 + atomic_dec(excl);
598 + return -1;
599 + }
600 +}
601 +
602 +static inline void _unlock(atomic_t *excl)
603 +{
604 + atomic_dec(excl);
605 +}
606 +
607 +/* add a request to the tail of a list */
608 +void req_put(struct adb_dev *dev, struct list_head *head,
609 + struct usb_request *req)
610 +{
611 + unsigned long flags;
612 +
613 + spin_lock_irqsave(&dev->lock, flags);
614 + list_add_tail(&req->list, head);
615 + spin_unlock_irqrestore(&dev->lock, flags);
616 +}
617 +
618 +/* remove a request from the head of a list */
619 +struct usb_request *req_get(struct adb_dev *dev, struct list_head *head)
620 +{
621 + unsigned long flags;
622 + struct usb_request *req;
623 +
624 + spin_lock_irqsave(&dev->lock, flags);
625 + if (list_empty(head)) {
626 + req = 0;
627 + } else {
628 + req = list_first_entry(head, struct usb_request, list);
629 + list_del(&req->list);
630 + }
631 + spin_unlock_irqrestore(&dev->lock, flags);
632 + return req;
633 +}
634 +
635 +static void adb_complete_in(struct usb_ep *ep, struct usb_request *req)
636 +{
637 + struct adb_dev *dev = _adb_dev;
638 +
639 + if (req->status != 0)
640 + dev->error = 1;
641 +
642 + req_put(dev, &dev->tx_idle, req);
643 +
644 + wake_up(&dev->write_wq);
645 +}
646 +
647 +static void adb_complete_out(struct usb_ep *ep, struct usb_request *req)
648 +{
649 + struct adb_dev *dev = _adb_dev;
650 +
651 + if (req->status != 0) {
652 + dev->error = 1;
653 + req_put(dev, &dev->rx_idle, req);
654 + } else {
655 + req_put(dev, &dev->rx_done, req);
656 + }
657 +
658 + wake_up(&dev->read_wq);
659 +}
660 +
661 +static int __init create_bulk_endpoints(struct adb_dev *dev,
662 + struct usb_endpoint_descriptor *in_desc,
663 + struct usb_endpoint_descriptor *out_desc)
664 +{
665 + struct usb_composite_dev *cdev = dev->cdev;
666 + struct usb_request *req;
667 + struct usb_ep *ep;
668 + int i;
669 +
670 + DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
671 +
672 + ep = usb_ep_autoconfig(cdev->gadget, in_desc);
673 + if (!ep) {
674 + DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
675 + return -ENODEV;
676 + }
677 + DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
678 + dev->ep_in = ep;
679 +
680 + ep = usb_ep_autoconfig(cdev->gadget, out_desc);
681 + if (!ep) {
682 + DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
683 + return -ENODEV;
684 + }
685 + DBG(cdev, "usb_ep_autoconfig for adb ep_out got %s\n", ep->name);
686 + dev->ep_out = ep;
687 +
688 + /* now allocate requests for our endpoints */
689 + for (i = 0; i < RX_REQ_MAX; i++) {
690 + req = adb_request_new(dev->ep_out, BULK_BUFFER_SIZE);
691 + if (!req)
692 + goto fail;
693 + req->complete = adb_complete_out;
694 + req_put(dev, &dev->rx_idle, req);
695 + }
696 +
697 + for (i = 0; i < TX_REQ_MAX; i++) {
698 + req = adb_request_new(dev->ep_in, BULK_BUFFER_SIZE);
699 + if (!req)
700 + goto fail;
701 + req->complete = adb_complete_in;
702 + req_put(dev, &dev->tx_idle, req);
703 + }
704 +
705 + return 0;
706 +
707 +fail:
708 + printk(KERN_ERR "adb_bind() could not allocate requests\n");
709 + return -1;
710 +}
711 +
712 +static ssize_t adb_read(struct file *fp, char __user *buf,
713 + size_t count, loff_t *pos)
714 +{
715 + struct adb_dev *dev = fp->private_data;
716 + struct usb_composite_dev *cdev = dev->cdev;
717 + struct usb_request *req;
718 + int r = count, xfer;
719 + int ret;
720 +
721 + DBG(cdev, "adb_read(%d)\n", count);
722 +
723 + if (_lock(&dev->read_excl))
724 + return -EBUSY;
725 +
726 + /* we will block until we're online */
727 + while (!(dev->online || dev->error)) {
728 + DBG(cdev, "adb_read: waiting for online state\n");
729 + ret = wait_event_interruptible(dev->read_wq,
730 + (dev->online || dev->error));
731 + if (ret < 0) {
732 + _unlock(&dev->read_excl);
733 + return ret;
734 + }
735 + }
736 +
737 + while (count > 0) {
738 + if (dev->error) {
739 + DBG(cdev, "adb_read dev->error\n");
740 + r = -EIO;
741 + break;
742 + }
743 +
744 + /* if we have idle read requests, get them queued */
745 + while ((req = req_get(dev, &dev->rx_idle))) {
746 +requeue_req:
747 + req->length = BULK_BUFFER_SIZE;
748 + ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC);
749 +
750 + if (ret < 0) {
751 + r = -EIO;
752 + dev->error = 1;
753 + req_put(dev, &dev->rx_idle, req);
754 + goto fail;
755 + } else {
756 + DBG(cdev, "rx %p queue\n", req);
757 + }
758 + }
759 +
760 + /* if we have data pending, give it to userspace */
761 + if (dev->read_count > 0) {
762 + if (dev->read_count < count)
763 + xfer = dev->read_count;
764 + else
765 + xfer = count;
766 +
767 + if (copy_to_user(buf, dev->read_buf, xfer)) {
768 + r = -EFAULT;
769 + break;
770 + }
771 + dev->read_buf += xfer;
772 + dev->read_count -= xfer;
773 + buf += xfer;
774 + count -= xfer;
775 +
776 + /* if we've emptied the buffer, release the request */
777 + if (dev->read_count == 0) {
778 + req_put(dev, &dev->rx_idle, dev->read_req);
779 + dev->read_req = 0;
780 + }
781 + continue;
782 + }
783 +
784 + /* wait for a request to complete */
785 + req = 0;
786 + ret = wait_event_interruptible(dev->read_wq,
787 + ((req = req_get(dev, &dev->rx_done)) || dev->error));
788 + if (req != 0) {
789 + /* if we got a 0-len one we need to put it back into
790 + ** service. if we made it the current read req we'd
791 + ** be stuck forever
792 + */
793 + if (req->actual == 0)
794 + goto requeue_req;
795 +
796 + dev->read_req = req;
797 + dev->read_count = req->actual;
798 + dev->read_buf = req->buf;
799 + DBG(cdev, "rx %p %d\n", req, req->actual);
800 + }
801 +
802 + if (ret < 0) {
803 + r = ret;
804 + break;
805 + }
806 + }
807 +
808 +fail:
809 + _unlock(&dev->read_excl);
810 + DBG(cdev, "adb_read returning %d\n", r);
811 + return r;
812 +}
813 +
814 +static ssize_t adb_write(struct file *fp, const char __user *buf,
815 + size_t count, loff_t *pos)
816 +{
817 + struct adb_dev *dev = fp->private_data;
818 + struct usb_composite_dev *cdev = dev->cdev;
819 + struct usb_request *req = 0;
820 + int r = count, xfer;
821 + int ret;
822 +
823 + DBG(cdev, "adb_write(%d)\n", count);
824 +
825 + if (_lock(&dev->write_excl))
826 + return -EBUSY;
827 +
828 + while (count > 0) {
829 + if (dev->error) {
830 + DBG(cdev, "adb_write dev->error\n");
831 + r = -EIO;
832 + break;
833 + }
834 +
835 + /* get an idle tx request to use */
836 + req = 0;
837 + ret = wait_event_interruptible(dev->write_wq,
838 + ((req = req_get(dev, &dev->tx_idle)) || dev->error));
839 +
840 + if (ret < 0) {
841 + r = ret;
842 + break;
843 + }
844 +
845 + if (req != 0) {
846 + if (count > BULK_BUFFER_SIZE)
847 + xfer = BULK_BUFFER_SIZE;
848 + else
849 + xfer = count;
850 + if (copy_from_user(req->buf, buf, xfer)) {
851 + r = -EFAULT;
852 + break;
853 + }
854 +
855 + req->length = xfer;
856 + ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC);
857 + if (ret < 0) {
858 + DBG(cdev, "adb_write: xfer error %d\n", ret);
859 + dev->error = 1;
860 + r = -EIO;
861 + break;
862 + }
863 +
864 + buf += xfer;
865 + count -= xfer;
866 +
867 + /* zero this so we don't try to free it on error exit */
868 + req = 0;
869 + }
870 + }
871 +
872 + if (req)
873 + req_put(dev, &dev->tx_idle, req);
874 +
875 + _unlock(&dev->write_excl);
876 + DBG(cdev, "adb_write returning %d\n", r);
877 + return r;
878 +}
879 +
880 +static int adb_open(struct inode *ip, struct file *fp)
881 +{
882 + printk(KERN_INFO "adb_open\n");
883 + if (_lock(&_adb_dev->open_excl))
884 + return -EBUSY;
885 +
886 + fp->private_data = _adb_dev;
887 +
888 + /* clear the error latch */
889 + _adb_dev->error = 0;
890 +
891 + return 0;
892 +}
893 +
894 +static int adb_release(struct inode *ip, struct file *fp)
895 +{
896 + printk(KERN_INFO "adb_release\n");
897 + _unlock(&_adb_dev->open_excl);
898 + return 0;
899 +}
900 +
901 +/* file operations for ADB device /dev/android_adb */
902 +static struct file_operations adb_fops = {
903 + .owner = THIS_MODULE,
904 + .read = adb_read,
905 + .write = adb_write,
906 + .open = adb_open,
907 + .release = adb_release,
908 +};
909 +
910 +static struct miscdevice adb_device = {
911 + .minor = MISC_DYNAMIC_MINOR,
912 + .name = shortname,
913 + .fops = &adb_fops,
914 +};
915 +
916 +static int __init
917 +adb_function_bind(struct usb_configuration *c, struct usb_function *f)
918 +{
919 + struct usb_composite_dev *cdev = c->cdev;
920 + struct adb_dev *dev = func_to_dev(f);
921 + int id;
922 + int ret;
923 +
924 + dev->cdev = cdev;
925 + DBG(cdev, "adb_function_bind dev: %p\n", dev);
926 +
927 + /* allocate interface ID(s) */
928 + id = usb_interface_id(c, f);
929 + if (id < 0)
930 + return id;
931 + adb_interface_desc.bInterfaceNumber = id;
932 +
933 + /* allocate endpoints */
934 + ret = create_bulk_endpoints(dev, &adb_fullspeed_in_desc,
935 + &adb_fullspeed_out_desc);
936 + if (ret)
937 + return ret;
938 +
939 + /* support high speed hardware */
940 + if (gadget_is_dualspeed(c->cdev->gadget)) {
941 + adb_highspeed_in_desc.bEndpointAddress =
942 + adb_fullspeed_in_desc.bEndpointAddress;
943 + adb_highspeed_out_desc.bEndpointAddress =
944 + adb_fullspeed_out_desc.bEndpointAddress;
945 + }
946 +
947 + DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
948 + gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
949 + f->name, dev->ep_in->name, dev->ep_out->name);
950 + return 0;
951 +}
952 +
953 +static void
954 +adb_function_unbind(struct usb_configuration *c, struct usb_function *f)
955 +{
956 + struct adb_dev *dev = func_to_dev(f);
957 + struct usb_request *req;
958 +
959 + spin_lock_irq(&dev->lock);
960 +
961 + while ((req = req_get(dev, &dev->rx_idle)))
962 + adb_request_free(req, dev->ep_out);
963 + while ((req = req_get(dev, &dev->tx_idle)))
964 + adb_request_free(req, dev->ep_in);
965 +
966 + dev->online = 0;
967 + dev->error = 1;
968 + spin_unlock_irq(&dev->lock);
969 +
970 + misc_deregister(&adb_device);
971 + kfree(_adb_dev);
972 + _adb_dev = NULL;
973 +}
974 +
975 +static int adb_function_set_alt(struct usb_function *f,
976 + unsigned intf, unsigned alt)
977 +{
978 + struct adb_dev *dev = func_to_dev(f);
979 + struct usb_composite_dev *cdev = f->config->cdev;
980 + int ret;
981 +
982 + DBG(cdev, "adb_function_set_alt intf: %d alt: %d\n", intf, alt);
983 + ret = usb_ep_enable(dev->ep_in,
984 + ep_choose(cdev->gadget,
985 + &adb_highspeed_in_desc,
986 + &adb_fullspeed_in_desc));
987 + if (ret)
988 + return ret;
989 + ret = usb_ep_enable(dev->ep_out,
990 + ep_choose(cdev->gadget,
991 + &adb_highspeed_out_desc,
992 + &adb_fullspeed_out_desc));
993 + if (ret) {
994 + usb_ep_disable(dev->ep_in);
995 + return ret;
996 + }
997 + dev->online = 1;
998 +
999 + /* readers may be blocked waiting for us to go online */
1000 + wake_up(&dev->read_wq);
1001 + return 0;
1002 +}
1003 +
1004 +static void adb_function_disable(struct usb_function *f)
1005 +{
1006 + struct adb_dev *dev = func_to_dev(f);
1007 + struct usb_composite_dev *cdev = dev->cdev;
1008 +
1009 + DBG(cdev, "adb_function_disable\n");
1010 + dev->online = 0;
1011 + dev->error = 1;
1012 + usb_ep_disable(dev->ep_in);
1013 + usb_ep_disable(dev->ep_out);
1014 +
1015 + /* readers may be blocked waiting for us to go online */
1016 + wake_up(&dev->read_wq);
1017 +
1018 + VDBG(cdev, "%s disabled\n", dev->function.name);
1019 +}
1020 +
1021 +int __init adb_function_add(struct usb_composite_dev *cdev,
1022 + struct usb_configuration *c)
1023 +{
1024 + struct adb_dev *dev;
1025 + int ret;
1026 +
1027 + printk(KERN_INFO "adb_function_add\n");
1028 +
1029 + dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1030 + if (!dev)
1031 + return -ENOMEM;
1032 +
1033 + spin_lock_init(&dev->lock);
1034 +
1035 + init_waitqueue_head(&dev->read_wq);
1036 + init_waitqueue_head(&dev->write_wq);
1037 +
1038 + atomic_set(&dev->open_excl, 0);
1039 + atomic_set(&dev->read_excl, 0);
1040 + atomic_set(&dev->write_excl, 0);
1041 +
1042 + INIT_LIST_HEAD(&dev->rx_idle);
1043 + INIT_LIST_HEAD(&dev->rx_done);
1044 + INIT_LIST_HEAD(&dev->tx_idle);
1045 +
1046 + dev->cdev = cdev;
1047 + dev->function.name = "adb";
1048 + dev->function.descriptors = null_adb_descs;
1049 + dev->function.hs_descriptors = null_adb_descs;
1050 + dev->function.bind = adb_function_bind;
1051 + dev->function.unbind = adb_function_unbind;
1052 + dev->function.set_alt = adb_function_set_alt;
1053 + dev->function.disable = adb_function_disable;
1054 +
1055 + /* _adb_dev must be set before calling usb_gadget_register_driver */
1056 + _adb_dev = dev;
1057 +
1058 + ret = misc_register(&adb_device);
1059 + if (ret)
1060 + goto err1;
1061 + ret = usb_add_function(c, &dev->function);
1062 + if (ret)
1063 + goto err2;
1064 +
1065 + return 0;
1066 +
1067 +err2:
1068 + misc_deregister(&adb_device);
1069 +err1:
1070 + kfree(dev);
1071 + printk(KERN_ERR "adb gadget driver failed to initialize\n");
1072 + return ret;
1073 +}
1074 +
1075 +void adb_function_enable(int enable)
1076 +{
1077 + struct adb_dev *dev = _adb_dev;
1078 +
1079 + if (dev) {
1080 + DBG(dev->cdev, "adb_function_enable(%s)\n",
1081 + enable ? "true" : "false");
1082 +
1083 + if (enable) {
1084 + dev->function.descriptors = fs_adb_descs;
1085 + dev->function.hs_descriptors = hs_adb_descs;
1086 + } else {
1087 + dev->function.descriptors = null_adb_descs;
1088 + dev->function.hs_descriptors = null_adb_descs;
1089 + }
1090 + }
1091 +}
1092 +
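/*
 * Userspace view (sketch, not part of this patch): adb is switched on by
 * holding /dev/android_adb_enable open -- adb_enable_open() in android.c
 * swaps the product ID and forces re-enumeration, adb_enable_release()
 * reverts it -- while the actual transport runs over the /dev/android_adb
 * character device served by adb_read()/adb_write() above.  The /dev paths
 * assume the usual device nodes for the misc devices registered here.
 */
#include <fcntl.h>
#include <unistd.h>

static int example_adb_roundtrip(void)
{
	char buf[4096];			/* matches BULK_BUFFER_SIZE */
	int enable_fd, adb_fd;
	ssize_t n;

	enable_fd = open("/dev/android_adb_enable", O_RDWR);	/* enables adb */
	if (enable_fd < 0)
		return -1;

	adb_fd = open("/dev/android_adb", O_RDWR);
	if (adb_fd >= 0) {
		n = read(adb_fd, buf, sizeof(buf));	/* blocks until online */
		if (n > 0)
			write(adb_fd, buf, n);		/* echo back to the host */
		close(adb_fd);
	}

	close(enable_fd);					/* disables adb again */
	return 0;
}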
1093 --- /dev/null
1094 +++ b/drivers/usb/gadget/f_adb.h
1095 @@ -0,0 +1,25 @@
1096 +/*
1097 + * Gadget Driver for Android ADB
1098 + *
1099 + * Copyright (C) 2008 Google, Inc.
1100 + * Author: Mike Lockwood <lockwood@android.com>
1101 + *
1102 + * This software is licensed under the terms of the GNU General Public
1103 + * License version 2, as published by the Free Software Foundation, and
1104 + * may be copied, distributed, and modified under those terms.
1105 + *
1106 + * This program is distributed in the hope that it will be useful,
1107 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1108 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1109 + * GNU General Public License for more details.
1110 + *
1111 + */
1112 +
1113 +#ifndef __F_ADB_H
1114 +#define __F_ADB_H
1115 +
1116 +int adb_function_add(struct usb_composite_dev *cdev,
1117 + struct usb_configuration *c);
1118 +void adb_function_enable(int enable);
1119 +
1120 +#endif /* __F_ADB_H */
1121 --- /dev/null
1122 +++ b/drivers/usb/gadget/f_mass_storage.c
1123 @@ -0,0 +1,2906 @@
1124 +/*
1125 + * drivers/usb/gadget/f_mass_storage.c
1126 + *
1127 + * Function Driver for USB Mass Storage
1128 + *
1129 + * Copyright (C) 2008 Google, Inc.
1130 + * Author: Mike Lockwood <lockwood@android.com>
1131 + *
1132 + * Based heavily on the file_storage gadget driver in
1133 + * drivers/usb/gadget/file_storage.c and licensed under the same terms:
1134 + *
1135 + * Copyright (C) 2003-2007 Alan Stern
1136 + * All rights reserved.
1137 + *
1138 + * Redistribution and use in source and binary forms, with or without
1139 + * modification, are permitted provided that the following conditions
1140 + * are met:
1141 + * 1. Redistributions of source code must retain the above copyright
1142 + * notice, this list of conditions, and the following disclaimer,
1143 + * without modification.
1144 + * 2. Redistributions in binary form must reproduce the above copyright
1145 + * notice, this list of conditions and the following disclaimer in the
1146 + * documentation and/or other materials provided with the distribution.
1147 + * 3. The names of the above-listed copyright holders may not be used
1148 + * to endorse or promote products derived from this software without
1149 + * specific prior written permission.
1150 + *
1151 + * ALTERNATIVELY, this software may be distributed under the terms of the
1152 + * GNU General Public License ("GPL") as published by the Free Software
1153 + * Foundation, either version 2 of that License or (at your option) any
1154 + * later version.
1155 + *
1156 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1157 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1158 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1159 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1160 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1161 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1162 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1163 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1164 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1165 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1166 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1167 + */
1168 +
1169 +/* #define DEBUG */
1170 +/* #define VERBOSE_DEBUG */
1171 +/* #define DUMP_MSGS */
1172 +
1173 +
1174 +#include <linux/blkdev.h>
1175 +#include <linux/completion.h>
1176 +#include <linux/dcache.h>
1177 +#include <linux/delay.h>
1178 +#include <linux/device.h>
1179 +#include <linux/fcntl.h>
1180 +#include <linux/file.h>
1181 +#include <linux/fs.h>
1182 +#include <linux/kref.h>
1183 +#include <linux/kthread.h>
1184 +#include <linux/limits.h>
1185 +#include <linux/rwsem.h>
1186 +#include <linux/slab.h>
1187 +#include <linux/spinlock.h>
1188 +#include <linux/string.h>
1189 +#include <linux/switch.h>
1190 +#include <linux/freezer.h>
1191 +#include <linux/utsname.h>
1192 +#include <linux/wakelock.h>
1193 +
1194 +#include <linux/usb_usual.h>
1195 +#include <linux/usb/ch9.h>
1196 +#include <linux/usb/composite.h>
1197 +#include <linux/usb/gadget.h>
1198 +
1199 +#include "f_mass_storage.h"
1200 +#include "gadget_chips.h"
1201 +
1202 +
1203 +#define BULK_BUFFER_SIZE 4096
1204 +
1205 +/*-------------------------------------------------------------------------*/
1206 +
1207 +#define DRIVER_NAME "usb_mass_storage"
1208 +#define MAX_LUNS 8
1209 +
1210 +static const char shortname[] = DRIVER_NAME;
1211 +
1212 +#ifdef DEBUG
1213 +#define LDBG(lun, fmt, args...) \
1214 + dev_dbg(&(lun)->dev , fmt , ## args)
1215 +#define MDBG(fmt,args...) \
1216 + printk(KERN_DEBUG DRIVER_NAME ": " fmt , ## args)
1217 +#else
1218 +#define LDBG(lun, fmt, args...) \
1219 + do { } while (0)
1220 +#define MDBG(fmt,args...) \
1221 + do { } while (0)
1222 +#undef VERBOSE_DEBUG
1223 +#undef DUMP_MSGS
1224 +#endif /* DEBUG */
1225 +
1226 +#ifdef VERBOSE_DEBUG
1227 +#define VLDBG LDBG
1228 +#else
1229 +#define VLDBG(lun, fmt, args...) \
1230 + do { } while (0)
1231 +#endif /* VERBOSE_DEBUG */
1232 +
1233 +#define LERROR(lun, fmt, args...) \
1234 + dev_err(&(lun)->dev , fmt , ## args)
1235 +#define LWARN(lun, fmt, args...) \
1236 + dev_warn(&(lun)->dev , fmt , ## args)
1237 +#define LINFO(lun, fmt, args...) \
1238 + dev_info(&(lun)->dev , fmt , ## args)
1239 +
1240 +#define MINFO(fmt,args...) \
1241 + printk(KERN_INFO DRIVER_NAME ": " fmt , ## args)
1242 +
1243 +#undef DBG
1244 +#undef VDBG
1245 +#undef ERROR
1246 +#undef WARNING
1247 +#undef INFO
1248 +#define DBG(d, fmt, args...) \
1249 + dev_dbg(&(d)->cdev->gadget->dev , fmt , ## args)
1250 +#define VDBG(d, fmt, args...) \
1251 + dev_vdbg(&(d)->cdev->gadget->dev , fmt , ## args)
1252 +#define ERROR(d, fmt, args...) \
1253 + dev_err(&(d)->cdev->gadget->dev , fmt , ## args)
1254 +#define WARNING(d, fmt, args...) \
1255 + dev_warn(&(d)->cdev->gadget->dev , fmt , ## args)
1256 +#define INFO(d, fmt, args...) \
1257 + dev_info(&(d)->cdev->gadget->dev , fmt , ## args)
1258 +
1259 +
1260 +/*-------------------------------------------------------------------------*/
1261 +
1262 +/* Bulk-only data structures */
1263 +
1264 +/* Command Block Wrapper */
1265 +struct bulk_cb_wrap {
1266 + __le32 Signature; /* Contains 'USBC' */
1267 + u32 Tag; /* Unique per command id */
1268 + __le32 DataTransferLength; /* Size of the data */
1269 + u8 Flags; /* Direction in bit 7 */
1270 + u8 Lun; /* LUN (normally 0) */
1271 + u8 Length; /* Of the CDB, <= MAX_COMMAND_SIZE */
1272 + u8 CDB[16]; /* Command Data Block */
1273 +};
1274 +
1275 +#define USB_BULK_CB_WRAP_LEN 31
1276 +#define USB_BULK_CB_SIG 0x43425355 /* Spells out USBC */
1277 +#define USB_BULK_IN_FLAG 0x80
1278 +
1279 +/* Command Status Wrapper */
1280 +struct bulk_cs_wrap {
1281 + __le32 Signature; /* Should = 'USBS' */
1282 + u32 Tag; /* Same as original command */
1283 + __le32 Residue; /* Amount not transferred */
1284 + u8 Status; /* See below */
1285 +};
1286 +
1287 +#define USB_BULK_CS_WRAP_LEN 13
1288 +#define USB_BULK_CS_SIG 0x53425355 /* Spells out 'USBS' */
1289 +#define USB_STATUS_PASS 0
1290 +#define USB_STATUS_FAIL 1
1291 +#define USB_STATUS_PHASE_ERROR 2
1292 +
1293 +/* Bulk-only class specific requests */
1294 +#define USB_BULK_RESET_REQUEST 0xff
1295 +#define USB_BULK_GET_MAX_LUN_REQUEST 0xfe
1296 +
1297 +/* Length of a SCSI Command Data Block */
1298 +#define MAX_COMMAND_SIZE 16
1299 +
1300 +/* SCSI commands that we recognize */
1301 +#define SC_FORMAT_UNIT 0x04
1302 +#define SC_INQUIRY 0x12
1303 +#define SC_MODE_SELECT_6 0x15
1304 +#define SC_MODE_SELECT_10 0x55
1305 +#define SC_MODE_SENSE_6 0x1a
1306 +#define SC_MODE_SENSE_10 0x5a
1307 +#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e
1308 +#define SC_READ_6 0x08
1309 +#define SC_READ_10 0x28
1310 +#define SC_READ_12 0xa8
1311 +#define SC_READ_CAPACITY 0x25
1312 +#define SC_READ_FORMAT_CAPACITIES 0x23
1313 +#define SC_RELEASE 0x17
1314 +#define SC_REQUEST_SENSE 0x03
1315 +#define SC_RESERVE 0x16
1316 +#define SC_SEND_DIAGNOSTIC 0x1d
1317 +#define SC_START_STOP_UNIT 0x1b
1318 +#define SC_SYNCHRONIZE_CACHE 0x35
1319 +#define SC_TEST_UNIT_READY 0x00
1320 +#define SC_VERIFY 0x2f
1321 +#define SC_WRITE_6 0x0a
1322 +#define SC_WRITE_10 0x2a
1323 +#define SC_WRITE_12 0xaa
1324 +
1325 +/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
1326 +#define SS_NO_SENSE 0
1327 +#define SS_COMMUNICATION_FAILURE 0x040800
1328 +#define SS_INVALID_COMMAND 0x052000
1329 +#define SS_INVALID_FIELD_IN_CDB 0x052400
1330 +#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100
1331 +#define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500
1332 +#define SS_MEDIUM_NOT_PRESENT 0x023a00
1333 +#define SS_MEDIUM_REMOVAL_PREVENTED 0x055302
1334 +#define SS_NOT_READY_TO_READY_TRANSITION 0x062800
1335 +#define SS_RESET_OCCURRED 0x062900
1336 +#define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900
1337 +#define SS_UNRECOVERED_READ_ERROR 0x031100
1338 +#define SS_WRITE_ERROR 0x030c02
1339 +#define SS_WRITE_PROTECTED 0x072700
1340 +
1341 +#define SK(x) ((u8) ((x) >> 16)) /* Sense Key byte, etc. */
1342 +#define ASC(x) ((u8) ((x) >> 8))
1343 +#define ASCQ(x) ((u8) (x))
1344 +
1345 +
1346 +/*-------------------------------------------------------------------------*/
1347 +
1348 +struct lun {
1349 + struct file *filp;
1350 + loff_t file_length;
1351 + loff_t num_sectors;
1352 +
1353 + unsigned int ro : 1;
1354 + unsigned int prevent_medium_removal : 1;
1355 + unsigned int registered : 1;
1356 + unsigned int info_valid : 1;
1357 +
1358 + u32 sense_data;
1359 + u32 sense_data_info;
1360 + u32 unit_attention_data;
1361 +
1362 + struct device dev;
1363 +};
1364 +
1365 +#define backing_file_is_open(curlun) ((curlun)->filp != NULL)
1366 +
1367 +
1368 +static struct lun *dev_to_lun(struct device *dev)
1369 +{
1370 + return container_of(dev, struct lun, dev);
1371 +}
1372 +
1373 +/* Big enough to hold our biggest descriptor */
1374 +#define EP0_BUFSIZE 256
1375 +#define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */
1376 +
1377 +/* Number of buffers we will use. 2 is enough for double-buffering */
1378 +#define NUM_BUFFERS 2
1379 +
1380 +enum fsg_buffer_state {
1381 + BUF_STATE_EMPTY = 0,
1382 + BUF_STATE_FULL,
1383 + BUF_STATE_BUSY
1384 +};
1385 +
1386 +struct fsg_buffhd {
1387 + void *buf;
1388 + enum fsg_buffer_state state;
1389 + struct fsg_buffhd *next;
1390 +
1391 + /* The NetChip 2280 is faster, and handles some protocol faults
1392 + * better, if we don't submit any short bulk-out read requests.
1393 + * So we will record the intended request length here. */
1394 + unsigned int bulk_out_intended_length;
1395 +
1396 + struct usb_request *inreq;
1397 + int inreq_busy;
1398 + struct usb_request *outreq;
1399 + int outreq_busy;
1400 +};
1401 +
1402 +enum fsg_state {
1403 + /* This one isn't used anywhere */
1404 + FSG_STATE_COMMAND_PHASE = -10,
1405 +
1406 + FSG_STATE_DATA_PHASE,
1407 + FSG_STATE_STATUS_PHASE,
1408 +
1409 + FSG_STATE_IDLE = 0,
1410 + FSG_STATE_ABORT_BULK_OUT,
1411 + FSG_STATE_RESET,
1412 + FSG_STATE_CONFIG_CHANGE,
1413 + FSG_STATE_EXIT,
1414 + FSG_STATE_TERMINATED
1415 +};
1416 +
1417 +enum data_direction {
1418 + DATA_DIR_UNKNOWN = 0,
1419 + DATA_DIR_FROM_HOST,
1420 + DATA_DIR_TO_HOST,
1421 + DATA_DIR_NONE
1422 +};
1423 +
1424 +struct fsg_dev {
1425 + struct usb_function function;
1426 + struct usb_composite_dev *cdev;
1427 +
1428 + /* lock protects: state and all the req_busy's */
1429 + spinlock_t lock;
1430 +
1431 + /* filesem protects: backing files in use */
1432 + struct rw_semaphore filesem;
1433 +
1434 + /* reference counting: wait until all LUNs are released */
1435 + struct kref ref;
1436 +
1437 + unsigned int bulk_out_maxpacket;
1438 + enum fsg_state state; /* For exception handling */
1439 +
1440 + u8 config, new_config;
1441 +
1442 + unsigned int running : 1;
1443 + unsigned int bulk_in_enabled : 1;
1444 + unsigned int bulk_out_enabled : 1;
1445 + unsigned int phase_error : 1;
1446 + unsigned int short_packet_received : 1;
1447 + unsigned int bad_lun_okay : 1;
1448 +
1449 + unsigned long atomic_bitflags;
1450 +#define REGISTERED 0
1451 +#define CLEAR_BULK_HALTS 1
1452 +#define SUSPENDED 2
1453 +
1454 + struct usb_ep *bulk_in;
1455 + struct usb_ep *bulk_out;
1456 +
1457 + struct fsg_buffhd *next_buffhd_to_fill;
1458 + struct fsg_buffhd *next_buffhd_to_drain;
1459 + struct fsg_buffhd buffhds[NUM_BUFFERS];
1460 +
1461 + int thread_wakeup_needed;
1462 + struct completion thread_notifier;
1463 + struct task_struct *thread_task;
1464 +
1465 + int cmnd_size;
1466 + u8 cmnd[MAX_COMMAND_SIZE];
1467 + enum data_direction data_dir;
1468 + u32 data_size;
1469 + u32 data_size_from_cmnd;
1470 + u32 tag;
1471 + unsigned int lun;
1472 + u32 residue;
1473 + u32 usb_amount_left;
1474 +
1475 + unsigned int nluns;
1476 + struct lun *luns;
1477 + struct lun *curlun;
1478 +
1479 + u32 buf_size;
1480 + const char *vendor;
1481 + const char *product;
1482 + int release;
1483 +
1484 + struct switch_dev sdev;
1485 +
1486 + struct wake_lock wake_lock;
1487 +};
1488 +
1489 +static inline struct fsg_dev *func_to_dev(struct usb_function *f)
1490 +{
1491 + return container_of(f, struct fsg_dev, function);
1492 +}
1493 +
1494 +static int exception_in_progress(struct fsg_dev *fsg)
1495 +{
1496 + return (fsg->state > FSG_STATE_IDLE);
1497 +}
1498 +
1499 +/* Make bulk-out requests be divisible by the maxpacket size */
1500 +static void set_bulk_out_req_length(struct fsg_dev *fsg,
1501 + struct fsg_buffhd *bh, unsigned int length)
1502 +{
1503 + unsigned int rem;
1504 +
1505 + bh->bulk_out_intended_length = length;
1506 + rem = length % fsg->bulk_out_maxpacket;
1507 + if (rem > 0)
1508 + length += fsg->bulk_out_maxpacket - rem;
1509 + bh->outreq->length = length;
1510 +}
1511 +
1512 +static struct fsg_dev *the_fsg;
1513 +
1514 +static void close_backing_file(struct fsg_dev *fsg, struct lun *curlun);
1515 +static void close_all_backing_files(struct fsg_dev *fsg);
1516 +
1517 +
1518 +/*-------------------------------------------------------------------------*/
1519 +
1520 +#ifdef DUMP_MSGS
1521 +
1522 +static void dump_msg(struct fsg_dev *fsg, const char *label,
1523 + const u8 *buf, unsigned int length)
1524 +{
1525 + if (length < 512) {
1526 + DBG(fsg, "%s, length %u:\n", label, length);
1527 + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1528 + 16, 1, buf, length, 0);
1529 + }
1530 +}
1531 +
1532 +static void dump_cdb(struct fsg_dev *fsg)
1533 +{}
1534 +
1535 +#else
1536 +
1537 +static void dump_msg(struct fsg_dev *fsg, const char *label,
1538 + const u8 *buf, unsigned int length)
1539 +{}
1540 +
1541 +#ifdef VERBOSE_DEBUG
1542 +
1543 +static void dump_cdb(struct fsg_dev *fsg)
1544 +{
1545 + print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE,
1546 + 16, 1, fsg->cmnd, fsg->cmnd_size, 0);
1547 +}
1548 +
1549 +#else
1550 +
1551 +static void dump_cdb(struct fsg_dev *fsg)
1552 +{}
1553 +
1554 +#endif /* VERBOSE_DEBUG */
1555 +#endif /* DUMP_MSGS */
1556 +
1557 +
1558 +/*-------------------------------------------------------------------------*/
1559 +
1560 +/* Routines for unaligned data access */
1561 +
1562 +static u16 get_be16(u8 *buf)
1563 +{
1564 + return ((u16) buf[0] << 8) | ((u16) buf[1]);
1565 +}
1566 +
1567 +static u32 get_be32(u8 *buf)
1568 +{
1569 + return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) |
1570 + ((u32) buf[2] << 8) | ((u32) buf[3]);
1571 +}
1572 +
1573 +static void put_be16(u8 *buf, u16 val)
1574 +{
1575 + buf[0] = val >> 8;
1576 + buf[1] = val;
1577 +}
1578 +
1579 +static void put_be32(u8 *buf, u32 val)
1580 +{
1581 + buf[0] = val >> 24;
1582 + buf[1] = val >> 16;
1583 + buf[2] = val >> 8;
1584 + buf[3] = val & 0xff;
1585 +}
1586 +
1587 +/*-------------------------------------------------------------------------*/
1588 +
1589 +/*
1590 + * DESCRIPTORS ... most are static, but strings and (full) configuration
1591 + * descriptors are built on demand. Also the (static) config and interface
1592 + * descriptors are adjusted during fsg_bind().
1593 + */
1594 +
1595 +/* There is only one interface. */
1596 +
1597 +static struct usb_interface_descriptor
1598 +intf_desc = {
1599 + .bLength = sizeof intf_desc,
1600 + .bDescriptorType = USB_DT_INTERFACE,
1601 +
1602 + .bNumEndpoints = 2, /* Adjusted during fsg_bind() */
1603 + .bInterfaceClass = USB_CLASS_MASS_STORAGE,
1604 + .bInterfaceSubClass = US_SC_SCSI,
1605 + .bInterfaceProtocol = US_PR_BULK,
1606 +};
1607 +
 1608 +/* Two full-speed endpoint descriptors: bulk-in
 1609 + * and bulk-out. */
1610 +
1611 +static struct usb_endpoint_descriptor
1612 +fs_bulk_in_desc = {
1613 + .bLength = USB_DT_ENDPOINT_SIZE,
1614 + .bDescriptorType = USB_DT_ENDPOINT,
1615 +
1616 + .bEndpointAddress = USB_DIR_IN,
1617 + .bmAttributes = USB_ENDPOINT_XFER_BULK,
1618 + /* wMaxPacketSize set by autoconfiguration */
1619 +};
1620 +
1621 +static struct usb_endpoint_descriptor
1622 +fs_bulk_out_desc = {
1623 + .bLength = USB_DT_ENDPOINT_SIZE,
1624 + .bDescriptorType = USB_DT_ENDPOINT,
1625 +
1626 + .bEndpointAddress = USB_DIR_OUT,
1627 + .bmAttributes = USB_ENDPOINT_XFER_BULK,
1628 + /* wMaxPacketSize set by autoconfiguration */
1629 +};
1630 +
1631 +static struct usb_descriptor_header *fs_function[] = {
1632 + (struct usb_descriptor_header *) &intf_desc,
1633 + (struct usb_descriptor_header *) &fs_bulk_in_desc,
1634 + (struct usb_descriptor_header *) &fs_bulk_out_desc,
1635 + NULL,
1636 +};
1637 +#define FS_FUNCTION_PRE_EP_ENTRIES 2
1638 +
1639 +
1640 +static struct usb_endpoint_descriptor
1641 +hs_bulk_in_desc = {
1642 + .bLength = USB_DT_ENDPOINT_SIZE,
1643 + .bDescriptorType = USB_DT_ENDPOINT,
1644 +
1645 + /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
1646 + .bmAttributes = USB_ENDPOINT_XFER_BULK,
1647 + .wMaxPacketSize = __constant_cpu_to_le16(512),
1648 +};
1649 +
1650 +static struct usb_endpoint_descriptor
1651 +hs_bulk_out_desc = {
1652 + .bLength = USB_DT_ENDPOINT_SIZE,
1653 + .bDescriptorType = USB_DT_ENDPOINT,
1654 +
1655 + /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
1656 + .bmAttributes = USB_ENDPOINT_XFER_BULK,
1657 + .wMaxPacketSize = __constant_cpu_to_le16(512),
1658 + .bInterval = 1, /* NAK every 1 uframe */
1659 +};
1660 +
1661 +
1662 +static struct usb_descriptor_header *hs_function[] = {
1663 + (struct usb_descriptor_header *) &intf_desc,
1664 + (struct usb_descriptor_header *) &hs_bulk_in_desc,
1665 + (struct usb_descriptor_header *) &hs_bulk_out_desc,
1666 + NULL,
1667 +};
1668 +
1669 +/* Maxpacket and other transfer characteristics vary by speed. */
1670 +static struct usb_endpoint_descriptor *
1671 +ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
1672 + struct usb_endpoint_descriptor *hs)
1673 +{
1674 + if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
1675 + return hs;
1676 + return fs;
1677 +}
1678 +
1679 +/*-------------------------------------------------------------------------*/
1680 +
1681 +/* These routines may be called in process context or in_irq */
1682 +
1683 +/* Caller must hold fsg->lock */
1684 +static void wakeup_thread(struct fsg_dev *fsg)
1685 +{
1686 + /* Tell the main thread that something has happened */
1687 + fsg->thread_wakeup_needed = 1;
1688 + if (fsg->thread_task)
1689 + wake_up_process(fsg->thread_task);
1690 +}
1691 +
1692 +
1693 +static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
1694 +{
1695 + unsigned long flags;
1696 +
1697 + DBG(fsg, "raise_exception %d\n", (int)new_state);
1698 + /* Do nothing if a higher-priority exception is already in progress.
1699 + * If a lower-or-equal priority exception is in progress, preempt it
1700 + * and notify the main thread by sending it a signal. */
1701 + spin_lock_irqsave(&fsg->lock, flags);
1702 + if (fsg->state <= new_state) {
1703 + fsg->state = new_state;
1704 + if (fsg->thread_task)
1705 + send_sig_info(SIGUSR1, SEND_SIG_FORCED,
1706 + fsg->thread_task);
1707 + }
1708 + spin_unlock_irqrestore(&fsg->lock, flags);
1709 +}
1710 +
1711 +
1712 +/*-------------------------------------------------------------------------*/
1713 +
1714 +/* Bulk and interrupt endpoint completion handlers.
1715 + * These always run in_irq. */
1716 +
1717 +static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
1718 +{
1719 + struct fsg_dev *fsg = ep->driver_data;
1720 + struct fsg_buffhd *bh = req->context;
1721 +
1722 + if (req->status || req->actual != req->length)
1723 + DBG(fsg, "%s --> %d, %u/%u\n", __func__,
1724 + req->status, req->actual, req->length);
1725 +
1726 + /* Hold the lock while we update the request and buffer states */
1727 + smp_wmb();
1728 + spin_lock(&fsg->lock);
1729 + bh->inreq_busy = 0;
1730 + bh->state = BUF_STATE_EMPTY;
1731 + wakeup_thread(fsg);
1732 + spin_unlock(&fsg->lock);
1733 +}
1734 +
1735 +static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
1736 +{
1737 + struct fsg_dev *fsg = ep->driver_data;
1738 + struct fsg_buffhd *bh = req->context;
1739 +
1740 + dump_msg(fsg, "bulk-out", req->buf, req->actual);
1741 + if (req->status || req->actual != bh->bulk_out_intended_length)
1742 + DBG(fsg, "%s --> %d, %u/%u\n", __func__,
1743 + req->status, req->actual,
1744 + bh->bulk_out_intended_length);
1745 +
1746 + /* Hold the lock while we update the request and buffer states */
1747 + smp_wmb();
1748 + spin_lock(&fsg->lock);
1749 + bh->outreq_busy = 0;
1750 + bh->state = BUF_STATE_FULL;
1751 + wakeup_thread(fsg);
1752 + spin_unlock(&fsg->lock);
1753 +}
1754 +
1755 +static int fsg_function_setup(struct usb_function *f,
1756 + const struct usb_ctrlrequest *ctrl)
1757 +{
1758 + struct fsg_dev *fsg = func_to_dev(f);
1759 + struct usb_composite_dev *cdev = fsg->cdev;
1760 + int value = -EOPNOTSUPP;
1761 + u16 w_index = le16_to_cpu(ctrl->wIndex);
1762 + u16 w_value = le16_to_cpu(ctrl->wValue);
1763 + u16 w_length = le16_to_cpu(ctrl->wLength);
1764 +
1765 + DBG(fsg, "fsg_function_setup\n");
1766 + /* Handle Bulk-only class-specific requests */
1767 + if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
1768 + DBG(fsg, "USB_TYPE_CLASS\n");
1769 + switch (ctrl->bRequest) {
1770 + case USB_BULK_RESET_REQUEST:
1771 + if (ctrl->bRequestType != (USB_DIR_OUT |
1772 + USB_TYPE_CLASS | USB_RECIP_INTERFACE))
1773 + break;
1774 + if (w_index != 0 || w_value != 0) {
1775 + value = -EDOM;
1776 + break;
1777 + }
1778 +
1779 + /* Raise an exception to stop the current operation
1780 + * and reinitialize our state. */
1781 + DBG(fsg, "bulk reset request\n");
1782 + raise_exception(fsg, FSG_STATE_RESET);
1783 + value = DELAYED_STATUS;
1784 + break;
1785 +
1786 + case USB_BULK_GET_MAX_LUN_REQUEST:
1787 + if (ctrl->bRequestType != (USB_DIR_IN |
1788 + USB_TYPE_CLASS | USB_RECIP_INTERFACE))
1789 + break;
1790 + if (w_index != 0 || w_value != 0) {
1791 + value = -EDOM;
1792 + break;
1793 + }
1794 + VDBG(fsg, "get max LUN\n");
1795 + *(u8 *)cdev->req->buf = fsg->nluns - 1;
1796 + value = 1;
1797 + break;
1798 + }
1799 + }
1800 +
1801 + if (value == -EOPNOTSUPP)
1802 + VDBG(fsg,
1803 + "unknown class-specific control req "
1804 + "%02x.%02x v%04x i%04x l%u\n",
1805 + ctrl->bRequestType, ctrl->bRequest,
1806 + le16_to_cpu(ctrl->wValue), w_index, w_length);
1807 + return value;
1808 +}
1809 +
1810 +/*-------------------------------------------------------------------------*/
1811 +
1812 +/* All the following routines run in process context */
1813 +
1814 +
1815 +/* Use this for bulk or interrupt transfers, not ep0 */
1816 +static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
1817 + struct usb_request *req, int *pbusy,
1818 + enum fsg_buffer_state *state)
1819 +{
1820 + int rc;
1821 +
1822 + DBG(fsg, "start_transfer req: %p, req->buf: %p\n", req, req->buf);
1823 + if (ep == fsg->bulk_in)
1824 + dump_msg(fsg, "bulk-in", req->buf, req->length);
1825 +
1826 + spin_lock_irq(&fsg->lock);
1827 + *pbusy = 1;
1828 + *state = BUF_STATE_BUSY;
1829 + spin_unlock_irq(&fsg->lock);
1830 + rc = usb_ep_queue(ep, req, GFP_KERNEL);
1831 + if (rc != 0) {
1832 + *pbusy = 0;
1833 + *state = BUF_STATE_EMPTY;
1834 +
1835 + /* We can't do much more than wait for a reset */
1836 +
1837 + /* Note: currently the net2280 driver fails zero-length
1838 + * submissions if DMA is enabled. */
1839 + if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
1840 + req->length == 0))
1841 + WARN(fsg, "error in submission: %s --> %d\n",
1842 + (ep == fsg->bulk_in ? "bulk-in" : "bulk-out"),
1843 + rc);
1844 + }
1845 +}
1846 +
1847 +
1848 +static int sleep_thread(struct fsg_dev *fsg)
1849 +{
1850 + int rc = 0;
1851 +
1852 + /* Wait until a signal arrives or we are woken up */
1853 + for (;;) {
1854 + try_to_freeze();
1855 + set_current_state(TASK_INTERRUPTIBLE);
1856 + if (signal_pending(current)) {
1857 + rc = -EINTR;
1858 + break;
1859 + }
1860 + if (fsg->thread_wakeup_needed)
1861 + break;
1862 + schedule();
1863 + }
1864 + __set_current_state(TASK_RUNNING);
1865 + fsg->thread_wakeup_needed = 0;
1866 + return rc;
1867 +}
1868 +
1869 +
1870 +/*-------------------------------------------------------------------------*/
1871 +
1872 +static int do_read(struct fsg_dev *fsg)
1873 +{
1874 + struct lun *curlun = fsg->curlun;
1875 + u32 lba;
1876 + struct fsg_buffhd *bh;
1877 + int rc;
1878 + u32 amount_left;
1879 + loff_t file_offset, file_offset_tmp;
1880 + unsigned int amount;
1881 + unsigned int partial_page;
1882 + ssize_t nread;
1883 +
1884 + /* Get the starting Logical Block Address and check that it's
1885 + * not too big */
1886 + if (fsg->cmnd[0] == SC_READ_6)
1887 + lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
1888 + else {
1889 + lba = get_be32(&fsg->cmnd[2]);
1890 +
1891 + /* We allow DPO (Disable Page Out = don't save data in the
1892 + * cache) and FUA (Force Unit Access = don't read from the
1893 + * cache), but we don't implement them. */
1894 + if ((fsg->cmnd[1] & ~0x18) != 0) {
1895 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1896 + return -EINVAL;
1897 + }
1898 + }
1899 + if (lba >= curlun->num_sectors) {
1900 + curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1901 + return -EINVAL;
1902 + }
1903 + file_offset = ((loff_t) lba) << 9;
1904 +
1905 + /* Carry out the file reads */
1906 + amount_left = fsg->data_size_from_cmnd;
1907 + if (unlikely(amount_left == 0))
1908 + return -EIO; /* No default reply */
1909 +
1910 + for (;;) {
1911 +
1912 + /* Figure out how much we need to read:
1913 + * Try to read the remaining amount.
1914 + * But don't read more than the buffer size.
1915 + * And don't try to read past the end of the file.
1916 + * Finally, if we're not at a page boundary, don't read past
1917 + * the next page.
1918 + * If this means reading 0 then we were asked to read past
1919 + * the end of file. */
1920 + amount = min((unsigned int) amount_left,
1921 + (unsigned int)fsg->buf_size);
1922 + amount = min((loff_t) amount,
1923 + curlun->file_length - file_offset);
1924 + partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
1925 + if (partial_page > 0)
1926 + amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
1927 + partial_page);
1928 +
1929 + /* Wait for the next buffer to become available */
1930 + bh = fsg->next_buffhd_to_fill;
1931 + while (bh->state != BUF_STATE_EMPTY) {
1932 + rc = sleep_thread(fsg);
1933 + if (rc)
1934 + return rc;
1935 + }
1936 +
1937 + /* If we were asked to read past the end of file,
1938 + * end with an empty buffer. */
1939 + if (amount == 0) {
1940 + curlun->sense_data =
1941 + SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1942 + curlun->sense_data_info = file_offset >> 9;
1943 + curlun->info_valid = 1;
1944 + bh->inreq->length = 0;
1945 + bh->state = BUF_STATE_FULL;
1946 + break;
1947 + }
1948 +
1949 + /* Perform the read */
1950 + file_offset_tmp = file_offset;
1951 + nread = vfs_read(curlun->filp,
1952 + (char __user *) bh->buf,
1953 + amount, &file_offset_tmp);
1954 + VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1955 + (unsigned long long) file_offset,
1956 + (int) nread);
1957 + if (signal_pending(current))
1958 + return -EINTR;
1959 +
1960 + if (nread < 0) {
1961 + LDBG(curlun, "error in file read: %d\n",
1962 + (int) nread);
1963 + nread = 0;
1964 + } else if (nread < amount) {
1965 + LDBG(curlun, "partial file read: %d/%u\n",
1966 + (int) nread, amount);
1967 + nread -= (nread & 511); /* Round down to a block */
1968 + }
1969 + file_offset += nread;
1970 + amount_left -= nread;
1971 + fsg->residue -= nread;
1972 + bh->inreq->length = nread;
1973 + bh->state = BUF_STATE_FULL;
1974 +
1975 + /* If an error occurred, report it and its position */
1976 + if (nread < amount) {
1977 + curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1978 + curlun->sense_data_info = file_offset >> 9;
1979 + curlun->info_valid = 1;
1980 + break;
1981 + }
1982 +
1983 + if (amount_left == 0)
1984 + break; /* No more left to read */
1985 +
1986 + /* Send this buffer and go read some more */
1987 + start_transfer(fsg, fsg->bulk_in, bh->inreq,
1988 + &bh->inreq_busy, &bh->state);
1989 + fsg->next_buffhd_to_fill = bh->next;
1990 + }
1991 +
1992 + return -EIO; /* No default reply */
1993 +}
1994 +
1995 +
1996 +/*-------------------------------------------------------------------------*/
1997 +
1998 +static int do_write(struct fsg_dev *fsg)
1999 +{
2000 + struct lun *curlun = fsg->curlun;
2001 + u32 lba;
2002 + struct fsg_buffhd *bh;
2003 + int get_some_more;
2004 + u32 amount_left_to_req, amount_left_to_write;
2005 + loff_t usb_offset, file_offset, file_offset_tmp;
2006 + unsigned int amount;
2007 + unsigned int partial_page;
2008 + ssize_t nwritten;
2009 + int rc;
2010 +
2011 + if (curlun->ro) {
2012 + curlun->sense_data = SS_WRITE_PROTECTED;
2013 + return -EINVAL;
2014 + }
2015 + curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */
2016 +
2017 + /* Get the starting Logical Block Address and check that it's
2018 + * not too big */
2019 + if (fsg->cmnd[0] == SC_WRITE_6)
2020 + lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
2021 + else {
2022 + lba = get_be32(&fsg->cmnd[2]);
2023 +
2024 + /* We allow DPO (Disable Page Out = don't save data in the
2025 + * cache) and FUA (Force Unit Access = write directly to the
2026 + * medium). We don't implement DPO; we implement FUA by
2027 + * performing synchronous output. */
2028 + if ((fsg->cmnd[1] & ~0x18) != 0) {
2029 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2030 + return -EINVAL;
2031 + }
2032 + if (fsg->cmnd[1] & 0x08) /* FUA */
2033 + curlun->filp->f_flags |= O_SYNC;
2034 + }
2035 + if (lba >= curlun->num_sectors) {
2036 + curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
2037 + return -EINVAL;
2038 + }
2039 +
2040 + /* Carry out the file writes */
2041 + get_some_more = 1;
2042 + file_offset = usb_offset = ((loff_t) lba) << 9;
2043 + amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
2044 +
2045 + while (amount_left_to_write > 0) {
2046 +
2047 + /* Queue a request for more data from the host */
2048 + bh = fsg->next_buffhd_to_fill;
2049 + if (bh->state == BUF_STATE_EMPTY && get_some_more) {
2050 +
2051 + /* Figure out how much we want to get:
2052 + * Try to get the remaining amount.
2053 + * But don't get more than the buffer size.
2054 + * And don't try to go past the end of the file.
2055 + * If we're not at a page boundary,
2056 + * don't go past the next page.
2057 + * If this means getting 0, then we were asked
2058 + * to write past the end of file.
2059 + * Finally, round down to a block boundary. */
2060 + amount = min(amount_left_to_req, (u32)fsg->buf_size);
2061 + amount = min((loff_t) amount, curlun->file_length -
2062 + usb_offset);
2063 + partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
2064 + if (partial_page > 0)
2065 + amount = min(amount,
2066 + (unsigned int) PAGE_CACHE_SIZE - partial_page);
2067 +
2068 + if (amount == 0) {
2069 + get_some_more = 0;
2070 + curlun->sense_data =
2071 + SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
2072 + curlun->sense_data_info = usb_offset >> 9;
2073 + curlun->info_valid = 1;
2074 + continue;
2075 + }
2076 + amount -= (amount & 511);
2077 + if (amount == 0) {
2078 +
2079 + /* Why were we asked to transfer a
2080 + * partial block? */
2081 + get_some_more = 0;
2082 + continue;
2083 + }
2084 +
2085 + /* Get the next buffer */
2086 + usb_offset += amount;
2087 + fsg->usb_amount_left -= amount;
2088 + amount_left_to_req -= amount;
2089 + if (amount_left_to_req == 0)
2090 + get_some_more = 0;
2091 +
2092 + /* amount is always divisible by 512, hence by
2093 + * the bulk-out maxpacket size */
2094 + bh->outreq->length = bh->bulk_out_intended_length =
2095 + amount;
2096 + start_transfer(fsg, fsg->bulk_out, bh->outreq,
2097 + &bh->outreq_busy, &bh->state);
2098 + fsg->next_buffhd_to_fill = bh->next;
2099 + continue;
2100 + }
2101 +
2102 + /* Write the received data to the backing file */
2103 + bh = fsg->next_buffhd_to_drain;
2104 + if (bh->state == BUF_STATE_EMPTY && !get_some_more)
2105 + break; /* We stopped early */
2106 + if (bh->state == BUF_STATE_FULL) {
2107 + smp_rmb();
2108 + fsg->next_buffhd_to_drain = bh->next;
2109 + bh->state = BUF_STATE_EMPTY;
2110 +
2111 + /* Did something go wrong with the transfer? */
2112 + if (bh->outreq->status != 0) {
2113 + curlun->sense_data = SS_COMMUNICATION_FAILURE;
2114 + curlun->sense_data_info = file_offset >> 9;
2115 + curlun->info_valid = 1;
2116 + break;
2117 + }
2118 +
2119 + amount = bh->outreq->actual;
2120 + if (curlun->file_length - file_offset < amount) {
2121 + LERROR(curlun,
2122 + "write %u @ %llu beyond end %llu\n",
2123 + amount, (unsigned long long) file_offset,
2124 + (unsigned long long) curlun->file_length);
2125 + amount = curlun->file_length - file_offset;
2126 + }
2127 +
2128 + /* Perform the write */
2129 + file_offset_tmp = file_offset;
2130 + nwritten = vfs_write(curlun->filp,
2131 + (char __user *) bh->buf,
2132 + amount, &file_offset_tmp);
2133 + VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
2134 + (unsigned long long) file_offset,
2135 + (int) nwritten);
2136 + if (signal_pending(current))
2137 + return -EINTR; /* Interrupted! */
2138 +
2139 + if (nwritten < 0) {
2140 + LDBG(curlun, "error in file write: %d\n",
2141 + (int) nwritten);
2142 + nwritten = 0;
2143 + } else if (nwritten < amount) {
2144 + LDBG(curlun, "partial file write: %d/%u\n",
2145 + (int) nwritten, amount);
2146 + nwritten -= (nwritten & 511);
2147 + /* Round down to a block */
2148 + }
2149 + file_offset += nwritten;
2150 + amount_left_to_write -= nwritten;
2151 + fsg->residue -= nwritten;
2152 +
2153 + /* If an error occurred, report it and its position */
2154 + if (nwritten < amount) {
2155 + curlun->sense_data = SS_WRITE_ERROR;
2156 + curlun->sense_data_info = file_offset >> 9;
2157 + curlun->info_valid = 1;
2158 + break;
2159 + }
2160 +
2161 + /* Did the host decide to stop early? */
2162 + if (bh->outreq->actual != bh->outreq->length) {
2163 + fsg->short_packet_received = 1;
2164 + break;
2165 + }
2166 + continue;
2167 + }
2168 +
2169 + /* Wait for something to happen */
2170 + rc = sleep_thread(fsg);
2171 + if (rc)
2172 + return rc;
2173 + }
2174 +
2175 + return -EIO; /* No default reply */
2176 +}
2177 +
2178 +
2179 +/*-------------------------------------------------------------------------*/
2180 +
2181 +/* Sync the file data, don't bother with the metadata.
2182 + * The caller must own fsg->filesem.
2183 + * This code was copied from fs/buffer.c:sys_fdatasync(). */
2184 +static int fsync_sub(struct lun *curlun)
2185 +{
2186 + struct file *filp = curlun->filp;
2187 + struct inode *inode;
2188 + int rc, err;
2189 +
2190 + if (curlun->ro || !filp)
2191 + return 0;
2192 + if (!filp->f_op->fsync)
2193 + return -EINVAL;
2194 +
2195 + inode = filp->f_path.dentry->d_inode;
2196 + mutex_lock(&inode->i_mutex);
2197 + rc = filemap_fdatawrite(inode->i_mapping);
2198 + err = filp->f_op->fsync(filp, filp->f_path.dentry, 1);
2199 + if (!rc)
2200 + rc = err;
2201 + err = filemap_fdatawait(inode->i_mapping);
2202 + if (!rc)
2203 + rc = err;
2204 + mutex_unlock(&inode->i_mutex);
2205 + VLDBG(curlun, "fdatasync -> %d\n", rc);
2206 + return rc;
2207 +}
2208 +
2209 +static void fsync_all(struct fsg_dev *fsg)
2210 +{
2211 + int i;
2212 +
2213 + for (i = 0; i < fsg->nluns; ++i)
2214 + fsync_sub(&fsg->luns[i]);
2215 +}
2216 +
2217 +static int do_synchronize_cache(struct fsg_dev *fsg)
2218 +{
2219 + struct lun *curlun = fsg->curlun;
2220 + int rc;
2221 +
2222 + /* We ignore the requested LBA and write out all the file's
2223 + * dirty data buffers. */
2224 + rc = fsync_sub(curlun);
2225 + if (rc)
2226 + curlun->sense_data = SS_WRITE_ERROR;
2227 + return 0;
2228 +}
2229 +
2230 +
2231 +/*-------------------------------------------------------------------------*/
2232 +
2233 +static void invalidate_sub(struct lun *curlun)
2234 +{
2235 + struct file *filp = curlun->filp;
2236 + struct inode *inode = filp->f_path.dentry->d_inode;
2237 + unsigned long rc;
2238 +
2239 + rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
2240 + VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
2241 +}
2242 +
2243 +static int do_verify(struct fsg_dev *fsg)
2244 +{
2245 + struct lun *curlun = fsg->curlun;
2246 + u32 lba;
2247 + u32 verification_length;
2248 + struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
2249 + loff_t file_offset, file_offset_tmp;
2250 + u32 amount_left;
2251 + unsigned int amount;
2252 + ssize_t nread;
2253 +
2254 + /* Get the starting Logical Block Address and check that it's
2255 + * not too big */
2256 + lba = get_be32(&fsg->cmnd[2]);
2257 + if (lba >= curlun->num_sectors) {
2258 + curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
2259 + return -EINVAL;
2260 + }
2261 +
2262 + /* We allow DPO (Disable Page Out = don't save data in the
2263 + * cache) but we don't implement it. */
2264 + if ((fsg->cmnd[1] & ~0x10) != 0) {
2265 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2266 + return -EINVAL;
2267 + }
2268 +
2269 + verification_length = get_be16(&fsg->cmnd[7]);
2270 + if (unlikely(verification_length == 0))
2271 + return -EIO; /* No default reply */
2272 +
2273 + /* Prepare to carry out the file verify */
2274 + amount_left = verification_length << 9;
2275 + file_offset = ((loff_t) lba) << 9;
2276 +
2277 + /* Write out all the dirty buffers before invalidating them */
2278 + fsync_sub(curlun);
2279 + if (signal_pending(current))
2280 + return -EINTR;
2281 +
2282 + invalidate_sub(curlun);
2283 + if (signal_pending(current))
2284 + return -EINTR;
2285 +
2286 + /* Just try to read the requested blocks */
2287 + while (amount_left > 0) {
2288 +
2289 + /* Figure out how much we need to read:
2290 + * Try to read the remaining amount, but not more than
2291 + * the buffer size.
2292 + * And don't try to read past the end of the file.
2293 + * If this means reading 0 then we were asked to read
2294 + * past the end of file. */
2295 + amount = min((unsigned int) amount_left,
2296 + (unsigned int)fsg->buf_size);
2297 + amount = min((loff_t) amount,
2298 + curlun->file_length - file_offset);
2299 + if (amount == 0) {
2300 + curlun->sense_data =
2301 + SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
2302 + curlun->sense_data_info = file_offset >> 9;
2303 + curlun->info_valid = 1;
2304 + break;
2305 + }
2306 +
2307 + /* Perform the read */
2308 + file_offset_tmp = file_offset;
2309 + nread = vfs_read(curlun->filp,
2310 + (char __user *) bh->buf,
2311 + amount, &file_offset_tmp);
2312 + VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
2313 + (unsigned long long) file_offset,
2314 + (int) nread);
2315 + if (signal_pending(current))
2316 + return -EINTR;
2317 +
2318 + if (nread < 0) {
2319 + LDBG(curlun, "error in file verify: %d\n",
2320 + (int) nread);
2321 + nread = 0;
2322 + } else if (nread < amount) {
2323 + LDBG(curlun, "partial file verify: %d/%u\n",
2324 + (int) nread, amount);
2325 + nread -= (nread & 511); /* Round down to a sector */
2326 + }
2327 + if (nread == 0) {
2328 + curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
2329 + curlun->sense_data_info = file_offset >> 9;
2330 + curlun->info_valid = 1;
2331 + break;
2332 + }
2333 + file_offset += nread;
2334 + amount_left -= nread;
2335 + }
2336 + return 0;
2337 +}
2338 +
2339 +
2340 +/*-------------------------------------------------------------------------*/
2341 +
2342 +static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2343 +{
2344 + u8 *buf = (u8 *) bh->buf;
2345 +
2346 + if (!fsg->curlun) { /* Unsupported LUNs are okay */
2347 + fsg->bad_lun_okay = 1;
2348 + memset(buf, 0, 36);
2349 + buf[0] = 0x7f; /* Unsupported, no device-type */
2350 + return 36;
2351 + }
2352 +
2353 + memset(buf, 0, 8); /* Direct-access device */
2354 +
2355 + buf[1] = 0x80; /* set removable bit */
2356 + buf[2] = 2; /* ANSI SCSI level 2 */
2357 + buf[3] = 2; /* SCSI-2 INQUIRY data format */
2358 + buf[4] = 31; /* Additional length */
2359 + /* No special options */
2360 + sprintf(buf + 8, "%-8s%-16s%04x", fsg->vendor,
2361 + fsg->product, fsg->release);
2362 + return 36;
2363 +}
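As a reading aid, the 36 bytes assembled above follow the standard SCSI INQUIRY data layout. The struct below is a hypothetical mirror of that layout (it does not appear in the patch), with the values this function fills in noted in the comments:

#include <stdint.h>

struct std_inquiry_data {            /* 36 bytes on the wire */
        uint8_t peripheral;          /* 0x00: direct-access device
                                        (0x7f when the LUN is unsupported) */
        uint8_t rmb;                 /* 0x80: removable medium */
        uint8_t version;             /* 2: ANSI SCSI-2 */
        uint8_t response_format;     /* 2: SCSI-2 INQUIRY data format */
        uint8_t additional_length;   /* 31: bytes following this field */
        uint8_t reserved[3];
        char    vendor[8];           /* space padded, no NUL terminator */
        char    product[16];
        char    revision[4];         /* fsg->release printed as %04x */
} __attribute__((packed));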
2364 +
2365 +
2366 +static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2367 +{
2368 + struct lun *curlun = fsg->curlun;
2369 + u8 *buf = (u8 *) bh->buf;
2370 + u32 sd, sdinfo;
2371 + int valid;
2372 +
2373 + /*
2374 + * From the SCSI-2 spec., section 7.9 (Unit attention condition):
2375 + *
2376 + * If a REQUEST SENSE command is received from an initiator
2377 + * with a pending unit attention condition (before the target
2378 + * generates the contingent allegiance condition), then the
2379 + * target shall either:
2380 + * a) report any pending sense data and preserve the unit
2381 + * attention condition on the logical unit, or,
2382 + * b) report the unit attention condition, may discard any
2383 + * pending sense data, and clear the unit attention
2384 + * condition on the logical unit for that initiator.
2385 + *
2386 + * FSG normally uses option a); enable this code to use option b).
2387 + */
2388 +#if 0
2389 + if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
2390 + curlun->sense_data = curlun->unit_attention_data;
2391 + curlun->unit_attention_data = SS_NO_SENSE;
2392 + }
2393 +#endif
2394 +
2395 + if (!curlun) { /* Unsupported LUNs are okay */
2396 + fsg->bad_lun_okay = 1;
2397 + sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
2398 + sdinfo = 0;
2399 + valid = 0;
2400 + } else {
2401 + sd = curlun->sense_data;
2402 + sdinfo = curlun->sense_data_info;
2403 + valid = curlun->info_valid << 7;
2404 + curlun->sense_data = SS_NO_SENSE;
2405 + curlun->sense_data_info = 0;
2406 + curlun->info_valid = 0;
2407 + }
2408 +
2409 + memset(buf, 0, 18);
2410 + buf[0] = valid | 0x70; /* Valid, current error */
2411 + buf[2] = SK(sd);
2412 + put_be32(&buf[3], sdinfo); /* Sense information */
2413 + buf[7] = 18 - 8; /* Additional sense length */
2414 + buf[12] = ASC(sd);
2415 + buf[13] = ASCQ(sd);
2416 + return 18;
2417 +}
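The 18-byte reply built here is fixed-format sense data as defined by the SCSI spec. A small decoding helper (hypothetical, shown only to mark where the interesting fields live) would read it back like this:

/* Extract the sense key, additional sense code/qualifier and the sense
 * information field from an 18-byte fixed-format sense buffer such as
 * the one built above. */
static void parse_fixed_sense(const unsigned char *sense,
                              unsigned int *sk, unsigned int *asc,
                              unsigned int *ascq, unsigned long *info)
{
        *sk   = sense[2] & 0x0f;                  /* sense key */
        *asc  = sense[12];                        /* additional sense code */
        *ascq = sense[13];                        /* ... qualifier */
        *info = ((unsigned long)sense[3] << 24) | /* sense information, */
                ((unsigned long)sense[4] << 16) | /* e.g. the failing LBA */
                ((unsigned long)sense[5] << 8)  |
                 (unsigned long)sense[6];
}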
2418 +
2419 +
2420 +static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2421 +{
2422 + struct lun *curlun = fsg->curlun;
2423 + u32 lba = get_be32(&fsg->cmnd[2]);
2424 + int pmi = fsg->cmnd[8];
2425 + u8 *buf = (u8 *) bh->buf;
2426 +
2427 + /* Check the PMI and LBA fields */
2428 + if (pmi > 1 || (pmi == 0 && lba != 0)) {
2429 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2430 + return -EINVAL;
2431 + }
2432 +
2433 + put_be32(&buf[0], curlun->num_sectors - 1); /* Max logical block */
2434 + put_be32(&buf[4], 512); /* Block length */
2435 + return 8;
2436 +}
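The reply is the 8-byte READ CAPACITY(10) format: the big-endian address of the last block followed by the big-endian block length, which this driver fixes at 512. A short sketch (an assumed host-side helper, not in the patch) of how that maps to a size in bytes:

#include <stdint.h>

static uint64_t read_capacity_bytes(const uint8_t *b)
{
        uint32_t last_lba = ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
                            ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
        uint32_t blk_len  = ((uint32_t)b[4] << 24) | ((uint32_t)b[5] << 16) |
                            ((uint32_t)b[6] << 8)  |  (uint32_t)b[7];

        /* last_lba is the highest addressable block, so add one. */
        return ((uint64_t)last_lba + 1) * blk_len;
}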
2437 +
2438 +
2439 +static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2440 +{
2441 + struct lun *curlun = fsg->curlun;
2442 + int mscmnd = fsg->cmnd[0];
2443 + u8 *buf = (u8 *) bh->buf;
2444 + u8 *buf0 = buf;
2445 + int pc, page_code;
2446 + int changeable_values, all_pages;
2447 + int valid_page = 0;
2448 + int len, limit;
2449 +
2450 + if ((fsg->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */
2451 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2452 + return -EINVAL;
2453 + }
2454 + pc = fsg->cmnd[2] >> 6;
2455 + page_code = fsg->cmnd[2] & 0x3f;
2456 + if (pc == 3) {
2457 + curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
2458 + return -EINVAL;
2459 + }
2460 + changeable_values = (pc == 1);
2461 + all_pages = (page_code == 0x3f);
2462 +
2463 + /* Write the mode parameter header. Fixed values are: default
2464 + * medium type, no cache control (DPOFUA), and no block descriptors.
2465 + * The only variable value is the WriteProtect bit. We will fill in
2466 + * the mode data length later. */
2467 + memset(buf, 0, 8);
2468 + if (mscmnd == SC_MODE_SENSE_6) {
2469 + buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
2470 + buf += 4;
2471 + limit = 255;
2472 + } else { /* SC_MODE_SENSE_10 */
2473 + buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
2474 + buf += 8;
2475 + limit = 65535;
2476 + }
2477 +
2478 + /* No block descriptors */
2479 +
2480 + /* Disabled to work around USB reset problems with a Vista host.
2481 + */
2482 +#if 0
2483 + /* The mode pages, in numerical order. The only page we support
2484 + * is the Caching page. */
2485 + if (page_code == 0x08 || all_pages) {
2486 + valid_page = 1;
2487 + buf[0] = 0x08; /* Page code */
2488 + buf[1] = 10; /* Page length */
2489 + memset(buf+2, 0, 10); /* None of the fields are changeable */
2490 +
2491 + if (!changeable_values) {
2492 + buf[2] = 0x04; /* Write cache enable, */
2493 + /* Read cache not disabled */
2494 + /* No cache retention priorities */
2495 + put_be16(&buf[4], 0xffff); /* Don't disable prefetch */
2496 + /* Minimum prefetch = 0 */
2497 + put_be16(&buf[8], 0xffff); /* Maximum prefetch */
2498 + /* Maximum prefetch ceiling */
2499 + put_be16(&buf[10], 0xffff);
2500 + }
2501 + buf += 12;
2502 + }
2503 +#else
2504 + valid_page = 1;
2505 +#endif
2506 +
2507 + /* Check that a valid page was requested and the mode data length
2508 + * isn't too long. */
2509 + len = buf - buf0;
2510 + if (!valid_page || len > limit) {
2511 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2512 + return -EINVAL;
2513 + }
2514 +
2515 + /* Store the mode data length */
2516 + if (mscmnd == SC_MODE_SENSE_6)
2517 + buf0[0] = len - 1;
2518 + else
2519 + put_be16(buf0, len - 2);
2520 + return len;
2521 +}
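The header written at the top of the reply follows the MODE SENSE parameter header formats. The structs below are hypothetical mirrors of the 4-byte (MODE SENSE(6)) and 8-byte (MODE SENSE(10)) headers this function fills in; the write-protect bit is the only variable field:

#include <stdint.h>

struct mode_sense6_header {           /* 4 bytes */
        uint8_t mode_data_length;     /* bytes following this field */
        uint8_t medium_type;          /* 0: default medium type */
        uint8_t device_specific;      /* bit 7 set: write protected */
        uint8_t block_desc_length;    /* 0: no block descriptors */
};

struct mode_sense10_header {          /* 8 bytes */
        uint8_t mode_data_length[2];  /* big-endian, bytes following */
        uint8_t medium_type;
        uint8_t device_specific;      /* bit 7 set: write protected */
        uint8_t reserved[2];
        uint8_t block_desc_length[2]; /* 0: no block descriptors */
};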
2522 +
2523 +static int do_start_stop(struct fsg_dev *fsg)
2524 +{
2525 + struct lun *curlun = fsg->curlun;
2526 + int loej, start;
2527 +
2528 + /* int immed = fsg->cmnd[1] & 0x01; */
2529 + loej = fsg->cmnd[4] & 0x02;
2530 + start = fsg->cmnd[4] & 0x01;
2531 +
2532 + if (loej) {
2533 + /* eject request from the host */
2534 + if (backing_file_is_open(curlun)) {
2535 + close_backing_file(fsg, curlun);
2536 + curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
2537 + }
2538 + }
2539 +
2540 + return 0;
2541 +}
2542 +
2543 +static int do_prevent_allow(struct fsg_dev *fsg)
2544 +{
2545 + struct lun *curlun = fsg->curlun;
2546 + int prevent;
2547 +
2548 + prevent = fsg->cmnd[4] & 0x01;
2549 + if ((fsg->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */
2550 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2551 + return -EINVAL;
2552 + }
2553 +
2554 + if (curlun->prevent_medium_removal && !prevent)
2555 + fsync_sub(curlun);
2556 + curlun->prevent_medium_removal = prevent;
2557 + return 0;
2558 +}
2559 +
2560 +
2561 +static int do_read_format_capacities(struct fsg_dev *fsg,
2562 + struct fsg_buffhd *bh)
2563 +{
2564 + struct lun *curlun = fsg->curlun;
2565 + u8 *buf = (u8 *) bh->buf;
2566 +
2567 + buf[0] = buf[1] = buf[2] = 0;
2568 + buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */
2569 + buf += 4;
2570 +
2571 + put_be32(&buf[0], curlun->num_sectors); /* Number of blocks */
2572 + put_be32(&buf[4], 512); /* Block length */
2573 + buf[4] = 0x02; /* Current capacity */
2574 + return 12;
2575 +}
2576 +
2577 +
2578 +static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2579 +{
2580 + struct lun *curlun = fsg->curlun;
2581 +
2582 + /* We don't support MODE SELECT */
2583 + curlun->sense_data = SS_INVALID_COMMAND;
2584 + return -EINVAL;
2585 +}
2586 +
2587 +
2588 +/*-------------------------------------------------------------------------*/
2589 +#if 0
2590 +static int write_zero(struct fsg_dev *fsg)
2591 +{
2592 + struct fsg_buffhd *bh;
2593 + int rc;
2594 +
2595 + DBG(fsg, "write_zero\n");
2596 + /* Wait for the next buffer to become available */
2597 + bh = fsg->next_buffhd_to_fill;
2598 + while (bh->state != BUF_STATE_EMPTY) {
2599 + rc = sleep_thread(fsg);
2600 + if (rc)
2601 + return rc;
2602 + }
2603 +
2604 + bh->inreq->length = 0;
2605 + start_transfer(fsg, fsg->bulk_in, bh->inreq,
2606 + &bh->inreq_busy, &bh->state);
2607 +
2608 + fsg->next_buffhd_to_fill = bh->next;
2609 + return 0;
2610 +}
2611 +#endif
2612 +
2613 +static int throw_away_data(struct fsg_dev *fsg)
2614 +{
2615 + struct fsg_buffhd *bh;
2616 + u32 amount;
2617 + int rc;
2618 +
2619 + DBG(fsg, "throw_away_data\n");
2620 + while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
2621 + fsg->usb_amount_left > 0) {
2622 +
2623 + /* Throw away the data in a filled buffer */
2624 + if (bh->state == BUF_STATE_FULL) {
2625 + smp_rmb();
2626 + bh->state = BUF_STATE_EMPTY;
2627 + fsg->next_buffhd_to_drain = bh->next;
2628 +
2629 + /* A short packet or an error ends everything */
2630 + if (bh->outreq->actual != bh->outreq->length ||
2631 + bh->outreq->status != 0) {
2632 + raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
2633 + return -EINTR;
2634 + }
2635 + continue;
2636 + }
2637 +
2638 + /* Try to submit another request if we need one */
2639 + bh = fsg->next_buffhd_to_fill;
2640 + if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
2641 + amount = min(fsg->usb_amount_left, (u32) fsg->buf_size);
2642 +
2643 + /* amount is always divisible by 512, hence by
2644 + * the bulk-out maxpacket size */
2645 + bh->outreq->length = bh->bulk_out_intended_length =
2646 + amount;
2647 + start_transfer(fsg, fsg->bulk_out, bh->outreq,
2648 + &bh->outreq_busy, &bh->state);
2649 + fsg->next_buffhd_to_fill = bh->next;
2650 + fsg->usb_amount_left -= amount;
2651 + continue;
2652 + }
2653 +
2654 + /* Otherwise wait for something to happen */
2655 + rc = sleep_thread(fsg);
2656 + if (rc)
2657 + return rc;
2658 + }
2659 + return 0;
2660 +}
2661 +
2662 +
2663 +static int finish_reply(struct fsg_dev *fsg)
2664 +{
2665 + struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
2666 + int rc = 0;
2667 +
2668 + switch (fsg->data_dir) {
2669 + case DATA_DIR_NONE:
2670 + break; /* Nothing to send */
2671 +
2672 + case DATA_DIR_UNKNOWN:
2673 + rc = -EINVAL;
2674 + break;
2675 +
2676 + /* All but the last buffer of data must have already been sent */
2677 + case DATA_DIR_TO_HOST:
2678 + if (fsg->data_size == 0)
2679 + ; /* Nothing to send */
2680 +
2681 + /* If there's no residue, simply send the last buffer */
2682 + else if (fsg->residue == 0) {
2683 + start_transfer(fsg, fsg->bulk_in, bh->inreq,
2684 + &bh->inreq_busy, &bh->state);
2685 + fsg->next_buffhd_to_fill = bh->next;
2686 + } else {
2687 + start_transfer(fsg, fsg->bulk_in, bh->inreq,
2688 + &bh->inreq_busy, &bh->state);
2689 + fsg->next_buffhd_to_fill = bh->next;
2690 +#if 0
2691 + /* this is unnecessary, and was causing problems with MacOS */
2692 + if (bh->inreq->length > 0)
2693 + write_zero(fsg);
2694 +#endif
2695 + }
2696 + break;
2697 +
2698 + /* We have processed all we want from the data the host has sent.
2699 + * There may still be outstanding bulk-out requests. */
2700 + case DATA_DIR_FROM_HOST:
2701 + if (fsg->residue == 0)
2702 + ; /* Nothing to receive */
2703 +
2704 + /* Did the host stop sending unexpectedly early? */
2705 + else if (fsg->short_packet_received) {
2706 + raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
2707 + rc = -EINTR;
2708 + }
2709 +
2710 + /* We haven't processed all the incoming data. Even though
2711 + * we may be allowed to stall, doing so would cause a race.
2712 + * The controller may already have ACK'ed all the remaining
2713 + * bulk-out packets, in which case the host wouldn't see a
2714 + * STALL. Not realizing the endpoint was halted, it wouldn't
2715 + * clear the halt -- leading to problems later on. */
2716 +#if 0
2717 + fsg_set_halt(fsg, fsg->bulk_out);
2718 + raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
2719 + rc = -EINTR;
2720 +#endif
2721 +
2722 + /* We can't stall. Read in the excess data and throw it
2723 + * all away. */
2724 + else
2725 + rc = throw_away_data(fsg);
2726 + break;
2727 + }
2728 + return rc;
2729 +}
2730 +
2731 +
2732 +static int send_status(struct fsg_dev *fsg)
2733 +{
2734 + struct lun *curlun = fsg->curlun;
2735 + struct fsg_buffhd *bh;
2736 + int rc;
2737 + u8 status = USB_STATUS_PASS;
2738 + u32 sd, sdinfo = 0;
2739 + struct bulk_cs_wrap *csw;
2740 +
2741 + DBG(fsg, "send_status\n");
2742 + /* Wait for the next buffer to become available */
2743 + bh = fsg->next_buffhd_to_fill;
2744 + while (bh->state != BUF_STATE_EMPTY) {
2745 + rc = sleep_thread(fsg);
2746 + if (rc)
2747 + return rc;
2748 + }
2749 +
2750 + if (curlun) {
2751 + sd = curlun->sense_data;
2752 + sdinfo = curlun->sense_data_info;
2753 + } else if (fsg->bad_lun_okay)
2754 + sd = SS_NO_SENSE;
2755 + else
2756 + sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
2757 +
2758 + if (fsg->phase_error) {
2759 + DBG(fsg, "sending phase-error status\n");
2760 + status = USB_STATUS_PHASE_ERROR;
2761 + sd = SS_INVALID_COMMAND;
2762 + } else if (sd != SS_NO_SENSE) {
2763 + DBG(fsg, "sending command-failure status\n");
2764 + status = USB_STATUS_FAIL;
2765 + VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
2766 + " info x%x\n",
2767 + SK(sd), ASC(sd), ASCQ(sd), sdinfo);
2768 + }
2769 +
2770 + csw = bh->buf;
2771 +
2772 + /* Store and send the Bulk-only CSW */
2773 + csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG);
2774 + csw->Tag = fsg->tag;
2775 + csw->Residue = cpu_to_le32(fsg->residue);
2776 + csw->Status = status;
2777 +
2778 + bh->inreq->length = USB_BULK_CS_WRAP_LEN;
2779 + start_transfer(fsg, fsg->bulk_in, bh->inreq,
2780 + &bh->inreq_busy, &bh->state);
2781 +
2782 + fsg->next_buffhd_to_fill = bh->next;
2783 + return 0;
2784 +}
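The CSW queued above is the 13-byte Command Status Wrapper of the Bulk-Only Transport. The struct below is only a stand-in for the bulk_cs_wrap type declared elsewhere in this patch, restating the wire layout:

#include <stdint.h>

struct csw_wire {                 /* 13 bytes, little-endian fields */
        uint32_t signature;       /* 0x53425355, ASCII "USBS" */
        uint32_t tag;             /* echoed from the matching CBW */
        uint32_t data_residue;    /* expected minus actually transferred */
        uint8_t  status;          /* 0 = passed, 1 = failed,
                                     2 = phase error */
} __attribute__((packed));

The echoed tag lets the host pair the status with the command it sent, and a non-zero residue tells it how much of the expected data never moved.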
2785 +
2786 +
2787 +/*-------------------------------------------------------------------------*/
2788 +
2789 +/* Check whether the command is properly formed and whether its data size
2790 + * and direction agree with the values we already have. */
2791 +static int check_command(struct fsg_dev *fsg, int cmnd_size,
2792 + enum data_direction data_dir, unsigned int mask,
2793 + int needs_medium, const char *name)
2794 +{
2795 + int i;
2796 + int lun = fsg->cmnd[1] >> 5;
2797 + static const char dirletter[4] = {'u', 'o', 'i', 'n'};
2798 + char hdlen[20];
2799 + struct lun *curlun;
2800 +
2801 + hdlen[0] = 0;
2802 + if (fsg->data_dir != DATA_DIR_UNKNOWN)
2803 + sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
2804 + fsg->data_size);
2805 + VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
2806 + name, cmnd_size, dirletter[(int) data_dir],
2807 + fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
2808 +
2809 + /* We can't reply at all until we know the correct data direction
2810 + * and size. */
2811 + if (fsg->data_size_from_cmnd == 0)
2812 + data_dir = DATA_DIR_NONE;
2813 + if (fsg->data_dir == DATA_DIR_UNKNOWN) { /* CB or CBI */
2814 + fsg->data_dir = data_dir;
2815 + fsg->data_size = fsg->data_size_from_cmnd;
2816 +
2817 + } else { /* Bulk-only */
2818 + if (fsg->data_size < fsg->data_size_from_cmnd) {
2819 +
2820 + /* Host data size < Device data size is a phase error.
2821 + * Carry out the command, but only transfer as much
2822 + * as we are allowed. */
2823 + DBG(fsg, "phase error 1\n");
2824 + fsg->data_size_from_cmnd = fsg->data_size;
2825 + fsg->phase_error = 1;
2826 + }
2827 + }
2828 + fsg->residue = fsg->usb_amount_left = fsg->data_size;
2829 +
2830 + /* Conflicting data directions is a phase error */
2831 + if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
2832 + fsg->phase_error = 1;
2833 + DBG(fsg, "phase error 2\n");
2834 + return -EINVAL;
2835 + }
2836 +
2837 + /* Verify the length of the command itself */
2838 + if (cmnd_size != fsg->cmnd_size) {
2839 +
2840 + /* Special case workaround: MS-Windows issues REQUEST SENSE
2841 + * with cbw->Length == 12 (it should be 6). */
2842 + if (fsg->cmnd[0] == SC_REQUEST_SENSE && fsg->cmnd_size == 12)
2843 + cmnd_size = fsg->cmnd_size;
2844 + else {
2845 + fsg->phase_error = 1;
2846 + return -EINVAL;
2847 + }
2848 + }
2849 +
2850 + /* Check that the LUN values are consistent */
2851 + if (fsg->lun != lun)
2852 + DBG(fsg, "using LUN %d from CBW, "
2853 + "not LUN %d from CDB\n",
2854 + fsg->lun, lun);
2855 +
2856 + /* Check the LUN */
2857 + if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
2858 + fsg->curlun = curlun = &fsg->luns[fsg->lun];
2859 + if (fsg->cmnd[0] != SC_REQUEST_SENSE) {
2860 + curlun->sense_data = SS_NO_SENSE;
2861 + curlun->sense_data_info = 0;
2862 + curlun->info_valid = 0;
2863 + }
2864 + } else {
2865 + fsg->curlun = curlun = NULL;
2866 + fsg->bad_lun_okay = 0;
2867 +
2868 + /* INQUIRY and REQUEST SENSE commands are explicitly allowed
2869 + * to use unsupported LUNs; all others may not. */
2870 + if (fsg->cmnd[0] != SC_INQUIRY &&
2871 + fsg->cmnd[0] != SC_REQUEST_SENSE) {
2872 + DBG(fsg, "unsupported LUN %d\n", fsg->lun);
2873 + return -EINVAL;
2874 + }
2875 + }
2876 +
2877 + /* If a unit attention condition exists, only INQUIRY and
2878 + * REQUEST SENSE commands are allowed; anything else must fail. */
2879 + if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
2880 + fsg->cmnd[0] != SC_INQUIRY &&
2881 + fsg->cmnd[0] != SC_REQUEST_SENSE) {
2882 + curlun->sense_data = curlun->unit_attention_data;
2883 + curlun->unit_attention_data = SS_NO_SENSE;
2884 + return -EINVAL;
2885 + }
2886 +
2887 + /* Check that only command bytes listed in the mask are non-zero */
2888 + fsg->cmnd[1] &= 0x1f; /* Mask away the LUN */
2889 + for (i = 1; i < cmnd_size; ++i) {
2890 + if (fsg->cmnd[i] && !(mask & (1 << i))) {
2891 + if (curlun)
2892 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2893 + DBG(fsg, "SS_INVALID_FIELD_IN_CDB\n");
2894 + return -EINVAL;
2895 + }
2896 + }
2897 +
2898 + /* If the medium isn't mounted and the command needs to access
2899 + * it, return an error. */
2900 + if (curlun && !backing_file_is_open(curlun) && needs_medium) {
2901 + curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
2902 + DBG(fsg, "SS_MEDIUM_NOT_PRESENT\n");
2903 + return -EINVAL;
2904 + }
2905 +
2906 + return 0;
2907 +}
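The mask argument is a bitmap over CDB byte positions: bit i set means byte i of the command may legitimately be non-zero; byte 0 (the opcode) is never checked, and the LUN bits in byte 1 are masked off just before the loop. For READ(10), for instance, the caller below passes (1<<1) | (0xf<<2) | (3<<7), i.e. byte 1 (DPO/FUA flags), bytes 2 to 5 (the LBA) and bytes 7 to 8 (the transfer length). A tiny standalone illustration (hypothetical, not part of the driver):

#include <stdio.h>

int main(void)
{
        const unsigned int read10_mask = (1 << 1) | (0xf << 2) | (3 << 7);
        int i;

        for (i = 1; i < 10; i++)
                printf("READ(10) CDB byte %d may be non-zero: %s\n", i,
                       (read10_mask & (1u << i)) ? "yes" : "no");
        return 0;
}

A non-zero value in any byte the mask does not cover makes the command fail with SS_INVALID_FIELD_IN_CDB.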
2908 +
2909 +
2910 +static int do_scsi_command(struct fsg_dev *fsg)
2911 +{
2912 + struct fsg_buffhd *bh;
2913 + int rc;
2914 + int reply = -EINVAL;
2915 + int i;
2916 + static char unknown[16];
2917 +
2918 + dump_cdb(fsg);
2919 +
2920 + /* Wait for the next buffer to become available for data or status */
2921 + bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
2922 + while (bh->state != BUF_STATE_EMPTY) {
2923 + rc = sleep_thread(fsg);
2924 + if (rc)
2925 + return rc;
2926 + }
2927 + fsg->phase_error = 0;
2928 + fsg->short_packet_received = 0;
2929 +
2930 + down_read(&fsg->filesem); /* We're using the backing file */
2931 + switch (fsg->cmnd[0]) {
2932 +
2933 + case SC_INQUIRY:
2934 + fsg->data_size_from_cmnd = fsg->cmnd[4];
2935 + if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2936 + (1<<4), 0,
2937 + "INQUIRY")) == 0)
2938 + reply = do_inquiry(fsg, bh);
2939 + break;
2940 +
2941 + case SC_MODE_SELECT_6:
2942 + fsg->data_size_from_cmnd = fsg->cmnd[4];
2943 + if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2944 + (1<<1) | (1<<4), 0,
2945 + "MODE SELECT(6)")) == 0)
2946 + reply = do_mode_select(fsg, bh);
2947 + break;
2948 +
2949 + case SC_MODE_SELECT_10:
2950 + fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
2951 + if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2952 + (1<<1) | (3<<7), 0,
2953 + "MODE SELECT(10)")) == 0)
2954 + reply = do_mode_select(fsg, bh);
2955 + break;
2956 +
2957 + case SC_MODE_SENSE_6:
2958 + fsg->data_size_from_cmnd = fsg->cmnd[4];
2959 + if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2960 + (1<<1) | (1<<2) | (1<<4), 0,
2961 + "MODE SENSE(6)")) == 0)
2962 + reply = do_mode_sense(fsg, bh);
2963 + break;
2964 +
2965 + case SC_MODE_SENSE_10:
2966 + fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
2967 + if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2968 + (1<<1) | (1<<2) | (3<<7), 0,
2969 + "MODE SENSE(10)")) == 0)
2970 + reply = do_mode_sense(fsg, bh);
2971 + break;
2972 +
2973 + case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
2974 + fsg->data_size_from_cmnd = 0;
2975 + if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2976 + (1<<4), 0,
2977 + "PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
2978 + reply = do_prevent_allow(fsg);
2979 + break;
2980 +
2981 + case SC_READ_6:
2982 + i = fsg->cmnd[4];
2983 + fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2984 + if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2985 + (7<<1) | (1<<4), 1,
2986 + "READ(6)")) == 0)
2987 + reply = do_read(fsg);
2988 + break;
2989 +
2990 + case SC_READ_10:
2991 + fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
2992 + if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2993 + (1<<1) | (0xf<<2) | (3<<7), 1,
2994 + "READ(10)")) == 0)
2995 + reply = do_read(fsg);
2996 + break;
2997 +
2998 + case SC_READ_12:
2999 + fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
3000 + if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
3001 + (1<<1) | (0xf<<2) | (0xf<<6), 1,
3002 + "READ(12)")) == 0)
3003 + reply = do_read(fsg);
3004 + break;
3005 +
3006 + case SC_READ_CAPACITY:
3007 + fsg->data_size_from_cmnd = 8;
3008 + if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
3009 + (0xf<<2) | (1<<8), 1,
3010 + "READ CAPACITY")) == 0)
3011 + reply = do_read_capacity(fsg, bh);
3012 + break;
3013 +
3014 + case SC_READ_FORMAT_CAPACITIES:
3015 + fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
3016 + if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
3017 + (3<<7), 1,
3018 + "READ FORMAT CAPACITIES")) == 0)
3019 + reply = do_read_format_capacities(fsg, bh);
3020 + break;
3021 +
3022 + case SC_REQUEST_SENSE:
3023 + fsg->data_size_from_cmnd = fsg->cmnd[4];
3024 + if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
3025 + (1<<4), 0,
3026 + "REQUEST SENSE")) == 0)
3027 + reply = do_request_sense(fsg, bh);
3028 + break;
3029 +
3030 + case SC_START_STOP_UNIT:
3031 + fsg->data_size_from_cmnd = 0;
3032 + if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
3033 + (1<<1) | (1<<4), 0,
3034 + "START-STOP UNIT")) == 0)
3035 + reply = do_start_stop(fsg);
3036 + break;
3037 +
3038 + case SC_SYNCHRONIZE_CACHE:
3039 + fsg->data_size_from_cmnd = 0;
3040 + if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
3041 + (0xf<<2) | (3<<7), 1,
3042 + "SYNCHRONIZE CACHE")) == 0)
3043 + reply = do_synchronize_cache(fsg);
3044 + break;
3045 +
3046 + case SC_TEST_UNIT_READY:
3047 + fsg->data_size_from_cmnd = 0;
3048 + reply = check_command(fsg, 6, DATA_DIR_NONE,
3049 + 0, 1,
3050 + "TEST UNIT READY");
3051 + break;
3052 +
3053 + /* Although optional, this command is used by MS-Windows. We
3054 + * support a minimal version: BytChk must be 0. */
3055 + case SC_VERIFY:
3056 + fsg->data_size_from_cmnd = 0;
3057 + if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
3058 + (1<<1) | (0xf<<2) | (3<<7), 1,
3059 + "VERIFY")) == 0)
3060 + reply = do_verify(fsg);
3061 + break;
3062 +
3063 + case SC_WRITE_6:
3064 + i = fsg->cmnd[4];
3065 + fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
3066 + if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
3067 + (7<<1) | (1<<4), 1,
3068 + "WRITE(6)")) == 0)
3069 + reply = do_write(fsg);
3070 + break;
3071 +
3072 + case SC_WRITE_10:
3073 + fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
3074 + if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
3075 + (1<<1) | (0xf<<2) | (3<<7), 1,
3076 + "WRITE(10)")) == 0)
3077 + reply = do_write(fsg);
3078 + break;
3079 +
3080 + case SC_WRITE_12:
3081 + fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
3082 + if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
3083 + (1<<1) | (0xf<<2) | (0xf<<6), 1,
3084 + "WRITE(12)")) == 0)
3085 + reply = do_write(fsg);
3086 + break;
3087 +
3088 + /* Some mandatory commands that we recognize but don't implement.
3089 + * They don't mean much in this setting. It's left as an exercise
3090 + * for anyone interested to implement RESERVE and RELEASE in terms
3091 + * of POSIX locks. */
3092 + case SC_FORMAT_UNIT:
3093 + case SC_RELEASE:
3094 + case SC_RESERVE:
3095 + case SC_SEND_DIAGNOSTIC:
3096 + /* Fall through */
3097 +
3098 + default:
3099 + fsg->data_size_from_cmnd = 0;
3100 + sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
3101 + if ((reply = check_command(fsg, fsg->cmnd_size,
3102 + DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
3103 + fsg->curlun->sense_data = SS_INVALID_COMMAND;
3104 + reply = -EINVAL;
3105 + }
3106 + break;
3107 + }
3108 + up_read(&fsg->filesem);
3109 +
3110 + VDBG(fsg, "reply: %d, fsg->data_size_from_cmnd: %d\n",
3111 + reply, fsg->data_size_from_cmnd);
3112 + if (reply == -EINTR || signal_pending(current))
3113 + return -EINTR;
3114 +
3115 + /* Set up the single reply buffer for finish_reply() */
3116 + if (reply == -EINVAL)
3117 + reply = 0; /* Error reply length */
3118 + if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
3119 + reply = min((u32) reply, fsg->data_size_from_cmnd);
3120 + bh->inreq->length = reply;
3121 + bh->state = BUF_STATE_FULL;
3122 + fsg->residue -= reply;
3123 + } /* Otherwise it's already set */
3124 +
3125 + return 0;
3126 +}
3127 +
3128 +
3129 +/*-------------------------------------------------------------------------*/
3130 +
3131 +static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
3132 +{
3133 + struct usb_request *req = bh->outreq;
3134 + struct bulk_cb_wrap *cbw = req->buf;
3135 +
3136 + /* Was this a real packet? */
3137 + if (req->status)
3138 + return -EINVAL;
3139 +
3140 + /* Is the CBW valid? */
3141 + if (req->actual != USB_BULK_CB_WRAP_LEN ||
3142 + cbw->Signature != __constant_cpu_to_le32(
3143 + USB_BULK_CB_SIG)) {
3144 + DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
3145 + req->actual,
3146 + le32_to_cpu(cbw->Signature));
3147 + return -EINVAL;
3148 + }
3149 +
3150 + /* Is the CBW meaningful? */
3151 + if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
3152 + cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
3153 + DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
3154 + "cmdlen %u\n",
3155 + cbw->Lun, cbw->Flags, cbw->Length);
3156 + return -EINVAL;
3157 + }
3158 +
3159 + /* Save the command for later */
3160 + fsg->cmnd_size = cbw->Length;
3161 + memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
3162 + if (cbw->Flags & USB_BULK_IN_FLAG)
3163 + fsg->data_dir = DATA_DIR_TO_HOST;
3164 + else
3165 + fsg->data_dir = DATA_DIR_FROM_HOST;
3166 + fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
3167 + if (fsg->data_size == 0)
3168 + fsg->data_dir = DATA_DIR_NONE;
3169 + fsg->lun = cbw->Lun;
3170 + fsg->tag = cbw->Tag;
3171 + return 0;
3172 +}
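The packet validated here is the 31-byte Command Block Wrapper of the Bulk-Only Transport. As with the CSW above, the struct below is only a stand-in for the bulk_cb_wrap type declared elsewhere in the patch, restating the wire layout:

#include <stdint.h>

struct cbw_wire {                      /* 31 bytes, little-endian fields */
        uint32_t signature;            /* 0x43425355, ASCII "USBC" */
        uint32_t tag;                  /* echoed back in the CSW */
        uint32_t data_transfer_length; /* bytes the host expects to move */
        uint8_t  flags;                /* bit 7 set: data flows device-to-host */
        uint8_t  lun;                  /* target LUN, 0..15 */
        uint8_t  cb_length;            /* valid CDB bytes, 1..16 */
        uint8_t  cdb[16];              /* the SCSI command itself */
} __attribute__((packed));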
3173 +
3174 +
3175 +static int get_next_command(struct fsg_dev *fsg)
3176 +{
3177 + struct fsg_buffhd *bh;
3178 + int rc = 0;
3179 +
3180 + /* Wait for the next buffer to become available */
3181 + bh = fsg->next_buffhd_to_fill;
3182 + while (bh->state != BUF_STATE_EMPTY) {
3183 + rc = sleep_thread(fsg);
3184 + if (rc)
3185 + return rc;
3186 + }
3187 +
3188 + /* Queue a request to read a Bulk-only CBW */
3189 + set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
3190 + start_transfer(fsg, fsg->bulk_out, bh->outreq,
3191 + &bh->outreq_busy, &bh->state);
3192 +
3193 + /* We will drain the buffer in software, which means we
3194 + * can reuse it for the next filling. No need to advance
3195 + * next_buffhd_to_fill. */
3196 +
3197 + /* Wait for the CBW to arrive */
3198 + while (bh->state != BUF_STATE_FULL) {
3199 + rc = sleep_thread(fsg);
3200 + if (rc)
3201 + return rc;
3202 + }
3203 + smp_rmb();
3204 + rc = received_cbw(fsg, bh);
3205 + bh->state = BUF_STATE_EMPTY;
3206 +
3207 + return rc;
3208 +}
3209 +
3210 +
3211 +/*-------------------------------------------------------------------------*/
3212 +
3213 +static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
3214 + const struct usb_endpoint_descriptor *d)
3215 +{
3216 + int rc;
3217 +
3218 + DBG(fsg, "usb_ep_enable %s\n", ep->name);
3219 + ep->driver_data = fsg;
3220 + rc = usb_ep_enable(ep, d);
3221 + if (rc)
3222 + ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
3223 + return rc;
3224 +}
3225 +
3226 +static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
3227 + struct usb_request **preq)
3228 +{
3229 + *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
3230 + if (*preq)
3231 + return 0;
3232 + ERROR(fsg, "can't allocate request for %s\n", ep->name);
3233 + return -ENOMEM;
3234 +}
3235 +
3236 +/*
3237 + * Reset interface setting and re-init endpoint state (toggle etc).
3238 + * Call with altsetting < 0 to disable the interface. The only other
3239 + * available altsetting is 0, which enables the interface.
3240 + */
3241 +static int do_set_interface(struct fsg_dev *fsg, int altsetting)
3242 +{
3243 + struct usb_composite_dev *cdev = fsg->cdev;
3244 + int rc = 0;
3245 + int i;
3246 + const struct usb_endpoint_descriptor *d;
3247 +
3248 + if (fsg->running)
3249 + DBG(fsg, "reset interface\n");
3250 +
3251 +reset:
3252 + /* Deallocate the requests */
3253 + for (i = 0; i < NUM_BUFFERS; ++i) {
3254 + struct fsg_buffhd *bh = &fsg->buffhds[i];
3255 +
3256 + if (bh->inreq) {
3257 + usb_ep_free_request(fsg->bulk_in, bh->inreq);
3258 + bh->inreq = NULL;
3259 + }
3260 + if (bh->outreq) {
3261 + usb_ep_free_request(fsg->bulk_out, bh->outreq);
3262 + bh->outreq = NULL;
3263 + }
3264 + }
3265 +
3266 + /* Disable the endpoints */
3267 + if (fsg->bulk_in_enabled) {
3268 + DBG(fsg, "usb_ep_disable %s\n", fsg->bulk_in->name);
3269 + usb_ep_disable(fsg->bulk_in);
3270 + fsg->bulk_in_enabled = 0;
3271 + }
3272 + if (fsg->bulk_out_enabled) {
3273 + DBG(fsg, "usb_ep_disable %s\n", fsg->bulk_out->name);
3274 + usb_ep_disable(fsg->bulk_out);
3275 + fsg->bulk_out_enabled = 0;
3276 + }
3277 +
3278 + fsg->running = 0;
3279 + if (altsetting < 0 || rc != 0)
3280 + return rc;
3281 +
3282 + DBG(fsg, "set interface %d\n", altsetting);
3283 +
3284 + /* Enable the endpoints */
3285 + d = ep_desc(cdev->gadget, &fs_bulk_in_desc, &hs_bulk_in_desc);
3286 + if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
3287 + goto reset;
3288 + fsg->bulk_in_enabled = 1;
3289 +
3290 + d = ep_desc(cdev->gadget, &fs_bulk_out_desc, &hs_bulk_out_desc);
3291 + if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
3292 + goto reset;
3293 + fsg->bulk_out_enabled = 1;
3294 + fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
3295 +
3296 + /* Allocate the requests */
3297 + for (i = 0; i < NUM_BUFFERS; ++i) {
3298 + struct fsg_buffhd *bh = &fsg->buffhds[i];
3299 +
3300 + rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq);
3301 + if (rc != 0)
3302 + goto reset;
3303 + rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq);
3304 + if (rc != 0)
3305 + goto reset;
3306 + bh->inreq->buf = bh->outreq->buf = bh->buf;
3307 + bh->inreq->context = bh->outreq->context = bh;
3308 + bh->inreq->complete = bulk_in_complete;
3309 + bh->outreq->complete = bulk_out_complete;
3310 + }
3311 +
3312 + fsg->running = 1;
3313 + for (i = 0; i < fsg->nluns; ++i)
3314 + fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
3315 +
3316 + return rc;
3317 +}
3318 +
3319 +static void adjust_wake_lock(struct fsg_dev *fsg)
3320 +{
3321 + int ums_active = 0;
3322 + int i;
3323 +
3324 + spin_lock_irq(&fsg->lock);
3325 +
3326 + if (fsg->config) {
3327 + for (i = 0; i < fsg->nluns; ++i) {
3328 + if (backing_file_is_open(&fsg->luns[i]))
3329 + ums_active = 1;
3330 + }
3331 + }
3332 +
3333 + if (ums_active)
3334 + wake_lock(&fsg->wake_lock);
3335 + else
3336 + wake_unlock(&fsg->wake_lock);
3337 +
3338 + spin_unlock_irq(&fsg->lock);
3339 +}
3340 +
3341 +/*
3342 + * Change our operational configuration. This code must agree with the code
3343 + * that returns config descriptors, and with interface altsetting code.
3344 + *
3345 + * It's also responsible for power management interactions. Some
3346 + * configurations might not work with our current power sources.
3347 + * For now we just assume the gadget is always self-powered.
3348 + */
3349 +static int do_set_config(struct fsg_dev *fsg, u8 new_config)
3350 +{
3351 + int rc = 0;
3352 +
3353 + if (new_config == fsg->config)
3354 + return rc;
3355 +
3356 + /* Disable the single interface */
3357 + if (fsg->config != 0) {
3358 + DBG(fsg, "reset config\n");
3359 + fsg->config = 0;
3360 + rc = do_set_interface(fsg, -1);
3361 + }
3362 +
3363 + /* Enable the interface */
3364 + if (new_config != 0) {
3365 + fsg->config = new_config;
3366 + rc = do_set_interface(fsg, 0);
3367 + if (rc != 0)
3368 + fsg->config = 0; /* Reset on errors */
3369 + else
3370 + INFO(fsg, "config #%d\n", fsg->config);
3371 + }
3372 +
3373 + switch_set_state(&fsg->sdev, new_config);
3374 + adjust_wake_lock(fsg);
3375 + return rc;
3376 +}
3377 +
3378 +
3379 +/*-------------------------------------------------------------------------*/
3380 +
3381 +static void handle_exception(struct fsg_dev *fsg)
3382 +{
3383 + siginfo_t info;
3384 + int sig;
3385 + int i;
3386 + struct fsg_buffhd *bh;
3387 + enum fsg_state old_state;
3388 + u8 new_config;
3389 + struct lun *curlun;
3390 + int rc;
3391 +
3392 + DBG(fsg, "handle_exception state: %d\n", (int)fsg->state);
3393 + /* Clear the existing signals. Anything but SIGUSR1 is converted
3394 + * into a high-priority EXIT exception. */
3395 + for (;;) {
3396 + sig = dequeue_signal_lock(current, &current->blocked, &info);
3397 + if (!sig)
3398 + break;
3399 + if (sig != SIGUSR1) {
3400 + if (fsg->state < FSG_STATE_EXIT)
3401 + DBG(fsg, "Main thread exiting on signal\n");
3402 + raise_exception(fsg, FSG_STATE_EXIT);
3403 + }
3404 + }
3405 +
3406 + /* Clear out the controller's fifos */
3407 + if (fsg->bulk_in_enabled)
3408 + usb_ep_fifo_flush(fsg->bulk_in);
3409 + if (fsg->bulk_out_enabled)
3410 + usb_ep_fifo_flush(fsg->bulk_out);
3411 +
3412 + /* Reset the I/O buffer states and pointers, the SCSI
3413 + * state, and the exception. Then invoke the handler. */
3414 + spin_lock_irq(&fsg->lock);
3415 +
3416 + for (i = 0; i < NUM_BUFFERS; ++i) {
3417 + bh = &fsg->buffhds[i];
3418 + bh->state = BUF_STATE_EMPTY;
3419 + }
3420 + fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
3421 + &fsg->buffhds[0];
3422 +
3423 + new_config = fsg->new_config;
3424 + old_state = fsg->state;
3425 +
3426 + if (old_state == FSG_STATE_ABORT_BULK_OUT)
3427 + fsg->state = FSG_STATE_STATUS_PHASE;
3428 + else {
3429 + for (i = 0; i < fsg->nluns; ++i) {
3430 + curlun = &fsg->luns[i];
3431 + curlun->prevent_medium_removal = 0;
3432 + curlun->sense_data = curlun->unit_attention_data =
3433 + SS_NO_SENSE;
3434 + curlun->sense_data_info = 0;
3435 + curlun->info_valid = 0;
3436 + }
3437 + fsg->state = FSG_STATE_IDLE;
3438 + }
3439 + spin_unlock_irq(&fsg->lock);
3440 +
3441 + /* Carry out any extra actions required for the exception */
3442 + switch (old_state) {
3443 + default:
3444 + break;
3445 +
3446 + case FSG_STATE_ABORT_BULK_OUT:
3447 + DBG(fsg, "FSG_STATE_ABORT_BULK_OUT\n");
3448 + spin_lock_irq(&fsg->lock);
3449 + if (fsg->state == FSG_STATE_STATUS_PHASE)
3450 + fsg->state = FSG_STATE_IDLE;
3451 + spin_unlock_irq(&fsg->lock);
3452 + break;
3453 +
3454 + case FSG_STATE_RESET:
3455 + /* really not much to do here */
3456 + break;
3457 +
3458 + case FSG_STATE_CONFIG_CHANGE:
3459 + rc = do_set_config(fsg, new_config);
3460 + if (new_config == 0) {
3461 + /* We're using the backing file */
3462 + down_read(&fsg->filesem);
3463 + fsync_all(fsg);
3464 + up_read(&fsg->filesem);
3465 + }
3466 + break;
3467 +
3468 + case FSG_STATE_EXIT:
3469 + case FSG_STATE_TERMINATED:
3470 + do_set_config(fsg, 0); /* Free resources */
3471 + spin_lock_irq(&fsg->lock);
3472 + fsg->state = FSG_STATE_TERMINATED; /* Stop the thread */
3473 + spin_unlock_irq(&fsg->lock);
3474 + break;
3475 + }
3476 +}
3477 +
3478 +
3479 +/*-------------------------------------------------------------------------*/
3480 +
3481 +static int fsg_main_thread(void *fsg_)
3482 +{
3483 + struct fsg_dev *fsg = fsg_;
3484 +
3485 + /* Allow the thread to be killed by a signal, but set the signal mask
3486 + * to block everything but INT, TERM, KILL, and USR1. */
3487 + allow_signal(SIGINT);
3488 + allow_signal(SIGTERM);
3489 + allow_signal(SIGKILL);
3490 + allow_signal(SIGUSR1);
3491 +
3492 + /* Allow the thread to be frozen */
3493 + set_freezable();
3494 +
3495 + /* Arrange for userspace references to be interpreted as kernel
3496 + * pointers. That way we can pass a kernel pointer to a routine
3497 + * that expects a __user pointer and it will work okay. */
3498 + set_fs(get_ds());
3499 +
3500 + /* The main loop */
3501 + while (fsg->state != FSG_STATE_TERMINATED) {
3502 + if (exception_in_progress(fsg) || signal_pending(current)) {
3503 + handle_exception(fsg);
3504 + continue;
3505 + }
3506 +
3507 + if (!fsg->running) {
3508 + sleep_thread(fsg);
3509 + continue;
3510 + }
3511 +
3512 + if (get_next_command(fsg))
3513 + continue;
3514 +
3515 + spin_lock_irq(&fsg->lock);
3516 + if (!exception_in_progress(fsg))
3517 + fsg->state = FSG_STATE_DATA_PHASE;
3518 + spin_unlock_irq(&fsg->lock);
3519 +
3520 + if (do_scsi_command(fsg) || finish_reply(fsg))
3521 + continue;
3522 +
3523 + spin_lock_irq(&fsg->lock);
3524 + if (!exception_in_progress(fsg))
3525 + fsg->state = FSG_STATE_STATUS_PHASE;
3526 + spin_unlock_irq(&fsg->lock);
3527 +
3528 + if (send_status(fsg))
3529 + continue;
3530 +
3531 + spin_lock_irq(&fsg->lock);
3532 + if (!exception_in_progress(fsg))
3533 + fsg->state = FSG_STATE_IDLE;
3534 + spin_unlock_irq(&fsg->lock);
3535 + }
3536 +
3537 + spin_lock_irq(&fsg->lock);
3538 + fsg->thread_task = NULL;
3539 + spin_unlock_irq(&fsg->lock);
3540 +
3541 + /* In case we are exiting because of a signal, unregister the
3542 + * gadget driver and close the backing file. */
3543 + if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
3544 + close_all_backing_files(fsg);
3545 +
3546 + /* Let the unbind and cleanup routines know the thread has exited */
3547 + complete_and_exit(&fsg->thread_notifier, 0);
3548 +}
3549 +
3550 +
3551 +/*-------------------------------------------------------------------------*/
3552 +
3553 +/* If the next two routines are called while the gadget is registered,
3554 + * the caller must own fsg->filesem for writing. */
3555 +
3556 +static int open_backing_file(struct fsg_dev *fsg, struct lun *curlun,
3557 + const char *filename)
3558 +{
3559 + int ro;
3560 + struct file *filp = NULL;
3561 + int rc = -EINVAL;
3562 + struct inode *inode = NULL;
3563 + loff_t size;
3564 + loff_t num_sectors;
3565 +
3566 + /* R/W if we can, R/O if we must */
3567 + ro = curlun->ro;
3568 + if (!ro) {
3569 + filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
3570 + if (-EROFS == PTR_ERR(filp))
3571 + ro = 1;
3572 + }
3573 + if (ro)
3574 + filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
3575 + if (IS_ERR(filp)) {
3576 + LINFO(curlun, "unable to open backing file: %s\n", filename);
3577 + return PTR_ERR(filp);
3578 + }
3579 +
3580 + if (!(filp->f_mode & FMODE_WRITE))
3581 + ro = 1;
3582 +
3583 + if (filp->f_path.dentry)
3584 + inode = filp->f_path.dentry->d_inode;
3585 + if (inode && S_ISBLK(inode->i_mode)) {
3586 + if (bdev_read_only(inode->i_bdev))
3587 + ro = 1;
3588 + } else if (!inode || !S_ISREG(inode->i_mode)) {
3589 + LINFO(curlun, "invalid file type: %s\n", filename);
3590 + goto out;
3591 + }
3592 +
3593 + /* If we can't read the file, it's no good.
3594 + * If we can't write the file, use it read-only. */
3595 + if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
3596 + LINFO(curlun, "file not readable: %s\n", filename);
3597 + goto out;
3598 + }
3599 + if (!(filp->f_op->write || filp->f_op->aio_write))
3600 + ro = 1;
3601 +
3602 + size = i_size_read(inode->i_mapping->host);
3603 + if (size < 0) {
3604 + LINFO(curlun, "unable to find file size: %s\n", filename);
3605 + rc = (int) size;
3606 + goto out;
3607 + }
3608 + num_sectors = size >> 9; /* File size in 512-byte sectors */
3609 + if (num_sectors == 0) {
3610 + LINFO(curlun, "file too small: %s\n", filename);
3611 + rc = -ETOOSMALL;
3612 + goto out;
3613 + }
3614 +
3615 + get_file(filp);
3616 + curlun->ro = ro;
3617 + curlun->filp = filp;
3618 + curlun->file_length = size;
3619 + curlun->num_sectors = num_sectors;
3620 + LDBG(curlun, "open backing file: %s size: %lld num_sectors: %lld\n",
3621 + filename, size, num_sectors);
3622 + rc = 0;
3623 + adjust_wake_lock(fsg);
3624 +
3625 +out:
3626 + filp_close(filp, current->files);
3627 + return rc;
3628 +}
3629 +
3630 +
3631 +static void close_backing_file(struct fsg_dev *fsg, struct lun *curlun)
3632 +{
3633 + if (curlun->filp) {
3634 + int rc;
3635 +
3636 + /*
3637 + * XXX: San: Ugly hack here added to ensure that
3638 + * our pages get synced to disk.
3639 + * Also drop caches here just to be extra-safe
3640 + */
3641 + rc = vfs_fsync(curlun->filp, curlun->filp->f_path.dentry, 1);
3642 + if (rc < 0)
3643 + printk(KERN_ERR "ums: Error syncing data (%d)\n", rc);
3644 + /* drop_pagecache and drop_slab are no longer available */
3645 + /* drop_pagecache(); */
3646 + /* drop_slab(); */
3647 +
3648 + LDBG(curlun, "close backing file\n");
3649 + fput(curlun->filp);
3650 + curlun->filp = NULL;
3651 + adjust_wake_lock(fsg);
3652 + }
3653 +}
3654 +
3655 +static void close_all_backing_files(struct fsg_dev *fsg)
3656 +{
3657 + int i;
3658 +
3659 + for (i = 0; i < fsg->nluns; ++i)
3660 + close_backing_file(fsg, &fsg->luns[i]);
3661 +}
3662 +
3663 +static ssize_t show_file(struct device *dev, struct device_attribute *attr,
3664 + char *buf)
3665 +{
3666 + struct lun *curlun = dev_to_lun(dev);
3667 + struct fsg_dev *fsg = dev_get_drvdata(dev);
3668 + char *p;
3669 + ssize_t rc;
3670 +
3671 + down_read(&fsg->filesem);
3672 + if (backing_file_is_open(curlun)) { /* Get the complete pathname */
3673 + p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
3674 + if (IS_ERR(p))
3675 + rc = PTR_ERR(p);
3676 + else {
3677 + rc = strlen(p);
3678 + memmove(buf, p, rc);
3679 + buf[rc] = '\n'; /* Add a newline */
3680 + buf[++rc] = 0;
3681 + }
3682 + } else { /* No file, return 0 bytes */
3683 + *buf = 0;
3684 + rc = 0;
3685 + }
3686 + up_read(&fsg->filesem);
3687 + return rc;
3688 +}
3689 +
3690 +static ssize_t store_file(struct device *dev, struct device_attribute *attr,
3691 + const char *buf, size_t count)
3692 +{
3693 + struct lun *curlun = dev_to_lun(dev);
3694 + struct fsg_dev *fsg = dev_get_drvdata(dev);
3695 + int rc = 0;
3696 +
3697 + DBG(fsg, "store_file: \"%s\"\n", buf);
3698 +#if 0
3699 + /* disabled because we need to allow closing the backing file if the media was removed */
3700 + if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) {
3701 + LDBG(curlun, "eject attempt prevented\n");
3702 + return -EBUSY; /* "Door is locked" */
3703 + }
3704 +#endif
3705 +
3706 + /* Remove a trailing newline */
3707 + if (count > 0 && buf[count-1] == '\n')
3708 + ((char *) buf)[count-1] = 0;
3709 +
3710 + /* Eject current medium */
3711 + down_write(&fsg->filesem);
3712 + if (backing_file_is_open(curlun)) {
3713 + close_backing_file(fsg, curlun);
3714 + curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
3715 + }
3716 +
3717 + /* Load new medium */
3718 + if (count > 0 && buf[0]) {
3719 + rc = open_backing_file(fsg, curlun, buf);
3720 + if (rc == 0)
3721 + curlun->unit_attention_data =
3722 + SS_NOT_READY_TO_READY_TRANSITION;
3723 + }
3724 + up_write(&fsg->filesem);
3725 + return (rc < 0 ? rc : count);
3726 +}
3727 +
3728 +
3729 +static DEVICE_ATTR(file, 0444, show_file, store_file);
3730 +
3731 +/*-------------------------------------------------------------------------*/
3732 +
3733 +static void fsg_release(struct kref *ref)
3734 +{
3735 + struct fsg_dev *fsg = container_of(ref, struct fsg_dev, ref);
3736 +
3737 + kfree(fsg->luns);
3738 + kfree(fsg);
3739 +}
3740 +
3741 +static void lun_release(struct device *dev)
3742 +{
3743 + struct fsg_dev *fsg = dev_get_drvdata(dev);
3744 +
3745 + kref_put(&fsg->ref, fsg_release);
3746 +}
3747 +
3748 +
3749 +/*-------------------------------------------------------------------------*/
3750 +
3751 +static int __init fsg_alloc(void)
3752 +{
3753 + struct fsg_dev *fsg;
3754 +
3755 + fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
3756 + if (!fsg)
3757 + return -ENOMEM;
3758 + spin_lock_init(&fsg->lock);
3759 + init_rwsem(&fsg->filesem);
3760 + kref_init(&fsg->ref);
3761 + init_completion(&fsg->thread_notifier);
3762 +
3763 + the_fsg = fsg;
3764 + return 0;
3765 +}
3766 +
3767 +static ssize_t print_switch_name(struct switch_dev *sdev, char *buf)
3768 +{
3769 + return sprintf(buf, "%s\n", DRIVER_NAME);
3770 +}
3771 +
3772 +static ssize_t print_switch_state(struct switch_dev *sdev, char *buf)
3773 +{
3774 + struct fsg_dev *fsg = container_of(sdev, struct fsg_dev, sdev);
3775 + return sprintf(buf, "%s\n", (fsg->config ? "online" : "offline"));
3776 +}
3777 +
3778 +static void
3779 +fsg_function_unbind(struct usb_configuration *c, struct usb_function *f)
3780 +{
3781 + struct fsg_dev *fsg = func_to_dev(f);
3782 + int i;
3783 + struct lun *curlun;
3784 +
3785 + DBG(fsg, "fsg_function_unbind\n");
3786 + clear_bit(REGISTERED, &fsg->atomic_bitflags);
3787 +
3788 + /* Unregister the sysfs attribute files and the LUNs */
3789 + for (i = 0; i < fsg->nluns; ++i) {
3790 + curlun = &fsg->luns[i];
3791 + if (curlun->registered) {
3792 + device_remove_file(&curlun->dev, &dev_attr_file);
3793 + device_unregister(&curlun->dev);
3794 + curlun->registered = 0;
3795 + }
3796 + }
3797 +
3798 + /* If the thread isn't already dead, tell it to exit now */
3799 + if (fsg->state != FSG_STATE_TERMINATED) {
3800 + raise_exception(fsg, FSG_STATE_EXIT);
3801 + wait_for_completion(&fsg->thread_notifier);
3802 +
3803 + /* The cleanup routine waits for this completion also */
3804 + complete(&fsg->thread_notifier);
3805 + }
3806 +
3807 + /* Free the data buffers */
3808 + for (i = 0; i < NUM_BUFFERS; ++i)
3809 + kfree(fsg->buffhds[i].buf);
3810 +}
3811 +
3812 +static int __init
3813 +fsg_function_bind(struct usb_configuration *c, struct usb_function *f)
3814 +{
3815 + struct usb_composite_dev *cdev = c->cdev;
3816 + struct fsg_dev *fsg = func_to_dev(f);
3817 + int rc;
3818 + int i;
3819 + int id;
3820 + struct lun *curlun;
3821 + struct usb_ep *ep;
3822 + char *pathbuf, *p;
3823 +
3824 + fsg->cdev = cdev;
3825 + DBG(fsg, "fsg_function_bind\n");
3826 +
3827 + dev_attr_file.attr.mode = 0644;
3828 +
3829 + /* Find out how many LUNs there should be */
3830 + i = fsg->nluns;
3831 + if (i == 0)
3832 + i = 1;
3833 + if (i > MAX_LUNS) {
3834 + ERROR(fsg, "invalid number of LUNs: %d\n", i);
3835 + rc = -EINVAL;
3836 + goto out;
3837 + }
3838 +
3839 + /* Create the LUNs, open their backing files, and register the
3840 + * LUN devices in sysfs. */
3841 + fsg->luns = kzalloc(i * sizeof(struct lun), GFP_KERNEL);
3842 + if (!fsg->luns) {
3843 + rc = -ENOMEM;
3844 + goto out;
3845 + }
3846 + fsg->nluns = i;
3847 +
3848 + for (i = 0; i < fsg->nluns; ++i) {
3849 + curlun = &fsg->luns[i];
3850 + curlun->ro = 0;
3851 + curlun->dev.release = lun_release;
3852 + curlun->dev.parent = &cdev->gadget->dev;
3853 + dev_set_drvdata(&curlun->dev, fsg);
3854 + snprintf(curlun->dev.bus_id, BUS_ID_SIZE,
3855 + "lun%d", i);
3856 +
3857 + rc = device_register(&curlun->dev);
3858 + if (rc != 0) {
3859 + INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
3860 + goto out;
3861 + }
3862 + rc = device_create_file(&curlun->dev, &dev_attr_file);
3863 + if (rc != 0) {
3864 + ERROR(fsg, "device_create_file failed: %d\n", rc);
3865 + device_unregister(&curlun->dev);
3866 + goto out;
3867 + }
3868 + curlun->registered = 1;
3869 + kref_get(&fsg->ref);
3870 + }
3871 +
3872 + /* allocate interface ID(s) */
3873 + id = usb_interface_id(c, f);
3874 + if (id < 0)
3875 + return id;
3876 + intf_desc.bInterfaceNumber = id;
3877 +
3878 + ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_in_desc);
3879 + if (!ep)
3880 + goto autoconf_fail;
3881 + ep->driver_data = fsg; /* claim the endpoint */
3882 + fsg->bulk_in = ep;
3883 +
3884 + ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_out_desc);
3885 + if (!ep)
3886 + goto autoconf_fail;
3887 + ep->driver_data = fsg; /* claim the endpoint */
3888 + fsg->bulk_out = ep;
3889 +
3890 + rc = -ENOMEM;
3891 +
3892 + if (gadget_is_dualspeed(cdev->gadget)) {
3893 + /* Assume endpoint addresses are the same for both speeds */
3894 + hs_bulk_in_desc.bEndpointAddress =
3895 + fs_bulk_in_desc.bEndpointAddress;
3896 + hs_bulk_out_desc.bEndpointAddress =
3897 + fs_bulk_out_desc.bEndpointAddress;
3898 +
3899 + f->hs_descriptors = hs_function;
3900 + }
3901 +
3902 + /* Allocate the data buffers */
3903 + for (i = 0; i < NUM_BUFFERS; ++i) {
3904 + struct fsg_buffhd *bh = &fsg->buffhds[i];
3905 +
3906 + /* Allocate for the bulk-in endpoint. We assume that
3907 + * the buffer will also work with the bulk-out (and
3908 + * interrupt-in) endpoint. */
3909 + bh->buf = kmalloc(fsg->buf_size, GFP_KERNEL);
3910 + if (!bh->buf)
3911 + goto out;
3912 + bh->next = bh + 1;
3913 + }
3914 + fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0];
3915 +
3916 + fsg->thread_task = kthread_create(fsg_main_thread, fsg,
3917 + shortname);
3918 + if (IS_ERR(fsg->thread_task)) {
3919 + rc = PTR_ERR(fsg->thread_task);
3920 + ERROR(fsg, "kthread_create failed: %d\n", rc);
3921 + goto out;
3922 + }
3923 +
3924 + INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);
3925 +
3926 + pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
3927 + for (i = 0; i < fsg->nluns; ++i) {
3928 + curlun = &fsg->luns[i];
3929 + if (backing_file_is_open(curlun)) {
3930 + p = NULL;
3931 + if (pathbuf) {
3932 + p = d_path(&curlun->filp->f_path,
3933 + pathbuf, PATH_MAX);
3934 + if (IS_ERR(p))
3935 + p = NULL;
3936 + }
3937 + LINFO(curlun, "ro=%d, file: %s\n",
3938 + curlun->ro, (p ? p : "(error)"));
3939 + }
3940 + }
3941 + kfree(pathbuf);
3942 +
3943 + set_bit(REGISTERED, &fsg->atomic_bitflags);
3944 +
3945 + /* Tell the thread to start working */
3946 + wake_up_process(fsg->thread_task);
3947 + return 0;
3948 +
3949 +autoconf_fail:
3950 + ERROR(fsg, "unable to autoconfigure all endpoints\n");
3951 + rc = -ENOTSUPP;
3952 +
3953 +out:
3954 + DBG(fsg, "fsg_function_bind failed: %d\n", rc);
3955 + fsg->state = FSG_STATE_TERMINATED; /* The thread is dead */
3956 + fsg_function_unbind(c, f);
3957 + close_all_backing_files(fsg);
3958 + return rc;
3959 +}
3960 +
3961 +static int fsg_function_set_alt(struct usb_function *f,
3962 + unsigned intf, unsigned alt)
3963 +{
3964 + struct fsg_dev *fsg = func_to_dev(f);
3965 + DBG(fsg, "fsg_function_set_alt intf: %d alt: %d\n", intf, alt);
3966 + fsg->new_config = 1;
3967 + raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
3968 + return 0;
3969 +}
3970 +
3971 +static void fsg_function_disable(struct usb_function *f)
3972 +{
3973 + struct fsg_dev *fsg = func_to_dev(f);
3974 + DBG(fsg, "fsg_function_disable\n");
3975 + fsg->new_config = 0;
3976 + raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
3977 +}
3978 +
3979 +int __init mass_storage_function_add(struct usb_composite_dev *cdev,
3980 + struct usb_configuration *c, int nluns)
3981 +{
3982 + int rc;
3983 + struct fsg_dev *fsg;
3984 +
3985 + printk(KERN_INFO "mass_storage_function_add\n");
3986 + rc = fsg_alloc();
3987 + if (rc)
3988 + return rc;
3989 + fsg = the_fsg;
3990 + fsg->nluns = nluns;
3991 +
3992 + spin_lock_init(&fsg->lock);
3993 + init_rwsem(&fsg->filesem);
3994 + kref_init(&fsg->ref);
3995 + init_completion(&fsg->thread_notifier);
3996 +
3997 + the_fsg->buf_size = BULK_BUFFER_SIZE;
3998 + the_fsg->sdev.name = DRIVER_NAME;
3999 + the_fsg->sdev.print_name = print_switch_name;
4000 + the_fsg->sdev.print_state = print_switch_state;
4001 + rc = switch_dev_register(&the_fsg->sdev);
4002 + if (rc < 0)
4003 + goto err_switch_dev_register;
4004 +
4005 + wake_lock_init(&the_fsg->wake_lock, WAKE_LOCK_SUSPEND,
4006 + "usb_mass_storage");
4007 +
4008 + fsg->cdev = cdev;
4009 + fsg->function.name = shortname;
4010 + fsg->function.descriptors = fs_function;
4011 + fsg->function.bind = fsg_function_bind;
4012 + fsg->function.unbind = fsg_function_unbind;
4013 + fsg->function.setup = fsg_function_setup;
4014 + fsg->function.set_alt = fsg_function_set_alt;
4015 + fsg->function.disable = fsg_function_disable;
4016 +
4017 + rc = usb_add_function(c, &fsg->function);
4018 + if (rc != 0)
4019 + goto err_usb_add_function;
4020 +
4021 + return 0;
4022 +
4023 +err_usb_add_function:
4024 + switch_dev_unregister(&the_fsg->sdev);
4025 +err_switch_dev_register:
4026 + kref_put(&the_fsg->ref, fsg_release);
4027 +
4028 + return rc;
4029 +}
4030 --- /dev/null
4031 +++ b/drivers/usb/gadget/f_mass_storage.h
4032 @@ -0,0 +1,52 @@
4033 +/*
4034 + * drivers/usb/gadget/f_mass_storage.h
4035 + *
4036 + * Function Driver for USB Mass Storage
4037 + *
4038 + * Copyright (C) 2008 Google, Inc.
4039 + * Author: Mike Lockwood <lockwood@android.com>
4040 + *
4041 + * Based heavily on the file_storage gadget driver in
4042 + * drivers/usb/gadget/file_storage.c and licensed under the same terms:
4043 + *
4044 + * Copyright (C) 2003-2007 Alan Stern
4045 + * All rights reserved.
4046 + *
4047 + * Redistribution and use in source and binary forms, with or without
4048 + * modification, are permitted provided that the following conditions
4049 + * are met:
4050 + * 1. Redistributions of source code must retain the above copyright
4051 + * notice, this list of conditions, and the following disclaimer,
4052 + * without modification.
4053 + * 2. Redistributions in binary form must reproduce the above copyright
4054 + * notice, this list of conditions and the following disclaimer in the
4055 + * documentation and/or other materials provided with the distribution.
4056 + * 3. The names of the above-listed copyright holders may not be used
4057 + * to endorse or promote products derived from this software without
4058 + * specific prior written permission.
4059 + *
4060 + * ALTERNATIVELY, this software may be distributed under the terms of the
4061 + * GNU General Public License ("GPL") as published by the Free Software
4062 + * Foundation, either version 2 of that License or (at your option) any
4063 + * later version.
4064 + *
4065 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
4066 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
4067 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
4068 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
4069 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
4070 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
4071 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
4072 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
4073 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
4074 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
4075 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4076 + */
4077 +
4078 +#ifndef __F_MASS_STORAGE_H
4079 +#define __F_MASS_STORAGE_H
4080 +
4081 +int mass_storage_function_add(struct usb_composite_dev *cdev,
4082 + struct usb_configuration *c, int nluns);
4083 +
4084 +#endif /* __F_MASS_STORAGE_H */
4085 --- /dev/null
4086 +++ b/include/linux/usb/android.h
4087 @@ -0,0 +1,40 @@
4088 +/*
4089 + * Platform data for Android USB
4090 + *
4091 + * Copyright (C) 2008 Google, Inc.
4092 + * Author: Mike Lockwood <lockwood@android.com>
4093 + *
4094 + * This software is licensed under the terms of the GNU General Public
4095 + * License version 2, as published by the Free Software Foundation, and
4096 + * may be copied, distributed, and modified under those terms.
4097 + *
4098 + * This program is distributed in the hope that it will be useful,
4099 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4100 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4101 + * GNU General Public License for more details.
4102 + *
4103 + */
4104 +#ifndef __LINUX_USB_ANDROID_H
4105 +#define __LINUX_USB_ANDROID_H
4106 +
4107 +struct android_usb_platform_data {
4108 + /* USB device descriptor fields */
4109 + __u16 vendor_id;
4110 +
4111 + /* Default product ID. */
4112 + __u16 product_id;
4113 +
4114 + /* Product ID when adb is enabled. */
4115 + __u16 adb_product_id;
4116 +
4117 + __u16 version;
4118 +
4119 + char *product_name;
4120 + char *manufacturer_name;
4121 + char *serial_number;
4122 +
4123 + /* number of LUNS for mass storage function */
4124 + int nluns;
4125 +};
4126 +
4127 +#endif /* __LINUX_USB_ANDROID_H */
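
Usage note (appended, not part of the patch): struct android_usb_platform_data above is what board code hands to the android gadget driver. A minimal sketch follows, assuming the driver binds to a platform device named "android_usb"; every numeric ID and string below is a placeholder for illustration, not a value taken from this patch.

#include <linux/platform_device.h>
#include <linux/usb/android.h>

/* Hypothetical board-level platform data; IDs and names are examples only. */
static struct android_usb_platform_data example_android_usb_pdata = {
	.vendor_id         = 0x18d1,	/* example vendor ID */
	.product_id        = 0x0001,	/* example default product ID */
	.adb_product_id    = 0x0002,	/* example product ID when adb is enabled */
	.version           = 0x0100,
	.product_name      = "Example Android Device",
	.manufacturer_name = "Example, Inc.",
	.serial_number     = "0123456789ABCDEF",
	.nluns             = 1,		/* one mass-storage LUN */
};

static struct platform_device example_android_usb_device = {
	.name = "android_usb",		/* assumed platform driver match name */
	.id   = -1,
	.dev  = {
		.platform_data = &example_android_usb_pdata,
	},
};

/* A board init function would then register the device:
 *	platform_device_register(&example_android_usb_device);
 */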