/*
 * USB Skeleton driver - 2.2
 *
 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation, version 2.
 *
 * This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
 * but has been rewritten to be easier to read and use.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>


/* Define these values to match your devices */
#define USB_SKEL_VENDOR_ID	0xfff0
#define USB_SKEL_PRODUCT_ID	0xfff0

/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
	{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
	{ }					/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_table);
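/*
 * The MODULE_DEVICE_TABLE() line above exports skel_table so the module can
 * be auto-loaded when a matching device is plugged in.  As a sketch, a real
 * driver would list its device's actual vendor/product pair before the
 * terminating entry, e.g. (hypothetical IDs):
 *
 *	{ USB_DEVICE(0x1234, 0x5678) },
 */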


/* Get a minor range for your devices from the usb maintainer */
#define USB_SKEL_MINOR_BASE	192

/* our private defines. if this grows any larger, use your own .h file */
#define MAX_TRANSFER		(PAGE_SIZE - 512)
/*
 * MAX_TRANSFER is chosen so that the VM is not stressed by
 * allocations > PAGE_SIZE and so that the number of packets in a page
 * is an integer; 512 is the largest possible packet on EHCI.
 */
#define WRITES_IN_FLIGHT	8
/* arbitrarily chosen */

/* Structure to hold all of our device specific stuff */
struct usb_skel {
	struct usb_device	*udev;			/* the usb device for this device */
	struct usb_interface	*interface;		/* the interface for this device */
	struct semaphore	limit_sem;		/* limiting the number of writes in progress */
	struct usb_anchor	submitted;		/* in case we need to retract our submissions */
	struct urb		*bulk_in_urb;		/* the urb to read data with */
	unsigned char		*bulk_in_buffer;	/* the buffer to receive data */
	size_t			bulk_in_size;		/* the size of the receive buffer */
	size_t			bulk_in_filled;		/* number of bytes in the buffer */
	size_t			bulk_in_copied;		/* already copied to user space */
	__u8			bulk_in_endpointAddr;	/* the address of the bulk in endpoint */
	__u8			bulk_out_endpointAddr;	/* the address of the bulk out endpoint */
	int			errors;			/* the last request tanked */
	bool			ongoing_read;		/* a read is going on */
	spinlock_t		err_lock;		/* lock for errors */
	struct kref		kref;
	struct mutex		io_mutex;		/* synchronize I/O with disconnect */
	wait_queue_head_t	bulk_in_wait;		/* to wait for an ongoing read */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)

static struct usb_driver skel_driver;
static void skel_draw_down(struct usb_skel *dev);

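/* frees everything set up in probe() once the last reference is dropped */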
static void skel_delete(struct kref *kref)
{
	struct usb_skel *dev = to_skel_dev(kref);

	usb_free_urb(dev->bulk_in_urb);
	usb_put_dev(dev->udev);
	kfree(dev->bulk_in_buffer);
	kfree(dev);
}

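/* called on open() of the device node; looks up the interface by minor and takes a reference */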
static int skel_open(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;
	struct usb_interface *interface;
	int subminor;
	int retval = 0;

	subminor = iminor(inode);

	interface = usb_find_interface(&skel_driver, subminor);
	if (!interface) {
		pr_err("%s - error, can't find device for minor %d\n",
			__func__, subminor);
		retval = -ENODEV;
		goto exit;
	}

	dev = usb_get_intfdata(interface);
	if (!dev) {
		retval = -ENODEV;
		goto exit;
	}

	retval = usb_autopm_get_interface(interface);
	if (retval)
		goto exit;

	/* increment our usage count for the device */
	kref_get(&dev->kref);

	/* save our object in the file's private structure */
	file->private_data = dev;

exit:
	return retval;
}

static int skel_release(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* allow the device to be autosuspended */
	mutex_lock(&dev->io_mutex);
	if (dev->interface)
		usb_autopm_put_interface(dev->interface);
	mutex_unlock(&dev->io_mutex);

	/* decrement the count on our device */
	kref_put(&dev->kref, skel_delete);
	return 0;
}

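/* wait for outstanding I/O, then report and clear any pending error */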
static int skel_flush(struct file *file, fl_owner_t id)
{
	struct usb_skel *dev;
	int res;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* wait for io to stop */
	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	/* read out errors, leave subsequent opens a clean slate */
	spin_lock_irq(&dev->err_lock);
	res = dev->errors ? (dev->errors == -EPIPE ? -EPIPE : -EIO) : 0;
	dev->errors = 0;
	spin_unlock_irq(&dev->err_lock);

	mutex_unlock(&dev->io_mutex);

	return res;
}

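/* called in interrupt context when the bulk-in urb completes */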
static void skel_read_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	spin_lock(&dev->err_lock);
	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero read bulk status received: %d\n",
				__func__, urb->status);

		dev->errors = urb->status;
	} else {
		dev->bulk_in_filled = urb->actual_length;
	}
	dev->ongoing_read = 0;
	spin_unlock(&dev->err_lock);

	wake_up_interruptible(&dev->bulk_in_wait);
}

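/* start an asynchronous bulk-in transfer; the caller holds io_mutex */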
static int skel_do_read_io(struct usb_skel *dev, size_t count)
{
	int rv;

	/* prepare a read */
	usb_fill_bulk_urb(dev->bulk_in_urb,
			dev->udev,
			usb_rcvbulkpipe(dev->udev,
				dev->bulk_in_endpointAddr),
			dev->bulk_in_buffer,
			min(dev->bulk_in_size, count),
			skel_read_bulk_callback,
			dev);
	/* tell everybody to leave the URB alone */
	spin_lock_irq(&dev->err_lock);
	dev->ongoing_read = 1;
	spin_unlock_irq(&dev->err_lock);

	/* the urb is about to be submitted, so there is no data to deliver yet */
	dev->bulk_in_filled = 0;
	dev->bulk_in_copied = 0;

	/* do it */
	rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
	if (rv < 0) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting read urb, error %d\n",
			__func__, rv);
		rv = (rv == -ENOMEM) ? rv : -EIO;
		spin_lock_irq(&dev->err_lock);
		dev->ongoing_read = 0;
		spin_unlock_irq(&dev->err_lock);
	}

	return rv;
}

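/*
 * read() implementation: copy out whatever the last bulk-in transfer left in
 * bulk_in_buffer and (re)start I/O as needed, blocking unless O_NONBLOCK.
 */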
static ssize_t skel_read(struct file *file, char *buffer, size_t count,
			 loff_t *ppos)
{
	struct usb_skel *dev;
	int rv;
	bool ongoing_io;

	dev = file->private_data;

	/* if we cannot read at all, return EOF */
	if (!dev->bulk_in_urb || !count)
		return 0;

	/* no concurrent readers */
	rv = mutex_lock_interruptible(&dev->io_mutex);
	if (rv < 0)
		return rv;

	if (!dev->interface) {		/* disconnect() was called */
		rv = -ENODEV;
		goto exit;
	}

	/* if IO is under way, we must not touch things */
retry:
	spin_lock_irq(&dev->err_lock);
	ongoing_io = dev->ongoing_read;
	spin_unlock_irq(&dev->err_lock);

	if (ongoing_io) {
		/* nonblocking IO shall not wait */
		if (file->f_flags & O_NONBLOCK) {
			rv = -EAGAIN;
			goto exit;
		}
		/*
		 * IO may take forever
		 * hence wait in an interruptible state
		 */
		rv = wait_event_interruptible(dev->bulk_in_wait, (!dev->ongoing_read));
		if (rv < 0)
			goto exit;
	}

	/* errors must be reported */
	rv = dev->errors;
	if (rv < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		rv = (rv == -EPIPE) ? rv : -EIO;
		/* report it */
		goto exit;
	}

	/*
	 * if the buffer is filled we may satisfy the read
	 * else we need to start IO
	 */

	if (dev->bulk_in_filled) {
		/* we had read data */
		size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
		size_t chunk = min(available, count);

		if (!available) {
			/*
			 * all data has been used
			 * actual IO needs to be done
			 */
			rv = skel_do_read_io(dev, count);
			if (rv < 0)
				goto exit;
			else
				goto retry;
		}
		/*
		 * data is available
		 * chunk tells us how much shall be copied
		 */

		if (copy_to_user(buffer,
				 dev->bulk_in_buffer + dev->bulk_in_copied,
				 chunk))
			rv = -EFAULT;
		else
			rv = chunk;

		dev->bulk_in_copied += chunk;

		/*
		 * if we are asked for more than we have,
		 * we start IO but don't wait
		 */
		if (available < count)
			skel_do_read_io(dev, count - chunk);
	} else {
		/* no data in the buffer */
		rv = skel_do_read_io(dev, count);
		if (rv < 0)
			goto exit;
		else
			goto retry;
	}
exit:
	mutex_unlock(&dev->io_mutex);
	return rv;
}

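/* completion handler for a write urb; frees its buffer and releases a write slot */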
static void skel_write_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);

		spin_lock(&dev->err_lock);
		dev->errors = urb->status;
		spin_unlock(&dev->err_lock);
	}

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);
	up(&dev->limit_sem);
}

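/*
 * write() implementation: copy user data into a freshly allocated urb and
 * submit it asynchronously; at most WRITES_IN_FLIGHT urbs may be pending.
 */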
static ssize_t skel_write(struct file *file, const char *user_buffer,
			  size_t count, loff_t *ppos)
{
	struct usb_skel *dev;
	int retval = 0;
	struct urb *urb = NULL;
	char *buf = NULL;
	size_t writesize = min(count, (size_t)MAX_TRANSFER);

	dev = file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto exit;

	/*
	 * limit the number of URBs in flight to stop a user from using up all
	 * RAM
	 */
	if (!(file->f_flags & O_NONBLOCK)) {
		if (down_interruptible(&dev->limit_sem)) {
			retval = -ERESTARTSYS;
			goto exit;
		}
	} else {
		if (down_trylock(&dev->limit_sem)) {
			retval = -EAGAIN;
			goto exit;
		}
	}

	spin_lock_irq(&dev->err_lock);
	retval = dev->errors;
	if (retval < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		retval = (retval == -EPIPE) ? retval : -EIO;
	}
	spin_unlock_irq(&dev->err_lock);
	if (retval < 0)
		goto error;

	/* create a urb, and a buffer for it, and copy the data to the urb */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		retval = -ENOMEM;
		goto error;
	}

	buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
				 &urb->transfer_dma);
	if (!buf) {
		retval = -ENOMEM;
		goto error;
	}

	if (copy_from_user(buf, user_buffer, writesize)) {
		retval = -EFAULT;
		goto error;
	}

	/* this lock makes sure we don't submit URBs to gone devices */
	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {		/* disconnect() was called */
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}

	/* initialize the urb properly */
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
			  buf, writesize, skel_write_bulk_callback, dev);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->submitted);

	/* send the data out the bulk port */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	mutex_unlock(&dev->io_mutex);
	if (retval) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting write urb, error %d\n",
			__func__, retval);
		goto error_unanchor;
	}

	/*
	 * release our reference to this urb, the USB core will eventually free
	 * it entirely
	 */
	usb_free_urb(urb);

	return writesize;

error_unanchor:
	usb_unanchor_urb(urb);
error:
	if (urb) {
		usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
		usb_free_urb(urb);
	}
	up(&dev->limit_sem);

exit:
	return retval;
}

static const struct file_operations skel_fops = {
	.owner =	THIS_MODULE,
	.read =		skel_read,
	.write =	skel_write,
	.open =		skel_open,
	.release =	skel_release,
	.flush =	skel_flush,
	.llseek =	noop_llseek,
};
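
/*
 * For illustration only: once the interface is bound and udev has created the
 * character device (the "/dev/skel0" name below is an assumption, not
 * something this driver guarantees), user space talks to it with plain
 * open()/read()/write(), e.g. (needs <fcntl.h>, <unistd.h> and <stdio.h>):
 *
 *	int fd = open("/dev/skel0", O_RDWR);
 *	char buf[64];
 *	ssize_t n;
 *
 *	if (fd < 0)
 *		return -1;
 *	if (write(fd, "ping", 4) < 0)
 *		perror("write");
 *	n = read(fd, buf, sizeof(buf));	// blocks unless O_NONBLOCK was set
 *	if (n < 0)
 *		perror("read");
 *	close(fd);
 */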

/*
 * usb class driver info in order to get a minor number from the usb core,
 * and to have the device registered with the driver core
 */
static struct usb_class_driver skel_class = {
	.name =		"skel%d",
	.fops =		&skel_fops,
	.minor_base =	USB_SKEL_MINOR_BASE,
};

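/* called by the USB core when an interface matching skel_table is bound */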
static int skel_probe(struct usb_interface *interface,
		      const struct usb_device_id *id)
{
	struct usb_skel *dev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	size_t buffer_size;
	int i;
	int retval = -ENOMEM;

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&interface->dev, "Out of memory\n");
		goto error;
	}
	kref_init(&dev->kref);
	sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
	mutex_init(&dev->io_mutex);
	spin_lock_init(&dev->err_lock);
	init_usb_anchor(&dev->submitted);
	init_waitqueue_head(&dev->bulk_in_wait);

	dev->udev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;

	/* set up the endpoint information */
	/* use only the first bulk-in and bulk-out endpoints */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

		if (!dev->bulk_in_endpointAddr &&
		    usb_endpoint_is_bulk_in(endpoint)) {
			/* we found a bulk in endpoint */
			buffer_size = usb_endpoint_maxp(endpoint);
			dev->bulk_in_size = buffer_size;
			dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
			dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
			if (!dev->bulk_in_buffer) {
				dev_err(&interface->dev,
					"Could not allocate bulk_in_buffer\n");
				goto error;
			}
			dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!dev->bulk_in_urb) {
				dev_err(&interface->dev,
					"Could not allocate bulk_in_urb\n");
				goto error;
			}
		}

		if (!dev->bulk_out_endpointAddr &&
		    usb_endpoint_is_bulk_out(endpoint)) {
			/* we found a bulk out endpoint */
			dev->bulk_out_endpointAddr = endpoint->bEndpointAddress;
		}
	}
	if (!(dev->bulk_in_endpointAddr && dev->bulk_out_endpointAddr)) {
		dev_err(&interface->dev,
			"Could not find both bulk-in and bulk-out endpoints\n");
		goto error;
	}

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* we can register the device now, as it is ready */
	retval = usb_register_dev(interface, &skel_class);
	if (retval) {
		/* something prevented us from registering this driver */
		dev_err(&interface->dev,
			"Not able to get a minor for this device.\n");
		usb_set_intfdata(interface, NULL);
		goto error;
	}

	/* let the user know what node this device is now attached to */
	dev_info(&interface->dev,
		 "USB Skeleton device now attached to USBSkel-%d",
		 interface->minor);
	return 0;

error:
	if (dev)
		/* this frees allocated memory */
		kref_put(&dev->kref, skel_delete);
	return retval;
}

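/* called when the device is unplugged or the driver is unbound from the interface */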
static void skel_disconnect(struct usb_interface *interface)
{
	struct usb_skel *dev;
	int minor = interface->minor;

	dev = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);

	/* give back our minor */
	usb_deregister_dev(interface, &skel_class);

	/* prevent more I/O from starting */
	mutex_lock(&dev->io_mutex);
	dev->interface = NULL;
	mutex_unlock(&dev->io_mutex);

	usb_kill_anchored_urbs(&dev->submitted);

	/* decrement our usage count */
	kref_put(&dev->kref, skel_delete);

	dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
}

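/* wait for anchored writes to complete (1000 ms timeout), then cancel anything still pending */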
static void skel_draw_down(struct usb_skel *dev)
{
	int time;

	time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
	if (!time)
		usb_kill_anchored_urbs(&dev->submitted);
	usb_kill_urb(dev->bulk_in_urb);
}

static int skel_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	if (!dev)
		return 0;
	skel_draw_down(dev);
	return 0;
}

static int skel_resume(struct usb_interface *intf)
{
	return 0;
}

static int skel_pre_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	return 0;
}

static int skel_post_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	/* we are sure no URBs are active - no locking needed */
	dev->errors = -EPIPE;
	mutex_unlock(&dev->io_mutex);

	return 0;
}

static struct usb_driver skel_driver = {
	.name =		"skeleton",
	.probe =	skel_probe,
	.disconnect =	skel_disconnect,
	.suspend =	skel_suspend,
	.resume =	skel_resume,
	.pre_reset =	skel_pre_reset,
	.post_reset =	skel_post_reset,
	.id_table =	skel_table,
	.supports_autosuspend = 1,
};

module_usb_driver(skel_driver);

MODULE_LICENSE("GPL");