1 /***********************************************************************************
2 CED1401 usb driver. This basic loading is based on the usb-skeleton.c code that is:
3 Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
4 Copyright (C) 2012 Alois Schloegl <alois.schloegl@ist.ac.at>
5 There is not a great deal of the skeleton left.
7 All the remainder dealing specifically with the CED1401 is based on drivers written
8 by CED for other systems (mainly Windows) and is:
9 Copyright (C) 2010 Cambridge Electronic Design Ltd
10 Author Greg P Smith (greg@ced.co.uk)
12 This program is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License
14 as published by the Free Software Foundation; either version 2
15 of the License, or (at your option) any later version.
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
28 There are 4 endpoints plus the control endpoint in the standard interface
29 provided by most 1401s. The control endpoint is used for standard USB requests,
30 plus various CED-specific transactions such as start self test, debug and get
31 the 1401 status. The other endpoints are:
33 1 Characters to the 1401
34 2 Characters from the 1401
35 3 Block data to the 1401
36 4 Block data to the host.
38 inside the driver these are indexed as an array from 0 to 3, transactions
39 over the control endpoint are carried out using a separate mechanism. The
40 use of the endpoints is mostly straightforward, with the driver issuing
41 IO request packets (IRPs) as required to transfer data to and from the 1401.
42 The handling of endpoint 2 is different because it is used for characters
43 from the 1401, which can appear spontaneously and without any other driver
44 activity - for example to repeatedly request DMA transfers in Spike2. The
45 desired effect is achieved by using an interrupt endpoint which can be
46 polled to see if it has data available, and writing the driver so that it
47 always maintains a pending read IRP from that endpoint which will read the
48 character data and terminate as soon as the 1401 makes data available. This
49 works very well, some care is taken with when you kick off this character
50 read IRP to avoid it being active when it is not wanted but generally it
51 is running all the time.
53 In the 2270, there are only three endpoints plus the control endpoint. In
54 addition to the transactions mentioned above, the control endpoint is used
55 to transfer character data to the 1401. The other endpoints are used as:
57 1 Characters from the 1401
58 2 Block data to the 1401
59 3 Block data to the host.
61 The type of interface available is specified by the interface subclass field
62 in the interface descriptor provided by the 1401. See the USB_INT_ constants
63 for the values that this field can hold.
65 ****************************************************************************
68 Although Linux Device Drivers (3rd Edition) was a major source of information,
69 it is very out of date. A lot of information was gleaned from the latest
70 usb_skeleton.c code (you need to download the kernel sources to get this).
72 To match the Windows version, everything is done using ioctl calls. All the
73 device state is held in the DEVICE_EXTENSION (named to match Windows use).
74 Block transfers are done by using get_user_pages() to pin down a list of
75 pages that we hold a pointer to in the device driver. We also allocate a
76 coherent transfer buffer of size STAGED_SZ (this must be a multiple of the
77 bulk endpoint size so that the 1401 does not realise that we break large
78 transfers down into smaller pieces). We use kmap_atomic() to get a kernel
79 va for each page, as it is required, for copying; see CopyUserSpace().
81 All character and data transfers are done using asynchronous IO. All Urbs are
82 tracked by anchoring them. Status and debug ioctls are implemented with the
83 synchronous non-Urb based transfers.
86 #include <linux/kernel.h>
87 #include <linux/errno.h>
88 #include <linux/usb.h>
89 #include <linux/mutex.h>
91 #include <linux/highmem.h>
92 #include <linux/version.h>
93 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) )
94 #include <linux/init.h>
95 #include <linux/slab.h>
96 #include <linux/module.h>
97 #include <linux/kref.h>
98 #include <linux/uaccess.h>
103 /* Define these values to match your devices */
104 #define USB_CED_VENDOR_ID 0x0525
105 #define USB_CED_PRODUCT_ID 0xa0f0
107 /* table of devices that work with this driver */
108 static const struct usb_device_id ced_table[] = {
109 {USB_DEVICE(USB_CED_VENDOR_ID, USB_CED_PRODUCT_ID)},
110 {} /* Terminating entry */
113 MODULE_DEVICE_TABLE(usb, ced_table);
115 /* Get a minor range for your devices from the usb maintainer */
116 #define USB_CED_MINOR_BASE 192
118 /* our private defines. if this grows any larger, use your own .h file */
119 #define MAX_TRANSFER (PAGE_SIZE - 512)
120 /* MAX_TRANSFER is chosen so that the VM is not stressed by
121 allocations > PAGE_SIZE and the number of packets in a page
122 is an integer 512 is the largest possible packet on EHCI */
123 #define WRITES_IN_FLIGHT 8
124 /* arbitrarily chosen */
127 The cause for these errors is that the driver makes use of the functions usb_buffer_alloc() and usb_buffer_free() which got renamed in kernel 2.6.35. This is stated in the Changelog: USB: rename usb_buffer_alloc() and usb_buffer_free() users
128 To clarify what the functions actually do:
129 usb_buffer_alloc() is renamed to usb_alloc_coherent()
130 usb_buffer_free() is renamed to usb_free_coherent()
131 This is needed on Debian 2.6.32-5-amd64
133 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
134 #define usb_alloc_coherent usb_buffer_alloc
135 #define usb_free_coherent usb_buffer_free
136 #define noop_llseek NULL
139 static struct usb_driver ced_driver;
// ced_delete: kref release callback, run when the last reference to the
// device extension is dropped. Frees each coherent transfer buffer together
// with its urb (char output, char input, staged block IO), then releases the
// usb_device reference. The interface pointer may already be NULL here, so
// only pdx->udev is used for the frees.
// NOTE(review): this listing is truncated (embedded line numbers jump), so
// the function braces and any final kfree(pdx) are not visible here - confirm
// against the full source.
141 static void ced_delete(struct kref *kref)
143 DEVICE_EXTENSION *pdx = to_DEVICE_EXTENSION(kref);
145 // Free up the output buffer, then free the output urb. Note that the interface member
146 // of pdx will probably be NULL, so cannot be used to get to dev.
147 usb_free_coherent(pdx->udev, OUTBUF_SZ, pdx->pCoherCharOut,
148 pdx->pUrbCharOut->transfer_dma);
149 usb_free_urb(pdx->pUrbCharOut);
151 // Do the same for chan input
152 usb_free_coherent(pdx->udev, INBUF_SZ, pdx->pCoherCharIn,
153 pdx->pUrbCharIn->transfer_dma);
154 usb_free_urb(pdx->pUrbCharIn);
156 // Do the same for the block transfers
157 usb_free_coherent(pdx->udev, STAGED_SZ, pdx->pCoherStagedIO,
158 pdx->pStagedUrb->transfer_dma);
159 usb_free_urb(pdx->pStagedUrb);
161 usb_put_dev(pdx->udev);
165 // This is the driver end of the open() call from user space.
// ced_open: looks up the usb_interface for this minor, takes a reference on
// the device extension, and (on first open) resumes the interface via
// usb_autopm_get_interface() under io_mutex. The pdx pointer is stored in
// file->private_data for later ioctl/release calls. On the failure paths the
// kref taken above is dropped again with ced_delete.
// NOTE(review): truncated listing - the "can't find device" branch below has
// lost its 'if (!interface)' guard and return, and the error-unwind returns
// are missing; verify against the full source before relying on flow here.
166 static int ced_open(struct inode *inode, struct file *file)
168 DEVICE_EXTENSION *pdx;
170 int subminor = iminor(inode);
171 struct usb_interface *interface =
172 usb_find_interface(&ced_driver, subminor);
174 pr_err("%s - error, can't find device for minor %d", __func__,
180 pdx = usb_get_intfdata(interface);
186 dev_dbg(&interface->dev, "%s got pdx", __func__);
188 /* increment our usage count for the device */
189 kref_get(&pdx->kref);
191 /* lock the device to allow correctly handling errors
193 mutex_lock(&pdx->io_mutex);
195 if (!pdx->open_count++) {
196 retval = usb_autopm_get_interface(interface);
199 mutex_unlock(&pdx->io_mutex);
200 kref_put(&pdx->kref, ced_delete);
203 } else { //uncomment this block if you want exclusive open
204 dev_err(&interface->dev, "%s fail: already open", __func__);
207 mutex_unlock(&pdx->io_mutex);
208 kref_put(&pdx->kref, ced_delete);
211 /* prevent the device from being autosuspended */
213 /* save our object in the file's private structure */
214 file->private_data = pdx;
215 mutex_unlock(&pdx->io_mutex);
// ced_release: driver end of close() from user space. Decrements the open
// count under io_mutex and re-allows autosuspend when the last opener goes
// away (only if the interface still exists, i.e. the device was not
// unplugged), then drops the kref taken in ced_open.
221 static int ced_release(struct inode *inode, struct file *file)
223 DEVICE_EXTENSION *pdx = file->private_data;
227 dev_dbg(&pdx->interface->dev, "%s called", __func__);
228 mutex_lock(&pdx->io_mutex);
229 if (!--pdx->open_count && pdx->interface) // Allow autosuspend
230 usb_autopm_put_interface(pdx->interface);
231 mutex_unlock(&pdx->io_mutex);
233 kref_put(&pdx->kref, ced_delete); // decrement the count on our device
// ced_flush: flush() entry point. Takes io_mutex to serialise against other
// IO, then reads-and-reports any accumulated error under err_lock: -EPIPE is
// passed through as-is, any other stored error becomes -EIO, no error gives 0.
// NOTE(review): truncated listing - the line that clears pdx->errors after
// sampling it (leaving "a clean slate" as the comment says) is not visible
// here; confirm against the full source.
237 static int ced_flush(struct file *file, fl_owner_t id)
240 DEVICE_EXTENSION *pdx = file->private_data;
244 dev_dbg(&pdx->interface->dev, "%s char in pend=%d", __func__,
245 pdx->bReadCharsPending);
247 /* wait for io to stop */
248 mutex_lock(&pdx->io_mutex);
249 dev_dbg(&pdx->interface->dev, "%s got io_mutex", __func__);
252 /* read out errors, leave subsequent opens a clean slate */
253 spin_lock_irq(&pdx->err_lock);
254 res = pdx->errors ? (pdx->errors == -EPIPE ? -EPIPE : -EIO) : 0;
256 spin_unlock_irq(&pdx->err_lock);
258 mutex_unlock(&pdx->io_mutex);
259 dev_dbg(&pdx->interface->dev, "%s exit reached", __func__);
264 /***************************************************************************
265 ** CanAcceptIoRequests
266 ** If the device is removed, interface is set NULL. We also clear our pointer
267 ** from the interface, so we should make sure that pdx is not NULL. This will
268 ** not help with a device extension held by a file.
269 ** return true if can accept new io requests, else false
// Simple predicate used before every urb submission: both the extension and
// its interface pointer must still be live (interface goes NULL on unplug).
271 static bool CanAcceptIoRequests(DEVICE_EXTENSION * pdx)
273 return pdx && pdx->interface; // Can we accept IO requests
276 /****************************************************************************
277 ** Callback routine to complete writes. This may need to fire off another
278 ** urb to complete the transfer.
279 ****************************************************************************/
// Completion handler for the character-output bulk urb (runs in interrupt
// context, hence the plain spin_lock on charOutLock and GFP_ATOMIC below).
// On error: record the status in err_lock-protected pdx->errors, reset the
// circular output buffer and flag a pipe error. On success: consume the sent
// bytes from the ring (dwOutBuffGet wraps at OUTBUF_SZ) and, if characters
// remain, copy the next contiguous run into the coherent buffer and resubmit
// the same urb. bSendCharsPending is cleared on every terminal path so
// SendChars() can run again.
// NOTE(review): truncated listing - several guard lines ('if (!(...))' around
// the unlink-status test, the submit-failure 'if (iReturn)', and closing
// braces) are missing here; the comments above describe the intent per the
// surviving code, confirm details against the full source.
280 static void ced_writechar_callback(struct urb *pUrb)
282 DEVICE_EXTENSION *pdx = pUrb->context;
283 int nGot = pUrb->actual_length; // what we transferred
285 if (pUrb->status) { // sync/async unlink faults aren't errors
287 (pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
288 || pUrb->status == -ESHUTDOWN)) {
289 dev_err(&pdx->interface->dev,
290 "%s - nonzero write bulk status received: %d",
291 __func__, pUrb->status);
294 spin_lock(&pdx->err_lock);
295 pdx->errors = pUrb->status;
296 spin_unlock(&pdx->err_lock);
297 nGot = 0; // and tidy up again if so
299 spin_lock(&pdx->charOutLock); // already at irq level
300 pdx->dwOutBuffGet = 0; // Reset the output buffer
301 pdx->dwOutBuffPut = 0;
302 pdx->dwNumOutput = 0; // Clear the char count
303 pdx->bPipeError[0] = 1; // Flag an error for later
304 pdx->bSendCharsPending = false; // Allow other threads again
305 spin_unlock(&pdx->charOutLock); // already at irq level
306 dev_dbg(&pdx->interface->dev,
307 "%s - char out done, 0 chars sent", __func__);
309 dev_dbg(&pdx->interface->dev,
310 "%s - char out done, %d chars sent", __func__, nGot);
311 spin_lock(&pdx->charOutLock); // already at irq level
312 pdx->dwNumOutput -= nGot; // Now adjust the char send buffer
313 pdx->dwOutBuffGet += nGot; // to match what we did
314 if (pdx->dwOutBuffGet >= OUTBUF_SZ) // Can't do this any earlier as data could be overwritten
315 pdx->dwOutBuffGet = 0;
317 if (pdx->dwNumOutput > 0) // if more to be done...
319 int nPipe = 0; // The pipe number to use
321 char *pDat = &pdx->outputBuffer[pdx->dwOutBuffGet];
322 unsigned int dwCount = pdx->dwNumOutput; // maximum to send
323 if ((pdx->dwOutBuffGet + dwCount) > OUTBUF_SZ) // does it cross buffer end?
324 dwCount = OUTBUF_SZ - pdx->dwOutBuffGet;
325 spin_unlock(&pdx->charOutLock); // we are done with stuff that changes
326 memcpy(pdx->pCoherCharOut, pDat, dwCount); // copy output data to the buffer
327 usb_fill_bulk_urb(pdx->pUrbCharOut, pdx->udev,
328 usb_sndbulkpipe(pdx->udev,
330 pdx->pCoherCharOut, dwCount,
331 ced_writechar_callback, pdx);
332 pdx->pUrbCharOut->transfer_flags |=
333 URB_NO_TRANSFER_DMA_MAP;
334 usb_anchor_urb(pdx->pUrbCharOut, &pdx->submitted); // in case we need to kill it
335 iReturn = usb_submit_urb(pdx->pUrbCharOut, GFP_ATOMIC);
336 dev_dbg(&pdx->interface->dev, "%s n=%d>%s<", __func__,
338 spin_lock(&pdx->charOutLock); // grab lock for errors
340 pdx->bPipeError[nPipe] = 1; // Flag an error to be handled later
341 pdx->bSendCharsPending = false; // Allow other threads again
342 usb_unanchor_urb(pdx->pUrbCharOut);
343 dev_err(&pdx->interface->dev,
344 "%s usb_submit_urb() returned %d",
348 pdx->bSendCharsPending = false; // Allow other threads again
349 spin_unlock(&pdx->charOutLock); // already at irq level
353 /****************************************************************************
355 ** Transmit the characters in the output buffer to the 1401. This may need
356 ** breaking down into multiple transfers.
357 ****************************************************************************/
// SendChars: kick off transmission of any buffered output characters, under
// charOutLock. Two paths depending on the interface type:
//  - 3-pipe devices (e.g. 2270): characters must go out synchronously over
//    EP0 with usb_control_msg(), in chunks of at most 64 bytes; the spinlock
//    is dropped around the blocking calls and the ring is reset afterwards.
//  - 4-pipe devices: copy the contiguous run from the ring into the coherent
//    buffer and submit the character-out bulk urb (GFP_KERNEL - process
//    context here); further chunks are driven by ced_writechar_callback.
// bSendCharsPending gates re-entry from other threads; it is cleared on the
// EP0 path completion and on submit failure.
// NOTE(review): truncated listing - the opening brace of the guarded block,
// the 'if (nSent <= 0)' style checks around the EP0 error report, and the
// 'if (iReturn)' around the submit-failure handling are missing from this
// view; confirm exact control flow against the full source.
358 int SendChars(DEVICE_EXTENSION * pdx)
360 int iReturn = U14ERR_NOERROR;
362 spin_lock_irq(&pdx->charOutLock); // Protect ourselves
364 if ((!pdx->bSendCharsPending) && // Not currently sending
365 (pdx->dwNumOutput > 0) && // has characters to output
366 (CanAcceptIoRequests(pdx))) // and current activity is OK
368 unsigned int dwCount = pdx->dwNumOutput; // Get a copy of the character count
369 pdx->bSendCharsPending = true; // Set flag to lock out other threads
371 dev_dbg(&pdx->interface->dev,
372 "Send %d chars to 1401, EP0 flag %d\n", dwCount,
374 // If we have only 3 end points we must send the characters to the 1401 using EP0.
375 if (pdx->nPipes == 3) {
376 // For EP0 character transmissions to the 1401, we have to hang about until they
377 // are gone, as otherwise without more character IO activity they will never go.
378 unsigned int count = dwCount; // Local char counter
379 unsigned int index = 0; // The index into the char buffer
381 spin_unlock_irq(&pdx->charOutLock); // Free spinlock as we call USBD
383 while ((count > 0) && (iReturn == U14ERR_NOERROR)) {
384 // We have to break the transfer up into 64-byte chunks because of a 2270 problem
385 int n = count > 64 ? 64 : count; // Chars for this xfer, max of 64
386 int nSent = usb_control_msg(pdx->udev,
387 usb_sndctrlpipe(pdx->udev, 0), // use end point 0
388 DB_CHARS, // bRequest
389 (H_TO_D | VENDOR | DEVREQ), // to the device, vendor request to the device
390 0, 0, // value and index are both 0
391 &pdx->outputBuffer[index], // where to send from
392 n, // how much to send
393 1000); // timeout in jiffies
395 iReturn = nSent ? nSent : -ETIMEDOUT; // if 0 chars says we timed out
396 dev_err(&pdx->interface->dev,
397 "Send %d chars by EP0 failed: %d",
400 dev_dbg(&pdx->interface->dev,
401 "Sent %d chars by EP0", n);
407 spin_lock_irq(&pdx->charOutLock); // Protect pdx changes, released by general code
408 pdx->dwOutBuffGet = 0; // so reset the output buffer
409 pdx->dwOutBuffPut = 0;
410 pdx->dwNumOutput = 0; // and clear the buffer count
411 pdx->bSendCharsPending = false; // Allow other threads again
412 } else { // Here for sending chars normally - we hold the spin lock
413 int nPipe = 0; // The pipe number to use
414 char *pDat = &pdx->outputBuffer[pdx->dwOutBuffGet];
416 if ((pdx->dwOutBuffGet + dwCount) > OUTBUF_SZ) // does it cross buffer end?
417 dwCount = OUTBUF_SZ - pdx->dwOutBuffGet;
418 spin_unlock_irq(&pdx->charOutLock); // we are done with stuff that changes
419 memcpy(pdx->pCoherCharOut, pDat, dwCount); // copy output data to the buffer
420 usb_fill_bulk_urb(pdx->pUrbCharOut, pdx->udev,
421 usb_sndbulkpipe(pdx->udev,
423 pdx->pCoherCharOut, dwCount,
424 ced_writechar_callback, pdx);
425 pdx->pUrbCharOut->transfer_flags |=
426 URB_NO_TRANSFER_DMA_MAP;
427 usb_anchor_urb(pdx->pUrbCharOut, &pdx->submitted);
428 iReturn = usb_submit_urb(pdx->pUrbCharOut, GFP_KERNEL);
429 spin_lock_irq(&pdx->charOutLock); // grab lock for errors
431 pdx->bPipeError[nPipe] = 1; // Flag an error to be handled later
432 pdx->bSendCharsPending = false; // Allow other threads again
433 usb_unanchor_urb(pdx->pUrbCharOut); // remove from list of active urbs
436 } else if (pdx->bSendCharsPending && (pdx->dwNumOutput > 0))
437 dev_dbg(&pdx->interface->dev,
438 "SendChars bSendCharsPending:true");
440 dev_dbg(&pdx->interface->dev, "SendChars exit code: %d", iReturn);
441 spin_unlock_irq(&pdx->charOutLock); // Now let go of the spinlock
445 /***************************************************************************
447 ** This moves memory between pinned down user space and the pCoherStagedIO
448 ** memory buffer we use for transfers. Copy n bytes in the directions that
449 ** is defined by pdx->StagedRead. The user space is determined by the area
450 ** in pdx->StagedId and the offset in pdx->StagedDone. The user
451 ** area may well not start on a page boundary, so allow for that.
453 ** We have a table of physical pages that describe the area, so we can use
454 ** this to get a virtual address that the kernel can use.
456 ** pdx Is our device extension which holds all we know about the transfer.
457 ** n The number of bytes to move one way or the other.
458 ***************************************************************************/
// Page-by-page copy: each pinned page is mapped with kmap_atomic() (so no
// sleeping is allowed inside), at most PAGE_SIZE - uiPageOff bytes are moved
// per iteration, then the mapping is dropped with kunmap_atomic(). Direction
// depends on pdx->StagedRead: page<-coherent buffer for reads from the 1401,
// coherent buffer<-page for writes (the two memcpy fragments below).
// NOTE(review): truncated listing - the surrounding while-loop over n, the
// direction 'if', the coherent-buffer memcpy arguments and advance of
// pCoherBuf/dwOffset are missing from this view; treat the structure above
// as reconstructed intent and confirm against the full source.
459 static void CopyUserSpace(DEVICE_EXTENSION * pdx, int n)
461 unsigned int nArea = pdx->StagedId;
462 if (nArea < MAX_TRANSAREAS) {
463 TRANSAREA *pArea = &pdx->rTransDef[nArea]; // area to be used
464 unsigned int dwOffset =
465 pdx->StagedDone + pdx->StagedOffset + pArea->dwBaseOffset;
466 char *pCoherBuf = pdx->pCoherStagedIO; // coherent buffer
468 dev_err(&pdx->interface->dev, "%s area %d unused",
474 int nPage = dwOffset >> PAGE_SHIFT; // page number in table
475 if (nPage < pArea->nPages) {
477 (char *)kmap_atomic(pArea->pPages[nPage]);
479 unsigned int uiPageOff = dwOffset & (PAGE_SIZE - 1); // offset into the page
480 size_t uiXfer = PAGE_SIZE - uiPageOff; // max to transfer on this page
481 if (uiXfer > n) // limit byte count if too much
482 uiXfer = n; // for the page
484 memcpy(pvAddress + uiPageOff,
488 pvAddress + uiPageOff,
490 kunmap_atomic(pvAddress);
495 dev_err(&pdx->interface->dev,
496 "%s did not map page %d",
502 dev_err(&pdx->interface->dev,
503 "%s exceeded pages %d", __func__,
509 dev_err(&pdx->interface->dev, "%s bad area %d", __func__,
513 // Forward declarations for stuff used circularly
514 static int StageChunk(DEVICE_EXTENSION * pdx);
515 /***************************************************************************
516 ** ReadWrite_Complete
518 ** Completion routine for our staged read/write Irps
// staged_callback: completion handler for the staged block-transfer urb,
// run at callback (interrupt) level, so plain spin_lock is used throughout.
// Under stagedLock it:
//  1. Clears bStagedUrbPending and records any urb error (bCancel is set on
//     unlink-type statuses per the surviving comments; the setting line is
//     not visible in this truncated listing).
//  2. For reads, copies the received chunk to pinned user pages via
//     CopyUserSpace(), then advances StagedDone.
//  3. When the whole transfer is done (or cancelled): for circular to-host
//     areas, grows aBlocks[1] if in use, else appends to/starts aBlocks[0],
//     falling back to starting aBlocks[1] when block 0 cannot be extended.
//     Then, unless cancelled, decides whether to wake user-space waiters on
//     pArea->wqEvent: circular areas wake when total buffered data reaches
//     dwEventSz; linear areas wake when the transfer overlaps the
//     [dwEventSt, dwEventSt+dwEventSz) event window.
//     Finally dwDMAFlag returns to MODE_CHAR and any transfer queued in
//     rDMAInfo (bXFerWaiting) is launched via ReadWriteMem().
//  4. Otherwise StageChunk() fires the next chunk of the same transfer.
// After dropping stagedLock, character input is conditionally re-enabled via
// Allowi(); the comments below honestly note the dwDMAFlag locking smell.
// NOTE(review): truncated listing - many 'if' guards, brace lines and the
// right-hand sides of several assignments (e.g. what dwSize/dwOffset are set
// from) are missing; confirm details against the full source.
520 static void staged_callback(struct urb *pUrb)
522 DEVICE_EXTENSION *pdx = pUrb->context;
523 unsigned int nGot = pUrb->actual_length; // what we transferred
524 bool bCancel = false;
525 bool bRestartCharInput; // used at the end
527 spin_lock(&pdx->stagedLock); // stop ReadWriteMem() action while this routine is running
528 pdx->bStagedUrbPending = false; // clear the flag for staged IRP pending
530 if (pUrb->status) { // sync/async unlink faults aren't errors
532 (pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
533 || pUrb->status == -ESHUTDOWN)) {
534 dev_err(&pdx->interface->dev,
535 "%s - nonzero write bulk status received: %d",
536 __func__, pUrb->status);
538 dev_info(&pdx->interface->dev,
539 "%s - staged xfer cancelled", __func__);
541 spin_lock(&pdx->err_lock);
542 pdx->errors = pUrb->status;
543 spin_unlock(&pdx->err_lock);
544 nGot = 0; // and tidy up again if so
547 dev_dbg(&pdx->interface->dev, "%s %d chars xferred", __func__,
549 if (pdx->StagedRead) // if reading, save to user space
550 CopyUserSpace(pdx, nGot); // copy from buffer to user
552 dev_dbg(&pdx->interface->dev, "%s ZLP", __func__);
555 // Update the transfer length based on the TransferBufferLength value in the URB
556 pdx->StagedDone += nGot;
558 dev_dbg(&pdx->interface->dev, "%s, done %d bytes of %d", __func__,
559 pdx->StagedDone, pdx->StagedLength);
561 if ((pdx->StagedDone == pdx->StagedLength) || // If no more to do
562 (bCancel)) // or this IRP was cancelled
564 TRANSAREA *pArea = &pdx->rTransDef[pdx->StagedId]; // Transfer area info
565 dev_dbg(&pdx->interface->dev,
566 "%s transfer done, bytes %d, cancel %d", __func__,
567 pdx->StagedDone, bCancel);
569 // Here is where we sort out what to do with this transfer if using a circular buffer. We have
570 // a completed transfer that can be assumed to fit into the transfer area. We should be able to
571 // add this to the end of a growing block or to use it to start a new block unless the code
572 // that calculates the offset to use (in ReadWriteMem) is totally duff.
573 if ((pArea->bCircular) && (pArea->bCircToHost) && (!bCancel) && // Time to sort out circular buffer info?
574 (pdx->StagedRead)) // Only for tohost transfers for now
576 if (pArea->aBlocks[1].dwSize > 0) // If block 1 is in use we must append to it
578 if (pdx->StagedOffset ==
579 (pArea->aBlocks[1].dwOffset +
580 pArea->aBlocks[1].dwSize)) {
581 pArea->aBlocks[1].dwSize +=
583 dev_dbg(&pdx->interface->dev,
584 "RWM_Complete, circ block 1 now %d bytes at %d",
585 pArea->aBlocks[1].dwSize,
586 pArea->aBlocks[1].dwOffset);
588 // Here things have gone very, very, wrong, but I cannot see how this can actually be achieved
589 pArea->aBlocks[1].dwOffset =
591 pArea->aBlocks[1].dwSize =
593 dev_err(&pdx->interface->dev,
594 "%s ERROR, circ block 1 re-started %d bytes at %d",
596 pArea->aBlocks[1].dwSize,
597 pArea->aBlocks[1].dwOffset);
599 } else // If block 1 is not used, we try to add to block 0
601 if (pArea->aBlocks[0].dwSize > 0) // Got stored block 0 information?
602 { // Must append onto the existing block 0
603 if (pdx->StagedOffset ==
604 (pArea->aBlocks[0].dwOffset +
605 pArea->aBlocks[0].dwSize)) {
606 pArea->aBlocks[0].dwSize += pdx->StagedLength; // Just add this transfer in
607 dev_dbg(&pdx->interface->dev,
608 "RWM_Complete, circ block 0 now %d bytes at %d",
613 } else // If it doesn't append, put into new block 1
615 pArea->aBlocks[1].dwOffset =
617 pArea->aBlocks[1].dwSize =
619 dev_dbg(&pdx->interface->dev,
620 "RWM_Complete, circ block 1 started %d bytes at %d",
626 } else // No info stored yet, just save in block 0
628 pArea->aBlocks[0].dwOffset =
630 pArea->aBlocks[0].dwSize =
632 dev_dbg(&pdx->interface->dev,
633 "RWM_Complete, circ block 0 started %d bytes at %d",
634 pArea->aBlocks[0].dwSize,
635 pArea->aBlocks[0].dwOffset);
640 if (!bCancel) // Don't generate an event if cancelled
642 dev_dbg(&pdx->interface->dev,
643 "RWM_Complete, bCircular %d, bToHost %d, eStart %d, eSize %d",
644 pArea->bCircular, pArea->bEventToHost,
645 pArea->dwEventSt, pArea->dwEventSz);
646 if ((pArea->dwEventSz) && // Set a user-mode event...
647 (pdx->StagedRead == pArea->bEventToHost)) // ...on transfers in this direction?
649 int iWakeUp = 0; // assume
650 // If we have completed the right sort of DMA transfer then set the event to notify
651 // the user code to wake up anyone that is waiting.
652 if ((pArea->bCircular) && // Circular areas use a simpler test
653 (pArea->bCircToHost)) // only in supported direction
654 { // Is total data waiting up to size limit?
655 unsigned int dwTotal =
656 pArea->aBlocks[0].dwSize +
657 pArea->aBlocks[1].dwSize;
658 iWakeUp = (dwTotal >= pArea->dwEventSz);
660 unsigned int transEnd =
663 unsigned int eventEnd =
664 pArea->dwEventSt + pArea->dwEventSz;
665 iWakeUp = (pdx->StagedOffset < eventEnd)
666 && (transEnd > pArea->dwEventSt);
670 dev_dbg(&pdx->interface->dev,
671 "About to set event to notify app");
672 wake_up_interruptible(&pArea->wqEvent); // wake up waiting processes
673 ++pArea->iWakeUp; // increment wakeup count
678 pdx->dwDMAFlag = MODE_CHAR; // Switch back to char mode before ReadWriteMem call
680 if (!bCancel) // Don't look for waiting transfer if cancelled
682 // If we have a transfer waiting, kick it off
683 if (pdx->bXFerWaiting) // Got a block xfer waiting?
686 dev_info(&pdx->interface->dev,
687 "*** RWM_Complete *** pending transfer will now be set up!!!");
689 ReadWriteMem(pdx, !pdx->rDMAInfo.bOutWard,
690 pdx->rDMAInfo.wIdent,
691 pdx->rDMAInfo.dwOffset,
692 pdx->rDMAInfo.dwSize);
695 dev_err(&pdx->interface->dev,
696 "RWM_Complete rw setup failed %d",
701 } else // Here for more to do
702 StageChunk(pdx); // fire off the next bit
704 // While we hold the stagedLock, see if we should reallow character input ints
705 // Don't allow if cancelled, or if a new block has started or if there is a waiting block.
706 // This feels wrong as we should ask which spin lock protects dwDMAFlag.
707 bRestartCharInput = !bCancel && (pdx->dwDMAFlag == MODE_CHAR)
708 && !pdx->bXFerWaiting;
710 spin_unlock(&pdx->stagedLock); // Finally release the lock again
712 // This is not correct as dwDMAFlag is protected by the staged lock, but it is treated
713 // in Allowi as if it were protected by the char lock. In any case, most systems will
714 // not be upset by char input during DMA... sigh. Needs sorting out.
715 if (bRestartCharInput) // may be out of date, but...
716 Allowi(pdx, true); // ...Allowi tests a lock too.
717 dev_dbg(&pdx->interface->dev, "%s done", __func__);
720 /****************************************************************************
723 ** Generates the next chunk of data making up a staged transfer.
725 ** The calling code must have acquired the staging spinlock before calling
726 ** this function, and is responsible for releasing it. We are at callback level.
****************************************************************************/
// Picks pipe 3 (read) or 2 (write), shifted down by one on 3-pipe devices.
// Limits the chunk to STAGED_SZ (STAGED_SZ is a multiple of the bulk packet
// size per the file header, so the 1401 never sees a short packet mid-run).
// For writes the user data is copied into the coherent buffer first. The
// staged urb is anchored and submitted with GFP_ATOMIC (callback level); on
// submit failure it is unanchored and the pipe error flag is set, otherwise
// bStagedUrbPending marks the urb in flight.
// NOTE(review): truncated listing - the 'if (iReturn)' around the failure
// handling, some returns and braces are not visible here.
727 ****************************************************************************/
728 static int StageChunk(DEVICE_EXTENSION * pdx)
730 int iReturn = U14ERR_NOERROR;
731 unsigned int ChunkSize;
732 int nPipe = pdx->StagedRead ? 3 : 2; // The pipe number to use for reads or writes
733 if (pdx->nPipes == 3)
734 nPipe--; // Adjust for the 3-pipe case
735 if (nPipe < 0) // and trap case that should never happen
738 if (!CanAcceptIoRequests(pdx)) // got sudden remove?
740 dev_info(&pdx->interface->dev, "%s sudden remove, giving up",
742 return U14ERR_FAIL; // could do with a better error
745 ChunkSize = (pdx->StagedLength - pdx->StagedDone); // transfer length remaining
746 if (ChunkSize > STAGED_SZ) // make sure to keep legal
747 ChunkSize = STAGED_SZ; // limit to max allowed
749 if (!pdx->StagedRead) // if writing...
750 CopyUserSpace(pdx, ChunkSize); // ...copy data into the buffer
752 usb_fill_bulk_urb(pdx->pStagedUrb, pdx->udev,
753 pdx->StagedRead ? usb_rcvbulkpipe(pdx->udev,
756 usb_sndbulkpipe(pdx->udev, pdx->epAddr[nPipe]),
757 pdx->pCoherStagedIO, ChunkSize, staged_callback, pdx);
758 pdx->pStagedUrb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
759 usb_anchor_urb(pdx->pStagedUrb, &pdx->submitted); // in case we need to kill it
760 iReturn = usb_submit_urb(pdx->pStagedUrb, GFP_ATOMIC);
762 usb_unanchor_urb(pdx->pStagedUrb); // kill it
763 pdx->bPipeError[nPipe] = 1; // Flag an error to be handled later
764 dev_err(&pdx->interface->dev, "%s submit urb failed, code %d",
767 pdx->bStagedUrbPending = true; // Set the flag for staged URB pending
768 dev_dbg(&pdx->interface->dev, "%s done so far:%d, this size:%d",
769 __func__, pdx->StagedDone, ChunkSize);
774 /***************************************************************************
777 ** This routine is used generally for block read and write operations.
778 ** Breaks up a read or write in to specified sized chunks, as specified by pipe
779 ** information on maximum transfer size.
781 ** Any code that calls this must be holding the stagedLock
784 ** DeviceObject - pointer to our FDO (Functional Device Object)
785 ** Read - TRUE for read, FALSE for write. This is from POV of the driver
786 ** wIdent - the transfer area number - defines memory area and more.
787 ** dwOffs - the start offset within the transfer area of the start of this
789 ** dwLen - the number of bytes to transfer.
// Returns a U14ERR_ code. Notable behaviours visible below:
//  - If a staged urb is already in flight, the request is parked by setting
//    bXFerWaiting (staged_callback relaunches it) and success is returned.
//  - Zero-length transfers succeed immediately.
//  - For circular to-host reads the offset is computed by the driver itself:
//    append after aBlocks[1] if in use, otherwise after aBlocks[0]; wrap to
//    a fresh aBlocks[1] at offset 0 when the end of the area is hit. If the
//    data would overwrite block 0 or overrun the area, the transfer waits
//    (bXFerWaiting) instead of starting.
//  - Otherwise the Staged* fields are latched, dwDMAFlag goes to
//    MODE_LINEAR, and StageChunk() fires the first chunk.
// NOTE(review): truncated listing - several returns, braces and the wrap
// handling around "Off the end of the buffer?" are not fully visible here.
791 int ReadWriteMem(DEVICE_EXTENSION * pdx, bool Read, unsigned short wIdent,
792 unsigned int dwOffs, unsigned int dwLen)
794 TRANSAREA *pArea = &pdx->rTransDef[wIdent]; // Transfer area info
796 if (!CanAcceptIoRequests(pdx)) // Are we in a state to accept new requests?
798 dev_err(&pdx->interface->dev, "%s can't accept requests",
803 dev_dbg(&pdx->interface->dev,
804 "%s xfer %d bytes to %s, offset %d, area %d", __func__, dwLen,
805 Read ? "host" : "1401", dwOffs, wIdent);
807 // Amazingly, we can get an escape sequence back before the current staged Urb is done, so we
808 // have to check for this situation and, if so, wait until all is OK.
809 if (pdx->bStagedUrbPending) {
810 pdx->bXFerWaiting = true; // Flag we are waiting
811 dev_info(&pdx->interface->dev,
812 "%s xfer is waiting, as previous staged pending",
814 return U14ERR_NOERROR;
817 if (dwLen == 0) // allow 0-len read or write; just return success
819 dev_dbg(&pdx->interface->dev,
820 "%s OK; zero-len read/write request", __func__);
821 return U14ERR_NOERROR;
824 if ((pArea->bCircular) && // Circular transfer?
825 (pArea->bCircToHost) && (Read)) // In a supported direction
826 { // If so, we sort out offset ourself
827 bool bWait = false; // Flag for transfer having to wait
829 dev_dbg(&pdx->interface->dev,
830 "Circular buffers are %d at %d and %d at %d",
831 pArea->aBlocks[0].dwSize, pArea->aBlocks[0].dwOffset,
832 pArea->aBlocks[1].dwSize, pArea->aBlocks[1].dwOffset);
833 if (pArea->aBlocks[1].dwSize > 0) // Using the second block already?
835 dwOffs = pArea->aBlocks[1].dwOffset + pArea->aBlocks[1].dwSize; // take offset from that
836 bWait = (dwOffs + dwLen) > pArea->aBlocks[0].dwOffset; // Wait if will overwrite block 0?
837 bWait |= (dwOffs + dwLen) > pArea->dwLength; // or if it overflows the buffer
838 } else // Area 1 not in use, try to use area 0
840 if (pArea->aBlocks[0].dwSize == 0) // Reset block 0 if not in use
841 pArea->aBlocks[0].dwOffset = 0;
843 pArea->aBlocks[0].dwOffset +
844 pArea->aBlocks[0].dwSize;
845 if ((dwOffs + dwLen) > pArea->dwLength) // Off the end of the buffer?
847 pArea->aBlocks[1].dwOffset = 0; // Set up to use second block
849 bWait = (dwOffs + dwLen) > pArea->aBlocks[0].dwOffset; // Wait if will overwrite block 0?
850 bWait |= (dwOffs + dwLen) > pArea->dwLength; // or if it overflows the buffer
854 if (bWait) // This transfer will have to wait?
856 pdx->bXFerWaiting = true; // Flag we are waiting
857 dev_dbg(&pdx->interface->dev,
858 "%s xfer waiting for circular buffer space",
860 return U14ERR_NOERROR;
863 dev_dbg(&pdx->interface->dev,
864 "%s circular xfer, %d bytes starting at %d", __func__,
867 // Save the parameters for the read\write transfer
868 pdx->StagedRead = Read; // Save the parameters for this read
869 pdx->StagedId = wIdent; // ID allows us to get transfer area info
870 pdx->StagedOffset = dwOffs; // The area within the transfer area
871 pdx->StagedLength = dwLen;
872 pdx->StagedDone = 0; // Initialise the byte count
873 pdx->dwDMAFlag = MODE_LINEAR; // Set DMA mode flag at this point
874 pdx->bXFerWaiting = false; // Clearly not a transfer waiting now
876 // KeClearEvent(&pdx->StagingDoneEvent); // Clear the transfer done event
877 StageChunk(pdx); // fire off the first chunk
879 return U14ERR_NOERROR;
882 /****************************************************************************
886 ** Reads a character from a buffer. If there is no more
887 ** data we return FALSE. Used as part of decoding a DMA request.
889 ****************************************************************************/
// ReadChar: pull the next byte out of pBuf if *pdDone has not yet reached
// dGot (the count of valid bytes). On success stores the byte via pChar,
// advances the done counter and returns true; returns false when the buffer
// is exhausted. Used when decoding 1401 DMA request messages.
// NOTE(review): truncated listing - the dGot parameter line, the write-back
// of dDone to *pdDone and the return are not visible here.
890 static bool ReadChar(unsigned char *pChar, char *pBuf, unsigned int *pdDone,
894 unsigned int dDone = *pdDone;
896 if (dDone < dGot) // If there is more data
898 *pChar = (unsigned char)pBuf[dDone]; // Extract the next char
899 dDone++; // Increment the done count
901 bRead = true; // and flag success
/****************************************************************************
** ReadWord
**
** Reads a 16-bit word from the buffer (low byte first), just uses ReadChar
** twice; passes on any error (returns false if the buffer runs out).
*****************************************************************************/
static bool ReadWord(unsigned short *pWord, char *pBuf, unsigned int *pdDone,
		     unsigned int dGot)
{
	if (ReadChar((unsigned char *)pWord, pBuf, pdDone, dGot))
		return ReadChar(((unsigned char *)pWord) + 1, pBuf, pdDone,
				dGot);
	else
		return false;
}
/****************************************************************************
** ReadHuff
**
** Reads a variable-length coded number and returns it in *pDWord. Code is:
** If data is in range 0..127 we receive 1 byte. If data in range 128-16383
** we receive two bytes, top bit of first indicates another on its way. If
** data in range 16384-4194303 we get three bytes, top two bits of first set
** to indicate three byte total. Returns false if the buffer runs out early.
*****************************************************************************/
static bool ReadHuff(volatile unsigned int *pDWord, char *pBuf,
		     unsigned int *pdDone, unsigned int dGot)
{
	unsigned char ucData;	/* for each read to ReadChar */
	bool bReturn = true;	/* assume we will succeed */
	unsigned int dwData = 0;	/* Accumulator for the data */

	if (ReadChar(&ucData, pBuf, pdDone, dGot)) {
		dwData = ucData;	/* copy the data */
		if ((dwData & 0x00000080) != 0) {	/* Bit set for more data ? */
			dwData &= 0x0000007F;	/* Clear the continuation bit */
			if (ReadChar(&ucData, pBuf, pdDone, dGot)) {
				dwData = (dwData << 8) | ucData;
				if ((dwData & 0x00004000) != 0) {	/* three byte sequence ? */
					dwData &= 0x00003FFF;	/* Clear the continuation bit */
					if (ReadChar
					    (&ucData, pBuf, pdDone, dGot))
						dwData = (dwData << 8) | ucData;
					else
						bReturn = false;	/* third byte missing */
				}
			} else
				bReturn = false;	/* couldn't read second byte */
		}
	} else
		bReturn = false;	/* couldn't read first byte */

	*pDWord = dwData;	/* return the data (0 on total failure) */
	return bReturn;
}
967 /***************************************************************************
971 ** Tries to read info about the dma request from the 1401 and decode it into
972 ** the dma descriptor block. We have at this point had the escape character
973 ** from the 1401 and now we must read in the rest of the information about
974 ** the transfer request. Returns FALSE if 1401 fails to respond or obselete
975 ** code from 1401 or bad parameters.
977 ** The pBuf char pointer does not include the initial escape character, so
978 ** we start handling the data at offset zero.
980 *****************************************************************************/
981 static bool ReadDMAInfo(volatile DMADESC * pDmaDesc, DEVICE_EXTENSION * pdx,
982 char *pBuf, unsigned int dwCount)
984 bool bResult = false; // assume we won't succeed
985 unsigned char ucData;
986 unsigned int dDone = 0; // We haven't parsed anything so far
988 dev_dbg(&pdx->interface->dev, "%s", __func__);
990 if (ReadChar(&ucData, pBuf, &dDone, dwCount)) {
991 unsigned char ucTransCode = (ucData & 0x0F); // get code for transfer type
992 unsigned short wIdent = ((ucData >> 4) & 0x07); // and area identifier
994 // fill in the structure we were given
995 pDmaDesc->wTransType = ucTransCode; // type of transfer
996 pDmaDesc->wIdent = wIdent; // area to use
997 pDmaDesc->dwSize = 0; // initialise other bits
998 pDmaDesc->dwOffset = 0;
1000 dev_dbg(&pdx->interface->dev, "%s type: %d ident: %d", __func__,
1001 pDmaDesc->wTransType, pDmaDesc->wIdent);
1003 pDmaDesc->bOutWard = (ucTransCode != TM_EXTTOHOST); // set transfer direction
1005 switch (ucTransCode) {
1006 case TM_EXTTOHOST: // Extended linear transfer modes (the only ones!)
1010 ReadHuff(&(pDmaDesc->dwOffset), pBuf,
1012 && ReadHuff(&(pDmaDesc->dwSize), pBuf,
1015 dev_dbg(&pdx->interface->dev,
1016 "%s xfer offset & size %d %d",
1017 __func__, pDmaDesc->dwOffset,
1020 if ((wIdent >= MAX_TRANSAREAS) || // Illegal area number, or...
1021 (!pdx->rTransDef[wIdent].bUsed) || // area not set up, or...
1022 (pDmaDesc->dwOffset > pdx->rTransDef[wIdent].dwLength) || // range/size
1023 ((pDmaDesc->dwOffset +
1025 (pdx->rTransDef[wIdent].
1027 bResult = false; // bad parameter(s)
1028 dev_dbg(&pdx->interface->dev,
1029 "%s bad param - id %d, bUsed %d, offset %d, size %d, area length %d",
1031 pdx->rTransDef[wIdent].
1035 pdx->rTransDef[wIdent].
1047 if (!bResult) // now check parameters for validity
1048 dev_err(&pdx->interface->dev, "%s error reading Esc sequence",
1054 /****************************************************************************
1058 ** Deals with an escape sequence coming from the 1401. This can either be
1059 ** a DMA transfer request of various types or a response to an escape sequence
1060 ** sent to the 1401. This is called from a callback.
1064 ** dwCount - the number of characters in the device extension char in buffer,
1065 ** this is known to be at least 2 or we will not be called.
1067 ****************************************************************************/
1068 static int Handle1401Esc(DEVICE_EXTENSION * pdx, char *pCh,
1069 unsigned int dwCount)
1071 int iReturn = U14ERR_FAIL;
1073 // I have no idea what this next test is about. '?' is 0x3f, which is area 3, code
1074 // 15. At the moment, this is not used, so it does no harm, but unless someone can
1075 // tell me what this is for, it should be removed from this and the Windows driver.
1076 if (pCh[0] == '?') // Is this an information response
1077 { // Parse and save the information
1079 spin_lock(&pdx->stagedLock); // Lock others out
1081 if (ReadDMAInfo(&pdx->rDMAInfo, pdx, pCh, dwCount)) // Get DMA parameters
1083 unsigned short wTransType = pdx->rDMAInfo.wTransType; // check transfer type
1085 dev_dbg(&pdx->interface->dev,
1086 "%s xfer to %s, offset %d, length %d", __func__,
1087 pdx->rDMAInfo.bOutWard ? "1401" : "host",
1088 pdx->rDMAInfo.dwOffset, pdx->rDMAInfo.dwSize);
1090 if (pdx->bXFerWaiting) // Check here for badly out of kilter...
1091 { // This can never happen, really
1092 dev_err(&pdx->interface->dev,
1093 "ERROR: DMA setup while transfer still waiting");
1094 spin_unlock(&pdx->stagedLock);
1096 if ((wTransType == TM_EXTTOHOST)
1097 || (wTransType == TM_EXTTO1401)) {
1102 pdx->rDMAInfo.wIdent,
1103 pdx->rDMAInfo.dwOffset,
1104 pdx->rDMAInfo.dwSize);
1105 if (iReturn != U14ERR_NOERROR)
1106 dev_err(&pdx->interface->dev,
1107 "%s ReadWriteMem() failed %d",
1109 } else // This covers non-linear transfer setup
1110 dev_err(&pdx->interface->dev,
1111 "%s Unknown block xfer type %d",
1112 __func__, wTransType);
1114 } else // Failed to read parameters
1115 dev_err(&pdx->interface->dev, "%s ReadDMAInfo() fail",
1118 spin_unlock(&pdx->stagedLock); // OK here
1121 dev_dbg(&pdx->interface->dev, "%s returns %d", __func__, iReturn);
1126 /****************************************************************************
1127 ** Callback for the character read complete or error
1128 ****************************************************************************/
1129 static void ced_readchar_callback(struct urb *pUrb)
1131 DEVICE_EXTENSION *pdx = pUrb->context;
1132 int nGot = pUrb->actual_length; // what we transferred
1134 if (pUrb->status) // Do we have a problem to handle?
1136 int nPipe = pdx->nPipes == 4 ? 1 : 0; // The pipe number to use for error
1137 // sync/async unlink faults aren't errors... just saying device removed or stopped
1139 (pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
1140 || pUrb->status == -ESHUTDOWN)) {
1141 dev_err(&pdx->interface->dev,
1142 "%s - nonzero write bulk status received: %d",
1143 __func__, pUrb->status);
1145 dev_dbg(&pdx->interface->dev,
1146 "%s - 0 chars pUrb->status=%d (shutdown?)",
1147 __func__, pUrb->status);
1149 spin_lock(&pdx->err_lock);
1150 pdx->errors = pUrb->status;
1151 spin_unlock(&pdx->err_lock);
1152 nGot = 0; // and tidy up again if so
1154 spin_lock(&pdx->charInLock); // already at irq level
1155 pdx->bPipeError[nPipe] = 1; // Flag an error for later
1157 if ((nGot > 1) && ((pdx->pCoherCharIn[0] & 0x7f) == 0x1b)) // Esc sequence?
1159 Handle1401Esc(pdx, &pdx->pCoherCharIn[1], nGot - 1); // handle it
1160 spin_lock(&pdx->charInLock); // already at irq level
1162 spin_lock(&pdx->charInLock); // already at irq level
1165 if (nGot < INBUF_SZ) {
1166 pdx->pCoherCharIn[nGot] = 0; // tidy the string
1167 dev_dbg(&pdx->interface->dev,
1168 "%s got %d chars >%s<",
1172 // We know that whatever we read must fit in the input buffer
1173 for (i = 0; i < nGot; i++) {
1174 pdx->inputBuffer[pdx->dwInBuffPut++] =
1175 pdx->pCoherCharIn[i] & 0x7F;
1176 if (pdx->dwInBuffPut >= INBUF_SZ)
1177 pdx->dwInBuffPut = 0;
1180 if ((pdx->dwNumInput + nGot) <= INBUF_SZ)
1181 pdx->dwNumInput += nGot; // Adjust the buffer count accordingly
1183 dev_dbg(&pdx->interface->dev, "%s read ZLP",
1188 pdx->bReadCharsPending = false; // No longer have a pending read
1189 spin_unlock(&pdx->charInLock); // already at irq level
1191 Allowi(pdx, true); // see if we can do the next one
1194 /****************************************************************************
1197 ** This is used to make sure that there is always a pending input transfer so
1198 ** we can pick up any inward transfers. This can be called in multiple contexts
1199 ** so we use the irqsave version of the spinlock.
1200 ****************************************************************************/
1201 int Allowi(DEVICE_EXTENSION * pdx, bool bInCallback)
1203 int iReturn = U14ERR_NOERROR;
1204 unsigned long flags;
1205 spin_lock_irqsave(&pdx->charInLock, flags); // can be called in multiple contexts
1207 // We don't want char input running while DMA is in progress as we know that this
1208 // can cause sequencing problems for the 2270. So don't. It will also allow the
1209 // ERR response to get back to the host code too early on some PCs, even if there
1210 // is no actual driver failure, so we don't allow this at all.
1211 if (!pdx->bInDrawDown && // stop input if
1212 !pdx->bReadCharsPending && // If no read request outstanding
1213 (pdx->dwNumInput < (INBUF_SZ / 2)) && // and there is some space
1214 (pdx->dwDMAFlag == MODE_CHAR) && // not doing any DMA
1215 (!pdx->bXFerWaiting) && // no xfer waiting to start
1216 (CanAcceptIoRequests(pdx))) // and activity is generally OK
1218 unsigned int nMax = INBUF_SZ - pdx->dwNumInput; // max we could read
1219 int nPipe = pdx->nPipes == 4 ? 1 : 0; // The pipe number to use
1221 dev_dbg(&pdx->interface->dev, "%s %d chars in input buffer",
1222 __func__, pdx->dwNumInput);
1224 usb_fill_int_urb(pdx->pUrbCharIn, pdx->udev,
1225 usb_rcvintpipe(pdx->udev, pdx->epAddr[nPipe]),
1226 pdx->pCoherCharIn, nMax, ced_readchar_callback,
1227 pdx, pdx->bInterval);
1228 pdx->pUrbCharIn->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; // short xfers are OK by default
1229 usb_anchor_urb(pdx->pUrbCharIn, &pdx->submitted); // in case we need to kill it
1231 usb_submit_urb(pdx->pUrbCharIn,
1232 bInCallback ? GFP_ATOMIC : GFP_KERNEL);
1234 usb_unanchor_urb(pdx->pUrbCharIn); // remove from list of active Urbs
1235 pdx->bPipeError[nPipe] = 1; // Flag an error to be handled later
1236 dev_err(&pdx->interface->dev,
1237 "%s submit urb failed: %d", __func__, iReturn);
1239 pdx->bReadCharsPending = true; // Flag that we are active here
1242 spin_unlock_irqrestore(&pdx->charInLock, flags);
1248 /*****************************************************************************
1249 ** The ioctl entry point to the driver that is used by us to talk to it.
1250 ** inode The device node (no longer in 3.0.0 kernels)
1251 ** file The file that is open, which holds our pdx pointer
1252 ** ulArg The argument passed in. Note that long is 64-bits in 64-bit system, i.e. it is big
1253 ** enough for a 64-bit pointer.
1254 *****************************************************************************/
1255 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
1256 static long ced_ioctl(struct file *file, unsigned int cmd, unsigned long ulArg)
1258 static int ced_ioctl(struct inode *node, struct file *file, unsigned int cmd,
1259 unsigned long ulArg)
1263 DEVICE_EXTENSION *pdx = file->private_data;
1264 if (!CanAcceptIoRequests(pdx)) // check we still exist
1267 // Check that access is allowed, where is is needed. Anything that would have an indeterminate
1268 // size will be checked by the specific command.
1269 if (_IOC_DIR(cmd) & _IOC_READ) // read from point of view of user...
1270 err = !access_ok(VERIFY_WRITE, (void __user *)ulArg, _IOC_SIZE(cmd)); // is kernel write
1271 else if (_IOC_DIR(cmd) & _IOC_WRITE) // and write from point of view of user...
1272 err = !access_ok(VERIFY_READ, (void __user *)ulArg, _IOC_SIZE(cmd)); // is kernel read
1276 switch (_IOC_NR(cmd)) {
1277 case _IOC_NR(IOCTL_CED_SENDSTRING(0)):
1278 return SendString(pdx, (const char __user *)ulArg,
1281 case _IOC_NR(IOCTL_CED_RESET1401):
1282 return Reset1401(pdx);
1284 case _IOC_NR(IOCTL_CED_GETCHAR):
1285 return GetChar(pdx);
1287 case _IOC_NR(IOCTL_CED_SENDCHAR):
1288 return SendChar(pdx, (char)ulArg);
1290 case _IOC_NR(IOCTL_CED_STAT1401):
1291 return Stat1401(pdx);
1293 case _IOC_NR(IOCTL_CED_LINECOUNT):
1294 return LineCount(pdx);
1296 case _IOC_NR(IOCTL_CED_GETSTRING(0)):
1297 return GetString(pdx, (char __user *)ulArg, _IOC_SIZE(cmd));
1299 case _IOC_NR(IOCTL_CED_SETTRANSFER):
1300 return SetTransfer(pdx, (TRANSFERDESC __user *) ulArg);
1302 case _IOC_NR(IOCTL_CED_UNSETTRANSFER):
1303 return UnsetTransfer(pdx, (int)ulArg);
1305 case _IOC_NR(IOCTL_CED_SETEVENT):
1306 return SetEvent(pdx, (TRANSFEREVENT __user *) ulArg);
1308 case _IOC_NR(IOCTL_CED_GETOUTBUFSPACE):
1309 return GetOutBufSpace(pdx);
1311 case _IOC_NR(IOCTL_CED_GETBASEADDRESS):
1314 case _IOC_NR(IOCTL_CED_GETDRIVERREVISION):
1315 return (2 << 24) | (DRIVERMAJREV << 16) | DRIVERMINREV; // USB | MAJOR | MINOR
1317 case _IOC_NR(IOCTL_CED_GETTRANSFER):
1318 return GetTransfer(pdx, (TGET_TX_BLOCK __user *) ulArg);
1320 case _IOC_NR(IOCTL_CED_KILLIO1401):
1321 return KillIO1401(pdx);
1323 case _IOC_NR(IOCTL_CED_STATEOF1401):
1324 return StateOf1401(pdx);
1326 case _IOC_NR(IOCTL_CED_GRAB1401):
1327 case _IOC_NR(IOCTL_CED_FREE1401):
1328 return U14ERR_NOERROR;
1330 case _IOC_NR(IOCTL_CED_STARTSELFTEST):
1331 return StartSelfTest(pdx);
1333 case _IOC_NR(IOCTL_CED_CHECKSELFTEST):
1334 return CheckSelfTest(pdx, (TGET_SELFTEST __user *) ulArg);
1336 case _IOC_NR(IOCTL_CED_TYPEOF1401):
1337 return TypeOf1401(pdx);
1339 case _IOC_NR(IOCTL_CED_TRANSFERFLAGS):
1340 return TransferFlags(pdx);
1342 case _IOC_NR(IOCTL_CED_DBGPEEK):
1343 return DbgPeek(pdx, (TDBGBLOCK __user *) ulArg);
1345 case _IOC_NR(IOCTL_CED_DBGPOKE):
1346 return DbgPoke(pdx, (TDBGBLOCK __user *) ulArg);
1348 case _IOC_NR(IOCTL_CED_DBGRAMPDATA):
1349 return DbgRampData(pdx, (TDBGBLOCK __user *) ulArg);
1351 case _IOC_NR(IOCTL_CED_DBGRAMPADDR):
1352 return DbgRampAddr(pdx, (TDBGBLOCK __user *) ulArg);
1354 case _IOC_NR(IOCTL_CED_DBGGETDATA):
1355 return DbgGetData(pdx, (TDBGBLOCK __user *) ulArg);
1357 case _IOC_NR(IOCTL_CED_DBGSTOPLOOP):
1358 return DbgStopLoop(pdx);
1360 case _IOC_NR(IOCTL_CED_FULLRESET):
1361 pdx->bForceReset = true; // Set a flag for a full reset
1364 case _IOC_NR(IOCTL_CED_SETCIRCULAR):
1365 return SetCircular(pdx, (TRANSFERDESC __user *) ulArg);
1367 case _IOC_NR(IOCTL_CED_GETCIRCBLOCK):
1368 return GetCircBlock(pdx, (TCIRCBLOCK __user *) ulArg);
1370 case _IOC_NR(IOCTL_CED_FREECIRCBLOCK):
1371 return FreeCircBlock(pdx, (TCIRCBLOCK __user *) ulArg);
1373 case _IOC_NR(IOCTL_CED_WAITEVENT):
1374 return WaitEvent(pdx, (int)(ulArg & 0xff), (int)(ulArg >> 8));
1376 case _IOC_NR(IOCTL_CED_TESTEVENT):
1377 return TestEvent(pdx, (int)ulArg);
1380 return U14ERR_NO_SUCH_FN;
1382 return U14ERR_NOERROR;
1385 static const struct file_operations ced_fops = {
1386 .owner = THIS_MODULE,
1388 .release = ced_release,
1390 .llseek = noop_llseek,
1391 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
1392 .unlocked_ioctl = ced_ioctl,
1399 * usb class driver info in order to get a minor number from the usb core,
1400 * and to have the device registered with the driver core
1402 static struct usb_class_driver ced_class = {
1405 .minor_base = USB_CED_MINOR_BASE,
1408 // Check that the device that matches a 1401 vendor and product ID is OK to use and
1409 // initialise our DEVICE_EXTENSION.
1410 static int ced_probe(struct usb_interface *interface,
1411 const struct usb_device_id *id)
1413 DEVICE_EXTENSION *pdx;
1414 struct usb_host_interface *iface_desc;
1415 struct usb_endpoint_descriptor *endpoint;
1417 int retval = -ENOMEM;
1419 // allocate memory for our device extension and initialize it
1420 pdx = kzalloc(sizeof(*pdx), GFP_KERNEL);
1422 dev_err(&interface->dev, "Out of memory\n");
1426 for (i = 0; i < MAX_TRANSAREAS; ++i) // Initialise the wait queues
1428 init_waitqueue_head(&pdx->rTransDef[i].wqEvent);
1431 // Put initialises for our stuff here. Note that all of *pdx is zero, so
1432 // no need to explicitly zero it.
1433 spin_lock_init(&pdx->charOutLock);
1434 spin_lock_init(&pdx->charInLock);
1435 spin_lock_init(&pdx->stagedLock);
1437 // Initialises from the skeleton stuff
1438 kref_init(&pdx->kref);
1439 mutex_init(&pdx->io_mutex);
1440 spin_lock_init(&pdx->err_lock);
1441 init_usb_anchor(&pdx->submitted);
1443 pdx->udev = usb_get_dev(interface_to_usbdev(interface));
1444 pdx->interface = interface;
1446 // Attempt to identify the device
1447 bcdDevice = pdx->udev->descriptor.bcdDevice;
1448 i = (bcdDevice >> 8);
1450 pdx->s1401Type = TYPEU1401;
1451 else if ((i >= 1) && (i <= 23))
1452 pdx->s1401Type = i + 2;
1454 dev_err(&interface->dev, "%s Unknown device. bcdDevice = %d",
1455 __func__, bcdDevice);
1458 // set up the endpoint information. We only care about the number of EP as
1459 // we know that we are dealing with a 1401 device.
1460 iface_desc = interface->cur_altsetting;
1461 pdx->nPipes = iface_desc->desc.bNumEndpoints;
1462 dev_info(&interface->dev, "1401Type=%d with %d End Points",
1463 pdx->s1401Type, pdx->nPipes);
1464 if ((pdx->nPipes < 3) || (pdx->nPipes > 4))
1467 // Allocate the URBs we hold for performing transfers
1468 pdx->pUrbCharOut = usb_alloc_urb(0, GFP_KERNEL); // character output URB
1469 pdx->pUrbCharIn = usb_alloc_urb(0, GFP_KERNEL); // character input URB
1470 pdx->pStagedUrb = usb_alloc_urb(0, GFP_KERNEL); // block transfer URB
1471 if (!pdx->pUrbCharOut || !pdx->pUrbCharIn || !pdx->pStagedUrb) {
1472 dev_err(&interface->dev, "%s URB alloc failed", __func__);
1476 pdx->pCoherStagedIO =
1477 usb_alloc_coherent(pdx->udev, STAGED_SZ, GFP_KERNEL,
1478 &pdx->pStagedUrb->transfer_dma);
1479 pdx->pCoherCharOut =
1480 usb_alloc_coherent(pdx->udev, OUTBUF_SZ, GFP_KERNEL,
1481 &pdx->pUrbCharOut->transfer_dma);
1483 usb_alloc_coherent(pdx->udev, INBUF_SZ, GFP_KERNEL,
1484 &pdx->pUrbCharIn->transfer_dma);
1485 if (!pdx->pCoherCharOut || !pdx->pCoherCharIn || !pdx->pCoherStagedIO) {
1486 dev_err(&interface->dev, "%s Coherent buffer alloc failed",
1491 for (i = 0; i < pdx->nPipes; ++i) {
1492 endpoint = &iface_desc->endpoint[i].desc;
1493 pdx->epAddr[i] = endpoint->bEndpointAddress;
1494 dev_info(&interface->dev, "Pipe %d, ep address %02x", i,
1496 if (((pdx->nPipes == 3) && (i == 0)) || // if char input end point
1497 ((pdx->nPipes == 4) && (i == 1))) {
1498 pdx->bInterval = endpoint->bInterval; // save the endpoint interrupt interval
1499 dev_info(&interface->dev, "Pipe %d, bInterval = %d", i,
1502 // Detect USB2 by checking last ep size (64 if USB1)
1503 if (i == pdx->nPipes - 1) // if this is the last ep (bulk)
1506 le16_to_cpu(endpoint->wMaxPacketSize) > 64;
1507 dev_info(&pdx->interface->dev, "USB%d",
1512 /* save our data pointer in this interface device */
1513 usb_set_intfdata(interface, pdx);
1515 /* we can register the device now, as it is ready */
1516 retval = usb_register_dev(interface, &ced_class);
1518 /* something prevented us from registering this driver */
1519 dev_err(&interface->dev,
1520 "Not able to get a minor for this device.\n");
1521 usb_set_intfdata(interface, NULL);
1525 /* let the user know what node this device is now attached to */
1526 dev_info(&interface->dev,
1527 "USB CEDUSB device now attached to cedusb #%d",
1533 kref_put(&pdx->kref, ced_delete); // frees allocated memory
1537 static void ced_disconnect(struct usb_interface *interface)
1539 DEVICE_EXTENSION *pdx = usb_get_intfdata(interface);
1540 int minor = interface->minor; // save for message at the end
1543 usb_set_intfdata(interface, NULL); // remove the pdx from the interface
1544 usb_deregister_dev(interface, &ced_class); // give back our minor device number
1546 mutex_lock(&pdx->io_mutex); // stop more I/O starting while...
1547 ced_draw_down(pdx); // ...wait for then kill any io
1548 for (i = 0; i < MAX_TRANSAREAS; ++i) {
1549 int iErr = ClearArea(pdx, i); // ...release any used memory
1550 if (iErr == U14ERR_UNLOCKFAIL)
1551 dev_err(&pdx->interface->dev, "%s Area %d was in used",
1554 pdx->interface = NULL; // ...we kill off link to interface
1555 mutex_unlock(&pdx->io_mutex);
1557 usb_kill_anchored_urbs(&pdx->submitted);
1559 kref_put(&pdx->kref, ced_delete); // decrement our usage count
1561 dev_info(&interface->dev, "USB cedusb #%d now disconnected", minor);
1564 // Wait for all the urbs we know of to be done with, then kill off any that
1565 // are left. NBNB we will need to have a mechanism to stop circular xfers
1566 // from trying to fire off more urbs. We will wait up to 3 seconds for Urbs
1568 void ced_draw_down(DEVICE_EXTENSION * pdx)
1571 dev_dbg(&pdx->interface->dev, "%s called", __func__);
1573 pdx->bInDrawDown = true;
1574 time = usb_wait_anchor_empty_timeout(&pdx->submitted, 3000);
1575 if (!time) // if we timed out we kill the urbs
1577 usb_kill_anchored_urbs(&pdx->submitted);
1578 dev_err(&pdx->interface->dev, "%s timed out", __func__);
1580 pdx->bInDrawDown = false;
1583 static int ced_suspend(struct usb_interface *intf, pm_message_t message)
1585 DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
1590 dev_dbg(&pdx->interface->dev, "%s called", __func__);
1594 static int ced_resume(struct usb_interface *intf)
1596 DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
1599 dev_dbg(&pdx->interface->dev, "%s called", __func__);
1603 static int ced_pre_reset(struct usb_interface *intf)
1605 DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
1606 dev_dbg(&pdx->interface->dev, "%s", __func__);
1607 mutex_lock(&pdx->io_mutex);
1612 static int ced_post_reset(struct usb_interface *intf)
1614 DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
1615 dev_dbg(&pdx->interface->dev, "%s", __func__);
1617 /* we are sure no URBs are active - no locking needed */
1618 pdx->errors = -EPIPE;
1619 mutex_unlock(&pdx->io_mutex);
1624 static struct usb_driver ced_driver = {
1627 .disconnect = ced_disconnect,
1628 .suspend = ced_suspend,
1629 .resume = ced_resume,
1630 .pre_reset = ced_pre_reset,
1631 .post_reset = ced_post_reset,
1632 .id_table = ced_table,
1633 .supports_autosuspend = 1,
/* Standard module boilerplate: expands to module_init/module_exit pairs that
** register and deregister ced_driver with the USB core at load/unload. */
module_usb_driver(ced_driver);
MODULE_LICENSE("GPL");