2 libata-core.c - helper library for ATA
4 Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 Copyright 2003-2004 Jeff Garzik
7 The contents of this file are subject to the Open
8 Software License version 1.1 that can be found at
9 http://www.opensource.org/licenses/osl-1.1.txt and is included herein
12 Alternatively, the contents of this file may be used under the terms
13 of the GNU General Public License version 2 (the "GPL") as distributed
14 in the kernel source COPYING file, in which case the provisions of
15 the GPL are applicable instead of the above. If you wish to allow
16 the use of your version of this file only under the terms of the
17 GPL and not to allow others to use your version of this file under
18 the OSL, indicate your decision by deleting the provisions above and
19 replace them with the notice and other provisions required by the GPL.
20 If you do not delete the provisions above, a recipient may use your
21 version of this file under either the OSL or the GPL.
25 #include <linux/config.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
32 #include <linux/highmem.h>
33 #include <linux/spinlock.h>
34 #include <linux/blkdev.h>
35 #include <linux/delay.h>
36 #include <linux/timer.h>
37 #include <linux/interrupt.h>
38 #include <linux/completion.h>
39 #include <linux/suspend.h>
40 #include <linux/workqueue.h>
41 #include <scsi/scsi.h>
43 #include "scsi_priv.h"
44 #include <scsi/scsi_host.h>
45 #include <linux/libata.h>
47 #include <asm/semaphore.h>
48 #include <asm/byteorder.h>
52 static unsigned int ata_busy_sleep (struct ata_port *ap,
53 unsigned long tmout_pat,
55 static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
56 static void ata_set_mode(struct ata_port *ap);
57 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
58 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
59 static int fgb(u32 bitmap);
60 static int ata_choose_xfer_mode(struct ata_port *ap,
62 unsigned int *xfer_shift_out);
63 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
64 static void __ata_qc_complete(struct ata_queued_cmd *qc);
66 static unsigned int ata_unique_id = 1;
67 static struct workqueue_struct *ata_wq;
69 MODULE_AUTHOR("Jeff Garzik");
70 MODULE_DESCRIPTION("Library module for ATA devices");
71 MODULE_LICENSE("GPL");
72 MODULE_VERSION(DRV_VERSION);
75 * ata_tf_load - send taskfile registers to host controller
76 * @ap: Port to which output is sent
77 * @tf: ATA taskfile register set
79 * Outputs ATA taskfile to standard ATA host controller.
82 * Inherited from caller.
/*
 * ata_tf_load_pio - write a taskfile to the port's shadow registers
 * using port I/O (outb).  Locking is inherited from the caller.
 * NOTE(review): several lines (closing braces, VPRINTK argument
 * lists) are elided in this extract.
 */
static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/* rewrite the device-control register only when it changed */
	if (tf->ctl != ap->last_ctl) {
		outb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;

	/* LBA48: each shadow register is written twice; the high-order
	 * (HOB) bytes must go out before the low-order bytes below */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		outb(tf->hob_feature, ioaddr->feature_addr);
		outb(tf->hob_nsect, ioaddr->nsect_addr);
		outb(tf->hob_lbal, ioaddr->lbal_addr);
		outb(tf->hob_lbam, ioaddr->lbam_addr);
		outb(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",

	/* low-order (or only) copies of the taskfile registers */
	outb(tf->feature, ioaddr->feature_addr);
	outb(tf->nsect, ioaddr->nsect_addr);
	outb(tf->lbal, ioaddr->lbal_addr);
	outb(tf->lbam, ioaddr->lbam_addr);
	outb(tf->lbah, ioaddr->lbah_addr);
	VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",

	/* device/head register is only written when the taskfile says
	 * it carries a valid value */
	if (tf->flags & ATA_TFLAG_DEVICE) {
		outb(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
133 * ata_tf_load_mmio - send taskfile registers to host controller
134 * @ap: Port to which output is sent
135 * @tf: ATA taskfile register set
137 * Outputs ATA taskfile to standard ATA host controller using MMIO.
140 * Inherited from caller.
/*
 * ata_tf_load_mmio - MMIO twin of ata_tf_load_pio(): identical write
 * order, but through writeb() on memory-mapped registers.
 * NOTE(review): closing braces and VPRINTK arguments are elided here.
 */
static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/* device-control register: write-through cache via last_ctl */
	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;

	/* LBA48 high-order bytes first */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",

	/* low-order (or only) copies */
	writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
	writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
	writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
	writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
	writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
	VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",

	/* device/head register only when valid */
	if (tf->flags & ATA_TFLAG_DEVICE) {
		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
190 void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
192 if (ap->flags & ATA_FLAG_MMIO)
193 ata_tf_load_mmio(ap, tf);
195 ata_tf_load_pio(ap, tf);
199 * ata_exec_command - issue ATA command to host controller
200 * @ap: port to which command is being issued
201 * @tf: ATA taskfile register set
203 * Issues PIO/MMIO write to ATA command register, with proper
204 * synchronization with interrupt handler / other threads.
207 * spin_lock_irqsave(host_set lock)
/*
 * Issue the command byte via port I/O.  Caller holds the host_set
 * lock (see the header comment above).
 * NOTE(review): trailing lines of this body are elided in this extract.
 */
static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	outb(tf->command, ap->ioaddr.command_addr);
220 * ata_exec_command_mmio - issue ATA command to host controller
221 * @ap: port to which command is being issued
222 * @tf: ATA taskfile register set
224 * Issues MMIO write to ATA command register, with proper
225 * synchronization with interrupt handler / other threads.
228 * spin_lock_irqsave(host_set lock)
/*
 * Issue the command byte via MMIO.  Caller holds the host_set lock.
 * NOTE(review): trailing lines of this body are elided in this extract.
 */
static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
239 void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
241 if (ap->flags & ATA_FLAG_MMIO)
242 ata_exec_command_mmio(ap, tf);
244 ata_exec_command_pio(ap, tf);
248 * ata_exec - issue ATA command to host controller
249 * @ap: port to which command is being issued
250 * @tf: ATA taskfile register set
252 * Issues PIO/MMIO write to ATA command register, with proper
253 * synchronization with interrupt handler / other threads.
256 * Obtains host_set lock.
/*
 * ata_exec - issue a command with the host_set lock held internally.
 * NOTE(review): the declaration of `flags` is elided in this extract.
 */
static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
	/* serialize against the interrupt handler / other submitters */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->ops->exec_command(ap, tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
270 * ata_tf_to_host - issue ATA taskfile to host controller
271 * @ap: port to which command is being issued
272 * @tf: ATA taskfile register set
274 * Issues ATA taskfile register set to ATA host controller,
275 * with proper synchronization with interrupt handler and
279 * Obtains host_set lock.
/*
 * ata_tf_to_host - load the taskfile and issue it to the device.
 * NOTE(review): only the tf_load step is visible in this extract;
 * the command-issue step (presumably ata_exec()) is elided — confirm
 * against the full source.
 */
static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
	ap->ops->tf_load(ap, tf);
290 * ata_tf_to_host_nolock - issue ATA taskfile to host controller
291 * @ap: port to which command is being issued
292 * @tf: ATA taskfile register set
294 * Issues ATA taskfile register set to ATA host controller,
295 * with proper synchronization with interrupt handler and
299 * spin_lock_irqsave(host_set lock)
/*
 * ata_tf_to_host_nolock - load the taskfile, then write the command
 * register, without taking any lock; the caller already holds the
 * host_set lock (see header comment above).
 */
void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
309 * ata_tf_read - input device's ATA taskfile shadow registers
310 * @ap: Port from which input is read
311 * @tf: ATA taskfile register set for storing input
313 * Reads ATA taskfile registers for currently-selected device
317 * Inherited from caller.
/*
 * ata_tf_read_pio - read the shadow registers of the currently
 * selected device into @tf using port I/O.
 * NOTE(review): closing braces are elided in this extract.
 */
static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->nsect = inb(ioaddr->nsect_addr);
	tf->lbal = inb(ioaddr->lbal_addr);
	tf->lbam = inb(ioaddr->lbam_addr);
	tf->lbah = inb(ioaddr->lbah_addr);
	tf->device = inb(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		/* set the HOB bit in device control to expose the
		 * high-order byte of each shadow register */
		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = inb(ioaddr->error_addr);
		tf->hob_nsect = inb(ioaddr->nsect_addr);
		tf->hob_lbal = inb(ioaddr->lbal_addr);
		tf->hob_lbam = inb(ioaddr->lbam_addr);
		tf->hob_lbah = inb(ioaddr->lbah_addr);
341 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
342 * @ap: Port from which input is read
343 * @tf: ATA taskfile register set for storing input
345 * Reads ATA taskfile registers for currently-selected device
349 * Inherited from caller.
/*
 * ata_tf_read_mmio - MMIO twin of ata_tf_read_pio().
 * NOTE(review): closing braces are elided in this extract.
 */
static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
	tf->device = readb((void __iomem *)ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		/* HOB bit exposes the high-order bytes of the shadow regs */
		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
372 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
374 if (ap->flags & ATA_FLAG_MMIO)
375 ata_tf_read_mmio(ap, tf);
377 ata_tf_read_pio(ap, tf);
381 * ata_check_status_pio - Read device status reg & clear interrupt
382 * @ap: port where the device is
384 * Reads ATA taskfile status register for currently-selected device
385 * and return it's value. This also clears pending interrupts
389 * Inherited from caller.
/* Read the Status register via port I/O; per the header comment this
 * also clears a pending device interrupt. */
static u8 ata_check_status_pio(struct ata_port *ap)
	return inb(ap->ioaddr.status_addr);
397 * ata_check_status_mmio - Read device status reg & clear interrupt
398 * @ap: port where the device is
400 * Reads ATA taskfile status register for currently-selected device
401 * via MMIO and return it's value. This also clears pending interrupts
405 * Inherited from caller.
/* Read the Status register via MMIO; per the header comment this also
 * clears a pending device interrupt. */
static u8 ata_check_status_mmio(struct ata_port *ap)
	return readb((void __iomem *) ap->ioaddr.status_addr);
412 u8 ata_check_status(struct ata_port *ap)
414 if (ap->flags & ATA_FLAG_MMIO)
415 return ata_check_status_mmio(ap);
416 return ata_check_status_pio(ap);
419 u8 ata_altstatus(struct ata_port *ap)
421 if (ap->ops->check_altstatus)
422 return ap->ops->check_altstatus(ap);
424 if (ap->flags & ATA_FLAG_MMIO)
425 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
426 return inb(ap->ioaddr.altstatus_addr);
429 u8 ata_chk_err(struct ata_port *ap)
431 if (ap->ops->check_err)
432 return ap->ops->check_err(ap);
434 if (ap->flags & ATA_FLAG_MMIO) {
435 return readb((void __iomem *) ap->ioaddr.error_addr);
437 return inb(ap->ioaddr.error_addr);
441 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
442 * @tf: Taskfile to convert
443 * @fis: Buffer into which data will output
444 * @pmp: Port multiplier port
446 * Converts a standard ATA taskfile to a Serial ATA
447 * FIS structure (Register - Host to Device).
450 * Inherited from caller.
/*
 * ata_tf_to_fis - serialize @tf into a Register Host-to-Device FIS.
 * NOTE(review): bytes 4-7, 12, and 14+ of the FIS are elided in this
 * extract.
 */
void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7);	/* Port multiplier number,
						 * bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	/* high-order (LBA48 "HOB") bytes */
	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[13] = tf->hob_nsect;
483 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
484 * @fis: Buffer from which data will be input
485 * @tf: Taskfile to output
487 * Converts a standard ATA taskfile to a Serial ATA
488 * FIS structure (Register - Host to Device).
491 * Inherited from caller.
/*
 * ata_tf_from_fis - deserialize a received FIS into @tf; the command
 * byte carries the device status and the feature byte the error code
 * on the device-to-host direction.
 * NOTE(review): bytes 4-7 and 12 are elided in this extract.
 */
void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	/* high-order (LBA48 "HOB") bytes */
	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->hob_nsect	= fis[13];
513 * ata_prot_to_cmd - determine which read/write opcodes to use
514 * @protocol: ATA_PROT_xxx taskfile protocol
515 * @lba48: true is lba48 is present
517 * Given necessary input, determine which read/write commands
518 * to use to transfer data.
/*
 * ata_prot_to_cmd - pick read/write opcodes for a taskfile protocol.
 * Returns both opcodes packed in one int: read command in bits 0-7,
 * write command in bits 8-15 (see return below and the unpacking in
 * ata_dev_set_protocol()).
 * NOTE(review): the switch/if skeleton selecting between the
 * PIO/DMA x LBA28/LBA48 cases is elided in this extract.
 */
static int ata_prot_to_cmd(int protocol, int lba48)
	int rcmd = 0, wcmd = 0;

	/* PIO, LBA48 */
	rcmd = ATA_CMD_PIO_READ_EXT;
	wcmd = ATA_CMD_PIO_WRITE_EXT;
	/* PIO, LBA28 */
	rcmd = ATA_CMD_PIO_READ;
	wcmd = ATA_CMD_PIO_WRITE;
	/* DMA, LBA48 */
	rcmd = ATA_CMD_READ_EXT;
	wcmd = ATA_CMD_WRITE_EXT;
	/* DMA, LBA28 */
	wcmd = ATA_CMD_WRITE;

	return rcmd | (wcmd << 8);
556 * ata_dev_set_protocol - set taskfile protocol and r/w commands
557 * @dev: device to examine and configure
559 * Examine the device configuration, after we have
560 * read the identify-device page and configured the
561 * data transfer mode. Set internal state related to
562 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
563 * and calculate the proper read/write commands to use.
/*
 * ata_dev_set_protocol - derive xfer protocol and r/w opcodes from the
 * device flags; unpacks the int returned by ata_prot_to_cmd() (read
 * opcode in the low byte, write opcode in bits 8-15).
 * NOTE(review): the pio/dma selection branches are elided here.
 */
static void ata_dev_set_protocol(struct ata_device *dev)
	int pio = (dev->flags & ATA_DFLAG_PIO);
	int lba48 = (dev->flags & ATA_DFLAG_LBA48);

	proto = dev->xfer_protocol = ATA_PROT_PIO;
	proto = dev->xfer_protocol = ATA_PROT_DMA;

	cmd = ata_prot_to_cmd(proto, lba48);

	dev->read_cmd = cmd & 0xff;
	dev->write_cmd = (cmd >> 8) & 0xff;
587 static const char * xfer_mode_str[] = {
607 * ata_udma_string - convert UDMA bit offset to string
608 * @mask: mask of bits supported; only highest bit counts.
610 * Determine string which represents the highest speed
611 * (highest bit in @udma_mask).
617 * Constant C string representing highest speed listed in
618 * @udma_mask, or the constant C string "<n/a>".
/*
 * ata_mode_string - map the highest bit set in @mask to its name in
 * xfer_mode_str[]; scans UDMA, then MWDMA, then PIO ranges from the
 * fastest mode downward.
 * NOTE(review): loop bodies, the "<n/a>" fallback and braces are
 * elided in this extract.
 */
static const char *ata_mode_string(unsigned int mask)
	/* UDMA bits live in the low 8 bits of the mask */
	for (i = 7; i >= 0; i--)
	for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
	for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
	return xfer_mode_str[i];
642 * ata_pio_devchk - PATA device presence detection
643 * @ap: ATA channel to examine
644 * @device: Device to examine (starting at zero)
646 * This technique was originally described in
647 * Hale Landis's ATADRVR (www.ata-atapi.com), and
648 * later found its way into the ATA/ATAPI spec.
650 * Write a pattern to the ATA shadow registers,
651 * and if a device is present, it will respond by
652 * correctly storing and echoing back the
653 * ATA shadow register contents.
/*
 * ata_pio_devchk - Hale Landis style presence check: write 0x55/0xaa
 * patterns to the nsect/lbal shadow registers and see whether a
 * device echoes them back.  Port-I/O flavor.
 * NOTE(review): the parameter continuation line and local variable
 * declarations are elided in this extract.
 */
static unsigned int ata_pio_devchk(struct ata_port *ap,
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ap->ops->dev_select(ap, device);

	/* write alternating patterns; the last pair is what we verify */
	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
686 * ata_mmio_devchk - PATA device presence detection
687 * @ap: ATA channel to examine
688 * @device: Device to examine (starting at zero)
690 * This technique was originally described in
691 * Hale Landis's ATADRVR (www.ata-atapi.com), and
692 * later found its way into the ATA/ATAPI spec.
694 * Write a pattern to the ATA shadow registers,
695 * and if a device is present, it will respond by
696 * correctly storing and echoing back the
697 * ATA shadow register contents.
/*
 * ata_mmio_devchk - MMIO twin of ata_pio_devchk(): same 0x55/0xaa
 * echo test through writeb()/readb().
 * NOTE(review): the parameter continuation line and local variable
 * declarations are elided in this extract.
 */
static unsigned int ata_mmio_devchk(struct ata_port *ap,
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
730 * ata_devchk - PATA device presence detection
731 * @ap: ATA channel to examine
732 * @device: Device to examine (starting at zero)
734 * Dispatch ATA device presence detection, depending
735 * on whether we are using PIO or MMIO to talk to the
736 * ATA shadow registers.
742 static unsigned int ata_devchk(struct ata_port *ap,
745 if (ap->flags & ATA_FLAG_MMIO)
746 return ata_mmio_devchk(ap, device);
747 return ata_pio_devchk(ap, device);
751 * ata_dev_classify - determine device type based on ATA-spec signature
752 * @tf: ATA taskfile register set for device to be identified
754 * Determine from taskfile register contents whether a device is
755 * ATA or ATAPI, as per "Signature and persistence" section
756 * of ATA/PI spec (volume 1, sect 5.14).
762 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
763 * the event of failure.
766 unsigned int ata_dev_classify(struct ata_taskfile *tf)
768 /* Apple's open source Darwin code hints that some devices only
769 * put a proper signature into the LBA mid/high registers,
770 * So, we only check those. It's sufficient for uniqueness.
773 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
774 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
775 DPRINTK("found ATA device by sig\n");
779 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
780 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
781 DPRINTK("found ATAPI device by sig\n");
782 return ATA_DEV_ATAPI;
785 DPRINTK("unknown device\n");
786 return ATA_DEV_UNKNOWN;
790 * ata_dev_try_classify - Parse returned ATA device signature
791 * @ap: ATA channel to examine
792 * @device: Device to examine (starting at zero)
794 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
795 * an ATA/ATAPI-defined set of values is placed in the ATA
796 * shadow registers, indicating the results of device detection
799 * Select the ATA device, and read the values from the ATA shadow
800 * registers. Then parse according to the Error register value,
801 * and the spec-defined values examined by ata_dev_classify().
/*
 * ata_dev_try_classify - select @device, read its post-reset shadow
 * registers, check the diagnostic code in the Error register, then
 * classify via ata_dev_classify().
 * NOTE(review): local declarations, the err comparison for device 0,
 * the return statements and closing braces are elided in this
 * extract.
 */
static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
	struct ata_device *dev = &ap->device[device];
	struct ata_taskfile tf;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	/* read Error before the taskfile: it holds the diag result */
	err = ata_chk_err(ap);
	ap->ops->tf_read(ap, &tf);

	/* assume absent until the signature says otherwise */
	dev->class = ATA_DEV_NONE;

	/* see if device passed diags */
	else if ((device == 0) && (err == 0x81))

	/* determine if device if ATA or ATAPI */
	class = ata_dev_classify(&tf);
	if (class == ATA_DEV_UNKNOWN)
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
844 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
845 * @id: IDENTIFY DEVICE results we will examine
846 * @s: string into which data is output
847 * @ofs: offset into identify device page
848 * @len: length of string to return. must be an even number.
850 * The strings in the IDENTIFY DEVICE page are broken up into
851 * 16-bit chunks. Run through the string, and output each
852 * 8-bit chunk linearly, regardless of platform.
858 void ata_dev_id_string(u16 *id, unsigned char *s,
859 unsigned int ofs, unsigned int len)
877 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
882 * ata_std_dev_select - Select device 0/1 on ATA bus
883 * @ap: ATA channel to manipulate
884 * @device: ATA device (numbered from zero) to select
886 * Use the method defined in the ATA specification to
887 * make either device 0, or device 1, active on the
/*
 * ata_std_dev_select - write the device/head register to make device
 * 0 or 1 active, then pause to let the selection settle.
 * NOTE(review): the `tmp` declaration and the if/else selecting
 * between the two values are elided in this extract.
 */
void ata_std_dev_select (struct ata_port *ap, unsigned int device)
	tmp = ATA_DEVICE_OBS;			/* device 0 */
	tmp = ATA_DEVICE_OBS | ATA_DEV1;	/* device 1 */

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	outb(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
912 * ata_dev_select - Select device 0/1 on ATA bus
913 * @ap: ATA channel to manipulate
914 * @device: ATA device (numbered from zero) to select
915 * @wait: non-zero to wait for Status register BSY bit to clear
916 * @can_sleep: non-zero if context allows sleeping
918 * Use the method defined in the ATA specification to
919 * make either device 0, or device 1, active on the
922 * This is a high-level version of ata_std_dev_select(),
923 * which additionally provides the services of inserting
924 * the proper pauses and status polling, where needed.
/*
 * ata_dev_select - high-level device selection: waits for BSY where
 * requested and inserts the longer delays ATAPI devices need when the
 * context may sleep.
 * NOTE(review): the wait/sleep handling around the dev_select call is
 * elided in this extract.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	ap->ops->dev_select(ap, device);

	/* ATAPI devices get extra settle time when sleeping is allowed */
	if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
949 * ata_dump_id - IDENTIFY DEVICE info debugging output
950 * @dev: Device whose IDENTIFY DEVICE page we will dump
952 * Dump selected 16-bit words from a detected device's
953 * IDENTIFY PAGE page.
/*
 * ata_dump_id - debug-dump selected words of the IDENTIFY page
 * (words 49.., 80.., 88..).
 * NOTE(review): the DPRINTK format continuations and argument lists
 * are elided in this extract.
 */
static inline void ata_dump_id(struct ata_device *dev)
	DPRINTK("49==0x%04x "
	DPRINTK("80==0x%04x "
	DPRINTK("88==0x%04x "
988 * ata_dev_identify - obtain IDENTIFY x DEVICE page
989 * @ap: port on which device we wish to probe resides
990 * @device: device bus address, starting at zero
992 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
993 * command, and read back the 512-byte device information page.
994 * The device information page is fed to us via the standard
995 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
996 * using standard PIO-IN paths)
998 * After reading the device information page, we use several
999 * bits of information from it to initialize data structures
1000 * that will be used during the lifetime of the ata_device.
1001 * Other data from the info page is used to disqualify certain
1002 * older ATA devices we do not wish to support.
1005 * Inherited from caller. Some functions called by this function
1006 * obtain the host_set lock.
/*
 * ata_dev_identify - issue IDENTIFY [PACKET] DEVICE and configure the
 * ata_device from the returned 512-byte page (see the kernel-doc
 * comment above).
 * NOTE(review): this extract elides many lines — goto labels, several
 * closing braces, error paths and assignments — so the control flow
 * shown here is incomplete.
 */
static void ata_dev_identify(struct ata_port *ap, unsigned int device)
	struct ata_device *dev = &ap->device[device];
	unsigned int major_version;
	unsigned long xfer_modes;
	unsigned int using_edd;
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	unsigned long flags;

	/* nothing to do for an absent device */
	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",

	/* SRST/SATA reset ports do not use EDD (see error handling below) */
	if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);

	assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
		dev->class == ATA_DEV_NONE);

	ata_dev_select(ap, device, 1, 1); /* select device 0/1 */

	/* build a PIO-in qc that reads the IDENTIFY page into dev->id */
	qc = ata_qc_new_init(ap, dev);

	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
	qc->dma_dir = DMA_FROM_DEVICE;
	qc->tf.protocol = ATA_PROT_PIO;

	if (dev->class == ATA_DEV_ATA) {
		qc->tf.command = ATA_CMD_ID_ATA;
		DPRINTK("do ATA identify\n");
	qc->tf.command = ATA_CMD_ID_ATAPI;
	DPRINTK("do ATAPI identify\n");

	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	/* issue under the host_set lock, then wait for completion */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	wait_for_completion(&wait);

	status = ata_chk_status(ap);
	if (status & ATA_ERR) {
		/*
		 * arg! EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices. Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here. If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
			u8 err = ata_chk_err(ap);
			if (err & ATA_ABORTED) {
				dev->class = ATA_DEV_ATAPI;

	/* IDENTIFY data arrives little-endian; fix up in place */
	swap_buf_le16(dev->id, ATA_ID_WORDS);

	/* print device capabilities */
	printk(KERN_DEBUG "ata%u: dev %u cfg "
	       "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
	       ap->id, device, dev->id[49],
	       dev->id[82], dev->id[83], dev->id[84],
	       dev->id[85], dev->id[86], dev->id[87],

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* we require DMA support (bits 8 of word 49) */
	if (!ata_id_has_dma(dev->id)) {
		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);

	/* quick-n-dirty find max transfer mode; for printk only */
	xfer_modes = dev->id[ATA_ID_UDMA_MODES];
	xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
	xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
	xfer_modes |= (0x7 << ATA_SHIFT_PIO);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(dev->id))	/* sanity check */

		/* get major version */
		tmp = dev->id[ATA_ID_MAJOR_VER];
		for (major_version = 14; major_version >= 1; major_version--)
			if (tmp & (1 << major_version))

		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * INITIALIZE DEVICE PARAMETERS
		 * Some drives were very specific about that exact sequence.
		 */
		if (major_version < 4 || (!ata_id_has_lba(dev->id)))
			ata_dev_init_params(ap, dev);

		if (ata_id_has_lba(dev->id)) {
			dev->flags |= ATA_DFLAG_LBA;

			if (ata_id_has_lba48(dev->id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				dev->n_sectors = ata_id_u64(dev->id, 100);
			dev->n_sectors = ata_id_u32(dev->id, 60);

			/* print device info to dmesg */
			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
			       ata_mode_string(xfer_modes),
			       (unsigned long long)dev->n_sectors,
			       dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");

		/* Default translation */
		dev->cylinders	= dev->id[1];
		dev->heads	= dev->id[3];
		dev->sectors	= dev->id[6];
		dev->n_sectors	= dev->cylinders * dev->heads * dev->sectors;

		if (ata_id_current_chs_valid(dev->id)) {
			/* Current CHS translation is valid. */
			dev->cylinders = dev->id[54];
			dev->heads     = dev->id[55];
			dev->sectors   = dev->id[56];

			dev->n_sectors = ata_id_u32(dev->id, 57);

		/* print device info to dmesg */
		printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
		       ata_mode_string(xfer_modes),
		       (unsigned long long)dev->n_sectors,
		       (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);

		ap->host->max_cmd_len = 16;

	/* ATAPI-specific feature tests */
		if (ata_id_is_ata(dev->id))	/* sanity check */

		rc = atapi_cdb_len(dev->id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
		ap->cdb_len = (unsigned int) rc;
		ap->host->max_cmd_len = (unsigned char) ap->cdb_len;

		/* print device info to dmesg */
		printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
		       ata_mode_string(xfer_modes));

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));

	/* error path: mark the device class as unsupported */
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",

	dev->class++;	/* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
	DPRINTK("EXIT, err\n");
1228 * ata_bus_probe - Reset and probe ATA bus
1234 * Zero on success, non-zero on error.
/*
 * ata_bus_probe - reset the PHY, identify each device on the port and
 * hand it to the driver's dev_config hook; disables the port when
 * nothing usable is found.  Returns zero on success, non-zero on error
 * (see kernel-doc above).
 * NOTE(review): return statements, goto targets and closing braces
 * are elided in this extract.
 */
static int ata_bus_probe(struct ata_port *ap)
	unsigned int i, found = 0;

	ap->ops->phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		ata_dev_identify(ap, i);
		if (ata_dev_present(&ap->device[i])) {
			if (ap->ops->dev_config)
				ap->ops->dev_config(ap, &ap->device[i]);

	if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		goto err_out_disable;

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	ap->ops->port_disable(ap);
/* Re-enable @ap by clearing its disabled flag so later probing will
 * consider the port. */
void ata_port_probe(struct ata_port *ap)
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1282 * __sata_phy_reset -
/*
 * __sata_phy_reset - wake/reset the SATA PHY via the SControl
 * register, poll SStatus until the link reports a device (low nibble
 * != 1), then wait for the device to go non-busy.  Disables the port
 * when no device appears or the busy-wait times out.
 * NOTE(review): local declarations, msleep/branch lines and closing
 * braces are elided in this extract.
 */
void __sata_phy_reset(struct ata_port *ap)
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* 0x301: DET=1 (perform interface init), then release */
		scr_write(ap, SCR_CONTROL, 0x301); /* issue phy wake/reset */
		scr_read(ap, SCR_STATUS);	/* dummy read; flush */
		udelay(400);			/* FIXME: a guess */
	scr_write(ap, SCR_CONTROL, 0x300); /* issue phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
	} while (time_before(jiffies, timeout));

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))

	sstatus = scr_read(ap, SCR_STATUS);
	printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
	ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);

	ap->cbl = ATA_CBL_SATA;
1330 * __sata_phy_reset -
/*
 * sata_phy_reset - reset the PHY and, when the port survived it,
 * continue with bus-level initialization.
 * NOTE(review): the statements following the disabled-port check are
 * elided in this extract.
 */
void sata_phy_reset(struct ata_port *ap)
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
1345 * ata_port_disable -
1351 void ata_port_disable(struct ata_port *ap)
1353 ap->device[0].class = ATA_DEV_NONE;
1354 ap->device[1].class = ATA_DEV_NONE;
1355 ap->flags |= ATA_FLAG_PORT_DISABLED;
1361 } xfer_mode_classes[] = {
1362 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1363 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1364 { ATA_SHIFT_PIO, XFER_PIO_0 },
1367 static inline u8 base_from_shift(unsigned int shift)
1371 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1372 if (xfer_mode_classes[i].shift == shift)
1373 return xfer_mode_classes[i].base;
/*
 * ata_dev_set_mode - push the previously chosen xfer mode to the
 * device (SET FEATURES - XFER) and log the resulting mode name.
 * NOTE(review): local declarations and closing braces are elided in
 * this extract.
 */
static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
	/* skip absent devices and disabled ports */
	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	/* translate (shift, mode) into an index into xfer_mode_str[] */
	base = base_from_shift(dev->xfer_shift);
	ofs = dev->xfer_mode - base;
	idx = ofs + dev->xfer_shift;
	WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));

	DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
		idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno, xfer_mode_str[idx]);
/*
 * ata_host_set_pio - pick the best supported PIO mode for the port and
 * program it into every present device (host timings via the driver's
 * set_piomode hook).  Returns non-zero when no PIO mode is available.
 * NOTE(review): local declarations, the mode selection (`x`), return
 * statements and closing braces are elided in this extract.
 */
static int ata_host_set_pio(struct ata_port *ap)
	mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
	printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);

	base = base_from_shift(ATA_SHIFT_PIO);
	xfer_mode = base + x;

	DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
		(int)base, (int)xfer_mode, mask, x);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		if (ata_dev_present(dev)) {
			dev->pio_mode = xfer_mode;
			dev->xfer_mode = xfer_mode;
			dev->xfer_shift = ATA_SHIFT_PIO;
			if (ap->ops->set_piomode)
				ap->ops->set_piomode(ap, dev);
/*
 * ata_host_set_dma - program the chosen DMA mode into every present
 * device and call the driver's set_dmamode hook for host timings.
 * NOTE(review): closing braces are elided in this extract.
 */
static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
			     unsigned int xfer_shift)
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		if (ata_dev_present(dev)) {
			dev->dma_mode = xfer_mode;
			dev->xfer_mode = xfer_mode;
			dev->xfer_shift = xfer_shift;
			if (ap->ops->set_dmamode)
				ap->ops->set_dmamode(ap, dev);
1454 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1455 * @ap: port on which timings will be programmed
1460 static void ata_set_mode(struct ata_port *ap)
1462 unsigned int i, xfer_shift;
1466 /* step 1: always set host PIO timings */
1467 rc = ata_host_set_pio(ap);
1471 /* step 2: choose the best data xfer mode */
1472 xfer_mode = xfer_shift = 0;
1473 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1477 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1478 if (xfer_shift != ATA_SHIFT_PIO)
1479 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1481 /* step 4: update devices' xfer mode */
1482 ata_dev_set_mode(ap, &ap->device[0]);
1483 ata_dev_set_mode(ap, &ap->device[1]);
/* a SET FEATURES failure above may have disabled the port; bail out */
1485 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1488 if (ap->ops->post_set_mode)
1489 ap->ops->post_set_mode(ap);
/* refresh cached per-device taskfile protocol for the new modes */
1491 for (i = 0; i < 2; i++) {
1492 struct ata_device *dev = &ap->device[i];
1493 ata_dev_set_protocol(dev);
/* error path (elided label above): give up on the whole port */
1499 ata_port_disable(ap);
1503 * ata_busy_sleep - sleep until BSY clears, or timeout
1504 * @ap: port containing status register to be polled
1505 * @tmout_pat: impatience timeout
1506 * @tmout: overall timeout
1512 static unsigned int ata_busy_sleep (struct ata_port *ap,
1513 unsigned long tmout_pat,
1514 unsigned long tmout)
1516 unsigned long timer_start, timeout;
/* quick initial poll before starting the timed loops */
1519 status = ata_busy_wait(ap, ATA_BUSY, 300);
1520 timer_start = jiffies;
/* phase 1: poll until the "impatience" deadline, then warn the user */
1521 timeout = timer_start + tmout_pat;
1522 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1524 status = ata_busy_wait(ap, ATA_BUSY, 3);
1527 if (status & ATA_BUSY)
1528 printk(KERN_WARNING "ata%u is slow to respond, "
1529 "please be patient\n", ap->id);
/* phase 2: overall deadline is counted from the original start time */
1531 timeout = timer_start + tmout;
1532 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1534 status = ata_chk_status(ap);
1537 if (status & ATA_BUSY) {
1538 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1539 ap->id, tmout / HZ);
/* Post-reset settling: wait for each device found by ata_devchk() to come out
 * of BSY, and for device 1 to present the 01/01 nsect/lbal reset signature
 * before trusting its registers.
 */
1546 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1548 struct ata_ioports *ioaddr = &ap->ioaddr;
1549 unsigned int dev0 = devmask & (1 << 0);
1550 unsigned int dev1 = devmask & (1 << 1);
1551 unsigned long timeout;
1553 /* if device 0 was found in ata_devchk, wait for its
1557 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1559 /* if device 1 was found in ata_devchk, wait for
1560 * register access, then wait for BSY to clear
1562 timeout = jiffies + ATA_TMOUT_BOOT;
/* re-select device 1 each iteration and re-read its signature registers */
1566 ap->ops->dev_select(ap, 1);
1567 if (ap->flags & ATA_FLAG_MMIO) {
1568 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1569 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1571 nsect = inb(ioaddr->nsect_addr);
1572 lbal = inb(ioaddr->lbal_addr);
/* nsect==1 && lbal==1 is the post-reset signature: registers are valid */
1574 if ((nsect == 1) && (lbal == 1))
1576 if (time_after(jiffies, timeout)) {
1580 msleep(50); /* give drive a breather */
1583 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1585 /* is all this really necessary? */
1586 ap->ops->dev_select(ap, 0);
1588 ap->ops->dev_select(ap, 1);
1590 ap->ops->dev_select(ap, 0);
/* Reset the bus by issuing EXECUTE DEVICE DIAGNOSTIC, then sleep until BSY
 * clears.  Returns the final status from ata_busy_sleep().
 */
1601 static unsigned int ata_bus_edd(struct ata_port *ap)
1603 struct ata_taskfile tf;
1605 /* set up execute-device-diag (bus reset) taskfile */
1606 /* also, take interrupts to a known state (disabled) */
1607 DPRINTK("execute-device-diag\n");
1608 ata_tf_init(ap, &tf, 0);
1610 tf.command = ATA_CMD_EDD;
1611 tf.protocol = ATA_PROT_NODATA;
1614 ata_tf_to_host(ap, &tf);
1616 /* spec says at least 2ms. but who knows with those
1617 * crazy ATAPI devices...
1621 return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/* Software-reset the bus by pulsing SRST in the device control register
 * (MMIO or port I/O as appropriate), then let ata_bus_post_reset() wait for
 * the devices in @devmask to settle.
 */
1624 static unsigned int ata_bus_softreset(struct ata_port *ap,
1625 unsigned int devmask)
1627 struct ata_ioports *ioaddr = &ap->ioaddr;
1629 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1631 /* software reset. causes dev0 to be selected */
1632 if (ap->flags & ATA_FLAG_MMIO) {
1633 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1634 udelay(20); /* FIXME: flush */
1635 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
1636 udelay(20); /* FIXME: flush */
1637 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1639 outb(ap->ctl, ioaddr->ctl_addr);
1641 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1643 outb(ap->ctl, ioaddr->ctl_addr);
1646 /* spec mandates ">= 2ms" before checking status.
1647 * We wait 150ms, because that was the magic delay used for
1648 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
1649 * between when the ATA command register is written, and then
1650 * status is checked. Because waiting for "a while" before
1651 * checking status is fine, post SRST, we perform this magic
1652 * delay here as well.
1656 ata_bus_post_reset(ap, devmask);
1662 * ata_bus_reset - reset host port and associated ATA channel
1663 * @ap: port to reset
1665 * This is typically the first time we actually start issuing
1666 * commands to the ATA channel. We wait for BSY to clear, then
1667 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
1668 * result. Determine what devices, if any, are on the channel
1669 * by looking at the device 0/1 error register. Look at the signature
1670 * stored in each device's taskfile registers, to determine if
1671 * the device is ATA or ATAPI.
1674 * Inherited from caller. Some functions called by this function
1675 * obtain the host_set lock.
1678 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
1681 void ata_bus_reset(struct ata_port *ap)
1683 struct ata_ioports *ioaddr = &ap->ioaddr;
1684 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1686 unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
1688 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
1690 /* determine if device 0/1 are present */
1691 if (ap->flags & ATA_FLAG_SATA_RESET)
1694 dev0 = ata_devchk(ap, 0);
1696 dev1 = ata_devchk(ap, 1);
1700 devmask |= (1 << 0);
1702 devmask |= (1 << 1);
1704 /* select device 0 again */
1705 ap->ops->dev_select(ap, 0);
1707 /* issue bus reset */
1708 if (ap->flags & ATA_FLAG_SRST)
1709 rc = ata_bus_softreset(ap, devmask);
1710 else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
1711 /* set up device control */
1712 if (ap->flags & ATA_FLAG_MMIO)
1713 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1715 outb(ap->ctl, ioaddr->ctl_addr);
1716 rc = ata_bus_edd(ap);
1723 * determine by signature whether we have ATA or ATAPI devices
/* err 0x81 means device 1 failed diagnostics; skip classifying the slave */
1725 err = ata_dev_try_classify(ap, 0);
1726 if ((slave_possible) && (err != 0x81))
1727 ata_dev_try_classify(ap, 1);
1729 /* re-enable interrupts */
1730 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
1733 /* is double-select really necessary? */
1734 if (ap->device[1].class != ATA_DEV_NONE)
1735 ap->ops->dev_select(ap, 1);
1736 if (ap->device[0].class != ATA_DEV_NONE)
1737 ap->ops->dev_select(ap, 0);
1739 /* if no devices were detected, disable this port */
1740 if ((ap->device[0].class == ATA_DEV_NONE) &&
1741 (ap->device[1].class == ATA_DEV_NONE))
1744 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
1745 /* set up device control for ATA_FLAG_SATA_RESET */
1746 if (ap->flags & ATA_FLAG_MMIO)
1747 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1749 outb(ap->ctl, ioaddr->ctl_addr);
/* error label (elided): no devices or reset failure disables the port */
1756 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
1757 ap->ops->port_disable(ap);
/* Log that DMA is being disabled for a device on the DMA blacklist. */
1762 static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
1764 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
1765 ap->id, dev->devno);
/* Model strings of devices with known-broken DMA; matched (by prefix) against
 * the trimmed IDENTIFY model field in ata_dma_blacklisted().  Entries between
 * the visible lines are elided in this view.
 */
1768 static const char * ata_dma_blacklist [] = {
1787 "Toshiba CD-ROM XM-6202B",
1789 "E-IDE CD-ROM CR-840",
1792 "SAMSUNG CD-ROM SC-148C",
1793 "SAMSUNG CD-ROM SC",
1795 "SAMSUNG CD-ROM SN-124",
1796 "ATAPI CD-ROM DRIVE 40X MAXIMUM",
/* Return non-zero if the device's IDENTIFY model string matches an entry in
 * ata_dma_blacklist[].  Trailing blanks are stripped first, per the ATAPI
 * blank-fill convention.
 */
1800 static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
1802 unsigned char model_num[40];
1807 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
1810 len = strnlen(s, sizeof(model_num));
1812 /* ATAPI specifies that empty space is blank-filled; remove blanks */
1813 while ((len > 0) && (s[len - 1] == ' ')) {
/* compare only the first len chars, i.e. a prefix match on the table entry */
1818 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
1819 if (!strncmp(ata_dma_blacklist[i], s, len))
/* Compute the transfer-mode bitmask for one class (@shift = UDMA/MWDMA/PIO):
 * start from the host's capability mask and AND in each present device's
 * IDENTIFY-reported modes, clearing DMA bits for blacklisted devices.
 */
1825 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
1827 struct ata_device *master, *slave;
1830 master = &ap->device[0];
1831 slave = &ap->device[1];
1833 assert (ata_dev_present(master) || ata_dev_present(slave));
1835 if (shift == ATA_SHIFT_UDMA) {
1836 mask = ap->udma_mask;
1837 if (ata_dev_present(master)) {
1838 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
1839 if (ata_dma_blacklisted(ap, master)) {
1841 ata_pr_blacklisted(ap, master);
1844 if (ata_dev_present(slave)) {
1845 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
1846 if (ata_dma_blacklisted(ap, slave)) {
1848 ata_pr_blacklisted(ap, slave);
1852 else if (shift == ATA_SHIFT_MWDMA) {
1853 mask = ap->mwdma_mask;
1854 if (ata_dev_present(master)) {
1855 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
1856 if (ata_dma_blacklisted(ap, master)) {
1858 ata_pr_blacklisted(ap, master);
1861 if (ata_dev_present(slave)) {
1862 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
1863 if (ata_dma_blacklisted(ap, slave)) {
1865 ata_pr_blacklisted(ap, slave);
1869 else if (shift == ATA_SHIFT_PIO) {
1870 mask = ap->pio_mask;
1871 if (ata_dev_present(master)) {
1872 /* spec doesn't return explicit support for
1873 * PIO0-2, so we fake it
1875 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
1880 if (ata_dev_present(slave)) {
1881 /* spec doesn't return explicit support for
1882 * PIO0-2, so we fake it
1884 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
/* unknown shift: return all-ones (and silence the compiler) */
1891 mask = 0xffffffff; /* shut up compiler warning */
1898 /* find greatest bit */
/* Return the index of the highest set bit in @bitmap (return statement and
 * the no-bits-set result are elided in this view).
 */
1899 static int fgb(u32 bitmap)
1904 for (i = 0; i < 32; i++)
1905 if (bitmap & (1 << i))
1912 * ata_choose_xfer_mode - attempt to find best transfer mode
1913 * @ap: Port for which an xfer mode will be selected
1914 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
1915 * @xfer_shift_out: (output) bit shift that selects this mode
1920 * Zero on success, negative on error.
1923 static int ata_choose_xfer_mode(struct ata_port *ap,
1925 unsigned int *xfer_shift_out)
1927 unsigned int mask, shift;
/* classes are ordered fastest-first (UDMA, MWDMA, PIO); take the first
 * class with any supported mode */
1930 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
1931 shift = xfer_mode_classes[i].shift;
1932 mask = ata_get_mode_mask(ap, shift);
1936 *xfer_mode_out = xfer_mode_classes[i].base + x;
1937 *xfer_shift_out = shift;
1946 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1947 * @ap: Port associated with device @dev
1948 * @dev: Device to which command will be sent
1953 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
1955 DECLARE_COMPLETION(wait);
1956 struct ata_queued_cmd *qc;
1958 unsigned long flags;
1960 /* set up set-features taskfile */
1961 DPRINTK("set features - xfer mode\n");
1963 qc = ata_qc_new_init(ap, dev);
1966 qc->tf.command = ATA_CMD_SET_FEATURES;
1967 qc->tf.feature = SETFEATURES_XFER;
1968 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1969 qc->tf.protocol = ATA_PROT_NODATA;
/* the desired XFER mode code is passed in the sector-count register */
1970 qc->tf.nsect = dev->xfer_mode;
1972 qc->waiting = &wait;
1973 qc->complete_fn = ata_qc_complete_noop;
/* issue under the host_set lock, then sleep until completion */
1975 spin_lock_irqsave(&ap->host_set->lock, flags);
1976 rc = ata_qc_issue(qc);
1977 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1980 ata_port_disable(ap);
1982 wait_for_completion(&wait);
1988 * ata_dev_init_params - Issue INIT DEV PARAMS command
1989 * @ap: Port associated with device @dev
1990 * @dev: Device to which command will be sent
1995 static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
1997 DECLARE_COMPLETION(wait);
1998 struct ata_queued_cmd *qc;
2000 unsigned long flags;
/* current CHS geometry from IDENTIFY words 6 (sectors/track) and 3 (heads) */
2001 u16 sectors = dev->id[6];
2002 u16 heads = dev->id[3];
2004 /* Number of sectors per track 1-255. Number of heads 1-16 */
2005 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2008 /* set up init dev params taskfile */
2009 DPRINTK("init dev params \n");
2011 qc = ata_qc_new_init(ap, dev);
2014 qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
2015 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2016 qc->tf.protocol = ATA_PROT_NODATA;
2017 qc->tf.nsect = sectors;
2018 qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2020 qc->waiting = &wait;
2021 qc->complete_fn = ata_qc_complete_noop;
2023 spin_lock_irqsave(&ap->host_set->lock, flags);
2024 rc = ata_qc_issue(qc);
2025 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2028 ata_port_disable(ap);
2030 wait_for_completion(&wait);
/* Undo the DMA mapping created by ata_sg_setup()/ata_sg_setup_one() for a
 * completed command and clear ATA_QCFLAG_DMAMAP.
 */
2042 static void ata_sg_clean(struct ata_queued_cmd *qc)
2044 struct ata_port *ap = qc->ap;
2045 struct scatterlist *sg = qc->sg;
2046 int dir = qc->dma_dir;
2048 assert(qc->flags & ATA_QCFLAG_DMAMAP);
2051 if (qc->flags & ATA_QCFLAG_SINGLE)
2052 assert(qc->n_elem == 1);
2054 DPRINTK("unmapping %u sg elements\n", qc->n_elem);
/* SG commands were mapped as a list; SINGLE used one dma_map_single() */
2056 if (qc->flags & ATA_QCFLAG_SG)
2057 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2059 dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
2060 sg_dma_len(&sg[0]), dir);
2062 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2067 * ata_fill_sg - Fill PCI IDE PRD table
2068 * @qc: Metadata associated with taskfile to be transferred
2073 static void ata_fill_sg(struct ata_queued_cmd *qc)
2075 struct scatterlist *sg = qc->sg;
2076 struct ata_port *ap = qc->ap;
2077 unsigned int idx, nelem;
2080 assert(qc->n_elem > 0);
2083 for (nelem = qc->n_elem; nelem; nelem--,sg++) {
2087 /* determine if physical DMA addr spans 64K boundary.
2088 * Note h/w doesn't support 64-bit, so we unconditionally
2089 * truncate dma_addr_t to u32.
2091 addr = (u32) sg_dma_address(sg);
2092 sg_len = sg_dma_len(sg);
2095 offset = addr & 0xffff;
/* split any segment that would cross a 64K boundary (PRD limitation) */
2097 if ((offset + sg_len) > 0x10000)
2098 len = 0x10000 - offset;
2100 ap->prd[idx].addr = cpu_to_le32(addr);
2101 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2102 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
/* mark the last PRD entry as end-of-table */
2111 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2114 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2115 * @qc: Metadata associated with taskfile to check
2118 * RETURNS: 0 when ATAPI DMA can be used
2121 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2123 struct ata_port *ap = qc->ap;
2124 int rc = 0; /* Assume ATAPI DMA is OK by default */
/* give the LLDD a chance to veto DMA for this command */
2126 if (ap->ops->check_atapi_dma)
2127 rc = ap->ops->check_atapi_dma(qc);
2132 * ata_qc_prep - Prepare taskfile for submission
2133 * @qc: Metadata associated with taskfile to be prepared
2136 * spin_lock_irqsave(host_set lock)
2138 void ata_qc_prep(struct ata_queued_cmd *qc)
/* only DMA-mapped commands need a PRD table (fill call elided below) */
2140 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
/* Initialize a command's scatterlist to describe a single kernel buffer
 * (@buf, @buflen) using the embedded qc->sgent entry, and mark the command
 * ATA_QCFLAG_SINGLE.
 */
2146 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2148 struct scatterlist *sg;
2150 qc->flags |= ATA_QCFLAG_SINGLE;
2152 memset(&qc->sgent, 0, sizeof(qc->sgent));
2153 qc->sg = &qc->sgent;
2158 sg->page = virt_to_page(buf);
2159 sg->offset = (unsigned long) buf & ~PAGE_MASK;
2160 sg_dma_len(sg) = buflen;
/* Attach a caller-provided scatterlist of @n_elem entries to the command and
 * mark it ATA_QCFLAG_SG.
 */
2163 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2164 unsigned int n_elem)
2166 qc->flags |= ATA_QCFLAG_SG;
2168 qc->n_elem = n_elem;
2172 * ata_sg_setup_one -
/* DMA-map the single buffer of an ATA_QCFLAG_SINGLE command and store the
 * bus address in its lone sg entry.  Returns non-zero on mapping failure
 * (error path elided in this view).
 */
2176 * spin_lock_irqsave(host_set lock)
2182 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2184 struct ata_port *ap = qc->ap;
2185 int dir = qc->dma_dir;
2186 struct scatterlist *sg = qc->sg;
2187 dma_addr_t dma_address;
2189 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
2190 sg_dma_len(sg), dir);
2191 if (dma_mapping_error(dma_address))
2194 sg_dma_address(sg) = dma_address;
2196 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2197 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2207 * spin_lock_irqsave(host_set lock)
/* DMA-map the scatterlist of an ATA_QCFLAG_SG command; qc->n_elem is updated
 * to the (possibly coalesced) mapped element count.
 */
2213 static int ata_sg_setup(struct ata_queued_cmd *qc)
2215 struct ata_port *ap = qc->ap;
2216 struct scatterlist *sg = qc->sg;
2219 VPRINTK("ENTER, ata%u\n", ap->id);
2220 assert(qc->flags & ATA_QCFLAG_SG);
2223 n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2227 DPRINTK("%d sg elements mapped\n", n_elem);
2229 qc->n_elem = n_elem;
/* One polling step of the PIO state machine: if the device is still BSY,
 * either time out (-> PIO_ST_TMOUT) or stay in the poll state and ask the
 * caller to requeue after ATA_SHORT_PAUSE; otherwise advance to the
 * corresponding non-poll state.
 */
2244 static unsigned long ata_pio_poll(struct ata_port *ap)
2247 unsigned int poll_state = PIO_ST_UNKNOWN;
2248 unsigned int reg_state = PIO_ST_UNKNOWN;
2249 const unsigned int tmout_state = PIO_ST_TMOUT;
/* pick the poll/advance state pair matching the current state */
2251 switch (ap->pio_task_state) {
2254 poll_state = PIO_ST_POLL;
2258 case PIO_ST_LAST_POLL:
2259 poll_state = PIO_ST_LAST_POLL;
2260 reg_state = PIO_ST_LAST;
2267 status = ata_chk_status(ap);
2268 if (status & ATA_BUSY) {
2269 if (time_after(jiffies, ap->pio_task_timeout)) {
2270 ap->pio_task_state = tmout_state;
2273 ap->pio_task_state = poll_state;
/* still busy: tell ata_pio_task to requeue after a short delay */
2274 return ATA_SHORT_PAUSE;
2277 ap->pio_task_state = reg_state;
2282 * ata_pio_complete -
/* Finish a PIO command: wait briefly for BSY/DRQ to clear (falling back to
 * the LAST_POLL state if the drive is slow), then verify final status and
 * complete the queued command.
 */
2288 static void ata_pio_complete (struct ata_port *ap)
2290 struct ata_queued_cmd *qc;
2294 * This is purely hueristic. This is a fast path.
2295 * Sometimes when we enter, BSY will be cleared in
2296 * a chk-status or two. If not, the drive is probably seeking
2297 * or something. Snooze for a couple msecs, then
2298 * chk-status again. If still busy, fall back to
2299 * PIO_ST_POLL state.
2301 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2302 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2304 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2305 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2306 ap->pio_task_state = PIO_ST_LAST_POLL;
2307 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2312 drv_stat = ata_wait_idle(ap);
2313 if (!ata_ok(drv_stat)) {
2314 ap->pio_task_state = PIO_ST_ERR;
2318 qc = ata_qc_from_tag(ap, ap->active_tag);
2321 ap->pio_task_state = PIO_ST_IDLE;
2325 ata_qc_complete(qc, drv_stat);
/* Byte-swap a buffer of @buf_words 16-bit words in place on big-endian
 * machines; compiles to nothing on little-endian (the #ifdef __BIG_ENDIAN
 * opening line is elided in this view).
 */
2328 void swap_buf_le16(u16 *buf, unsigned int buf_words)
2333 for (i = 0; i < buf_words; i++)
2334 buf[i] = le16_to_cpu(buf[i]);
2335 #endif /* __BIG_ENDIAN */
/* Transfer @buflen bytes between @buf and the MMIO data register, one 16-bit
 * word at a time; direction is chosen by @write_data.
 */
2338 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2339 unsigned int buflen, int write_data)
2342 unsigned int words = buflen >> 1;
2343 u16 *buf16 = (u16 *) buf;
2344 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
/* le16_to_cpu/cpu_to_le16 keep on-wire byte order fixed regardless of
 * host endianness */
2347 for (i = 0; i < words; i++)
2348 writew(le16_to_cpu(buf16[i]), mmio);
2350 for (i = 0; i < words; i++)
2351 buf16[i] = cpu_to_le16(readw(mmio));
/* Transfer @buflen bytes between @buf and the port-I/O data register using
 * string word instructions.
 * NOTE(review): the local is named "dwords" but buflen >> 1 is a count of
 * 16-bit words, matching outsw()/insw(); the name is misleading.
 */
2355 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2356 unsigned int buflen, int write_data)
2358 unsigned int dwords = buflen >> 1;
2361 outsw(ap->ioaddr.data_addr, buf, dwords);
2363 insw(ap->ioaddr.data_addr, buf, dwords);
/* Dispatch a PIO data transfer to the MMIO or port-I/O helper depending on
 * the port's ATA_FLAG_MMIO flag.
 */
2366 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2367 unsigned int buflen, int do_write)
2369 if (ap->flags & ATA_FLAG_MMIO)
2370 ata_mmio_data_xfer(ap, buf, buflen, do_write);
2372 ata_pio_data_xfer(ap, buf, buflen, do_write);
/* Transfer one ATA sector (ATA_SECT_SIZE bytes) of a PIO command to/from the
 * current scatterlist position, advancing the cursor state (cursect/cursg/
 * cursg_ofs) and switching to PIO_ST_LAST on the final sector.
 */
2375 static void ata_pio_sector(struct ata_queued_cmd *qc)
2377 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2378 struct scatterlist *sg = qc->sg;
2379 struct ata_port *ap = qc->ap;
2381 unsigned int offset;
2384 if (qc->cursect == (qc->nsect - 1))
2385 ap->pio_task_state = PIO_ST_LAST;
2387 page = sg[qc->cursg].page;
2388 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
2390 /* get the current page and offset */
2391 page = nth_page(page, (offset >> PAGE_SHIFT));
2392 offset %= PAGE_SIZE;
2394 buf = kmap(page) + offset;
/* NOTE(review): sg_dma_len() is used here on a PIO (non-DMA-mapped)
 * scatterlist entry — relies on it reading the plain length; verify
 * against the arch's scatterlist definition. */
2399 if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg_dma_len(&sg[qc->cursg])) {
2404 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2406 /* do the actual data transfer */
2407 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2408 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
/* Transfer up to @bytes bytes of ATAPI PIO data at the current scatterlist
 * position, clamped to the sg entry remainder and to the page boundary;
 * advances curbytes/cursg_ofs and enters PIO_ST_LAST on the final chunk.
 */
2413 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2415 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2416 struct scatterlist *sg = qc->sg;
2417 struct ata_port *ap = qc->ap;
2420 unsigned int offset, count;
2422 if (qc->curbytes == qc->nbytes - bytes)
2423 ap->pio_task_state = PIO_ST_LAST;
2426 sg = &qc->sg[qc->cursg];
2430 offset = sg->offset + qc->cursg_ofs;
2432 /* get the current page and offset */
2433 page = nth_page(page, (offset >> PAGE_SHIFT));
2434 offset %= PAGE_SIZE;
2436 count = min(sg_dma_len(sg) - qc->cursg_ofs, bytes);
2438 /* don't cross page boundaries */
2439 count = min(count, (unsigned int)PAGE_SIZE - offset);
2441 buf = kmap(page) + offset;
2444 qc->curbytes += count;
2445 qc->cursg_ofs += count;
/* exhausted this sg entry: move to the next one */
2447 if (qc->cursg_ofs == sg_dma_len(sg)) {
2452 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2454 /* do the actual data transfer */
2455 ata_data_xfer(ap, buf, count, do_write);
/* more bytes remain in this sg entry: loop back (elided) to continue */
2460 if (qc->cursg_ofs < sg_dma_len(sg))
/* Handle one ATAPI PIO data phase: read the interrupt reason and byte count
 * from the taskfile registers, sanity-check direction, and transfer that many
 * bytes.  Any protocol violation drops to PIO_ST_ERR.
 */
2466 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2468 struct ata_port *ap = qc->ap;
2469 struct ata_device *dev = qc->dev;
2470 unsigned int ireason, bc_lo, bc_hi, bytes;
2471 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
/* ireason lives in nsect; byte count in lbam (low) / lbah (high) */
2473 ap->ops->tf_read(ap, &qc->tf);
2474 ireason = qc->tf.nsect;
2475 bc_lo = qc->tf.lbam;
2476 bc_hi = qc->tf.lbah;
2477 bytes = (bc_hi << 8) | bc_lo;
2479 /* shall be cleared to zero, indicating xfer of data */
2480 if (ireason & (1 << 0))
2483 /* make sure transfer direction matches expected */
2484 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
2485 if (do_write != i_write)
2488 __atapi_pio_bytes(qc, bytes);
/* error label (elided): protocol check failed */
2493 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2494 ap->id, dev->devno);
2495 ap->pio_task_state = PIO_ST_ERR;
/* Service one data block of an in-progress PIO command: wait briefly for BSY
 * to clear (falling back to PIO_ST_POLL if slow), then hand off to the ATAPI
 * or ATA sector transfer path based on the taskfile.
 */
2505 static void ata_pio_block(struct ata_port *ap)
2507 struct ata_queued_cmd *qc;
2511 * This is purely hueristic. This is a fast path.
2512 * Sometimes when we enter, BSY will be cleared in
2513 * a chk-status or two. If not, the drive is probably seeking
2514 * or something. Snooze for a couple msecs, then
2515 * chk-status again. If still busy, fall back to
2516 * PIO_ST_POLL state.
2518 status = ata_busy_wait(ap, ATA_BUSY, 5);
2519 if (status & ATA_BUSY) {
2521 status = ata_busy_wait(ap, ATA_BUSY, 10);
2522 if (status & ATA_BUSY) {
2523 ap->pio_task_state = PIO_ST_POLL;
2524 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2529 qc = ata_qc_from_tag(ap, ap->active_tag);
2532 if (is_atapi_taskfile(&qc->tf)) {
2533 /* no more data to transfer or unsupported ATAPI command */
2534 if ((status & ATA_DRQ) == 0) {
2535 ap->pio_task_state = PIO_ST_IDLE;
2539 ata_qc_complete(qc, status);
2543 atapi_pio_bytes(qc);
2545 /* handle BSY=0, DRQ=0 as error */
2546 if ((status & ATA_DRQ) == 0) {
2547 ap->pio_task_state = PIO_ST_ERR;
/* Terminal error state of the PIO machine: log the status, return to
 * PIO_ST_IDLE, and complete the command with ATA_ERR forced on.
 */
2555 static void ata_pio_error(struct ata_port *ap)
2557 struct ata_queued_cmd *qc;
2560 qc = ata_qc_from_tag(ap, ap->active_tag);
2563 drv_stat = ata_chk_status(ap);
2564 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2567 ap->pio_task_state = PIO_ST_IDLE;
/* force ATA_ERR so completion sees a failed command even if the drive
 * did not latch ERR */
2571 ata_qc_complete(qc, drv_stat | ATA_ERR);
/* Workqueue entry point for the PIO state machine: dispatch on the current
 * state and requeue itself (with a delay when the poll path asked for one)
 * until the command finishes or errors out.
 */
2574 static void ata_pio_task(void *_data)
2576 struct ata_port *ap = _data;
2577 unsigned long timeout = 0;
2579 switch (ap->pio_task_state) {
2588 ata_pio_complete(ap);
2592 case PIO_ST_LAST_POLL:
2593 timeout = ata_pio_poll(ap);
/* non-zero timeout means "try again later"; zero means run again now */
2603 queue_delayed_work(ata_wq, &ap->pio_task,
2606 queue_work(ata_wq, &ap->pio_task);
/* Synchronously issue an ATAPI REQUEST SENSE into @cmd->sense_buffer after a
 * failed command, so the SCSI layer sees the real sense data.
 */
2609 static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2610 struct scsi_cmnd *cmd)
2612 DECLARE_COMPLETION(wait);
2613 struct ata_queued_cmd *qc;
2614 unsigned long flags;
2617 DPRINTK("ATAPI request sense\n");
2619 qc = ata_qc_new_init(ap, dev);
2622 /* FIXME: is this needed? */
2623 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2625 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2626 qc->dma_dir = DMA_FROM_DEVICE;
/* NOTE(review): sizeof(ap->cdb_len) is the size of the length FIELD (an
 * int), so only a few bytes of qc->cdb are zeroed; ap->cdb_len (or
 * sizeof(qc->cdb)) looks intended — confirm against struct definitions. */
2628 memset(&qc->cdb, 0, sizeof(ap->cdb_len));
2629 qc->cdb[0] = REQUEST_SENSE;
2630 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2632 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2633 qc->tf.command = ATA_CMD_PACKET;
/* lbam/lbah carry the ATAPI byte-count limit (8 KiB) for the PIO phase */
2635 qc->tf.protocol = ATA_PROT_ATAPI;
2636 qc->tf.lbam = (8 * 1024) & 0xff;
2637 qc->tf.lbah = (8 * 1024) >> 8;
2638 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2640 qc->waiting = &wait;
2641 qc->complete_fn = ata_qc_complete_noop;
2643 spin_lock_irqsave(&ap->host_set->lock, flags);
2644 rc = ata_qc_issue(qc);
2645 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2648 ata_port_disable(ap);
2650 wait_for_completion(&wait);
2656 * ata_qc_timeout - Handle timeout of queued command
2657 * @qc: Command that timed out
2659 * Some part of the kernel (currently, only the SCSI layer)
2660 * has noticed that the active command on port @ap has not
2661 * completed after a specified length of time. Handle this
2662 * condition by disabling DMA (if necessary) and completing
2663 * transactions, with error if necessary.
2665 * This also handles the case of the "lost interrupt", where
2666 * for some reason (possibly hardware bug, possibly driver bug)
2667 * an interrupt was not delivered to the driver, even though the
2668 * transaction completed successfully.
2673 static void ata_qc_timeout(struct ata_queued_cmd *qc)
2675 struct ata_port *ap = qc->ap;
2676 struct ata_device *dev = qc->dev;
2677 u8 host_stat = 0, drv_stat;
2681 /* FIXME: doesn't this conflict with timeout handling? */
2682 if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
2683 struct scsi_cmnd *cmd = qc->scsicmd;
2685 if (!scsi_eh_eflags_chk(cmd, SCSI_EH_CANCEL_CMD)) {
2687 /* finish completing original command */
2688 __ata_qc_complete(qc);
/* fetch real sense data, then report CHECK CONDITION to SCSI */
2690 atapi_request_sense(ap, dev, cmd);
2692 cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
2693 scsi_finish_command(cmd);
2699 /* hack alert! We cannot use the supplied completion
2700 * function from inside the ->eh_strategy_handler() thread.
2701 * libata is the only user of ->eh_strategy_handler() in
2702 * any kernel, so the default scsi_done() assumes it is
2703 * not being called from the SCSI EH.
2705 qc->scsidone = scsi_finish_command;
2707 switch (qc->tf.protocol) {
2710 case ATA_PROT_ATAPI_DMA:
2711 host_stat = ap->ops->bmdma_status(ap);
2713 /* before we do anything else, clear DMA-Start bit */
2714 ap->ops->bmdma_stop(ap);
2720 drv_stat = ata_chk_status(ap);
2722 /* ack bmdma irq events */
2723 ap->ops->irq_clear(ap);
2725 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
2726 ap->id, qc->tf.command, drv_stat, host_stat);
2728 /* complete taskfile transaction */
2729 ata_qc_complete(qc, drv_stat);
2737 * ata_eng_timeout - Handle timeout of queued command
2738 * @ap: Port on which timed-out command is active
2740 * Some part of the kernel (currently, only the SCSI layer)
2741 * has noticed that the active command on port @ap has not
2742 * completed after a specified length of time. Handle this
2743 * condition by disabling DMA (if necessary) and completing
2744 * transactions, with error if necessary.
2746 * This also handles the case of the "lost interrupt", where
2747 * for some reason (possibly hardware bug, possibly driver bug)
2748 * an interrupt was not delivered to the driver, even though the
2749 * transaction completed successfully.
2752 * Inherited from SCSI layer (none, can sleep)
2755 void ata_eng_timeout(struct ata_port *ap)
2757 struct ata_queued_cmd *qc;
2761 qc = ata_qc_from_tag(ap, ap->active_tag);
/* timeout with no active command is a driver bug; log it and bail */
2763 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
2775 * ata_qc_new - Request an available ATA command, for queueing
2776 * @ap: Port associated with device @dev
2777 * @dev: Device from whom we request an available command structure
2782 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
2784 struct ata_queued_cmd *qc = NULL;
/* atomically claim the first free tag; NULL if all tags are in use */
2787 for (i = 0; i < ATA_MAX_QUEUE; i++)
2788 if (!test_and_set_bit(i, &ap->qactive)) {
2789 qc = ata_qc_from_tag(ap, i);
2800 * ata_qc_new_init - Request an available ATA command, and initialize it
2801 * @ap: Port associated with device @dev
2802 * @dev: Device from whom we request an available command structure
2807 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
2808 struct ata_device *dev)
2810 struct ata_queued_cmd *qc;
2812 qc = ata_qc_new(ap);
/* reset transfer cursors and counters for the fresh command */
2819 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
2821 qc->nbytes = qc->curbytes = 0;
2823 ata_tf_init(ap, &qc->tf, dev->devno);
/* propagate the device's addressing capabilities into the taskfile */
2825 if (dev->flags & ATA_DFLAG_LBA) {
2826 qc->tf.flags |= ATA_TFLAG_LBA;
2828 if (dev->flags & ATA_DFLAG_LBA48)
2829 qc->tf.flags |= ATA_TFLAG_LBA48;
/* No-op completion callback used by internal synchronous commands; the
 * body (elided here) just returns.
 */
2836 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
/* Tear down a command's bookkeeping: poison its tag, drop active_tag if it
 * was the active command, wake any synchronous waiter, and release the tag
 * bit last.
 */
2841 static void __ata_qc_complete(struct ata_queued_cmd *qc)
2843 struct ata_port *ap = qc->ap;
2844 unsigned int tag, do_clear = 0;
2848 if (likely(ata_tag_valid(tag))) {
2849 if (tag == ap->active_tag)
2850 ap->active_tag = ATA_TAG_POISON;
2851 qc->tag = ATA_TAG_POISON;
/* snapshot the waiter before releasing the qc to avoid racing reuse */
2856 struct completion *waiting = qc->waiting;
2861 if (likely(do_clear))
2862 clear_bit(tag, &ap->qactive);
2866 * ata_qc_free - free unused ata_queued_cmd
2867 * @qc: Command to complete
2869 * Designed to free unused ata_queued_cmd object
2870 * in case something prevents using it.
2875 void ata_qc_free(struct ata_queued_cmd *qc)
2877 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
2878 assert(qc->waiting == NULL); /* nothing should be waiting */
2880 __ata_qc_complete(qc);
2884 * ata_qc_complete - Complete an active ATA command
2885 * @qc: Command to complete
2886 * @drv_stat: ATA status register contents
2892 void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2896 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
2897 assert(qc->flags & ATA_QCFLAG_ACTIVE);
/* unmap DMA buffers before notifying upper layers */
2899 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
2902 /* call completion callback */
2903 rc = qc->complete_fn(qc, drv_stat);
2905 /* if callback indicates not to complete command (non-zero),
2906 * return immediately
2911 __ata_qc_complete(qc);
/* Decide whether a command's buffers need DMA mapping: DMA protocols always
 * do; PIO protocols only when the port does PIO-via-DMA (ATA_FLAG_PIO_DMA).
 */
2916 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
2918 struct ata_port *ap = qc->ap;
2920 switch (qc->tf.protocol) {
2922 case ATA_PROT_ATAPI_DMA:
2925 case ATA_PROT_ATAPI:
2927 case ATA_PROT_PIO_MULT:
2928 if (ap->flags & ATA_FLAG_PIO_DMA)
2941 * ata_qc_issue - issue taskfile to device
2942 * @qc: command to issue to device
2944 * Prepare an ATA command to submission to device.
2945 * This includes mapping the data into a DMA-able
2946 * area, filling in the S/G table, and finally
2947 * writing the taskfile to hardware, starting the command.
2950 * spin_lock_irqsave(host_set lock)
2953 * Zero on success, negative on error.
2956 int ata_qc_issue(struct ata_queued_cmd *qc)
2958 struct ata_port *ap = qc->ap;
2960 if (ata_should_dma_map(qc)) {
2961 if (qc->flags & ATA_QCFLAG_SG) {
2962 if (ata_sg_setup(qc))
2964 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
2965 if (ata_sg_setup_one(qc))
/* no mapping needed: make sure the DMAMAP flag is not left set */
2969 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2972 ap->ops->qc_prep(qc);
2974 qc->ap->active_tag = qc->tag;
2975 qc->flags |= ATA_QCFLAG_ACTIVE;
2977 return ap->ops->qc_issue(qc);
2984 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
2985 * @qc: command to issue to device
2987 * Using various libata functions and hooks, this function
2988 * starts an ATA command. ATA commands are grouped into
2989 * classes called "protocols", and issuing each type of protocol
2990 * is slightly different.
2993 * spin_lock_irqsave(host_set lock)
2996 * Zero on success, negative on error.
2999 int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3001 struct ata_port *ap = qc->ap;
3003 ata_dev_select(ap, qc->dev->devno, 1, 0);
3005 switch (qc->tf.protocol) {
3006 case ATA_PROT_NODATA:
3007 ata_tf_to_host_nolock(ap, &qc->tf);
/* DMA protocol: program taskfile, then BMDMA setup + start */
3011 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3012 ap->ops->bmdma_setup(qc); /* set up bmdma */
3013 ap->ops->bmdma_start(qc); /* initiate bmdma */
3016 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3017 ata_qc_set_polling(qc);
3018 ata_tf_to_host_nolock(ap, &qc->tf);
3019 ap->pio_task_state = PIO_ST;
3020 queue_work(ata_wq, &ap->pio_task);
/* ATAPI variants all defer the CDB transfer to the packet_task worker */
3023 case ATA_PROT_ATAPI:
3024 ata_qc_set_polling(qc);
3025 ata_tf_to_host_nolock(ap, &qc->tf);
3026 queue_work(ata_wq, &ap->packet_task);
3029 case ATA_PROT_ATAPI_NODATA:
3030 ata_tf_to_host_nolock(ap, &qc->tf);
3031 queue_work(ata_wq, &ap->packet_task);
3034 case ATA_PROT_ATAPI_DMA:
3035 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3036 ap->ops->bmdma_setup(qc); /* set up bmdma */
3037 queue_work(ata_wq, &ap->packet_task);
3049 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
3050 * @qc: Info associated with this ATA transaction.
3053 * spin_lock_irqsave(host_set lock)
3056 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
3058 struct ata_port *ap = qc->ap;
3059 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3061 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3063 /* load PRD table addr. */
3064 mb(); /* make sure PRD table writes are visible to controller */
3065 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
3067 /* specify data direction, triple-check start bit is clear */
3068 dmactl = readb(mmio + ATA_DMA_CMD);
3069 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3071 dmactl |= ATA_DMA_WR;
3072 writeb(dmactl, mmio + ATA_DMA_CMD);
3074 /* issue r/w command */
3075 ap->ops->exec_command(ap, &qc->tf);
3079 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3080 * @qc: Info associated with this ATA transaction.
3083 * spin_lock_irqsave(host_set lock)
3086 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
3088 struct ata_port *ap = qc->ap;
3089 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3092 /* start host DMA transaction */
3093 dmactl = readb(mmio + ATA_DMA_CMD);
3094 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
3096 /* Strictly, one may wish to issue a readb() here, to
3097 * flush the mmio write. However, control also passes
3098 * to the hardware at this point, and it will interrupt
3099 * us when we are to resume control. So, in effect,
3100 * we don't care when the mmio write flushes.
3101 * Further, a read of the DMA status register _immediately_
3102 * following the write may not be what certain flaky hardware
3103 * is expected, so I think it is best to not add a readb()
3104 * without first all the MMIO ATA cards/mobos.
3105 * Or maybe I'm just being paranoid.
3110 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
3111 * @qc: Info associated with this ATA transaction.
3114 * spin_lock_irqsave(host_set lock)
3117 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
3119 struct ata_port *ap = qc->ap;
3120 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3123 /* load PRD table addr. */
3124 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
3126 /* specify data direction, triple-check start bit is clear */
3127 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3128 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3130 dmactl |= ATA_DMA_WR;
3131 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3133 /* issue r/w command */
3134 ap->ops->exec_command(ap, &qc->tf);
3138 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
3139 * @qc: Info associated with this ATA transaction.
3142 * spin_lock_irqsave(host_set lock)
3145 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
3147 struct ata_port *ap = qc->ap;
3150 /* start host DMA transaction */
3151 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3152 outb(dmactl | ATA_DMA_START,
3153 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3156 void ata_bmdma_start(struct ata_queued_cmd *qc)
3158 if (qc->ap->flags & ATA_FLAG_MMIO)
3159 ata_bmdma_start_mmio(qc);
3161 ata_bmdma_start_pio(qc);
3164 void ata_bmdma_setup(struct ata_queued_cmd *qc)
3166 if (qc->ap->flags & ATA_FLAG_MMIO)
3167 ata_bmdma_setup_mmio(qc);
3169 ata_bmdma_setup_pio(qc);
3172 void ata_bmdma_irq_clear(struct ata_port *ap)
3174 if (ap->flags & ATA_FLAG_MMIO) {
3175 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
3176 writeb(readb(mmio), mmio);
3178 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
3179 outb(inb(addr), addr);
3184 u8 ata_bmdma_status(struct ata_port *ap)
3187 if (ap->flags & ATA_FLAG_MMIO) {
3188 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3189 host_stat = readb(mmio + ATA_DMA_STATUS);
3191 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3195 void ata_bmdma_stop(struct ata_port *ap)
3197 if (ap->flags & ATA_FLAG_MMIO) {
3198 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3200 /* clear start/stop bit */
3201 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3202 mmio + ATA_DMA_CMD);
3204 /* clear start/stop bit */
3205 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
3206 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3209 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3210 ata_altstatus(ap); /* dummy read */
3214 * ata_host_intr - Handle host interrupt for given (port, task)
3215 * @ap: Port on which interrupt arrived (possibly...)
3216 * @qc: Taskfile currently active in engine
3218 * Handle host interrupt for given queued command. Currently,
3219 * only DMA interrupts are handled. All other commands are
3220 * handled via polling with interrupts disabled (nIEN bit).
3223 * spin_lock_irqsave(host_set lock)
3226 * One if interrupt was handled, zero if not (shared irq).
3229 inline unsigned int ata_host_intr (struct ata_port *ap,
3230 struct ata_queued_cmd *qc)
3232 u8 status, host_stat;
3234 switch (qc->tf.protocol) {
3237 case ATA_PROT_ATAPI_DMA:
3238 case ATA_PROT_ATAPI:
3239 /* check status of DMA engine */
3240 host_stat = ap->ops->bmdma_status(ap);
3241 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
3243 /* if it's not our irq... */
3244 if (!(host_stat & ATA_DMA_INTR))
3247 /* before we do anything else, clear DMA-Start bit */
3248 ap->ops->bmdma_stop(ap);
3252 case ATA_PROT_ATAPI_NODATA:
3253 case ATA_PROT_NODATA:
3254 /* check altstatus */
3255 status = ata_altstatus(ap);
3256 if (status & ATA_BUSY)
3259 /* check main status, clearing INTRQ */
3260 status = ata_chk_status(ap);
3261 if (unlikely(status & ATA_BUSY))
3263 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
3264 ap->id, qc->tf.protocol, status);
3266 /* ack bmdma irq events */
3267 ap->ops->irq_clear(ap);
3269 /* complete taskfile transaction */
3270 ata_qc_complete(qc, status);
3277 return 1; /* irq handled */
3280 ap->stats.idle_irq++;
3283 if ((ap->stats.idle_irq % 1000) == 0) {
3285 ata_irq_ack(ap, 0); /* debug trap */
3286 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
3289 return 0; /* irq not handled */
3293 * ata_interrupt - Default ATA host interrupt handler
3295 * @dev_instance: pointer to our host information structure
3304 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3306 struct ata_host_set *host_set = dev_instance;
3308 unsigned int handled = 0;
3309 unsigned long flags;
3311 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
3312 spin_lock_irqsave(&host_set->lock, flags);
3314 for (i = 0; i < host_set->n_ports; i++) {
3315 struct ata_port *ap;
3317 ap = host_set->ports[i];
3318 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
3319 struct ata_queued_cmd *qc;
3321 qc = ata_qc_from_tag(ap, ap->active_tag);
3322 if (qc && (!(qc->tf.ctl & ATA_NIEN)))
3323 handled |= ata_host_intr(ap, qc);
3327 spin_unlock_irqrestore(&host_set->lock, flags);
3329 return IRQ_RETVAL(handled);
3333 * atapi_packet_task - Write CDB bytes to hardware
3334 * @_data: Port to which ATAPI device is attached.
3336 * When device has indicated its readiness to accept
3337 * a CDB, this function is called. Send the CDB.
3338 * If DMA is to be performed, exit immediately.
3339 * Otherwise, we are in polling mode, so poll
3340 * status under operation succeeds or fails.
3343 * Kernel thread context (may sleep)
3346 static void atapi_packet_task(void *_data)
3348 struct ata_port *ap = _data;
3349 struct ata_queued_cmd *qc;
3352 qc = ata_qc_from_tag(ap, ap->active_tag);
3354 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3356 /* sleep-wait for BSY to clear */
3357 DPRINTK("busy wait\n");
3358 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
3361 /* make sure DRQ is set */
3362 status = ata_chk_status(ap);
3363 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
3367 DPRINTK("send cdb\n");
3368 assert(ap->cdb_len >= 12);
3369 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3371 /* if we are DMA'ing, irq handler takes over from here */
3372 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3373 ap->ops->bmdma_start(qc); /* initiate bmdma */
3375 /* non-data commands are also handled via irq */
3376 else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3380 /* PIO commands are handled by polling */
3382 ap->pio_task_state = PIO_ST;
3383 queue_work(ata_wq, &ap->pio_task);
3389 ata_qc_complete(qc, ATA_ERR);
3392 int ata_port_start (struct ata_port *ap)
3394 struct device *dev = ap->host_set->dev;
3396 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
3400 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
3405 void ata_port_stop (struct ata_port *ap)
3407 struct device *dev = ap->host_set->dev;
3409 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
3413 * ata_host_remove - Unregister SCSI host structure with upper layers
3414 * @ap: Port to unregister
3415 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3420 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3422 struct Scsi_Host *sh = ap->host;
3427 scsi_remove_host(sh);
3429 ap->ops->port_stop(ap);
3433 * ata_host_init - Initialize an ata_port structure
3434 * @ap: Structure to initialize
3435 * @host: associated SCSI mid-layer structure
3436 * @host_set: Collection of hosts to which @ap belongs
3437 * @ent: Probe information provided by low-level driver
3438 * @port_no: Port number associated with this ata_port
3444 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3445 struct ata_host_set *host_set,
3446 struct ata_probe_ent *ent, unsigned int port_no)
3452 host->max_channel = 1;
3453 host->unique_id = ata_unique_id++;
3454 host->max_cmd_len = 12;
3455 scsi_set_device(host, ent->dev);
3456 scsi_assign_lock(host, &host_set->lock);
3458 ap->flags = ATA_FLAG_PORT_DISABLED;
3459 ap->id = host->unique_id;
3461 ap->ctl = ATA_DEVCTL_OBS;
3462 ap->host_set = host_set;
3463 ap->port_no = port_no;
3465 ent->legacy_mode ? ent->hard_port_no : port_no;
3466 ap->pio_mask = ent->pio_mask;
3467 ap->mwdma_mask = ent->mwdma_mask;
3468 ap->udma_mask = ent->udma_mask;
3469 ap->flags |= ent->host_flags;
3470 ap->ops = ent->port_ops;
3471 ap->cbl = ATA_CBL_NONE;
3472 ap->active_tag = ATA_TAG_POISON;
3473 ap->last_ctl = 0xFF;
3475 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
3476 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
3478 for (i = 0; i < ATA_MAX_DEVICES; i++)
3479 ap->device[i].devno = i;
3482 ap->stats.unhandled_irq = 1;
3483 ap->stats.idle_irq = 1;
3486 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
3490 * ata_host_add - Attach low-level ATA driver to system
3491 * @ent: Information provided by low-level driver
3492 * @host_set: Collections of ports to which we add
3493 * @port_no: Port number associated with this host
3501 static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
3502 struct ata_host_set *host_set,
3503 unsigned int port_no)
3505 struct Scsi_Host *host;
3506 struct ata_port *ap;
3510 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
3514 ap = (struct ata_port *) &host->hostdata[0];
3516 ata_host_init(ap, host, host_set, ent, port_no);
3518 rc = ap->ops->port_start(ap);
3525 scsi_host_put(host);
3539 int ata_device_add(struct ata_probe_ent *ent)
3541 unsigned int count = 0, i;
3542 struct device *dev = ent->dev;
3543 struct ata_host_set *host_set;
3546 /* alloc a container for our list of ATA ports (buses) */
3547 host_set = kmalloc(sizeof(struct ata_host_set) +
3548 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
3551 memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
3552 spin_lock_init(&host_set->lock);
3554 host_set->dev = dev;
3555 host_set->n_ports = ent->n_ports;
3556 host_set->irq = ent->irq;
3557 host_set->mmio_base = ent->mmio_base;
3558 host_set->private_data = ent->private_data;
3559 host_set->ops = ent->port_ops;
3561 /* register each port bound to this device */
3562 for (i = 0; i < ent->n_ports; i++) {
3563 struct ata_port *ap;
3564 unsigned long xfer_mode_mask;
3566 ap = ata_host_add(ent, host_set, i);
3570 host_set->ports[i] = ap;
3571 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
3572 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
3573 (ap->pio_mask << ATA_SHIFT_PIO);
3575 /* print per-port info to dmesg */
3576 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3577 "bmdma 0x%lX irq %lu\n",
3579 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
3580 ata_mode_string(xfer_mode_mask),
3581 ap->ioaddr.cmd_addr,
3582 ap->ioaddr.ctl_addr,
3583 ap->ioaddr.bmdma_addr,
3587 host_set->ops->irq_clear(ap);
3596 /* obtain irq, that is shared between channels */
3597 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
3598 DRV_NAME, host_set))
3601 /* perform each probe synchronously */
3602 DPRINTK("probe begin\n");
3603 for (i = 0; i < count; i++) {
3604 struct ata_port *ap;
3607 ap = host_set->ports[i];
3609 DPRINTK("ata%u: probe begin\n", ap->id);
3610 rc = ata_bus_probe(ap);
3611 DPRINTK("ata%u: probe end\n", ap->id);
3614 /* FIXME: do something useful here?
3615 * Current libata behavior will
3616 * tear down everything when
3617 * the module is removed
3618 * or the h/w is unplugged.
3622 rc = scsi_add_host(ap->host, dev);
3624 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
3626 /* FIXME: do something useful here */
3627 /* FIXME: handle unconditional calls to
3628 * scsi_scan_host and ata_host_remove, below,
3634 /* probes are done, now scan each port's disk(s) */
3635 DPRINTK("probe begin\n");
3636 for (i = 0; i < count; i++) {
3637 struct ata_port *ap = host_set->ports[i];
3639 scsi_scan_host(ap->host);
3642 dev_set_drvdata(dev, host_set);
3644 VPRINTK("EXIT, returning %u\n", ent->n_ports);
3645 return ent->n_ports; /* success */
3648 for (i = 0; i < count; i++) {
3649 ata_host_remove(host_set->ports[i], 1);
3650 scsi_host_put(host_set->ports[i]->host);
3653 VPRINTK("EXIT, returning 0\n");
3658 * ata_scsi_release - SCSI layer callback hook for host unload
3659 * @host: libata host to be unloaded
3661 * Performs all duties necessary to shut down a libata port...
3662 * Kill port kthread, disable port, and release resources.
3665 * Inherited from SCSI layer.
3671 int ata_scsi_release(struct Scsi_Host *host)
3673 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
3677 ap->ops->port_disable(ap);
3678 ata_host_remove(ap, 0);
3685 * ata_std_ports - initialize ioaddr with standard port offsets.
3686 * @ioaddr: IO address structure to be initialized
3688 void ata_std_ports(struct ata_ioports *ioaddr)
3690 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
3691 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
3692 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
3693 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
3694 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
3695 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
3696 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
3697 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
3698 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
3699 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
3702 static struct ata_probe_ent *
3703 ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
3705 struct ata_probe_ent *probe_ent;
3707 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
3709 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
3710 kobject_name(&(dev->kobj)));
3714 memset(probe_ent, 0, sizeof(*probe_ent));
3716 INIT_LIST_HEAD(&probe_ent->node);
3717 probe_ent->dev = dev;
3719 probe_ent->sht = port->sht;
3720 probe_ent->host_flags = port->host_flags;
3721 probe_ent->pio_mask = port->pio_mask;
3722 probe_ent->mwdma_mask = port->mwdma_mask;
3723 probe_ent->udma_mask = port->udma_mask;
3724 probe_ent->port_ops = port->port_ops;
3730 struct ata_probe_ent *
3731 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
3733 struct ata_probe_ent *probe_ent =
3734 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
3738 probe_ent->n_ports = 2;
3739 probe_ent->irq = pdev->irq;
3740 probe_ent->irq_flags = SA_SHIRQ;
3742 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
3743 probe_ent->port[0].altstatus_addr =
3744 probe_ent->port[0].ctl_addr =
3745 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
3746 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
3748 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
3749 probe_ent->port[1].altstatus_addr =
3750 probe_ent->port[1].ctl_addr =
3751 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
3752 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
3754 ata_std_ports(&probe_ent->port[0]);
3755 ata_std_ports(&probe_ent->port[1]);
3760 static struct ata_probe_ent *
3761 ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
3762 struct ata_probe_ent **ppe2)
3764 struct ata_probe_ent *probe_ent, *probe_ent2;
3766 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
3769 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
3775 probe_ent->n_ports = 1;
3776 probe_ent->irq = 14;
3778 probe_ent->hard_port_no = 0;
3779 probe_ent->legacy_mode = 1;
3781 probe_ent2->n_ports = 1;
3782 probe_ent2->irq = 15;
3784 probe_ent2->hard_port_no = 1;
3785 probe_ent2->legacy_mode = 1;
3787 probe_ent->port[0].cmd_addr = 0x1f0;
3788 probe_ent->port[0].altstatus_addr =
3789 probe_ent->port[0].ctl_addr = 0x3f6;
3790 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
3792 probe_ent2->port[0].cmd_addr = 0x170;
3793 probe_ent2->port[0].altstatus_addr =
3794 probe_ent2->port[0].ctl_addr = 0x376;
3795 probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
3797 ata_std_ports(&probe_ent->port[0]);
3798 ata_std_ports(&probe_ent2->port[0]);
3805 * ata_pci_init_one - Initialize/register PCI IDE host controller
3806 * @pdev: Controller to be initialized
3807 * @port_info: Information from low-level host driver
3808 * @n_ports: Number of ports attached to host controller
3811 * Inherited from PCI layer (may sleep).
3817 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
3818 unsigned int n_ports)
3820 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
3821 struct ata_port_info *port[2];
3823 unsigned int legacy_mode = 0;
3824 int disable_dev_on_err = 1;
3829 port[0] = port_info[0];
3831 port[1] = port_info[1];
3835 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
3836 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
3837 /* TODO: support transitioning to native mode? */
3838 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
3839 mask = (1 << 2) | (1 << 0);
3840 if ((tmp8 & mask) != mask)
3841 legacy_mode = (1 << 3);
3845 if ((!legacy_mode) && (n_ports > 1)) {
3846 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
3850 rc = pci_enable_device(pdev);
3854 rc = pci_request_regions(pdev, DRV_NAME);
3856 disable_dev_on_err = 0;
3861 if (!request_region(0x1f0, 8, "libata")) {
3862 struct resource *conflict, res;
3864 res.end = 0x1f0 + 8 - 1;
3865 conflict = ____request_resource(&ioport_resource, &res);
3866 if (!strcmp(conflict->name, "libata"))
3867 legacy_mode |= (1 << 0);
3869 disable_dev_on_err = 0;
3870 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
3873 legacy_mode |= (1 << 0);
3875 if (!request_region(0x170, 8, "libata")) {
3876 struct resource *conflict, res;
3878 res.end = 0x170 + 8 - 1;
3879 conflict = ____request_resource(&ioport_resource, &res);
3880 if (!strcmp(conflict->name, "libata"))
3881 legacy_mode |= (1 << 1);
3883 disable_dev_on_err = 0;
3884 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
3887 legacy_mode |= (1 << 1);
3890 /* we have legacy mode, but all ports are unavailable */
3891 if (legacy_mode == (1 << 3)) {
3893 goto err_out_regions;
3896 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3898 goto err_out_regions;
3899 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3901 goto err_out_regions;
3904 probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
3906 probe_ent = ata_pci_init_native_mode(pdev, port);
3909 goto err_out_regions;
3912 pci_set_master(pdev);
3914 /* FIXME: check ata_device_add return */
3916 if (legacy_mode & (1 << 0))
3917 ata_device_add(probe_ent);
3918 if (legacy_mode & (1 << 1))
3919 ata_device_add(probe_ent2);
3921 ata_device_add(probe_ent);
3929 if (legacy_mode & (1 << 0))
3930 release_region(0x1f0, 8);
3931 if (legacy_mode & (1 << 1))
3932 release_region(0x170, 8);
3933 pci_release_regions(pdev);
3935 if (disable_dev_on_err)
3936 pci_disable_device(pdev);
3941 * ata_pci_remove_one - PCI layer callback for device removal
3942 * @pdev: PCI device that was removed
3944 * PCI layer indicates to libata via this hook that
3945 * hot-unplug or module unload event has occured.
3946 * Handle this by unregistering all objects associated
3947 * with this PCI device. Free those objects. Then finally
3948 * release PCI resources and disable device.
3951 * Inherited from PCI layer (may sleep).
3954 void ata_pci_remove_one (struct pci_dev *pdev)
3956 struct device *dev = pci_dev_to_dev(pdev);
3957 struct ata_host_set *host_set = dev_get_drvdata(dev);
3958 struct ata_port *ap;
3961 for (i = 0; i < host_set->n_ports; i++) {
3962 ap = host_set->ports[i];
3964 scsi_remove_host(ap->host);
3967 free_irq(host_set->irq, host_set);
3968 if (host_set->ops->host_stop)
3969 host_set->ops->host_stop(host_set);
3970 if (host_set->mmio_base)
3971 iounmap(host_set->mmio_base);
3973 for (i = 0; i < host_set->n_ports; i++) {
3974 ap = host_set->ports[i];
3976 ata_scsi_release(ap->host);
3978 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
3979 struct ata_ioports *ioaddr = &ap->ioaddr;
3981 if (ioaddr->cmd_addr == 0x1f0)
3982 release_region(0x1f0, 8);
3983 else if (ioaddr->cmd_addr == 0x170)
3984 release_region(0x170, 8);
3987 scsi_host_put(ap->host);
3992 pci_release_regions(pdev);
3993 pci_disable_device(pdev);
3994 dev_set_drvdata(dev, NULL);
3997 /* move to PCI subsystem */
3998 int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
4000 unsigned long tmp = 0;
4002 switch (bits->width) {
4005 pci_read_config_byte(pdev, bits->reg, &tmp8);
4011 pci_read_config_word(pdev, bits->reg, &tmp16);
4017 pci_read_config_dword(pdev, bits->reg, &tmp32);
4028 return (tmp == bits->val) ? 1 : 0;
4030 #endif /* CONFIG_PCI */
4042 static int __init ata_init(void)
4044 ata_wq = create_workqueue("ata");
4048 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4052 static void __exit ata_exit(void)
4054 destroy_workqueue(ata_wq);
module_init(ata_init);
module_exit(ata_exit);

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* Core helpers and default hooks exported to low-level ATA drivers. */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_chk_err);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
/* BMDMA (PCI IDE bus-master) helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_port_disable);
/* SCSI mid-layer glue */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_id_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
/* PCI bus helpers (available only with CONFIG_PCI) */
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#endif /* CONFIG_PCI */