git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge ../scsi-rc-fixes-2.6
authorJames Bottomley <jejb@mulgrave.il.steeleye.com>
Wed, 22 Nov 2006 18:06:44 +0000 (12:06 -0600)
committerJames Bottomley <jejb@mulgrave.il.steeleye.com>
Wed, 22 Nov 2006 18:06:44 +0000 (12:06 -0600)
1  2 
Documentation/kernel-parameters.txt
block/scsi_ioctl.c
drivers/scsi/BusLogic.c
drivers/scsi/aic94xx/aic94xx_init.c
drivers/scsi/aic94xx/aic94xx_scb.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/scsi/st.c

index 5a92ac085969edfca303302b2bc00b6cc885f9bf,67473849f20e0e42cc4d2ef6ae015634b4c830ba..9913f06766436887f710ef3e69a37743f4796842
@@@ -164,6 -164,10 +164,10 @@@ and is between 256 and 4096 characters
        acpi_skip_timer_override [HW,ACPI]
                        Recognize and ignore IRQ0/pin2 Interrupt Override.
                        For broken nForce2 BIOS resulting in XT-PIC timer.
+       acpi_use_timer_override [HW,ACPI]
+                       Use timer override. For some broken Nvidia NF5 boards
+                       that require a timer override, but don't have
+                       HPET
  
        acpi_dbg_layer= [HW,ACPI]
                        Format: <int>
                                machine check when some devices' config space
                                is read. But various workarounds are disabled
                                and some IOMMU drivers will not work.
+               bfsort          Sort PCI devices into breadth-first order.
+                               This sorting is done to get a device
+                               order compatible with older (<= 2.4) kernels.
+               nobfsort        Don't sort PCI devices into breadth-first order.
        pcmv=           [HW,PCMCIA] BadgePAD 4
  
        pd.             [PARIDE]
  
        scsi_logging=   [SCSI]
  
 +      scsi_mod.scan=  [SCSI] sync (default) scans SCSI busses as they are
 +                      discovered.  async scans them in kernel threads,
 +                      allowing boot to proceed.  none ignores them, expecting
 +                      user space to do the scan.
 +
        selinux         [SELINUX] Disable or enable SELinux at boot time.
                        Format: { "0" | "1" }
                        See security/selinux/Kconfig help text.
diff --combined block/scsi_ioctl.c
index ac63964b72421d591f933dafcdc51220f67f3c43,e55a756214375577ffa942dbc239fe79e87985fe..dcd9c71fe8d3adddd115396f17d8644100ba3540
@@@ -246,10 -246,10 +246,10 @@@ static int sg_io(struct file *file, req
                switch (hdr->dxfer_direction) {
                default:
                        return -EINVAL;
-               case SG_DXFER_TO_FROM_DEV:
                case SG_DXFER_TO_DEV:
                        writing = 1;
                        break;
+               case SG_DXFER_TO_FROM_DEV:
                case SG_DXFER_FROM_DEV:
                        break;
                }
         * fill in request structure
         */
        rq->cmd_len = hdr->cmd_len;
+       memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
        memcpy(rq->cmd, cmd, hdr->cmd_len);
-       if (sizeof(rq->cmd) != hdr->cmd_len)
-               memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);
  
        memset(sense, 0, sizeof(sense));
        rq->sense = sense;
        if (rq->bio)
                blk_queue_bounce(q, &rq->bio);
  
 -      rq->timeout = (hdr->timeout * HZ) / 1000;
 +      rq->timeout = msecs_to_jiffies(hdr->timeout);
        if (!rq->timeout)
                rq->timeout = q->sg_timeout;
        if (!rq->timeout)
diff --combined drivers/scsi/BusLogic.c
index 689dc4cc789c00110fb0161c75b1b08c669e7cf7,cdd03372478617f3e13a2220cc715fb836f7d0b7..3075204915c867b532509788e18668393840dd0b
@@@ -2186,21 -2186,21 +2186,21 @@@ static int __init BusLogic_init(void
  
        if (BusLogic_ProbeOptions.NoProbe)
                return -ENODEV;
 -      BusLogic_ProbeInfoList = (struct BusLogic_ProbeInfo *)
 -          kmalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_ATOMIC);
 +      BusLogic_ProbeInfoList =
 +          kzalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_KERNEL);
        if (BusLogic_ProbeInfoList == NULL) {
                BusLogic_Error("BusLogic: Unable to allocate Probe Info List\n", NULL);
                return -ENOMEM;
        }
 -      memset(BusLogic_ProbeInfoList, 0, BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo));
 -      PrototypeHostAdapter = (struct BusLogic_HostAdapter *)
 -          kmalloc(sizeof(struct BusLogic_HostAdapter), GFP_ATOMIC);
 +
 +      PrototypeHostAdapter =
 +          kzalloc(sizeof(struct BusLogic_HostAdapter), GFP_KERNEL);
        if (PrototypeHostAdapter == NULL) {
                kfree(BusLogic_ProbeInfoList);
                BusLogic_Error("BusLogic: Unable to allocate Prototype " "Host Adapter\n", NULL);
                return -ENOMEM;
        }
 -      memset(PrototypeHostAdapter, 0, sizeof(struct BusLogic_HostAdapter));
 +
  #ifdef MODULE
        if (BusLogic != NULL)
                BusLogic_Setup(BusLogic);
@@@ -3600,5 -3600,16 +3600,16 @@@ static void __exit BusLogic_exit(void
  
  __setup("BusLogic=", BusLogic_Setup);
  
+ static struct pci_device_id BusLogic_pci_tbl[] __devinitdata = {
+       { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+       { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+       { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+       { }
+ };
+ MODULE_DEVICE_TABLE(pci, BusLogic_pci_tbl);
  module_init(BusLogic_init);
  module_exit(BusLogic_exit);
index 3a5bbba3976eaf164c8fa892be47c77193bf2a7e,57c5ba4043f29f99c9410ce039114bc2edc94134..42302ef05ee56579777d2854e7d6e0c6942dfdf2
@@@ -724,15 -724,6 +724,15 @@@ static void asd_free_queues(struct asd_
  
        list_for_each_safe(pos, n, &pending) {
                struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list);
 +              /*
 +               * Delete unexpired ascb timers.  This may happen if we issue
 +               * a CONTROL PHY scb to an adapter and rmmod before the scb
 +               * times out.  Apparently we don't wait for the CONTROL PHY
 +               * to complete, so it doesn't matter if we kill the timer.
 +               */
 +              del_timer_sync(&ascb->timer);
 +              WARN_ON(ascb->scb->header.opcode != CONTROL_PHY);
 +
                list_del_init(pos);
                ASD_DPRINTK("freeing from pending\n");
                asd_ascb_free(ascb);
@@@ -795,8 -786,6 +795,6 @@@ static void asd_remove_driver_attrs(str
  }
  
  static struct sas_domain_function_template aic94xx_transport_functions = {
-       .lldd_port_formed       = asd_update_port_links,
        .lldd_dev_found         = asd_dev_found,
        .lldd_dev_gone          = asd_dev_gone,
  
@@@ -823,6 -812,8 +821,8 @@@ static const struct pci_device_id aic94
         0, 0, 1},
        {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR1E),
         0, 0, 1},
+       {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR1F),
+        0, 0, 1},
        {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR30),
         0, 0, 2},
        {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR32),
index 52c6ea4fbf714d350250dcaafed00a53e701cbc4,b15caf1c8fa21c2ffed74c9b1d2ed73259133ae3..14d5d8c2ee1334f1a62b4c2016960209d20892c4
@@@ -25,7 -25,6 +25,7 @@@
   */
  
  #include <linux/pci.h>
 +#include <scsi/scsi_host.h>
  
  #include "aic94xx.h"
  #include "aic94xx_reg.h"
@@@ -169,6 -168,70 +169,70 @@@ static inline void asd_get_attached_sas
        }
  }
  
+ static void asd_form_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
+ {
+       int i;
+       struct asd_port *free_port = NULL;
+       struct asd_port *port;
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+       unsigned long flags;
+       spin_lock_irqsave(&asd_ha->asd_ports_lock, flags);
+       if (!phy->asd_port) {
+               for (i = 0; i < ASD_MAX_PHYS; i++) {
+                       port = &asd_ha->asd_ports[i];
+                       /* Check for wide port */
+                       if (port->num_phys > 0 &&
+                           memcmp(port->sas_addr, sas_phy->sas_addr,
+                                  SAS_ADDR_SIZE) == 0 &&
+                           memcmp(port->attached_sas_addr,
+                                  sas_phy->attached_sas_addr,
+                                  SAS_ADDR_SIZE) == 0) {
+                               break;
+                       }
+                       /* Find a free port */
+                       if (port->num_phys == 0 && free_port == NULL) {
+                               free_port = port;
+                       }
+               }
+               /* Use a free port if this doesn't form a wide port */
+               if (i >= ASD_MAX_PHYS) {
+                       port = free_port;
+                       BUG_ON(!port);
+                       memcpy(port->sas_addr, sas_phy->sas_addr,
+                              SAS_ADDR_SIZE);
+                       memcpy(port->attached_sas_addr,
+                              sas_phy->attached_sas_addr,
+                              SAS_ADDR_SIZE);
+               }
+               port->num_phys++;
+               port->phy_mask |= (1U << sas_phy->id);
+               phy->asd_port = port;
+       }
+       ASD_DPRINTK("%s: updating phy_mask 0x%x for phy%d\n",
+                   __FUNCTION__, phy->asd_port->phy_mask, sas_phy->id);
+       asd_update_port_links(asd_ha, phy);
+       spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags);
+ }
+ static void asd_deform_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
+ {
+       struct asd_port *port = phy->asd_port;
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+       unsigned long flags;
+       spin_lock_irqsave(&asd_ha->asd_ports_lock, flags);
+       if (port) {
+               port->num_phys--;
+               port->phy_mask &= ~(1U << sas_phy->id);
+               phy->asd_port = NULL;
+       }
+       spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags);
+ }
  static inline void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
                                           struct done_list_struct *dl,
                                           int edb_id, int phy_id)
        asd_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
        spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
        asd_dump_frame_rcvd(phy, dl);
+       asd_form_port(ascb->ha, phy);
        sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED);
  }
  
@@@ -198,6 -262,7 +263,7 @@@ static inline void asd_link_reset_err_t
        struct asd_ha_struct *asd_ha = ascb->ha;
        struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
        struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+       struct asd_phy *phy = &asd_ha->phys[phy_id];
        u8 lr_error = dl->status_block[1];
        u8 retries_left = dl->status_block[2];
  
  
        asd_turn_led(asd_ha, phy_id, 0);
        sas_phy_disconnected(sas_phy);
+       asd_deform_port(asd_ha, phy);
        sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
  
        if (retries_left == 0) {
@@@ -249,6 -315,8 +316,8 @@@ static inline void asd_primitive_rcvd_t
        unsigned long flags;
        struct sas_ha_struct *sas_ha = &ascb->ha->sas_ha;
        struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+       struct asd_ha_struct *asd_ha = ascb->ha;
+       struct asd_phy *phy = &asd_ha->phys[phy_id];
        u8  reg  = dl->status_block[1];
        u32 cont = dl->status_block[2] << ((reg & 3)*8);
  
                                    phy_id);
                        /* The sequencer disables all phys on that port.
                         * We have to re-enable the phys ourselves. */
+                       asd_deform_port(asd_ha, phy);
                        sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
                        break;
  
@@@ -343,39 -412,6 +413,39 @@@ void asd_invalidate_edb(struct asd_asc
        }
  }
  
 +/* hard reset a phy later */
 +static void do_phy_reset_later(void *data)
 +{
 +      struct sas_phy *sas_phy = data;
 +      int error;
 +
 +      ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__,
 +                  sas_phy->identify.phy_identifier);
 +      /* Reset device port */
 +      error = sas_phy_reset(sas_phy, 1);
 +      if (error)
 +              ASD_DPRINTK("%s: Hard reset of phy %d failed (%d).\n",
 +                          __FUNCTION__, sas_phy->identify.phy_identifier, error);
 +}
 +
 +static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost)
 +{
 +      INIT_WORK(&sas_phy->reset_work, do_phy_reset_later, sas_phy);
 +      queue_work(shost->work_q, &sas_phy->reset_work);
 +}
 +
 +/* start up the ABORT TASK tmf... */
 +static void task_kill_later(struct asd_ascb *ascb)
 +{
 +      struct asd_ha_struct *asd_ha = ascb->ha;
 +      struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
 +      struct Scsi_Host *shost = sas_ha->core.shost;
 +      struct sas_task *task = ascb->uldd_task;
 +
 +      INIT_WORK(&task->abort_work, (void (*)(void *))sas_task_abort, task);
 +      queue_work(shost->work_q, &task->abort_work);
 +}
 +
  static void escb_tasklet_complete(struct asd_ascb *ascb,
                                  struct done_list_struct *dl)
  {
        u8  sb_opcode = dl->status_block[0];
        int phy_id = sb_opcode & DL_PHY_MASK;
        struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+       struct asd_phy *phy = &asd_ha->phys[phy_id];
  
        if (edb > 6 || edb < 0) {
                ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
                            ascb->scb->header.opcode);
        }
  
 +      /* Catch these before we mask off the sb_opcode bits */
 +      switch (sb_opcode) {
 +      case REQ_TASK_ABORT: {
 +              struct asd_ascb *a, *b;
 +              u16 tc_abort;
 +
 +              tc_abort = *((u16*)(&dl->status_block[1]));
 +              tc_abort = le16_to_cpu(tc_abort);
 +
 +              ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
 +                          __FUNCTION__, dl->status_block[3]);
 +
 +              /* Find the pending task and abort it. */
 +              list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list)
 +                      if (a->tc_index == tc_abort) {
 +                              task_kill_later(a);
 +                              break;
 +                      }
 +              goto out;
 +      }
 +      case REQ_DEVICE_RESET: {
 +              struct Scsi_Host *shost = sas_ha->core.shost;
 +              struct sas_phy *dev_phy;
 +              struct asd_ascb *a;
 +              u16 conn_handle;
 +
 +              conn_handle = *((u16*)(&dl->status_block[1]));
 +              conn_handle = le16_to_cpu(conn_handle);
 +
 +              ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__,
 +                          dl->status_block[3]);
 +
 +              /* Kill all pending tasks and reset the device */
 +              dev_phy = NULL;
 +              list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
 +                      struct sas_task *task;
 +                      struct domain_device *dev;
 +                      u16 x;
 +
 +                      task = a->uldd_task;
 +                      if (!task)
 +                              continue;
 +                      dev = task->dev;
 +
 +                      x = (unsigned long)dev->lldd_dev;
 +                      if (x == conn_handle) {
 +                              dev_phy = dev->port->phy;
 +                              task_kill_later(a);
 +                      }
 +              }
 +
 +              /* Reset device port */
 +              if (!dev_phy) {
 +                      ASD_DPRINTK("%s: No pending commands; can't reset.\n",
 +                                  __FUNCTION__);
 +                      goto out;
 +              }
 +              phy_reset_later(dev_phy, shost);
 +              goto out;
 +      }
 +      case SIGNAL_NCQ_ERROR:
 +              ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __FUNCTION__);
 +              goto out;
 +      case CLEAR_NCQ_ERROR:
 +              ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __FUNCTION__);
 +              goto out;
 +      }
 +
        sb_opcode &= ~DL_PHY_MASK;
  
        switch (sb_opcode) {
                asd_turn_led(asd_ha, phy_id, 0);
                /* the device is gone */
                sas_phy_disconnected(sas_phy);
+               asd_deform_port(asd_ha, phy);
                sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
                break;
 -      case REQ_TASK_ABORT:
 -              ASD_DPRINTK("%s: phy%d: REQ_TASK_ABORT\n", __FUNCTION__,
 -                          phy_id);
 -              break;
 -      case REQ_DEVICE_RESET:
 -              ASD_DPRINTK("%s: phy%d: REQ_DEVICE_RESET\n", __FUNCTION__,
 -                          phy_id);
 -              break;
 -      case SIGNAL_NCQ_ERROR:
 -              ASD_DPRINTK("%s: phy%d: SIGNAL_NCQ_ERROR\n", __FUNCTION__,
 -                          phy_id);
 -              break;
 -      case CLEAR_NCQ_ERROR:
 -              ASD_DPRINTK("%s: phy%d: CLEAR_NCQ_ERROR\n", __FUNCTION__,
 -                          phy_id);
 -              break;
        default:
                ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
                            phy_id, sb_opcode);
  
                break;
        }
 -
 +out:
        asd_invalidate_edb(ascb, edb);
  }
  
index 5e70c49fdf840ce24301fdf5965f1a6a104532f2,208607be78c7267a5770974b8d4be12d4b839b87..3eb4cd2cbc7838baef9dc3ee199528f269451d1d
@@@ -61,9 -61,9 +61,9 @@@ MODULE_PARM_DESC(ql2xallocfwdump
                "during HBA initialization.  Memory allocation requirements "
                "vary by ISP type.  Default is 1 - allocate memory.");
  
- int qla2_extended_error_logging;
- module_param(qla2_extended_error_logging, int, S_IRUGO|S_IRUSR);
- MODULE_PARM_DESC(qla2_extended_error_logging,
+ int ql2xextended_error_logging;
+ module_param(ql2xextended_error_logging, int, S_IRUGO|S_IRUSR);
+ MODULE_PARM_DESC(ql2xextended_error_logging,
                "Option to enable extended error logging, "
                "Default is 0 - no logging. 1 - log errors.");
  
@@@ -77,6 -77,19 +77,19 @@@ MODULE_PARM_DESC(ql2xfdmienable
                "Enables FDMI registratons "
                "Default is 0 - no FDMI. 1 - perfom FDMI.");
  
+ #define MAX_Q_DEPTH    32
+ static int ql2xmaxqdepth = MAX_Q_DEPTH;
+ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
+ MODULE_PARM_DESC(ql2xmaxqdepth,
+               "Maximum queue depth to report for target devices.");
+ int ql2xqfullrampup = 120;
+ module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
+ MODULE_PARM_DESC(ql2xqfullrampup,
+               "Number of seconds to wait to begin to ramp-up the queue "
+               "depth for a device after a queue-full condition has been "
+               "detected.  Default is 120 seconds.");
  /*
   * SCSI host template entry points
   */
@@@ -274,7 -287,7 +287,7 @@@ qla24xx_pci_info_str(struct scsi_qla_ho
        return str;
  }
  
 -char *
 +static char *
  qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
  {
        char un_str[10];
        return (str);
  }
  
 -char *
 +static char *
  qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
  {
        sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
@@@ -621,7 -634,7 +634,7 @@@ qla2x00_block_error_handler(struct scsi
  * Note:
  *    Only return FAILED if command not returned by firmware.
  **************************************************************************/
 -int
 +static int
  qla2xxx_eh_abort(struct scsi_cmnd *cmd)
  {
        scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@@ -758,7 -771,7 +771,7 @@@ qla2x00_eh_wait_for_pending_target_comm
  *    SUCCESS/FAILURE (defined as macro in scsi.h).
  *
  **************************************************************************/
 -int
 +static int
  qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
  {
        scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@@ -889,7 -902,7 +902,7 @@@ qla2x00_eh_wait_for_pending_commands(sc
  *    SUCCESS/FAILURE (defined as macro in scsi.h).
  *
  **************************************************************************/
 -int
 +static int
  qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
  {
        scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@@ -950,7 -963,7 +963,7 @@@ eh_bus_reset_done
  *
  * Note:
  **************************************************************************/
 -int
 +static int
  qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
  {
        scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@@ -1104,9 -1117,9 +1117,9 @@@ qla2xxx_slave_configure(struct scsi_dev
        struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
  
        if (sdev->tagged_supported)
-               scsi_activate_tcq(sdev, 32);
+               scsi_activate_tcq(sdev, ha->max_q_depth);
        else
-               scsi_deactivate_tcq(sdev, 32);
+               scsi_deactivate_tcq(sdev, ha->max_q_depth);
  
        rport->dev_loss_tmo = ha->port_down_retry_count + 5;
  
@@@ -1413,6 -1426,10 +1426,10 @@@ qla2x00_probe_one(struct pci_dev *pdev
        ha->link_data_rate = PORT_SPEED_UNKNOWN;
        ha->optrom_size = OPTROM_SIZE_2300;
  
+       ha->max_q_depth = MAX_Q_DEPTH;
+       if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
+               ha->max_q_depth = ql2xmaxqdepth;
        /* Assign ISP specific operations. */
        ha->isp_ops.pci_config          = qla2100_pci_config;
        ha->isp_ops.reset_chip          = qla2x00_reset_chip;
@@@ -1712,8 -1729,10 +1729,10 @@@ qla2x00_free_device(scsi_qla_host_t *ha
        if (ha->eft)
                qla2x00_trace_control(ha, TC_DISABLE, 0, 0);
  
+       ha->flags.online = 0;
        /* Stop currently executing firmware. */
-       qla2x00_stop_firmware(ha);
+       qla2x00_try_to_stop_firmware(ha);
  
        /* turn-off interrupts on the card */
        if (ha->interrupts_on)
  
        qla2x00_mem_free(ha);
  
-       ha->flags.online = 0;
        /* Detach interrupts */
        if (ha->host->irq)
                free_irq(ha->host->irq, ha);
@@@ -2697,7 -2714,7 +2714,7 @@@ qla2x00_module_init(void
  
        /* Derive version string. */
        strcpy(qla2x00_version_str, QLA2XXX_VERSION);
-       if (qla2_extended_error_logging)
+       if (ql2xextended_error_logging)
                strcat(qla2x00_version_str, "-debug");
  
        qla2xxx_transport_template =
diff --combined drivers/scsi/scsi_lib.c
index ee35a62bb7a2c30828b448f62927c277e768775b,3ac4890ce086cfab2ff6519c1f9858760b764de5..2f12f9f12fcb269f5d55a11d9048b6d808bf5f12
@@@ -410,6 -410,7 +410,7 @@@ int scsi_execute_async(struct scsi_devi
                goto free_req;
  
        req->cmd_len = cmd_len;
+       memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
        memcpy(req->cmd, cmd, req->cmd_len);
        req->sense = sioc->sense;
        req->sense_len = 0;
@@@ -995,14 -996,25 +996,14 @@@ static int scsi_init_io(struct scsi_cmn
        int                count;
  
        /*
 -       * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
 -       */
 -      if (blk_pc_request(req) && !req->bio) {
 -              cmd->request_bufflen = req->data_len;
 -              cmd->request_buffer = req->data;
 -              req->buffer = req->data;
 -              cmd->use_sg = 0;
 -              return 0;
 -      }
 -
 -      /*
 -       * we used to not use scatter-gather for single segment request,
 +       * We used to not use scatter-gather for single segment request,
         * but now we do (it makes highmem I/O easier to support without
         * kmapping pages)
         */
        cmd->use_sg = req->nr_phys_segments;
  
        /*
 -       * if sg table allocation fails, requeue request later.
 +       * If sg table allocation fails, requeue request later.
         */
        sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
        if (unlikely(!sgpnt)) {
                return BLKPREP_DEFER;
        }
  
 +      req->buffer = NULL;
        cmd->request_buffer = (char *) sgpnt;
 -      cmd->request_bufflen = req->nr_sectors << 9;
        if (blk_pc_request(req))
                cmd->request_bufflen = req->data_len;
 -      req->buffer = NULL;
 +      else
 +              cmd->request_bufflen = req->nr_sectors << 9;
  
        /* 
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
 -
 -      /*
 -       * mapped well, send it off
 -       */
        if (likely(count <= cmd->use_sg)) {
                cmd->use_sg = count;
 -              return 0;
 +              return BLKPREP_OK;
        }
  
        printk(KERN_ERR "Incorrect number of segments after building list\n");
@@@ -1054,27 -1069,6 +1055,27 @@@ static int scsi_issue_flush_fn(request_
        return -EOPNOTSUPP;
  }
  
 +static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
 +              struct request *req)
 +{
 +      struct scsi_cmnd *cmd;
 +
 +      if (!req->special) {
 +              cmd = scsi_get_command(sdev, GFP_ATOMIC);
 +              if (unlikely(!cmd))
 +                      return NULL;
 +              req->special = cmd;
 +      } else {
 +              cmd = req->special;
 +      }
 +
 +      /* pull a tag out of the request if we have one */
 +      cmd->tag = req->tag;
 +      cmd->request = req;
 +
 +      return cmd;
 +}
 +
  static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
  {
        BUG_ON(!blk_pc_request(cmd->request));
        scsi_io_completion(cmd, cmd->request_bufflen);
  }
  
 -static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
 +static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
  {
 -      struct request *req = cmd->request;
 +      struct scsi_cmnd *cmd;
 +
 +      cmd = scsi_get_cmd_from_req(sdev, req);
 +      if (unlikely(!cmd))
 +              return BLKPREP_DEFER;
 +
 +      /*
 +       * BLOCK_PC requests may transfer data, in which case they must
 +       * a bio attached to them.  Or they might contain a SCSI command
 +       * that does not transfer data, in which case they may optionally
 +       * submit a request without an attached bio.
 +       */
 +      if (req->bio) {
 +              int ret;
 +
 +              BUG_ON(!req->nr_phys_segments);
 +
 +              ret = scsi_init_io(cmd);
 +              if (unlikely(ret))
 +                      return ret;
 +      } else {
 +              BUG_ON(req->data_len);
 +              BUG_ON(req->data);
 +
 +              cmd->request_bufflen = 0;
 +              cmd->request_buffer = NULL;
 +              cmd->use_sg = 0;
 +              req->buffer = NULL;
 +      }
  
-       BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
+       BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
        memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
        cmd->cmd_len = req->cmd_len;
        if (!req->data_len)
        cmd->allowed = req->retries;
        cmd->timeout_per_command = req->timeout;
        cmd->done = scsi_blk_pc_done;
 +      return BLKPREP_OK;
  }
  
 -static int scsi_prep_fn(struct request_queue *q, struct request *req)
 +/*
 + * Setup a REQ_TYPE_FS command.  These are simple read/write request
 + * from filesystems that still need to be translated to SCSI CDBs from
 + * the ULD.
 + */
 +static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
  {
 -      struct scsi_device *sdev = q->queuedata;
        struct scsi_cmnd *cmd;
 -      int specials_only = 0;
 +      struct scsi_driver *drv;
 +      int ret;
  
        /*
 -       * Just check to see if the device is online.  If it isn't, we
 -       * refuse to process any commands.  The device must be brought
 -       * online before trying any recovery commands
 +       * Filesystem requests must transfer data.
         */
 -      if (unlikely(!scsi_device_online(sdev))) {
 -              sdev_printk(KERN_ERR, sdev,
 -                          "rejecting I/O to offline device\n");
 -              goto kill;
 -      }
 -      if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
 -              /* OK, we're not in a running state don't prep
 -               * user commands */
 -              if (sdev->sdev_state == SDEV_DEL) {
 -                      /* Device is fully deleted, no commands
 -                       * at all allowed down */
 -                      sdev_printk(KERN_ERR, sdev,
 -                                  "rejecting I/O to dead device\n");
 -                      goto kill;
 -              }
 -              /* OK, we only allow special commands (i.e. not
 -               * user initiated ones */
 -              specials_only = sdev->sdev_state;
 +      BUG_ON(!req->nr_phys_segments);
 +
 +      cmd = scsi_get_cmd_from_req(sdev, req);
 +      if (unlikely(!cmd))
 +              return BLKPREP_DEFER;
 +
 +      ret = scsi_init_io(cmd);
 +      if (unlikely(ret))
 +              return ret;
 +
 +      /*
 +       * Initialize the actual SCSI command for this request.
 +       */
 +      drv = *(struct scsi_driver **)req->rq_disk->private_data;
 +      if (unlikely(!drv->init_command(cmd))) {
 +              scsi_release_buffers(cmd);
 +              scsi_put_command(cmd);
 +              return BLKPREP_KILL;
        }
  
 +      return BLKPREP_OK;
 +}
 +
 +static int scsi_prep_fn(struct request_queue *q, struct request *req)
 +{
 +      struct scsi_device *sdev = q->queuedata;
 +      int ret = BLKPREP_OK;
 +
        /*
 -       * Find the actual device driver associated with this command.
 -       * The SPECIAL requests are things like character device or
 -       * ioctls, which did not originate from ll_rw_blk.  Note that
 -       * the special field is also used to indicate the cmd for
 -       * the remainder of a partially fulfilled request that can 
 -       * come up when there is a medium error.  We have to treat
 -       * these two cases differently.  We differentiate by looking
 -       * at request->cmd, as this tells us the real story.
 +       * If the device is not in running state we will reject some
 +       * or all commands.
         */
 -      if (blk_special_request(req) && req->special)
 -              cmd = req->special;
 -      else if (blk_pc_request(req) || blk_fs_request(req)) {
 -              if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)){
 -                      if (specials_only == SDEV_QUIESCE ||
 -                          specials_only == SDEV_BLOCK)
 -                              goto defer;
 -                      
 +      if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
 +              switch (sdev->sdev_state) {
 +              case SDEV_OFFLINE:
 +                      /*
 +                       * If the device is offline we refuse to process any
 +                       * commands.  The device must be brought online
 +                       * before trying any recovery commands.
 +                       */
 +                      sdev_printk(KERN_ERR, sdev,
 +                                  "rejecting I/O to offline device\n");
 +                      ret = BLKPREP_KILL;
 +                      break;
 +              case SDEV_DEL:
 +                      /*
 +                       * If the device is fully deleted, we refuse to
 +                       * process any commands as well.
 +                       */
                        sdev_printk(KERN_ERR, sdev,
 -                                  "rejecting I/O to device being removed\n");
 -                      goto kill;
 +                                  "rejecting I/O to dead device\n");
 +                      ret = BLKPREP_KILL;
 +                      break;
 +              case SDEV_QUIESCE:
 +              case SDEV_BLOCK:
 +                      /*
  +               * If the device is blocked we defer normal commands.
 +                       */
 +                      if (!(req->cmd_flags & REQ_PREEMPT))
 +                              ret = BLKPREP_DEFER;
 +                      break;
 +              default:
 +                      /*
  +               * For any other state that is not fully online we only allow
 +                       * special commands.  In particular any user initiated
 +                       * command is not allowed.
 +                       */
 +                      if (!(req->cmd_flags & REQ_PREEMPT))
 +                              ret = BLKPREP_KILL;
 +                      break;
                }
 -                      
 -              /*
 -               * Now try and find a command block that we can use.
 -               */
 -              if (!req->special) {
 -                      cmd = scsi_get_command(sdev, GFP_ATOMIC);
 -                      if (unlikely(!cmd))
 -                              goto defer;
 -              } else
 -                      cmd = req->special;
 -              
 -              /* pull a tag out of the request if we have one */
 -              cmd->tag = req->tag;
 -      } else {
 -              blk_dump_rq_flags(req, "SCSI bad req");
 -              goto kill;
 +
 +              if (ret != BLKPREP_OK)
 +                      goto out;
        }
 -      
 -      /* note the overloading of req->special.  When the tag
 -       * is active it always means cmd.  If the tag goes
 -       * back for re-queueing, it may be reset */
 -      req->special = cmd;
 -      cmd->request = req;
 -      
 -      /*
 -       * FIXME: drop the lock here because the functions below
 -       * expect to be called without the queue lock held.  Also,
 -       * previously, we dequeued the request before dropping the
 -       * lock.  We hope REQ_STARTED prevents anything untoward from
 -       * happening now.
 -       */
 -      if (blk_fs_request(req) || blk_pc_request(req)) {
 -              int ret;
  
 +      switch (req->cmd_type) {
 +      case REQ_TYPE_BLOCK_PC:
 +              ret = scsi_setup_blk_pc_cmnd(sdev, req);
 +              break;
 +      case REQ_TYPE_FS:
 +              ret = scsi_setup_fs_cmnd(sdev, req);
 +              break;
 +      default:
                /*
 -               * This will do a couple of things:
 -               *  1) Fill in the actual SCSI command.
 -               *  2) Fill in any other upper-level specific fields
 -               * (timeout).
 +               * All other command types are not supported.
                 *
 -               * If this returns 0, it means that the request failed
 -               * (reading past end of disk, reading offline device,
 -               * etc).   This won't actually talk to the device, but
 -               * some kinds of consistency checking may cause the     
 -               * request to be rejected immediately.
 +               * Note that these days the SCSI subsystem does not use
 +               * REQ_TYPE_SPECIAL requests anymore.  These are only used
 +               * (directly or via blk_insert_request) by non-SCSI drivers.
                 */
 +              blk_dump_rq_flags(req, "SCSI bad req");
 +              ret = BLKPREP_KILL;
 +              break;
 +      }
  
 -              /* 
 -               * This sets up the scatter-gather table (allocating if
 -               * required).
 -               */
 -              ret = scsi_init_io(cmd);
 -              switch(ret) {
 -                      /* For BLKPREP_KILL/DEFER the cmd was released */
 -              case BLKPREP_KILL:
 -                      goto kill;
 -              case BLKPREP_DEFER:
 -                      goto defer;
 -              }
 -              
 + out:
 +      switch (ret) {
 +      case BLKPREP_KILL:
 +              req->errors = DID_NO_CONNECT << 16;
 +              break;
 +      case BLKPREP_DEFER:
                /*
 -               * Initialize the actual SCSI command for this request.
 +               * If we defer, the elv_next_request() returns NULL, but the
 +               * queue must be restarted, so we plug here if no returning
 +               * command will automatically do that.
                 */
 -              if (blk_pc_request(req)) {
 -                      scsi_setup_blk_pc_cmnd(cmd);
 -              } else if (req->rq_disk) {
 -                      struct scsi_driver *drv;
 -
 -                      drv = *(struct scsi_driver **)req->rq_disk->private_data;
 -                      if (unlikely(!drv->init_command(cmd))) {
 -                              scsi_release_buffers(cmd);
 -                              scsi_put_command(cmd);
 -                              goto kill;
 -                      }
 -              }
 +              if (sdev->device_busy == 0)
 +                      blk_plug_device(q);
 +              break;
 +      default:
 +              req->cmd_flags |= REQ_DONTPREP;
        }
  
 -      /*
 -       * The request is now prepped, no need to come back here
 -       */
 -      req->cmd_flags |= REQ_DONTPREP;
 -      return BLKPREP_OK;
 -
 - defer:
 -      /* If we defer, the elv_next_request() returns NULL, but the
 -       * queue must be restarted, so we plug here if no returning
 -       * command will automatically do that. */
 -      if (sdev->device_busy == 0)
 -              blk_plug_device(q);
 -      return BLKPREP_DEFER;
 - kill:
 -      req->errors = DID_NO_CONNECT << 16;
 -      return BLKPREP_KILL;
 +      return ret;
  }
  
  /*
diff --combined drivers/scsi/scsi_scan.c
index 148e24cc3222a50f72ce480a0681865a7ceba0e1,94a274645f6f36819c29aadb3aeff7b5290581e8..aa1b1e0e9d22d7b394dd085f66dd2a8302c36e8b
@@@ -29,9 -29,7 +29,9 @@@
  #include <linux/moduleparam.h>
  #include <linux/init.h>
  #include <linux/blkdev.h>
 -#include <asm/semaphore.h>
 +#include <linux/delay.h>
 +#include <linux/kthread.h>
 +#include <linux/spinlock.h>
  
  #include <scsi/scsi.h>
  #include <scsi/scsi_cmnd.h>
@@@ -89,11 -87,6 +89,11 @@@ module_param_named(max_luns, max_scsi_l
  MODULE_PARM_DESC(max_luns,
                 "last scsi LUN (should be between 1 and 2^32-1)");
  
 +static char scsi_scan_type[6] = "sync";
 +
 +module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
 +MODULE_PARM_DESC(scan, "sync, async or none");
 +
  /*
   * max_scsi_report_luns: the maximum number of LUNS that will be
   * returned from the REPORT LUNS command. 8 times this value must
@@@ -115,68 -108,6 +115,68 @@@ MODULE_PARM_DESC(inq_timeout
                 "Timeout (in seconds) waiting for devices to answer INQUIRY."
                 " Default is 5. Some non-compliant devices need more.");
  
 +static DEFINE_SPINLOCK(async_scan_lock);
 +static LIST_HEAD(scanning_hosts);
 +
 +struct async_scan_data {
 +      struct list_head list;
 +      struct Scsi_Host *shost;
 +      struct completion prev_finished;
 +};
 +
 +/**
 + * scsi_complete_async_scans - Wait for asynchronous scans to complete
 + *
 + * Asynchronous scans add themselves to the scanning_hosts list.  Once
 + * that list is empty, we know that the scans are complete.  Rather than
 + * waking up periodically to check the state of the list, we pretend to be
 + * a scanning task by adding ourselves at the end of the list and going to
 + * sleep.  When the task before us wakes us up, we take ourselves off the
 + * list and return.
 + */
 +int scsi_complete_async_scans(void)
 +{
 +      struct async_scan_data *data;
 +
 +      do {
 +              if (list_empty(&scanning_hosts))
 +                      return 0;
 +              /* If we can't get memory immediately, that's OK.  Just
 +               * sleep a little.  Even if we never get memory, the async
 +               * scans will finish eventually.
 +               */
 +              data = kmalloc(sizeof(*data), GFP_KERNEL);
 +              if (!data)
 +                      msleep(1);
 +      } while (!data);
 +
 +      data->shost = NULL;
 +      init_completion(&data->prev_finished);
 +
 +      spin_lock(&async_scan_lock);
 +      /* Check that there's still somebody else on the list */
 +      if (list_empty(&scanning_hosts))
 +              goto done;
 +      list_add_tail(&data->list, &scanning_hosts);
 +      spin_unlock(&async_scan_lock);
 +
 +      printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
 +      wait_for_completion(&data->prev_finished);
 +
 +      spin_lock(&async_scan_lock);
 +      list_del(&data->list);
 + done:
 +      spin_unlock(&async_scan_lock);
 +
 +      kfree(data);
 +      return 0;
 +}
 +
 +#ifdef MODULE
 +/* Only exported for the benefit of scsi_wait_scan */
 +EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
 +#endif
 +
  /**
   * scsi_unlock_floptical - unlock device via a special MODE SENSE command
   * @sdev:     scsi device to send command to
@@@ -688,7 -619,7 +688,7 @@@ static int scsi_probe_lun(struct scsi_d
   *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
   **/
  static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
 -              int *bflags)
 +              int *bflags, int async)
  {
        /*
         * XXX do not save the inquiry, since it can change underneath us,
         * scanning run at their own risk, or supply a user level program
         * that can correctly scan.
         */
-       sdev->inquiry = kmalloc(sdev->inquiry_len, GFP_ATOMIC);
-       if (sdev->inquiry == NULL) {
+       /*
+        * Copy at least 36 bytes of INQUIRY data, so that we don't
+        * dereference unallocated memory when accessing the Vendor,
+        * Product, and Revision strings.  Badly behaved devices may set
+        * the INQUIRY Additional Length byte to a small value, indicating
+        * these strings are invalid, but often they contain plausible data
+        * nonetheless.  It doesn't matter if the device sent < 36 bytes
+        * total, since scsi_probe_lun() initializes inq_result with 0s.
+        */
+       sdev->inquiry = kmemdup(inq_result,
+                               max_t(size_t, sdev->inquiry_len, 36),
+                               GFP_ATOMIC);
+       if (sdev->inquiry == NULL)
                return SCSI_SCAN_NO_RESPONSE;
-       }
  
-       memcpy(sdev->inquiry, inq_result, sdev->inquiry_len);
        sdev->vendor = (char *) (sdev->inquiry + 8);
        sdev->model = (char *) (sdev->inquiry + 16);
        sdev->rev = (char *) (sdev->inquiry + 32);
         * register it and tell the rest of the kernel
         * about it.
         */
 -      if (scsi_sysfs_add_sdev(sdev) != 0)
 +      if (!async && scsi_sysfs_add_sdev(sdev) != 0)
                return SCSI_SCAN_NO_RESPONSE;
  
        return SCSI_SCAN_LUN_PRESENT;
@@@ -1033,7 -974,7 +1043,7 @@@ static int scsi_probe_and_add_lun(struc
                goto out_free_result;
        }
  
 -      res = scsi_add_lun(sdev, result, &bflags);
 +      res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
        if (res == SCSI_SCAN_LUN_PRESENT) {
                if (bflags & BLIST_KEY) {
                        sdev->lockable = 0;
@@@ -1533,9 -1474,6 +1543,9 @@@ void scsi_scan_target(struct device *pa
  {
        struct Scsi_Host *shost = dev_to_shost(parent);
  
 +      if (!shost->async_scan)
 +              scsi_complete_async_scans();
 +
        mutex_lock(&shost->scan_mutex);
        if (scsi_host_scan_allowed(shost))
                __scsi_scan_target(parent, channel, id, lun, rescan);
@@@ -1581,9 -1519,6 +1591,9 @@@ int scsi_scan_host_selected(struct Scsi
                "%s: <%u:%u:%u>\n",
                __FUNCTION__, channel, id, lun));
  
 +      if (!shost->async_scan)
 +              scsi_complete_async_scans();
 +
        if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
            ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
            ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
        return 0;
  }
  
 +static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
 +{
 +      struct scsi_device *sdev;
 +      shost_for_each_device(sdev, shost) {
 +              if (scsi_sysfs_add_sdev(sdev) != 0)
 +                      scsi_destroy_sdev(sdev);
 +      }
 +}
 +
 +/**
 + * scsi_prep_async_scan - prepare for an async scan
 + * @shost: the host which will be scanned
 + * Returns: a cookie to be passed to scsi_finish_async_scan()
 + *
 + * Tells the midlayer this host is going to do an asynchronous scan.
 + * It reserves the host's position in the scanning list and ensures
 + * that other asynchronous scans started after this one won't affect the
 + * ordering of the discovered devices.
 + */
 +struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
 +{
 +      struct async_scan_data *data;
 +
 +      if (strncmp(scsi_scan_type, "sync", 4) == 0)
 +              return NULL;
 +
 +      if (shost->async_scan) {
 +              printk("%s called twice for host %d", __FUNCTION__,
 +                              shost->host_no);
 +              dump_stack();
 +              return NULL;
 +      }
 +
 +      data = kmalloc(sizeof(*data), GFP_KERNEL);
 +      if (!data)
 +              goto err;
 +      data->shost = scsi_host_get(shost);
 +      if (!data->shost)
 +              goto err;
 +      init_completion(&data->prev_finished);
 +
 +      spin_lock(&async_scan_lock);
 +      shost->async_scan = 1;
 +      if (list_empty(&scanning_hosts))
 +              complete(&data->prev_finished);
 +      list_add_tail(&data->list, &scanning_hosts);
 +      spin_unlock(&async_scan_lock);
 +
 +      return data;
 +
 + err:
 +      kfree(data);
 +      return NULL;
 +}
 +
 +/**
 + * scsi_finish_async_scan - asynchronous scan has finished
 + * @data: cookie returned from earlier call to scsi_prep_async_scan()
 + *
 + * All the devices currently attached to this host have been found.
 + * This function announces all the devices it has found to the rest
 + * of the system.
 + */
 +void scsi_finish_async_scan(struct async_scan_data *data)
 +{
 +      struct Scsi_Host *shost;
 +
 +      if (!data)
 +              return;
 +
 +      shost = data->shost;
 +      if (!shost->async_scan) {
 +              printk("%s called twice for host %d", __FUNCTION__,
 +                              shost->host_no);
 +              dump_stack();
 +              return;
 +      }
 +
 +      wait_for_completion(&data->prev_finished);
 +
 +      scsi_sysfs_add_devices(shost);
 +
 +      spin_lock(&async_scan_lock);
 +      shost->async_scan = 0;
 +      list_del(&data->list);
 +      if (!list_empty(&scanning_hosts)) {
 +              struct async_scan_data *next = list_entry(scanning_hosts.next,
 +                              struct async_scan_data, list);
 +              complete(&next->prev_finished);
 +      }
 +      spin_unlock(&async_scan_lock);
 +
 +      scsi_host_put(shost);
 +      kfree(data);
 +}
 +
 +static int do_scan_async(void *_data)
 +{
 +      struct async_scan_data *data = _data;
 +      scsi_scan_host_selected(data->shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
 +                              SCAN_WILD_CARD, 0);
 +
 +      scsi_finish_async_scan(data);
 +      return 0;
 +}
 +
  /**
   * scsi_scan_host - scan the given adapter
   * @shost:    adapter to scan
   **/
  void scsi_scan_host(struct Scsi_Host *shost)
  {
 -      scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
 -                              SCAN_WILD_CARD, 0);
 +      struct async_scan_data *data;
 +
 +      if (strncmp(scsi_scan_type, "none", 4) == 0)
 +              return;
 +
 +      data = scsi_prep_async_scan(shost);
 +      if (!data) {
 +              scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
 +                                      SCAN_WILD_CARD, 0);
 +              return;
 +      }
 +      kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
  }
  EXPORT_SYMBOL(scsi_scan_host);
  
diff --combined drivers/scsi/st.c
index febfac97ad3853cbc20761762733b963d7077ab5,e1a52c525ed492155a8243722e4387428198548b..587274dd70596a18d7eda3f9326bd8220a9ec017
@@@ -9,7 -9,7 +9,7 @@@
     Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
     Michael Schaefer, J"org Weule, and Eric Youngdale.
  
 -   Copyright 1992 - 2005 Kai Makisara
 +   Copyright 1992 - 2006 Kai Makisara
     email Kai.Makisara@kolumbus.fi
  
     Some small formal changes - aeb, 950809
@@@ -17,7 -17,7 +17,7 @@@
     Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
   */
  
 -static const char *verstr = "20050830";
 +static const char *verstr = "20061107";
  
  #include <linux/module.h>
  
@@@ -999,7 -999,7 +999,7 @@@ static int check_tape(struct scsi_tape 
                        STp->min_block = ((STp->buffer)->b_data[4] << 8) |
                            (STp->buffer)->b_data[5];
                        if ( DEB( debugging || ) !STp->inited)
 -                              printk(KERN_WARNING
 +                              printk(KERN_INFO
                                         "%s: Block limits %d - %d bytes.\n", name,
                                         STp->min_block, STp->max_block);
                } else {
@@@ -1177,7 -1177,10 +1177,10 @@@ static int st_open(struct inode *inode
                goto err_out;
        if ((filp->f_flags & O_NONBLOCK) == 0 &&
            retval != CHKRES_READY) {
-               retval = (-EIO);
+               if (STp->ready == NO_TAPE)
+                       retval = (-ENOMEDIUM);
+               else
+                       retval = (-EIO);
                goto err_out;
        }
        return 0;
@@@ -1221,7 -1224,7 +1224,7 @@@ static int st_flush(struct file *filp, 
        }
  
        DEBC( if (STp->nbr_requests)
 -              printk(KERN_WARNING "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n",
 +              printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n",
                       name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable));
  
        if (STps->rw == ST_WRITING && !STp->pos_unknown) {
@@@ -4053,11 -4056,11 +4056,11 @@@ static int st_probe(struct device *dev
                        goto out_free_tape;
        }
  
 -      sdev_printk(KERN_WARNING, SDp,
 +      sdev_printk(KERN_NOTICE, SDp,
                    "Attached scsi tape %s\n", tape_name(tpnt));
 -      printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n",
 -             tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
 -             queue_dma_alignment(SDp->request_queue) + 1);
 +      sdev_printk(KERN_INFO, SDp, "%s: try direct i/o: %s (alignment %d B)\n",
 +                  tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
 +                  queue_dma_alignment(SDp->request_queue) + 1);
  
        return 0;