git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 13 May 2013 20:25:36 +0000 (13:25 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 13 May 2013 20:25:36 +0000 (13:25 -0700)
Pull networking fixes from David Miller:
 "Several small bug fixes all over:

   1) be2net driver uses wrong payload length when submitting MAC list
      get requests to the chip.  From Sathya Perla.

   2) Fix mwifiex memory leak on driver unload, from Amitkumar Karwar.

   3) Prevent random memory access in batman-adv, from Marek Lindner.

   4) batman-adv doesn't check for pskb_trim_rcsum() errors, also from
      Marek Lindner.

   5) Fix fec crashes on rapid link up/down, from Frank Li.

   6) Fix inner protocol grovelling in GSO, from Pravin B Shelar.

   7) Link event validation fix in qlcnic from Rajesh Borundia.

   8) Not all FEC chips can support checksum offload, fix from Shawn
      Guo.

   9) EXPORT_SYMBOL + inline doesn't make any sense, from Denis Efremov
      (see the sketch after this list).

  10) Fix race in passthru mode during device removal in macvlan, from
      Jiri Pirko.

  11) Fix RCU hash table lookup socket state race in ipv6, leading to
      NULL pointer derefs, from Eric Dumazet.

  12) Add several missing HAS_DMA kconfig dependencies, from Geert
      Uytterhoeven.

  13) Fix bogus PCI resource management in 3c59x driver, from Sergei
      Shtylyov.

  14) Fix info leak in ipv6 GRE tunnel driver, from Amerigo Wang.

  15) Fix device leak in ipv6 IPSEC policy layer, from Cong Wang.

  16) DMA mapping leak fix in qlge from Thadeu Lima de Souza Cascardo.

  17) Missing iounmap on probe failure in bna driver, from Wei Yongjun."
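
The point behind item 9 (and shortlog entries such as "ipv4: ip_output: remove
inline marking of EXPORT_SYMBOL functions" below) is that an exported symbol
needs a real, out-of-line definition that modules can link against, so an
inline marking on the same function buys nothing. A minimal before/after
sketch of the pattern, using a made-up helper rather than the actual kernel
code:

#include <linux/export.h>	/* EXPORT_SYMBOL() */

#if 0
/* Before: "inline" is pointless on an exported function -- the compiler
 * has to emit an out-of-line body anyway so the export table can point
 * at it, so the hint only misleads the reader.
 */
inline int example_helper(int x)
{
	return x * 2;
}
EXPORT_SYMBOL(example_helper);
#endif

/* After: drop the inline marking and keep the export unchanged. */
int example_helper(int x)
{
	return x * 2;
}
EXPORT_SYMBOL(example_helper);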

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (40 commits)
  bna: add missing iounmap() on error in bnad_init()
  qlge: fix dma map leak when the last chunk is not allocated
  xfrm6: release dev before returning error
  ipv6,gre: do not leak info to user-space
  virtio_net: use default napi weight by default
  emac: Fix EMAC soft reset on 460EX/GT
  3c59x: fix PCI resource management
  caif: CAIF_VIRTIO should depend on HAS_DMA
  net/ethernet: MACB should depend on HAS_DMA
  net/ethernet: ARM_AT91_ETHER should depend on HAS_DMA
  net/wireless: ATH9K should depend on HAS_DMA
  net/ethernet: STMMAC_ETH should depend on HAS_DMA
  net/ethernet: NET_CALXEDA_XGMAC should depend on HAS_DMA
  ipv6: do not clear pinet6 field
  macvlan: fix passthru mode race between dev removal and rx path
  ipv4: ip_output: remove inline marking of EXPORT_SYMBOL functions
  net/mlx4: Strengthen VLAN tags/priorities enforcement in VST mode
  net/mlx4_core: Add missing report on VST and spoof-checking dev caps
  net: fec: enable hardware checksum only on imx6q-fec
  qlcnic: Fix validation of link event command.
  ...

483 files changed:
Documentation/devicetree/bindings/mips/ralink.txt [new file with mode: 0644]
Documentation/devicetree/bindings/vendor-prefixes.txt
Makefile
arch/arc/Kconfig
arch/arc/include/asm/Kbuild
arch/arc/include/asm/cache.h
arch/arc/include/asm/cacheflush.h
arch/arc/include/asm/page.h
arch/arc/include/asm/pgtable.h
arch/arc/include/asm/shmparam.h [new file with mode: 0644]
arch/arc/include/asm/tlb.h
arch/arc/mm/Makefile
arch/arc/mm/cache_arc700.c
arch/arc/mm/mmap.c [new file with mode: 0644]
arch/arc/mm/tlb.c
arch/arc/plat-tb10x/Kconfig
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/sys32.S
arch/blackfin/Makefile
arch/blackfin/boot/Makefile
arch/blackfin/include/asm/atomic.h
arch/blackfin/include/asm/bfin_sdh.h
arch/blackfin/include/asm/bitops.h
arch/blackfin/include/asm/def_LPBlackfin.h
arch/blackfin/include/asm/mem_init.h
arch/blackfin/kernel/cplb-nompu/cplbinit.c
arch/blackfin/kernel/cplb-nompu/cplbmgr.c
arch/blackfin/kernel/cplbinfo.c
arch/blackfin/kernel/setup.c
arch/blackfin/mach-bf537/boards/stamp.c
arch/blackfin/mach-bf538/boards/ezkit.c
arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h
arch/m68k/Kconfig.cpu
arch/m68k/Kconfig.machine
arch/m68k/Makefile
arch/m68k/include/asm/commproc.h
arch/m68k/include/asm/dbg.h [deleted file]
arch/m68k/include/asm/dma.h
arch/m68k/include/asm/m53xxacr.h
arch/m68k/include/asm/m53xxsim.h [moved from arch/m68k/include/asm/m532xsim.h with 99% similarity]
arch/m68k/include/asm/m54xxacr.h
arch/m68k/include/asm/mcfgpio.h
arch/m68k/include/asm/mcfsim.h
arch/m68k/include/asm/mcftimer.h
arch/m68k/platform/coldfire/Makefile
arch/m68k/platform/coldfire/m53xx.c [moved from arch/m68k/platform/coldfire/m532x.c with 98% similarity]
arch/m68k/platform/coldfire/timers.c
arch/microblaze/configs/mmu_defconfig
arch/microblaze/include/asm/pci.h
arch/microblaze/include/asm/uaccess.h
arch/microblaze/kernel/cpu/cpuinfo.c
arch/microblaze/kernel/head.S
arch/microblaze/kernel/intc.c
arch/microblaze/kernel/process.c
arch/microblaze/mm/init.c
arch/microblaze/pci/pci-common.c
arch/mips/Kbuild
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/alchemy/Kconfig
arch/mips/alchemy/Platform
arch/mips/ar7/memory.c
arch/mips/ath79/setup.c
arch/mips/bcm63xx/Kconfig
arch/mips/bcm63xx/boards/board_bcm963xx.c
arch/mips/bcm63xx/clk.c
arch/mips/bcm63xx/cpu.c
arch/mips/bcm63xx/dev-flash.c
arch/mips/bcm63xx/dev-spi.c
arch/mips/bcm63xx/irq.c
arch/mips/bcm63xx/prom.c
arch/mips/bcm63xx/reset.c
arch/mips/bcm63xx/setup.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/configs/malta_defconfig
arch/mips/configs/malta_kvm_defconfig [new file with mode: 0644]
arch/mips/configs/malta_kvm_guest_defconfig [new file with mode: 0644]
arch/mips/configs/maltaaprp_defconfig [new file with mode: 0644]
arch/mips/configs/maltasmtc_defconfig [new file with mode: 0644]
arch/mips/configs/maltasmvp_defconfig [new file with mode: 0644]
arch/mips/configs/maltaup_defconfig [new file with mode: 0644]
arch/mips/configs/sead3_defconfig
arch/mips/configs/sead3micro_defconfig [new file with mode: 0644]
arch/mips/fw/lib/Makefile
arch/mips/fw/lib/cmdline.c [new file with mode: 0644]
arch/mips/include/asm/asm.h
arch/mips/include/asm/bootinfo.h
arch/mips/include/asm/branch.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/dma-coherence.h [new file with mode: 0644]
arch/mips/include/asm/dma-mapping.h
arch/mips/include/asm/fpu_emulator.h
arch/mips/include/asm/fw/fw.h [new file with mode: 0644]
arch/mips/include/asm/gic.h
arch/mips/include/asm/hazards.h
arch/mips/include/asm/inst.h
arch/mips/include/asm/irqflags.h
arch/mips/include/asm/kvm.h [new file with mode: 0644]
arch/mips/include/asm/kvm_host.h [new file with mode: 0644]
arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h [deleted file]
arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
arch/mips/include/asm/mach-bcm63xx/ioremap.h
arch/mips/include/asm/mach-generic/dma-coherence.h
arch/mips/include/asm/mach-generic/spaces.h
arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
arch/mips/include/asm/mach-ralink/mt7620.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt288x.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt305x.h
arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt3883.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
arch/mips/include/asm/mips-boards/generic.h
arch/mips/include/asm/mips-boards/prom.h [deleted file]
arch/mips/include/asm/mips_machine.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/netlogic/haldefs.h
arch/mips/include/asm/netlogic/mips-extns.h
arch/mips/include/asm/netlogic/xlp-hal/pic.h
arch/mips/include/asm/netlogic/xlp-hal/usb.h [deleted file]
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/processor.h
arch/mips/include/asm/prom.h
arch/mips/include/asm/sn/sn_private.h
arch/mips/include/asm/sn/types.h
arch/mips/include/asm/spinlock.h
arch/mips/include/asm/stackframe.h
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/time.h
arch/mips/include/asm/uaccess.h
arch/mips/include/asm/uasm.h
arch/mips/include/uapi/asm/inst.h
arch/mips/kernel/Makefile
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/binfmt_elfo32.c
arch/mips/kernel/branch.c
arch/mips/kernel/cevt-gic.c [new file with mode: 0644]
arch/mips/kernel/cevt-r4k.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/csrc-gic.c
arch/mips/kernel/genex.S
arch/mips/kernel/irq-gic.c
arch/mips/kernel/linux32.c
arch/mips/kernel/mips_machine.c
arch/mips/kernel/proc.c
arch/mips/kernel/process.c
arch/mips/kernel/prom.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/setup.c
arch/mips/kernel/signal.c
arch/mips/kernel/smp-mt.c
arch/mips/kernel/smp.c
arch/mips/kernel/smtc-asm.S
arch/mips/kernel/smtc.c
arch/mips/kernel/traps.c
arch/mips/kernel/unaligned.c
arch/mips/kvm/00README.txt [new file with mode: 0644]
arch/mips/kvm/Kconfig [new file with mode: 0644]
arch/mips/kvm/Makefile [new file with mode: 0644]
arch/mips/kvm/kvm_cb.c [new file with mode: 0644]
arch/mips/kvm/kvm_locore.S [new file with mode: 0644]
arch/mips/kvm/kvm_mips.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_comm.h [new file with mode: 0644]
arch/mips/kvm/kvm_mips_commpage.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_dyntrans.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_emul.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_int.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_int.h [new file with mode: 0644]
arch/mips/kvm/kvm_mips_opcode.h [new file with mode: 0644]
arch/mips/kvm/kvm_mips_stats.c [new file with mode: 0644]
arch/mips/kvm/kvm_tlb.c [new file with mode: 0644]
arch/mips/kvm/kvm_trap_emul.c [new file with mode: 0644]
arch/mips/kvm/trace.h [new file with mode: 0644]
arch/mips/lib/bitops.c
arch/mips/lib/dump_tlb.c
arch/mips/lib/memset.S
arch/mips/lib/mips-atomic.c
arch/mips/lib/r3k_dump_tlb.c
arch/mips/lib/strlen_user.S
arch/mips/lib/strncpy_user.S
arch/mips/lib/strnlen_user.S
arch/mips/math-emu/cp1emu.c
arch/mips/math-emu/dsemul.c
arch/mips/mm/Makefile
arch/mips/mm/c-r4k.c
arch/mips/mm/cache.c
arch/mips/mm/dma-default.c
arch/mips/mm/page.c
arch/mips/mm/tlb-r3k.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlb-r8k.c
arch/mips/mm/tlbex.c
arch/mips/mm/uasm-micromips.c [new file with mode: 0644]
arch/mips/mm/uasm-mips.c [new file with mode: 0644]
arch/mips/mm/uasm.c
arch/mips/mti-malta/Makefile
arch/mips/mti-malta/Platform
arch/mips/mti-malta/malta-cmdline.c [deleted file]
arch/mips/mti-malta/malta-display.c
arch/mips/mti-malta/malta-init.c
arch/mips/mti-malta/malta-int.c
arch/mips/mti-malta/malta-memory.c
arch/mips/mti-malta/malta-setup.c
arch/mips/mti-malta/malta-time.c
arch/mips/mti-sead3/Makefile
arch/mips/mti-sead3/leds-sead3.c
arch/mips/mti-sead3/sead3-cmdline.c [deleted file]
arch/mips/mti-sead3/sead3-console.c
arch/mips/mti-sead3/sead3-display.c
arch/mips/mti-sead3/sead3-init.c
arch/mips/mti-sead3/sead3-int.c
arch/mips/mti-sead3/sead3-setup.c
arch/mips/mti-sead3/sead3-time.c
arch/mips/netlogic/Kconfig
arch/mips/netlogic/common/smp.c
arch/mips/netlogic/dts/Makefile
arch/mips/netlogic/dts/xlp_evp.dts
arch/mips/netlogic/dts/xlp_svp.dts [new file with mode: 0644]
arch/mips/netlogic/xlp/nlm_hal.c
arch/mips/netlogic/xlp/setup.c
arch/mips/netlogic/xlp/usb-init.c
arch/mips/oprofile/op_model_mipsxx.c
arch/mips/pci/pci-ar71xx.c
arch/mips/pci/pci-ar724x.c
arch/mips/pci/pci-bcm63xx.c
arch/mips/powertv/init.c
arch/mips/powertv/init.h
arch/mips/powertv/memory.c
arch/mips/powertv/powertv_setup.c
arch/mips/ralink/Kconfig
arch/mips/ralink/Makefile
arch/mips/ralink/Platform
arch/mips/ralink/common.h
arch/mips/ralink/dts/Makefile
arch/mips/ralink/dts/mt7620a.dtsi [new file with mode: 0644]
arch/mips/ralink/dts/mt7620a_eval.dts [new file with mode: 0644]
arch/mips/ralink/dts/rt2880.dtsi [new file with mode: 0644]
arch/mips/ralink/dts/rt2880_eval.dts [new file with mode: 0644]
arch/mips/ralink/dts/rt3050.dtsi
arch/mips/ralink/dts/rt3052_eval.dts
arch/mips/ralink/dts/rt3883.dtsi [new file with mode: 0644]
arch/mips/ralink/dts/rt3883_eval.dts [new file with mode: 0644]
arch/mips/ralink/early_printk.c
arch/mips/ralink/irq.c
arch/mips/ralink/mt7620.c [new file with mode: 0644]
arch/mips/ralink/of.c
arch/mips/ralink/rt288x.c [new file with mode: 0644]
arch/mips/ralink/rt305x.c
arch/mips/ralink/rt3883.c [new file with mode: 0644]
arch/mips/sgi-ip27/ip27-klnuma.c
arch/mips/sgi-ip27/ip27-memory.c
arch/mips/sgi-ip27/ip27-timer.c
arch/parisc/kernel/sys_parisc32.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/sys_ppc32.c
arch/s390/kernel/compat_wrapper.S
arch/s390/kernel/syscalls.S
arch/sparc/kernel/sys32.S
arch/sparc/kernel/systbls_64.S
arch/unicore32/kernel/sys.c
arch/x86/ia32/sys_ia32.c
arch/x86/include/asm/sys_ia32.h
arch/x86/include/asm/syscalls.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/vm86_32.c
arch/x86/kvm/emulate.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/pci/xen.c
arch/x86/syscalls/syscall_32.tbl
arch/x86/xen/enlighten.c
arch/x86/xen/spinlock.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_encoder_slave.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-steelseries.c
drivers/idle/intel_idle.c
drivers/lguest/page_tables.c
drivers/md/dm-bufio.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-cache-policy.h
drivers/md/dm-cache-target.c
drivers/md/dm-mpath.c
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-table.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin-metadata.h
drivers/md/dm-thin.c
drivers/md/persistent-data/dm-space-map-disk.c
drivers/md/persistent-data/dm-space-map-metadata.c
drivers/md/persistent-data/dm-space-map.h
drivers/pcmcia/m8xx_pcmcia.c
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/dell-wmi-aio.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/hp_accel.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/pvpanic.c [new file with mode: 0644]
drivers/platform/x86/samsung-q10.c
drivers/platform/x86/sony-laptop.c
drivers/scsi/Kconfig
drivers/scsi/aic94xx/aic94xx_dev.c
drivers/scsi/aic94xx/aic94xx_hwi.c
drivers/scsi/aic94xx/aic94xx_tmf.c
drivers/scsi/be2iscsi/be.h
drivers/scsi/be2iscsi/be_cmds.c
drivers/scsi/be2iscsi/be_cmds.h
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_iscsi.h
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_main.h
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/be2iscsi/be_mgmt.h
drivers/scsi/bnx2fc/bnx2fc.h
drivers/scsi/bnx2fc/bnx2fc_els.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/bnx2fc/bnx2fc_tgt.c
drivers/scsi/csiostor/csio_lnode.h
drivers/scsi/csiostor/csio_rnode.h
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_fcs.c
drivers/scsi/fnic/fnic_fip.h [new file with mode: 0644]
drivers/scsi/fnic/fnic_main.c
drivers/scsi/fnic/vnic_dev.c
drivers/scsi/fnic/vnic_dev.h
drivers/scsi/fnic/vnic_devcmd.h
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/isci/remote_device.c
drivers/scsi/isci/remote_device.h
drivers/scsi/isci/request.c
drivers/scsi/isci/task.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/libsas/sas_internal.h
drivers/scsi/libsas/sas_port.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_logmsg.h
drivers/scsi/lpfc/lpfc_mbox.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/lpfc/lpfc_vport.c
drivers/scsi/lpfc/lpfc_vport.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mvsas/mv_init.c
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/mvsas/mv_sas.h
drivers/scsi/pm8001/Makefile
drivers/scsi/pm8001/pm8001_ctl.c
drivers/scsi/pm8001/pm8001_defs.h
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/pm8001/pm8001_hwi.h
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/pm8001/pm8001_sas.h
drivers/scsi/pm8001/pm80xx_hwi.c [new file with mode: 0644]
drivers/scsi/pm8001/pm80xx_hwi.h [new file with mode: 0644]
drivers/scsi/qla2xxx/Kconfig
drivers/scsi/qla2xxx/qla_mr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla4xxx/ql4_iocb.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_pm.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/scsi/sd_dif.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/Makefile
drivers/scsi/ufs/ufshcd-pltfrm.c [new file with mode: 0644]
drivers/scsi/ufs/ufshcd.c
drivers/spi/spi-atmel.c
drivers/spi/spi-davinci.c
drivers/spi/spi.c
drivers/tty/serial/68328serial.c
drivers/tty/serial/bcm63xx_uart.c
drivers/tty/tty_audit.c
drivers/vhost/vringh.c
drivers/video/au1100fb.c
drivers/xen/Kconfig
drivers/xen/events.c
fs/ecryptfs/crypto.c
fs/ecryptfs/ecryptfs_kernel.h
fs/namei.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4recover.c
fs/notify/fanotify/fanotify_user.c
fs/romfs/mmap-nommu.c
include/drm/drmP.h
include/drm/drm_fb_helper.h
include/linux/audit.h
include/linux/compat.h
include/linux/cpuidle.h
include/linux/device-mapper.h
include/linux/ftrace.h
include/linux/ftrace_event.h
include/linux/hid.h
include/linux/pci_ids.h
include/linux/sched.h
include/linux/spi/spi.h
include/linux/tty.h
include/scsi/libsas.h
include/scsi/osd_protocol.h
include/scsi/sas.h
include/scsi/sas_ata.h
include/scsi/scsi_device.h
include/scsi/scsi_transport_iscsi.h
include/scsi/scsi_transport_sas.h
include/sound/tlv.h
include/uapi/linux/audit.h
kernel/audit.c
kernel/audit.h
kernel/auditfilter.c
kernel/auditsc.c
kernel/params.c
kernel/sys_ni.c
kernel/sysctl_binary.c
kernel/trace/Kconfig
kernel/trace/ftrace.c
kernel/trace/trace_events.c
kernel/trace/trace_kprobe.c
net/socket.c
net/sunrpc/auth_gss/gss_rpc_xdr.c
sound/atmel/abdac.c
sound/atmel/ac97c.c
sound/mips/hal2.c
sound/mips/sgio2audio.c
sound/oss/Kconfig
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/soc/codecs/wm8994.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/soc-dapm.c
tools/power/x86/turbostat/turbostat.c
virt/kvm/kvm_main.c

diff --git a/Documentation/devicetree/bindings/mips/ralink.txt b/Documentation/devicetree/bindings/mips/ralink.txt
new file mode 100644
index 0000000..b35a8d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/ralink.txt
@@ -0,0 +1,17 @@
+Ralink MIPS SoC device tree bindings
+
+1. SoCs
+
+Each device tree must specify a compatible value for the Ralink SoC
+it uses in the compatible property of the root node. The compatible
+value must be one of the following values:
+
+  ralink,rt2880-soc
+  ralink,rt3050-soc
+  ralink,rt3052-soc
+  ralink,rt3350-soc
+  ralink,rt3352-soc
+  ralink,rt3883-soc
+  ralink,rt5350-soc
+  ralink,mt7620a-soc
+  ralink,mt7620n-soc
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4d1919bf23322ac2209eb3046bfce404c5ced483..6931c4348d240ed9f8bf6b21a0d75f9c520edf1d 100644
@@ -42,6 +42,7 @@ onnn  ON Semiconductor Corp.
 picochip       Picochip Ltd
 powervr        PowerVR (deprecated, use img)
 qcom   Qualcomm, Inc.
+ralink Mediatek/Ralink Technology Corp.
 ramtron        Ramtron International
 realtek Realtek Semiconductor Corp.
 renesas        Renesas Electronics Corporation
diff --git a/Makefile b/Makefile
index a3a834b11a97ef7d9236a56e7b5e7ca78e9799b8..cd11e88576044e127cb73f49bd41c7d4fb5d3e9f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 9
+PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 491ae7923b10dd7ee8e156b53ee5bdb16edc1c30..5917099470eaf74d259051b4f7451683cc52901a 100644
@@ -182,6 +182,10 @@ config ARC_CACHE_PAGES
          Note that Global I/D ENABLE + Per Page DISABLE works but corollary
          Global DISABLE + Per Page ENABLE won't work
 
+config ARC_CACHE_VIPT_ALIASING
+       bool "Support VIPT Aliasing D$"
+       default n
+
 endif  #ARC_CACHE
 
 config ARC_HAS_ICCM
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 48af742f8b5a7f5d5078447a639730263b70771a..d8dd660898b9b214927c903e1198e36026513984 100644
@@ -32,7 +32,6 @@ generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sembuf.h
 generic-y += shmbuf.h
-generic-y += shmparam.h
 generic-y += siginfo.h
 generic-y += socket.h
 generic-y += sockios.h
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 6632273861fd205ce2bf25ed2d48b1e26f8f78ed..d5555fe4742a3f696c7c8ef72d6eae757b51469e 100644
@@ -55,9 +55,6 @@
        : "r"(data), "r"(ptr));         \
 })
 
-/* used to give SHMLBA a value to avoid Cache Aliasing */
-extern unsigned int ARC_shmlba;
-
 #define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
 
 /*
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index ee1f6eae82d2f6afa9a85b78a19d06602e94f862..9f841af41092f059a604c0631984bb3c6ef067d7 100644
@@ -19,6 +19,7 @@
 #define _ASM_CACHEFLUSH_H
 
 #include <linux/mm.h>
+#include <asm/shmparam.h>
 
 /*
  * Semantically we need this because icache doesn't snoop dcache/dma.
@@ -33,7 +34,9 @@ void flush_cache_all(void);
 void flush_icache_range(unsigned long start, unsigned long end);
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
 void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
-void __flush_dcache_page(unsigned long paddr);
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr);
+#define __flush_dcache_page(p, v)      \
+               ___flush_dcache_page((unsigned long)p, (unsigned long)v)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
@@ -50,18 +53,55 @@ void dma_cache_wback(unsigned long start, unsigned long sz);
 #define flush_cache_vmap(start, end)           flush_cache_all()
 #define flush_cache_vunmap(start, end)         flush_cache_all()
 
-/*
- * VM callbacks when entire/range of user-space V-P mappings are
- * torn-down/get-invalidated
- *
- * Currently we don't support D$ aliasing configs for our VIPT caches
- * NOPS for VIPT Cache with non-aliasing D$ configurations only
- */
-#define flush_cache_dup_mm(mm)                 /* called on fork */
+#define flush_cache_dup_mm(mm)                 /* called on fork (VIVT only) */
+
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
 #define flush_cache_mm(mm)                     /* called on munmap/exit */
 #define flush_cache_range(mm, u_vstart, u_vend)
 #define flush_cache_page(vma, u_vaddr, pfn)    /* PF handling/COW-break */
 
+#else  /* VIPT aliasing dcache */
+
+/* To clear out stale userspace mappings */
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma,
+       unsigned long start,unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma,
+       unsigned long user_addr, unsigned long page);
+
+/*
+ * To make sure that userspace mapping is flushed to memory before
+ * get_user_pages() uses a kernel mapping to access the page
+ */
+#define ARCH_HAS_FLUSH_ANON_PAGE
+void flush_anon_page(struct vm_area_struct *vma,
+       struct page *page, unsigned long u_vaddr);
+
+#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
+/*
+ * Simple wrapper over config option
+ * Bootup code ensures that hardware matches kernel configuration
+ */
+static inline int cache_is_vipt_aliasing(void)
+{
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+       return 1;
+#else
+       return 0;
+#endif
+}
+
+#define CACHE_COLOR(addr)      (((unsigned long)(addr) >> (PAGE_SHIFT)) & 3)
+
+/*
+ * checks if two addresses (after page aligning) index into same cache set
+ */
+#define addr_not_cache_congruent(addr1, addr2)                         \
+       cache_is_vipt_aliasing() ?                                      \
+               (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0          \
+
 #define copy_to_user_page(vma, page, vaddr, dst, src, len)             \
 do {                                                                   \
        memcpy(dst, src, len);                                          \
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index bdf54610455134d1af10a2adad2952f1928b788c..374a35514116c17a71a610c2959e90d213e7ed97 100644
 #define get_user_page(vaddr)           __get_free_page(GFP_KERNEL)
 #define free_user_page(page, addr)     free_page(addr)
 
-/* TBD: for now don't worry about VIPT D$ aliasing */
 #define clear_page(paddr)              memset((paddr), 0, PAGE_SIZE)
 #define copy_page(to, from)            memcpy((to), (from), PAGE_SIZE)
 
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
 #define clear_user_page(addr, vaddr, pg)       clear_page(addr)
 #define copy_user_page(vto, vfrom, vaddr, pg)  copy_page(vto, vfrom)
 
+#else  /* VIPT aliasing dcache */
+
+struct vm_area_struct;
+struct page;
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+void copy_user_highpage(struct page *to, struct page *from,
+                       unsigned long u_vaddr, struct vm_area_struct *vma);
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
+
+#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
 #undef STRICT_MM_TYPECHECKS
 
 #ifdef STRICT_MM_TYPECHECKS
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index b7e36684c091d1f236d678007e998c5783b964bf..1cc4720faccbecf7862253720988f54e9b43751a 100644
@@ -395,6 +395,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
 #include <asm-generic/pgtable.h>
 
+/* to cope with aliasing VIPT cache */
+#define HAVE_ARCH_UNMAPPED_AREA
+
 /*
  * No page table caches to initialise
  */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
new file mode 100644
index 0000000..fffeecc
--- /dev/null
+++ b/arch/arc/include/asm/shmparam.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_SHMPARAM_H
+#define __ARC_ASM_SHMPARAM_H
+
+/* Handle upto 2 cache bins */
+#define        SHMLBA  (2 * PAGE_SIZE)
+
+/* Enforce SHMLBA in shmat */
+#define __ARCH_FORCE_SHMLBA
+
+#endif
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index fe91719866a57fed68e5ddb53228e7fec61279b1..85b6df839bd7b93b11465c066bdd063b32e68b77 100644
@@ -30,13 +30,20 @@ do {                                                \
 /*
  * This pair is called at time of munmap/exit to flush cache and TLB entries
  * for mappings being torn down.
- * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now)
- *    as we don't support aliasing configs in our VIPT D$.
+ * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$
  * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range
  *
  * Note, read http://lkml.org/lkml/2004/1/15/6
  */
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
 #define tlb_start_vma(tlb, vma)
+#else
+#define tlb_start_vma(tlb, vma)                                                \
+do {                                                                   \
+       if (!tlb->fullmm)                                               \
+               flush_cache_range(vma, vma->vm_start, vma->vm_end);     \
+} while(0)
+#endif
 
 #define tlb_end_vma(tlb, vma)                                          \
 do {                                                                   \
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
index 168dc146a8f6e33f6bf0e5bb83ca6cf9da1511af..ac95cc239c1e47d3d2f5a133b1d66936c6af0b91 100644
@@ -7,4 +7,4 @@
 #
 
 obj-y  := extable.o ioremap.o dma.o fault.o init.o
-obj-y  += tlb.o tlbex.o cache_arc700.o
+obj-y  += tlb.o tlbex.o cache_arc700.o mmap.o
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index c854cf95f70666669dcff0eec6006bbbe788bab0..2f12bca8aef30c4155b21e514e3ba9dd5ec63468 100644
@@ -68,6 +68,7 @@
 #include <linux/mmu_context.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/pagemap.h>
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 #include <asm/setup.h>
@@ -138,6 +139,7 @@ void __cpuinit arc_cache_init(void)
        struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
        struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
        int way_pg_ratio = way_pg_ratio;
+       int dcache_does_alias;
        char str[256];
 
        printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
@@ -184,9 +186,13 @@ chk_dc:
                panic("Cache H/W doesn't match kernel Config");
        }
 
+       dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;
+
        /* check for D-Cache aliasing */
-       if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
-               panic("D$ aliasing not handled right now\n");
+       if (dcache_does_alias && !cache_is_vipt_aliasing())
+               panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+       else if (!dcache_does_alias && cache_is_vipt_aliasing())
+               panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
 #endif
 
        /* Set the default Invalidate Mode to "simpy discard dirty lines"
@@ -269,47 +275,57 @@ static inline void __dc_entire_op(const int cacheop)
  * Per Line Operation on D-Cache
  * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
  * It's sole purpose is to help gcc generate ZOL
+ * (aliasing VIPT dcache flushing needs both vaddr and paddr)
  */
-static inline void __dc_line_loop(unsigned long start, unsigned long sz,
-                                         int aux_reg)
+static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
+                                 unsigned long sz, const int aux_reg)
 {
-       int num_lines, slack;
+       int num_lines;
 
        /* Ensure we properly floor/ceil the non-line aligned/sized requests
-        * and have @start - aligned to cache line and integral @num_lines.
+        * and have @paddr - aligned to cache line and integral @num_lines.
         * This however can be avoided for page sized since:
-        *  -@start will be cache-line aligned already (being page aligned)
+        *  -@paddr will be cache-line aligned already (being page aligned)
         *  -@sz will be integral multiple of line size (being page sized).
         */
        if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-               slack = start & ~DCACHE_LINE_MASK;
-               sz += slack;
-               start -= slack;
+               sz += paddr & ~DCACHE_LINE_MASK;
+               paddr &= DCACHE_LINE_MASK;
+               vaddr &= DCACHE_LINE_MASK;
        }
 
        num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
 
+#if (CONFIG_ARC_MMU_VER <= 2)
+       paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
+#endif
+
        while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
                /*
                 * Just as for I$, in MMU v3, D$ ops also require
                 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
-                * But we pass phy addr for both. This works since Linux
-                * doesn't support aliasing configs for D$, yet.
-                * Thus paddr is enough to provide both tag and index.
                 */
-               write_aux_reg(ARC_REG_DC_PTAG, start);
+               write_aux_reg(ARC_REG_DC_PTAG, paddr);
+
+               write_aux_reg(aux_reg, vaddr);
+               vaddr += ARC_DCACHE_LINE_LEN;
+#else
+               /* paddr contains stuffed vaddrs bits */
+               write_aux_reg(aux_reg, paddr);
 #endif
-               write_aux_reg(aux_reg, start);
-               start += ARC_DCACHE_LINE_LEN;
+               paddr += ARC_DCACHE_LINE_LEN;
        }
 }
 
+/* For kernel mappings cache operation: index is same as paddr */
+#define __dc_line_op_k(p, sz, op)      __dc_line_op(p, p, sz, op)
+
 /*
  * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
  */
-static inline void __dc_line_op(unsigned long start, unsigned long sz,
-                                       const int cacheop)
+static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
+                               unsigned long sz, const int cacheop)
 {
        unsigned long flags, tmp = tmp;
        int aux;
@@ -332,7 +348,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
        else
                aux = ARC_REG_DC_FLDL;
 
-       __dc_line_loop(start, sz, aux);
+       __dc_line_loop(paddr, vaddr, sz, aux);
 
        if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
                wait_for_flush();
@@ -347,7 +363,8 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 #else
 
 #define __dc_entire_op(cacheop)
-#define __dc_line_op(start, sz, cacheop)
+#define __dc_line_op(paddr, vaddr, sz, cacheop)
+#define __dc_line_op_k(paddr, sz, cacheop)
 
 #endif /* CONFIG_ARC_HAS_DCACHE */
 
@@ -399,49 +416,45 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz)
 {
        unsigned long flags;
-       int num_lines, slack;
-       unsigned int addr;
+       int num_lines;
 
        /*
         * Ensure we properly floor/ceil the non-line aligned/sized requests:
         * However page sized flushes can be compile time optimised.
-        *  -@phy_start will be cache-line aligned already (being page aligned)
+        *  -@paddr will be cache-line aligned already (being page aligned)
         *  -@sz will be integral multiple of line size (being page sized).
         */
        if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-               slack = phy_start & ~ICACHE_LINE_MASK;
-               sz += slack;
-               phy_start -= slack;
+               sz += paddr & ~ICACHE_LINE_MASK;
+               paddr &= ICACHE_LINE_MASK;
+               vaddr &= ICACHE_LINE_MASK;
        }
 
        num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
 
-#if (CONFIG_ARC_MMU_VER > 2)
-       vaddr &= ~ICACHE_LINE_MASK;
-       addr = phy_start;
-#else
+#if (CONFIG_ARC_MMU_VER <= 2)
        /* bits 17:13 of vaddr go as bits 4:0 of paddr */
-       addr = phy_start | ((vaddr >> 13) & 0x1F);
+       paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
 #endif
 
        local_irq_save(flags);
        while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
                /* tag comes from phy addr */
-               write_aux_reg(ARC_REG_IC_PTAG, addr);
+               write_aux_reg(ARC_REG_IC_PTAG, paddr);
 
                /* index bits come from vaddr */
                write_aux_reg(ARC_REG_IC_IVIL, vaddr);
                vaddr += ARC_ICACHE_LINE_LEN;
 #else
                /* paddr contains stuffed vaddrs bits */
-               write_aux_reg(ARC_REG_IC_IVIL, addr);
+               write_aux_reg(ARC_REG_IC_IVIL, paddr);
 #endif
-               addr += ARC_ICACHE_LINE_LEN;
+               paddr += ARC_ICACHE_LINE_LEN;
        }
        local_irq_restore(flags);
 }
@@ -457,29 +470,66 @@ static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
  * Exported APIs
  */
 
+/*
+ * Handle cache congruency of kernel and userspace mappings of page when kernel
+ * writes-to/reads-from
+ *
+ * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
+ *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
+ *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
+ *  -In SMP, if hardware caches are coherent
+ *
+ * There's a corollary case, where kernel READs from a userspace mapped page.
+ * If the U-mapping is not congruent to to K-mapping, former needs flushing.
+ */
 void flush_dcache_page(struct page *page)
 {
-       /* Make a note that dcache is not yet flushed for this page */
-       set_bit(PG_arch_1, &page->flags);
+       struct address_space *mapping;
+
+       if (!cache_is_vipt_aliasing()) {
+               set_bit(PG_arch_1, &page->flags);
+               return;
+       }
+
+       /* don't handle anon pages here */
+       mapping = page_mapping(page);
+       if (!mapping)
+               return;
+
+       /*
+        * pagecache page, file not yet mapped to userspace
+        * Make a note that K-mapping is dirty
+        */
+       if (!mapping_mapped(mapping)) {
+               set_bit(PG_arch_1, &page->flags);
+       } else if (page_mapped(page)) {
+
+               /* kernel reading from page with U-mapping */
+               void *paddr = page_address(page);
+               unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+
+               if (addr_not_cache_congruent(paddr, vaddr))
+                       __flush_dcache_page(paddr, vaddr);
+       }
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
 
 void dma_cache_wback_inv(unsigned long start, unsigned long sz)
 {
-       __dc_line_op(start, sz, OP_FLUSH_N_INV);
+       __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 }
 EXPORT_SYMBOL(dma_cache_wback_inv);
 
 void dma_cache_inv(unsigned long start, unsigned long sz)
 {
-       __dc_line_op(start, sz, OP_INV);
+       __dc_line_op_k(start, sz, OP_INV);
 }
 EXPORT_SYMBOL(dma_cache_inv);
 
 void dma_cache_wback(unsigned long start, unsigned long sz)
 {
-       __dc_line_op(start, sz, OP_FLUSH);
+       __dc_line_op_k(start, sz, OP_FLUSH);
 }
 EXPORT_SYMBOL(dma_cache_wback);
 
@@ -560,7 +610,7 @@ void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 
        local_irq_save(flags);
        __ic_line_inv_vaddr(paddr, vaddr, len);
-       __dc_line_op(paddr, len, OP_FLUSH);
+       __dc_line_op(paddr, vaddr, len, OP_FLUSH);
        local_irq_restore(flags);
 }
 
@@ -570,9 +620,13 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
        __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
 }
 
-void __flush_dcache_page(unsigned long paddr)
+/*
+ * wrapper to clearout kernel or userspace mappings of a page
+ * For kernel mappings @vaddr == @paddr
+ */
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
 {
-       __dc_line_op(paddr, PAGE_SIZE, OP_FLUSH_N_INV);
+       __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
 }
 
 void flush_icache_all(void)
@@ -601,6 +655,87 @@ noinline void flush_cache_all(void)
 
 }
 
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+       flush_cache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
+                     unsigned long pfn)
+{
+       unsigned int paddr = pfn << PAGE_SHIFT;
+
+       __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+                      unsigned long end)
+{
+       flush_cache_all();
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+       unsigned long u_vaddr, struct vm_area_struct *vma)
+{
+       void *kfrom = page_address(from);
+       void *kto = page_address(to);
+       int clean_src_k_mappings = 0;
+
+       /*
+        * If SRC page was already mapped in userspace AND it's U-mapping is
+        * not congruent with K-mapping, sync former to physical page so that
+        * K-mapping in memcpy below, sees the right data
+        *
+        * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
+        * equally valid for SRC page as well
+        */
+       if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+               __flush_dcache_page(kfrom, u_vaddr);
+               clean_src_k_mappings = 1;
+       }
+
+       copy_page(kto, kfrom);
+
+       /*
+        * Mark DST page K-mapping as dirty for a later finalization by
+        * update_mmu_cache(). Although the finalization could have been done
+        * here as well (given that both vaddr/paddr are available).
+        * But update_mmu_cache() already has code to do that for other
+        * non copied user pages (e.g. read faults which wire in pagecache page
+        * directly).
+        */
+       set_bit(PG_arch_1, &to->flags);
+
+       /*
+        * if SRC was already usermapped and non-congruent to kernel mapping
+        * sync the kernel mapping back to physical page
+        */
+       if (clean_src_k_mappings) {
+               __flush_dcache_page(kfrom, kfrom);
+       } else {
+               set_bit(PG_arch_1, &from->flags);
+       }
+}
+
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+{
+       clear_page(to);
+       set_bit(PG_arch_1, &page->flags);
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+                    unsigned long u_vaddr)
+{
+       /* TBD: do we really need to clear the kernel mapping */
+       __flush_dcache_page(page_address(page), u_vaddr);
+       __flush_dcache_page(page_address(page), page_address(page));
+
+}
+
+#endif
+
 /**********************************************************************
  * Explicit Cache flush request from user space via syscall
  * Needed for JITs which generate code on the fly
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
new file mode 100644
index 0000000..2e06d56
--- /dev/null
+++ b/arch/arc/mm/mmap.c
@@ -0,0 +1,78 @@
+/*
+ * ARC700 mmap
+ *
+ * (started from arm version - for VIPT alias handling)
+ *
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <asm/cacheflush.h>
+
+#define COLOUR_ALIGN(addr, pgoff)                      \
+       ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +      \
+        (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
+/*
+ * Ensure that shared mappings are correctly aligned to
+ * avoid aliasing issues with VIPT caches.
+ * We need to ensure that
+ * a specific page of an object is always mapped at a multiple of
+ * SHMLBA bytes.
+ */
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       int do_align = 0;
+       int aliasing = cache_is_vipt_aliasing();
+       struct vm_unmapped_area_info info;
+
+       /*
+        * We only need to do colour alignment if D cache aliases.
+        */
+       if (aliasing)
+               do_align = filp || (flags & MAP_SHARED);
+
+       /*
+        * We enforce the MAP_FIXED case.
+        */
+       if (flags & MAP_FIXED) {
+               if (aliasing && flags & MAP_SHARED &&
+                   (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+                       return -EINVAL;
+               return addr;
+       }
+
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
+       if (addr) {
+               if (do_align)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+               else
+                       addr = PAGE_ALIGN(addr);
+
+               vma = find_vma(mm, addr);
+               if (TASK_SIZE - len >= addr &&
+                   (!vma || addr + len <= vma->vm_start))
+                       return addr;
+       }
+
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = mm->mmap_base;
+       info.high_limit = TASK_SIZE;
+       info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
+       return vm_unmapped_area(&info);
+}
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 003d69ac6ffa63935a7bedf1089b61e238ff7a12..066145b5f3488bcaa515769728d0389a00fc7ed7 100644
@@ -421,25 +421,40 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 /*
  * Called at the end of pagefault, for a userspace mapped page
  *  -pre-install the corresponding TLB entry into MMU
- *  -Finalize the delayed D-cache flush (wback+inv kernel mapping)
+ *  -Finalize the delayed D-cache flush of kernel mapping of page due to
+ *     flush_dcache_page(), copy_user_page()
+ *
+ * Note that flush (when done) involves both WBACK - so physical page is
+ * in sync as well as INV - so any non-congruent aliases don't remain
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
                      pte_t *ptep)
 {
        unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+       unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
 
        create_tlb(vma, vaddr, ptep);
 
-       /* icache doesn't snoop dcache, thus needs to be made coherent here */
-       if (vma->vm_flags & VM_EXEC) {
+       /*
+        * Exec page : Independent of aliasing/page-color considerations,
+        *             since icache doesn't snoop dcache on ARC, any dirty
+        *             K-mapping of a code page needs to be wback+inv so that
+        *             icache fetch by userspace sees code correctly.
+        * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
+        *             so userspace sees the right data.
+        *  (Avoids the flush for Non-exec + congruent mapping case)
+        */
+       if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) {
                struct page *page = pfn_to_page(pte_pfn(*ptep));
 
-               /* if page was dcache dirty, flush now */
                int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
                if (dirty) {
-                       unsigned long paddr =  pte_val(*ptep) & PAGE_MASK;
-                       __flush_dcache_page(paddr);
-                       __inv_icache_page(paddr, vaddr);
+                       /* wback + inv dcache lines */
+                       __flush_dcache_page(paddr, paddr);
+
+                       /* invalidate any existing icache lines */
+                       if (vma->vm_flags & VM_EXEC)
+                               __inv_icache_page(paddr, vaddr);
                }
        }
 }
diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig
index 4e121272c4e5418b86f1db7845713f904f428593..1d3452100f1ff448cd4ed493e7ddf85fde0d9f35 100644
@@ -27,10 +27,3 @@ menuconfig ARC_PLAT_TB10X
          Abilis Systems. TB10x is based on the ARC700 CPU architecture.
          Say Y if you are building a kernel for one of the SOCs in this
          series (e.g. TB100 or TB101). If in doubt say N.
-
-if ARC_PLAT_TB10X
-
-config GENERIC_GPIO
-       def_bool y
-
-endif
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 12f22492df4ced64e6268449a80598a2ae5676f2..58125bf008d3e647e5c89e6ba534827f0a0d5629 100644
@@ -389,7 +389,7 @@ __SYSCALL(364, sys_perf_event_open)
 __SYSCALL(365, compat_sys_recvmmsg)
 __SYSCALL(366, sys_accept4)
 __SYSCALL(367, sys_fanotify_init)
-__SYSCALL(368, compat_sys_fanotify_mark_wrapper)
+__SYSCALL(368, compat_sys_fanotify_mark)
 __SYSCALL(369, sys_prlimit64)
 __SYSCALL(370, sys_name_to_handle_at)
 __SYSCALL(371, compat_sys_open_by_handle_at)
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index db01aa978c41e2162ed91714c91e074a361ecdd2..a1b19ed7467cf1c026147acaa2539b1dc6f60817 100644
@@ -104,13 +104,6 @@ compat_sys_fallocate_wrapper:
        b       sys_fallocate
 ENDPROC(compat_sys_fallocate_wrapper)
 
-compat_sys_fanotify_mark_wrapper:
-       orr     x2, x2, x3, lsl #32
-       mov     w3, w4
-       mov     w4, w5
-       b       sys_fanotify_mark
-ENDPROC(compat_sys_fanotify_mark_wrapper)
-
 #undef __SYSCALL
 #define __SYSCALL(x, y)                .quad   y       // x
 
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index 66cf00095b8487210b3187cbf41072bc3d80406b..1fce08632ad763abbe68a640a442cea1e0353a7f 100644
@@ -141,11 +141,11 @@ archclean:
 
 INSTALL_PATH ?= /tftpboot
 boot := arch/$(ARCH)/boot
-BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip
+BOOT_TARGETS = uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip
 PHONY += $(BOOT_TARGETS) install
-KBUILD_IMAGE := $(boot)/vmImage
+KBUILD_IMAGE := $(boot)/uImage
 
-all: vmImage
+all: uImage
 
 $(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index f7d27d50d02c83fa35ad4606c11ecbd13a555904..3efaa094fb90e44050ed17f62117a60ca098ae8b 100644
@@ -6,7 +6,7 @@
 # for more details.
 #
 
-targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip
+targets := uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip
 extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.xip
 
 ifeq ($(CONFIG_RAMKERNEL),y)
@@ -39,22 +39,22 @@ quiet_cmd_mk_bin_xip = BIN     $@
 $(obj)/vmlinux.bin.xip: $(obj)/vmlinux.bin FORCE
        $(call if_changed,mk_bin_xip)
 
-$(obj)/vmImage.bin: $(obj)/vmlinux.bin
+$(obj)/uImage.bin: $(obj)/vmlinux.bin
        $(call if_changed,uimage,none)
 
-$(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
        $(call if_changed,uimage,bzip2)
 
-$(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
        $(call if_changed,uimage,gzip)
 
-$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
        $(call if_changed,uimage,lzma)
 
-$(obj)/vmImage.lzo: $(obj)/vmlinux.bin.lzo
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
        $(call if_changed,uimage,lzo)
 
-$(obj)/vmImage.xip: $(obj)/vmlinux.bin.xip
+$(obj)/uImage.xip: $(obj)/vmlinux.bin.xip
        $(call if_changed,uimage,none)
 
 suffix-y                      := bin
@@ -64,7 +64,7 @@ suffix-$(CONFIG_KERNEL_LZMA)  := lzma
 suffix-$(CONFIG_KERNEL_LZO)   := lzo
 suffix-$(CONFIG_ROMKERNEL)    := xip
 
-$(obj)/vmImage: $(obj)/vmImage.$(suffix-y)
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
        @ln -sf $(notdir $<) $@
 
 install:
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index c8db653c72d2c05f75046026cf1bc1621599d92e..a107a98e99783e4cccb9d47a87bd40c1f6de010d 100644
@@ -11,7 +11,9 @@
 
 #ifdef CONFIG_SMP
 
+#include <asm/barrier.h>
 #include <linux/linkage.h>
+#include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
 asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
diff --git a/arch/blackfin/include/asm/bfin_sdh.h b/arch/blackfin/include/asm/bfin_sdh.h
index 6a4cfe2d33679692117308c5ab0d0395f0ec2091..a99957ea9e9b504b66b4e31f6660ddf5efc18c8e 100644
@@ -24,18 +24,27 @@ struct bfin_sd_host {
 #define CMD_INT_E          (1 << 8)    /* Command Interrupt */
 #define CMD_PEND_E         (1 << 9)    /* Command Pending */
 #define CMD_E              (1 << 10)   /* Command Enable */
+#ifdef RSI_BLKSZ
+#define CMD_CRC_CHECK_D    (1 << 11)   /* CRC Check is disabled */
+#define CMD_DATA0_BUSY     (1 << 12)   /* Check for Busy State on the DATA0 pin */
+#endif
 
 /* SDH_PWR_CTL bitmasks */
+#ifndef RSI_BLKSZ
 #define PWR_ON             0x3         /* Power On */
 #define SD_CMD_OD          (1 << 6)    /* Open Drain Output */
 #define ROD_CTL            (1 << 7)    /* Rod Control */
+#endif
 
 /* SDH_CLK_CTL bitmasks */
 #define CLKDIV             0xff        /* MC_CLK Divisor */
 #define CLK_E              (1 << 8)    /* MC_CLK Bus Clock Enable */
 #define PWR_SV_E           (1 << 9)    /* Power Save Enable */
 #define CLKDIV_BYPASS      (1 << 10)   /* Bypass Divisor */
-#define WIDE_BUS           (1 << 11)   /* Wide Bus Mode Enable */
+#define BUS_MODE_MASK      0x1800      /* Bus Mode Mask */
+#define STD_BUS_1          0x000       /* Standard Bus 1 bit mode */
+#define WIDE_BUS_4         0x800       /* Wide Bus 4 bit mode */
+#define BYTE_BUS_8         0x1000      /* Byte Bus 8 bit mode */
 
 /* SDH_RESP_CMD bitmasks */
 #define RESP_CMD           0x3f        /* Response Command */
@@ -45,7 +54,13 @@ struct bfin_sd_host {
 #define DTX_DIR            (1 << 1)    /* Data Transfer Direction */
 #define DTX_MODE           (1 << 2)    /* Data Transfer Mode */
 #define DTX_DMA_E          (1 << 3)    /* Data Transfer DMA Enable */
+#ifndef RSI_BLKSZ
 #define DTX_BLK_LGTH       (0xf << 4)  /* Data Transfer Block Length */
+#else
+
+/* Bit masks for SDH_BLK_SIZE */
+#define DTX_BLK_LGTH       0x1fff      /* Data Transfer Block Length */
+#endif
 
 /* SDH_STATUS bitmasks */
 #define CMD_CRC_FAIL       (1 << 0)    /* CMD CRC Fail */
@@ -114,10 +129,14 @@ struct bfin_sd_host {
 /* SDH_E_STATUS bitmasks */
 #define SDIO_INT_DET       (1 << 1)    /* SDIO Int Detected */
 #define SD_CARD_DET        (1 << 4)    /* SD Card Detect */
+#define SD_CARD_BUSYMODE   (1 << 31)   /* Card is in Busy mode */
+#define SD_CARD_SLPMODE    (1 << 30)   /* Card in Sleep Mode */
+#define SD_CARD_READY      (1 << 17)   /* Card Ready */
 
 /* SDH_E_MASK bitmasks */
 #define SDIO_MSK           (1 << 1)    /* Mask SDIO Int Detected */
-#define SCD_MSK            (1 << 6)    /* Mask Card Detect */
+#define SCD_MSK            (1 << 4)    /* Mask Card Detect */
+#define CARD_READY_MSK     (1 << 16)   /* Mask Card Ready */
 
 /* SDH_CFG bitmasks */
 #define CLKS_EN            (1 << 0)    /* Clocks Enable */
@@ -126,7 +145,15 @@ struct bfin_sd_host {
 #define SD_RST             (1 << 4)    /* SDMMC Reset */
 #define PUP_SDDAT          (1 << 5)    /* Pull-up SD_DAT */
 #define PUP_SDDAT3         (1 << 6)    /* Pull-up SD_DAT3 */
+#ifndef RSI_BLKSZ
 #define PD_SDDAT3          (1 << 7)    /* Pull-down SD_DAT3 */
+#else
+#define PWR_ON             0x600       /* Power On */
+#define SD_CMD_OD          (1 << 11)   /* Open Drain Output */
+#define BOOT_EN            (1 << 12)   /* Boot Enable */
+#define BOOT_MODE          (1 << 13)   /* Alternate Boot Mode */
+#define BOOT_ACK_EN        (1 << 14)   /* Boot ACK is expected */
+#endif
 
 /* SDH_RD_WAIT_EN bitmasks */
 #define RWR                (1 << 0)    /* Read Wait Request */
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index 8a0fed16058f217cfb6badb41dc176faa40b70af..0ca40dd44724200fc0d098a579e1c9f1b50d6c57 100644
@@ -41,6 +41,7 @@
 #include <asm-generic/bitops/non-atomic.h>
 #else
 
+#include <asm/barrier.h>
 #include <asm/byteorder.h>     /* swab32 */
 #include <linux/linkage.h>
 
diff --git a/arch/blackfin/include/asm/def_LPBlackfin.h b/arch/blackfin/include/asm/def_LPBlackfin.h
index fe0ca03a1cb2dc2552d0c564ec8ca03a17991625..ca67145c6a45987600407d44f80919b0e718b25e 100644
@@ -622,10 +622,12 @@ do { \
 #define PAGE_SIZE_4KB      0x00010000  /* 4 KB page size */
 #define PAGE_SIZE_1MB      0x00020000  /* 1 MB page size */
 #define PAGE_SIZE_4MB      0x00030000  /* 4 MB page size */
+#ifdef CONFIG_BF60x
 #define PAGE_SIZE_16KB     0x00040000  /* 16 KB page size */
 #define PAGE_SIZE_64KB     0x00050000  /* 64 KB page size */
 #define PAGE_SIZE_16MB     0x00060000  /* 16 MB page size */
 #define PAGE_SIZE_64MB     0x00070000  /* 64 MB page size */
+#endif
 #define CPLB_L1SRAM        0x00000020  /* 0=SRAM mapped in L1, 0=SRAM not
                                         * mapped to L1
                                         */
diff --git a/arch/blackfin/include/asm/mem_init.h b/arch/blackfin/include/asm/mem_init.h
index 9b33e7247864c2a14f236fac89ba30e40961c028..c865b33eeb68bc271c4a8e9f14e72d75b5c3edef 100644
 struct ddr_config {
        u32 ddr_clk;
        u32 dmc_ddrctl;
+       u32 dmc_effctl;
        u32 dmc_ddrcfg;
        u32 dmc_ddrtr0;
        u32 dmc_ddrtr1;
@@ -348,6 +349,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [0] = {
                .ddr_clk    = 125,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20705212,
                .dmc_ddrtr1 = 0x201003CF,
@@ -358,6 +360,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [1] = {
                .ddr_clk    = 133,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20806313,
                .dmc_ddrtr1 = 0x2013040D,
@@ -368,6 +371,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [2] = {
                .ddr_clk    = 150,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20A07323,
                .dmc_ddrtr1 = 0x20160492,
@@ -378,6 +382,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [3] = {
                .ddr_clk    = 166,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20A07323,
                .dmc_ddrtr1 = 0x2016050E,
@@ -388,6 +393,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [4] = {
                .ddr_clk    = 200,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20a07323,
                .dmc_ddrtr1 = 0x2016050f,
@@ -398,6 +404,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [5] = {
                .ddr_clk    = 225,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20E0A424,
                .dmc_ddrtr1 = 0x302006DB,
@@ -408,6 +415,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [6] = {
                .ddr_clk    = 250,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20E0A424,
                .dmc_ddrtr1 = 0x3020079E,
@@ -469,6 +477,7 @@ static inline void init_dmc(u32 dmc_clk)
                        bfin_write_DMC0_TR2(ddr_config_table[i].dmc_ddrtr2);
                        bfin_write_DMC0_MR(ddr_config_table[i].dmc_ddrmr);
                        bfin_write_DMC0_EMR1(ddr_config_table[i].dmc_ddrmr1);
+                       bfin_write_DMC0_EFFCTL(ddr_config_table[i].dmc_effctl);
                        bfin_write_DMC0_CTL(ddr_config_table[i].dmc_ddrctl);
                        break;
                }
index 34e96ce02aa9671701197e7bc3b8f65902fbb5d5..b49a53b583d582acbf8670776aa87c6559d0c0e7 100644 (file)
@@ -30,6 +30,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 {
        int i_d, i_i;
        unsigned long addr;
+       unsigned long cplb_pageflags, cplb_pagesize;
 
        struct cplb_entry *d_tbl = dcplb_tbl[cpu];
        struct cplb_entry *i_tbl = icplb_tbl[cpu];
@@ -49,11 +50,20 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
        /* Cover kernel memory with 4M pages.  */
        addr = 0;
 
-       for (; addr < memory_start; addr += 4 * 1024 * 1024) {
+#ifdef PAGE_SIZE_16MB
+       cplb_pageflags = PAGE_SIZE_16MB;
+       cplb_pagesize = SIZE_16M;
+#else
+       cplb_pageflags = PAGE_SIZE_4MB;
+       cplb_pagesize = SIZE_4M;
+#endif
+
+
+       for (; addr < memory_start; addr += cplb_pagesize) {
                d_tbl[i_d].addr = addr;
-               d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
+               d_tbl[i_d++].data = SDRAM_DGENERIC | cplb_pageflags;
                i_tbl[i_i].addr = addr;
-               i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
+               i_tbl[i_i++].data = SDRAM_IGENERIC | cplb_pageflags;
        }
 
 #ifdef CONFIG_ROMKERNEL
index e854f9066cbde005413bf0659e59fbed6a3b0423..79cc0f6dcdd5c26fa53137914ccc1bed0fdfd5ab 100644 (file)
@@ -145,7 +145,7 @@ MGR_ATTR static int dcplb_miss(int cpu)
        unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
        int status = bfin_read_DCPLB_STATUS();
        int idx;
-       unsigned long d_data, base, addr1, eaddr;
+       unsigned long d_data, base, addr1, eaddr, cplb_pagesize, cplb_pageflags;
 
        nr_dcplb_miss[cpu]++;
        if (unlikely(status & FAULT_USERSUPV))
@@ -167,18 +167,37 @@ MGR_ATTR static int dcplb_miss(int cpu)
        if (unlikely(d_data == 0))
                return CPLB_NO_ADDR_MATCH;
 
-       addr1 = addr & ~(SIZE_4M - 1);
        addr &= ~(SIZE_1M - 1);
        d_data |= PAGE_SIZE_1MB;
-       if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
+
+       /* BF60x supports CPLB page sizes larger than 4M */
+#ifdef PAGE_SIZE_16MB
+       cplb_pageflags = PAGE_SIZE_16MB;
+       cplb_pagesize = SIZE_16M;
+#else
+       cplb_pageflags = PAGE_SIZE_4MB;
+       cplb_pagesize = SIZE_4M;
+#endif
+
+find_pagesize:
+       addr1 = addr & ~(cplb_pagesize - 1);
+       if (addr1 >= base && (addr1 + cplb_pagesize) <= eaddr) {
                /*
                 * This works because
                 * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB.
                 */
-               d_data |= PAGE_SIZE_4MB;
+               d_data |= cplb_pageflags;
                addr = addr1;
+               goto found_pagesize;
+       } else {
+               if (cplb_pagesize > SIZE_4M) {
+                       cplb_pageflags = PAGE_SIZE_4MB;
+                       cplb_pagesize = SIZE_4M;
+                       goto find_pagesize;
+               }
        }
 
+found_pagesize:
 #ifdef CONFIG_BF60x
        if ((addr >= ASYNC_BANK0_BASE)
                && (addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
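
The hunk above boils down to: try a 16 MB CPLB page first, fall back to 4 MB if the aligned window spills outside [base, eaddr), and keep the default 1 MB page when even that does not fit. A minimal standalone sketch of that selection; pick_cplb_pagesize() is a hypothetical name and SIZE_4M/SIZE_16M are spelled out as plain constants, none of this is code from the patch:

#include <stdio.h>

#define SIZE_4M   0x00400000UL
#define SIZE_16M  0x01000000UL

/* Pick the largest page that both contains 'addr' and stays inside
 * [base, eaddr); returns 0 when neither 16 MB nor 4 MB fits, meaning
 * the handler keeps its default 1 MB page. */
unsigned long pick_cplb_pagesize(unsigned long addr, unsigned long base,
                                 unsigned long eaddr)
{
        unsigned long size = SIZE_16M;          /* BF60x can map 16 MB pages */

        for (;;) {
                unsigned long start = addr & ~(size - 1);

                if (start >= base && start + size <= eaddr)
                        return size;            /* candidate window fits */
                if (size == SIZE_4M)
                        return 0;               /* keep the 1 MB default */
                size = SIZE_4M;                 /* retry once with 4 MB */
        }
}

int main(void)
{
        /* 0x01300000 sits inside a 32 MB region, so a 16 MB page fits */
        printf("%lx\n", pick_cplb_pagesize(0x01300000UL, 0, 0x02000000UL));
        return 0;
}
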
index 404045dcc5e4bc321c30ddf0eecc1a259e5312ce..5b80d59e66e57b11a3592378abc6914adc938f28 100644 (file)
 #include <asm/cplbinit.h>
 #include <asm/blackfin.h>
 
-static char const page_strtbl[][3] = { "1K", "4K", "1M", "4M" };
-#define page(flags)    (((flags) & 0x30000) >> 16)
+static char const page_strtbl[][4] = {
+       "1K", "4K", "1M", "4M",
+#ifdef CONFIG_BF60x
+       "16K", "64K", "16M", "64M",
+#endif
+};
+#define page(flags)    (((flags) & 0x70000) >> 16)
 #define strpage(flags) page_strtbl[page(flags)]
 
 struct cplbinfo_data {
index fb96e607adcf815890de6ce475c55a3cfd76a42b..107b306b06f10819fe1fc6c3d1e3e87cdaa3afc6 100644 (file)
@@ -1314,7 +1314,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                        seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
        }
 
-       seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
+       seq_printf(m, "\ncpu MHz\t\t: %lu.%06lu/%lu.%06lu\n",
                cclk/1000000, cclk%1000000,
                sclk/1000000, sclk%1000000);
        seq_printf(m, "bogomips\t: %lu.%02lu\n"
index 95114ed395ac6939d6a82551c289de52fa943edf..6a3a14bcd3a1ee338be3c90d68f3672575904d39 100644 (file)
@@ -455,6 +455,7 @@ static struct platform_device bfin_async_nand_device = {
 static void bfin_plat_nand_init(void)
 {
        gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat");
+       gpio_direction_input(BFIN_NAND_PLAT_READY);
 }
 #else
 static void bfin_plat_nand_init(void) {}
index a4fce0370c1db2455dfeb16f3526b993333bab38..755f0dc120100a7f359d1137059c3f094cb10d75 100644 (file)
@@ -764,7 +764,6 @@ static struct platform_device i2c_bfin_twi1_device = {
        .num_resources = ARRAY_SIZE(bfin_twi1_resource),
        .resource = bfin_twi1_resource,
 };
-#endif /* CONFIG_BF542 */
 #endif /* CONFIG_I2C_BLACKFIN_TWI */
 
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
index 4954cf3f7e16d82ec9dba7b5bc1841362e465c94..102ee4025ac9fb373fd7192cb08fb564b660d4d1 100644 (file)
 #define bfin_write_DMC0_EMR1(val) bfin_write32(DMC0_EMR1, val)
 #define bfin_read_DMC0_CTL() bfin_read32(DMC0_CTL)
 #define bfin_write_DMC0_CTL(val) bfin_write32(DMC0_CTL, val)
+#define bfin_read_DMC0_EFFCTL() bfin_read32(DMC0_EFFCTL)
+#define bfin_write_DMC0_EFFCTL(val) bfin_write32(DMC0_EFFCTL, val)
 #define bfin_read_DMC0_STAT() bfin_read32(DMC0_STAT)
 #define bfin_write_DMC0_STAT(val) bfin_write32(DMC0_STAT, val)
 #define bfin_read_DMC0_DLLCTL() bfin_read32(DMC0_DLLCTL)
index d266787725b468ab94286aeb3e7cca62698c5d6c..33013dfcd3e1d58fb0b0f3495ce1956c76233340 100644 (file)
@@ -223,13 +223,25 @@ config M5307
        help
          Motorola ColdFire 5307 processor support.
 
+config M53xx
+       bool
+
 config M532x
        bool "MCF532x"
        depends on !MMU
+       select M53xx
        select HAVE_CACHE_CB
        help
          Freescale (Motorola) ColdFire 532x processor support.
 
+config M537x
+       bool "MCF537x"
+       depends on !MMU
+       select M53xx
+       select HAVE_CACHE_CB
+       help
+         Freescale ColdFire 537x processor support.
+
 config M5407
        bool "MCF5407"
        depends on !MMU
index 7240584d343974847fbaab832bcf50388d8d2ba3..b9ab0a69561cac3de87657e06700f8c0cf456413 100644 (file)
@@ -358,6 +358,13 @@ config COBRA5329
        help
          Support for the senTec COBRA5329 board.
 
+config M5373EVB
+       bool "Freescale M5373EVB board support"
+       depends on M537x
+       select FREESCALE
+       help
+         Support for the Freescale M5373EVB board.
+
 config M5407C3
        bool "Motorola M5407C3 board support"
        depends on M5407
@@ -539,15 +546,6 @@ config ROMVEC
          68000 type variants the vectors are at the base of the boot device
          on system startup.
 
-config ROMVECSIZE
-       hex "Size of ROM vector region (in bytes)"
-       default "0x400"
-       depends on ROM
-       help
-         Define the size of the vector region in ROM. For most 68000
-         variants this would be 0x400 bytes in size. Set to 0 if you do
-         not want a vector region at the start of the ROM.
-
 config ROMSTART
        hex "Address of the base of system image in ROM"
        default "0x400"
index 2f02acfb8edf4647e28ada0e48801b68c0c965f3..7f7830f2c5bcb488745c0326e36a1889a733e04b 100644 (file)
@@ -45,6 +45,7 @@ cpuflags-$(CONFIG_M5441x)     := $(call cc-option,-mcpu=54455,-mcfv4e)
 cpuflags-$(CONFIG_M54xx)       := $(call cc-option,-mcpu=5475,-m5200)
 cpuflags-$(CONFIG_M5407)       := $(call cc-option,-mcpu=5407,-m5200)
 cpuflags-$(CONFIG_M532x)       := $(call cc-option,-mcpu=532x,-m5307)
+cpuflags-$(CONFIG_M537x)       := $(call cc-option,-mcpu=537x,-m5307)
 cpuflags-$(CONFIG_M5307)       := $(call cc-option,-mcpu=5307,-m5200)
 cpuflags-$(CONFIG_M528x)       := $(call cc-option,-mcpu=528x,-m5307)
 cpuflags-$(CONFIG_M5275)       := $(call cc-option,-mcpu=5275,-m5307)
index a73998528d26dddbc8db3b10979a5156d8a25dca..66a36bd51aa12d120d1502610e0885411a3058a2 100644 (file)
@@ -480,23 +480,6 @@ typedef struct scc_enet {
 #define SICR_ENET_CLKRT        ((uint)0x0000003d)
 #endif
 
-#ifdef CONFIG_RPXLITE
-/* This ENET stuff is for the MPC850 with ethernet on SCC2.  Some of
- * this may be unique to the RPX-Lite configuration.
- * Note TENA is on Port B.
- */
-#define PA_ENET_RXD    ((ushort)0x0004)
-#define PA_ENET_TXD    ((ushort)0x0008)
-#define PA_ENET_TCLK   ((ushort)0x0200)
-#define PA_ENET_RCLK   ((ushort)0x0800)
-#define PB_ENET_TENA   ((uint)0x00002000)
-#define PC_ENET_CLSN   ((ushort)0x0040)
-#define PC_ENET_RENA   ((ushort)0x0080)
-
-#define SICR_ENET_MASK ((uint)0x0000ff00)
-#define SICR_ENET_CLKRT        ((uint)0x00003d00)
-#endif
-
 #ifdef CONFIG_BSEIP
 /* This ENET stuff is for the MPC823 with ethernet on SCC2.
  * This is unique to the BSE ip-Engine board.
diff --git a/arch/m68k/include/asm/dbg.h b/arch/m68k/include/asm/dbg.h
deleted file mode 100644 (file)
index 27af327..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#define DEBUG 1
-#ifdef CONFIG_COLDFIRE
-#define        BREAK asm volatile ("halt")
-#else
-#define BREAK *(volatile unsigned char *)0xdeadbee0 = 0
-#endif
index 0ff3fc6a6d9aaafda260cb0d39a70b431e6aa422..429fe26e320c9813afdf88011195552498a90032 100644 (file)
@@ -39,7 +39,7 @@
 #define MAX_M68K_DMA_CHANNELS 4
 #elif defined(CONFIG_M5272)
 #define MAX_M68K_DMA_CHANNELS 1
-#elif defined(CONFIG_M532x)
+#elif defined(CONFIG_M53xx)
 #define MAX_M68K_DMA_CHANNELS 0
 #else
 #define MAX_M68K_DMA_CHANNELS 2
index cd952b0a8bd32d81db35ef6382b9baa0af2bf410..3177ce8331d6916d5c83362f6810d4fd28728cd5 100644 (file)
@@ -55,8 +55,8 @@
 #define        CACHE_SIZE      0x2000          /* 8k of unified cache */
 #define        ICACHE_SIZE     CACHE_SIZE
 #define        DCACHE_SIZE     CACHE_SIZE
-#elif defined(CONFIG_M532x)
-#define        CACHE_SIZE      0x4000          /* 32k of unified cache */
+#elif defined(CONFIG_M53xx)
+#define        CACHE_SIZE      0x4000          /* 16k of unified cache */
 #define        ICACHE_SIZE     CACHE_SIZE
 #define        DCACHE_SIZE     CACHE_SIZE
 #endif
similarity index 99%
rename from arch/m68k/include/asm/m532xsim.h
rename to arch/m68k/include/asm/m53xxsim.h
index 8668e47ced0e6ebbd019b183f2f6dde0c6197cc7..faa1a2133bfdad99ce5b2342042a82df58017122 100644 (file)
@@ -1,15 +1,15 @@
 /****************************************************************************/
 
 /*
- *     m532xsim.h -- ColdFire 5329 registers
+ *     m53xxsim.h -- ColdFire 5329 registers
  */
 
 /****************************************************************************/
-#ifndef        m532xsim_h
-#define        m532xsim_h
+#ifndef        m53xxsim_h
+#define        m53xxsim_h
 /****************************************************************************/
 
-#define        CPU_NAME                "COLDFIRE(m532x)"
+#define        CPU_NAME                "COLDFIRE(m53xx)"
 #define        CPU_INSTR_PER_JIFFY     3
 #define        MCF_BUSCLK              (MCF_CLK / 3)
 
 /*
  *  QSPI module.
  */
-#define        MCFQSPI_BASE            0xFC058000      /* Base address of QSPI */
+#define        MCFQSPI_BASE            0xFC05C000      /* Base address of QSPI */
 #define        MCFQSPI_SIZE            0x40            /* Size of QSPI region */
 
 #define        MCFQSPI_CS0             84
 #define MCFEPORT_EPFR                 (0xFC094006)
 
 /********************************************************************/
-#endif /* m532xsim_h */
+#endif /* m53xxsim_h */
index 192bbfeabf70c1047288286b37a7ee686d0d32f0..6d13cae44af530240b99b83e4248288ee939ae69 100644 (file)
  */
 #define ACR0_MODE      (ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \
                         ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP)
+#if defined(CONFIG_CACHE_COPYBACK)
 #define ACR1_MODE      (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
-                        ACR_ENABLE+ACR_SUPER+ACR_SP)
+                        ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_CP)
+#else
+#define ACR1_MODE      (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+                        ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_WT)
+#endif
 #define ACR2_MODE      0
 #define ACR3_MODE      (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
                         ACR_ENABLE+ACR_SUPER+ACR_SP)
index fa1059f50dfcd1bef9b98c0b482839325c642ddb..c41ebf45f1d0f576a384d1a789fc2940b3e3a460 100644 (file)
@@ -104,7 +104,7 @@ static inline void gpio_free(unsigned gpio)
 #if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
     defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
     defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M532x) || defined(CONFIG_M54xx) || \
+    defined(CONFIG_M53xx) || defined(CONFIG_M54xx) || \
     defined(CONFIG_M5441x)
 
 /* These parts have GPIO organized by 8 bit ports */
@@ -139,7 +139,7 @@ static inline void gpio_free(unsigned gpio)
 
 #if defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
     defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+    defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 /*
  * These parts have an 'Edge' Port module (external interrupt/GPIO) which uses
  * read-modify-write to change an output and a GPIO module which has separate
@@ -195,7 +195,7 @@ static inline u32 __mcfgpio_ppdr(unsigned gpio)
                return MCFSIM2_GPIO1READ;
 #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
       defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-      defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+      defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #if !defined(CONFIG_M5441x)
        if (gpio < 8)
                return MCFEPORT_EPPDR;
@@ -237,7 +237,7 @@ static inline u32 __mcfgpio_podr(unsigned gpio)
                return MCFSIM2_GPIO1WRITE;
 #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
       defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-      defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+      defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #if !defined(CONFIG_M5441x)
        if (gpio < 8)
                return MCFEPORT_EPDR;
@@ -279,7 +279,7 @@ static inline u32 __mcfgpio_pddr(unsigned gpio)
                return MCFSIM2_GPIO1ENABLE;
 #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
       defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-      defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+      defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #if !defined(CONFIG_M5441x)
        if (gpio < 8)
                return MCFEPORT_EPDDR;
index a04fd9b2714c7d39b1365337b63ee55942a1f9f3..bc867de8a1e9a232f879c7c12eaf042a04e7c43e 100644 (file)
@@ -36,8 +36,8 @@
 #elif defined(CONFIG_M5307)
 #include <asm/m5307sim.h>
 #include <asm/mcfintc.h>
-#elif defined(CONFIG_M532x)
-#include <asm/m532xsim.h>
+#elif defined(CONFIG_M53xx)
+#include <asm/m53xxsim.h>
 #elif defined(CONFIG_M5407)
 #include <asm/m5407sim.h>
 #include <asm/mcfintc.h>
index da2fa43c2e458438ec61c6246faf5379ae9f356c..089f0f150bbfc06169a720e2ddb9b9d3d563658c 100644 (file)
@@ -19,7 +19,7 @@
 #define        MCFTIMER_TRR            0x04            /* Timer Reference (r/w) */
 #define        MCFTIMER_TCR            0x08            /* Timer Capture reg (r/w) */
 #define        MCFTIMER_TCN            0x0C            /* Timer Counter reg (r/w) */
-#if defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #define        MCFTIMER_TER            0x03            /* Timer Event reg (r/w) */
 #else
 #define        MCFTIMER_TER            0x11            /* Timer Event reg (r/w) */
index 02591a109f8c0965ffea5116fbfec998078a6109..68f0fac60099277eac637851f3415e092c8fb921 100644 (file)
@@ -25,7 +25,7 @@ obj-$(CONFIG_M527x)   += m527x.o pit.o intc-2.o reset.o
 obj-$(CONFIG_M5272)    += m5272.o intc-5272.o timers.o
 obj-$(CONFIG_M528x)    += m528x.o pit.o intc-2.o reset.o
 obj-$(CONFIG_M5307)    += m5307.o timers.o intc.o reset.o
-obj-$(CONFIG_M532x)    += m532x.o timers.o intc-simr.o reset.o
+obj-$(CONFIG_M53xx)    += m53xx.o timers.o intc-simr.o reset.o
 obj-$(CONFIG_M5407)    += m5407.o timers.o intc.o reset.o
 obj-$(CONFIG_M54xx)    += m54xx.o sltimers.o intc-2.o
 obj-$(CONFIG_M5441x)   += m5441x.o pit.o intc-simr.o reset.o
similarity index 98%
rename from arch/m68k/platform/coldfire/m532x.c
rename to arch/m68k/platform/coldfire/m53xx.c
index 7951d1d43357a16ac953b584f71c5c7efd6bd526..5286f98fbed075cbe73298c02bd6b568beb332c2 100644 (file)
@@ -1,7 +1,7 @@
 /***************************************************************************/
 
 /*
- *     linux/arch/m68knommu/platform/532x/config.c
+ *     m53xx.c -- platform support for ColdFire 53xx based boards
  *
  *     Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
  *     Copyright (C) 2000, Lineo (www.lineo.com)
@@ -118,7 +118,8 @@ static struct clk * const enable_clks[] __initconst = {
        &__clk_0_24,    /* mcfuart.0 */
        &__clk_0_25,    /* mcfuart.1 */
        &__clk_0_26,    /* mcfuart.2 */
-
+       &__clk_0_28,    /* mcftmr.0 */
+       &__clk_0_29,    /* mcftmr.1 */
        &__clk_0_32,    /* mcfpit.0 */
        &__clk_0_33,    /* mcfpit.1 */
        &__clk_0_37,    /* mcfeport.0 */
@@ -134,8 +135,6 @@ static struct clk * const disable_clks[] __initconst = {
        &__clk_0_17,    /* edma */
        &__clk_0_22,    /* mcfi2c.0 */
        &__clk_0_23,    /* mcfqspi.0 */
-       &__clk_0_28,    /* mcftmr.0 */
-       &__clk_0_29,    /* mcftmr.1 */
        &__clk_0_30,    /* mcftmr.2 */
        &__clk_0_31,    /* mcftmr.3 */
        &__clk_0_34,    /* mcfpit.2 */
@@ -153,7 +152,7 @@ static struct clk * const disable_clks[] __initconst = {
 };
 
 
-static void __init m532x_clk_init(void)
+static void __init m53xx_clk_init(void)
 {
        unsigned i;
 
@@ -169,7 +168,7 @@ static void __init m532x_clk_init(void)
 
 #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
-static void __init m532x_qspi_init(void)
+static void __init m53xx_qspi_init(void)
 {
        /* setup QSPS pins for QSPI with gpio CS control */
        writew(0x01f0, MCFGPIO_PAR_QSPI);
@@ -179,7 +178,7 @@ static void __init m532x_qspi_init(void)
 
 /***************************************************************************/
 
-static void __init m532x_uarts_init(void)
+static void __init m53xx_uarts_init(void)
 {
        /* UART GPIO initialization */
        writew(readw(MCFGPIO_PAR_UART) | 0x0FFF, MCFGPIO_PAR_UART);
@@ -187,7 +186,7 @@ static void __init m532x_uarts_init(void)
 
 /***************************************************************************/
 
-static void __init m532x_fec_init(void)
+static void __init m53xx_fec_init(void)
 {
        u8 v;
 
@@ -217,11 +216,11 @@ void __init config_BSP(char *commandp, int size)
        }
 #endif
        mach_sched_init = hw_timer_init;
-       m532x_clk_init();
-       m532x_uarts_init();
-       m532x_fec_init();
+       m53xx_clk_init();
+       m53xx_uarts_init();
+       m53xx_fec_init();
 #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
-       m532x_qspi_init();
+       m53xx_qspi_init();
 #endif
 
 #ifdef CONFIG_BDM_DISABLE
index 51f6d2af807f8dd2c401138bc3894b8ae920f34d..d06068e457643fc804f0b068679da230e640becb 100644 (file)
@@ -36,7 +36,7 @@
  */
 void coldfire_profile_init(void);
 
-#if defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #define        __raw_readtrr   __raw_readl
 #define        __raw_writetrr  __raw_writel
 #else
index d2b097a652d9193f58917f2a256b4e3af09b540c..3649a8b150c0cf46f0161066a262c9c11e81e9a3 100644 (file)
@@ -17,7 +17,6 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_EFI_PARTITION is not set
-CONFIG_OPT_LIB_ASM=y
 CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
 CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
 CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
index 41cc841091b084e73692220324d30fd44ac0b018..d52abb6812fabb082c86196d97be0ee90cf728d0 100644 (file)
@@ -153,7 +153,5 @@ extern void __init xilinx_pci_init(void);
 static inline void __init xilinx_pci_init(void) { return; }
 #endif
 
-#include <asm-generic/pci-dma-compat.h>
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_MICROBLAZE_PCI_H */
index a1ab5f0009efcd7bc84d4dd17aabedb915707653..efe59d881789fbafd7db6bd4933263394995da1f 100644 (file)
@@ -90,17 +90,25 @@ static inline int ___range_ok(unsigned long addr, unsigned long size)
 
 #else
 
-/*
- * Address is valid if:
- *  - "addr", "addr + size" and "size" are all below the limit
- */
-#define access_ok(type, addr, size) \
-       (get_fs().seg >= (((unsigned long)(addr)) | \
-               (size) | ((unsigned long)(addr) + (size))))
-
-/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
- type?"WRITE":"READ",addr,size,get_fs().seg)) */
-
+static inline int access_ok(int type, const void __user *addr,
+                                                       unsigned long size)
+{
+       if (!size)
+               goto ok;
+
+       if ((get_fs().seg < ((unsigned long)addr)) ||
+                       (get_fs().seg < ((unsigned long)addr + size - 1))) {
+               pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+                       type ? "WRITE" : "READ ", (u32)addr, (u32)size,
+                       (u32)get_fs().seg);
+               return 0;
+       }
+ok:
+       pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+                       type ? "WRITE" : "READ ", (u32)addr, (u32)size,
+                       (u32)get_fs().seg);
+       return 1;
+}
 #endif
 
 #ifdef CONFIG_MMU
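
The new access_ok() above replaces the old macro with an explicit range check against the segment limit, with a zero-length access always allowed. Stripped of get_fs() and the pr_debug() plumbing, the rule reduces to the following user-space sketch; range_ok() and seg_limit are illustrative names, not kernel API:

#include <stdio.h>

/* 1 if [addr, addr + size) lies at or below the segment limit, with a
 * zero-length access treated as always valid, mirroring the check in
 * the new access_ok(). */
static int range_ok(unsigned long addr, unsigned long size,
                    unsigned long seg_limit)
{
        if (!size)
                return 1;
        if (seg_limit < addr || seg_limit < addr + size - 1)
                return 0;
        return 1;
}

int main(void)
{
        unsigned long seg = 0x7fffffffUL;       /* hypothetical user limit */

        printf("%d\n", range_ok(0x1000UL, 0x100UL, seg));       /* 1: fits */
        printf("%d\n", range_ok(0x7ffffff0UL, 0x100UL, seg));   /* 0: overruns */
        return 0;
}
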
index 0b2299bcb94817f59e34399adc20b05e96c0f3bd..410398f6db555a1df3be57620454dc61c47f20fa 100644 (file)
@@ -37,6 +37,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
        {"8.20.a", 0x15},
        {"8.20.b", 0x16},
        {"8.30.a", 0x17},
+       {"8.40.a", 0x18},
+       {"8.40.b", 0x19},
        {NULL, 0},
 };
 
@@ -57,6 +59,9 @@ const struct family_string_key family_string_lookup[] = {
        {"virtex6", 0xe},
        /* FIXME There is no key code defined for spartan2 */
        {"spartan2", 0xf0},
+       {"kintex7", 0x10},
+       {"artix7", 0x11},
+       {"zynq7000", 0x12},
        {NULL, 0},
 };
 
index eef84de5e8c83bbb5bbee4c2db4efa6e488ceded..fcc797feb9dbd5df1b654b1a5ace439c35167553 100644 (file)
@@ -112,16 +112,16 @@ no_fdt_arg:
  * copy command line directly to cmd_line placed in data section.
  */
        beqid   r5, skip        /* Skip if NULL pointer */
-       or      r6, r0, r0              /* incremment */
+       or      r11, r0, r0             /* incremment */
        ori     r4, r0, cmd_line        /* load address of command line */
        tophys(r4,r4)                   /* convert to phys address */
        ori     r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
 _copy_command_line:
        /* r2=r5+r6 - r5 contain pointer to command line */
-       lbu             r2, r5, r6
+       lbu     r2, r5, r11
        beqid   r2, skip                /* Skip if no data */
-       sb              r2, r4, r6              /* addr[r4+r6]= r2*/
-       addik   r6, r6, 1               /* increment counting */
+       sb      r2, r4, r11             /* addr[r4+r6]= r2 */
+       addik   r11, r11, 1             /* increment counting */
        bgtid   r3, _copy_command_line  /* loop for all entries       */
        addik   r3, r3, -1              /* decrement loop */
        addik   r5, r4, 0               /* add new space for command line */
@@ -131,13 +131,13 @@ skip:
 
 #ifdef NOT_COMPILE
 /* save bram context */
-       or      r6, r0, r0                              /* incremment */
+       or      r11, r0, r0                             /* incremment */
        ori     r4, r0, TOPHYS(_bram_load_start)        /* save bram context */
        ori     r3, r0, (LMB_SIZE - 4)
 _copy_bram:
-       lw      r7, r0, r6              /* r7 = r0 + r6 */
-       sw      r7, r4, r6              /* addr[r4 + r6] = r7*/
-       addik   r6, r6, 4               /* increment counting */
+       lw      r7, r0, r11             /* r7 = r0 + r6 */
+       sw      r7, r4, r11             /* addr[r4 + r6] = r7 */
+       addik   r11, r11, 4             /* increment counting */
        bgtid   r3, _copy_bram          /* loop for all entries */
        addik   r3, r3, -4              /* descrement loop */
 #endif
@@ -303,8 +303,8 @@ jump_over2:
         * the exception vectors, using a 4k real==virtual mapping.
         */
        /* Use temporary TLB_ID for LMB - clear this temporary mapping later */
-       ori     r6, r0, MICROBLAZE_LMB_TLB_ID
-       mts     rtlbx,r6
+       ori     r11, r0, MICROBLAZE_LMB_TLB_ID
+       mts     rtlbx,r11
 
        ori     r4,r0,(TLB_WR | TLB_EX)
        ori     r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
index 8778adf72bd36ac8b81ed41c15284596079467a4..d85fa3a2b0f828b6023daf3ba7daaa674fb0d863 100644 (file)
@@ -172,4 +172,6 @@ void __init init_IRQ(void)
         * and commits this patch.  ~~gcl */
        root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops,
                                                        (void *)intr_mask);
+
+       irq_set_default_host(root_domain);
 }
index a55893807274cf6964f8a3ffe683b7dfb20e5dcf..7d1a9c8b1f3dfff37cad0e5a87d9372f9030a3e8 100644 (file)
@@ -160,3 +160,8 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
        return 0; /* MicroBlaze has no separate FPU registers */
 }
 #endif /* CONFIG_MMU */
+
+void arch_cpu_idle(void)
+{
+       local_irq_enable();
+}
index 4ec137d13ad793cf9dd36521225547f41d8a21d9..b38ae3acfeb45adbe98f95c05c9c52e42721a215 100644 (file)
@@ -404,10 +404,11 @@ asmlinkage void __init mmu_init(void)
 
 #if defined(CONFIG_BLK_DEV_INITRD)
        /* Remove the init RAM disk from the available memory. */
-/*     if (initrd_start) {
-               mem_pieces_remove(&phys_avail, __pa(initrd_start),
-                                 initrd_end - initrd_start, 1);
-       }*/
+       if (initrd_start) {
+               unsigned long size;
+               size = initrd_end - initrd_start;
+               memblock_reserve(virt_to_phys(initrd_start), size);
+       }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
        /* Initialize the MMU hardware */
index 9ea521e4959ef062e3bae1d1ac31be242d88a5fc..bdb8ea100e73ee8637c8ea587378000292b0b37b 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
-#include <linux/pci.h>
 #include <linux/export.h>
 
 #include <asm/processor.h>
index 7dd65cfae83759562e43ab20bb03be071ae56ce5..d2cfe45f332b419b5c463b78eb4f44a40e0386c4 100644 (file)
@@ -17,3 +17,7 @@ obj- := $(platform-)
 obj-y += kernel/
 obj-y += mm/
 obj-y += math-emu/
+
+ifdef CONFIG_KVM
+obj-y += kvm/
+endif
index a90cfc702bb1a31cade6c98d0bd4eb0ead652f8d..7a58ab933b206a397c56b65f6a608fc4f9a1d8eb 100644 (file)
@@ -304,7 +304,6 @@ config MIPS_MALTA
        select HW_HAS_PCI
        select I8253
        select I8259
-       select MIPS_BOARDS_GEN
        select MIPS_BONITO64
        select MIPS_CPU_SCACHE
        select PCI_GT64XXX_PCI0
@@ -335,12 +334,12 @@ config MIPS_SEAD3
        select BOOT_RAW
        select CEVT_R4K
        select CSRC_R4K
+       select CSRC_GIC
        select CPU_MIPSR2_IRQ_VI
        select CPU_MIPSR2_IRQ_EI
        select DMA_NONCOHERENT
        select IRQ_CPU
        select IRQ_GIC
-       select MIPS_BOARDS_GEN
        select MIPS_CPU_SCACHE
        select MIPS_MSC
        select SYS_HAS_CPU_MIPS32_R1
@@ -352,6 +351,7 @@ config MIPS_SEAD3
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_SUPPORTS_SMARTMIPS
+       select SYS_SUPPORTS_MICROMIPS
        select USB_ARCH_HAS_EHCI
        select USB_EHCI_BIG_ENDIAN_DESC
        select USB_EHCI_BIG_ENDIAN_MMIO
@@ -910,6 +910,9 @@ config CEVT_GT641XX
 config CEVT_R4K
        bool
 
+config CEVT_GIC
+       bool
+
 config CEVT_SB1250
        bool
 
@@ -982,9 +985,6 @@ config MIPS_MSC
 config MIPS_NILE4
        bool
 
-config MIPS_DISABLE_OBSOLETE_IDE
-       bool
-
 config SYNC_R4K
        bool
 
@@ -1075,9 +1075,6 @@ config IRQ_GT641XX
 config IRQ_GIC
        bool
 
-config MIPS_BOARDS_GEN
-       bool
-
 config PCI_GT64XXX_PCI0
        bool
 
@@ -1147,7 +1144,7 @@ config BOOT_ELF32
 
 config MIPS_L1_CACHE_SHIFT
        int
-       default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL
+       default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL || SOC_RT288X
        default "6" if MIPS_CPU_SCACHE
        default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON
        default "5"
@@ -1236,6 +1233,7 @@ config CPU_MIPS32_R2
        select CPU_HAS_PREFETCH
        select CPU_SUPPORTS_32BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
+       select HAVE_KVM
        help
          Choose this option to build a kernel for release 2 or later of the
          MIPS32 architecture.  Most modern embedded systems with a 32-bit
@@ -1736,6 +1734,20 @@ config 64BIT
 
 endchoice
 
+config KVM_GUEST
+       bool "KVM Guest Kernel"
+       help
+         Select this option if building a guest kernel for KVM (Trap & Emulate) mode
+
+config KVM_HOST_FREQ
+       int "KVM Host Processor Frequency (MHz)"
+       depends on KVM_GUEST
+       default 500
+       help
+         Select this option if building a guest kernel for KVM to skip
+         RTC emulation when determining guest CPU Frequency.  Instead, the guest
+         processor frequency is automatically derived from the host frequency.
+
 choice
        prompt "Kernel page size"
        default PAGE_SIZE_4KB
@@ -1811,6 +1823,15 @@ config FORCE_MAX_ZONEORDER
          The page size is not necessarily 4KB.  Keep this in mind
          when choosing a value for this option.
 
+config CEVT_GIC
+       bool "Use GIC global counter for clock events"
+       depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC)
+       help
+         Use the GIC global counter for the clock events. The R4K clock
+         event driver is always present, so if the platform ends up not
+         detecting a GIC, it will fall back to the R4K timer for the
+         generation of clock events.
+
 config BOARD_SCACHE
        bool
 
@@ -2016,6 +2037,7 @@ config SB1_PASS_2_1_WORKAROUNDS
        depends on CPU_SB1 && CPU_SB1_PASS_2
        default y
 
+
 config 64BIT_PHYS_ADDR
        bool
 
@@ -2034,6 +2056,13 @@ config CPU_HAS_SMARTMIPS
          you don't know you probably don't have SmartMIPS and should say N
          here.
 
+config CPU_MICROMIPS
+       depends on SYS_SUPPORTS_MICROMIPS
+       bool "Build kernel using microMIPS ISA"
+       help
+         When this option is enabled the kernel will be built using the
+         microMIPS ISA
+
 config CPU_HAS_WB
        bool
 
@@ -2096,6 +2125,9 @@ config SYS_SUPPORTS_HIGHMEM
 config SYS_SUPPORTS_SMARTMIPS
        bool
 
+config SYS_SUPPORTS_MICROMIPS
+       bool
+
 config ARCH_FLATMEM_ENABLE
        def_bool y
        depends on !NUMA && !CPU_LOONGSON2
@@ -2556,3 +2588,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/mips/kvm/Kconfig"
index 6f7978f95090f5dbdfe5bf761b008becf0df6cb2..dd58a04ef4bca5dc4a6604d283722aeb428e861c 100644 (file)
@@ -114,6 +114,7 @@ cflags-$(CONFIG_CPU_BIG_ENDIAN)             += $(shell $(CC) -dumpmachine |grep -q 'mips.*e
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
 cflags-$(CONFIG_CPU_HAS_SMARTMIPS)     += $(call cc-option,-msmartmips)
+cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips -mno-jals)
 
 cflags-$(CONFIG_SB1XXX_CORELIS)        += $(call cc-option,-mno-sched-prolog) \
                                   -fno-omit-frame-pointer
index c8862bdc2ff281eeb270e615c95a685edbcddcde..7032ac7ecd1bb2f8083b848ca5ca01572dc4cd44 100644 (file)
@@ -31,7 +31,6 @@ config MIPS_DB1000
        select ALCHEMY_GPIOINT_AU1000
        select DMA_NONCOHERENT
        select HW_HAS_PCI
-       select MIPS_DISABLE_OBSOLETE_IDE
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
@@ -41,7 +40,6 @@ config MIPS_DB1235
        select ARCH_REQUIRE_GPIOLIB
        select HW_HAS_PCI
        select DMA_COHERENT
-       select MIPS_DISABLE_OBSOLETE_IDE
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
 
@@ -57,7 +55,6 @@ config MIPS_GPR
        select ALCHEMY_GPIOINT_AU1000
        select HW_HAS_PCI
        select DMA_NONCOHERENT
-       select MIPS_DISABLE_OBSOLETE_IDE
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
 
index fa1bdd1aea15875989e187c6d14728e245058f75..b3afcdd8d77af9380c154227a17c6a5ee944d8d2 100644 (file)
@@ -5,32 +5,14 @@ platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
 
 
 #
-# AMD Alchemy Pb1100 eval board
-#
-platform-$(CONFIG_MIPS_PB1100) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1100)     += 0xffffffff80100000
-
-#
-# AMD Alchemy Pb1500 eval board
-#
-platform-$(CONFIG_MIPS_PB1500) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1500)     += 0xffffffff80100000
-
-#
-# AMD Alchemy Pb1550 eval board
-#
-platform-$(CONFIG_MIPS_PB1550) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1550)     += 0xffffffff80100000
-
-#
-# AMD Alchemy Db1000/Db1500/Db1100 eval boards
+# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100 eval boards
 #
 platform-$(CONFIG_MIPS_DB1000) += alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1000)   += -I$(srctree)/arch/mips/include/asm/mach-db1x00
 load-$(CONFIG_MIPS_DB1000)     += 0xffffffff80100000
 
 #
-# AMD Alchemy Db1200/Pb1200/Db1550/Db1300 eval boards
+# AMD Alchemy Db1200/Pb1200/Db1550/Pb1550/Db1300 eval boards
 #
 platform-$(CONFIG_MIPS_DB1235) += alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1235)   += -I$(srctree)/arch/mips/include/asm/mach-db1x00
index 28abfeef09d6ff21c74c3975721ed70a3fa45d7e..92dfa481205b066ddf4ca29a62506bcab139e568 100644 (file)
@@ -30,7 +30,6 @@
 #include <asm/sections.h>
 
 #include <asm/mach-ar7/ar7.h>
-#include <asm/mips-boards/prom.h>
 
 static int __init memsize(void)
 {
index d5b3c9057018d8ccdbd2de891d8498de3b1ca76d..a0233a2c198812980405dc05e117de444413552e 100644 (file)
@@ -51,20 +51,6 @@ static void ath79_halt(void)
                cpu_wait();
 }
 
-static void __init ath79_detect_mem_size(void)
-{
-       unsigned long size;
-
-       for (size = ATH79_MEM_SIZE_MIN; size < ATH79_MEM_SIZE_MAX;
-            size <<= 1) {
-               if (!memcmp(ath79_detect_mem_size,
-                           ath79_detect_mem_size + size, 1024))
-                       break;
-       }
-
-       add_memory_region(0, size, BOOT_MEM_RAM);
-}
-
 static void __init ath79_detect_sys_type(void)
 {
        char *chip = "????";
@@ -212,7 +198,7 @@ void __init plat_mem_setup(void)
                                         AR71XX_DDR_CTRL_SIZE);
 
        ath79_detect_sys_type();
-       ath79_detect_mem_size();
+       detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
        ath79_clocks_init();
 
        _machine_restart = ath79_restart;
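
Both the removed ath79_detect_mem_size() and the generic detect_memory_region() that replaces it rely on the same trick: keep doubling the candidate size until the start of RAM aliases back onto itself. A compact sketch of that probe, using a plain byte pointer in place of the function's own KSEG0 address that the removed code compared against; probe_ram_size() is an illustrative name only:

#include <string.h>

/* Double 'size' until the first KiB of RAM reappears 'size' bytes
 * later (address aliasing), which marks the real end of memory. */
unsigned long probe_ram_size(const unsigned char *base,
                             unsigned long min, unsigned long max)
{
        unsigned long size;

        for (size = min; size < max; size <<= 1) {
                if (!memcmp(base, base + size, 1024))
                        break;                  /* wrapped around: found it */
        }
        return size;
}
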
index d03e8799d1cf8c09455c60e0029f5371cf67de7c..5639662fd5031a005534181569e4ef628b6a5640 100644 (file)
@@ -25,6 +25,10 @@ config BCM63XX_CPU_6358
        bool "support 6358 CPU"
        select HW_HAS_PCI
 
+config BCM63XX_CPU_6362
+       bool "support 6362 CPU"
+       select HW_HAS_PCI
+
 config BCM63XX_CPU_6368
        bool "support 6368 CPU"
        select HW_HAS_PCI
index 9aa7d44898ed11cff9c70112e0e142923e27f2f4..a9505c4867e8dd1054646643abdf1640172b0dce 100644 (file)
@@ -726,11 +726,11 @@ void __init board_prom_init(void)
        u32 val;
 
        /* read base address of boot chip select (0)
-        * 6328 does not have MPI but boots from a fixed address
+        * 6328/6362 do not have MPI but boot from a fixed address
         */
-       if (BCMCPU_IS_6328())
+       if (BCMCPU_IS_6328() || BCMCPU_IS_6362()) {
                val = 0x18000000;
-       else {
+       } else {
                val = bcm_mpi_readl(MPI_CSBASE_REG(0));
                val &= MPI_CSBASE_BASE_MASK;
        }
index b9e948d594300281cc4199c3a89e5756630e111d..c726a97fc7987d5cb87dfd04868ce3bf12e66cec 100644 (file)
 #include <bcm63xx_io.h>
 #include <bcm63xx_regs.h>
 #include <bcm63xx_reset.h>
-#include <bcm63xx_clk.h>
+
+struct clk {
+       void            (*set)(struct clk *, int);
+       unsigned int    rate;
+       unsigned int    usage;
+       int             id;
+};
 
 static DEFINE_MUTEX(clocks_mutex);
 
@@ -119,11 +125,18 @@ static struct clk clk_ephy = {
  */
 static void enetsw_set(struct clk *clk, int enable)
 {
-       if (!BCMCPU_IS_6368())
+       if (BCMCPU_IS_6328())
+               bcm_hwclock_set(CKCTL_6328_ROBOSW_EN, enable);
+       else if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_ROBOSW_EN, enable);
+       else if (BCMCPU_IS_6368())
+               bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
+                               CKCTL_6368_SWPKT_USB_EN |
+                               CKCTL_6368_SWPKT_SAR_EN,
+                               enable);
+       else
                return;
-       bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
-                       CKCTL_6368_SWPKT_USB_EN |
-                       CKCTL_6368_SWPKT_SAR_EN, enable);
+
        if (enable) {
                /* reset switch core afer clock change */
                bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);
@@ -160,6 +173,8 @@ static void usbh_set(struct clk *clk, int enable)
                bcm_hwclock_set(CKCTL_6328_USBH_EN, enable);
        else if (BCMCPU_IS_6348())
                bcm_hwclock_set(CKCTL_6348_USBH_EN, enable);
+       else if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_USBH_EN, enable);
        else if (BCMCPU_IS_6368())
                bcm_hwclock_set(CKCTL_6368_USBH_EN, enable);
 }
@@ -175,6 +190,8 @@ static void usbd_set(struct clk *clk, int enable)
 {
        if (BCMCPU_IS_6328())
                bcm_hwclock_set(CKCTL_6328_USBD_EN, enable);
+       else if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_USBD_EN, enable);
        else if (BCMCPU_IS_6368())
                bcm_hwclock_set(CKCTL_6368_USBD_EN, enable);
 }
@@ -196,6 +213,8 @@ static void spi_set(struct clk *clk, int enable)
                mask = CKCTL_6348_SPI_EN;
        else if (BCMCPU_IS_6358())
                mask = CKCTL_6358_SPI_EN;
+       else if (BCMCPU_IS_6362())
+               mask = CKCTL_6362_SPI_EN;
        else
                /* BCMCPU_IS_6368 */
                mask = CKCTL_6368_SPI_EN;
@@ -236,7 +255,10 @@ static struct clk clk_xtm = {
  */
 static void ipsec_set(struct clk *clk, int enable)
 {
-       bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
+       if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_IPSEC_EN, enable);
+       else if (BCMCPU_IS_6368())
+               bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
 }
 
 static struct clk clk_ipsec = {
@@ -249,7 +271,10 @@ static struct clk clk_ipsec = {
 
 static void pcie_set(struct clk *clk, int enable)
 {
-       bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+       if (BCMCPU_IS_6328())
+               bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+       else if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_PCIE_EN, enable);
 }
 
 static struct clk clk_pcie = {
@@ -315,9 +340,9 @@ struct clk *clk_get(struct device *dev, const char *id)
                return &clk_periph;
        if (BCMCPU_IS_6358() && !strcmp(id, "pcm"))
                return &clk_pcm;
-       if (BCMCPU_IS_6368() && !strcmp(id, "ipsec"))
+       if ((BCMCPU_IS_6362() || BCMCPU_IS_6368()) && !strcmp(id, "ipsec"))
                return &clk_ipsec;
-       if (BCMCPU_IS_6328() && !strcmp(id, "pcie"))
+       if ((BCMCPU_IS_6328() || BCMCPU_IS_6362()) && !strcmp(id, "pcie"))
                return &clk_pcie;
        return ERR_PTR(-ENOENT);
 }
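
With the 6362 now handed the pcie and ipsec clocks as well, consumers reach them through the legacy clk API that this file implements. A hedged sketch of typical usage follows; example_pcie_clock_on()/off() and the NULL device argument are illustrative, and error handling is trimmed:

#include <linux/clk.h>
#include <linux/err.h>

static struct clk *pcie_clk;

/* Look up and gate on the "pcie" clock exposed by clk_get() above; in
 * this bcm63xx implementation clk_enable() ends up invoking the
 * clock's set() hook with enable = 1. */
static int example_pcie_clock_on(void)
{
        pcie_clk = clk_get(NULL, "pcie");
        if (IS_ERR(pcie_clk))
                return PTR_ERR(pcie_clk);

        clk_enable(pcie_clk);
        return 0;
}

static void example_pcie_clock_off(void)
{
        clk_disable(pcie_clk);
        clk_put(pcie_clk);
}
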
index a7afb289b15aa03f93d6665ad55969ba97cfe6a5..79fe32df5e96c99da07d763e0990e07ee0321260 100644 (file)
@@ -25,7 +25,7 @@ const int *bcm63xx_irqs;
 EXPORT_SYMBOL(bcm63xx_irqs);
 
 static u16 bcm63xx_cpu_id;
-static u16 bcm63xx_cpu_rev;
+static u8 bcm63xx_cpu_rev;
 static unsigned int bcm63xx_cpu_freq;
 static unsigned int bcm63xx_memory_size;
 
@@ -71,6 +71,15 @@ static const int bcm6358_irqs[] = {
 
 };
 
+static const unsigned long bcm6362_regs_base[] = {
+       __GEN_CPU_REGS_TABLE(6362)
+};
+
+static const int bcm6362_irqs[] = {
+       __GEN_CPU_IRQ_TABLE(6362)
+
+};
+
 static const unsigned long bcm6368_regs_base[] = {
        __GEN_CPU_REGS_TABLE(6368)
 };
@@ -87,7 +96,7 @@ u16 __bcm63xx_get_cpu_id(void)
 
 EXPORT_SYMBOL(__bcm63xx_get_cpu_id);
 
-u16 bcm63xx_get_cpu_rev(void)
+u8 bcm63xx_get_cpu_rev(void)
 {
        return bcm63xx_cpu_rev;
 }
@@ -169,6 +178,42 @@ static unsigned int detect_cpu_clock(void)
                return (16 * 1000000 * n1 * n2) / m1;
        }
 
+       case BCM6362_CPU_ID:
+       {
+               unsigned int tmp, mips_pll_fcvo;
+
+               tmp = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+               mips_pll_fcvo = (tmp & STRAPBUS_6362_FCVO_MASK)
+                               >> STRAPBUS_6362_FCVO_SHIFT;
+               switch (mips_pll_fcvo) {
+               case 0x03:
+               case 0x0b:
+               case 0x13:
+               case 0x1b:
+                       return 240000000;
+               case 0x04:
+               case 0x0c:
+               case 0x14:
+               case 0x1c:
+                       return 160000000;
+               case 0x05:
+               case 0x0e:
+               case 0x16:
+               case 0x1e:
+               case 0x1f:
+                       return 400000000;
+               case 0x06:
+                       return 440000000;
+               case 0x07:
+               case 0x17:
+                       return 384000000;
+               case 0x15:
+               case 0x1d:
+                       return 200000000;
+               default:
+                       return 320000000;
+               }
+       }
        case BCM6368_CPU_ID:
        {
                unsigned int tmp, p1, p2, ndiv, m1;
@@ -205,7 +250,7 @@ static unsigned int detect_memory_size(void)
        unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0;
        u32 val;
 
-       if (BCMCPU_IS_6328())
+       if (BCMCPU_IS_6328() || BCMCPU_IS_6362())
                return bcm_ddr_readl(DDR_CSEND_REG) << 24;
 
        if (BCMCPU_IS_6345()) {
@@ -240,53 +285,27 @@ static unsigned int detect_memory_size(void)
 
 void __init bcm63xx_cpu_init(void)
 {
-       unsigned int tmp, expected_cpu_id;
+       unsigned int tmp;
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int cpu = smp_processor_id();
+       u32 chipid_reg;
 
        /* soc registers location depends on cpu type */
-       expected_cpu_id = 0;
+       chipid_reg = 0;
 
        switch (c->cputype) {
        case CPU_BMIPS3300:
-               if ((read_c0_prid() & 0xff00) == PRID_IMP_BMIPS3300_ALT) {
-                       expected_cpu_id = BCM6348_CPU_ID;
-                       bcm63xx_regs_base = bcm6348_regs_base;
-                       bcm63xx_irqs = bcm6348_irqs;
-               } else {
+               if ((read_c0_prid() & 0xff00) != PRID_IMP_BMIPS3300_ALT)
                        __cpu_name[cpu] = "Broadcom BCM6338";
-                       expected_cpu_id = BCM6338_CPU_ID;
-                       bcm63xx_regs_base = bcm6338_regs_base;
-                       bcm63xx_irqs = bcm6338_irqs;
-               }
-               break;
+               /* fall-through */
        case CPU_BMIPS32:
-               expected_cpu_id = BCM6345_CPU_ID;
-               bcm63xx_regs_base = bcm6345_regs_base;
-               bcm63xx_irqs = bcm6345_irqs;
+               chipid_reg = BCM_6345_PERF_BASE;
                break;
        case CPU_BMIPS4350:
-               if ((read_c0_prid() & 0xf0) == 0x10) {
-                       expected_cpu_id = BCM6358_CPU_ID;
-                       bcm63xx_regs_base = bcm6358_regs_base;
-                       bcm63xx_irqs = bcm6358_irqs;
-               } else {
-                       /* all newer chips have the same chip id location */
-                       u16 chip_id = bcm_readw(BCM_6368_PERF_BASE);
-
-                       switch (chip_id) {
-                       case BCM6328_CPU_ID:
-                               expected_cpu_id = BCM6328_CPU_ID;
-                               bcm63xx_regs_base = bcm6328_regs_base;
-                               bcm63xx_irqs = bcm6328_irqs;
-                               break;
-                       case BCM6368_CPU_ID:
-                               expected_cpu_id = BCM6368_CPU_ID;
-                               bcm63xx_regs_base = bcm6368_regs_base;
-                               bcm63xx_irqs = bcm6368_irqs;
-                               break;
-                       }
-               }
+               if ((read_c0_prid() & 0xf0) == 0x10)
+                       chipid_reg = BCM_6345_PERF_BASE;
+               else
+                       chipid_reg = BCM_6368_PERF_BASE;
                break;
        }
 
@@ -294,20 +313,47 @@ void __init bcm63xx_cpu_init(void)
         * really early to panic, but delaying panic would not help since we
         * will never get any working console
         */
-       if (!expected_cpu_id)
+       if (!chipid_reg)
                panic("unsupported Broadcom CPU");
 
-       /*
-        * bcm63xx_regs_base is set, we can access soc registers
-        */
-
-       /* double check CPU type */
-       tmp = bcm_perf_readl(PERF_REV_REG);
+       /* read out CPU type */
+       tmp = bcm_readl(chipid_reg);
        bcm63xx_cpu_id = (tmp & REV_CHIPID_MASK) >> REV_CHIPID_SHIFT;
        bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT;
 
-       if (bcm63xx_cpu_id != expected_cpu_id)
-               panic("bcm63xx CPU id mismatch");
+       switch (bcm63xx_cpu_id) {
+       case BCM6328_CPU_ID:
+               bcm63xx_regs_base = bcm6328_regs_base;
+               bcm63xx_irqs = bcm6328_irqs;
+               break;
+       case BCM6338_CPU_ID:
+               bcm63xx_regs_base = bcm6338_regs_base;
+               bcm63xx_irqs = bcm6338_irqs;
+               break;
+       case BCM6345_CPU_ID:
+               bcm63xx_regs_base = bcm6345_regs_base;
+               bcm63xx_irqs = bcm6345_irqs;
+               break;
+       case BCM6348_CPU_ID:
+               bcm63xx_regs_base = bcm6348_regs_base;
+               bcm63xx_irqs = bcm6348_irqs;
+               break;
+       case BCM6358_CPU_ID:
+               bcm63xx_regs_base = bcm6358_regs_base;
+               bcm63xx_irqs = bcm6358_irqs;
+               break;
+       case BCM6362_CPU_ID:
+               bcm63xx_regs_base = bcm6362_regs_base;
+               bcm63xx_irqs = bcm6362_irqs;
+               break;
+       case BCM6368_CPU_ID:
+               bcm63xx_regs_base = bcm6368_regs_base;
+               bcm63xx_irqs = bcm6368_irqs;
+               break;
+       default:
+               panic("unsupported broadcom CPU %x", bcm63xx_cpu_id);
+               break;
+       }
 
        bcm63xx_cpu_freq = detect_cpu_clock();
        bcm63xx_memory_size = detect_memory_size();
index 58371c7deac20e0e3c53695ee75e99ec5ce91e85..588d1ec622e404fd2fbbe0a5d9a6868477b2c41b 100644 (file)
@@ -77,6 +77,12 @@ static int __init bcm63xx_detect_flash_type(void)
                        return BCM63XX_FLASH_TYPE_PARALLEL;
                else
                        return BCM63XX_FLASH_TYPE_SERIAL;
+       case BCM6362_CPU_ID:
+               val = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+               if (val & STRAPBUS_6362_BOOT_SEL_SERIAL)
+                       return BCM63XX_FLASH_TYPE_SERIAL;
+               else
+                       return BCM63XX_FLASH_TYPE_NAND;
        case BCM6368_CPU_ID:
                val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
                switch (val & STRAPBUS_6368_BOOT_SEL_MASK) {
index e97fd60e92ef289e215908b7327889435f3f702f..3065bb61820d5befea57f31580c91c15afcc666b 100644 (file)
 /*
  * register offsets
  */
-static const unsigned long bcm6338_regs_spi[] = {
-       __GEN_SPI_REGS_TABLE(6338)
-};
-
 static const unsigned long bcm6348_regs_spi[] = {
        __GEN_SPI_REGS_TABLE(6348)
 };
@@ -34,23 +30,15 @@ static const unsigned long bcm6358_regs_spi[] = {
        __GEN_SPI_REGS_TABLE(6358)
 };
 
-static const unsigned long bcm6368_regs_spi[] = {
-       __GEN_SPI_REGS_TABLE(6368)
-};
-
 const unsigned long *bcm63xx_regs_spi;
 EXPORT_SYMBOL(bcm63xx_regs_spi);
 
 static __init void bcm63xx_spi_regs_init(void)
 {
-       if (BCMCPU_IS_6338())
-               bcm63xx_regs_spi = bcm6338_regs_spi;
-       if (BCMCPU_IS_6348())
+       if (BCMCPU_IS_6338() || BCMCPU_IS_6348())
                bcm63xx_regs_spi = bcm6348_regs_spi;
-       if (BCMCPU_IS_6358())
+       if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
                bcm63xx_regs_spi = bcm6358_regs_spi;
-       if (BCMCPU_IS_6368())
-               bcm63xx_regs_spi = bcm6368_regs_spi;
 }
 #else
 static __init void bcm63xx_spi_regs_init(void) { }
@@ -93,13 +81,13 @@ int __init bcm63xx_spi_register(void)
        spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
 
        if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
-               spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
-               spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
-               spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
-               spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
+               spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1;
+               spi_pdata.fifo_size = SPI_6348_MSG_DATA_SIZE;
+               spi_pdata.msg_type_shift = SPI_6348_MSG_TYPE_SHIFT;
+               spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH;
        }
 
-       if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
+       if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) {
                spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
                spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
                spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
index da24c2bd9b7cde3cfbfa10caaa6f1ae70f6a95a5..c0ab3887f42ef8b355eed9971f04188be0b966d8 100644 (file)
@@ -82,6 +82,17 @@ static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused;
 #define ext_irq_cfg_reg1       PERF_EXTIRQ_CFG_REG_6358
 #define ext_irq_cfg_reg2       0
 #endif
+#ifdef CONFIG_BCM63XX_CPU_6362
+#define irq_stat_reg           PERF_IRQSTAT_6362_REG
+#define irq_mask_reg           PERF_IRQMASK_6362_REG
+#define irq_bits               64
+#define is_ext_irq_cascaded    1
+#define ext_irq_start          (BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE)
+#define ext_irq_end            (BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE)
+#define ext_irq_count          4
+#define ext_irq_cfg_reg1       PERF_EXTIRQ_CFG_REG_6362
+#define ext_irq_cfg_reg2       0
+#endif
 #ifdef CONFIG_BCM63XX_CPU_6368
 #define irq_stat_reg           PERF_IRQSTAT_6368_REG
 #define irq_mask_reg           PERF_IRQMASK_6368_REG
@@ -170,6 +181,16 @@ static void bcm63xx_init_irq(void)
                ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
                break;
+       case BCM6362_CPU_ID:
+               irq_stat_addr += PERF_IRQSTAT_6362_REG;
+               irq_mask_addr += PERF_IRQMASK_6362_REG;
+               irq_bits = 64;
+               ext_irq_count = 4;
+               is_ext_irq_cascaded = 1;
+               ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+               ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
+               ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
+               break;
        case BCM6368_CPU_ID:
                irq_stat_addr += PERF_IRQSTAT_6368_REG;
                irq_mask_addr += PERF_IRQMASK_6368_REG;
@@ -458,6 +479,7 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d,
        case BCM6338_CPU_ID:
        case BCM6345_CPU_ID:
        case BCM6358_CPU_ID:
+       case BCM6362_CPU_ID:
        case BCM6368_CPU_ID:
                if (levelsense)
                        reg |= EXTIRQ_CFG_LEVELSENSE(irq);
index 10eaff4580710d4eae4879691502c55ebef3f1d6..fd698087fbfd9367b777e1391aaabb53789b2936 100644 (file)
@@ -36,6 +36,8 @@ void __init prom_init(void)
                mask = CKCTL_6348_ALL_SAFE_EN;
        else if (BCMCPU_IS_6358())
                mask = CKCTL_6358_ALL_SAFE_EN;
+       else if (BCMCPU_IS_6362())
+               mask = CKCTL_6362_ALL_SAFE_EN;
        else if (BCMCPU_IS_6368())
                mask = CKCTL_6368_ALL_SAFE_EN;
        else
index 68a31bb90cbf6de79acda74449b0bbfa749a9f21..317931c6cf58f9305b001e25d735fcf189f18dff 100644 (file)
 #define BCM6358_RESET_PCIE     0
 #define BCM6358_RESET_PCIE_EXT 0
 
+#define BCM6362_RESET_SPI      SOFTRESET_6362_SPI_MASK
+#define BCM6362_RESET_ENET     0
+#define BCM6362_RESET_USBH     SOFTRESET_6362_USBH_MASK
+#define BCM6362_RESET_USBD     SOFTRESET_6362_USBS_MASK
+#define BCM6362_RESET_DSL      0
+#define BCM6362_RESET_SAR      SOFTRESET_6362_SAR_MASK
+#define BCM6362_RESET_EPHY     SOFTRESET_6362_EPHY_MASK
+#define BCM6362_RESET_ENETSW   SOFTRESET_6362_ENETSW_MASK
+#define BCM6362_RESET_PCM      SOFTRESET_6362_PCM_MASK
+#define BCM6362_RESET_MPI      0
+#define BCM6362_RESET_PCIE      (SOFTRESET_6362_PCIE_MASK | \
+                                SOFTRESET_6362_PCIE_CORE_MASK)
+#define BCM6362_RESET_PCIE_EXT SOFTRESET_6362_PCIE_EXT_MASK
+
 #define BCM6368_RESET_SPI      SOFTRESET_6368_SPI_MASK
 #define BCM6368_RESET_ENET     0
 #define BCM6368_RESET_USBH     SOFTRESET_6368_USBH_MASK
@@ -119,6 +133,10 @@ static const u32 bcm6358_reset_bits[] = {
        __GEN_RESET_BITS_TABLE(6358)
 };
 
+static const u32 bcm6362_reset_bits[] = {
+       __GEN_RESET_BITS_TABLE(6362)
+};
+
 static const u32 bcm6368_reset_bits[] = {
        __GEN_RESET_BITS_TABLE(6368)
 };
@@ -140,6 +158,9 @@ static int __init bcm63xx_reset_bits_init(void)
        } else if (BCMCPU_IS_6358()) {
                reset_reg = PERF_SOFTRESET_6358_REG;
                bcm63xx_reset_bits = bcm6358_reset_bits;
+       } else if (BCMCPU_IS_6362()) {
+               reset_reg = PERF_SOFTRESET_6362_REG;
+               bcm63xx_reset_bits = bcm6362_reset_bits;
        } else if (BCMCPU_IS_6368()) {
                reset_reg = PERF_SOFTRESET_6368_REG;
                bcm63xx_reset_bits = bcm6368_reset_bits;
@@ -182,6 +203,13 @@ static const u32 bcm63xx_reset_bits[] = {
 #define reset_reg PERF_SOFTRESET_6358_REG
 #endif
 
+#ifdef CONFIG_BCM63XX_CPU_6362
+static const u32 bcm63xx_reset_bits[] = {
+       __GEN_RESET_BITS_TABLE(6362)
+};
+#define reset_reg PERF_SOFTRESET_6362_REG
+#endif
+
 #ifdef CONFIG_BCM63XX_CPU_6368
 static const u32 bcm63xx_reset_bits[] = {
        __GEN_RESET_BITS_TABLE(6368)
index 35e18e98beb96b04151999aac8bf037129efc324..24a24445db64edd7d1388c35f3bd471e049489a9 100644 (file)
@@ -83,6 +83,9 @@ void bcm63xx_machine_reboot(void)
        case BCM6358_CPU_ID:
                perf_regs[0] = PERF_EXTIRQ_CFG_REG_6358;
                break;
+       case BCM6362_CPU_ID:
+               perf_regs[0] = PERF_EXTIRQ_CFG_REG_6362;
+               break;
        }
 
        for (i = 0; i < 2; i++) {
@@ -126,7 +129,7 @@ static void __bcm63xx_machine_reboot(char *p)
 const char *get_system_type(void)
 {
        static char buf[128];
-       snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%04X)",
+       snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%02X)",
                 board_get_name(),
                 bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev());
        return buf;
index 156aa6143e1117646ab7b81ccf57ca623a258513..a22f06a6f7cac31e3506619371615b8f4e1c8d4b 100644
@@ -1032,9 +1032,8 @@ static int octeon_irq_gpio_map_common(struct irq_domain *d,
        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;
 
-       hw += gpiod->base_hwirq;
-       line = hw >> 6;
-       bit = hw & 63;
+       line = (hw + gpiod->base_hwirq) >> 6;
+       bit = (hw + gpiod->base_hwirq) & 63;
        if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;
 
index cd732e5b4fd5f856df3f46a0a2ee83d0d7491f66..ce1d3eeeb7373373fa0f07056ff3ca39e0f0c496 100644
@@ -2,30 +2,21 @@ CONFIG_MIPS_MALTA=y
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
 CONFIG_MIPS_MT_SMP=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 CONFIG_HZ_100=y
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_LOG_BUF_SHIFT=15
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_RELAY=y
 CONFIG_NAMESPACES=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
-CONFIG_PID_NS=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_RELAY=y
 CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
 CONFIG_PCI=y
-CONFIG_PM=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
@@ -41,8 +32,6 @@ CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
 CONFIG_IP_MROUTE=y
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
@@ -65,7 +54,6 @@ CONFIG_IPV6_MROUTE=y
 CONFIG_IPV6_PIMSM_V2=y
 CONFIG_NETWORK_SECMARK=y
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -136,23 +124,15 @@ CONFIG_IP_VS_DH=m
 CONFIG_IP_VS_SH=m
 CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
-CONFIG_IP_VS_FTP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -162,8 +142,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
-CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
 CONFIG_IP6_NF_MATCH_FRAG=m
@@ -173,7 +151,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
@@ -247,12 +224,10 @@ CONFIG_MAC80211=m
 CONFIG_MAC80211_RC_PID=y
 CONFIG_MAC80211_RC_DEFAULT_PID=y
 CONFIG_MAC80211_MESH=y
-CONFIG_MAC80211_LEDS=y
 CONFIG_RFKILL=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_CONNECTOR=m
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_OOPS=m
@@ -271,7 +246,6 @@ CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
-# CONFIG_MISC_DEVICES is not set
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_IDE_GENERIC=y
@@ -317,13 +291,19 @@ CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_NETDEVICES=y
-CONFIG_IFB=m
-CONFIG_DUMMY=m
 CONFIG_BONDING=m
-CONFIG_MACVLAN=m
+CONFIG_DUMMY=m
 CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
 CONFIG_MARVELL_PHY=m
 CONFIG_DAVICOM_PHY=m
 CONFIG_QSEMI_PHY=m
@@ -334,14 +314,6 @@ CONFIG_SMSC_PHY=m
 CONFIG_BROADCOM_PHY=m
 CONFIG_ICPLUS_PHY=m
 CONFIG_REALTEK_PHY=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_NET_ETHERNET=y
-CONFIG_AX88796=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-CONFIG_TC35815=m
-CONFIG_CHELSIO_T3=m
-CONFIG_NETXEN_NIC=m
 CONFIG_ATMEL=m
 CONFIG_PCI_ATMEL=m
 CONFIG_PRISM54=m
@@ -352,15 +324,7 @@ CONFIG_HOSTAP_PLX=m
 CONFIG_HOSTAP_PCI=m
 CONFIG_IPW2100=m
 CONFIG_IPW2100_MONITOR=y
-CONFIG_IPW2200=m
-CONFIG_IPW2200_MONITOR=y
-CONFIG_IPW2200_PROMISCUOUS=y
-CONFIG_IPW2200_QOS=y
 CONFIG_LIBERTAS=m
-CONFIG_HERMES=m
-CONFIG_PLX_HERMES=m
-CONFIG_TMD_HERMES=m
-CONFIG_NORTEL_HERMES=m
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO_I8042 is not set
@@ -373,12 +337,6 @@ CONFIG_FB_CIRRUS=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_HID=m
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGER_TIMER=m
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_BACKLIGHT=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_CMOS=y
 CONFIG_UIO=m
@@ -398,7 +356,6 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_QUOTA=y
 CONFIG_QFMT_V2=y
-CONFIG_AUTOFS_FS=y
 CONFIG_FUSE_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -425,7 +382,6 @@ CONFIG_ROMFS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
@@ -466,7 +422,6 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_LRW=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
new file mode 100644
index 0000000..341bb47
--- /dev/null
@@ -0,0 +1,456 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_MIPS_DYN_TRANS=y
+CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
+CONFIG_VHOST_NET=m
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
new file mode 100644
index 0000000..2b8558b
--- /dev/null
@@ -0,0 +1,453 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_KVM_GUEST=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
new file mode 100644
index 0000000..93057a7
--- /dev/null
@@ -0,0 +1,195 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_VPE_LOADER=y
+CONFIG_MIPS_VPE_APSP_API=y
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="aprp"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmtc_defconfig b/arch/mips/configs/maltasmtc_defconfig
new file mode 100644
index 0000000..4e54b75
--- /dev/null
@@ -0,0 +1,196 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_MT_SMTC=y
+# CONFIG_MIPS_MT_FPAFF is not set
+CONFIG_NR_CPUS=9
+CONFIG_HZ_48=y
+CONFIG_LOCALVERSION="smtc"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
new file mode 100644
index 0000000..8a66602
--- /dev/null
@@ -0,0 +1,199 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_SCHED_SMT=y
+CONFIG_MIPS_CMP=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="cmp"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
new file mode 100644
index 0000000..9868fc9
--- /dev/null
@@ -0,0 +1,194 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="up"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
index e3eec68d91326fe9784304b32cb988b5f7e62059..0abe681c11a07abdf4945aa46b8beeeac310fa65 100644
@@ -2,7 +2,6 @@ CONFIG_MIPS_SEAD3=y
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
 CONFIG_HZ_100=y
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_NO_HZ=y
@@ -115,10 +114,8 @@ CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_ISO8859_15=y
 CONFIG_NLS_UTF8=y
 # CONFIG_FTRACE is not set
-CONFIG_CRYPTO=y
 CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_ARC4=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 # CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/sead3micro_defconfig b/arch/mips/configs/sead3micro_defconfig
new file mode 100644
index 0000000..2a0da5b
--- /dev/null
@@ -0,0 +1,122 @@
+CONFIG_MIPS_SEAD3=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MICROMIPS=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_SMSC911X=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_ICPLUS_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_CONSOLE_TRANSLATIONS is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=32
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_SPI=y
+CONFIG_SENSORS_ADT7475=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_DEBUG=y
+CONFIG_MMC_SPI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_M41T80=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
index 84befc968fc4a3aee396b6220b00525cac141d0a..5291505167774d3da7000092e26512c7e29e454d 100644
@@ -2,4 +2,6 @@
 # Makefile for generic prom monitor library routines under Linux.
 #
 
+lib-y                  += cmdline.o
+
 lib-$(CONFIG_64BIT)    += call_o32.o
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
new file mode 100644
index 0000000..ffd0345
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/addrspace.h>
+#include <asm/fw/fw.h>
+
+int fw_argc;
+int *_fw_argv;
+int *_fw_envp;
+
+void __init fw_init_cmdline(void)
+{
+       int i;
+
+       /* Validate command line parameters. */
+       if ((fw_arg0 >= CKSEG0) || (fw_arg1 < CKSEG0)) {
+               fw_argc = 0;
+               _fw_argv = NULL;
+       } else {
+               fw_argc = (fw_arg0 & 0x0000ffff);
+               _fw_argv = (int *)fw_arg1;
+       }
+
+       /* Validate environment pointer. */
+       if (fw_arg2 < CKSEG0)
+               _fw_envp = NULL;
+       else
+               _fw_envp = (int *)fw_arg2;
+
+       for (i = 1; i < fw_argc; i++) {
+               strlcat(arcs_cmdline, fw_argv(i), COMMAND_LINE_SIZE);
+               if (i < (fw_argc - 1))
+                       strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+       }
+}
+
+char * __init fw_getcmdline(void)
+{
+       return &(arcs_cmdline[0]);
+}
+
+char *fw_getenv(char *envname)
+{
+       char *result = NULL;
+
+       if (_fw_envp != NULL) {
+               /*
+                * Return a pointer to the given environment variable.
+                * YAMON uses "name", "value" pairs, while U-Boot uses
+                * "name=value".
+                */
+               int i, yamon, index = 0;
+
+               yamon = (strchr(fw_envp(index), '=') == NULL);
+               i = strlen(envname);
+
+               while (fw_envp(index)) {
+                       if (strncmp(envname, fw_envp(index), i) == 0) {
+                               if (yamon) {
+                                       result = fw_envp(index + 1);
+                                       break;
+                               } else if (fw_envp(index)[i] == '=') {
+                                       result = (fw_envp(index + 1) + i);
+                                       break;
+                               }
+                       }
+
+                       /* Increment array index. */
+                       if (yamon)
+                               index += 2;
+                       else
+                               index += 1;
+               }
+       }
+
+       return result;
+}
+
+unsigned long fw_getenvl(char *envname)
+{
+       unsigned long envl = 0UL;
+       char *str;
+       long val;
+       int tmp;
+
+       str = fw_getenv(envname);
+       if (str) {
+               tmp = kstrtol(str, 0, &val);
+               envl = (unsigned long)val;
+       }
+
+       return envl;
+}
index 164a21e65b421de9b7265a1bd403a3895e8092c7..879691d194af426f5532d47c4e467491bf785038 100644
@@ -296,6 +296,7 @@ symbol              =       value
 #define LONG_SUBU      subu
 #define LONG_L         lw
 #define LONG_S         sw
+#define LONG_SP                swp
 #define LONG_SLL       sll
 #define LONG_SLLV      sllv
 #define LONG_SRL       srl
@@ -318,6 +319,7 @@ symbol              =       value
 #define LONG_SUBU      dsubu
 #define LONG_L         ld
 #define LONG_S         sd
+#define LONG_SP                sdp
 #define LONG_SLL       dsll
 #define LONG_SLLV      dsllv
 #define LONG_SRL       dsrl
index b71dd5b160854a0e8f7fb54e4845af0ba535d748..4d2cdea5aa37f46e05e1e06e7f8d378bf9331c13 100644 (file)
@@ -104,6 +104,7 @@ struct boot_mem_map {
 extern struct boot_mem_map boot_mem_map;
 
 extern void add_memory_region(phys_t start, phys_t size, long type);
+extern void detect_memory_region(phys_t start, phys_t sz_min,  phys_t sz_max);
 
 extern void prom_init(void);
 extern void prom_free_prom_memory(void);
index 888766ae1f8598f1ff98373c6dbc7a6658307fc8..e28a3e0eb3cb6407b4ad6d6ac649e20a876ab69d 100644 (file)
 #include <asm/ptrace.h>
 #include <asm/inst.h>
 
+extern int __isa_exception_epc(struct pt_regs *regs);
+extern int __compute_return_epc(struct pt_regs *regs);
+extern int __compute_return_epc_for_insn(struct pt_regs *regs,
+                                        union mips_instruction insn);
+extern int __microMIPS_compute_return_epc(struct pt_regs *regs);
+extern int __MIPS16e_compute_return_epc(struct pt_regs *regs);
+
+
 static inline int delay_slot(struct pt_regs *regs)
 {
        return regs->cp0_cause & CAUSEF_BD;
@@ -18,20 +26,27 @@ static inline int delay_slot(struct pt_regs *regs)
 
 static inline unsigned long exception_epc(struct pt_regs *regs)
 {
-       if (!delay_slot(regs))
+       if (likely(!delay_slot(regs)))
                return regs->cp0_epc;
 
+       if (get_isa16_mode(regs->cp0_epc))
+               return __isa_exception_epc(regs);
+
        return regs->cp0_epc + 4;
 }
 
 #define BRANCH_LIKELY_TAKEN 0x0001
 
-extern int __compute_return_epc(struct pt_regs *regs);
-extern int __compute_return_epc_for_insn(struct pt_regs *regs,
-                                        union mips_instruction insn);
-
 static inline int compute_return_epc(struct pt_regs *regs)
 {
+       if (get_isa16_mode(regs->cp0_epc)) {
+               if (cpu_has_mmips)
+                       return __microMIPS_compute_return_epc(regs);
+               if (cpu_has_mips16)
+                       return __MIPS16e_compute_return_epc(regs);
+               return regs->cp0_epc;
+       }
+
        if (!delay_slot(regs)) {
                regs->cp0_epc += 4;
                return 0;
@@ -40,4 +55,19 @@ static inline int compute_return_epc(struct pt_regs *regs)
        return __compute_return_epc(regs);
 }
 
+static inline int MIPS16e_compute_return_epc(struct pt_regs *regs,
+                                            union mips16e_instruction *inst)
+{
+       if (likely(!delay_slot(regs))) {
+               if (inst->ri.opcode == MIPS16e_extend_op) {
+                       regs->cp0_epc += 4;
+                       return 0;
+               }
+               regs->cp0_epc += 2;
+               return 0;
+       }
+
+       return __MIPS16e_compute_return_epc(regs);
+}
+
 #endif /* _ASM_BRANCH_H */
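
compute_return_epc() above now dispatches on the ISA mode recorded in the exception PC before falling back to the classic MIPS32/64 path. A standalone sketch of the two conventions it relies on, assuming bit 0 of EPC carries the ISA mode (mirroring get_isa16_mode()) and that the MIPS16e EXTEND prefix uses major opcode 0x1e:

#include <stdint.h>
#include <stdio.h>

#define ISA16_MODE_BIT		0x1UL	/* assumption: same bit get_isa16_mode() tests */
#define MIPS16E_EXTEND_OP	0x1e	/* assumption: 5-bit major opcode of EXTEND */

/* Illustration only: 16-bit ISA mode is flagged in the low bit of EPC. */
static int isa16_mode(unsigned long epc)
{
	return (int)(epc & ISA16_MODE_BIT);
}

/* Illustration of the non-delay-slot case of MIPS16e_compute_return_epc():
 * an EXTENDed instruction is 4 bytes long, a plain one 2 bytes. */
static unsigned long mips16e_next_epc(unsigned long epc, uint16_t insn)
{
	unsigned int opcode = insn >> 11;	/* major opcode lives in bits 15:11 */

	return epc + ((opcode == MIPS16E_EXTEND_OP) ? 4 : 2);
}

int main(void)
{
	printf("isa16 mode: %d\n", isa16_mode(0x80001001UL));			/* 1 */
	printf("next epc:   %#lx\n", mips16e_next_epc(0x80001000UL, 0xf000));	/* +4 */
	return 0;
}
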
index 1a57e8b4d0924b488d0e72b6c79dcb6516ffe53f..e5ec8fcd8afaf9d822167605a8f4aaed22e9486f 100644 (file)
 #ifndef cpu_has_pindexed_dcache
 #define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
 #endif
+#ifndef cpu_has_local_ebase
+#define cpu_has_local_ebase    1
+#endif
 
 /*
  * I-Cache snoops remote store.         This only matters on SMP.  Some multiprocessors
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
new file mode 100644 (file)
index 0000000..242cbb3
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
+ *
+ */
+#ifndef __ASM_DMA_COHERENCE_H
+#define __ASM_DMA_COHERENCE_H
+
+extern int coherentio;
+extern int hw_coherentio;
+
+#endif
index f8fc74b6cb47430b540d072833d62872192349d8..84238c574d5e6bff1db1db79d58a5ddbd4436422 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_DMA_MAPPING_H
 
 #include <asm/scatterlist.h>
+#include <asm/dma-coherence.h>
 #include <asm/cache.h>
 #include <asm-generic/dma-coherent.h>
 
index 3b4092705567b7b4e65dd2680febc6a5a8e540cc..2abb587d5ab40fd41aa950d1987d5f854f60e531 100644 (file)
@@ -54,6 +54,12 @@ do {                                                                 \
 extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
        unsigned long cpc);
 extern int do_dsemulret(struct pt_regs *xcp);
+extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
+                                   struct mips_fpu_struct *ctx, int has_fpu,
+                                   void *__user *fault_addr);
+int process_fpemu_return(int sig, void __user *fault_addr);
+int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+                    unsigned long *contpc);
 
 /*
  * Instruction inserted following the badinst to further tag the sequence
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h
new file mode 100644 (file)
index 0000000..d6c50a7
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ */
+#ifndef __ASM_FW_H_
+#define __ASM_FW_H_
+
+#include <asm/bootinfo.h>      /* For cleaner code... */
+
+enum fw_memtypes {
+       fw_dontuse,
+       fw_code,
+       fw_free,
+};
+
+typedef struct {
+       unsigned long base;     /* Within KSEG0 */
+       unsigned int size;      /* bytes */
+       enum fw_memtypes type;  /* fw_memtypes */
+} fw_memblock_t;
+
+/* Maximum number of memory block descriptors. */
+#define FW_MAX_MEMBLOCKS       32
+
+extern int fw_argc;
+extern int *_fw_argv;
+extern int *_fw_envp;
+
+/*
+ * Most firmware like YAMON, PMON, etc. pass arguments and environment
+ * variables as 32-bit pointers. These take care of sign extension.
+ */
+#define fw_argv(index)         ((char *)(long)_fw_argv[(index)])
+#define fw_envp(index)         ((char *)(long)_fw_envp[(index)])
+
+extern void fw_init_cmdline(void);
+extern char *fw_getcmdline(void);
+extern fw_memblock_t *fw_getmdesc(void);
+extern void fw_meminit(void);
+extern char *fw_getenv(char *name);
+extern unsigned long fw_getenvl(char *name);
+extern void fw_init_early_console(char port);
+
+#endif /* __ASM_FW_H_ */
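
The fw_argv()/fw_envp() accessors above go through (char *)(long) because the firmware stores 32-bit pointers; on a 64-bit kernel a KSEG0 address has to be sign-extended into the compatibility segment rather than zero-extended. A standalone illustration of the difference (the printed values assume a 64-bit host):

#include <stdio.h>

int main(void)
{
	int fw_ptr = (int)0x80001234;	/* a 32-bit KSEG0 pointer as stored by the firmware */

	unsigned long zero_ext = (unsigned long)(unsigned int)fw_ptr;
	unsigned long sign_ext = (unsigned long)(long)fw_ptr;	/* the fw.h cast chain */

	printf("zero-extended: %#lx\n", zero_ext);	/* 0x80001234 - not a valid 64-bit CKSEG0 address */
	printf("sign-extended: %#lx\n", sign_ext);	/* 0xffffffff80001234 */
	return 0;
}
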
index bdc9786ab5a7bcbf278b3fe1f77a73ae49dd2767..7153b32de18e6692e2792be19a34b886d1121ba8 100644 (file)
 #define GIC_VPE_WD_COUNT0_OFS          0x0094
 #define GIC_VPE_WD_INITIAL0_OFS                0x0098
 #define GIC_VPE_COMPARE_LO_OFS         0x00a0
-#define GIC_VPE_COMPARE_HI             0x00a4
+#define GIC_VPE_COMPARE_HI_OFS         0x00a4
 
 #define GIC_VPE_EIC_SHADOW_SET_BASE    0x0100
 #define GIC_VPE_EIC_SS(intr) \
@@ -359,7 +359,11 @@ struct gic_shared_intr_map {
 /* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
 #define GIC_PIN_TO_VEC_OFFSET  (1)
 
-extern int gic_present;
+#include <linux/clocksource.h>
+#include <linux/irq.h>
+
+extern unsigned int gic_present;
+extern unsigned int gic_frequency;
 extern unsigned long _gic_base;
 extern unsigned int gic_irq_base;
 extern unsigned int gic_irq_flags[];
@@ -368,18 +372,20 @@ extern struct gic_shared_intr_map gic_shared_intr_map[];
 extern void gic_init(unsigned long gic_base_addr,
        unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
        unsigned int intrmap_size, unsigned int irqbase);
-
 extern void gic_clocksource_init(unsigned int);
-extern unsigned int gic_get_int(void);
+extern unsigned int gic_compare_int (void);
+extern cycle_t gic_read_count(void);
+extern cycle_t gic_read_compare(void);
+extern void gic_write_compare(cycle_t cnt);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
 extern void gic_bind_eic_interrupt(int irq, int set);
 extern unsigned int gic_get_timer_pending(void);
+extern unsigned int gic_get_int(void);
 extern void gic_enable_interrupt(int irq_vec);
 extern void gic_disable_interrupt(int irq_vec);
 extern void gic_irq_ack(struct irq_data *d);
 extern void gic_finish_irq(struct irq_data *d);
 extern void gic_platform_init(int irqs, struct irq_chip *irq_controller);
-
 #endif /* _ASM_GICREGS_H */
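
gic_read_count() and gic_frequency exported above are the pieces a GIC-backed clocksource needs. A hedged sketch of roughly what gic_clocksource_init() is expected to wire up; the header path, counter width, and all names other than the declarations above are assumptions:

#include <linux/clocksource.h>
#include <linux/init.h>
#include <asm/gic.h>		/* assumed location of the declarations patched above */

/* Sketch only: expose the shared GIC counter as a clocksource. */
static cycle_t gic_hpt_read(struct clocksource *cs)
{
	return gic_read_count();
}

static struct clocksource gic_clocksource = {
	.name	= "GIC",
	.read	= gic_hpt_read,
	.mask	= CLOCKSOURCE_MASK(64),		/* assuming a 64-bit shared counter */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void __init gic_clocksource_sketch(void)
{
	/* gic_frequency is the counter rate in Hz, per the declaration above. */
	clocksource_register_hz(&gic_clocksource, gic_frequency);
}
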
index 44d6a5bde4a1cb9e8db930a11d1b07b23f383507..e3ee92d4dbe750c7aa05a5488f7443cdd64fb387 100644 (file)
 #ifndef _ASM_HAZARDS_H
 #define _ASM_HAZARDS_H
 
-#ifdef __ASSEMBLY__
-#define ASMMACRO(name, code...) .macro name; code; .endm
-#else
-
-#include <asm/cpu-features.h>
-
-#define ASMMACRO(name, code...)                                                \
-__asm__(".macro " #name "; " #code "; .endm");                         \
-                                                                       \
-static inline void name(void)                                          \
-{                                                                      \
-       __asm__ __volatile__ (#name);                                   \
-}
-
-/*
- * MIPS R2 instruction hazard barrier.  Needs to be called as a subroutine.
- */
-extern void mips_ihb(void);
-
-#endif
+#include <linux/stringify.h>
 
-ASMMACRO(_ssnop,
-        sll    $0, $0, 1
-       )
+#define ___ssnop                                                       \
+       sll     $0, $0, 1
 
-ASMMACRO(_ehb,
-        sll    $0, $0, 3
-       )
+#define ___ehb                                                         \
+       sll     $0, $0, 3
 
 /*
  * TLB hazards
@@ -48,24 +27,24 @@ ASMMACRO(_ehb,
  * MIPSR2 defines ehb for hazard avoidance
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-        _ehb
-       )
-ASMMACRO(tlbw_use_hazard,
-        _ehb
-       )
-ASMMACRO(tlb_probe_hazard,
-        _ehb
-       )
-ASMMACRO(irq_enable_hazard,
-        _ehb
-       )
-ASMMACRO(irq_disable_hazard,
-       _ehb
-       )
-ASMMACRO(back_to_back_c0_hazard,
-        _ehb
-       )
+#define __mtc0_tlbw_hazard                                             \
+       ___ehb
+
+#define __tlbw_use_hazard                                              \
+       ___ehb
+
+#define __tlb_probe_hazard                                             \
+       ___ehb
+
+#define __irq_enable_hazard                                            \
+       ___ehb
+
+#define __irq_disable_hazard                                           \
+       ___ehb
+
+#define __back_to_back_c0_hazard                                       \
+       ___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler. Gas otoh has the
@@ -94,24 +73,42 @@ do {                                                                        \
  * These are slightly complicated by the fact that we guarantee R1 kernels to
  * run fine on R2 processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-       _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(tlbw_use_hazard,
-       _ssnop; _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(tlb_probe_hazard,
-        _ssnop; _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(irq_enable_hazard,
-        _ssnop; _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(irq_disable_hazard,
-       _ssnop; _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(back_to_back_c0_hazard,
-        _ssnop; _ssnop; _ssnop; _ehb
-       )
+
+#define __mtc0_tlbw_hazard                                             \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __tlbw_use_hazard                                              \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __tlb_probe_hazard                                             \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __irq_enable_hazard                                            \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __irq_disable_hazard                                           \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __back_to_back_c0_hazard                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler. Gas otoh has the
@@ -147,18 +144,18 @@ do {                                                                      \
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-       )
-ASMMACRO(tlbw_use_hazard,
-       )
-ASMMACRO(tlb_probe_hazard,
-       )
-ASMMACRO(irq_enable_hazard,
-       )
-ASMMACRO(irq_disable_hazard,
-       )
-ASMMACRO(back_to_back_c0_hazard,
-       )
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #elif defined(CONFIG_CPU_SB1)
@@ -166,19 +163,21 @@ ASMMACRO(back_to_back_c0_hazard,
 /*
  * Mostly like R4000 for historic reasons
  */
-ASMMACRO(mtc0_tlbw_hazard,
-       )
-ASMMACRO(tlbw_use_hazard,
-       )
-ASMMACRO(tlb_probe_hazard,
-       )
-ASMMACRO(irq_enable_hazard,
-       )
-ASMMACRO(irq_disable_hazard,
-        _ssnop; _ssnop; _ssnop
-       )
-ASMMACRO(back_to_back_c0_hazard,
-       )
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard                                           \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #else
@@ -192,24 +191,35 @@ ASMMACRO(back_to_back_c0_hazard,
  * hazard so this is nice trick to have an optimal code for a range of
  * processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-       nop; nop
-       )
-ASMMACRO(tlbw_use_hazard,
-       nop; nop; nop
-       )
-ASMMACRO(tlb_probe_hazard,
-        nop; nop; nop
-       )
-ASMMACRO(irq_enable_hazard,
-        _ssnop; _ssnop; _ssnop;
-       )
-ASMMACRO(irq_disable_hazard,
-       nop; nop; nop
-       )
-ASMMACRO(back_to_back_c0_hazard,
-        _ssnop; _ssnop; _ssnop;
-       )
+#define __mtc0_tlbw_hazard                                             \
+       nop;                                                            \
+       nop
+
+#define __tlbw_use_hazard                                              \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define __tlb_probe_hazard                                             \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define __irq_enable_hazard                                            \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop
+
+#define __irq_disable_hazard                                           \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define __back_to_back_c0_hazard                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop
+
 #define instruction_hazard() do { } while (0)
 
 #endif
@@ -218,32 +228,137 @@ ASMMACRO(back_to_back_c0_hazard,
 /* FPU hazards */
 
 #if defined(CONFIG_CPU_SB1)
-ASMMACRO(enable_fpu_hazard,
-        .set   push;
-        .set   mips64;
-        .set   noreorder;
-        _ssnop;
-        bnezl  $0, .+4;
-        _ssnop;
-        .set   pop
-)
-ASMMACRO(disable_fpu_hazard,
-)
+
+#define __enable_fpu_hazard                                            \
+       .set    push;                                                   \
+       .set    mips64;                                                 \
+       .set    noreorder;                                              \
+       ___ssnop;                                                       \
+       bnezl   $0, .+4;                                                \
+       ___ssnop;                                                       \
+       .set    pop
+
+#define __disable_fpu_hazard
 
 #elif defined(CONFIG_CPU_MIPSR2)
-ASMMACRO(enable_fpu_hazard,
-        _ehb
-)
-ASMMACRO(disable_fpu_hazard,
-        _ehb
-)
+
+#define __enable_fpu_hazard                                            \
+       ___ehb
+
+#define __disable_fpu_hazard                                           \
+       ___ehb
+
 #else
-ASMMACRO(enable_fpu_hazard,
-        nop; nop; nop; nop
-)
-ASMMACRO(disable_fpu_hazard,
-        _ehb
-)
+
+#define __enable_fpu_hazard                                            \
+       nop;                                                            \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define __disable_fpu_hazard                                           \
+       ___ehb
+
 #endif
 
+#ifdef __ASSEMBLY__
+
+#define _ssnop ___ssnop
+#define        _ehb ___ehb
+#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
+#define tlbw_use_hazard __tlbw_use_hazard
+#define tlb_probe_hazard __tlb_probe_hazard
+#define irq_enable_hazard __irq_enable_hazard
+#define irq_disable_hazard __irq_disable_hazard
+#define back_to_back_c0_hazard __back_to_back_c0_hazard
+#define enable_fpu_hazard __enable_fpu_hazard
+#define disable_fpu_hazard __disable_fpu_hazard
+
+#else
+
+#define _ssnop()                                                       \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(___ssnop)                                           \
+       );                                                              \
+} while (0)
+
+#define        _ehb()                                                          \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(___ehb)                                             \
+       );                                                              \
+} while (0)
+
+
+#define mtc0_tlbw_hazard()                                             \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__mtc0_tlbw_hazard)                                 \
+       );                                                              \
+} while (0)
+
+
+#define tlbw_use_hazard()                                              \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__tlbw_use_hazard)                                  \
+       );                                                              \
+} while (0)
+
+
+#define tlb_probe_hazard()                                             \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__tlb_probe_hazard)                                 \
+       );                                                              \
+} while (0)
+
+
+#define irq_enable_hazard()                                            \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__irq_enable_hazard)                                \
+       );                                                              \
+} while (0)
+
+
+#define irq_disable_hazard()                                           \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__irq_disable_hazard)                               \
+       );                                                              \
+} while (0)
+
+
+#define back_to_back_c0_hazard()                                       \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__back_to_back_c0_hazard)                           \
+       );                                                              \
+} while (0)
+
+
+#define enable_fpu_hazard()                                            \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__enable_fpu_hazard)                                \
+       );                                                              \
+} while (0)
+
+
+#define disable_fpu_hazard()                                           \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__disable_fpu_hazard)                               \
+       );                                                              \
+} while (0)
+
+/*
+ * MIPS R2 instruction hazard barrier.   Needs to be called as a subroutine.
+ */
+extern void mips_ihb(void);
+
+#endif /* __ASSEMBLY__  */
+
 #endif /* _ASM_HAZARDS_H */
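
The rewrite above keeps a single plain-text body per hazard macro and turns it into an inline-asm template on the C side with a two-level stringify, so assembly and C share one definition. A standalone illustration of that mechanism, using a local copy of the <linux/stringify.h> helpers:

#include <stdio.h>

/* Same two-level expansion as <linux/stringify.h>: the inner level sees
 * the fully expanded macro body, the variadic parameter absorbs commas. */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

#define ___ssnop	sll	$0, $0, 1	/* bare body, usable as-is in .S files */

int main(void)
{
	/* In kernel C this string becomes the asm template:
	 *   __asm__ __volatile__(__stringify(___ssnop));
	 */
	printf("%s\n", __stringify(___ssnop));	/* prints: sll $0, $0, 1 */
	return 0;
}
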
index f1eadf764071d1727af40d22a1476a378fc5ad1d..22912f78401c2f6b2a5d8b491146a485a84f1106 100644 (file)
 
 typedef unsigned int mips_instruction;
 
+/* microMIPS instruction decode structure. Do NOT export!!! */
+struct mm_decoded_insn {
+       mips_instruction insn;
+       mips_instruction next_insn;
+       int pc_inc;
+       int next_pc_inc;
+       int micro_mips_mode;
+};
+
+/* Recode table from 16-bit register notation to 32-bit GPR. Do NOT export!!! */
+extern const int reg16to32[];
+
 #endif /* _ASM_INST_H */
index 9f3384c789d7bfdff5511706b0dffc3d2962c96c..45c00951888b4661ecbcf93bcc46fff33f9a9555 100644 (file)
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/stringify.h>
 #include <asm/hazards.h>
 
 #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-__asm__(
-       "       .macro  arch_local_irq_disable\n"
+static inline void arch_local_irq_disable(void)
+{
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    noat                                            \n"
        "       di                                                      \n"
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
-
-static inline void arch_local_irq_disable(void)
-{
-       __asm__ __volatile__(
-               "arch_local_irq_disable"
-               : /* no outputs */
-               : /* no inputs */
-               : "memory");
+       : /* no outputs */
+       : /* no inputs */
+       : "memory");
 }
 
+static inline unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags;
 
-__asm__(
-       "       .macro  arch_local_irq_save result                      \n"
+       asm __volatile__(
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
-       "       di      \\result                                        \n"
-       "       andi    \\result, 1                                     \n"
-       "       irq_disable_hazard                                      \n"
+       "       di      %[flags]                                        \n"
+       "       andi    %[flags], 1                                     \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : [flags] "=r" (flags)
+       : /* no inputs */
+       : "memory");
 
-static inline unsigned long arch_local_irq_save(void)
-{
-       unsigned long flags;
-       asm volatile("arch_local_irq_save\t%0"
-                    : "=r" (flags)
-                    : /* no inputs */
-                    : "memory");
        return flags;
 }
 
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+       unsigned long __tmp1;
 
-__asm__(
-       "       .macro  arch_local_irq_restore flags                    \n"
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    noreorder                                       \n"
        "       .set    noat                                            \n"
@@ -69,7 +64,7 @@ __asm__(
         * Slow, but doesn't suffer from a relatively unlikely race
         * condition we're having since days 1.
         */
-       "       beqz    \\flags, 1f                                     \n"
+       "       beqz    %[flags], 1f                                    \n"
        "       di                                                      \n"
        "       ei                                                      \n"
        "1:                                                             \n"
@@ -78,33 +73,44 @@ __asm__(
         * Fast, dangerous.  Life is fun, life is good.
         */
        "       mfc0    $1, $12                                         \n"
-       "       ins     $1, \\flags, 0, 1                               \n"
+       "       ins     $1, %[flags], 0, 1                              \n"
        "       mtc0    $1, $12                                         \n"
 #endif
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-       unsigned long __tmp1;
-
-       __asm__ __volatile__(
-               "arch_local_irq_restore\t%0"
-               : "=r" (__tmp1)
-               : "0" (flags)
-               : "memory");
+       : [flags] "=r" (__tmp1)
+       : "0" (flags)
+       : "memory");
 }
 
 static inline void __arch_local_irq_restore(unsigned long flags)
 {
-       unsigned long __tmp1;
-
        __asm__ __volatile__(
-               "arch_local_irq_restore\t%0"
-               : "=r" (__tmp1)
-               : "0" (flags)
-               : "memory");
+       "       .set    push                                            \n"
+       "       .set    noreorder                                       \n"
+       "       .set    noat                                            \n"
+#if defined(CONFIG_IRQ_CPU)
+       /*
+        * Slow, but doesn't suffer from a relatively unlikely race
+        * condition we're having since days 1.
+        */
+       "       beqz    %[flags], 1f                                    \n"
+       "       di                                                      \n"
+       "       ei                                                      \n"
+       "1:                                                             \n"
+#else
+       /*
+        * Fast, dangerous.  Life is fun, life is good.
+        */
+       "       mfc0    $1, $12                                         \n"
+       "       ins     $1, %[flags], 0, 1                              \n"
+       "       mtc0    $1, $12                                         \n"
+#endif
+       "       " __stringify(__irq_disable_hazard) "                   \n"
+       "       .set    pop                                             \n"
+       : [flags] "=r" (flags)
+       : "0" (flags)
+       : "memory");
 }
 #else
 /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
@@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags);
 #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
 
 
-__asm__(
-       "       .macro  arch_local_irq_enable                           \n"
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC kernel needs to do a software replay of queued
+        * IPIs, at the cost of call overhead on each local_irq_enable()
+        */
+       smtc_ipi_replay();
+#endif
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
@@ -133,45 +149,28 @@ __asm__(
        "       xori    $1,0x1e                                         \n"
        "       mtc0    $1,$12                                          \n"
 #endif
-       "       irq_enable_hazard                                       \n"
+       "       " __stringify(__irq_enable_hazard) "                    \n"
        "       .set    pop                                             \n"
-       "       .endm");
-
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC kernel needs to do a software replay of queued
-        * IPIs, at the cost of call overhead on each local_irq_enable()
-        */
-       smtc_ipi_replay();
-#endif
-       __asm__ __volatile__(
-               "arch_local_irq_enable"
-               : /* no outputs */
-               : /* no inputs */
-               : "memory");
+       : /* no outputs */
+       : /* no inputs */
+       : "memory");
 }
 
+static inline unsigned long arch_local_save_flags(void)
+{
+       unsigned long flags;
 
-__asm__(
-       "       .macro  arch_local_save_flags flags                     \n"
+       asm __volatile__(
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
 #ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    \\flags, $2, 1                                  \n"
+       "       mfc0    %[flags], $2, 1                                 \n"
 #else
-       "       mfc0    \\flags, $12                                    \n"
+       "       mfc0    %[flags], $12                                   \n"
 #endif
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : [flags] "=r" (flags));
 
-static inline unsigned long arch_local_save_flags(void)
-{
-       unsigned long flags;
-       asm volatile("arch_local_save_flags %0" : "=r" (flags));
        return flags;
 }
 
diff --git a/arch/mips/include/asm/kvm.h b/arch/mips/include/asm/kvm.h
new file mode 100644 (file)
index 0000000..85789ea
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __LINUX_KVM_MIPS_H
+#define __LINUX_KVM_MIPS_H
+
+#include <linux/types.h>
+
+#define __KVM_MIPS
+
+#define N_MIPS_COPROC_REGS      32
+#define N_MIPS_COPROC_SEL      8
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+       __u32 gprs[32];
+       __u32 hi;
+       __u32 lo;
+       __u32 pc;
+
+       __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+};
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+struct kvm_mips_interrupt {
+       /* in */
+       __u32 cpu;
+       __u32 irq;
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+#endif /* __LINUX_KVM_MIPS_H */
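
The kvm_regs layout above (32-bit GPRs, hi/lo, pc, plus the CP0 array) is what user space sees through the standard register ioctls. A hedged user-space sketch; the vcpu file descriptor is assumed to come from the usual KVM_CREATE_VM/KVM_CREATE_VCPU sequence, and error handling is trimmed:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void dump_guest_pc(int vcpu_fd)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) == 0)
		printf("guest pc = 0x%x\n", regs.pc);	/* __u32 on MIPS, per the header above */
}
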
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
new file mode 100644 (file)
index 0000000..e68781e
--- /dev/null
@@ -0,0 +1,667 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __MIPS_KVM_HOST_H__
+#define __MIPS_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+
+
+#define KVM_MAX_VCPUS          1
+#define KVM_USER_MEM_SLOTS     8
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS  0
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+/* Don't support huge pages */
+#define KVM_HPAGE_GFN_SHIFT(x) 0
+
+/* We don't currently support large pages. */
+#define KVM_NR_PAGE_SIZES      1
+#define KVM_PAGES_PER_HPAGE(x) 1
+
+
+
+/* Special address that contains the comm page, used for reducing # of traps */
+#define KVM_GUEST_COMMPAGE_ADDR     0x0
+
+#define KVM_GUEST_KERNEL_MODE(vcpu)    ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
+                                       ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
+
+#define KVM_GUEST_KUSEG             0x00000000UL
+#define KVM_GUEST_KSEG0             0x40000000UL
+#define KVM_GUEST_KSEG23            0x60000000UL
+#define KVM_GUEST_KSEGX(a)          ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_CPHYSADDR(a)      ((_ACAST32_(a)) & 0x1fffffff)
+
+#define KVM_GUEST_CKSEG0ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_CKSEG1ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_CKSEG23ADDR(a)       (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+/*
+ * Map an address to a certain kernel segment
+ */
+#define KVM_GUEST_KSEG0ADDR(a)         (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_KSEG1ADDR(a)         (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_KSEG23ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+#define KVM_INVALID_PAGE            0xdeadbeef
+#define KVM_INVALID_INST            0xdeadbeef
+#define KVM_INVALID_ADDR            0xdeadbeef
+
+#define KVM_MALTA_GUEST_RTC_ADDR    0xb8000070UL
+
+#define GUEST_TICKS_PER_JIFFY (40000000/HZ)
+#define MS_TO_NS(x) ((x) * 1000000L)
+
+#define CAUSEB_DC       27
+#define CAUSEF_DC       (_ULCAST_(1)   << 27)
+
+struct kvm;
+struct kvm_run;
+struct kvm_vcpu;
+struct kvm_interrupt;
+
+extern atomic_t kvm_mips_instance;
+extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
+extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
+extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
+
+struct kvm_vm_stat {
+       u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+       u32 wait_exits;
+       u32 cache_exits;
+       u32 signal_exits;
+       u32 int_exits;
+       u32 cop_unusable_exits;
+       u32 tlbmod_exits;
+       u32 tlbmiss_ld_exits;
+       u32 tlbmiss_st_exits;
+       u32 addrerr_st_exits;
+       u32 addrerr_ld_exits;
+       u32 syscall_exits;
+       u32 resvd_inst_exits;
+       u32 break_inst_exits;
+       u32 flush_dcache_exits;
+       u32 halt_wakeup;
+};
+
+enum kvm_mips_exit_types {
+       WAIT_EXITS,
+       CACHE_EXITS,
+       SIGNAL_EXITS,
+       INT_EXITS,
+       COP_UNUSABLE_EXITS,
+       TLBMOD_EXITS,
+       TLBMISS_LD_EXITS,
+       TLBMISS_ST_EXITS,
+       ADDRERR_ST_EXITS,
+       ADDRERR_LD_EXITS,
+       SYSCALL_EXITS,
+       RESVD_INST_EXITS,
+       BREAK_INST_EXITS,
+       FLUSH_DCACHE_EXITS,
+       MAX_KVM_MIPS_EXIT_TYPES
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_arch {
+       /* Guest GVA->HPA page table */
+       unsigned long *guest_pmap;
+       unsigned long guest_pmap_npages;
+
+       /* Wired host TLB used for the commpage */
+       int commpage_tlb;
+};
+
+#define N_MIPS_COPROC_REGS      32
+#define N_MIPS_COPROC_SEL      8
+
+struct mips_coproc {
+       unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+       unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#endif
+};
+
+/*
+ * Coprocessor 0 register names
+ */
+#define        MIPS_CP0_TLB_INDEX          0
+#define        MIPS_CP0_TLB_RANDOM         1
+#define        MIPS_CP0_TLB_LOW            2
+#define        MIPS_CP0_TLB_LO0            2
+#define        MIPS_CP0_TLB_LO1            3
+#define        MIPS_CP0_TLB_CONTEXT    4
+#define        MIPS_CP0_TLB_PG_MASK    5
+#define        MIPS_CP0_TLB_WIRED          6
+#define        MIPS_CP0_HWRENA             7
+#define        MIPS_CP0_BAD_VADDR          8
+#define        MIPS_CP0_COUNT          9
+#define        MIPS_CP0_TLB_HI         10
+#define        MIPS_CP0_COMPARE            11
+#define        MIPS_CP0_STATUS         12
+#define        MIPS_CP0_CAUSE          13
+#define        MIPS_CP0_EXC_PC         14
+#define        MIPS_CP0_PRID               15
+#define        MIPS_CP0_CONFIG         16
+#define        MIPS_CP0_LLADDR         17
+#define        MIPS_CP0_WATCH_LO           18
+#define        MIPS_CP0_WATCH_HI           19
+#define        MIPS_CP0_TLB_XCONTEXT   20
+#define        MIPS_CP0_ECC                26
+#define        MIPS_CP0_CACHE_ERR          27
+#define        MIPS_CP0_TAG_LO         28
+#define        MIPS_CP0_TAG_HI         29
+#define        MIPS_CP0_ERROR_PC           30
+#define        MIPS_CP0_DEBUG          23
+#define        MIPS_CP0_DEPC               24
+#define        MIPS_CP0_PERFCNT            25
+#define        MIPS_CP0_ERRCTL         26
+#define        MIPS_CP0_DATA_LO            28
+#define        MIPS_CP0_DATA_HI            29
+#define        MIPS_CP0_DESAVE         31
+
+#define MIPS_CP0_CONFIG_SEL        0
+#define MIPS_CP0_CONFIG1_SEL    1
+#define MIPS_CP0_CONFIG2_SEL    2
+#define MIPS_CP0_CONFIG3_SEL    3
+
+/* Config0 register bits */
+#define CP0C0_M    31
+#define CP0C0_K23  28
+#define CP0C0_KU   25
+#define CP0C0_MDU  20
+#define CP0C0_MM   17
+#define CP0C0_BM   16
+#define CP0C0_BE   15
+#define CP0C0_AT   13
+#define CP0C0_AR   10
+#define CP0C0_MT   7
+#define CP0C0_VI   3
+#define CP0C0_K0   0
+
+/* Config1 register bits */
+#define CP0C1_M    31
+#define CP0C1_MMU  25
+#define CP0C1_IS   22
+#define CP0C1_IL   19
+#define CP0C1_IA   16
+#define CP0C1_DS   13
+#define CP0C1_DL   10
+#define CP0C1_DA   7
+#define CP0C1_C2   6
+#define CP0C1_MD   5
+#define CP0C1_PC   4
+#define CP0C1_WR   3
+#define CP0C1_CA   2
+#define CP0C1_EP   1
+#define CP0C1_FP   0
+
+/* Config2 Register bits */
+#define CP0C2_M    31
+#define CP0C2_TU   28
+#define CP0C2_TS   24
+#define CP0C2_TL   20
+#define CP0C2_TA   16
+#define CP0C2_SU   12
+#define CP0C2_SS   8
+#define CP0C2_SL   4
+#define CP0C2_SA   0
+
+/* Config3 Register bits */
+#define CP0C3_M    31
+#define CP0C3_ISA_ON_EXC 16
+#define CP0C3_ULRI  13
+#define CP0C3_DSPP 10
+#define CP0C3_LPA  7
+#define CP0C3_VEIC 6
+#define CP0C3_VInt 5
+#define CP0C3_SP   4
+#define CP0C3_MT   2
+#define CP0C3_SM   1
+#define CP0C3_TL   0
+
+/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
+#define MIPS_CONFIG0                                              \
+  ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
+
+/* Have config2, no coprocessor2 attached, no MDMX support attached,
+   no performance counters, watch registers present,
+   no code compression, EJTAG present, no FPU, no watch registers */
+#define MIPS_CONFIG1                                              \
+((1 << CP0C1_M) |                                                 \
+ (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) |            \
+ (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) |            \
+ (0 << CP0C1_FP))
+
+/* Have config3, no tertiary/secondary caches implemented */
+#define MIPS_CONFIG2                                              \
+((1 << CP0C2_M))
+
+/* No config4, no DSP ASE, no large physaddr (PABITS),
+   no external interrupt controller, no vectored interrupts,
+   no 1kb pages, no SmartMIPS ASE, no trace logic */
+#define MIPS_CONFIG3                                              \
+((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) |          \
+ (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) |        \
+ (0 << CP0C3_SM) | (0 << CP0C3_TL))
+
+/* MMU types, the first four entries have the same layout as the
+   CP0C0_MT field.  */
+enum mips_mmu_types {
+       MMU_TYPE_NONE,
+       MMU_TYPE_R4000,
+       MMU_TYPE_RESERVED,
+       MMU_TYPE_FMT,
+       MMU_TYPE_R3000,
+       MMU_TYPE_R6000,
+       MMU_TYPE_R8000
+};
+
+/*
+ * Trap codes
+ */
+#define T_INT           0      /* Interrupt pending */
+#define T_TLB_MOD       1      /* TLB modified fault */
+#define T_TLB_LD_MISS       2  /* TLB miss on load or ifetch */
+#define T_TLB_ST_MISS       3  /* TLB miss on a store */
+#define T_ADDR_ERR_LD       4  /* Address error on a load or ifetch */
+#define T_ADDR_ERR_ST       5  /* Address error on a store */
+#define T_BUS_ERR_IFETCH    6  /* Bus error on an ifetch */
+#define T_BUS_ERR_LD_ST     7  /* Bus error on a load or store */
+#define T_SYSCALL       8      /* System call */
+#define T_BREAK         9      /* Breakpoint */
+#define T_RES_INST      10     /* Reserved instruction exception */
+#define T_COP_UNUSABLE      11 /* Coprocessor unusable */
+#define T_OVFLOW        12     /* Arithmetic overflow */
+
+/*
+ * Trap definitions added for r4000 port.
+ */
+#define T_TRAP          13     /* Trap instruction */
+#define T_VCEI          14     /* Virtual coherency exception */
+#define T_FPE           15     /* Floating point exception */
+#define T_WATCH         23     /* Watch address reference */
+#define T_VCED          31     /* Virtual coherency data */
+
+/* Resume Flags */
+#define RESUME_FLAG_DR          (1<<0) /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST        (1<<1) /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_GUEST_DR         RESUME_FLAG_DR
+#define RESUME_HOST             RESUME_FLAG_HOST
+
+enum emulation_result {
+       EMULATE_DONE,           /* no further processing */
+       EMULATE_DO_MMIO,        /* kvm_run filled with MMIO request */
+       EMULATE_FAIL,           /* can't emulate this instruction */
+       EMULATE_WAIT,           /* WAIT instruction */
+       EMULATE_PRIV_FAIL,
+};
+
+#define MIPS3_PG_G  0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
+#define MIPS3_PG_V  0x00000002 /* Valid */
+#define MIPS3_PG_NV 0x00000000
+#define MIPS3_PG_D  0x00000004 /* Dirty */
+
+#define mips3_paddr_to_tlbpfn(x) \
+    (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
+#define mips3_tlbpfn_to_paddr(x) \
+    ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
+
+#define MIPS3_PG_SHIFT      6
+#define MIPS3_PG_FRAME      0x3fffffc0
+
+#define VPN2_MASK           0xffffe000
+#define TLB_IS_GLOBAL(x)    (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
+#define TLB_VPN2(x)         ((x).tlb_hi & VPN2_MASK)
+#define TLB_ASID(x)         (ASID_MASK((x).tlb_hi))
+#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
+
+struct kvm_mips_tlb {
+       long tlb_mask;
+       long tlb_hi;
+       long tlb_lo0;
+       long tlb_lo1;
+};
+
+#define KVM_MIPS_GUEST_TLB_SIZE     64
+struct kvm_vcpu_arch {
+       void *host_ebase, *guest_ebase;
+       unsigned long host_stack;
+       unsigned long host_gp;
+
+       /* Host CP0 registers used when handling exits from guest */
+       unsigned long host_cp0_badvaddr;
+       unsigned long host_cp0_cause;
+       unsigned long host_cp0_epc;
+       unsigned long host_cp0_entryhi;
+       uint32_t guest_inst;
+
+       /* GPRS */
+       unsigned long gprs[32];
+       unsigned long hi;
+       unsigned long lo;
+       unsigned long pc;
+
+       /* FPU State */
+       struct mips_fpu_struct fpu;
+
+       /* COP0 State */
+       struct mips_coproc *cop0;
+
+       /* Host KSEG0 address of the EI/DI offset */
+       void *kseg0_commpage;
+
+       u32 io_gpr;             /* GPR used as IO source/target */
+
+       /* Used to calibrate the virtual count register for the guest */
+       int32_t host_cp0_count;
+
+       /* Bitmask of exceptions that are pending */
+       unsigned long pending_exceptions;
+
+       /* Bitmask of pending exceptions to be cleared */
+       unsigned long pending_exceptions_clr;
+
+       unsigned long pending_load_cause;
+
+       /* Save/Restore the entryhi register when we are preempted/scheduled back in */
+       unsigned long preempt_entryhi;
+
+       /* S/W Based TLB for guest */
+       struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
+
+       /* Cached guest kernel/user ASIDs */
+       uint32_t guest_user_asid[NR_CPUS];
+       uint32_t guest_kernel_asid[NR_CPUS];
+       struct mm_struct guest_kernel_mm, guest_user_mm;
+
+       struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE];
+
+
+       struct hrtimer comparecount_timer;
+
+       int last_sched_cpu;
+
+       /* WAIT executed */
+       int wait;
+};
+
+
+#define kvm_read_c0_guest_index(cop0)               (cop0->reg[MIPS_CP0_TLB_INDEX][0])
+#define kvm_write_c0_guest_index(cop0, val)         (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
+#define kvm_read_c0_guest_entrylo0(cop0)            (cop0->reg[MIPS_CP0_TLB_LO0][0])
+#define kvm_read_c0_guest_entrylo1(cop0)            (cop0->reg[MIPS_CP0_TLB_LO1][0])
+#define kvm_read_c0_guest_context(cop0)             (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
+#define kvm_write_c0_guest_context(cop0, val)       (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
+#define kvm_read_c0_guest_userlocal(cop0)           (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
+#define kvm_read_c0_guest_pagemask(cop0)            (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
+#define kvm_write_c0_guest_pagemask(cop0, val)      (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
+#define kvm_read_c0_guest_wired(cop0)               (cop0->reg[MIPS_CP0_TLB_WIRED][0])
+#define kvm_write_c0_guest_wired(cop0, val)         (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
+#define kvm_read_c0_guest_badvaddr(cop0)            (cop0->reg[MIPS_CP0_BAD_VADDR][0])
+#define kvm_write_c0_guest_badvaddr(cop0, val)      (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
+#define kvm_read_c0_guest_count(cop0)               (cop0->reg[MIPS_CP0_COUNT][0])
+#define kvm_write_c0_guest_count(cop0, val)         (cop0->reg[MIPS_CP0_COUNT][0] = (val))
+#define kvm_read_c0_guest_entryhi(cop0)             (cop0->reg[MIPS_CP0_TLB_HI][0])
+#define kvm_write_c0_guest_entryhi(cop0, val)       (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
+#define kvm_read_c0_guest_compare(cop0)             (cop0->reg[MIPS_CP0_COMPARE][0])
+#define kvm_write_c0_guest_compare(cop0, val)       (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
+#define kvm_read_c0_guest_status(cop0)              (cop0->reg[MIPS_CP0_STATUS][0])
+#define kvm_write_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] = (val))
+#define kvm_read_c0_guest_intctl(cop0)              (cop0->reg[MIPS_CP0_STATUS][1])
+#define kvm_write_c0_guest_intctl(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][1] = (val))
+#define kvm_read_c0_guest_cause(cop0)               (cop0->reg[MIPS_CP0_CAUSE][0])
+#define kvm_write_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
+#define kvm_read_c0_guest_epc(cop0)                 (cop0->reg[MIPS_CP0_EXC_PC][0])
+#define kvm_write_c0_guest_epc(cop0, val)           (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
+#define kvm_read_c0_guest_prid(cop0)                (cop0->reg[MIPS_CP0_PRID][0])
+#define kvm_write_c0_guest_prid(cop0, val)          (cop0->reg[MIPS_CP0_PRID][0] = (val))
+#define kvm_read_c0_guest_ebase(cop0)               (cop0->reg[MIPS_CP0_PRID][1])
+#define kvm_write_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] = (val))
+#define kvm_read_c0_guest_config(cop0)              (cop0->reg[MIPS_CP0_CONFIG][0])
+#define kvm_read_c0_guest_config1(cop0)             (cop0->reg[MIPS_CP0_CONFIG][1])
+#define kvm_read_c0_guest_config2(cop0)             (cop0->reg[MIPS_CP0_CONFIG][2])
+#define kvm_read_c0_guest_config3(cop0)             (cop0->reg[MIPS_CP0_CONFIG][3])
+#define kvm_read_c0_guest_config7(cop0)             (cop0->reg[MIPS_CP0_CONFIG][7])
+#define kvm_write_c0_guest_config(cop0, val)        (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
+#define kvm_write_c0_guest_config1(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
+#define kvm_write_c0_guest_config2(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
+#define kvm_write_c0_guest_config3(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
+#define kvm_write_c0_guest_config7(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
+#define kvm_read_c0_guest_errorepc(cop0)            (cop0->reg[MIPS_CP0_ERROR_PC][0])
+#define kvm_write_c0_guest_errorepc(cop0, val)      (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+
+#define kvm_set_c0_guest_status(cop0, val)          (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
+#define kvm_clear_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
+#define kvm_set_c0_guest_cause(cop0, val)           (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
+#define kvm_clear_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
+#define kvm_change_c0_guest_cause(cop0, change, val)  \
+{                                                     \
+    kvm_clear_c0_guest_cause(cop0, change);           \
+    kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
+}
+#define kvm_set_c0_guest_ebase(cop0, val)           (cop0->reg[MIPS_CP0_PRID][1] |= (val))
+#define kvm_clear_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
+#define kvm_change_c0_guest_ebase(cop0, change, val)  \
+{                                                     \
+    kvm_clear_c0_guest_ebase(cop0, change);           \
+    kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
+}
+
+
+struct kvm_mips_callbacks {
+       int (*handle_cop_unusable) (struct kvm_vcpu *vcpu);
+       int (*handle_tlb_mod) (struct kvm_vcpu *vcpu);
+       int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu);
+       int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu);
+       int (*handle_addr_err_st) (struct kvm_vcpu *vcpu);
+       int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu);
+       int (*handle_syscall) (struct kvm_vcpu *vcpu);
+       int (*handle_res_inst) (struct kvm_vcpu *vcpu);
+       int (*handle_break) (struct kvm_vcpu *vcpu);
+       int (*vm_init) (struct kvm *kvm);
+       int (*vcpu_init) (struct kvm_vcpu *vcpu);
+       int (*vcpu_setup) (struct kvm_vcpu *vcpu);
+        gpa_t(*gva_to_gpa) (gva_t gva);
+       void (*queue_timer_int) (struct kvm_vcpu *vcpu);
+       void (*dequeue_timer_int) (struct kvm_vcpu *vcpu);
+       void (*queue_io_int) (struct kvm_vcpu *vcpu,
+                             struct kvm_mips_interrupt *irq);
+       void (*dequeue_io_int) (struct kvm_vcpu *vcpu,
+                               struct kvm_mips_interrupt *irq);
+       int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority,
+                           uint32_t cause);
+       int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
+                         uint32_t cause);
+       int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
+                                   struct kvm_regs *regs);
+       int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
+                                   struct kvm_regs *regs);
+};
+extern struct kvm_mips_callbacks *kvm_mips_callbacks;
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
+
+/* Debug: dump vcpu state */
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
+
+/* Trampoline ASM routine to start running in "Guest" context */
+extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+/* TLB handling */
+uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
+                                          struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+                                             struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+                                               struct kvm_mips_tlb *tlb,
+                                               unsigned long *hpa0,
+                                               unsigned long *hpa1);
+
+extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
+                                                   uint32_t *opc,
+                                                   struct kvm_run *run,
+                                                   struct kvm_vcpu *vcpu);
+
+extern void kvm_mips_dump_host_tlbs(void);
+extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_flush_host_tlb(int skip_kseg0);
+extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
+extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
+
+extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
+                                    unsigned long entryhi);
+extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
+extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+                                                  unsigned long gva);
+extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+                                   struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu);
+extern void kvm_local_flush_tlb_all(void);
+extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu);
+extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
+extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
+
+/* Emulation */
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
+
+extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
+                                                  uint32_t *opc,
+                                                  struct kvm_run *run,
+                                                  struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
+                                                     uint32_t *opc,
+                                                     struct kvm_run *run,
+                                                     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
+                                                        uint32_t *opc,
+                                                        struct kvm_run *run,
+                                                        struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
+                                                       uint32_t *opc,
+                                                       struct kvm_run *run,
+                                                       struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
+                                                        uint32_t *opc,
+                                                        struct kvm_run *run,
+                                                        struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
+                                                       uint32_t *opc,
+                                                       struct kvm_run *run,
+                                                       struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
+                                                     uint32_t *opc,
+                                                     struct kvm_run *run,
+                                                     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
+                                               uint32_t *opc,
+                                               struct kvm_run *run,
+                                               struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+                                                        struct kvm_run *run);
+
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+                                              uint32_t *opc,
+                                              struct kvm_run *run,
+                                              struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
+                                            uint32_t *opc,
+                                            uint32_t cause,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
+                                          uint32_t *opc,
+                                          uint32_t cause,
+                                          struct kvm_run *run,
+                                          struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_store(uint32_t inst,
+                                            uint32_t cause,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_load(uint32_t inst,
+                                           uint32_t cause,
+                                           struct kvm_run *run,
+                                           struct kvm_vcpu *vcpu);
+
+/* Dynamic binary translation */
+extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+                                     struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+                                  struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
+                              struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
+                              struct kvm_vcpu *vcpu);
+
+/* Misc */
+extern void mips32_SyncICache(unsigned long addr, unsigned long size);
+extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
+extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
+
+
+#endif /* __MIPS_KVM_HOST_H__ */
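The declarations above boil down to an ops table (struct kvm_mips_callbacks) plus kvm_mips_emulation_init(), through which an emulation backend hands its handler table to the generic MIPS KVM code at init time. A hedged sketch of how a backend could wire this up; the struct fields and the init prototype come from the header above, while the my_* names and the empty handler bodies are invented for illustration:

/* Illustrative backend sketch, not part of the patch. */
#include <linux/kvm_host.h>	/* struct kvm_vcpu, struct kvm_mips_interrupt (kernel build context assumed) */

static void my_queue_io_int(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
{
	/* raise the requested guest interrupt line (details elided) */
}

static void my_dequeue_io_int(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
{
	/* clear it again (details elided) */
}

static struct kvm_mips_callbacks my_callbacks = {
	/* earlier fields of the ops table (vcpu setup, exit handlers, ...) omitted */
	.queue_io_int	= my_queue_io_int,
	.dequeue_io_int	= my_dequeue_io_int,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	/* publish the table through the double pointer declared in the header */
	*install_callbacks = &my_callbacks;
	return 0;
}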
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h
deleted file mode 100644 (file)
index 8fcf8df..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef BCM63XX_CLK_H_
-#define BCM63XX_CLK_H_
-
-struct clk {
-       void            (*set)(struct clk *, int);
-       unsigned int    rate;
-       unsigned int    usage;
-       int             id;
-};
-
-#endif /* ! BCM63XX_CLK_H_ */
index cb922b9cb0e9b1e7cc96f36149a24bd8b97e70cb..336228990808e5c4aed717f21682ebd1c85045bc 100644 (file)
 #define BCM6345_CPU_ID         0x6345
 #define BCM6348_CPU_ID         0x6348
 #define BCM6358_CPU_ID         0x6358
+#define BCM6362_CPU_ID         0x6362
 #define BCM6368_CPU_ID         0x6368
 
 void __init bcm63xx_cpu_init(void);
 u16 __bcm63xx_get_cpu_id(void);
-u16 bcm63xx_get_cpu_rev(void);
+u8 bcm63xx_get_cpu_rev(void);
 unsigned int bcm63xx_get_cpu_freq(void);
 
 #ifdef CONFIG_BCM63XX_CPU_6328
@@ -86,6 +87,20 @@ unsigned int bcm63xx_get_cpu_freq(void);
 # define BCMCPU_IS_6358()      (0)
 #endif
 
+#ifdef CONFIG_BCM63XX_CPU_6362
+# ifdef bcm63xx_get_cpu_id
+#  undef bcm63xx_get_cpu_id
+#  define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id()
+#  define BCMCPU_RUNTIME_DETECT
+# else
+#  define bcm63xx_get_cpu_id() BCM6362_CPU_ID
+# endif
+# define BCMCPU_IS_6362()      (bcm63xx_get_cpu_id() == BCM6362_CPU_ID)
+#else
+# define BCMCPU_IS_6362()      (0)
+#endif
+
+
 #ifdef CONFIG_BCM63XX_CPU_6368
 # ifdef bcm63xx_get_cpu_id
 #  undef bcm63xx_get_cpu_id
@@ -405,6 +420,62 @@ enum bcm63xx_regs_set {
 #define BCM_6358_MISC_BASE             (0xdeadbeef)
 
 
+/*
+ * 6362 register sets base address
+ */
+#define BCM_6362_DSL_LMEM_BASE         (0xdeadbeef)
+#define BCM_6362_PERF_BASE             (0xb0000000)
+#define BCM_6362_TIMER_BASE            (0xb0000040)
+#define BCM_6362_WDT_BASE              (0xb000005c)
+#define BCM_6362_UART0_BASE             (0xb0000100)
+#define BCM_6362_UART1_BASE            (0xb0000120)
+#define BCM_6362_GPIO_BASE             (0xb0000080)
+#define BCM_6362_SPI_BASE              (0xb0000800)
+#define BCM_6362_HSSPI_BASE            (0xb0001000)
+#define BCM_6362_UDC0_BASE             (0xdeadbeef)
+#define BCM_6362_USBDMA_BASE           (0xb000c000)
+#define BCM_6362_OHCI0_BASE            (0xb0002600)
+#define BCM_6362_OHCI_PRIV_BASE                (0xdeadbeef)
+#define BCM_6362_USBH_PRIV_BASE                (0xb0002700)
+#define BCM_6362_USBD_BASE             (0xb0002400)
+#define BCM_6362_MPI_BASE              (0xdeadbeef)
+#define BCM_6362_PCMCIA_BASE           (0xdeadbeef)
+#define BCM_6362_PCIE_BASE             (0xb0e40000)
+#define BCM_6362_SDRAM_REGS_BASE       (0xdeadbeef)
+#define BCM_6362_DSL_BASE              (0xdeadbeef)
+#define BCM_6362_UBUS_BASE             (0xdeadbeef)
+#define BCM_6362_ENET0_BASE            (0xdeadbeef)
+#define BCM_6362_ENET1_BASE            (0xdeadbeef)
+#define BCM_6362_ENETDMA_BASE          (0xb000d800)
+#define BCM_6362_ENETDMAC_BASE         (0xb000da00)
+#define BCM_6362_ENETDMAS_BASE         (0xb000dc00)
+#define BCM_6362_ENETSW_BASE           (0xb0e00000)
+#define BCM_6362_EHCI0_BASE            (0xb0002500)
+#define BCM_6362_SDRAM_BASE            (0xdeadbeef)
+#define BCM_6362_MEMC_BASE             (0xdeadbeef)
+#define BCM_6362_DDR_BASE              (0xb0003000)
+#define BCM_6362_M2M_BASE              (0xdeadbeef)
+#define BCM_6362_ATM_BASE              (0xdeadbeef)
+#define BCM_6362_XTM_BASE              (0xb0007800)
+#define BCM_6362_XTMDMA_BASE           (0xb000b800)
+#define BCM_6362_XTMDMAC_BASE          (0xdeadbeef)
+#define BCM_6362_XTMDMAS_BASE          (0xdeadbeef)
+#define BCM_6362_PCM_BASE              (0xb000a800)
+#define BCM_6362_PCMDMA_BASE           (0xdeadbeef)
+#define BCM_6362_PCMDMAC_BASE          (0xdeadbeef)
+#define BCM_6362_PCMDMAS_BASE          (0xdeadbeef)
+#define BCM_6362_RNG_BASE              (0xdeadbeef)
+#define BCM_6362_MISC_BASE             (0xb0001800)
+
+#define BCM_6362_NAND_REG_BASE         (0xb0000200)
+#define BCM_6362_NAND_CACHE_BASE       (0xb0000600)
+#define BCM_6362_LED_BASE              (0xb0001900)
+#define BCM_6362_IPSEC_BASE            (0xb0002800)
+#define BCM_6362_IPSEC_DMA_BASE                (0xb000d000)
+#define BCM_6362_WLAN_CHIPCOMMON_BASE  (0xb0004000)
+#define BCM_6362_WLAN_D11_BASE         (0xb0005000)
+#define BCM_6362_WLAN_SHIM_BASE                (0xb0007000)
+
 /*
  * 6368 register sets base address
  */
@@ -564,6 +635,9 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
 #ifdef CONFIG_BCM63XX_CPU_6358
        __GEN_RSET(6358)
 #endif
+#ifdef CONFIG_BCM63XX_CPU_6362
+       __GEN_RSET(6362)
+#endif
 #ifdef CONFIG_BCM63XX_CPU_6368
        __GEN_RSET(6368)
 #endif
@@ -819,6 +893,71 @@ enum bcm63xx_irq {
 #define BCM_6358_EXT_IRQ2              (IRQ_INTERNAL_BASE + 27)
 #define BCM_6358_EXT_IRQ3              (IRQ_INTERNAL_BASE + 28)
 
+/*
+ * 6362 irqs
+ */
+#define BCM_6362_HIGH_IRQ_BASE         (IRQ_INTERNAL_BASE + 32)
+
+#define BCM_6362_TIMER_IRQ             (IRQ_INTERNAL_BASE + 0)
+#define BCM_6362_SPI_IRQ               (IRQ_INTERNAL_BASE + 2)
+#define BCM_6362_UART0_IRQ             (IRQ_INTERNAL_BASE + 3)
+#define BCM_6362_UART1_IRQ             (IRQ_INTERNAL_BASE + 4)
+#define BCM_6362_DSL_IRQ               (IRQ_INTERNAL_BASE + 28)
+#define BCM_6362_UDC0_IRQ              0
+#define BCM_6362_ENET0_IRQ             0
+#define BCM_6362_ENET1_IRQ             0
+#define BCM_6362_ENET_PHY_IRQ          (IRQ_INTERNAL_BASE + 14)
+#define BCM_6362_HSSPI_IRQ             (IRQ_INTERNAL_BASE + 5)
+#define BCM_6362_OHCI0_IRQ             (IRQ_INTERNAL_BASE + 9)
+#define BCM_6362_EHCI0_IRQ             (IRQ_INTERNAL_BASE + 10)
+#define BCM_6362_USBD_IRQ              (IRQ_INTERNAL_BASE + 11)
+#define BCM_6362_USBD_RXDMA0_IRQ       (IRQ_INTERNAL_BASE + 20)
+#define BCM_6362_USBD_TXDMA0_IRQ       (IRQ_INTERNAL_BASE + 21)
+#define BCM_6362_USBD_RXDMA1_IRQ       (IRQ_INTERNAL_BASE + 22)
+#define BCM_6362_USBD_TXDMA1_IRQ       (IRQ_INTERNAL_BASE + 23)
+#define BCM_6362_USBD_RXDMA2_IRQ       (IRQ_INTERNAL_BASE + 24)
+#define BCM_6362_USBD_TXDMA2_IRQ       (IRQ_INTERNAL_BASE + 25)
+#define BCM_6362_PCMCIA_IRQ            0
+#define BCM_6362_ENET0_RXDMA_IRQ       0
+#define BCM_6362_ENET0_TXDMA_IRQ       0
+#define BCM_6362_ENET1_RXDMA_IRQ       0
+#define BCM_6362_ENET1_TXDMA_IRQ       0
+#define BCM_6362_PCI_IRQ               (IRQ_INTERNAL_BASE + 30)
+#define BCM_6362_ATM_IRQ               0
+#define BCM_6362_ENETSW_RXDMA0_IRQ     (BCM_6362_HIGH_IRQ_BASE + 0)
+#define BCM_6362_ENETSW_RXDMA1_IRQ     (BCM_6362_HIGH_IRQ_BASE + 1)
+#define BCM_6362_ENETSW_RXDMA2_IRQ     (BCM_6362_HIGH_IRQ_BASE + 2)
+#define BCM_6362_ENETSW_RXDMA3_IRQ     (BCM_6362_HIGH_IRQ_BASE + 3)
+#define BCM_6362_ENETSW_TXDMA0_IRQ     0
+#define BCM_6362_ENETSW_TXDMA1_IRQ     0
+#define BCM_6362_ENETSW_TXDMA2_IRQ     0
+#define BCM_6362_ENETSW_TXDMA3_IRQ     0
+#define BCM_6362_XTM_IRQ               0
+#define BCM_6362_XTM_DMA0_IRQ          (BCM_6362_HIGH_IRQ_BASE + 12)
+
+#define BCM_6362_RING_OSC_IRQ          (IRQ_INTERNAL_BASE + 1)
+#define BCM_6362_WLAN_GPIO_IRQ         (IRQ_INTERNAL_BASE + 6)
+#define BCM_6362_WLAN_IRQ              (IRQ_INTERNAL_BASE + 7)
+#define BCM_6362_IPSEC_IRQ             (IRQ_INTERNAL_BASE + 8)
+#define BCM_6362_NAND_IRQ              (IRQ_INTERNAL_BASE + 12)
+#define BCM_6362_PCM_IRQ               (IRQ_INTERNAL_BASE + 13)
+#define BCM_6362_DG_IRQ                        (IRQ_INTERNAL_BASE + 15)
+#define BCM_6362_EPHY_ENERGY0_IRQ      (IRQ_INTERNAL_BASE + 16)
+#define BCM_6362_EPHY_ENERGY1_IRQ      (IRQ_INTERNAL_BASE + 17)
+#define BCM_6362_EPHY_ENERGY2_IRQ      (IRQ_INTERNAL_BASE + 18)
+#define BCM_6362_EPHY_ENERGY3_IRQ      (IRQ_INTERNAL_BASE + 19)
+#define BCM_6362_IPSEC_DMA0_IRQ                (IRQ_INTERNAL_BASE + 26)
+#define BCM_6362_IPSEC_DMA1_IRQ                (IRQ_INTERNAL_BASE + 27)
+#define BCM_6362_FAP0_IRQ              (IRQ_INTERNAL_BASE + 29)
+#define BCM_6362_PCM_DMA0_IRQ          (BCM_6362_HIGH_IRQ_BASE + 4)
+#define BCM_6362_PCM_DMA1_IRQ          (BCM_6362_HIGH_IRQ_BASE + 5)
+#define BCM_6362_DECT0_IRQ             (BCM_6362_HIGH_IRQ_BASE + 6)
+#define BCM_6362_DECT1_IRQ             (BCM_6362_HIGH_IRQ_BASE + 7)
+#define BCM_6362_EXT_IRQ0              (BCM_6362_HIGH_IRQ_BASE + 8)
+#define BCM_6362_EXT_IRQ1              (BCM_6362_HIGH_IRQ_BASE + 9)
+#define BCM_6362_EXT_IRQ2              (BCM_6362_HIGH_IRQ_BASE + 10)
+#define BCM_6362_EXT_IRQ3              (BCM_6362_HIGH_IRQ_BASE + 11)
+
 /*
  * 6368 irqs
  */
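For orientation, the BCM6362 support added above follows the existing pattern: BCMCPU_IS_6362() folds to a constant 0 or 1 when only one CPU family is enabled in Kconfig and falls back to a runtime check of __bcm63xx_get_cpu_id() otherwise, while __GEN_RSET(6362) feeds the new base-address table into bcm63xx_regset_address(). A small usage sketch; the helper function, the pr_info() call and the RSET_TIMER enum entry are assumptions here, the detection and register-set machinery is from the hunks above:

#include <linux/kernel.h>
#include <bcm63xx_cpu.h>	/* BCMCPU_IS_6362(), bcm63xx_regset_address(); mach include path assumed */

/* Illustrative only: report where the BCM6362 timer block lives. */
static void example_report_6362_timer_base(void)
{
	if (!BCMCPU_IS_6362())	/* compiles to 0 unless CONFIG_BCM63XX_CPU_6362 is selected */
		return;

	/* resolves to BCM_6362_TIMER_BASE (0xb0000040) via __GEN_RSET(6362) */
	pr_info("BCM6362 timer block at 0x%lx\n",
		bcm63xx_regset_address(RSET_TIMER));
}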
index b0184cf025755a49d5fc7ca6614e82c9866b4fd8..c426cabc620a1df26338693f77e0485e07402cb1 100644 (file)
@@ -71,18 +71,13 @@ static inline unsigned long bcm63xx_spireg(enum bcm63xx_regs_spi reg)
 
        return bcm63xx_regs_spi[reg];
 #else
-#ifdef CONFIG_BCM63XX_CPU_6338
-       __GEN_SPI_RSET(6338)
-#endif
-#ifdef CONFIG_BCM63XX_CPU_6348
+#if defined(CONFIG_BCM63XX_CPU_6338) || defined(CONFIG_BCM63XX_CPU_6348)
        __GEN_SPI_RSET(6348)
 #endif
-#ifdef CONFIG_BCM63XX_CPU_6358
+#if defined(CONFIG_BCM63XX_CPU_6358) || defined(CONFIG_BCM63XX_CPU_6362) || \
+       defined(CONFIG_BCM63XX_CPU_6368)
        __GEN_SPI_RSET(6358)
 #endif
-#ifdef CONFIG_BCM63XX_CPU_6368
-       __GEN_SPI_RSET(6368)
-#endif
 #endif
        return 0;
 }
index 0a9891f7580de557e702e7c4708139705b27605e..35baa1a60a64547a7594aa30e91ee7fedcc1f101 100644 (file)
@@ -17,6 +17,8 @@ static inline unsigned long bcm63xx_gpio_count(void)
                return 8;
        case BCM6345_CPU_ID:
                return 16;
+       case BCM6362_CPU_ID:
+               return 48;
        case BCM6368_CPU_ID:
                return 38;
        case BCM6348_CPU_ID:
index 81b4702f792a81693dbfc49db980276296967cff..3203fe49b34d4d55e6808c0c50f4d55a4f9f91b0 100644 (file)
@@ -10,7 +10,7 @@
 #define REV_CHIPID_SHIFT               16
 #define REV_CHIPID_MASK                        (0xffff << REV_CHIPID_SHIFT)
 #define REV_REVID_SHIFT                        0
-#define REV_REVID_MASK                 (0xffff << REV_REVID_SHIFT)
+#define REV_REVID_MASK                 (0xff << REV_REVID_SHIFT)
 
 /* Clock Control register */
 #define PERF_CKCTL_REG                 0x4
                                        CKCTL_6358_USBSU_EN |           \
                                        CKCTL_6358_EPHY_EN)
 
+#define CKCTL_6362_ADSL_QPROC_EN       (1 << 1)
+#define CKCTL_6362_ADSL_AFE_EN         (1 << 2)
+#define CKCTL_6362_ADSL_EN             (1 << 3)
+#define CKCTL_6362_MIPS_EN             (1 << 4)
+#define CKCTL_6362_WLAN_OCP_EN         (1 << 5)
+#define CKCTL_6362_SWPKT_USB_EN                (1 << 7)
+#define CKCTL_6362_SWPKT_SAR_EN                (1 << 8)
+#define CKCTL_6362_SAR_EN              (1 << 9)
+#define CKCTL_6362_ROBOSW_EN           (1 << 10)
+#define CKCTL_6362_PCM_EN              (1 << 11)
+#define CKCTL_6362_USBD_EN             (1 << 12)
+#define CKCTL_6362_USBH_EN             (1 << 13)
+#define CKCTL_6362_IPSEC_EN            (1 << 14)
+#define CKCTL_6362_SPI_EN              (1 << 15)
+#define CKCTL_6362_HSSPI_EN            (1 << 16)
+#define CKCTL_6362_PCIE_EN             (1 << 17)
+#define CKCTL_6362_FAP_EN              (1 << 18)
+#define CKCTL_6362_PHYMIPS_EN          (1 << 19)
+#define CKCTL_6362_NAND_EN             (1 << 20)
+
+#define CKCTL_6362_ALL_SAFE_EN         (CKCTL_6362_PHYMIPS_EN |        \
+                                       CKCTL_6362_ADSL_QPROC_EN |      \
+                                       CKCTL_6362_ADSL_AFE_EN |        \
+                                       CKCTL_6362_ADSL_EN |            \
+                                       CKCTL_6362_SAR_EN  |            \
+                                       CKCTL_6362_PCM_EN  |            \
+                                       CKCTL_6362_IPSEC_EN |           \
+                                       CKCTL_6362_USBD_EN |            \
+                                       CKCTL_6362_USBH_EN |            \
+                                       CKCTL_6362_ROBOSW_EN |          \
+                                       CKCTL_6362_PCIE_EN)
+
+
 #define CKCTL_6368_VDSL_QPROC_EN       (1 << 2)
 #define CKCTL_6368_VDSL_AFE_EN         (1 << 3)
 #define CKCTL_6368_VDSL_BONDING_EN     (1 << 4)
 #define PERF_IRQMASK_6345_REG          0xc
 #define PERF_IRQMASK_6348_REG          0xc
 #define PERF_IRQMASK_6358_REG          0xc
+#define PERF_IRQMASK_6362_REG          0x20
 #define PERF_IRQMASK_6368_REG          0x20
 
 /* Interrupt Status register */
 #define PERF_IRQSTAT_6345_REG          0x10
 #define PERF_IRQSTAT_6348_REG          0x10
 #define PERF_IRQSTAT_6358_REG          0x10
+#define PERF_IRQSTAT_6362_REG          0x28
 #define PERF_IRQSTAT_6368_REG          0x28
 
 /* External Interrupt Configuration register */
 #define PERF_EXTIRQ_CFG_REG_6345       0x14
 #define PERF_EXTIRQ_CFG_REG_6348       0x14
 #define PERF_EXTIRQ_CFG_REG_6358       0x14
+#define PERF_EXTIRQ_CFG_REG_6362       0x18
 #define PERF_EXTIRQ_CFG_REG_6368       0x18
 
 #define PERF_EXTIRQ_CFG_REG2_6368      0x1c
 #define PERF_SOFTRESET_REG             0x28
 #define PERF_SOFTRESET_6328_REG                0x10
 #define PERF_SOFTRESET_6358_REG                0x34
+#define PERF_SOFTRESET_6362_REG                0x10
 #define PERF_SOFTRESET_6368_REG                0x10
 
 #define SOFTRESET_6328_SPI_MASK                (1 << 0)
 #define SOFTRESET_6358_PCM_MASK                (1 << 13)
 #define SOFTRESET_6358_ADSL_MASK       (1 << 14)
 
+#define SOFTRESET_6362_SPI_MASK                (1 << 0)
+#define SOFTRESET_6362_IPSEC_MASK      (1 << 1)
+#define SOFTRESET_6362_EPHY_MASK       (1 << 2)
+#define SOFTRESET_6362_SAR_MASK                (1 << 3)
+#define SOFTRESET_6362_ENETSW_MASK     (1 << 4)
+#define SOFTRESET_6362_USBS_MASK       (1 << 5)
+#define SOFTRESET_6362_USBH_MASK       (1 << 6)
+#define SOFTRESET_6362_PCM_MASK                (1 << 7)
+#define SOFTRESET_6362_PCIE_CORE_MASK  (1 << 8)
+#define SOFTRESET_6362_PCIE_MASK       (1 << 9)
+#define SOFTRESET_6362_PCIE_EXT_MASK   (1 << 10)
+#define SOFTRESET_6362_WLAN_SHIM_MASK  (1 << 11)
+#define SOFTRESET_6362_DDR_PHY_MASK    (1 << 12)
+#define SOFTRESET_6362_FAP_MASK                (1 << 13)
+#define SOFTRESET_6362_WLAN_UBUS_MASK  (1 << 14)
+
 #define SOFTRESET_6368_SPI_MASK                (1 << 0)
 #define SOFTRESET_6368_MPI_MASK                (1 << 3)
 #define SOFTRESET_6368_EPHY_MASK       (1 << 6)
  * _REG relative to RSET_SPI
  *************************************************************************/
 
-/* BCM 6338 SPI core */
-#define SPI_6338_CMD                   0x00    /* 16-bits register */
-#define SPI_6338_INT_STATUS            0x02
-#define SPI_6338_INT_MASK_ST           0x03
-#define SPI_6338_INT_MASK              0x04
-#define SPI_6338_ST                    0x05
-#define SPI_6338_CLK_CFG               0x06
-#define SPI_6338_FILL_BYTE             0x07
-#define SPI_6338_MSG_TAIL              0x09
-#define SPI_6338_RX_TAIL               0x0b
-#define SPI_6338_MSG_CTL               0x40    /* 8-bits register */
-#define SPI_6338_MSG_CTL_WIDTH         8
-#define SPI_6338_MSG_DATA              0x41
-#define SPI_6338_MSG_DATA_SIZE         0x3f
-#define SPI_6338_RX_DATA               0x80
-#define SPI_6338_RX_DATA_SIZE          0x3f
-
-/* BCM 6348 SPI core */
+/* BCM 6338/6348 SPI core */
 #define SPI_6348_CMD                   0x00    /* 16-bits register */
 #define SPI_6348_INT_STATUS            0x02
 #define SPI_6348_INT_MASK_ST           0x03
 #define SPI_6348_RX_DATA               0x80
 #define SPI_6348_RX_DATA_SIZE          0x3f
 
-/* BCM 6358 SPI core */
+/* BCM 6358/6362/6368 SPI core */
 #define SPI_6358_MSG_CTL               0x00    /* 16-bits register */
 #define SPI_6358_MSG_CTL_WIDTH         16
 #define SPI_6358_MSG_DATA              0x02
 #define SPI_6358_MSG_TAIL              0x709
 #define SPI_6358_RX_TAIL               0x70B
 
-/* BCM 6358 SPI core */
-#define SPI_6368_MSG_CTL               0x00    /* 16-bits register */
-#define SPI_6368_MSG_CTL_WIDTH         16
-#define SPI_6368_MSG_DATA              0x02
-#define SPI_6368_MSG_DATA_SIZE         0x21e
-#define SPI_6368_RX_DATA               0x400
-#define SPI_6368_RX_DATA_SIZE          0x220
-#define SPI_6368_CMD                   0x700   /* 16-bits register */
-#define SPI_6368_INT_STATUS            0x702
-#define SPI_6368_INT_MASK_ST           0x703
-#define SPI_6368_INT_MASK              0x704
-#define SPI_6368_ST                    0x705
-#define SPI_6368_CLK_CFG               0x706
-#define SPI_6368_FILL_BYTE             0x707
-#define SPI_6368_MSG_TAIL              0x709
-#define SPI_6368_RX_TAIL               0x70B
-
 /* Shared SPI definitions */
 
 /* Message configuration */
 #define SPI_HD_W                       0x01
 #define SPI_HD_R                       0x02
 #define SPI_BYTE_CNT_SHIFT             0
-#define SPI_6338_MSG_TYPE_SHIFT                6
 #define SPI_6348_MSG_TYPE_SHIFT                6
 #define SPI_6358_MSG_TYPE_SHIFT                14
-#define SPI_6368_MSG_TYPE_SHIFT                14
 
 /* Command */
 #define SPI_CMD_NOOP                   0x00
 /*************************************************************************
  * _REG relative to RSET_MISC
  *************************************************************************/
-#define MISC_SERDES_CTRL_REG           0x0
+#define MISC_SERDES_CTRL_6328_REG      0x0
+#define MISC_SERDES_CTRL_6362_REG      0x4
 #define SERDES_PCIE_EN                 (1 << 0)
 #define SERDES_PCIE_EXD_EN             (1 << 15)
 
+#define MISC_STRAPBUS_6362_REG         0x14
+#define STRAPBUS_6362_FCVO_SHIFT       1
+#define STRAPBUS_6362_HSSPI_CLK_FAST   (1 << 13)
+#define STRAPBUS_6362_FCVO_MASK                (0x1f << STRAPBUS_6362_FCVO_SHIFT)
+#define STRAPBUS_6362_BOOT_SEL_SERIAL  (1 << 15)
+#define STRAPBUS_6362_BOOT_SEL_NAND    (0 << 15)
+
 #define MISC_STRAPBUS_6328_REG         0x240
 #define STRAPBUS_6328_FCVO_SHIFT       7
 #define STRAPBUS_6328_FCVO_MASK                (0x1f << STRAPBUS_6328_FCVO_SHIFT)
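The MISC additions above split the SERDES control offset per chip and describe the 6362 strap register: a 5-bit FCVO clock-selection field starting at bit 1, an HSSPI clock strap at bit 13, and a boot-source strap at bit 15 (set means serial flash, clear means NAND). Purely as a bit-arithmetic illustration; the bcm_misc_readl() accessor and the function are assumptions, only the register offset and masks come from this hunk:

#include <linux/kernel.h>
#include <bcm63xx_regs.h>	/* MISC_STRAPBUS_6362_REG, STRAPBUS_6362_* */
#include <bcm63xx_io.h>		/* bcm_misc_readl(), assumed MISC-block accessor */

/* Illustrative decode of the BCM6362 strap register. */
static void example_decode_6362_straps(void)
{
	u32 strap = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
	u32 fcvo = (strap & STRAPBUS_6362_FCVO_MASK) >> STRAPBUS_6362_FCVO_SHIFT;

	pr_info("FCVO index %u, boot from %s, HSSPI %s clock\n", fcvo,
		(strap & STRAPBUS_6362_BOOT_SEL_SERIAL) ? "serial flash" : "NAND",
		(strap & STRAPBUS_6362_HSSPI_CLK_FAST) ? "fast" : "slow");
}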
index 30931c42379d95841ff667bc2036e1f4939dfe51..94e3011ba7df99b55e5c6657e218a5cccc25e1e4 100644 (file)
@@ -19,6 +19,7 @@ static inline int is_bcm63xx_internal_registers(phys_t offset)
                        return 1;
                break;
        case BCM6328_CPU_ID:
+       case BCM6362_CPU_ID:
        case BCM6368_CPU_ID:
                if (offset >= 0xb0000000 && offset < 0xb1000000)
                        return 1;
index 9c95177f7a7e04bde7771cf5b7c8204753a57e9b..fe23034aaf721497af158ee2deec840038478dcf 100644 (file)
@@ -61,9 +61,8 @@ static inline int plat_device_is_coherent(struct device *dev)
 {
 #ifdef CONFIG_DMA_COHERENT
        return 1;
-#endif
-#ifdef CONFIG_DMA_NONCOHERENT
-       return 0;
+#else
+       return coherentio;
 #endif
 }
 
index 73d717a75cb0a1038e2f10009d67e657e23ab429..5b2f2e68e57f08210be7d4a98370cb3895111adf 100644 (file)
 #endif
 
 #ifdef CONFIG_32BIT
-
+#ifdef CONFIG_KVM_GUEST
+#define CAC_BASE               _AC(0x40000000, UL)
+#else
 #define CAC_BASE               _AC(0x80000000, UL)
+#endif
 #define IO_BASE                        _AC(0xa0000000, UL)
 #define UNCAC_BASE             _AC(0xa0000000, UL)
 
 #ifndef MAP_BASE
+#ifdef CONFIG_KVM_GUEST
+#define MAP_BASE               _AC(0x60000000, UL)
+#else
 #define MAP_BASE               _AC(0xc0000000, UL)
 #endif
+#endif
 
 /*
  * Memory above this physical address will be considered highmem.
index 75fd8c0f986eaff51a02ce2807a5c92442e657be..c0f3ef45c2c14f0e658feee183eabc6f8141ea9f 100644 (file)
@@ -57,5 +57,6 @@
 #define cpu_has_vint           0
 #define cpu_has_vtag_icache    0
 #define cpu_has_watch          1
+#define cpu_has_local_ebase    0
 
 #endif /* __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
new file mode 100644 (file)
index 0000000..9809972
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _MT7620_REGS_H_
+#define _MT7620_REGS_H_
+
+#define MT7620_SYSC_BASE               0x10000000
+
+#define SYSC_REG_CHIP_NAME0            0x00
+#define SYSC_REG_CHIP_NAME1            0x04
+#define SYSC_REG_CHIP_REV              0x0c
+#define SYSC_REG_SYSTEM_CONFIG0                0x10
+#define SYSC_REG_SYSTEM_CONFIG1                0x14
+#define SYSC_REG_CPLL_CONFIG0          0x54
+#define SYSC_REG_CPLL_CONFIG1          0x58
+
+#define MT7620N_CHIP_NAME0             0x33365452
+#define MT7620N_CHIP_NAME1             0x20203235
+
+#define MT7620A_CHIP_NAME0             0x3637544d
+#define MT7620A_CHIP_NAME1             0x20203032
+
+#define CHIP_REV_PKG_MASK              0x1
+#define CHIP_REV_PKG_SHIFT             16
+#define CHIP_REV_VER_MASK              0xf
+#define CHIP_REV_VER_SHIFT             8
+#define CHIP_REV_ECO_MASK              0xf
+
+#define CPLL_SW_CONFIG_SHIFT           31
+#define CPLL_SW_CONFIG_MASK            0x1
+#define CPLL_CPU_CLK_SHIFT             24
+#define CPLL_CPU_CLK_MASK              0x1
+#define CPLL_MULT_RATIO_SHIFT           16
+#define CPLL_MULT_RATIO                 0x7
+#define CPLL_DIV_RATIO_SHIFT            10
+#define CPLL_DIV_RATIO                  0x3
+
+#define SYSCFG0_DRAM_TYPE_MASK         0x3
+#define SYSCFG0_DRAM_TYPE_SHIFT                4
+#define SYSCFG0_DRAM_TYPE_SDRAM                0
+#define SYSCFG0_DRAM_TYPE_DDR1         1
+#define SYSCFG0_DRAM_TYPE_DDR2         2
+
+#define MT7620_DRAM_BASE               0x0
+#define MT7620_SDRAM_SIZE_MIN          2
+#define MT7620_SDRAM_SIZE_MAX          64
+#define MT7620_DDR1_SIZE_MIN           32
+#define MT7620_DDR1_SIZE_MAX           128
+#define MT7620_DDR2_SIZE_MIN           32
+#define MT7620_DDR2_SIZE_MAX           256
+
+#define MT7620_GPIO_MODE_I2C           BIT(0)
+#define MT7620_GPIO_MODE_UART0_SHIFT   2
+#define MT7620_GPIO_MODE_UART0_MASK    0x7
+#define MT7620_GPIO_MODE_UART0(x)      ((x) << MT7620_GPIO_MODE_UART0_SHIFT)
+#define MT7620_GPIO_MODE_UARTF         0x0
+#define MT7620_GPIO_MODE_PCM_UARTF     0x1
+#define MT7620_GPIO_MODE_PCM_I2S       0x2
+#define MT7620_GPIO_MODE_I2S_UARTF     0x3
+#define MT7620_GPIO_MODE_PCM_GPIO      0x4
+#define MT7620_GPIO_MODE_GPIO_UARTF    0x5
+#define MT7620_GPIO_MODE_GPIO_I2S      0x6
+#define MT7620_GPIO_MODE_GPIO          0x7
+#define MT7620_GPIO_MODE_UART1         BIT(5)
+#define MT7620_GPIO_MODE_MDIO          BIT(8)
+#define MT7620_GPIO_MODE_RGMII1                BIT(9)
+#define MT7620_GPIO_MODE_RGMII2                BIT(10)
+#define MT7620_GPIO_MODE_SPI           BIT(11)
+#define MT7620_GPIO_MODE_SPI_REF_CLK   BIT(12)
+#define MT7620_GPIO_MODE_WLED          BIT(13)
+#define MT7620_GPIO_MODE_JTAG          BIT(15)
+#define MT7620_GPIO_MODE_EPHY          BIT(15)
+#define MT7620_GPIO_MODE_WDT           BIT(22)
+
+#endif
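SYSC_REG_CHIP_REV above packs package, version and ECO fields into one word, with the shift/mask pairs defined right after it. A short decode sketch; the rt_sysc_r32() accessor and the function are assumptions, the field layout comes from this header:

#include <linux/kernel.h>
#include <asm/mach-ralink/ralink_regs.h>	/* rt_sysc_r32(), assumed SYSC accessor */
#include <asm/mach-ralink/mt7620.h>

/* Illustrative decode of the MT7620 revision register. */
static void example_mt7620_revision(void)
{
	u32 rev = rt_sysc_r32(SYSC_REG_CHIP_REV);

	pr_info("MT7620 pkg:%u ver:%u eco:%u\n",
		(rev >> CHIP_REV_PKG_SHIFT) & CHIP_REV_PKG_MASK,
		(rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
		rev & CHIP_REV_ECO_MASK);
}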
diff --git a/arch/mips/include/asm/mach-ralink/rt288x.h b/arch/mips/include/asm/mach-ralink/rt288x.h
new file mode 100644 (file)
index 0000000..03ad716
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _RT288X_REGS_H_
+#define _RT288X_REGS_H_
+
+#define RT2880_SYSC_BASE               0x00300000
+
+#define SYSC_REG_CHIP_NAME0            0x00
+#define SYSC_REG_CHIP_NAME1            0x04
+#define SYSC_REG_CHIP_ID               0x0c
+#define SYSC_REG_SYSTEM_CONFIG         0x10
+#define SYSC_REG_CLKCFG                        0x30
+
+#define RT2880_CHIP_NAME0              0x38325452
+#define RT2880_CHIP_NAME1              0x20203038
+
+#define CHIP_ID_ID_MASK                        0xff
+#define CHIP_ID_ID_SHIFT               8
+#define CHIP_ID_REV_MASK               0xff
+
+#define SYSTEM_CONFIG_CPUCLK_SHIFT     20
+#define SYSTEM_CONFIG_CPUCLK_MASK      0x3
+#define SYSTEM_CONFIG_CPUCLK_250       0x0
+#define SYSTEM_CONFIG_CPUCLK_266       0x1
+#define SYSTEM_CONFIG_CPUCLK_280       0x2
+#define SYSTEM_CONFIG_CPUCLK_300       0x3
+
+#define RT2880_GPIO_MODE_I2C           BIT(0)
+#define RT2880_GPIO_MODE_UART0         BIT(1)
+#define RT2880_GPIO_MODE_SPI           BIT(2)
+#define RT2880_GPIO_MODE_UART1         BIT(3)
+#define RT2880_GPIO_MODE_JTAG          BIT(4)
+#define RT2880_GPIO_MODE_MDIO          BIT(5)
+#define RT2880_GPIO_MODE_SDRAM         BIT(6)
+#define RT2880_GPIO_MODE_PCI           BIT(7)
+
+#define CLKCFG_SRAM_CS_N_WDT           BIT(9)
+
+#define RT2880_SDRAM_BASE              0x08000000
+#define RT2880_MEM_SIZE_MIN            2
+#define RT2880_MEM_SIZE_MAX            128
+
+#endif
diff --git a/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..72fc106
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Ralink RT288x specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ *     Copyright (C) 2003, 2004 Ralf Baechle
+ *     Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT288X_CPU_FEATURE_OVERRIDES_H
+#define _RT288X_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb            1
+#define cpu_has_4kex           1
+#define cpu_has_3k_cache       0
+#define cpu_has_4k_cache       1
+#define cpu_has_tx39_cache     0
+#define cpu_has_sb1_cache      0
+#define cpu_has_fpu            0
+#define cpu_has_32fpr          0
+#define cpu_has_counter                1
+#define cpu_has_watch          1
+#define cpu_has_divec          1
+
+#define cpu_has_prefetch       1
+#define cpu_has_ejtag          1
+#define cpu_has_llsc           1
+
+#define cpu_has_mips16         1
+#define cpu_has_mdmx           0
+#define cpu_has_mips3d         0
+#define cpu_has_smartmips      0
+
+#define cpu_has_mips32r1       1
+#define cpu_has_mips32r2       1
+#define cpu_has_mips64r1       0
+#define cpu_has_mips64r2       0
+
+#define cpu_has_dsp            0
+#define cpu_has_mipsmt         0
+
+#define cpu_has_64bits         0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs  0
+#define cpu_has_64bit_addresses        0
+
+#define cpu_dcache_line_size() 16
+#define cpu_icache_line_size() 16
+
+#endif /* _RT288X_CPU_FEATURE_OVERRIDES_H */
index 7d344f2d7d0addb88148b1be703aa25a9c676452..069bf37a6010635ec026b0eae0bcb1766813460f 100644 (file)
@@ -97,6 +97,14 @@ static inline int soc_is_rt5350(void)
 #define RT5350_SYSCFG0_CPUCLK_320      0x2
 #define RT5350_SYSCFG0_CPUCLK_300      0x3
 
+#define RT5350_SYSCFG0_DRAM_SIZE_SHIFT  12
+#define RT5350_SYSCFG0_DRAM_SIZE_MASK   7
+#define RT5350_SYSCFG0_DRAM_SIZE_2M     0
+#define RT5350_SYSCFG0_DRAM_SIZE_8M     1
+#define RT5350_SYSCFG0_DRAM_SIZE_16M    2
+#define RT5350_SYSCFG0_DRAM_SIZE_32M    3
+#define RT5350_SYSCFG0_DRAM_SIZE_64M    4
+
 /* multi function gpio pins */
 #define RT305X_GPIO_I2C_SD             1
 #define RT305X_GPIO_I2C_SCLK           2
@@ -136,4 +144,23 @@ static inline int soc_is_rt5350(void)
 #define RT305X_GPIO_MODE_SDRAM         BIT(8)
 #define RT305X_GPIO_MODE_RGMII         BIT(9)
 
+#define RT3352_SYSC_REG_SYSCFG0                0x010
+#define RT3352_SYSC_REG_SYSCFG1         0x014
+#define RT3352_SYSC_REG_CLKCFG1         0x030
+#define RT3352_SYSC_REG_RSTCTRL         0x034
+#define RT3352_SYSC_REG_USB_PS          0x05c
+
+#define RT3352_CLKCFG0_XTAL_SEL                BIT(20)
+#define RT3352_CLKCFG1_UPHY0_CLK_EN    BIT(18)
+#define RT3352_CLKCFG1_UPHY1_CLK_EN    BIT(20)
+#define RT3352_RSTCTRL_UHST            BIT(22)
+#define RT3352_RSTCTRL_UDEV            BIT(25)
+#define RT3352_SYSCFG1_USB0_HOST_MODE  BIT(10)
+
+#define RT305X_SDRAM_BASE              0x00000000
+#define RT305X_MEM_SIZE_MIN            2
+#define RT305X_MEM_SIZE_MAX            64
+#define RT3352_MEM_SIZE_MIN            2
+#define RT3352_MEM_SIZE_MAX            256
+
 #endif
diff --git a/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..917c286
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Ralink RT305x specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ *     Copyright (C) 2003, 2004 Ralf Baechle
+ *     Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT305X_CPU_FEATURE_OVERRIDES_H
+#define _RT305X_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb            1
+#define cpu_has_4kex           1
+#define cpu_has_3k_cache       0
+#define cpu_has_4k_cache       1
+#define cpu_has_tx39_cache     0
+#define cpu_has_sb1_cache      0
+#define cpu_has_fpu            0
+#define cpu_has_32fpr          0
+#define cpu_has_counter                1
+#define cpu_has_watch          1
+#define cpu_has_divec          1
+
+#define cpu_has_prefetch       1
+#define cpu_has_ejtag          1
+#define cpu_has_llsc           1
+
+#define cpu_has_mips16         1
+#define cpu_has_mdmx           0
+#define cpu_has_mips3d         0
+#define cpu_has_smartmips      0
+
+#define cpu_has_mips32r1       1
+#define cpu_has_mips32r2       1
+#define cpu_has_mips64r1       0
+#define cpu_has_mips64r2       0
+
+#define cpu_has_dsp            1
+#define cpu_has_mipsmt         0
+
+#define cpu_has_64bits         0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs  0
+#define cpu_has_64bit_addresses        0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#endif /* _RT305X_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h
new file mode 100644 (file)
index 0000000..058382f
--- /dev/null
@@ -0,0 +1,252 @@
+/*
+ * Ralink RT3662/RT3883 SoC register definitions
+ *
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _RT3883_REGS_H_
+#define _RT3883_REGS_H_
+
+#include <linux/bitops.h>
+
+#define RT3883_SDRAM_BASE      0x00000000
+#define RT3883_SYSC_BASE       0x10000000
+#define RT3883_TIMER_BASE      0x10000100
+#define RT3883_INTC_BASE       0x10000200
+#define RT3883_MEMC_BASE       0x10000300
+#define RT3883_UART0_BASE      0x10000500
+#define RT3883_PIO_BASE                0x10000600
+#define RT3883_FSCC_BASE       0x10000700
+#define RT3883_NANDC_BASE      0x10000810
+#define RT3883_I2C_BASE                0x10000900
+#define RT3883_I2S_BASE                0x10000a00
+#define RT3883_SPI_BASE                0x10000b00
+#define RT3883_UART1_BASE      0x10000c00
+#define RT3883_PCM_BASE                0x10002000
+#define RT3883_GDMA_BASE       0x10002800
+#define RT3883_CODEC1_BASE     0x10003000
+#define RT3883_CODEC2_BASE     0x10003800
+#define RT3883_FE_BASE         0x10100000
+#define RT3883_ROM_BASE                0x10118000
+#define RT3883_USBDEV_BASE     0x10112000
+#define RT3883_PCI_BASE                0x10140000
+#define RT3883_WLAN_BASE       0x10180000
+#define RT3883_USBHOST_BASE    0x101c0000
+#define RT3883_BOOT_BASE       0x1c000000
+#define RT3883_SRAM_BASE       0x1e000000
+#define RT3883_PCIMEM_BASE     0x20000000
+
+#define RT3883_EHCI_BASE       (RT3883_USBHOST_BASE)
+#define RT3883_OHCI_BASE       (RT3883_USBHOST_BASE + 0x1000)
+
+#define RT3883_SYSC_SIZE       0x100
+#define RT3883_TIMER_SIZE      0x100
+#define RT3883_INTC_SIZE       0x100
+#define RT3883_MEMC_SIZE       0x100
+#define RT3883_UART0_SIZE      0x100
+#define RT3883_UART1_SIZE      0x100
+#define RT3883_PIO_SIZE                0x100
+#define RT3883_FSCC_SIZE       0x100
+#define RT3883_NANDC_SIZE      0x0f0
+#define RT3883_I2C_SIZE                0x100
+#define RT3883_I2S_SIZE                0x100
+#define RT3883_SPI_SIZE                0x100
+#define RT3883_PCM_SIZE                0x800
+#define RT3883_GDMA_SIZE       0x800
+#define RT3883_CODEC1_SIZE     0x800
+#define RT3883_CODEC2_SIZE     0x800
+#define RT3883_FE_SIZE         0x10000
+#define RT3883_ROM_SIZE                0x4000
+#define RT3883_USBDEV_SIZE     0x4000
+#define RT3883_PCI_SIZE                0x40000
+#define RT3883_WLAN_SIZE       0x40000
+#define RT3883_USBHOST_SIZE    0x40000
+#define RT3883_BOOT_SIZE       (32 * 1024 * 1024)
+#define RT3883_SRAM_SIZE       (32 * 1024 * 1024)
+
+/* SYSC registers */
+#define RT3883_SYSC_REG_CHIPID0_3      0x00    /* Chip ID 0 */
+#define RT3883_SYSC_REG_CHIPID4_7      0x04    /* Chip ID 1 */
+#define RT3883_SYSC_REG_REVID          0x0c    /* Chip Revision Identification */
+#define RT3883_SYSC_REG_SYSCFG0                0x10    /* System Configuration 0 */
+#define RT3883_SYSC_REG_SYSCFG1                0x14    /* System Configuration 1 */
+#define RT3883_SYSC_REG_CLKCFG0                0x2c    /* Clock Configuration 0 */
+#define RT3883_SYSC_REG_CLKCFG1                0x30    /* Clock Configuration 1 */
+#define RT3883_SYSC_REG_RSTCTRL                0x34    /* Reset Control*/
+#define RT3883_SYSC_REG_RSTSTAT                0x38    /* Reset Status*/
+#define RT3883_SYSC_REG_USB_PS         0x5c    /* USB Power saving control */
+#define RT3883_SYSC_REG_GPIO_MODE      0x60    /* GPIO Purpose Select */
+#define RT3883_SYSC_REG_PCIE_CLK_GEN0  0x7c
+#define RT3883_SYSC_REG_PCIE_CLK_GEN1  0x80
+#define RT3883_SYSC_REG_PCIE_CLK_GEN2  0x84
+#define RT3883_SYSC_REG_PMU            0x88
+#define RT3883_SYSC_REG_PMU1           0x8c
+
+#define RT3883_CHIP_NAME0              0x38335452
+#define RT3883_CHIP_NAME1              0x20203338
+
+#define RT3883_REVID_VER_ID_MASK       0x0f
+#define RT3883_REVID_VER_ID_SHIFT      8
+#define RT3883_REVID_ECO_ID_MASK       0x0f
+
+#define RT3883_SYSCFG0_DRAM_TYPE_DDR2  BIT(17)
+#define RT3883_SYSCFG0_CPUCLK_SHIFT    8
+#define RT3883_SYSCFG0_CPUCLK_MASK     0x3
+#define RT3883_SYSCFG0_CPUCLK_250      0x0
+#define RT3883_SYSCFG0_CPUCLK_384      0x1
+#define RT3883_SYSCFG0_CPUCLK_480      0x2
+#define RT3883_SYSCFG0_CPUCLK_500      0x3
+
+#define RT3883_SYSCFG1_USB0_HOST_MODE  BIT(10)
+#define RT3883_SYSCFG1_PCIE_RC_MODE    BIT(8)
+#define RT3883_SYSCFG1_PCI_HOST_MODE   BIT(7)
+#define RT3883_SYSCFG1_PCI_66M_MODE    BIT(6)
+#define RT3883_SYSCFG1_GPIO2_AS_WDT_OUT        BIT(2)
+
+#define RT3883_CLKCFG1_PCIE_CLK_EN     BIT(21)
+#define RT3883_CLKCFG1_UPHY1_CLK_EN    BIT(20)
+#define RT3883_CLKCFG1_PCI_CLK_EN      BIT(19)
+#define RT3883_CLKCFG1_UPHY0_CLK_EN    BIT(18)
+
+#define RT3883_GPIO_MODE_I2C           BIT(0)
+#define RT3883_GPIO_MODE_SPI           BIT(1)
+#define RT3883_GPIO_MODE_UART0_SHIFT   2
+#define RT3883_GPIO_MODE_UART0_MASK    0x7
+#define RT3883_GPIO_MODE_UART0(x)      ((x) << RT3883_GPIO_MODE_UART0_SHIFT)
+#define RT3883_GPIO_MODE_UARTF         0x0
+#define RT3883_GPIO_MODE_PCM_UARTF     0x1
+#define RT3883_GPIO_MODE_PCM_I2S       0x2
+#define RT3883_GPIO_MODE_I2S_UARTF     0x3
+#define RT3883_GPIO_MODE_PCM_GPIO      0x4
+#define RT3883_GPIO_MODE_GPIO_UARTF    0x5
+#define RT3883_GPIO_MODE_GPIO_I2S      0x6
+#define RT3883_GPIO_MODE_GPIO          0x7
+#define RT3883_GPIO_MODE_UART1         BIT(5)
+#define RT3883_GPIO_MODE_JTAG          BIT(6)
+#define RT3883_GPIO_MODE_MDIO          BIT(7)
+#define RT3883_GPIO_MODE_GE1           BIT(9)
+#define RT3883_GPIO_MODE_GE2           BIT(10)
+#define RT3883_GPIO_MODE_PCI_SHIFT     11
+#define RT3883_GPIO_MODE_PCI_MASK      0x7
+#define RT3883_GPIO_MODE_PCI           (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT)
+#define RT3883_GPIO_MODE_LNA_A_SHIFT   16
+#define RT3883_GPIO_MODE_LNA_A_MASK    0x3
+#define _RT3883_GPIO_MODE_LNA_A(_x)    ((_x) << RT3883_GPIO_MODE_LNA_A_SHIFT)
+#define RT3883_GPIO_MODE_LNA_A_GPIO    0x3
+#define RT3883_GPIO_MODE_LNA_A         _RT3883_GPIO_MODE_LNA_A(RT3883_GPIO_MODE_LNA_A_MASK)
+#define RT3883_GPIO_MODE_LNA_G_SHIFT   18
+#define RT3883_GPIO_MODE_LNA_G_MASK    0x3
+#define _RT3883_GPIO_MODE_LNA_G(_x)    ((_x) << RT3883_GPIO_MODE_LNA_G_SHIFT)
+#define RT3883_GPIO_MODE_LNA_G_GPIO    0x3
+#define RT3883_GPIO_MODE_LNA_G         _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
+
+#define RT3883_GPIO_I2C_SD             1
+#define RT3883_GPIO_I2C_SCLK           2
+#define RT3883_GPIO_SPI_CS0            3
+#define RT3883_GPIO_SPI_CLK            4
+#define RT3883_GPIO_SPI_MOSI           5
+#define RT3883_GPIO_SPI_MISO           6
+#define RT3883_GPIO_7                  7
+#define RT3883_GPIO_10                 10
+#define RT3883_GPIO_11                 11
+#define RT3883_GPIO_14                 14
+#define RT3883_GPIO_UART1_TXD          15
+#define RT3883_GPIO_UART1_RXD          16
+#define RT3883_GPIO_JTAG_TDO           17
+#define RT3883_GPIO_JTAG_TDI           18
+#define RT3883_GPIO_JTAG_TMS           19
+#define RT3883_GPIO_JTAG_TCLK          20
+#define RT3883_GPIO_JTAG_TRST_N                21
+#define RT3883_GPIO_MDIO_MDC           22
+#define RT3883_GPIO_MDIO_MDIO          23
+#define RT3883_GPIO_LNA_PE_A0          32
+#define RT3883_GPIO_LNA_PE_A1          33
+#define RT3883_GPIO_LNA_PE_A2          34
+#define RT3883_GPIO_LNA_PE_G0          35
+#define RT3883_GPIO_LNA_PE_G1          36
+#define RT3883_GPIO_LNA_PE_G2          37
+#define RT3883_GPIO_PCI_AD0            40
+#define RT3883_GPIO_PCI_AD31           71
+#define RT3883_GPIO_GE2_TXD0           72
+#define RT3883_GPIO_GE2_TXD1           73
+#define RT3883_GPIO_GE2_TXD2           74
+#define RT3883_GPIO_GE2_TXD3           75
+#define RT3883_GPIO_GE2_TXEN           76
+#define RT3883_GPIO_GE2_TXCLK          77
+#define RT3883_GPIO_GE2_RXD0           78
+#define RT3883_GPIO_GE2_RXD1           79
+#define RT3883_GPIO_GE2_RXD2           80
+#define RT3883_GPIO_GE2_RXD3           81
+#define RT3883_GPIO_GE2_RXDV           82
+#define RT3883_GPIO_GE2_RXCLK          83
+#define RT3883_GPIO_GE1_TXD0           84
+#define RT3883_GPIO_GE1_TXD1           85
+#define RT3883_GPIO_GE1_TXD2           86
+#define RT3883_GPIO_GE1_TXD3           87
+#define RT3883_GPIO_GE1_TXEN           88
+#define RT3883_GPIO_GE1_TXCLK          89
+#define RT3883_GPIO_GE1_RXD0           90
+#define RT3883_GPIO_GE1_RXD1           91
+#define RT3883_GPIO_GE1_RXD2           92
+#define RT3883_GPIO_GE1_RXD3           93
+#define RT3883_GPIO_GE1_RXDV           94
+#define RT3883_GPIO_GE1_RXCLK          95
+
+#define RT3883_RSTCTRL_PCIE_PCI_PDM    BIT(27)
+#define RT3883_RSTCTRL_FLASH           BIT(26)
+#define RT3883_RSTCTRL_UDEV            BIT(25)
+#define RT3883_RSTCTRL_PCI             BIT(24)
+#define RT3883_RSTCTRL_PCIE            BIT(23)
+#define RT3883_RSTCTRL_UHST            BIT(22)
+#define RT3883_RSTCTRL_FE              BIT(21)
+#define RT3883_RSTCTRL_WLAN            BIT(20)
+#define RT3883_RSTCTRL_UART1           BIT(29)
+#define RT3883_RSTCTRL_SPI             BIT(18)
+#define RT3883_RSTCTRL_I2S             BIT(17)
+#define RT3883_RSTCTRL_I2C             BIT(16)
+#define RT3883_RSTCTRL_NAND            BIT(15)
+#define RT3883_RSTCTRL_DMA             BIT(14)
+#define RT3883_RSTCTRL_PIO             BIT(13)
+#define RT3883_RSTCTRL_UART            BIT(12)
+#define RT3883_RSTCTRL_PCM             BIT(11)
+#define RT3883_RSTCTRL_MC              BIT(10)
+#define RT3883_RSTCTRL_INTC            BIT(9)
+#define RT3883_RSTCTRL_TIMER           BIT(8)
+#define RT3883_RSTCTRL_SYS             BIT(0)
+
+#define RT3883_INTC_INT_SYSCTL BIT(0)
+#define RT3883_INTC_INT_TIMER0 BIT(1)
+#define RT3883_INTC_INT_TIMER1 BIT(2)
+#define RT3883_INTC_INT_IA     BIT(3)
+#define RT3883_INTC_INT_PCM    BIT(4)
+#define RT3883_INTC_INT_UART0  BIT(5)
+#define RT3883_INTC_INT_PIO    BIT(6)
+#define RT3883_INTC_INT_DMA    BIT(7)
+#define RT3883_INTC_INT_NAND   BIT(8)
+#define RT3883_INTC_INT_PERFC  BIT(9)
+#define RT3883_INTC_INT_I2S    BIT(10)
+#define RT3883_INTC_INT_UART1  BIT(12)
+#define RT3883_INTC_INT_UHST   BIT(18)
+#define RT3883_INTC_INT_UDEV   BIT(19)
+
+/* FLASH/SRAM/Codec Controller registers */
+#define RT3883_FSCC_REG_FLASH_CFG0     0x00
+#define RT3883_FSCC_REG_FLASH_CFG1     0x04
+#define RT3883_FSCC_REG_CODEC_CFG0     0x40
+#define RT3883_FSCC_REG_CODEC_CFG1     0x44
+
+#define RT3883_FLASH_CFG_WIDTH_SHIFT   26
+#define RT3883_FLASH_CFG_WIDTH_MASK    0x3
+#define RT3883_FLASH_CFG_WIDTH_8BIT    0x0
+#define RT3883_FLASH_CFG_WIDTH_16BIT   0x1
+#define RT3883_FLASH_CFG_WIDTH_32BIT   0x2
+
+#define RT3883_SDRAM_BASE              0x00000000
+#define RT3883_MEM_SIZE_MIN            2
+#define RT3883_MEM_SIZE_MAX            256
+
+#endif /* _RT3883_REGS_H_ */
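Most of rt3883.h is a flat register map, but RT3883_GPIO_MODE_UART0(x) is a small field helper that shifts one of the UARTF/PCM/I2S/GPIO sharing codes into the GPIO_MODE register. A hedged pinmux sketch; the rt_sysc_r32()/rt_sysc_w32() accessors and the function are assumptions, the register offset and field macros come from this header:

#include <linux/kernel.h>
#include <asm/mach-ralink/ralink_regs.h>	/* rt_sysc_r32()/rt_sysc_w32(), assumed SYSC accessors */
#include <asm/mach-ralink/rt3883.h>

/* Illustrative only: hand the shared UARTF pin group over to plain GPIO. */
static void example_rt3883_uartf_as_gpio(void)
{
	u32 mode = rt_sysc_r32(RT3883_SYSC_REG_GPIO_MODE);

	mode &= ~RT3883_GPIO_MODE_UART0(RT3883_GPIO_MODE_UART0_MASK);
	mode |= RT3883_GPIO_MODE_UART0(RT3883_GPIO_MODE_GPIO);

	rt_sysc_w32(mode, RT3883_SYSC_REG_GPIO_MODE);
}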
diff --git a/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..181fbf4
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Ralink RT3662/RT3883 specific CPU feature overrides
+ *
+ * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ *     Copyright (C) 2003, 2004 Ralf Baechle
+ *     Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT3883_CPU_FEATURE_OVERRIDES_H
+#define _RT3883_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb            1
+#define cpu_has_4kex           1
+#define cpu_has_3k_cache       0
+#define cpu_has_4k_cache       1
+#define cpu_has_tx39_cache     0
+#define cpu_has_sb1_cache      0
+#define cpu_has_fpu            0
+#define cpu_has_32fpr          0
+#define cpu_has_counter                1
+#define cpu_has_watch          1
+#define cpu_has_divec          1
+
+#define cpu_has_prefetch       1
+#define cpu_has_ejtag          1
+#define cpu_has_llsc           1
+
+#define cpu_has_mips16         1
+#define cpu_has_mdmx           0
+#define cpu_has_mips3d         0
+#define cpu_has_smartmips      0
+
+#define cpu_has_mips32r1       1
+#define cpu_has_mips32r2       1
+#define cpu_has_mips64r1       0
+#define cpu_has_mips64r2       0
+
+#define cpu_has_dsp            1
+#define cpu_has_mipsmt         0
+
+#define cpu_has_64bits         0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs  0
+#define cpu_has_64bit_addresses        0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#endif /* _RT3883_CPU_FEATURE_OVERRIDES_H */
index 193c0912d38e651519b8fb8b7bbc9fa31d7893e8..bfbd7035d4c54541787fb7de42cbafe7b1230edb 100644 (file)
 /* #define cpu_has_prefetch    ? */
 #define cpu_has_mcheck         1
 /* #define cpu_has_ejtag       ? */
+#ifdef CONFIG_CPU_MICROMIPS
+#define cpu_has_llsc           0
+#else
 #define cpu_has_llsc           1
+#endif
 /* #define cpu_has_vtag_icache ? */
 /* #define cpu_has_dc_aliases  ? */
 /* #define cpu_has_ic_fills_f_dc ? */
index 44a09a64160ac52fac8862beaaa8a9c9815f3fd2..bd9746fbe4af8a6b098df65c5f386cecec86f2e4 100644 (file)
@@ -83,4 +83,7 @@ extern void mips_pcibios_init(void);
 #define mips_pcibios_init() do { } while (0)
 #endif
 
+extern void mips_scroll_message(void);
+extern void mips_display_message(const char *str);
+
 #endif /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/mips-boards/prom.h b/arch/mips/include/asm/mips-boards/prom.h
deleted file mode 100644 (file)
index e7aed3e..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * ########################################################################
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- * MIPS boards bootprom interface for the Linux kernel.
- *
- */
-
-#ifndef _MIPS_PROM_H
-#define _MIPS_PROM_H
-
-extern char *prom_getcmdline(void);
-extern char *prom_getenv(char *name);
-extern void prom_init_cmdline(void);
-extern void prom_meminit(void);
-extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem);
-extern void mips_display_message(const char *str);
-extern void mips_display_word(unsigned int num);
-extern void mips_scroll_message(void);
-extern int get_ethernet_addr(char *ethernet_addr);
-
-/* Memory descriptor management. */
-#define PROM_MAX_PMEMBLOCKS    32
-struct prom_pmemblock {
-       unsigned long base; /* Within KSEG0. */
-       unsigned int size;  /* In bytes. */
-       unsigned int type;  /* free or prom memory */
-};
-
-#endif /* !(_MIPS_PROM_H) */
index 363bb352c7f70b4da402ed8725ce383a0f22a6e1..9d00aebe98426c5ab93c44bb0df4b09c1d552b7b 100644 (file)
@@ -42,13 +42,9 @@ extern long __mips_machines_end;
 #ifdef CONFIG_MIPS_MACHINE
 int  mips_machtype_setup(char *id) __init;
 void mips_machine_setup(void) __init;
-void mips_set_machine_name(const char *name) __init;
-char *mips_get_machine_name(void);
 #else
 static inline int mips_machtype_setup(char *id) { return 1; }
 static inline void mips_machine_setup(void) { }
-static inline void mips_set_machine_name(const char *name) { }
-static inline char *mips_get_machine_name(void) { return NULL; }
 #endif /* CONFIG_MIPS_MACHINE */
 
 #endif /* __ASM_MIPS_MACHINE_H */
index 0da44d422f5b0242380bfc0dde4973b547c1900e..87e6207b05e4334f242cadf80b01305811edd3fd 100644 (file)
 #define MIPS_CONF3_RXI         (_ULCAST_(1) << 12)
 #define MIPS_CONF3_ULRI                (_ULCAST_(1) << 13)
 #define MIPS_CONF3_ISA         (_ULCAST_(3) << 14)
+#define MIPS_CONF3_ISA_OE      (_ULCAST_(3) << 16)
 #define MIPS_CONF3_VZ          (_ULCAST_(1) << 23)
 
 #define MIPS_CONF4_MMUSIZEEXT  (_ULCAST_(255) << 0)
 
 #ifndef __ASSEMBLY__
 
+/*
+ * Macros for handling the ISA mode bit for microMIPS.
+ */
+#define get_isa16_mode(x)              ((x) & 0x1)
+#define msk_isa16_mode(x)              ((x) & ~0x1)
+#define set_isa16_mode(x)              do { (x) |= 0x1; } while(0)
+
+/*
+ * microMIPS instructions can be 16-bit or 32-bit in length. This
+ * returns a 1 if the instruction is 16-bit and a 0 if 32-bit.
+ */
+static inline int mm_insn_16bit(u16 insn)
+{
+       u16 opcode = (insn >> 10) & 0x7;
+
+       return (opcode >= 1 && opcode <= 3) ? 1 : 0;
+}
+
 /*
  * Functions to access the R10000 performance counters.         These are basically
  * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit
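The microMIPS helpers added above encode two conventions: bit 0 of a code address flags the ISA mode (1 means microMIPS), and the top opcode bits of the first halfword say whether the instruction is 16 or 32 bits wide. A minimal sketch of how exception-path code might combine them; the function and the direct halfword fetch are hypothetical, only the three macros and mm_insn_16bit() come from the header:

#include <linux/types.h>
#include <asm/mipsregs.h>	/* get_isa16_mode(), msk_isa16_mode(), mm_insn_16bit() */

/* Illustrative: size the instruction at 'epc' so the caller can step over it. */
static unsigned long example_next_pc(unsigned long epc)
{
	if (get_isa16_mode(epc)) {	/* running microMIPS code */
		u16 first = *(u16 *)msk_isa16_mode(epc);	/* hypothetical direct fetch, no fault handling */

		return epc + (mm_insn_16bit(first) ? 2 : 4);
	}

	return epc + 4;			/* classic MIPS instructions are always 32-bit */
}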
index e81d719efcd18e9e226d07e8d69e29b970c89280..1554721e4808e7ffc67d61bc87646cbdd5a325be 100644 (file)
 
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 
-#define TLBMISS_HANDLER_SETUP_PGD(pgd)                         \
-       tlbmiss_handler_setup_pgd((unsigned long)(pgd))
-
-extern void tlbmiss_handler_setup_pgd(unsigned long pgd);
+#define TLBMISS_HANDLER_SETUP_PGD(pgd)                                 \
+do {                                                                   \
+       void (*tlbmiss_handler_setup_pgd)(unsigned long);               \
+       extern u32 tlbmiss_handler_setup_pgd_array[16];                 \
+                                                                       \
+       tlbmiss_handler_setup_pgd =                                     \
+               (__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \
+       tlbmiss_handler_setup_pgd((unsigned long)(pgd));                \
+} while (0)
 
 #define TLBMISS_HANDLER_SETUP()                                                \
        do {                                                            \
@@ -62,59 +67,88 @@ extern unsigned long pgd_current[];
        TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-
-#define ASID_INC       0x40
-#define ASID_MASK      0xfc0
-
-#elif defined(CONFIG_CPU_R8000)
-
-#define ASID_INC       0x10
-#define ASID_MASK      0xff0
 
-#elif defined(CONFIG_MIPS_MT_SMTC)
-
-#define ASID_INC       0x1
-extern unsigned long smtc_asid_mask;
-#define ASID_MASK      (smtc_asid_mask)
-#define HW_ASID_MASK   0xff
-/* End SMTC/34K debug hack */
-#else /* FIXME: not correct for R6000 */
-
-#define ASID_INC       0x1
-#define ASID_MASK      0xff
+#define ASID_INC(asid)                                         \
+({                                                             \
+       unsigned long __asid = asid;                            \
+       __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t"          \
+       ".section\t__asid_inc,\"a\"\n\t"                        \
+       ".word\t1b\n\t"                                         \
+       ".previous"                                             \
+       :"=r" (__asid)                                          \
+       :"0" (__asid));                                         \
+       __asid;                                                 \
+})
+#define ASID_MASK(asid)                                                \
+({                                                             \
+       unsigned long __asid = asid;                            \
+       __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t"      \
+       ".section\t__asid_mask,\"a\"\n\t"                       \
+       ".word\t1b\n\t"                                         \
+       ".previous"                                             \
+       :"=r" (__asid)                                          \
+       :"r" (__asid));                                         \
+       __asid;                                                 \
+})
+#define ASID_VERSION_MASK                                      \
+({                                                             \
+       unsigned long __asid;                                   \
+       __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t"  \
+       ".section\t__asid_version_mask,\"a\"\n\t"               \
+       ".word\t1b\n\t"                                         \
+       ".previous"                                             \
+       :"=r" (__asid));                                        \
+       __asid;                                                 \
+})
+#define ASID_FIRST_VERSION                                     \
+({                                                             \
+       unsigned long __asid;                                   \
+       __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t"         \
+       ".section\t__asid_first_version,\"a\"\n\t"              \
+       ".word\t1b\n\t"                                         \
+       ".previous"                                             \
+       :"=r" (__asid));                                        \
+       __asid;                                                 \
+})
+
+#define ASID_FIRST_VERSION_R3000       0x1000
+#define ASID_FIRST_VERSION_R4000       0x100
+#define ASID_FIRST_VERSION_R8000       0x1000
+#define ASID_FIRST_VERSION_RM9000      0x1000
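The "# patched" markers and the __asid_* sections above record the address of each emitted instruction so that boot code can rewrite its immediate once the CPU's real ASID layout is known, replacing the compile-time constants the deleted block used to hard-wire (increment/mask 0x40/0xfc0 on R3000-class cores versus 1/0xff on R4000-class ones). A rough sketch of such a fixup pass, with assumed linker-symbol names and no icache maintenance shown:

    /* Hypothetical sketch only: rewrite the 16-bit immediate of every addiu
     * recorded in the __asid_inc section with this CPU's ASID increment.
     * Each section entry is the ".word 1b" address emitted by ASID_INC().
     */
    extern u32 __start___asid_inc[], __stop___asid_inc[];   /* assumed symbols */

    static void __init patch_asid_inc(unsigned int inc)
    {
            u32 *entry;

            for (entry = __start___asid_inc; entry < __stop___asid_inc; entry++) {
                    u32 *insn = (u32 *)(unsigned long)*entry;

                    *insn = (*insn & ~0xffffu) | (inc & 0xffffu);  /* I-type immediate */
            }
            /* a real implementation must also flush the icache over the patched sites */
    }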
 
+#ifdef CONFIG_MIPS_MT_SMTC
+#define SMTC_HW_ASID_MASK              0xff
+extern unsigned int smtc_asid_mask;
 #endif
 
 #define cpu_context(cpu, mm)   ((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)      (cpu_context((cpu), (mm)) & ASID_MASK)
+#define cpu_asid(cpu, mm)      ASID_MASK(cpu_context((cpu), (mm)))
 #define asid_cache(cpu)                (cpu_data[cpu].asid_cache)
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-/*
- *  All unused by hardware upper bits will be considered
- *  as a software asid extension.
- */
-#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
-#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
-
 #ifndef CONFIG_MIPS_MT_SMTC
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
+       extern void kvm_local_flush_tlb_all(void);
        unsigned long asid = asid_cache(cpu);
 
-       if (! ((asid += ASID_INC) & ASID_MASK) ) {
+       if (!ASID_MASK((asid = ASID_INC(asid)))) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();
+#ifdef CONFIG_VIRTUALIZATION
+               kvm_local_flush_tlb_all();      /* start new asid cycle */
+#else
                local_flush_tlb_all();  /* start new asid cycle */
+#endif
                if (!asid)              /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }
+
        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
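A worked example of the rollover path above, assuming the classic 8-bit ASID layout (mask 0xff, first version 0x100): with asid_cache(cpu) at 0x1ff, the increment produces 0x200; ASID_MASK(0x200) is 0, so the TLB (and a virtually tagged icache, if present) is flushed and 0x200 opens a new ASID generation. Only if the counter wraps all the way around to 0 is it bumped to ASID_FIRST_VERSION, so a live context never carries the value 0 that init_new_context() below uses for a fresh mm.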
 
@@ -133,7 +167,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
        int i;
 
-       for_each_online_cpu(i)
+       for_each_possible_cpu(i)
                cpu_context(i, mm) = 0;
 
        return 0;
@@ -166,7 +200,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         * free up the ASID value for use and flush any old
         * instances of it from the TLB.
         */
-       oldasid = (read_c0_entryhi() & ASID_MASK);
+       oldasid = ASID_MASK(read_c0_entryhi());
        if(smtc_live_asid[mytlb][oldasid]) {
                smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -177,7 +211,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         * having ASID_MASK smaller than the hardware maximum,
         * make sure no "soft" bits become "hard"...
         */
-       write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
+       write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
                         cpu_asid(cpu, next));
        ehb(); /* Make sure it propagates to TCStatus */
        evpe(mtflags);
@@ -230,15 +264,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 #ifdef CONFIG_MIPS_MT_SMTC
        /* See comments for similar code above */
        mtflags = dvpe();
-       oldasid = read_c0_entryhi() & ASID_MASK;
+       oldasid = ASID_MASK(read_c0_entryhi());
        if(smtc_live_asid[mytlb][oldasid]) {
                smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                if(smtc_live_asid[mytlb][oldasid] == 0)
                         smtc_flush_tlb_asid(oldasid);
        }
        /* See comments for similar code above */
-       write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
-                        cpu_asid(cpu, next));
+       write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+                        cpu_asid(cpu, next));
        ehb(); /* Make sure it propagates to TCStatus */
        evpe(mtflags);
 #else
@@ -275,14 +309,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 #ifdef CONFIG_MIPS_MT_SMTC
                /* See comments for similar code above */
                prevvpe = dvpe();
-               oldasid = (read_c0_entryhi() & ASID_MASK);
+               oldasid = ASID_MASK(read_c0_entryhi());
                if (smtc_live_asid[mytlb][oldasid]) {
                        smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                        if(smtc_live_asid[mytlb][oldasid] == 0)
                                smtc_flush_tlb_asid(oldasid);
                }
                /* See comments for similar code above */
-               write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
+               write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
                                | cpu_asid(cpu, mm));
                ehb(); /* Make sure it propagates to TCStatus */
                evpe(prevvpe);
index 419d8aef8569333418f68f61e88144d1e0b37285..79c7cccdc22ca5b688cfe6f5bdb125f17b37a0dd 100644 (file)
 #ifndef __NLM_HAL_HALDEFS_H__
 #define __NLM_HAL_HALDEFS_H__
 
+#include <linux/irqflags.h>    /* for local_irq_save()/local_irq_restore() */
+
 /*
  * This file contains platform specific memory mapped IO implementation
  * and will provide a way to read 32/64 bit memory mapped registers in
  * all ABIs
  */
-#if !defined(CONFIG_64BIT) && defined(CONFIG_CPU_XLP)
-#error "o32 compile not supported on XLP yet"
-#endif
-/*
- * For o32 compilation, we have to disable interrupts and enable KX bit to
- * access 64 bit addresses or data.
- *
- * We need to disable interrupts because we save just the lower 32 bits of
- * registers in         interrupt handling. So if we get hit by an interrupt while
- * using the upper 32 bits of a register, we lose.
- */
-static inline uint32_t nlm_save_flags_kx(void)
-{
-       return change_c0_status(ST0_KX | ST0_IE, ST0_KX);
-}
-
-static inline uint32_t nlm_save_flags_cop2(void)
-{
-       return change_c0_status(ST0_CU2 | ST0_IE, ST0_CU2);
-}
-
-static inline void nlm_restore_flags(uint32_t sr)
-{
-       write_c0_status(sr);
-}
-
-/*
- * The n64 implementations are simple, the o32 implementations when they
- * are added, will have to disable interrupts and enable KX before doing
- * 64 bit ops.
- */
 static inline uint32_t
 nlm_read_reg(uint64_t base, uint32_t reg)
 {
@@ -87,13 +58,40 @@ nlm_write_reg(uint64_t base, uint32_t reg, uint32_t val)
        *addr = val;
 }
 
+/*
+ * For o32 compilation, we have to disable interrupts to access 64-bit
+ * registers.
+ *
+ * We need to disable interrupts because we save just the lower 32 bits of
+ * registers in interrupt handling. So if we get hit by an interrupt while
+ * using the upper 32 bits of a register, we lose.
+ */
+
 static inline uint64_t
 nlm_read_reg64(uint64_t base, uint32_t reg)
 {
        uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
        volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
-
-       return *ptr;
+       uint64_t val;
+
+       if (sizeof(unsigned long) == 4) {
+               unsigned long flags;
+
+               local_irq_save(flags);
+               __asm__ __volatile__(
+                       ".set   push"                   "\n\t"
+                       ".set   mips64"                 "\n\t"
+                       "ld     %L0, %1"                "\n\t"
+                       "dsra32 %M0, %L0, 0"            "\n\t"
+                       "sll    %L0, %L0, 0"            "\n\t"
+                       ".set   pop"                    "\n"
+                       : "=r" (val)
+                       : "m" (*ptr));
+               local_irq_restore(flags);
+       } else
+               val = *ptr;
+
+       return val;
 }
 
 static inline void
@@ -102,7 +100,25 @@ nlm_write_reg64(uint64_t base, uint32_t reg, uint64_t val)
        uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
        volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
 
-       *ptr = val;
+       if (sizeof(unsigned long) == 4) {
+               unsigned long flags;
+               uint64_t tmp;
+
+               local_irq_save(flags);
+               __asm__ __volatile__(
+                       ".set   push"                   "\n\t"
+                       ".set   mips64"                 "\n\t"
+                       "dsll32 %L0, %L0, 0"            "\n\t"
+                       "dsrl32 %L0, %L0, 0"            "\n\t"
+                       "dsll32 %M0, %M0, 0"            "\n\t"
+                       "or     %L0, %L0, %M0"          "\n\t"
+                       "sd     %L0, %2"                "\n\t"
+                       ".set   pop"                    "\n"
+                       : "=r" (tmp)
+                       : "0" (val), "m" (*ptr));
+               local_irq_restore(flags);
+       } else
+               *ptr = val;
 }
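A concrete trace of the o32 paths above, for illustration, reading a register that holds 0x11223344aabbccdd: the 64-bit ld leaves the whole value in the GPR backing %L0; dsra32 %M0, %L0, 0 arithmetically shifts it right by 32, so the high half of the C uint64_t (%M0) ends up as 0x11223344; sll %L0, %L0, 0 then sign-extends the low word, leaving 0xaabbccdd in %L0. The write path is the inverse: dsll32 followed by dsrl32 zero-extends the low half, dsll32 moves the high half into bits 63:32, or recombines them, and a single sd stores the doubleword, all with interrupts off so the upper register halves cannot be clobbered.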
 
 /*
@@ -143,14 +159,6 @@ nlm_pcicfg_base(uint32_t devoffset)
        return nlm_io_base + devoffset;
 }
 
-static inline uint64_t
-nlm_xkphys_map_pcibar0(uint64_t pcibase)
-{
-       uint64_t paddr;
-
-       paddr = nlm_read_reg(pcibase, 0x4) & ~0xfu;
-       return (uint64_t)0x9000000000000000 | paddr;
-}
 #elif defined(CONFIG_CPU_XLR)
 
 static inline uint64_t
index 8ad2e0f81719814f077659d48c46a3b4f87c578d..f299d31d7c1a3faf7eeffd22f7409cce90954cd6 100644 (file)
 /*
  * XLR and XLP interrupt request and interrupt mask registers
  */
-#define read_c0_eirr()         __read_64bit_c0_register($9, 6)
-#define read_c0_eimr()         __read_64bit_c0_register($9, 7)
-#define write_c0_eirr(val)     __write_64bit_c0_register($9, 6, val)
-
 /*
- * Writing EIMR in 32 bit is a special case, the lower 8 bit of the
- * EIMR is shadowed in the status register, so we cannot save and
- * restore status register for split read.
+ * NOTE: Do not save/restore flags around write_c0_eimr().
+ * On non-R2 platforms the saved flags value contains the part of EIMR that is
+ * shadowed in the STATUS register, so restoring flags would overwrite the
+ * lower 8 bits of EIMR.
+ *
+ * Call with interrupts disabled.
  */
 #define write_c0_eimr(val)                                             \
 do {                                                                   \
        if (sizeof(unsigned long) == 4) {                               \
-               unsigned long __flags;                                  \
-                                                                       \
-               local_irq_save(__flags);                                \
                __asm__ __volatile__(                                   \
                        ".set\tmips64\n\t"                              \
                        "dsll\t%L0, %L0, 32\n\t"                        \
@@ -62,8 +57,6 @@ do {                                                                  \
                        "dmtc0\t%L0, $9, 7\n\t"                         \
                        ".set\tmips0"                                   \
                        : : "r" (val));                                 \
-               __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\
-               local_irq_restore(__flags);                             \
        } else                                                          \
                __write_64bit_c0_register($9, 7, (val));                \
 } while (0)
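A hypothetical caller, sketching the contract stated in the comment above (not part of this patch): the EIMR update happens with interrupts already off, and no previously saved flags value is restored around it, since that would put the old shadowed low 8 bits back.

    /* Hypothetical caller sketch for write_c0_eimr(). */
    static void xlr_set_eimr(uint64_t eimr)
    {
            WARN_ON(!irqs_disabled());      /* contract from the comment above */
            write_c0_eimr(eimr);
    }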
@@ -128,7 +121,7 @@ static inline uint64_t read_c0_eirr_and_eimr(void)
        uint64_t val;
 
 #ifdef CONFIG_64BIT
-       val = read_c0_eimr() & read_c0_eirr();
+       val = __read_64bit_c0_register($9, 6) & __read_64bit_c0_register($9, 7);
 #else
        __asm__ __volatile__(
                ".set   push\n\t"
@@ -143,7 +136,6 @@ static inline uint64_t read_c0_eirr_and_eimr(void)
                ".set   pop"
                : "=r" (val));
 #endif
-
        return val;
 }
 
index 3df53017fe513f29b8ee99a6ce2ff11ad78b198c..a981f4681a154a53fb6fed0bffc788be83ed24a3 100644 (file)
 #define PIC_IRT_PCIE_LINK_2_INDEX      80
 #define PIC_IRT_PCIE_LINK_3_INDEX      81
 #define PIC_IRT_PCIE_LINK_INDEX(num)   ((num) + PIC_IRT_PCIE_LINK_0_INDEX)
-/* 78 to 81 */
-#define PIC_NUM_NA_IRTS                        32
-/* 82 to 113 */
-#define PIC_IRT_NA_0_INDEX             82
-#define PIC_IRT_NA_INDEX(num)          ((num) + PIC_IRT_NA_0_INDEX)
-#define PIC_IRT_POE_INDEX              114
-
-#define PIC_NUM_USB_IRTS               6
-#define PIC_IRT_USB_0_INDEX            115
-#define PIC_IRT_EHCI_0_INDEX           115
-#define PIC_IRT_OHCI_0_INDEX           116
-#define PIC_IRT_OHCI_1_INDEX           117
-#define PIC_IRT_EHCI_1_INDEX           118
-#define PIC_IRT_OHCI_2_INDEX           119
-#define PIC_IRT_OHCI_3_INDEX           120
-#define PIC_IRT_USB_INDEX(num)         ((num) + PIC_IRT_USB_0_INDEX)
-/* 115 to 120 */
-#define PIC_IRT_GDX_INDEX              121
-#define PIC_IRT_SEC_INDEX              122
-#define PIC_IRT_RSA_INDEX              123
-
-#define PIC_NUM_COMP_IRTS              4
-#define PIC_IRT_COMP_0_INDEX           124
-#define PIC_IRT_COMP_INDEX(num)                ((num) + PIC_IRT_COMP_0_INDEX)
-/* 124 to 127 */
-#define PIC_IRT_GBU_INDEX              128
-#define PIC_IRT_ICC_0_INDEX            129 /* ICC - Inter Chip Coherency */
-#define PIC_IRT_ICC_1_INDEX            130
-#define PIC_IRT_ICC_2_INDEX            131
-#define PIC_IRT_CAM_INDEX              132
-#define PIC_IRT_UART_0_INDEX           133
-#define PIC_IRT_UART_1_INDEX           134
-#define PIC_IRT_I2C_0_INDEX            135
-#define PIC_IRT_I2C_1_INDEX            136
-#define PIC_IRT_SYS_0_INDEX            137
-#define PIC_IRT_SYS_1_INDEX            138
-#define PIC_IRT_JTAG_INDEX             139
-#define PIC_IRT_PIC_INDEX              140
-#define PIC_IRT_NBU_INDEX              141
-#define PIC_IRT_TCU_INDEX              142
-#define PIC_IRT_GCU_INDEX              143 /* GBC - Global Coherency */
-#define PIC_IRT_DMC_0_INDEX            144
-#define PIC_IRT_DMC_1_INDEX            145
-
-#define PIC_NUM_GPIO_IRTS              4
-#define PIC_IRT_GPIO_0_INDEX           146
-#define PIC_IRT_GPIO_INDEX(num)                ((num) + PIC_IRT_GPIO_0_INDEX)
-
-/* 146 to 149 */
-#define PIC_IRT_NOR_INDEX              150
-#define PIC_IRT_NAND_INDEX             151
-#define PIC_IRT_SPI_INDEX              152
-#define PIC_IRT_MMC_INDEX              153
 
 #define PIC_CLOCK_TIMER                        7
 #define PIC_IRQ_BASE                   8
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/usb.h b/arch/mips/include/asm/netlogic/xlp-hal/usb.h
deleted file mode 100644 (file)
index a9cd350..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __NLM_HAL_USB_H__
-#define __NLM_HAL_USB_H__
-
-#define USB_CTL_0                      0x01
-#define USB_PHY_0                      0x0A
-#define USB_PHY_RESET                  0x01
-#define USB_PHY_PORT_RESET_0           0x10
-#define USB_PHY_PORT_RESET_1           0x20
-#define USB_CONTROLLER_RESET           0x01
-#define USB_INT_STATUS                 0x0E
-#define USB_INT_EN                     0x0F
-#define USB_PHY_INTERRUPT_EN           0x01
-#define USB_OHCI_INTERRUPT_EN          0x02
-#define USB_OHCI_INTERRUPT1_EN         0x04
-#define USB_OHCI_INTERRUPT2_EN         0x08
-#define USB_CTRL_INTERRUPT_EN          0x10
-
-#ifndef __ASSEMBLY__
-
-#define nlm_read_usb_reg(b, r)                 nlm_read_reg(b, r)
-#define nlm_write_usb_reg(b, r, v)             nlm_write_reg(b, r, v)
-#define nlm_get_usb_pcibase(node, inst)                \
-       nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
-#define nlm_get_usb_hcd_base(node, inst)       \
-       nlm_xkphys_map_pcibar0(nlm_get_usb_pcibase(node, inst))
-#define nlm_get_usb_regbase(node, inst)                \
-       (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
-
-#endif
-#endif /* __NLM_HAL_USB_H__ */
index fdc62fb5630da68bb746ed3dac53b45eb5bddc37..8b8f6b39336350b8c570b30d990f4b16332e29db 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef _ASM_PGTABLE_H
 #define _ASM_PGTABLE_H
 
+#include <linux/mm_types.h>
 #include <linux/mmzone.h>
 #ifdef CONFIG_32BIT
 #include <asm/pgtable-32.h>
index 2a5fa7abb346b8633ddda81ff40766c0e2e47aef..71686c897deaa2651bbb5808ae4192f57ea1f54f 100644 (file)
@@ -44,11 +44,16 @@ extern unsigned int vced_count, vcei_count;
 #define SPECIAL_PAGES_SIZE PAGE_SIZE
 
 #ifdef CONFIG_32BIT
+#ifdef CONFIG_KVM_GUEST
+/* User space process size is limited to 1GB in KVM Guest Mode */
+#define TASK_SIZE      0x3fff8000UL
+#else
 /*
  * User space process size: 2GB. This is hardcoded into a few places,
  * so don't change it unless you know what you are doing.
  */
 #define TASK_SIZE      0x7fff8000UL
+#endif
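For scale, 0x3fff8000 is 0x40000000 (1 GB) minus 0x8000 (32 KB), mirroring the regular 32-bit limit 0x7fff8000, which is 2 GB minus the same 32 KB.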
 
 #ifdef __KERNEL__
 #define STACK_TOP_MAX  TASK_SIZE
index 8808bf548b99d2e0b4966b162f22ed4505d046e0..1e7e0961064bdcdbeba50f3fdce48b1c7cc9029d 100644 (file)
@@ -48,4 +48,7 @@ extern void __dt_setup_arch(struct boot_param_header *bph);
 static inline void device_tree_init(void) { }
 #endif /* CONFIG_OF */
 
+extern char *mips_get_machine_name(void);
+extern void mips_set_machine_name(const char *name);
+
 #endif /* __ASM_PROM_H */
index 1a2c3025bf2892ba98feb2a338ae2cf18c5c3e4c..fdfae43d8b99e49b4f6b20565f6e97178f00a216 100644 (file)
@@ -14,6 +14,6 @@ extern void install_cpu_nmi_handler(int slice);
 extern void install_ipi(void);
 extern void setup_replication_mask(void);
 extern void replicate_kernel_text(void);
-extern pfn_t node_getfirstfree(cnodeid_t);
+extern unsigned long node_getfirstfree(cnodeid_t);
 
 #endif /* __ASM_SN_SN_PRIVATE_H */
index c4813d67aec3f5543272a53e1d87e9a1b202e193..6d24d4e8b9ed855cb27a860133e1a541c91870ab 100644 (file)
@@ -19,7 +19,6 @@ typedef signed char   partid_t;       /* partition ID type */
 typedef signed short   moduleid_t;     /* user-visible module number type */
 typedef signed short   cmoduleid_t;    /* kernel compact module id type */
 typedef unsigned char  clusterid_t;    /* Clusterid of the cell */
-typedef unsigned long  pfn_t;
 
 typedef dev_t          vertex_hdl_t;   /* hardware graph vertex handle */
 
index 5130c88d64204b398be70c968823d408bb27f93c..78d201fb6c87c93608b8295327277a6e8804818e 100644 (file)
@@ -71,7 +71,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
                "        nop                                            \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
                "       andi    %[ticket], %[ticket], 0xffff            \n"
-               "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       bne     %[ticket], %[my_ticket], 4f             \n"
                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
                "2:                                                     \n"
@@ -105,7 +104,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
                "       beqz    %[my_ticket], 1b                        \n"
                "        srl    %[my_ticket], %[ticket], 16             \n"
                "       andi    %[ticket], %[ticket], 0xffff            \n"
-               "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       bne     %[ticket], %[my_ticket], 4f             \n"
                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
                "2:                                                     \n"
@@ -153,7 +151,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
                "                                                       \n"
                "1:     ll      %[ticket], %[ticket_ptr]                \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
-               "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       andi    %[now_serving], %[ticket], 0xffff       \n"
                "       bne     %[my_ticket], %[now_serving], 3f        \n"
                "        addu   %[ticket], %[ticket], %[inc]            \n"
@@ -178,7 +175,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
                "                                                       \n"
                "1:     ll      %[ticket], %[ticket_ptr]                \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
-               "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       andi    %[now_serving], %[ticket], 0xffff       \n"
                "       bne     %[my_ticket], %[now_serving], 3f        \n"
                "        addu   %[ticket], %[ticket], %[inc]            \n"
@@ -242,25 +238,16 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
-               __asm__ __volatile__(
-               "       .set    noreorder       # arch_read_lock        \n"
-               "1:     ll      %1, %2                                  \n"
-               "       bltz    %1, 3f                                  \n"
-               "        addu   %1, 1                                   \n"
-               "2:     sc      %1, %0                                  \n"
-               "       beqz    %1, 1b                                  \n"
-               "        nop                                            \n"
-               "       .subsection 2                                   \n"
-               "3:     ll      %1, %2                                  \n"
-               "       bltz    %1, 3b                                  \n"
-               "        addu   %1, 1                                   \n"
-               "       b       2b                                      \n"
-               "        nop                                            \n"
-               "       .previous                                       \n"
-               "       .set    reorder                                 \n"
-               : "=m" (rw->lock), "=&r" (tmp)
-               : "m" (rw->lock)
-               : "memory");
+               do {
+                       __asm__ __volatile__(
+                       "1:     ll      %1, %2  # arch_read_lock        \n"
+                       "       bltz    %1, 1b                          \n"
+                       "        addu   %1, 1                           \n"
+                       "2:     sc      %1, %0                          \n"
+                       : "=m" (rw->lock), "=&r" (tmp)
+                       : "m" (rw->lock)
+                       : "memory");
+               } while (unlikely(!tmp));
        }
 
        smp_llsc_mb();
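The rewrite above drops the branch-heavy .subsection retry path and instead wraps a minimal ll/sc pair in a C do/while that retries whenever the store-conditional fails. The same shape expressed with portable C11 atomics, purely as an illustrative analogue and not kernel code:

    #include <stdatomic.h>

    /* Illustrative analogue of arch_read_lock(): poll while a writer (sign
     * bit) holds the lock, then retry the whole acquire if the CAS, standing
     * in for "sc", fails.
     */
    static void read_lock_sketch(atomic_int *lock)
    {
            int old;

            do {
                    do {
                            old = atomic_load_explicit(lock, memory_order_relaxed);
                    } while (old < 0);              /* writer present: keep polling */
            } while (!atomic_compare_exchange_weak_explicit(lock, &old, old + 1,
                            memory_order_acquire, memory_order_relaxed));
    }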
@@ -285,21 +272,15 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
-               __asm__ __volatile__(
-               "       .set    noreorder       # arch_read_unlock      \n"
-               "1:     ll      %1, %2                                  \n"
-               "       sub     %1, 1                                   \n"
-               "       sc      %1, %0                                  \n"
-               "       beqz    %1, 2f                                  \n"
-               "        nop                                            \n"
-               "       .subsection 2                                   \n"
-               "2:     b       1b                                      \n"
-               "        nop                                            \n"
-               "       .previous                                       \n"
-               "       .set    reorder                                 \n"
-               : "=m" (rw->lock), "=&r" (tmp)
-               : "m" (rw->lock)
-               : "memory");
+               do {
+                       __asm__ __volatile__(
+                       "1:     ll      %1, %2  # arch_read_unlock      \n"
+                       "       sub     %1, 1                           \n"
+                       "       sc      %1, %0                          \n"
+                       : "=m" (rw->lock), "=&r" (tmp)
+                       : "m" (rw->lock)
+                       : "memory");
+               } while (unlikely(!tmp));
        }
 }
 
@@ -321,25 +302,16 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
-               __asm__ __volatile__(
-               "       .set    noreorder       # arch_write_lock       \n"
-               "1:     ll      %1, %2                                  \n"
-               "       bnez    %1, 3f                                  \n"
-               "        lui    %1, 0x8000                              \n"
-               "2:     sc      %1, %0                                  \n"
-               "       beqz    %1, 3f                                  \n"
-               "        nop                                            \n"
-               "       .subsection 2                                   \n"
-               "3:     ll      %1, %2                                  \n"
-               "       bnez    %1, 3b                                  \n"
-               "        lui    %1, 0x8000                              \n"
-               "       b       2b                                      \n"
-               "        nop                                            \n"
-               "       .previous                                       \n"
-               "       .set    reorder                                 \n"
-               : "=m" (rw->lock), "=&r" (tmp)
-               : "m" (rw->lock)
-               : "memory");
+               do {
+                       __asm__ __volatile__(
+                       "1:     ll      %1, %2  # arch_write_lock       \n"
+                       "       bnez    %1, 1b                          \n"
+                       "        lui    %1, 0x8000                      \n"
+                       "2:     sc      %1, %0                          \n"
+                       : "=m" (rw->lock), "=&r" (tmp)
+                       : "m" (rw->lock)
+                       : "memory");
+               } while (unlikely(!tmp));
        }
 
        smp_llsc_mb();
@@ -424,25 +396,21 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
-               __asm__ __volatile__(
-               "       .set    noreorder       # arch_write_trylock    \n"
-               "       li      %2, 0                                   \n"
-               "1:     ll      %1, %3                                  \n"
-               "       bnez    %1, 2f                                  \n"
-               "       lui     %1, 0x8000                              \n"
-               "       sc      %1, %0                                  \n"
-               "       beqz    %1, 3f                                  \n"
-               "        li     %2, 1                                   \n"
-               "2:                                                     \n"
-               __WEAK_LLSC_MB
-               "       .subsection 2                                   \n"
-               "3:     b       1b                                      \n"
-               "        li     %2, 0                                   \n"
-               "       .previous                                       \n"
-               "       .set    reorder                                 \n"
-               : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-               : "m" (rw->lock)
-               : "memory");
+               do {
+                       __asm__ __volatile__(
+                       "       ll      %1, %3  # arch_write_trylock    \n"
+                       "       li      %2, 0                           \n"
+                       "       bnez    %1, 2f                          \n"
+                       "       lui     %1, 0x8000                      \n"
+                       "       sc      %1, %0                          \n"
+                       "       li      %2, 1                           \n"
+                       "2:                                             \n"
+                       : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
+                       : "m" (rw->lock)
+                       : "memory");
+               } while (unlikely(!tmp));
+
+               smp_llsc_mb();
        }
 
        return ret;
index c99384018161ec77fe54e72f1ae851b2b04c8ac3..a89d1b10d027ce65d83eecb9dd57bfda7859400e 100644 (file)
 1:             move    ra, k0
                li      k0, 3
                mtc0    k0, $22
-#endif /* CONFIG_CPU_LOONGSON2F */
+#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
 #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
                lui     k1, %hi(kernelsp)
 #else
                LONG_S  $0, PT_R0(sp)
                mfc0    v1, CP0_STATUS
                LONG_S  $2, PT_R2(sp)
+               LONG_S  v1, PT_STATUS(sp)
 #ifdef CONFIG_MIPS_MT_SMTC
                /*
                 * Ideally, these instructions would be shuffled in
                LONG_S  k0, PT_TCSTATUS(sp)
 #endif /* CONFIG_MIPS_MT_SMTC */
                LONG_S  $4, PT_R4(sp)
-               LONG_S  $5, PT_R5(sp)
-               LONG_S  v1, PT_STATUS(sp)
                mfc0    v1, CP0_CAUSE
-               LONG_S  $6, PT_R6(sp)
-               LONG_S  $7, PT_R7(sp)
+               LONG_S  $5, PT_R5(sp)
                LONG_S  v1, PT_CAUSE(sp)
+               LONG_S  $6, PT_R6(sp)
                MFC0    v1, CP0_EPC
+               LONG_S  $7, PT_R7(sp)
 #ifdef CONFIG_64BIT
                LONG_S  $8, PT_R8(sp)
                LONG_S  $9, PT_R9(sp)
 #endif
+               LONG_S  v1, PT_EPC(sp)
                LONG_S  $25, PT_R25(sp)
                LONG_S  $28, PT_R28(sp)
                LONG_S  $31, PT_R31(sp)
-               LONG_S  v1, PT_EPC(sp)
                ori     $28, sp, _THREAD_MASK
                xori    $28, _THREAD_MASK
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
index 178f7924149ad4b5e06ff4aa1a1db868734a10d8..895320e25662cd98a673980c449ec916841920d7 100644 (file)
@@ -58,8 +58,12 @@ struct thread_info {
 #define init_stack             (init_thread_union.stack)
 
 /* How to get the thread information struct from C.  */
-register struct thread_info *__current_thread_info __asm__("$28");
-#define current_thread_info()  __current_thread_info
+static inline struct thread_info *current_thread_info(void)
+{
+       register struct thread_info *__current_thread_info __asm__("$28");
+
+       return __current_thread_info;
+}
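Callers are unchanged by converting the file-scope register variable into an inline function; a minimal usage sketch (illustrative only, the helper name is hypothetical):

    static inline void mark_sigpending_sketch(void)
    {
            /* $28 still carries the current thread_info pointer */
            set_ti_thread_flag(current_thread_info(), TIF_SIGPENDING);
    }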
 
 #endif /* !__ASSEMBLY__ */
 
index debc8009bd5814e1f22dda89c63fd2eda3497202..2d7b9df4542dd478d53f53d8151d3381d1c58aee 100644 (file)
@@ -52,13 +52,15 @@ extern int (*perf_irq)(void);
  */
 extern unsigned int __weak get_c0_compare_int(void);
 extern int r4k_clockevent_init(void);
+extern int smtc_clockevent_init(void);
+extern int gic_clockevent_init(void);
 
 static inline int mips_clockevent_init(void)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-       extern int smtc_clockevent_init(void);
-
        return smtc_clockevent_init();
+#elif defined(CONFIG_CEVT_GIC)
+       return (gic_clockevent_init() | r4k_clockevent_init());
 #elif defined(CONFIG_CEVT_R4K)
        return r4k_clockevent_init();
 #else
@@ -69,9 +71,7 @@ static inline int mips_clockevent_init(void)
 /*
  * Initialize the count register as a clocksource
  */
-#ifdef CONFIG_CSRC_R4K
 extern int init_r4k_clocksource(void);
-#endif
 
 static inline int init_mips_clocksource(void)
 {
index bd87e36bf26a3cc980c73804bbb8a5d009f4d2ef..f3fa3750f577c2414396943871a8f0bd6df6b928 100644 (file)
  */
 #ifdef CONFIG_32BIT
 
-#define __UA_LIMIT     0x80000000UL
+#ifdef CONFIG_KVM_GUEST
+#define __UA_LIMIT 0x40000000UL
+#else
+#define __UA_LIMIT 0x80000000UL
+#endif
 
 #define __UA_ADDR      ".word"
 #define __UA_LA                "la"
@@ -55,8 +59,13 @@ extern u64 __ua_limit;
  * address in this range it's the process's problem, not ours :-)
  */
 
+#ifdef CONFIG_KVM_GUEST
+#define KERNEL_DS      ((mm_segment_t) { 0x80000000UL })
+#define USER_DS                ((mm_segment_t) { 0xC0000000UL })
+#else
 #define KERNEL_DS      ((mm_segment_t) { 0UL })
 #define USER_DS                ((mm_segment_t) { __UA_LIMIT })
+#endif
 
 #define VERIFY_READ    0
 #define VERIFY_WRITE   1
@@ -261,6 +270,7 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %1, %3                          \n"     \
        "2:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
@@ -287,7 +297,9 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     lw      %1, (%3)                                \n"     \
        "2:     lw      %D1, 4(%3)                              \n"     \
-       "3:     .section        .fixup,\"ax\"                   \n"     \
+       "3:                                                     \n"     \
+       "       .insn                                           \n"     \
+       "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
@@ -355,6 +367,7 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %z2, %3         # __put_user_asm\n"     \
        "2:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
@@ -373,6 +386,7 @@ do {                                                                        \
        "1:     sw      %2, (%3)        # __put_user_asm_ll32   \n"     \
        "2:     sw      %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
@@ -524,6 +538,7 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %1, %3                          \n"     \
        "2:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
@@ -549,7 +564,9 @@ do {                                                                        \
        "1:     ulw     %1, (%3)                                \n"     \
        "2:     ulw     %D1, 4(%3)                              \n"     \
        "       move    %0, $0                                  \n"     \
-       "3:     .section        .fixup,\"ax\"                   \n"     \
+       "3:                                                     \n"     \
+       "       .insn                                           \n"     \
+       "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
@@ -616,6 +633,7 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %z2, %3         # __put_user_unaligned_asm\n" \
        "2:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
@@ -634,6 +652,7 @@ do {                                                                        \
        "1:     sw      %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
        "2:     sw      %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
index 058e941626a6ab51e823fa352e8c1f63e1d06624..370d967725c28d0e59d26ad8726f2ad052d5d2fa 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
  * Copyright (C) 2005  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2012  MIPS Technologies, Inc.
+ * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
  */
 
 #include <linux/types.h>
 #define UASM_EXPORT_SYMBOL(sym)
 #endif
 
+#define _UASM_ISA_CLASSIC      0
+#define _UASM_ISA_MICROMIPS    1
+
+#ifndef UASM_ISA
+#ifdef CONFIG_CPU_MICROMIPS
+#define UASM_ISA       _UASM_ISA_MICROMIPS
+#else
+#define UASM_ISA       _UASM_ISA_CLASSIC
+#endif
+#endif
+
+#if (UASM_ISA == _UASM_ISA_CLASSIC)
+#ifdef CONFIG_CPU_MICROMIPS
+#define ISAOPC(op)     CL_uasm_i##op
+#define ISAFUNC(x)     CL_##x
+#else
+#define ISAOPC(op)     uasm_i##op
+#define ISAFUNC(x)     x
+#endif
+#elif (UASM_ISA == _UASM_ISA_MICROMIPS)
+#ifdef CONFIG_CPU_MICROMIPS
+#define ISAOPC(op)     uasm_i##op
+#define ISAFUNC(x)     x
+#else
+#define ISAOPC(op)     MM_uasm_i##op
+#define ISAFUNC(x)     MM_##x
+#endif
+#else
+#error Unsupported micro-assembler ISA!!!
+#endif
+
 #define Ip_u1u2u3(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u2u1u3(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u3u1u2(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u1u2s3(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
 #define Ip_u2s3u1(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
 
 #define Ip_u2u1s3(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
 #define Ip_u2u1msbu3(op)                                               \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c,  \
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c,  \
           unsigned int d)
 
 #define Ip_u1u2(op)                                                    \
-void __uasminit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
+void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
 
 #define Ip_u1s2(op)                                                    \
-void __uasminit uasm_i##op(u32 **buf, unsigned int a, signed int b)
+void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
 
-#define Ip_u1(op) void __uasminit uasm_i##op(u32 **buf, unsigned int a)
+#define Ip_u1(op) void __uasminit ISAOPC(op)(u32 **buf, unsigned int a)
 
-#define Ip_0(op) void __uasminit uasm_i##op(u32 **buf)
+#define Ip_0(op) void __uasminit ISAOPC(op)(u32 **buf)
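To make the indirection concrete, here is what the first declaration below, Ip_u2u1s3(_addiu), expands to in two illustrative configurations: the build's native ISA keeps the plain uasm_i_* name, while requesting the classic ISA from a microMIPS kernel yields the CL_-prefixed variant.

    /* CONFIG_CPU_MICROMIPS unset, UASM_ISA left at _UASM_ISA_CLASSIC: */
    void __uasminit uasm_i_addiu(u32 **buf, unsigned int a, unsigned int b, signed int c);

    /* CONFIG_CPU_MICROMIPS set, but UASM_ISA forced to _UASM_ISA_CLASSIC: */
    void __uasminit CL_uasm_i_addiu(u32 **buf, unsigned int a, unsigned int b, signed int c);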
 
 Ip_u2u1s3(_addiu);
 Ip_u3u1u2(_addu);
@@ -132,19 +163,20 @@ struct uasm_label {
        int lab;
 };
 
-void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
+void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
+                       int lid);
 #ifdef CONFIG_64BIT
-int uasm_in_compat_space_p(long addr);
+int ISAFUNC(uasm_in_compat_space_p)(long addr);
 #endif
-int uasm_rel_hi(long val);
-int uasm_rel_lo(long val);
-void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
-void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
+int ISAFUNC(uasm_rel_hi)(long val);
+int ISAFUNC(uasm_rel_lo)(long val);
+void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
+void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
 
 #define UASM_L_LA(lb)                                                  \
-static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
+static inline void __uasminit ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
 {                                                                      \
-       uasm_build_label(lab, addr, label##lb);                         \
+       ISAFUNC(uasm_build_label)(lab, addr, label##lb);                \
 }
 
 /* convenience macros for instructions */
@@ -196,27 +228,27 @@ static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1,
                                     unsigned int a2, unsigned int a3)
 {
        if (a3 < 32)
-               uasm_i_drotr(p, a1, a2, a3);
+               ISAOPC(_drotr)(p, a1, a2, a3);
        else
-               uasm_i_drotr32(p, a1, a2, a3 - 32);
+               ISAOPC(_drotr32)(p, a1, a2, a3 - 32);
 }
 
 static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
                                    unsigned int a2, unsigned int a3)
 {
        if (a3 < 32)
-               uasm_i_dsll(p, a1, a2, a3);
+               ISAOPC(_dsll)(p, a1, a2, a3);
        else
-               uasm_i_dsll32(p, a1, a2, a3 - 32);
+               ISAOPC(_dsll32)(p, a1, a2, a3 - 32);
 }
 
 static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
                                    unsigned int a2, unsigned int a3)
 {
        if (a3 < 32)
-               uasm_i_dsrl(p, a1, a2, a3);
+               ISAOPC(_dsrl)(p, a1, a2, a3);
        else
-               uasm_i_dsrl32(p, a1, a2, a3 - 32);
+               ISAOPC(_dsrl32)(p, a1, a2, a3 - 32);
 }
 
 /* Handle relocations. */
index 4d078815eaa5448d933ecce88e1ada2503f629ce..0f4aec2ad1e6e1c9f7d8b7a1b54cf49ebb58c392 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright (C) 1996, 2000 by Ralf Baechle
  * Copyright (C) 2006 by Thiemo Seufer
+ * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #ifndef _UAPI_ASM_INST_H
 #define _UAPI_ASM_INST_H
@@ -192,6 +193,282 @@ enum lx_func {
        lbx_op  = 0x16,
 };
 
+/*
+ * (microMIPS) Major opcodes.
+ */
+enum mm_major_op {
+       mm_pool32a_op, mm_pool16a_op, mm_lbu16_op, mm_move16_op,
+       mm_addi32_op, mm_lbu32_op, mm_sb32_op, mm_lb32_op,
+       mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op,
+       mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op,
+       mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op,
+       mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op,
+       mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op,
+       mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op,
+       mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op,
+       mm_slti32_op, mm_beq32_op, mm_swc132_op, mm_lwc132_op,
+       mm_reserved5_op, mm_reserved6_op, mm_sh16_op, mm_bnez16_op,
+       mm_sltiu32_op, mm_bne32_op, mm_sdc132_op, mm_ldc132_op,
+       mm_reserved7_op, mm_reserved8_op, mm_swsp16_op, mm_b16_op,
+       mm_andi32_op, mm_j32_op, mm_sd32_op, mm_ld32_op,
+       mm_reserved11_op, mm_reserved12_op, mm_sw16_op, mm_li16_op,
+       mm_jalx32_op, mm_jal32_op, mm_sw32_op, mm_lw32_op,
+};
+
+/*
+ * (microMIPS) POOL32I minor opcodes.
+ */
+enum mm_32i_minor_op {
+       mm_bltz_op, mm_bltzal_op, mm_bgez_op, mm_bgezal_op,
+       mm_blez_op, mm_bnezc_op, mm_bgtz_op, mm_beqzc_op,
+       mm_tlti_op, mm_tgei_op, mm_tltiu_op, mm_tgeiu_op,
+       mm_tnei_op, mm_lui_op, mm_teqi_op, mm_reserved13_op,
+       mm_synci_op, mm_bltzals_op, mm_reserved14_op, mm_bgezals_op,
+       mm_bc2f_op, mm_bc2t_op, mm_reserved15_op, mm_reserved16_op,
+       mm_reserved17_op, mm_reserved18_op, mm_bposge64_op, mm_bposge32_op,
+       mm_bc1f_op, mm_bc1t_op, mm_reserved19_op, mm_reserved20_op,
+       mm_bc1any2f_op, mm_bc1any2t_op, mm_bc1any4f_op, mm_bc1any4t_op,
+};
+
+/*
+ * (microMIPS) POOL32A minor opcodes.
+ */
+enum mm_32a_minor_op {
+       mm_sll32_op = 0x000,
+       mm_ins_op = 0x00c,
+       mm_ext_op = 0x02c,
+       mm_pool32axf_op = 0x03c,
+       mm_srl32_op = 0x040,
+       mm_sra_op = 0x080,
+       mm_rotr_op = 0x0c0,
+       mm_lwxs_op = 0x118,
+       mm_addu32_op = 0x150,
+       mm_subu32_op = 0x1d0,
+       mm_and_op = 0x250,
+       mm_or32_op = 0x290,
+       mm_xor32_op = 0x310,
+};
+
+/*
+ * (microMIPS) POOL32B functions.
+ */
+enum mm_32b_func {
+       mm_lwc2_func = 0x0,
+       mm_lwp_func = 0x1,
+       mm_ldc2_func = 0x2,
+       mm_ldp_func = 0x4,
+       mm_lwm32_func = 0x5,
+       mm_cache_func = 0x6,
+       mm_ldm_func = 0x7,
+       mm_swc2_func = 0x8,
+       mm_swp_func = 0x9,
+       mm_sdc2_func = 0xa,
+       mm_sdp_func = 0xc,
+       mm_swm32_func = 0xd,
+       mm_sdm_func = 0xf,
+};
+
+/*
+ * (microMIPS) POOL32C functions.
+ */
+enum mm_32c_func {
+       mm_pref_func = 0x2,
+       mm_ll_func = 0x3,
+       mm_swr_func = 0x9,
+       mm_sc_func = 0xb,
+       mm_lwu_func = 0xe,
+};
+
+/*
+ * (microMIPS) POOL32AXF minor opcodes.
+ */
+enum mm_32axf_minor_op {
+       mm_mfc0_op = 0x003,
+       mm_mtc0_op = 0x00b,
+       mm_tlbp_op = 0x00d,
+       mm_jalr_op = 0x03c,
+       mm_tlbr_op = 0x04d,
+       mm_jalrhb_op = 0x07c,
+       mm_tlbwi_op = 0x08d,
+       mm_tlbwr_op = 0x0cd,
+       mm_jalrs_op = 0x13c,
+       mm_jalrshb_op = 0x17c,
+       mm_syscall_op = 0x22d,
+       mm_eret_op = 0x3cd,
+};
+
+/*
+ * (microMIPS) POOL32F minor opcodes.
+ */
+enum mm_32f_minor_op {
+       mm_32f_00_op = 0x00,
+       mm_32f_01_op = 0x01,
+       mm_32f_02_op = 0x02,
+       mm_32f_10_op = 0x08,
+       mm_32f_11_op = 0x09,
+       mm_32f_12_op = 0x0a,
+       mm_32f_20_op = 0x10,
+       mm_32f_30_op = 0x18,
+       mm_32f_40_op = 0x20,
+       mm_32f_41_op = 0x21,
+       mm_32f_42_op = 0x22,
+       mm_32f_50_op = 0x28,
+       mm_32f_51_op = 0x29,
+       mm_32f_52_op = 0x2a,
+       mm_32f_60_op = 0x30,
+       mm_32f_70_op = 0x38,
+       mm_32f_73_op = 0x3b,
+       mm_32f_74_op = 0x3c,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_10_minor_op {
+       mm_lwxc1_op = 0x1,
+       mm_swxc1_op,
+       mm_ldxc1_op,
+       mm_sdxc1_op,
+       mm_luxc1_op,
+       mm_suxc1_op,
+};
+
+enum mm_32f_func {
+       mm_lwxc1_func = 0x048,
+       mm_swxc1_func = 0x088,
+       mm_ldxc1_func = 0x0c8,
+       mm_sdxc1_func = 0x108,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_40_minor_op {
+       mm_fmovf_op,
+       mm_fmovt_op,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_60_minor_op {
+       mm_fadd_op,
+       mm_fsub_op,
+       mm_fmul_op,
+       mm_fdiv_op,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_70_minor_op {
+       mm_fmovn_op,
+       mm_fmovz_op,
+};
+
+/*
+ * (microMIPS) POOL32FXF secondary minor opcodes for POOL32F.
+ */
+enum mm_32f_73_minor_op {
+       mm_fmov0_op = 0x01,
+       mm_fcvtl_op = 0x04,
+       mm_movf0_op = 0x05,
+       mm_frsqrt_op = 0x08,
+       mm_ffloorl_op = 0x0c,
+       mm_fabs0_op = 0x0d,
+       mm_fcvtw_op = 0x24,
+       mm_movt0_op = 0x25,
+       mm_fsqrt_op = 0x28,
+       mm_ffloorw_op = 0x2c,
+       mm_fneg0_op = 0x2d,
+       mm_cfc1_op = 0x40,
+       mm_frecip_op = 0x48,
+       mm_fceill_op = 0x4c,
+       mm_fcvtd0_op = 0x4d,
+       mm_ctc1_op = 0x60,
+       mm_fceilw_op = 0x6c,
+       mm_fcvts0_op = 0x6d,
+       mm_mfc1_op = 0x80,
+       mm_fmov1_op = 0x81,
+       mm_movf1_op = 0x85,
+       mm_ftruncl_op = 0x8c,
+       mm_fabs1_op = 0x8d,
+       mm_mtc1_op = 0xa0,
+       mm_movt1_op = 0xa5,
+       mm_ftruncw_op = 0xac,
+       mm_fneg1_op = 0xad,
+       mm_froundl_op = 0xcc,
+       mm_fcvtd1_op = 0xcd,
+       mm_froundw_op = 0xec,
+       mm_fcvts1_op = 0xed,
+};
+
+/*
+ * (microMIPS) POOL16C minor opcodes.
+ */
+enum mm_16c_minor_op {
+       mm_lwm16_op = 0x04,
+       mm_swm16_op = 0x05,
+       mm_jr16_op = 0x18,
+       mm_jrc_op = 0x1a,
+       mm_jalr16_op = 0x1c,
+       mm_jalrs16_op = 0x1e,
+};
+
+/*
+ * (microMIPS) POOL16D minor opcodes.
+ */
+enum mm_16d_minor_op {
+       mm_addius5_func,
+       mm_addiusp_func,
+};
+
+/*
+ * (MIPS16e) opcodes.
+ */
+enum MIPS16e_ops {
+       MIPS16e_jal_op = 003,
+       MIPS16e_ld_op = 007,
+       MIPS16e_i8_op = 014,
+       MIPS16e_sd_op = 017,
+       MIPS16e_lb_op = 020,
+       MIPS16e_lh_op = 021,
+       MIPS16e_lwsp_op = 022,
+       MIPS16e_lw_op = 023,
+       MIPS16e_lbu_op = 024,
+       MIPS16e_lhu_op = 025,
+       MIPS16e_lwpc_op = 026,
+       MIPS16e_lwu_op = 027,
+       MIPS16e_sb_op = 030,
+       MIPS16e_sh_op = 031,
+       MIPS16e_swsp_op = 032,
+       MIPS16e_sw_op = 033,
+       MIPS16e_rr_op = 035,
+       MIPS16e_extend_op = 036,
+       MIPS16e_i64_op = 037,
+};
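Note that these MIPS16e opcode values are octal literals, so for example MIPS16e_i8_op = 014 is 12 decimal (0x0c) and MIPS16e_extend_op = 036 is 30 decimal (0x1e); all of them fit the 5-bit major opcode field.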
+
+enum MIPS16e_i64_func {
+       MIPS16e_ldsp_func,
+       MIPS16e_sdsp_func,
+       MIPS16e_sdrasp_func,
+       MIPS16e_dadjsp_func,
+       MIPS16e_ldpc_func,
+};
+
+enum MIPS16e_rr_func {
+       MIPS16e_jr_func,
+};
+
+enum MIPS16e_i8_func {
+       MIPS16e_swrasp_func = 02,
+};
+
+/*
+ * (microMIPS & MIPS16e) NOP instruction.
+ */
+#define MM_NOP16       0x0c00
+
 /*
  * Damn ...  bitfields depend from byteorder :-(
  */
@@ -311,6 +588,262 @@ struct v_format {                         /* MDMX vector format */
        ;)))))))
 };
 
+/*
+ * microMIPS instruction formats (32-bit length)
+ *
+ * NOTE:
+ *     Parentheses denote whether the format is a microMIPS instruction or
+ *     a MIPS32 instruction re-encoded for use in the microMIPS ASE.
+ */
+struct fb_format {             /* FPU branch format (MIPS32) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int bc : 5,
+       BITFIELD_FIELD(unsigned int cc : 3,
+       BITFIELD_FIELD(unsigned int flag : 2,
+       BITFIELD_FIELD(signed int simmediate : 16,
+       ;)))))
+};
+
+struct fp0_format {            /* FPU multiply and add format (MIPS32) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int fmt : 5,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp0_format {         /* FPU multiply and add format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int fmt : 3,
+       BITFIELD_FIELD(unsigned int op : 2,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;)))))))
+};
+
+struct fp1_format {            /* FPU mfc1 and cfc1 format (MIPS32) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int op : 5,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp1_format {         /* FPU mfc1 and cfc1 format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fmt : 2,
+       BITFIELD_FIELD(unsigned int op : 8,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp2_format {         /* FPU movt and movf format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int cc : 3,
+       BITFIELD_FIELD(unsigned int zero : 2,
+       BITFIELD_FIELD(unsigned int fmt : 2,
+       BITFIELD_FIELD(unsigned int op : 3,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))))
+};
+
+struct mm_fp3_format {         /* FPU abs and neg format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fmt : 3,
+       BITFIELD_FIELD(unsigned int op : 7,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp4_format {         /* FPU c.cond format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int cc : 3,
+       BITFIELD_FIELD(unsigned int fmt : 3,
+       BITFIELD_FIELD(unsigned int cond : 4,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;)))))))
+};
+
+struct mm_fp5_format {         /* FPU lwxc1 and swxc1 format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int index : 5,
+       BITFIELD_FIELD(unsigned int base : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int op : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct fp6_format {            /* FPU madd and msub format (MIPS IV) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int fr : 5,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp6_format {         /* FPU madd and msub format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int fr : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_i_format {           /* Immediate format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int rs : 5,
+       BITFIELD_FIELD(signed int simmediate : 16,
+       ;))))
+};
+
+struct mm_m_format {           /* Multi-word load/store format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rd : 5,
+       BITFIELD_FIELD(unsigned int base : 5,
+       BITFIELD_FIELD(unsigned int func : 4,
+       BITFIELD_FIELD(signed int simmediate : 12,
+       ;)))))
+};
+
+struct mm_x_format {           /* Scaled indexed load format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int index : 5,
+       BITFIELD_FIELD(unsigned int base : 5,
+       BITFIELD_FIELD(unsigned int rd : 5,
+       BITFIELD_FIELD(unsigned int func : 11,
+       ;)))))
+};
+
+/*
+ * microMIPS instruction formats (16-bit length)
+ */
+struct mm_b0_format {          /* Unconditional branch format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(signed int simmediate : 10,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;)))
+};
+
+struct mm_b1_format {          /* Conditional branch format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rs : 3,
+       BITFIELD_FIELD(signed int simmediate : 7,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;))))
+};
+
+struct mm16_m_format {         /* Multi-word load/store format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int func : 4,
+       BITFIELD_FIELD(unsigned int rlist : 2,
+       BITFIELD_FIELD(unsigned int imm : 4,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;)))))
+};
+
+struct mm16_rb_format {                /* Signed immediate format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 3,
+       BITFIELD_FIELD(unsigned int base : 3,
+       BITFIELD_FIELD(signed int simmediate : 4,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;)))))
+};
+
+struct mm16_r3_format {                /* Load from global pointer format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 3,
+       BITFIELD_FIELD(signed int simmediate : 7,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;))))
+};
+
+struct mm16_r5_format {                /* Load/store from stack pointer format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(signed int simmediate : 5,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;))))
+};
+
+/*
+ * MIPS16e instruction formats (16-bit length)
+ */
+struct m16e_rr {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int rx : 3,
+       BITFIELD_FIELD(unsigned int nd : 1,
+       BITFIELD_FIELD(unsigned int l : 1,
+       BITFIELD_FIELD(unsigned int ra : 1,
+       BITFIELD_FIELD(unsigned int func : 5,
+       ;))))))
+};
+
+struct m16e_jal {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int x : 1,
+       BITFIELD_FIELD(unsigned int imm20_16 : 5,
+       BITFIELD_FIELD(signed int imm25_21 : 5,
+       ;))))
+};
+
+struct m16e_i64 {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int func : 3,
+       BITFIELD_FIELD(unsigned int imm : 8,
+       ;)))
+};
+
+struct m16e_ri64 {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int func : 3,
+       BITFIELD_FIELD(unsigned int ry : 3,
+       BITFIELD_FIELD(unsigned int imm : 5,
+       ;))))
+};
+
+struct m16e_ri {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int rx : 3,
+       BITFIELD_FIELD(unsigned int imm : 8,
+       ;)))
+};
+
+struct m16e_rri {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int rx : 3,
+       BITFIELD_FIELD(unsigned int ry : 3,
+       BITFIELD_FIELD(unsigned int imm : 5,
+       ;))))
+};
+
+struct m16e_i8 {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int func : 3,
+       BITFIELD_FIELD(unsigned int imm : 8,
+       ;)))
+};
+
 union mips_instruction {
        unsigned int word;
        unsigned short halfword[2];
@@ -326,6 +859,37 @@ union mips_instruction {
        struct b_format b_format;
        struct ps_format ps_format;
        struct v_format v_format;
+       struct fb_format fb_format;
+       struct fp0_format fp0_format;
+       struct mm_fp0_format mm_fp0_format;
+       struct fp1_format fp1_format;
+       struct mm_fp1_format mm_fp1_format;
+       struct mm_fp2_format mm_fp2_format;
+       struct mm_fp3_format mm_fp3_format;
+       struct mm_fp4_format mm_fp4_format;
+       struct mm_fp5_format mm_fp5_format;
+       struct fp6_format fp6_format;
+       struct mm_fp6_format mm_fp6_format;
+       struct mm_i_format mm_i_format;
+       struct mm_m_format mm_m_format;
+       struct mm_x_format mm_x_format;
+       struct mm_b0_format mm_b0_format;
+       struct mm_b1_format mm_b1_format;
+       struct mm16_m_format mm16_m_format;
+       struct mm16_rb_format mm16_rb_format;
+       struct mm16_r3_format mm16_r3_format;
+       struct mm16_r5_format mm16_r5_format;
+};
+
+union mips16e_instruction {
+       unsigned int full : 16;
+       struct m16e_rr rr;
+       struct m16e_jal jal;
+       struct m16e_i64 i64;
+       struct m16e_ri64 ri64;
+       struct m16e_ri ri;
+       struct m16e_rri rri;
+       struct m16e_i8 i8;
 };
 
 #endif /* _UAPI_ASM_INST_H */
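
The format structs added above are only ever used through the union overlays at the end of the header: a raw instruction word (or halfword) is stored into the union and read back through whichever per-format view matches its opcode, exactly as the branch-emulation code later in this patch does. A minimal sketch of that usage, assuming only the definitions above (the helper name is hypothetical, not part of the patch):

/* Sketch only: classify a 16-bit MIPS16e instruction by storing the raw
 * halfword into the union and reading it back through a format view. */
static inline int is_mips16e_jal(unsigned short hw)
{
        union mips16e_instruction insn;

        insn.full = hw;                            /* raw encoding in */
        return insn.ri.opcode == MIPS16e_jal_op;   /* decoded field out */
}
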
index 520a908d45d62af6d3edb1ef83b2f0f990faa562..6ad9e04bdf6210a8b722e92aca5e49161cb4deca 100644 (file)
@@ -5,7 +5,7 @@
 extra-y                := head.o vmlinux.lds
 
 obj-y          += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
-                  ptrace.o reset.o setup.o signal.o syscall.o \
+                  prom.o ptrace.o reset.o setup.o signal.o syscall.o \
                   time.o topology.o traps.o unaligned.o watch.o vdso.o
 
 ifdef CONFIG_FUNCTION_TRACER
@@ -19,15 +19,16 @@ obj-$(CONFIG_CEVT_BCM1480)  += cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)         += cevt-r4k.o
 obj-$(CONFIG_MIPS_MT_SMTC)     += cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)      += cevt-ds1287.o
+obj-$(CONFIG_CEVT_GIC)         += cevt-gic.o
 obj-$(CONFIG_CEVT_GT641XX)     += cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)      += cevt-sb1250.o
 obj-$(CONFIG_CEVT_TXX9)                += cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)     += csrc-bcm1480.o
+obj-$(CONFIG_CSRC_GIC)         += csrc-gic.o
 obj-$(CONFIG_CSRC_IOASIC)      += csrc-ioasic.o
 obj-$(CONFIG_CSRC_POWERTV)     += csrc-powertv.o
 obj-$(CONFIG_CSRC_R4K)         += csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)      += csrc-sb1250.o
-obj-$(CONFIG_CSRC_GIC)         += csrc-gic.o
 obj-$(CONFIG_SYNC_R4K)         += sync-r4k.o
 
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
@@ -86,8 +87,6 @@ obj-$(CONFIG_EARLY_PRINTK)    += early_printk.o
 obj-$(CONFIG_SPINLOCK_TEST)    += spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)     += mips_machine.o
 
-obj-$(CONFIG_OF)               += prom.o
-
 CFLAGS_cpu-bugs64.o    = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
 obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT)  += 8250-platform.o
index 50285b2c7ffe30d5a54d877f7f0cac075dd59f8c..0845091ba480bba5313e32f7e12c2a0b79b83048 100644 (file)
@@ -17,6 +17,8 @@
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 
+#include <linux/kvm_host.h>
+
 void output_ptreg_defines(void)
 {
        COMMENT("MIPS pt_regs offsets.");
@@ -328,3 +330,67 @@ void output_pbe_defines(void)
        BLANK();
 }
 #endif
+
+void output_kvm_defines(void)
+{
+       COMMENT(" KVM/MIPS Specific offsets. ");
+       DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
+       OFFSET(VCPU_RUN, kvm_vcpu, run);
+       OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
+
+       OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
+       OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
+
+       OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
+       OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
+
+       OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
+       OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
+       OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
+       OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
+
+       OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
+
+       OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
+       OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
+       OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
+       OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
+       OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
+       OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
+       OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
+       OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
+       OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
+       OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
+       OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
+       OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
+       OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
+       OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
+       OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
+       OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
+       OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
+       OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
+       OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
+       OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
+       OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
+       OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
+       OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
+       OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
+       OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
+       OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
+       OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
+       OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
+       OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
+       OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
+       OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
+       OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
+       OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
+       OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
+       OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
+       OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
+       OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
+       OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
+
+       OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
+       OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
+       BLANK();
+}
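
output_kvm_defines() plugs into the usual kbuild asm-offsets mechanism: each DEFINE()/OFFSET() invocation emits a marker that the build turns into a numeric constant in the generated asm-offsets.h, so the KVM assembly entry code can refer to structure members symbolically instead of hard-coding byte offsets. A rough sketch of the idea; the macro bodies below are illustrative, not the exact definitions from include/linux/kbuild.h:

/* Illustrative sketch of the asm-offsets mechanism (not the exact kernel
 * macros).  The compiler only prints the constants; the build scripts
 * scrape them into asm-offsets.h. */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define OFFSET(sym, str, mem) \
        DEFINE(sym, offsetof(struct str, mem))

/* Assembly can then write, e.g. with a kvm_vcpu_arch pointer in k1:
 *      LONG_S  t0, VCPU_PC(k1)         # store guest PC symbolically
 */
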
index 556a4357d7fc69f79b024fd968ef409bcbe20fb8..97c5a1668e5347bb4a7668882931349cfdac08a6 100644 (file)
@@ -48,7 +48,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
        __res;                                                          \
 })
 
+#ifdef CONFIG_KVM_GUEST
+#define TASK32_SIZE            0x3fff8000UL
+#else
 #define TASK32_SIZE            0x7fff8000UL
+#endif
 #undef ELF_ET_DYN_BASE
 #define ELF_ET_DYN_BASE                (TASK32_SIZE / 3 * 2)
 
index 83ffe950f710f86c985d56e9febabb0aafe39d75..46c2ad0703a0b1040140b4a2041c179fdaa45b34 100644 (file)
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
 #include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
 #include <asm/inst.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
+/*
+ * Calculate and return exception PC in case of branch delay slot
+ * for microMIPS and MIPS16e. It does not clear the ISA mode bit.
+ */
+int __isa_exception_epc(struct pt_regs *regs)
+{
+       unsigned short inst;
+       long epc = regs->cp0_epc;
+
+       /* Calculate exception PC in branch delay slot. */
+       if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) {
+               /* This should never happen because delay slot was checked. */
+               force_sig(SIGSEGV, current);
+               return epc;
+       }
+       if (cpu_has_mips16) {
+               if (((union mips16e_instruction)inst).ri.opcode
+                               == MIPS16e_jal_op)
+                       epc += 4;
+               else
+                       epc += 2;
+       } else if (mm_insn_16bit(inst))
+               epc += 2;
+       else
+               epc += 4;
+
+       return epc;
+}
+
+/*
+ * Compute return address and emulate branch in microMIPS mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __microMIPS_compute_return_epc(struct pt_regs *regs)
+{
+       u16 __user *pc16;
+       u16 halfword;
+       unsigned int word;
+       unsigned long contpc;
+       struct mm_decoded_insn mminsn = { 0 };
+
+       mminsn.micro_mips_mode = 1;
+
+       /* This load never faults. */
+       pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+       __get_user(halfword, pc16);
+       pc16++;
+       contpc = regs->cp0_epc + 2;
+       word = ((unsigned int)halfword << 16);
+       mminsn.pc_inc = 2;
+
+       if (!mm_insn_16bit(halfword)) {
+               __get_user(halfword, pc16);
+               pc16++;
+               contpc = regs->cp0_epc + 4;
+               mminsn.pc_inc = 4;
+               word |= halfword;
+       }
+       mminsn.insn = word;
+
+       if (get_user(halfword, pc16))
+               goto sigsegv;
+       mminsn.next_pc_inc = 2;
+       word = ((unsigned int)halfword << 16);
+
+       if (!mm_insn_16bit(halfword)) {
+               pc16++;
+               if (get_user(halfword, pc16))
+                       goto sigsegv;
+               mminsn.next_pc_inc = 4;
+               word |= halfword;
+       }
+       mminsn.next_insn = word;
+
+       mm_isBranchInstr(regs, mminsn, &contpc);
+
+       regs->cp0_epc = contpc;
+
+       return 0;
+
+sigsegv:
+       force_sig(SIGSEGV, current);
+       return -EFAULT;
+}
+
+/*
+ * Compute return address and emulate branch in MIPS16e mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __MIPS16e_compute_return_epc(struct pt_regs *regs)
+{
+       u16 __user *addr;
+       union mips16e_instruction inst;
+       u16 inst2;
+       u32 fullinst;
+       long epc;
+
+       epc = regs->cp0_epc;
+
+       /* Read the instruction. */
+       addr = (u16 __user *)msk_isa16_mode(epc);
+       if (__get_user(inst.full, addr)) {
+               force_sig(SIGSEGV, current);
+               return -EFAULT;
+       }
+
+       switch (inst.ri.opcode) {
+       case MIPS16e_extend_op:
+               regs->cp0_epc += 4;
+               return 0;
+
+               /*
+                *  JAL and JALX in MIPS16e mode
+                */
+       case MIPS16e_jal_op:
+               addr += 1;
+               if (__get_user(inst2, addr)) {
+                       force_sig(SIGSEGV, current);
+                       return -EFAULT;
+               }
+               fullinst = ((unsigned)inst.full << 16) | inst2;
+               regs->regs[31] = epc + 6;
+               epc += 4;
+               epc >>= 28;
+               epc <<= 28;
+               /*
+                * JAL:5 X:1 TARGET[20:16]:5 TARGET[25:21]:5 TARGET[15:0]:16
+                *
+                * ......TARGET[15:0].................TARGET[20:16]...........
+                * ......TARGET[25:21]
+                */
+               epc |=
+                   ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) |
+                   ((fullinst & 0x1f0000) << 7);
+               if (!inst.jal.x)
+                       set_isa16_mode(epc);    /* Set ISA mode bit. */
+               regs->cp0_epc = epc;
+               return 0;
+
+               /*
+                *  J(AL)R(C)
+                */
+       case MIPS16e_rr_op:
+               if (inst.rr.func == MIPS16e_jr_func) {
+
+                       if (inst.rr.ra)
+                               regs->cp0_epc = regs->regs[31];
+                       else
+                               regs->cp0_epc =
+                                   regs->regs[reg16to32[inst.rr.rx]];
+
+                       if (inst.rr.l) {
+                               if (inst.rr.nd)
+                                       regs->regs[31] = epc + 2;
+                               else
+                                       regs->regs[31] = epc + 4;
+                       }
+                       return 0;
+               }
+               break;
+       }
+
+       /*
+        * All other cases have no branch delay slot and are 16-bit instructions.
+        * Branches do not cause an exception.
+        */
+       regs->cp0_epc += 2;
+
+       return 0;
+}
+
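
The JAL/JALX case above has the least obvious arithmetic: the 26-bit jump target is split across the two halfwords, with TARGET[20:16] and TARGET[25:21] in the first halfword and TARGET[15:0] in the second, and the result is pasted into the 256MB region of the instruction that follows the pair. The same reconstruction restated as a standalone sketch (hypothetical helper, for illustration only):

/* Sketch only: rebuild the jump address from a MIPS16e JAL/JALX pair.
 * 'fullinst' is (first halfword << 16) | second halfword, as above. */
static unsigned long mips16e_jal_target(unsigned int fullinst, long epc)
{
        unsigned long target;

        target = ((fullinst & 0x0000ffff) << 2) |  /* TARGET[15:0]  -> addr bits 17:2  */
                 ((fullinst & 0x03e00000) >> 3) |  /* TARGET[20:16] -> addr bits 22:18 */
                 ((fullinst & 0x001f0000) << 7);   /* TARGET[25:21] -> addr bits 27:23 */

        /* Keep the 256MB region of the instruction after the JAL pair. */
        return (((epc + 4) >> 28) << 28) | target;
}
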
 /**
  * __compute_return_epc_for_insn - Computes the return address and do emulate
  *                                 branch simulation, if required.
@@ -129,6 +305,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                epc <<= 28;
                epc |= (insn.j_format.target << 2);
                regs->cp0_epc = epc;
+               if (insn.i_format.opcode == jalx_op)
+                       set_isa16_mode(regs->cp0_epc);
                break;
 
        /*
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
new file mode 100644 (file)
index 0000000..730eaf9
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013  Imagination Technologies Ltd.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+#include <asm/gic.h>
+#include <asm/mips-boards/maltaint.h>
+
+DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
+int gic_timer_irq_installed;
+
+
+static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+       u64 cnt;
+       int res;
+
+       cnt = gic_read_count();
+       cnt += (u64)delta;
+       gic_write_compare(cnt);
+       res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
+       return res;
+}
+
+void gic_set_clock_mode(enum clock_event_mode mode,
+                               struct clock_event_device *evt)
+{
+       /* Nothing to do ...  */
+}
+
+irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *cd;
+       int cpu = smp_processor_id();
+
+       gic_write_compare(gic_read_compare());
+       cd = &per_cpu(gic_clockevent_device, cpu);
+       cd->event_handler(cd);
+       return IRQ_HANDLED;
+}
+
+struct irqaction gic_compare_irqaction = {
+       .handler = gic_compare_interrupt,
+       .flags = IRQF_PERCPU | IRQF_TIMER,
+       .name = "timer",
+};
+
+
+void gic_event_handler(struct clock_event_device *dev)
+{
+}
+
+int __cpuinit gic_clockevent_init(void)
+{
+       unsigned int cpu = smp_processor_id();
+       struct clock_event_device *cd;
+       unsigned int irq;
+
+       if (!cpu_has_counter || !gic_frequency)
+               return -ENXIO;
+
+       irq = MIPS_GIC_IRQ_BASE;
+
+       cd = &per_cpu(gic_clockevent_device, cpu);
+
+       cd->name                = "MIPS GIC";
+       cd->features            = CLOCK_EVT_FEAT_ONESHOT;
+
+       clockevent_set_clock(cd, gic_frequency);
+
+       /* Calculate the min / max delta */
+       cd->max_delta_ns        = clockevent_delta2ns(0x7fffffff, cd);
+       cd->min_delta_ns        = clockevent_delta2ns(0x300, cd);
+
+       cd->rating              = 300;
+       cd->irq                 = irq;
+       cd->cpumask             = cpumask_of(cpu);
+       cd->set_next_event      = gic_next_event;
+       cd->set_mode            = gic_set_clock_mode;
+       cd->event_handler       = gic_event_handler;
+
+       clockevents_register_device(cd);
+
+       GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002);
+       GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK);
+
+       if (gic_timer_irq_installed)
+               return 0;
+
+       gic_timer_irq_installed = 1;
+
+       setup_irq(irq, &gic_compare_irqaction);
+       irq_set_handler(irq, handle_percpu_irq);
+       return 0;
+}
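
One line in gic_next_event() deserves a note: after the comparator is written, the counter is read back and the difference is interpreted as a signed quantity, so a deadline the counter has already raced past yields -ETIME and the clockevents core retries with a larger delta. The idiom in isolation (illustrative sketch, not part of the patch):

/* Sketch of the signed-difference test in gic_next_event(): a deadline
 * already behind 'now' gives a small non-negative difference, a deadline
 * still in the future gives a negative one. */
static int deadline_missed(unsigned int now, unsigned int deadline)
{
        return (int)(now - deadline) >= 0;
}
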
index 07b847d77f5da029dca6a6cbb53198807ff781a8..02033eaf8825420eea30105e92edcc795d8b5b70 100644 (file)
@@ -23,7 +23,6 @@
  */
 
 #ifndef CONFIG_MIPS_MT_SMTC
-
 static int mips_next_event(unsigned long delta,
                           struct clock_event_device *evt)
 {
@@ -49,7 +48,6 @@ DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 int cp0_timer_irq_installed;
 
 #ifndef CONFIG_MIPS_MT_SMTC
-
 irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
        const int r2 = cpu_has_mips_r2;
@@ -74,6 +72,9 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
                /* Clear Count/Compare Interrupt */
                write_c0_compare(read_c0_compare());
                cd = &per_cpu(mips_clockevent_device, cpu);
+#ifdef CONFIG_CEVT_GIC
+               if (!gic_present)
+#endif
                cd->event_handler(cd);
        }
 
@@ -118,6 +119,10 @@ int c0_compare_int_usable(void)
        unsigned int delta;
        unsigned int cnt;
 
+#ifdef CONFIG_KVM_GUEST
+    return 1;
+#endif
+
        /*
         * IP7 already pending?  Try to clear it by acking the timer.
         */
@@ -166,7 +171,6 @@ int c0_compare_int_usable(void)
 }
 
 #ifndef CONFIG_MIPS_MT_SMTC
-
 int __cpuinit r4k_clockevent_init(void)
 {
        unsigned int cpu = smp_processor_id();
@@ -206,6 +210,9 @@ int __cpuinit r4k_clockevent_init(void)
        cd->set_mode            = mips_set_clock_mode;
        cd->event_handler       = mips_event_handler;
 
+#ifdef CONFIG_CEVT_GIC
+       if (!gic_present)
+#endif
        clockevents_register_device(cd);
 
        if (cp0_timer_irq_installed)
index 5fe66a0c32245366bc56079eca0a160a7b9bc0cb..4bbffdb9024ffb9cf437adb4fd5243e75e68a88c 100644 (file)
@@ -470,6 +470,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
                c->options |= MIPS_CPU_ULRI;
        if (config3 & MIPS_CONF3_ISA)
                c->options |= MIPS_CPU_MICROMIPS;
+#ifdef CONFIG_CPU_MICROMIPS
+       write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE);
+#endif
        if (config3 & MIPS_CONF3_VZ)
                c->ases |= MIPS_ASE_VZ;
 
index 5dca24bce51bef67320417767e7df8419a983dd7..e026209011178342a6c98bbd709366fb8f3d06d8 100644 (file)
@@ -5,23 +5,14 @@
  *
  * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
  */
-#include <linux/clocksource.h>
 #include <linux/init.h>
+#include <linux/time.h>
 
-#include <asm/time.h>
 #include <asm/gic.h>
 
 static cycle_t gic_hpt_read(struct clocksource *cs)
 {
-       unsigned int hi, hi2, lo;
-
-       do {
-               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
-               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
-               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
-       } while (hi2 != hi);
-
-       return (((cycle_t) hi) << 32) + lo;
+       return gic_read_count();
 }
 
 static struct clocksource gic_clocksource = {
index ecb347ce1b3d03edc9cb15c3d5dcaf9bb434da84..5c2ba9f08a80d33ed0cdf61ffaf429524c3ffd4f 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2001 MIPS Technologies, Inc.
  * Copyright (C) 2002, 2007  Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #include <linux/init.h>
 
 #include <asm/war.h>
 #include <asm/thread_info.h>
 
+#ifdef CONFIG_MIPS_MT_SMTC
 #define PANIC_PIC(msg)                                 \
-               .set push;                              \
+               .set    push;                           \
+               .set    nomicromips;                    \
                .set    reorder;                        \
                PTR_LA  a0,8f;                          \
                .set    noat;                           \
 9:             b       9b;                             \
                .set    pop;                            \
                TEXT(msg)
+#endif
 
        __INIT
 
-NESTED(except_vec0_generic, 0, sp)
-       PANIC_PIC("Exception vector 0 called")
-       END(except_vec0_generic)
-
-NESTED(except_vec1_generic, 0, sp)
-       PANIC_PIC("Exception vector 1 called")
-       END(except_vec1_generic)
-
 /*
  * General exception vector for all other CPUs.
  *
@@ -138,12 +133,19 @@ LEAF(r4k_wait)
         nop
        nop
        nop
+#ifdef CONFIG_CPU_MICROMIPS
+       nop
+       nop
+       nop
+       nop
+#endif
        .set    mips3
        wait
        /* end of rollback region (the region size must be power of two) */
-       .set    pop
 1:
        jr      ra
+       nop
+       .set    pop
        END(r4k_wait)
 
        .macro  BUILD_ROLLBACK_PROLOGUE handler
@@ -201,7 +203,11 @@ NESTED(handle_int, PT_SIZE, sp)
        LONG_L  s0, TI_REGS($28)
        LONG_S  sp, TI_REGS($28)
        PTR_LA  ra, ret_from_irq
-       j       plat_irq_dispatch
+       PTR_LA  v0, plat_irq_dispatch
+       jr      v0
+#ifdef CONFIG_CPU_MICROMIPS
+       nop
+#endif
        END(handle_int)
 
        __INIT
@@ -222,11 +228,14 @@ NESTED(except_vec4, 0, sp)
 /*
  * EJTAG debug exception handler.
  * The EJTAG debug exception entry point is 0xbfc00480, which
- * normally is in the boot PROM, so the boot PROM must do a
+ * normally is in the boot PROM, so the boot PROM must do an
  * unconditional jump to this vector.
  */
 NESTED(except_vec_ejtag_debug, 0, sp)
        j       ejtag_debug_handler
+#ifdef CONFIG_CPU_MICROMIPS
+        nop
+#endif
        END(except_vec_ejtag_debug)
 
        __FINIT
@@ -251,9 +260,10 @@ NESTED(except_vec_vi, 0, sp)
 FEXPORT(except_vec_vi_mori)
        ori     a0, $0, 0
 #endif /* CONFIG_MIPS_MT_SMTC */
+       PTR_LA  v1, except_vec_vi_handler
 FEXPORT(except_vec_vi_lui)
        lui     v0, 0           /* Patched */
-       j       except_vec_vi_handler
+       jr      v1
 FEXPORT(except_vec_vi_ori)
         ori    v0, 0           /* Patched */
        .set    pop
@@ -354,6 +364,9 @@ EXPORT(ejtag_debug_buffer)
  */
 NESTED(except_vec_nmi, 0, sp)
        j       nmi_handler
+#ifdef CONFIG_CPU_MICROMIPS
+        nop
+#endif
        END(except_vec_nmi)
 
        __FINIT
@@ -480,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
        .set    noreorder
        /* check if TLB contains a entry for EPC */
        MFC0    k1, CP0_ENTRYHI
-       andi    k1, 0xff        /* ASID_MASK */
+       andi    k1, 0xff        /* ASID_MASK patched at run-time!! */
        MFC0    k0, CP0_EPC
        PTR_SRL k0, _PAGE_SHIFT + 1
        PTR_SLL k0, _PAGE_SHIFT + 1
@@ -500,13 +513,35 @@ NESTED(nmi_handler, PT_SIZE, sp)
        .set    push
        .set    noat
        .set    noreorder
-       /* 0x7c03e83b: rdhwr v1,$29 */
+       /* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
+       /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
        MFC0    k1, CP0_EPC
-       lui     k0, 0x7c03
-       lw      k1, (k1)
-       ori     k0, 0xe83b
-       .set    reorder
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
+       and     k0, k1, 1
+       beqz    k0, 1f
+       xor     k1, k0
+       lhu     k0, (k1)
+       lhu     k1, 2(k1)
+       ins     k1, k0, 16, 16
+       lui     k0, 0x007d
+       b       docheck
+       ori     k0, 0x6b3c
+1:
+       lui     k0, 0x7c03
+       lw      k1, (k1)
+       ori     k0, 0xe83b
+#else
+       andi    k0, k1, 1
+       bnez    k0, handle_ri
+       lui     k0, 0x7c03
+       lw      k1, (k1)
+       ori     k0, 0xe83b
+#endif
+       .set    reorder
+docheck:
        bne     k0, k1, handle_ri       /* if not ours */
+
+isrdhwr:
        /* The insn is rdhwr.  No need to check CAUSE.BD here. */
        get_saved_sp    /* k1 := current_thread_info */
        .set    noreorder
index 485e6a961b317ea0f6778b04e57c8c08d3ad8c02..c01b307317a9635b88394d18164710262e206234 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <linux/clocksource.h>
 
 #include <asm/io.h>
 #include <asm/gic.h>
@@ -19,6 +20,8 @@
 #include <linux/hardirq.h>
 #include <asm-generic/bitops/find.h>
 
+unsigned int gic_frequency;
+unsigned int gic_present;
 unsigned long _gic_base;
 unsigned int gic_irq_base;
 unsigned int gic_irq_flags[GIC_NUM_INTRS];
@@ -30,6 +33,39 @@ static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
 static struct gic_pending_regs pending_regs[NR_CPUS];
 static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
 
+#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
+cycle_t gic_read_count(void)
+{
+       unsigned int hi, hi2, lo;
+
+       do {
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
+       } while (hi2 != hi);
+
+       return (((cycle_t) hi) << 32) + lo;
+}
+
+void gic_write_compare(cycle_t cnt)
+{
+       GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
+                               (int)(cnt >> 32));
+       GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
+                               (int)(cnt & 0xffffffff));
+}
+
+cycle_t gic_read_compare(void)
+{
+       unsigned int hi, lo;
+
+       GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
+       GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
+
+       return (((cycle_t) hi) << 32) + lo;
+}
+#endif
+
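
The hi/lo/hi2 loop in gic_read_count() is the standard way to read a free-running 64-bit counter exposed as two 32-bit registers: if the low word rolls over between the two reads, the high word changes, the re-read detects it and the sequence is retried, so the snapshot is always consistent. The pattern reduced to its core (read_hi()/read_lo() are hypothetical stand-ins for the GICREAD() accesses):

/* Sketch only: torn-read-safe 64-bit read of a split counter. */
static unsigned long long read_split_counter(void)
{
        unsigned int hi, hi2, lo;

        do {
                hi  = read_hi();
                lo  = read_lo();
                hi2 = read_hi();   /* changed => low word wrapped in between */
        } while (hi2 != hi);

        return ((unsigned long long)hi << 32) | lo;
}
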
 unsigned int gic_get_timer_pending(void)
 {
        unsigned int vpe_pending;
@@ -116,6 +152,17 @@ static void __init vpe_local_setup(unsigned int numvpes)
        }
 }
 
+unsigned int gic_compare_int(void)
+{
+       unsigned int pending;
+
+       GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
+       if (pending & GIC_VPE_PEND_CMP_MSK)
+               return 1;
+       else
+               return 0;
+}
+
 unsigned int gic_get_int(void)
 {
        unsigned int i;
index d1d576b765f5ecdd86e19c63ade4634d326687fe..0b29646bcee770533e4a0d25cf1a50b4cc167b7b 100644 (file)
@@ -165,10 +165,3 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
        return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
                             merge_64(len_a4, len_a5));
 }
-
-SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
-               u64, a3, u64, a4, int, dfd, const char  __user *, pathname)
-{
-       return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
-                                dfd, pathname);
-}
index 411a058d2c536c7709edfcb9476b4fa185034942..876097529697250254b9c3fd58492348703381e3 100644 (file)
@@ -11,9 +11,9 @@
 #include <linux/slab.h>
 
 #include <asm/mips_machine.h>
+#include <asm/prom.h>
 
 static struct mips_machine *mips_machine __initdata;
-static char *mips_machine_name = "Unknown";
 
 #define for_each_machine(mach) \
        for ((mach) = (struct mips_machine *)&__mips_machines_start; \
@@ -21,25 +21,6 @@ static char *mips_machine_name = "Unknown";
             (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \
             (mach)++)
 
-__init void mips_set_machine_name(const char *name)
-{
-       char *p;
-
-       if (name == NULL)
-               return;
-
-       p = kstrdup(name, GFP_KERNEL);
-       if (!p)
-               pr_err("MIPS: no memory for machine_name\n");
-
-       mips_machine_name = p;
-}
-
-char *mips_get_machine_name(void)
-{
-       return mips_machine_name;
-}
-
 __init int mips_machtype_setup(char *id)
 {
        struct mips_machine *mach;
@@ -79,7 +60,6 @@ __init void mips_machine_setup(void)
                return;
 
        mips_set_machine_name(mips_machine->mach_name);
-       pr_info("MIPS: machine is %s\n", mips_machine_name);
 
        if (mips_machine->mach_setup)
                mips_machine->mach_setup();
index 7a54f74b7818ad402a70221eb2090dc91f5643f8..a3e461408b7e830758d3b3dfeed661573277131e 100644 (file)
@@ -12,7 +12,7 @@
 #include <asm/cpu-features.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
-#include <asm/mips_machine.h>
+#include <asm/prom.h>
 
 unsigned int vced_count, vcei_count;
 
@@ -99,6 +99,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (cpu_has_vz)         seq_printf(m, "%s", " vz");
        seq_printf(m, "\n");
 
+       if (cpu_has_mmips) {
+               seq_printf(m, "micromips kernel\t: %s\n",
+                     (read_c0_config3() & MIPS_CONF3_ISA_OE) ?  "yes" : "no");
+       }
        seq_printf(m, "shadow register sets\t: %d\n",
                      cpu_data[n].srsets);
        seq_printf(m, "kscratch registers\t: %d\n",
index cfc742d75b7f3a74f1ace85269d55e0e0b85a48d..eb902c1f0cad4c50031836ad73225f620be8d7d5 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  * Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2013  Imagination Technologies Ltd.
  */
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -225,34 +226,115 @@ struct mips_frame_info {
 
 static inline int is_ra_save_ins(union mips_instruction *ip)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       union mips_instruction mmi;
+
+       /*
+        * swsp ra,offset
+        * swm16 reglist,offset(sp)
+        * swm32 reglist,offset(sp)
+        * sw32 ra,offset(sp)
+        * jraddiusp - NOT SUPPORTED
+        *
+        * microMIPS is way more fun...
+        */
+       if (mm_insn_16bit(ip->halfword[0])) {
+               mmi.word = (ip->halfword[0] << 16);
+               return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
+                        mmi.mm16_r5_format.rt == 31) ||
+                       (mmi.mm16_m_format.opcode == mm_pool16c_op &&
+                        mmi.mm16_m_format.func == mm_swm16_op));
+       }
+       else {
+               mmi.halfword[0] = ip->halfword[1];
+               mmi.halfword[1] = ip->halfword[0];
+               return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
+                        mmi.mm_m_format.rd > 9 &&
+                        mmi.mm_m_format.base == 29 &&
+                        mmi.mm_m_format.func == mm_swm32_func) ||
+                       (mmi.i_format.opcode == mm_sw32_op &&
+                        mmi.i_format.rs == 29 &&
+                        mmi.i_format.rt == 31));
+       }
+#else
        /* sw / sd $ra, offset($sp) */
        return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
                ip->i_format.rs == 29 &&
                ip->i_format.rt == 31;
+#endif
 }
 
 static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * jr16,jrc,jalr16,jalr16
+        * jal
+        * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
+        * jraddiusp - NOT SUPPORTED
+        *
+        * microMIPS is kind of more fun...
+        */
+       union mips_instruction mmi;
+
+       mmi.word = (ip->halfword[0] << 16);
+
+       if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
+           (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
+           ip->j_format.opcode == mm_jal32_op)
+               return 1;
+       if (ip->r_format.opcode != mm_pool32a_op ||
+                       ip->r_format.func != mm_pool32axf_op)
+               return 0;
+       return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
+#else
        if (ip->j_format.opcode == jal_op)
                return 1;
        if (ip->r_format.opcode != spec_op)
                return 0;
        return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
+#endif
 }
 
 static inline int is_sp_move_ins(union mips_instruction *ip)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * addiusp -imm
+        * addius5 sp,-imm
+        * addiu32 sp,sp,-imm
+        * jraddiusp - NOT SUPPORTED
+        *
+        * microMIPS is not more fun...
+        */
+       if (mm_insn_16bit(ip->halfword[0])) {
+               union mips_instruction mmi;
+
+               mmi.word = (ip->halfword[0] << 16);
+               return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
+                        mmi.mm16_r3_format.simmediate && mm_addiusp_func) ||
+                       (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
+                        mmi.mm16_r5_format.rt == 29));
+       }
+       return (ip->mm_i_format.opcode == mm_addiu32_op &&
+                ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
+#else
        /* addiu/daddiu sp,sp,-imm */
        if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
                return 0;
        if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
                return 1;
+#endif
        return 0;
 }
 
 static int get_frame_info(struct mips_frame_info *info)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       union mips_instruction *ip = (void *) (((char *) info->func) - 1);
+#else
        union mips_instruction *ip = info->func;
+#endif
        unsigned max_insns = info->func_size / sizeof(union mips_instruction);
        unsigned i;
 
@@ -272,7 +354,26 @@ static int get_frame_info(struct mips_frame_info *info)
                        break;
                if (!info->frame_size) {
                        if (is_sp_move_ins(ip))
+                       {
+#ifdef CONFIG_CPU_MICROMIPS
+                               if (mm_insn_16bit(ip->halfword[0]))
+                               {
+                                       unsigned short tmp;
+
+                                       if (ip->halfword[0] & mm_addiusp_func)
+                                       {
+                                               tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
+                                               info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
+                                       } else {
+                                               tmp = (ip->halfword[0] >> 1);
+                                               info->frame_size = -(signed short)(tmp & 0xf);
+                                       }
+                                       ip = (void *) &ip->halfword[1];
+                                       ip--;
+                               } else
+#endif
                                info->frame_size = - ip->i_format.simmediate;
+                       }
                        continue;
                }
                if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
index 028f6f837ef9c975697cd763fa685126a5be6b48..5712bb5322454f1a98d169e58490e77b62e149be 100644 (file)
 #include <asm/page.h>
 #include <asm/prom.h>
 
+static char mips_machine_name[64] = "Unknown";
+
+__init void mips_set_machine_name(const char *name)
+{
+       if (name == NULL)
+               return;
+
+       strncpy(mips_machine_name, name, sizeof(mips_machine_name));
+       pr_info("MIPS: machine is %s\n", mips_get_machine_name());
+}
+
+char *mips_get_machine_name(void)
+{
+       return mips_machine_name;
+}
+
+#ifdef CONFIG_OF
 int __init early_init_dt_scan_memory_arch(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
@@ -50,6 +67,18 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start,
 }
 #endif
 
+int __init early_init_dt_scan_model(unsigned long node, const char *uname,
+                                   int depth, void *data)
+{
+       if (!depth) {
+               char *model = of_get_flat_dt_prop(node, "model", NULL);
+
+               if (model)
+                       mips_set_machine_name(model);
+       }
+       return 0;
+}
+
 void __init early_init_devtree(void *params)
 {
        /* Setup flat device-tree pointer */
@@ -65,6 +94,9 @@ void __init early_init_devtree(void *params)
        /* Scan memory nodes */
        of_scan_flat_dt(early_init_dt_scan_root, NULL);
        of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
+
+       /* try to load the mips machine name */
+       of_scan_flat_dt(early_init_dt_scan_model, NULL);
 }
 
 void __init __dt_setup_arch(struct boot_param_header *bph)
@@ -79,3 +111,4 @@ void __init __dt_setup_arch(struct boot_param_header *bph)
 
        early_init_devtree(initial_boot_params);
 }
+#endif
index 9ea29649fc28b6c119e965e750fe77efeaef77ec..9b36424b03c5f41aa48312c770f90e69a43f6fae 100644 (file)
@@ -138,9 +138,18 @@ stackargs:
 5:     jr      t1
         sw     t5, 16(sp)              # argument #5 to ksp
 
+#ifdef CONFIG_CPU_MICROMIPS
        sw      t8, 28(sp)              # argument #8 to ksp
+       nop
        sw      t7, 24(sp)              # argument #7 to ksp
+       nop
        sw      t6, 20(sp)              # argument #6 to ksp
+       nop
+#else
+       sw      t8, 28(sp)              # argument #8 to ksp
+       sw      t7, 24(sp)              # argument #7 to ksp
+       sw      t6, 20(sp)              # argument #6 to ksp
+#endif
 6:     j       stack_done              # go back
         nop
        .set    pop
index 103bfe570fe8ee375d94732b074efee7723ec550..74f485d3c0ef41bb7f73204f06a7a801d0465f13 100644 (file)
@@ -529,7 +529,7 @@ sys_call_table:
        PTR     sys_accept4
        PTR     compat_sys_recvmmsg             /* 4335 */
        PTR     sys_fanotify_init
-       PTR     sys_32_fanotify_mark
+       PTR     compat_sys_fanotify_mark
        PTR     sys_prlimit64
        PTR     sys_name_to_handle_at
        PTR     compat_sys_open_by_handle_at    /* 4340 */
index 4c774d5d50874ab515e023809ffd48fcb611c053..c7f90519e58ce0ade98dd826752fbaa1d2287da0 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/pfn.h>
 #include <linux/debugfs.h>
 #include <linux/kexec.h>
+#include <linux/sizes.h>
 
 #include <asm/addrspace.h>
 #include <asm/bootinfo.h>
@@ -77,6 +78,8 @@ EXPORT_SYMBOL(mips_io_port_base);
 static struct resource code_resource = { .name = "Kernel code", };
 static struct resource data_resource = { .name = "Kernel data", };
 
+static void *detect_magic __initdata = detect_memory_region;
+
 void __init add_memory_region(phys_t start, phys_t size, long type)
 {
        int x = boot_mem_map.nr_map;
@@ -122,6 +125,25 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
        boot_mem_map.nr_map++;
 }
 
+void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max)
+{
+       void *dm = &detect_magic;
+       phys_t size;
+
+       for (size = sz_min; size < sz_max; size <<= 1) {
+               if (!memcmp(dm, dm + size, sizeof(detect_magic)))
+                       break;
+       }
+
+       pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
+               ((unsigned long long) size) / SZ_1M,
+               (unsigned long long) start,
+               ((unsigned long long) sz_min) / SZ_1M,
+               ((unsigned long long) sz_max) / SZ_1M);
+
+       add_memory_region(start, size, BOOT_MEM_RAM);
+}
+
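
detect_memory_region() sizes RAM by exploiting address aliasing: detect_magic points at a known object inside the kernel image, and for each candidate power-of-two size that object is compared against the same location shifted by the candidate. Once the candidate exceeds the RAM actually fitted, the address wraps back onto the original cell, the two compare equal and the loop stops at the real size. The probe in isolation (hypothetical helper mirroring the loop above):

/* Sketch: probe installed RAM by looking for the wrap-around alias of a
 * known marker, as detect_memory_region() does with 'detect_magic'. */
static unsigned long probe_ram_size(const void *marker, size_t marker_len,
                                    unsigned long sz_min, unsigned long sz_max)
{
        unsigned long size;

        for (size = sz_min; size < sz_max; size <<= 1)
                if (!memcmp(marker, (const char *)marker + size, marker_len))
                        break;          /* marker aliases back: RAM ends here */

        return size;                    /* reaches sz_max if no alias is seen */
}
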
 static void __init print_memory_map(void)
 {
        int i;
index b5e88fd832775009a774b727bfcb489667e3cca8..fd3ef2c2afbc37732d9bedcb9fff59d1dda935ea 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/war.h>
 #include <asm/vdso.h>
 #include <asm/dsp.h>
+#include <asm/inst.h>
 
 #include "signal-common.h"
 
@@ -480,7 +481,15 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
        sigset_t *oldset = sigmask_to_save();
        int ret;
        struct mips_abi *abi = current->thread.abi;
+#ifdef CONFIG_CPU_MICROMIPS
+       void *vdso;
+       unsigned int tmp = (unsigned int)current->mm->context.vdso;
+
+       set_isa16_mode(tmp);
+       vdso = (void *)tmp;
+#else
        void *vdso = current->mm->context.vdso;
+#endif
 
        if (regs->regs[0]) {
                switch(regs->regs[2]) {
index bfede063d96a52e3dd8f12aedf67f15468d7c3aa..3e5164c11cacabe66ea021cb29c979b93d920e00 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/mips_mt.h>
+#include <asm/gic.h>
 
 static void __init smvp_copy_vpe_config(void)
 {
@@ -151,8 +152,6 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 static void __cpuinit vsmp_init_secondary(void)
 {
 #ifdef CONFIG_IRQ_GIC
-       extern int gic_present;
-
        /* This is Malta specific: IPI,performance and timer interrupts */
        if (gic_present)
                change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
index aee04af213c5895d5b273b4f17fe896621707705..c17619fe18e32a9f23a5df7e18b0b943b5d0cf2c 100644 (file)
@@ -83,6 +83,7 @@ static inline void set_cpu_sibling_map(int cpu)
 }
 
 struct plat_smp_ops *mp_ops;
+EXPORT_SYMBOL(mp_ops);
 
 __cpuinit void register_smp_ops(struct plat_smp_ops *ops)
 {
index 76016ac0a9c8818960e61f055ac47e4d55a9bdf2..2866863a39df269da6ba9710acf96808197e33ea 100644 (file)
@@ -49,6 +49,9 @@ CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
        .text
        .align 5
 FEXPORT(__smtc_ipi_vector)
+#ifdef CONFIG_CPU_MICROMIPS
+       nop
+#endif
        .set    noat
        /* Disable thread scheduling to make Status update atomic */
        DMT     27                                      # dmt   k1
index 7186222dc5bb285a4ff74a23a5851dd03b9356c1..31d22f3121c98bb8c0b57488c60c58d4c0ca5b4c 100644 (file)
@@ -111,7 +111,7 @@ static int vpe0limit;
 static int ipibuffers;
 static int nostlb;
 static int asidmask;
-unsigned long smtc_asid_mask = 0xff;
+unsigned int smtc_asid_mask = 0xff;
 
 static int __init vpe0tcs(char *str)
 {
@@ -1395,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
        asid = asid_cache(cpu);
 
        do {
-               if (!((asid += ASID_INC) & ASID_MASK) ) {
+               if (!ASID_MASK(ASID_INC(asid))) {
                        if (cpu_has_vtag_icache)
                                flush_icache_all();
                        /* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
                                                mips_ihb();
                                        }
                                        tcstat = read_tc_c0_tcstatus();
-                                       smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
+                                       smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
                                        if (!prevhalt)
                                                write_tc_c0_tchalt(0);
                                }
@@ -1423,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
                                asid = ASID_FIRST_VERSION;
                        local_flush_tlb_all();  /* start new asid cycle */
                }
-       } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
+       } while (smtc_live_asid[tlb][ASID_MASK(asid)]);
 
        /*
         * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
                tlb_read();
                ehb();
                ehi = read_c0_entryhi();
-               if ((ehi & ASID_MASK) == asid) {
+               if (ASID_MASK(ehi) == asid) {
                    /*
                     * Invalidate only entries with specified ASID,
                     * making sure all entries differ.
index 25225515451f8f348d4f2ffa91a4c64c9b004a82..77cff1f6d050cb92e21475ae52a9ef2f037b5bc5 100644 (file)
@@ -8,8 +8,8 @@
  * Copyright (C) 1998 Ulf Carlsson
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000, 01 MIPS Technologies, Inc.
  * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #include <linux/bug.h>
 #include <linux/compiler.h>
@@ -60,9 +60,9 @@ extern void check_wait(void);
 extern asmlinkage void r4k_wait(void);
 extern asmlinkage void rollback_handle_int(void);
 extern asmlinkage void handle_int(void);
-extern asmlinkage void handle_tlbm(void);
-extern asmlinkage void handle_tlbl(void);
-extern asmlinkage void handle_tlbs(void);
+extern u32 handle_tlbl[];
+extern u32 handle_tlbs[];
+extern u32 handle_tlbm[];
 extern asmlinkage void handle_adel(void);
 extern asmlinkage void handle_ades(void);
 extern asmlinkage void handle_ibe(void);
@@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void);
 extern asmlinkage void handle_mcheck(void);
 extern asmlinkage void handle_reserved(void);
 
-extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
-                                   struct mips_fpu_struct *ctx, int has_fpu,
-                                   void *__user *fault_addr);
-
 void (*board_be_init)(void);
 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 void (*board_nmi_handler_setup)(void);
@@ -482,6 +478,12 @@ asmlinkage void do_be(struct pt_regs *regs)
 #define SYNC   0x0000000f
 #define RDHWR  0x0000003b
 
+/*  microMIPS definitions   */
+#define MM_POOL32A_FUNC 0xfc00ffff
+#define MM_RDHWR        0x00006b3c
+#define MM_RS           0x001f0000
+#define MM_RT           0x03e00000
+
 /*
  * The ll_bit is cleared by r*_switch.S
  */
@@ -596,42 +598,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
  * Simulate trapping 'rdhwr' instructions to provide user accessible
  * registers not implemented in hardware.
  */
-static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
+static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 {
        struct thread_info *ti = task_thread_info(current);
 
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+                       1, regs, 0);
+       switch (rd) {
+       case 0:         /* CPU number */
+               regs->regs[rt] = smp_processor_id();
+               return 0;
+       case 1:         /* SYNCI length */
+               regs->regs[rt] = min(current_cpu_data.dcache.linesz,
+                                    current_cpu_data.icache.linesz);
+               return 0;
+       case 2:         /* Read count register */
+               regs->regs[rt] = read_c0_count();
+               return 0;
+       case 3:         /* Count register resolution */
+               switch (current_cpu_data.cputype) {
+               case CPU_20KC:
+               case CPU_25KF:
+                       regs->regs[rt] = 1;
+                       break;
+               default:
+                       regs->regs[rt] = 2;
+               }
+               return 0;
+       case 29:
+               regs->regs[rt] = ti->tp_value;
+               return 0;
+       default:
+               return -1;
+       }
+}
+
+static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
+{
        if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
                int rd = (opcode & RD) >> 11;
                int rt = (opcode & RT) >> 16;
-               perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                               1, regs, 0);
-               switch (rd) {
-               case 0:         /* CPU number */
-                       regs->regs[rt] = smp_processor_id();
-                       return 0;
-               case 1:         /* SYNCI length */
-                       regs->regs[rt] = min(current_cpu_data.dcache.linesz,
-                                            current_cpu_data.icache.linesz);
-                       return 0;
-               case 2:         /* Read count register */
-                       regs->regs[rt] = read_c0_count();
-                       return 0;
-               case 3:         /* Count register resolution */
-                       switch (current_cpu_data.cputype) {
-                       case CPU_20KC:
-                       case CPU_25KF:
-                               regs->regs[rt] = 1;
-                               break;
-                       default:
-                               regs->regs[rt] = 2;
-                       }
-                       return 0;
-               case 29:
-                       regs->regs[rt] = ti->tp_value;
-                       return 0;
-               default:
-                       return -1;
-               }
+
+               simulate_rdhwr(regs, rd, rt);
+               return 0;
+       }
+
+       /* Not ours.  */
+       return -1;
+}
+
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
+{
+       if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
+               int rd = (opcode & MM_RS) >> 16;
+               int rt = (opcode & MM_RT) >> 21;
+               simulate_rdhwr(regs, rd, rt);
+               return 0;
        }
 
        /* Not ours.  */
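
RDHWR register 29 is how MIPS user space reads its thread pointer; on cores that do not implement the hardware register the instruction traps and the handlers above emulate it, now for both the classic encoding and the microMIPS POOL32A one. The MM_RS/MM_RT masks pull the two register fields out of the 32-bit microMIPS word; the extraction on its own (hypothetical helper, masks as defined above):

/* Sketch: field extraction for the microMIPS RDHWR encoding, matching
 * simulate_rdhwr_mm() above.  Returns 0 and fills rd/rt on a match. */
static int decode_mm_rdhwr(unsigned int opcode, int *rd, int *rt)
{
        if ((opcode & MM_POOL32A_FUNC) != MM_RDHWR)
                return -1;                      /* not an rdhwr */

        *rd = (opcode & MM_RS) >> 16;           /* hardware register number */
        *rt = (opcode & MM_RT) >> 21;           /* GPR receiving the value */
        return 0;
}
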
@@ -662,7 +684,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
        force_sig_info(SIGFPE, &info, current);
 }
 
-static int process_fpemu_return(int sig, void __user *fault_addr)
+int process_fpemu_return(int sig, void __user *fault_addr)
 {
        if (sig == SIGSEGV || sig == SIGBUS) {
                struct siginfo si = {0};
@@ -813,9 +835,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
 asmlinkage void do_bp(struct pt_regs *regs)
 {
        unsigned int opcode, bcode;
-
-       if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-               goto out_sigsegv;
+       unsigned long epc;
+       u16 instr[2];
+
+       if (get_isa16_mode(regs->cp0_epc)) {
+               /* Calculate EPC. */
+               epc = exception_epc(regs);
+               if (cpu_has_mmips) {
+                       if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
+                           (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
+                               goto out_sigsegv;
+                   opcode = (instr[0] << 16) | instr[1];
+               } else {
+                   /* MIPS16e mode */
+                   if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
+                               goto out_sigsegv;
+                   bcode = (instr[0] >> 6) & 0x3f;
+                   do_trap_or_bp(regs, bcode, "Break");
+                   return;
+               }
+       } else {
+               if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+                       goto out_sigsegv;
+       }
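
The fetch above relies on the ISA-mode helpers: on MIPS16e/microMIPS processors the low bit of the exception PC carries the ISA mode, so it is tested with get_isa16_mode() and stripped with msk_isa16_mode() before the address is dereferenced. A plausible shape of those helpers (the real definitions live in the MIPS headers and may differ in detail):

        /* Assumed sketch: bit 0 of EPC selects the compressed ISA. */
        #define get_isa16_mode(x)       ((x) & 0x1)
        #define msk_isa16_mode(x)       ((x) & ~0x1UL)
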
 
        /*
         * There is the ancient bug in the MIPS assemblers that the break
@@ -856,13 +898,22 @@ out_sigsegv:
 asmlinkage void do_tr(struct pt_regs *regs)
 {
        unsigned int opcode, tcode = 0;
+       u16 instr[2];
+       unsigned long epc = exception_epc(regs);
 
-       if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-               goto out_sigsegv;
+       if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
+               (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
+                       goto out_sigsegv;
+       opcode = (instr[0] << 16) | instr[1];
 
        /* Immediate versions don't provide a code.  */
-       if (!(opcode & OPCODE))
-               tcode = ((opcode >> 6) & ((1 << 10) - 1));
+       if (!(opcode & OPCODE)) {
+               if (get_isa16_mode(regs->cp0_epc))
+                       /* microMIPS */
+                       tcode = (opcode >> 12) & 0x1f;
+               else
+                       tcode = ((opcode >> 6) & ((1 << 10) - 1));
+       }
 
        do_trap_or_bp(regs, tcode, "Trap");
        return;
@@ -875,6 +926,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
 {
        unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
        unsigned long old_epc = regs->cp0_epc;
+       unsigned long old31 = regs->regs[31];
        unsigned int opcode = 0;
        int status = -1;
 
@@ -887,23 +939,37 @@ asmlinkage void do_ri(struct pt_regs *regs)
        if (unlikely(compute_return_epc(regs) < 0))
                return;
 
-       if (unlikely(get_user(opcode, epc) < 0))
-               status = SIGSEGV;
+       if (get_isa16_mode(regs->cp0_epc)) {
+               unsigned short mmop[2] = { 0 };
 
-       if (!cpu_has_llsc && status < 0)
-               status = simulate_llsc(regs, opcode);
+               if (unlikely(get_user(mmop[0], epc) < 0))
+                       status = SIGSEGV;
+               if (unlikely(get_user(mmop[1], epc) < 0))
+                       status = SIGSEGV;
+               opcode = (mmop[0] << 16) | mmop[1];
 
-       if (status < 0)
-               status = simulate_rdhwr(regs, opcode);
+               if (status < 0)
+                       status = simulate_rdhwr_mm(regs, opcode);
+       } else {
+               if (unlikely(get_user(opcode, epc) < 0))
+                       status = SIGSEGV;
 
-       if (status < 0)
-               status = simulate_sync(regs, opcode);
+               if (!cpu_has_llsc && status < 0)
+                       status = simulate_llsc(regs, opcode);
+
+               if (status < 0)
+                       status = simulate_rdhwr_normal(regs, opcode);
+
+               if (status < 0)
+                       status = simulate_sync(regs, opcode);
+       }
 
        if (status < 0)
                status = SIGILL;
 
        if (unlikely(status > 0)) {
                regs->cp0_epc = old_epc;                /* Undo skip-over.  */
+               regs->regs[31] = old31;
                force_sig(status, current);
        }
 }
@@ -973,7 +1039,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
        unsigned int __user *epc;
-       unsigned long old_epc;
+       unsigned long old_epc, old31;
        unsigned int opcode;
        unsigned int cpid;
        int status;
@@ -987,26 +1053,41 @@ asmlinkage void do_cpu(struct pt_regs *regs)
        case 0:
                epc = (unsigned int __user *)exception_epc(regs);
                old_epc = regs->cp0_epc;
+               old31 = regs->regs[31];
                opcode = 0;
                status = -1;
 
                if (unlikely(compute_return_epc(regs) < 0))
                        return;
 
-               if (unlikely(get_user(opcode, epc) < 0))
-                       status = SIGSEGV;
+               if (get_isa16_mode(regs->cp0_epc)) {
+                       unsigned short mmop[2] = { 0 };
 
-               if (!cpu_has_llsc && status < 0)
-                       status = simulate_llsc(regs, opcode);
+                       if (unlikely(get_user(mmop[0], epc) < 0))
+                               status = SIGSEGV;
+                       if (unlikely(get_user(mmop[1], epc) < 0))
+                               status = SIGSEGV;
+                       opcode = (mmop[0] << 16) | mmop[1];
 
-               if (status < 0)
-                       status = simulate_rdhwr(regs, opcode);
+                       if (status < 0)
+                               status = simulate_rdhwr_mm(regs, opcode);
+               } else {
+                       if (unlikely(get_user(opcode, epc) < 0))
+                               status = SIGSEGV;
+
+                       if (!cpu_has_llsc && status < 0)
+                               status = simulate_llsc(regs, opcode);
+
+                       if (status < 0)
+                               status = simulate_rdhwr_normal(regs, opcode);
+               }
 
                if (status < 0)
                        status = SIGILL;
 
                if (unlikely(status > 0)) {
                        regs->cp0_epc = old_epc;        /* Undo skip-over.  */
+                       regs->regs[31] = old31;
                        force_sig(status, current);
                }
 
@@ -1320,7 +1401,7 @@ asmlinkage void cache_parity_error(void)
 void ejtag_exception_handler(struct pt_regs *regs)
 {
        const int field = 2 * sizeof(unsigned long);
-       unsigned long depc, old_epc;
+       unsigned long depc, old_epc, old_ra;
        unsigned int debug;
 
        printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
@@ -1335,10 +1416,12 @@ void ejtag_exception_handler(struct pt_regs *regs)
                 * calculation.
                 */
                old_epc = regs->cp0_epc;
+               old_ra = regs->regs[31];
                regs->cp0_epc = depc;
-               __compute_return_epc(regs);
+               compute_return_epc(regs);
                depc = regs->cp0_epc;
                regs->cp0_epc = old_epc;
+               regs->regs[31] = old_ra;
        } else
                depc += 4;
        write_c0_depc(depc);
@@ -1377,11 +1460,27 @@ unsigned long vi_handlers[64];
 void __init *set_except_vector(int n, void *addr)
 {
        unsigned long handler = (unsigned long) addr;
-       unsigned long old_handler = exception_handlers[n];
+       unsigned long old_handler;
+
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * Only the TLB handlers are cache aligned with an even
+        * address. All other handlers are on an odd address and
+        * require no modification. Otherwise, MIPS32 mode will
+        * be entered when handling any TLB exceptions. That
+        * would be bad...since we must stay in microMIPS mode.
+        */
+       if (!(handler & 0x1))
+               handler |= 1;
+#endif
+       old_handler = xchg(&exception_handlers[n], handler);
 
-       exception_handlers[n] = handler;
        if (n == 0 && cpu_has_divec) {
+#ifdef CONFIG_CPU_MICROMIPS
+               unsigned long jump_mask = ~((1 << 27) - 1);
+#else
                unsigned long jump_mask = ~((1 << 28) - 1);
+#endif
                u32 *buf = (u32 *)(ebase + 0x200);
                unsigned int k0 = 26;
                if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
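
The two jump_mask values above encode the reach of a direct jump: a classic MIPS32 j packs a 26-bit index shifted left by two, so it can only land within the current 256 MB (1 << 28) segment, while the microMIPS j32 shifts its 26-bit index by one, halving the reach to 128 MB (1 << 27). A minimal sketch of the same range check under those assumptions (the helper name is illustrative, not part of the patch):

        /* Can a direct jump placed at 'vec' reach 'target'? */
        static int jump_in_range(unsigned long target, unsigned long vec,
                                 int micromips)
        {
                unsigned long jump_mask = micromips ? ~((1UL << 27) - 1)
                                                    : ~((1UL << 28) - 1);

                return (target & jump_mask) == (vec & jump_mask);
        }
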
@@ -1397,7 +1496,7 @@ void __init *set_except_vector(int n, void *addr)
        return (void *)old_handler;
 }
 
-static asmlinkage void do_default_vi(void)
+static void do_default_vi(void)
 {
        show_regs(get_irq_regs());
        panic("Caught unexpected vectored interrupt.");
@@ -1408,17 +1507,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
        unsigned long handler;
        unsigned long old_handler = vi_handlers[n];
        int srssets = current_cpu_data.srsets;
-       u32 *w;
+       u16 *h;
        unsigned char *b;
 
        BUG_ON(!cpu_has_veic && !cpu_has_vint);
+       BUG_ON((n < 0) || (n > 9));
 
        if (addr == NULL) {
                handler = (unsigned long) do_default_vi;
                srs = 0;
        } else
                handler = (unsigned long) addr;
-       vi_handlers[n] = (unsigned long) addr;
+       vi_handlers[n] = handler;
 
        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
 
@@ -1437,9 +1537,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
        if (srs == 0) {
                /*
                 * If no shadow set is selected then use the default handler
-                * that does normal register saving and standard interrupt exit
+                * that does normal register saving and standard interrupt exit
                 */
-
                extern char except_vec_vi, except_vec_vi_lui;
                extern char except_vec_vi_ori, except_vec_vi_end;
                extern char rollback_except_vec_vi;
@@ -1452,11 +1551,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
                 * Status.IM bit to be masked before going there.
                 */
                extern char except_vec_vi_mori;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+               const int mori_offset = &except_vec_vi_mori - vec_start + 2;
+#else
                const int mori_offset = &except_vec_vi_mori - vec_start;
+#endif
 #endif /* CONFIG_MIPS_MT_SMTC */
-               const int handler_len = &except_vec_vi_end - vec_start;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+               const int lui_offset = &except_vec_vi_lui - vec_start + 2;
+               const int ori_offset = &except_vec_vi_ori - vec_start + 2;
+#else
                const int lui_offset = &except_vec_vi_lui - vec_start;
                const int ori_offset = &except_vec_vi_ori - vec_start;
+#endif
+               const int handler_len = &except_vec_vi_end - vec_start;
 
                if (handler_len > VECTORSPACING) {
                        /*
@@ -1466,30 +1574,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
                        panic("VECTORSPACING too small");
                }
 
-               memcpy(b, vec_start, handler_len);
+               set_handler(((unsigned long)b - ebase), vec_start,
+#ifdef CONFIG_CPU_MICROMIPS
+                               (handler_len - 1));
+#else
+                               handler_len);
+#endif
 #ifdef CONFIG_MIPS_MT_SMTC
                BUG_ON(n > 7);  /* Vector index %d exceeds SMTC maximum. */
 
-               w = (u32 *)(b + mori_offset);
-               *w = (*w & 0xffff0000) | (0x100 << n);
+               h = (u16 *)(b + mori_offset);
+               *h = (0x100 << n);
 #endif /* CONFIG_MIPS_MT_SMTC */
-               w = (u32 *)(b + lui_offset);
-               *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
-               w = (u32 *)(b + ori_offset);
-               *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
+               h = (u16 *)(b + lui_offset);
+               *h = (handler >> 16) & 0xffff;
+               h = (u16 *)(b + ori_offset);
+               *h = (handler & 0xffff);
                local_flush_icache_range((unsigned long)b,
                                         (unsigned long)(b+handler_len));
        }
        else {
                /*
-                * In other cases jump directly to the interrupt handler
-                *
-                * It is the handlers responsibility to save registers if required
-                * (eg hi/lo) and return from the exception using "eret"
+                * In other cases jump directly to the interrupt handler. It
+                * is the handler's responsibility to save registers if required
+                * (eg hi/lo) and return from the exception using "eret".
                 */
-               w = (u32 *)b;
-               *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
-               *w = 0;
+               u32 insn;
+
+               h = (u16 *)b;
+               /* j handler */
+#ifdef CONFIG_CPU_MICROMIPS
+               insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
+#else
+               insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
+#endif
+               h[0] = (insn >> 16) & 0xffff;
+               h[1] = insn & 0xffff;
+               h[2] = 0;
+               h[3] = 0;
                local_flush_icache_range((unsigned long)b,
                                         (unsigned long)(b+8));
        }
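
Because microMIPS code is fetched as a stream of 16-bit units, the vector is now patched halfword by halfword (the u16 *h writes above) instead of with 32-bit stores. A compact sketch of the same "j handler" emission, reusing the encodings from the patch (0x08000000 for the classic j, 0xd4000000 for the microMIPS j32); the helper name and parameter are illustrative:

        static void emit_jump(u16 *slot, unsigned long handler, int micromips)
        {
                u32 insn;

                if (micromips)
                        insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
                else
                        insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);

                slot[0] = (insn >> 16) & 0xffff;        /* high halfword first */
                slot[1] = insn & 0xffff;
                slot[2] = 0;                            /* padding, as in the patch */
                slot[3] = 0;
        }
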
@@ -1534,6 +1656,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
        unsigned int cpu = smp_processor_id();
        unsigned int status_set = ST0_CU0;
        unsigned int hwrena = cpu_hwrena_impl_bits;
+       unsigned long asid = 0;
 #ifdef CONFIG_MIPS_MT_SMTC
        int secondaryTC = 0;
        int bootTC = (cpu == 0);
@@ -1617,8 +1740,9 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
        }
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-       if (!cpu_data[cpu].asid_cache)
-               cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
+       asid = ASID_FIRST_VERSION;
+       cpu_data[cpu].asid_cache = asid;
+       TLBMISS_HANDLER_SETUP();
 
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
@@ -1648,7 +1772,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
 /* Install CPU exception handler */
 void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
+#else
        memcpy((void *)(ebase + offset), addr, size);
+#endif
        local_flush_icache_range(ebase + offset, ebase + offset + size);
 }
 
@@ -1682,8 +1810,9 @@ __setup("rdhwr_noopt", set_rdhwr_noopt);
 
 void __init trap_init(void)
 {
-       extern char except_vec3_generic, except_vec3_r4000;
+       extern char except_vec3_generic;
        extern char except_vec4;
+       extern char except_vec3_r4000;
        unsigned long i;
        int rollback;
 
@@ -1700,7 +1829,12 @@ void __init trap_init(void)
                ebase = (unsigned long)
                        __alloc_bootmem(size, 1 << fls(size), 0);
        } else {
-               ebase = CKSEG0;
+#ifdef CONFIG_KVM_GUEST
+#define KVM_GUEST_KSEG0     0x40000000
+        ebase = KVM_GUEST_KSEG0;
+#else
+        ebase = CKSEG0;
+#endif
                if (cpu_has_mips_r2)
                        ebase += (read_c0_ebase() & 0x3ffff000);
        }
@@ -1816,11 +1950,11 @@ void __init trap_init(void)
 
        if (cpu_has_vce)
                /* Special exception: R4[04]00 uses also the divec space. */
-               memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
+               set_handler(0x180, &except_vec3_r4000, 0x100);
        else if (cpu_has_4kex)
-               memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
+               set_handler(0x180, &except_vec3_generic, 0x80);
        else
-               memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
+               set_handler(0x080, &except_vec3_generic, 0x80);
 
        local_flush_icache_range(ebase, ebase + 0x400);
        flush_tlb_handlers();
index 6087a54c86a0dbc5e6e9a167659df9408a1dc3e1..203d8857070dd225f2d8fdea7bf986ad7a6560cc 100644 (file)
 #include <asm/branch.h>
 #include <asm/byteorder.h>
 #include <asm/cop2.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
 #include <asm/inst.h>
 #include <asm/uaccess.h>
 
 #define STR(x) __STR(x)
 #define __STR(x)  #x
@@ -102,12 +106,332 @@ static u32 unaligned_action;
 #endif
 extern void show_registers(struct pt_regs *regs);
 
+#ifdef __BIG_ENDIAN
+#define     LoadHW(addr, value, res)  \
+               __asm__ __volatile__ (".set\tnoat\n"        \
+                       "1:\tlb\t%0, 0(%2)\n"               \
+                       "2:\tlbu\t$1, 1(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\t.set\tat\n\t"                  \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadW(addr, value, res)   \
+               __asm__ __volatile__ (                      \
+                       "1:\tlwl\t%0, (%2)\n"               \
+                       "2:\tlwr\t%0, 3(%2)\n\t"            \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadHWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlbu\t%0, 0(%2)\n"              \
+                       "2:\tlbu\t$1, 1(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".set\tat\n\t"                      \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadWU(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tlwl\t%0, (%2)\n"               \
+                       "2:\tlwr\t%0, 3(%2)\n\t"            \
+                       "dsll\t%0, %0, 32\n\t"              \
+                       "dsrl\t%0, %0, 32\n\t"              \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       "\t.section\t.fixup,\"ax\"\n\t"     \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tldl\t%0, (%2)\n"               \
+                       "2:\tldr\t%0, 7(%2)\n\t"            \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       "\t.section\t.fixup,\"ax\"\n\t"     \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     StoreHW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tsb\t%1, 1(%2)\n\t"             \
+                       "srl\t$1, %1, 0x8\n"                \
+                       "2:\tsb\t$1, 0(%2)\n\t"             \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=r" (res)                        \
+                       : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tswl\t%1,(%2)\n"                \
+                       "2:\tswr\t%1, 3(%2)\n\t"            \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       "1:\tsdl\t%1,(%2)\n"                \
+                       "2:\tsdr\t%1, 7(%2)\n\t"            \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+#endif
+
+#ifdef __LITTLE_ENDIAN
+#define     LoadHW(addr, value, res)  \
+               __asm__ __volatile__ (".set\tnoat\n"        \
+                       "1:\tlb\t%0, 1(%2)\n"               \
+                       "2:\tlbu\t$1, 0(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\t.set\tat\n\t"                  \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadW(addr, value, res)   \
+               __asm__ __volatile__ (                      \
+                       "1:\tlwl\t%0, 3(%2)\n"              \
+                       "2:\tlwr\t%0, (%2)\n\t"             \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadHWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlbu\t%0, 1(%2)\n"              \
+                       "2:\tlbu\t$1, 0(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".set\tat\n\t"                      \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadWU(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tlwl\t%0, 3(%2)\n"              \
+                       "2:\tlwr\t%0, (%2)\n\t"             \
+                       "dsll\t%0, %0, 32\n\t"              \
+                       "dsrl\t%0, %0, 32\n\t"              \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       "\t.section\t.fixup,\"ax\"\n\t"     \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tldl\t%0, 7(%2)\n"              \
+                       "2:\tldr\t%0, (%2)\n\t"             \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       "\t.section\t.fixup,\"ax\"\n\t"     \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     StoreHW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tsb\t%1, 0(%2)\n\t"             \
+                       "srl\t$1,%1, 0x8\n"                 \
+                       "2:\tsb\t$1, 1(%2)\n\t"             \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=r" (res)                        \
+                       : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tswl\t%1, 3(%2)\n"              \
+                       "2:\tswr\t%1, (%2)\n\t"             \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       "1:\tsdl\t%1, 7(%2)\n"              \
+                       "2:\tsdr\t%1, (%2)\n\t"             \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+#endif
+
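
The LoadW()/StoreW() family added above wraps lwl/lwr (and sdl/sdr, etc.) pairs so that a misaligned access completes without taking an address-error exception, with the __ex_table fixups turning a faulting user address into -EFAULT in res. As a rough, hypothetical C equivalent of what the __BIG_ENDIAN LoadW() variant achieves (byte loads merged into one word; the real macros do it in two partial-word instructions):

        static inline int load_w_bytewise(const u8 __user *p, u32 *out)
        {
                u8 b[4];
                int i;

                for (i = 0; i < 4; i++)
                        if (__get_user(b[i], p + i))
                                return -EFAULT;         /* mirrors the fixup path */

                *out = ((u32)b[0] << 24) | ((u32)b[1] << 16) |
                       ((u32)b[2] << 8)  |  (u32)b[3];
                return 0;
        }
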
 static void emulate_load_store_insn(struct pt_regs *regs,
        void __user *addr, unsigned int __user *pc)
 {
        union mips_instruction insn;
        unsigned long value;
        unsigned int res;
+       unsigned long origpc;
+       unsigned long orig31;
+       void __user *fault_addr = NULL;
+
+       origpc = (unsigned long)pc;
+       orig31 = regs->regs[31];
 
        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
@@ -117,22 +441,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        __get_user(insn.word, pc);
 
        switch (insn.i_format.opcode) {
-       /*
-        * These are instructions that a compiler doesn't generate.  We
-        * can assume therefore that the code is MIPS-aware and
-        * really buggy.  Emulating these instructions would break the
-        * semantics anyway.
-        */
+               /*
+                * These are instructions that a compiler doesn't generate.  We
+                * can assume therefore that the code is MIPS-aware and
+                * really buggy.  Emulating these instructions would break the
+                * semantics anyway.
+                */
        case ll_op:
        case lld_op:
        case sc_op:
        case scd_op:
 
-       /*
-        * For these instructions the only way to create an address
-        * error is an attempted access to kernel/supervisor address
-        * space.
-        */
+               /*
+                * For these instructions the only way to create an address
+                * error is an attempted access to kernel/supervisor address
+                * space.
+                */
        case ldl_op:
        case ldr_op:
        case lwl_op:
@@ -146,36 +470,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        case sb_op:
                goto sigbus;
 
-       /*
-        * The remaining opcodes are the ones that are really of interest.
-        */
+               /*
+                * The remaining opcodes are the ones that are really of
+                * interest.
+                */
        case lh_op:
                if (!access_ok(VERIFY_READ, addr, 2))
                        goto sigbus;
 
-               __asm__ __volatile__ (".set\tnoat\n"
-#ifdef __BIG_ENDIAN
-                       "1:\tlb\t%0, 0(%2)\n"
-                       "2:\tlbu\t$1, 1(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tlb\t%0, 1(%2)\n"
-                       "2:\tlbu\t$1, 0(%2)\n\t"
-#endif
-                       "sll\t%0, 0x8\n\t"
-                       "or\t%0, $1\n\t"
-                       "li\t%1, 0\n"
-                       "3:\t.set\tat\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadHW(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -186,26 +489,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 4))
                        goto sigbus;
 
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tlwl\t%0, (%2)\n"
-                       "2:\tlwr\t%0, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tlwl\t%0, 3(%2)\n"
-                       "2:\tlwr\t%0, (%2)\n\t"
-#endif
-                       "li\t%1, 0\n"
-                       "3:\t.section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadW(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -216,30 +500,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 2))
                        goto sigbus;
 
-               __asm__ __volatile__ (
-                       ".set\tnoat\n"
-#ifdef __BIG_ENDIAN
-                       "1:\tlbu\t%0, 0(%2)\n"
-                       "2:\tlbu\t$1, 1(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tlbu\t%0, 1(%2)\n"
-                       "2:\tlbu\t$1, 0(%2)\n\t"
-#endif
-                       "sll\t%0, 0x8\n\t"
-                       "or\t%0, $1\n\t"
-                       "li\t%1, 0\n"
-                       "3:\t.set\tat\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadHWU(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -258,28 +519,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 4))
                        goto sigbus;
 
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tlwl\t%0, (%2)\n"
-                       "2:\tlwr\t%0, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tlwl\t%0, 3(%2)\n"
-                       "2:\tlwr\t%0, (%2)\n\t"
-#endif
-                       "dsll\t%0, %0, 32\n\t"
-                       "dsrl\t%0, %0, 32\n\t"
-                       "li\t%1, 0\n"
-                       "3:\t.section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadWU(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -302,26 +542,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 8))
                        goto sigbus;
 
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tldl\t%0, (%2)\n"
-                       "2:\tldr\t%0, 7(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tldl\t%0, 7(%2)\n"
-                       "2:\tldr\t%0, (%2)\n\t"
-#endif
-                       "li\t%1, 0\n"
-                       "3:\t.section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadDW(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -336,68 +557,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_WRITE, addr, 2))
                        goto sigbus;
 
+               compute_return_epc(regs);
                value = regs->regs[insn.i_format.rt];
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       ".set\tnoat\n"
-                       "1:\tsb\t%1, 1(%2)\n\t"
-                       "srl\t$1, %1, 0x8\n"
-                       "2:\tsb\t$1, 0(%2)\n\t"
-                       ".set\tat\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       ".set\tnoat\n"
-                       "1:\tsb\t%1, 0(%2)\n\t"
-                       "srl\t$1,%1, 0x8\n"
-                       "2:\tsb\t$1, 1(%2)\n\t"
-                       ".set\tat\n\t"
-#endif
-                       "li\t%0, 0\n"
-                       "3:\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%0, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=r" (res)
-                       : "r" (value), "r" (addr), "i" (-EFAULT));
+               StoreHW(addr, value, res);
                if (res)
                        goto fault;
-               compute_return_epc(regs);
                break;
 
        case sw_op:
                if (!access_ok(VERIFY_WRITE, addr, 4))
                        goto sigbus;
 
+               compute_return_epc(regs);
                value = regs->regs[insn.i_format.rt];
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tswl\t%1,(%2)\n"
-                       "2:\tswr\t%1, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tswl\t%1, 3(%2)\n"
-                       "2:\tswr\t%1, (%2)\n\t"
-#endif
-                       "li\t%0, 0\n"
-                       "3:\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%0, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-               : "=r" (res)
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               StoreW(addr, value, res);
                if (res)
                        goto fault;
-               compute_return_epc(regs);
                break;
 
        case sd_op:
@@ -412,31 +587,11 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_WRITE, addr, 8))
                        goto sigbus;
 
+               compute_return_epc(regs);
                value = regs->regs[insn.i_format.rt];
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tsdl\t%1,(%2)\n"
-                       "2:\tsdr\t%1, 7(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tsdl\t%1, 7(%2)\n"
-                       "2:\tsdr\t%1, (%2)\n\t"
-#endif
-                       "li\t%0, 0\n"
-                       "3:\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%0, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-               : "=r" (res)
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               StoreDW(addr, value, res);
                if (res)
                        goto fault;
-               compute_return_epc(regs);
                break;
 #endif /* CONFIG_64BIT */
 
@@ -447,10 +602,21 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        case ldc1_op:
        case swc1_op:
        case sdc1_op:
-               /*
-                * I herewith declare: this does not happen.  So send SIGBUS.
-                */
-               goto sigbus;
+               die_if_kernel("Unaligned FP access in kernel code", regs);
+               BUG_ON(!used_math());
+               BUG_ON(!is_fpu_owner());
+
+               lose_fpu(1);    /* Save FPU state for the emulator. */
+               res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+                                              &fault_addr);
+               own_fpu(1);     /* Restore FPU state. */
+
+               /* Signal if something went wrong. */
+               process_fpemu_return(res, fault_addr);
+
+               if (res == 0)
+                       break;
+               return;
 
        /*
         * COP2 is available to implementor for application specific use.
@@ -488,6 +654,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        return;
 
 fault:
+       /* roll back jump/branch */
+       regs->cp0_epc = origpc;
+       regs->regs[31] = orig31;
        /* Did we have an exception handler installed? */
        if (fixup_exception(regs))
                return;
@@ -504,10 +673,881 @@ sigbus:
        return;
 
 sigill:
-       die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
+       die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
+                     regs);
        force_sig(SIGILL, current);
 }
 
+/* Recode table from 16-bit register notation to 32-bit GPR. */
+const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
+const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
+
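
The two tables above undo the compressed register numbering used by 16-bit microMIPS encodings: a 3-bit field selects $16, $17 or $2-$7, and the store variant reuses slot 0 for $zero so that storing zero remains encodable. A hypothetical decode helper showing how the tables are meant to be indexed (the field extraction itself is illustrative, not a real encoding):

        static inline unsigned int mm_reg3_to_gpr(unsigned int field3, int is_store)
        {
                field3 &= 0x7;                  /* 3-bit register field */
                return is_store ? reg16to32st[field3] : reg16to32[field3];
        }
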
+void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr)
+{
+       unsigned long value;
+       unsigned int res;
+       int i;
+       unsigned int reg = 0, rvar;
+       unsigned long orig31;
+       u16 __user *pc16;
+       u16 halfword;
+       unsigned int word;
+       unsigned long origpc, contpc;
+       union mips_instruction insn;
+       struct mm_decoded_insn mminsn;
+       void __user *fault_addr = NULL;
+
+       origpc = regs->cp0_epc;
+       orig31 = regs->regs[31];
+
+       mminsn.micro_mips_mode = 1;
+
+       /*
+        * This load never faults.
+        */
+       pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+       __get_user(halfword, pc16);
+       pc16++;
+       contpc = regs->cp0_epc + 2;
+       word = ((unsigned int)halfword << 16);
+       mminsn.pc_inc = 2;
+
+       if (!mm_insn_16bit(halfword)) {
+               __get_user(halfword, pc16);
+               pc16++;
+               contpc = regs->cp0_epc + 4;
+               mminsn.pc_inc = 4;
+               word |= halfword;
+       }
+       mminsn.insn = word;
+
+       if (get_user(halfword, pc16))
+               goto fault;
+       mminsn.next_pc_inc = 2;
+       word = ((unsigned int)halfword << 16);
+
+       if (!mm_insn_16bit(halfword)) {
+               pc16++;
+               if (get_user(halfword, pc16))
+                       goto fault;
+               mminsn.next_pc_inc = 4;
+               word |= halfword;
+       }
+       mminsn.next_insn = word;
+
+       insn = (union mips_instruction)(mminsn.insn);
+       if (mm_isBranchInstr(regs, mminsn, &contpc))
+               insn = (union mips_instruction)(mminsn.next_insn);
+
+       /*  Parse instruction to find what to do */
+
+       switch (insn.mm_i_format.opcode) {
+
+       case mm_pool32a_op:
+               switch (insn.mm_x_format.func) {
+               case mm_lwxs_op:
+                       reg = insn.mm_x_format.rd;
+                       goto loadW;
+               }
+
+               goto sigbus;
+
+       case mm_pool32b_op:
+               switch (insn.mm_m_format.func) {
+               case mm_lwp_func:
+                       reg = insn.mm_m_format.rd;
+                       if (reg == 31)
+                               goto sigbus;
+
+                       if (!access_ok(VERIFY_READ, addr, 8))
+                               goto sigbus;
+
+                       LoadW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[reg] = value;
+                       addr += 4;
+                       LoadW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[reg + 1] = value;
+                       goto success;
+
+               case mm_swp_func:
+                       reg = insn.mm_m_format.rd;
+                       if (reg == 31)
+                               goto sigbus;
+
+                       if (!access_ok(VERIFY_WRITE, addr, 8))
+                               goto sigbus;
+
+                       value = regs->regs[reg];
+                       StoreW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       addr += 4;
+                       value = regs->regs[reg + 1];
+                       StoreW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       goto success;
+
+               case mm_ldp_func:
+#ifdef CONFIG_64BIT
+                       reg = insn.mm_m_format.rd;
+                       if (reg == 31)
+                               goto sigbus;
+
+                       if (!access_ok(VERIFY_READ, addr, 16))
+                               goto sigbus;
+
+                       LoadDW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[reg] = value;
+                       addr += 8;
+                       LoadDW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[reg + 1] = value;
+                       goto success;
+#endif /* CONFIG_64BIT */
+
+                       goto sigill;
+
+               case mm_sdp_func:
+#ifdef CONFIG_64BIT
+                       reg = insn.mm_m_format.rd;
+                       if (reg == 31)
+                               goto sigbus;
+
+                       if (!access_ok(VERIFY_WRITE, addr, 16))
+                               goto sigbus;
+
+                       value = regs->regs[reg];
+                       StoreDW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       addr += 8;
+                       value = regs->regs[reg + 1];
+                       StoreDW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       goto success;
+#endif /* CONFIG_64BIT */
+
+                       goto sigill;
+
+               case mm_lwm32_func:
+                       reg = insn.mm_m_format.rd;
+                       rvar = reg & 0xf;
+                       if ((rvar > 9) || !reg)
+                               goto sigill;
+                       if (reg & 0x10) {
+                               if (!access_ok
+                                   (VERIFY_READ, addr, 4 * (rvar + 1)))
+                                       goto sigbus;
+                       } else {
+                               if (!access_ok(VERIFY_READ, addr, 4 * rvar))
+                                       goto sigbus;
+                       }
+                       if (rvar == 9)
+                               rvar = 8;
+                       for (i = 16; rvar; rvar--, i++) {
+                               LoadW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                               regs->regs[i] = value;
+                       }
+                       if ((reg & 0xf) == 9) {
+                               LoadW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                               regs->regs[30] = value;
+                       }
+                       if (reg & 0x10) {
+                               LoadW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               regs->regs[31] = value;
+                       }
+                       goto success;
+
+               case mm_swm32_func:
+                       reg = insn.mm_m_format.rd;
+                       rvar = reg & 0xf;
+                       if ((rvar > 9) || !reg)
+                               goto sigill;
+                       if (reg & 0x10) {
+                               if (!access_ok
+                                   (VERIFY_WRITE, addr, 4 * (rvar + 1)))
+                                       goto sigbus;
+                       } else {
+                               if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
+                                       goto sigbus;
+                       }
+                       if (rvar == 9)
+                               rvar = 8;
+                       for (i = 16; rvar; rvar--, i++) {
+                               value = regs->regs[i];
+                               StoreW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                       }
+                       if ((reg & 0xf) == 9) {
+                               value = regs->regs[30];
+                               StoreW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                       }
+                       if (reg & 0x10) {
+                               value = regs->regs[31];
+                               StoreW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                       }
+                       goto success;
+
+               case mm_ldm_func:
+#ifdef CONFIG_64BIT
+                       reg = insn.mm_m_format.rd;
+                       rvar = reg & 0xf;
+                       if ((rvar > 9) || !reg)
+                               goto sigill;
+                       if (reg & 0x10) {
+                               if (!access_ok
+                                   (VERIFY_READ, addr, 8 * (rvar + 1)))
+                                       goto sigbus;
+                       } else {
+                               if (!access_ok(VERIFY_READ, addr, 8 * rvar))
+                                       goto sigbus;
+                       }
+                       if (rvar == 9)
+                               rvar = 8;
+
+                       for (i = 16; rvar; rvar--, i++) {
+                               LoadDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 8;
+                               regs->regs[i] = value;
+                       }
+                       if ((reg & 0xf) == 9) {
+                               LoadDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 8;
+                               regs->regs[30] = value;
+                       }
+                       if (reg & 0x10) {
+                               LoadDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               regs->regs[31] = value;
+                       }
+                       goto success;
+#endif /* CONFIG_64BIT */
+
+                       goto sigill;
+
+               case mm_sdm_func:
+#ifdef CONFIG_64BIT
+                       reg = insn.mm_m_format.rd;
+                       rvar = reg & 0xf;
+                       if ((rvar > 9) || !reg)
+                               goto sigill;
+                       if (reg & 0x10) {
+                               if (!access_ok
+                                   (VERIFY_WRITE, addr, 8 * (rvar + 1)))
+                                       goto sigbus;
+                       } else {
+                               if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
+                                       goto sigbus;
+                       }
+                       if (rvar == 9)
+                               rvar = 8;
+
+                       for (i = 16; rvar; rvar--, i++) {
+                               value = regs->regs[i];
+                               StoreDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 8;
+                       }
+                       if ((reg & 0xf) == 9) {
+                               value = regs->regs[30];
+                               StoreDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 8;
+                       }
+                       if (reg & 0x10) {
+                               value = regs->regs[31];
+                               StoreDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                       }
+                       goto success;
+#endif /* CONFIG_64BIT */
+
+                       goto sigill;
+
+                       /*  LWC2, SWC2, LDC2, SDC2 are not serviced */
+               }
+
+               goto sigbus;
+
+       case mm_pool32c_op:
+               switch (insn.mm_m_format.func) {
+               case mm_lwu_func:
+                       reg = insn.mm_m_format.rd;
+                       goto loadWU;
+               }
+
+               /*  LL,SC,LLD,SCD are not serviced */
+               goto sigbus;
+
+       case mm_pool32f_op:
+               switch (insn.mm_x_format.func) {
+               case mm_lwxc1_func:
+               case mm_swxc1_func:
+               case mm_ldxc1_func:
+               case mm_sdxc1_func:
+                       goto fpu_emul;
+               }
+
+               goto sigbus;
+
+       case mm_ldc132_op:
+       case mm_sdc132_op:
+       case mm_lwc132_op:
+       case mm_swc132_op:
+fpu_emul:
+               /* roll back jump/branch */
+               regs->cp0_epc = origpc;
+               regs->regs[31] = orig31;
+
+               die_if_kernel("Unaligned FP access in kernel code", regs);
+               BUG_ON(!used_math());
+               BUG_ON(!is_fpu_owner());
+
+               lose_fpu(1);    /* save the FPU state for the emulator */
+               res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+                                              &fault_addr);
+               own_fpu(1);     /* restore FPU state */
+
+               /* If something went wrong, signal */
+               process_fpemu_return(res, fault_addr);
+
+               if (res == 0)
+                       goto success;
+               return;
+
+       case mm_lh32_op:
+               reg = insn.mm_i_format.rt;
+               goto loadHW;
+
+       case mm_lhu32_op:
+               reg = insn.mm_i_format.rt;
+               goto loadHWU;
+
+       case mm_lw32_op:
+               reg = insn.mm_i_format.rt;
+               goto loadW;
+
+       case mm_sh32_op:
+               reg = insn.mm_i_format.rt;
+               goto storeHW;
+
+       case mm_sw32_op:
+               reg = insn.mm_i_format.rt;
+               goto storeW;
+
+       case mm_ld32_op:
+               reg = insn.mm_i_format.rt;
+               goto loadDW;
+
+       case mm_sd32_op:
+               reg = insn.mm_i_format.rt;
+               goto storeDW;
+
+       case mm_pool16c_op:
+               switch (insn.mm16_m_format.func) {
+               case mm_lwm16_op:
+                       reg = insn.mm16_m_format.rlist;
+                       rvar = reg + 1;
+                       if (!access_ok(VERIFY_READ, addr, 4 * rvar))
+                               goto sigbus;
+
+                       for (i = 16; rvar; rvar--, i++) {
+                               LoadW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                               regs->regs[i] = value;
+                       }
+                       LoadW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[31] = value;
+
+                       goto success;
+
+               case mm_swm16_op:
+                       reg = insn.mm16_m_format.rlist;
+                       rvar = reg + 1;
+                       if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
+                               goto sigbus;
+
+                       for (i = 16; rvar; rvar--, i++) {
+                               value = regs->regs[i];
+                               StoreW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                       }
+                       value = regs->regs[31];
+                       StoreW(addr, value, res);
+                       if (res)
+                               goto fault;
+
+                       goto success;
+
+               }
+
+               goto sigbus;
+
+       case mm_lhu16_op:
+               reg = reg16to32[insn.mm16_rb_format.rt];
+               goto loadHWU;
+
+       case mm_lw16_op:
+               reg = reg16to32[insn.mm16_rb_format.rt];
+               goto loadW;
+
+       case mm_sh16_op:
+               reg = reg16to32st[insn.mm16_rb_format.rt];
+               goto storeHW;
+
+       case mm_sw16_op:
+               reg = reg16to32st[insn.mm16_rb_format.rt];
+               goto storeW;
+
+       case mm_lwsp16_op:
+               reg = insn.mm16_r5_format.rt;
+               goto loadW;
+
+       case mm_swsp16_op:
+               reg = insn.mm16_r5_format.rt;
+               goto storeW;
+
+       case mm_lwgp16_op:
+               reg = reg16to32[insn.mm16_r3_format.rt];
+               goto loadW;
+
+       default:
+               goto sigill;
+       }
+
+loadHW:
+       if (!access_ok(VERIFY_READ, addr, 2))
+               goto sigbus;
+
+       LoadHW(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+
+loadHWU:
+       if (!access_ok(VERIFY_READ, addr, 2))
+               goto sigbus;
+
+       LoadHWU(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+
+loadW:
+       if (!access_ok(VERIFY_READ, addr, 4))
+               goto sigbus;
+
+       LoadW(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+
+loadWU:
+#ifdef CONFIG_64BIT
+       /*
+        * A 32-bit kernel might be running on a 64-bit processor.  But
+        * if we're on a 32-bit processor and an i-cache incoherency
+        * or race makes us see a 64-bit instruction here the sdl/sdr
+        * would blow up, so for now we don't handle unaligned 64-bit
+        * instructions on 32-bit kernels.
+        */
+       if (!access_ok(VERIFY_READ, addr, 4))
+               goto sigbus;
+
+       LoadWU(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+#endif /* CONFIG_64BIT */
+
+       /* Cannot handle 64-bit instructions in 32-bit kernel */
+       goto sigill;
+
+loadDW:
+#ifdef CONFIG_64BIT
+       /*
+        * A 32-bit kernel might be running on a 64-bit processor.  But
+        * if we're on a 32-bit processor and an i-cache incoherency
+        * or race makes us see a 64-bit instruction here the sdl/sdr
+        * would blow up, so for now we don't handle unaligned 64-bit
+        * instructions on 32-bit kernels.
+        */
+       if (!access_ok(VERIFY_READ, addr, 8))
+               goto sigbus;
+
+       LoadDW(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+#endif /* CONFIG_64BIT */
+
+       /* Cannot handle 64-bit instructions in 32-bit kernel */
+       goto sigill;
+
+storeHW:
+       if (!access_ok(VERIFY_WRITE, addr, 2))
+               goto sigbus;
+
+       value = regs->regs[reg];
+       StoreHW(addr, value, res);
+       if (res)
+               goto fault;
+       goto success;
+
+storeW:
+       if (!access_ok(VERIFY_WRITE, addr, 4))
+               goto sigbus;
+
+       value = regs->regs[reg];
+       StoreW(addr, value, res);
+       if (res)
+               goto fault;
+       goto success;
+
+storeDW:
+#ifdef CONFIG_64BIT
+       /*
+        * A 32-bit kernel might be running on a 64-bit processor.  But
+        * if we're on a 32-bit processor and an i-cache incoherency
+        * or race makes us see a 64-bit instruction here the sdl/sdr
+        * would blow up, so for now we don't handle unaligned 64-bit
+        * instructions on 32-bit kernels.
+        */
+       if (!access_ok(VERIFY_WRITE, addr, 8))
+               goto sigbus;
+
+       value = regs->regs[reg];
+       StoreDW(addr, value, res);
+       if (res)
+               goto fault;
+       goto success;
+#endif /* CONFIG_64BIT */
+
+       /* Cannot handle 64-bit instructions in 32-bit kernel */
+       goto sigill;
+
+success:
+       regs->cp0_epc = contpc; /* advance or branch */
+
+#ifdef CONFIG_DEBUG_FS
+       unaligned_instructions++;
+#endif
+       return;
+
+fault:
+       /* roll back jump/branch */
+       regs->cp0_epc = origpc;
+       regs->regs[31] = orig31;
+       /* Did we have an exception handler installed? */
+       if (fixup_exception(regs))
+               return;
+
+       die_if_kernel("Unhandled kernel unaligned access", regs);
+       force_sig(SIGSEGV, current);
+
+       return;
+
+sigbus:
+       die_if_kernel("Unhandled kernel unaligned access", regs);
+       force_sig(SIGBUS, current);
+
+       return;
+
+sigill:
+       die_if_kernel
+           ("Unhandled kernel unaligned access or invalid instruction", regs);
+       force_sig(SIGILL, current);
+}
+
+static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
+{
+       unsigned long value;
+       unsigned int res;
+       int reg;
+       unsigned long orig31;
+       u16 __user *pc16;
+       unsigned long origpc;
+       union mips16e_instruction mips16inst, oldinst;
+
+       origpc = regs->cp0_epc;
+       orig31 = regs->regs[31];
+       pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
+       /*
+        * This load never faults.
+        */
+       __get_user(mips16inst.full, pc16);
+       oldinst = mips16inst;
+
+       /* skip EXTEND instruction */
+       if (mips16inst.ri.opcode == MIPS16e_extend_op) {
+               pc16++;
+               __get_user(mips16inst.full, pc16);
+       } else if (delay_slot(regs)) {
+               /*  skip jump instructions */
+               /*  JAL/JALX are 32 bits but have OPCODE in first short int */
+               if (mips16inst.ri.opcode == MIPS16e_jal_op)
+                       pc16++;
+               pc16++;
+               if (get_user(mips16inst.full, pc16))
+                       goto sigbus;
+       }
+
+       switch (mips16inst.ri.opcode) {
+       case MIPS16e_i64_op:    /* I64 or RI64 instruction */
+               switch (mips16inst.i64.func) {  /* I64/RI64 func field check */
+               case MIPS16e_ldpc_func:
+               case MIPS16e_ldsp_func:
+                       reg = reg16to32[mips16inst.ri64.ry];
+                       goto loadDW;
+
+               case MIPS16e_sdsp_func:
+                       reg = reg16to32[mips16inst.ri64.ry];
+                       goto writeDW;
+
+               case MIPS16e_sdrasp_func:
+                       reg = 29;       /* GPRSP */
+                       goto writeDW;
+               }
+
+               goto sigbus;
+
+       case MIPS16e_swsp_op:
+       case MIPS16e_lwpc_op:
+       case MIPS16e_lwsp_op:
+               reg = reg16to32[mips16inst.ri.rx];
+               break;
+
+       case MIPS16e_i8_op:
+               if (mips16inst.i8.func != MIPS16e_swrasp_func)
+                       goto sigbus;
+               reg = 29;       /* GPRSP */
+               break;
+
+       default:
+               reg = reg16to32[mips16inst.rri.ry];
+               break;
+       }
+
+       switch (mips16inst.ri.opcode) {
+
+       case MIPS16e_lb_op:
+       case MIPS16e_lbu_op:
+       case MIPS16e_sb_op:
+               goto sigbus;
+
+       case MIPS16e_lh_op:
+               if (!access_ok(VERIFY_READ, addr, 2))
+                       goto sigbus;
+
+               LoadHW(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+
+       case MIPS16e_lhu_op:
+               if (!access_ok(VERIFY_READ, addr, 2))
+                       goto sigbus;
+
+               LoadHWU(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+
+       case MIPS16e_lw_op:
+       case MIPS16e_lwpc_op:
+       case MIPS16e_lwsp_op:
+               if (!access_ok(VERIFY_READ, addr, 4))
+                       goto sigbus;
+
+               LoadW(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+
+       case MIPS16e_lwu_op:
+#ifdef CONFIG_64BIT
+               /*
+                * A 32-bit kernel might be running on a 64-bit processor.  But
+                * if we're on a 32-bit processor and an i-cache incoherency
+                * or race makes us see a 64-bit instruction here the sdl/sdr
+                * would blow up, so for now we don't handle unaligned 64-bit
+                * instructions on 32-bit kernels.
+                */
+               if (!access_ok(VERIFY_READ, addr, 4))
+                       goto sigbus;
+
+               LoadWU(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+#endif /* CONFIG_64BIT */
+
+               /* Cannot handle 64-bit instructions in 32-bit kernel */
+               goto sigill;
+
+       case MIPS16e_ld_op:
+loadDW:
+#ifdef CONFIG_64BIT
+               /*
+                * A 32-bit kernel might be running on a 64-bit processor.  But
+                * if we're on a 32-bit processor and an i-cache incoherency
+                * or race makes us see a 64-bit instruction here the sdl/sdr
+                * would blow up, so for now we don't handle unaligned 64-bit
+                * instructions on 32-bit kernels.
+                */
+               if (!access_ok(VERIFY_READ, addr, 8))
+                       goto sigbus;
+
+               LoadDW(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+#endif /* CONFIG_64BIT */
+
+               /* Cannot handle 64-bit instructions in 32-bit kernel */
+               goto sigill;
+
+       case MIPS16e_sh_op:
+               if (!access_ok(VERIFY_WRITE, addr, 2))
+                       goto sigbus;
+
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               value = regs->regs[reg];
+               StoreHW(addr, value, res);
+               if (res)
+                       goto fault;
+               break;
+
+       case MIPS16e_sw_op:
+       case MIPS16e_swsp_op:
+       case MIPS16e_i8_op:     /* actually - MIPS16e_swrasp_func */
+               if (!access_ok(VERIFY_WRITE, addr, 4))
+                       goto sigbus;
+
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               value = regs->regs[reg];
+               StoreW(addr, value, res);
+               if (res)
+                       goto fault;
+               break;
+
+       case MIPS16e_sd_op:
+writeDW:
+#ifdef CONFIG_64BIT
+               /*
+                * A 32-bit kernel might be running on a 64-bit processor.  But
+                * if we're on a 32-bit processor and an i-cache incoherency
+                * or race makes us see a 64-bit instruction here the sdl/sdr
+                * would blow up, so for now we don't handle unaligned 64-bit
+                * instructions on 32-bit kernels.
+                */
+               if (!access_ok(VERIFY_WRITE, addr, 8))
+                       goto sigbus;
+
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               value = regs->regs[reg];
+               StoreDW(addr, value, res);
+               if (res)
+                       goto fault;
+               break;
+#endif /* CONFIG_64BIT */
+
+               /* Cannot handle 64-bit instructions in 32-bit kernel */
+               goto sigill;
+
+       default:
+               /*
+                * Pheeee...  We encountered an as yet unknown instruction or
+                * a cache coherence problem.  Die sucker, die ...
+                */
+               goto sigill;
+       }
+
+#ifdef CONFIG_DEBUG_FS
+       unaligned_instructions++;
+#endif
+
+       return;
+
+fault:
+       /* roll back jump/branch */
+       regs->cp0_epc = origpc;
+       regs->regs[31] = orig31;
+       /* Did we have an exception handler installed? */
+       if (fixup_exception(regs))
+               return;
+
+       die_if_kernel("Unhandled kernel unaligned access", regs);
+       force_sig(SIGSEGV, current);
+
+       return;
+
+sigbus:
+       die_if_kernel("Unhandled kernel unaligned access", regs);
+       force_sig(SIGBUS, current);
+
+       return;
+
+sigill:
+       die_if_kernel
+           ("Unhandled kernel unaligned access or invalid instruction", regs);
+       force_sig(SIGILL, current);
+}
 asmlinkage void do_ade(struct pt_regs *regs)
 {
        unsigned int __user *pc;
@@ -517,23 +1557,62 @@ asmlinkage void do_ade(struct pt_regs *regs)
                        1, regs, regs->cp0_badvaddr);
        /*
         * Did we catch a fault trying to load an instruction?
-        * Or are we running in MIPS16 mode?
         */
-       if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
+       if (regs->cp0_badvaddr == regs->cp0_epc)
                goto sigbus;
 
-       pc = (unsigned int __user *) exception_epc(regs);
        if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
                goto sigbus;
        if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
                goto sigbus;
-       else if (unaligned_action == UNALIGNED_ACTION_SHOW)
-               show_registers(regs);
 
        /*
         * Do branch emulation only if we didn't forward the exception.
         * This is all so but ugly ...
         */
+
+       /*
+        * Are we running in microMIPS mode?
+        */
+       if (get_isa16_mode(regs->cp0_epc)) {
+               /*
+                * Did we catch a fault trying to load an instruction in
+                * 16-bit mode?
+                */
+               if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
+                       goto sigbus;
+               if (unaligned_action == UNALIGNED_ACTION_SHOW)
+                       show_registers(regs);
+
+               if (cpu_has_mmips) {
+                       seg = get_fs();
+                       if (!user_mode(regs))
+                               set_fs(KERNEL_DS);
+                       emulate_load_store_microMIPS(regs,
+                               (void __user *)regs->cp0_badvaddr);
+                       set_fs(seg);
+
+                       return;
+               }
+
+               if (cpu_has_mips16) {
+                       seg = get_fs();
+                       if (!user_mode(regs))
+                               set_fs(KERNEL_DS);
+                       emulate_load_store_MIPS16e(regs,
+                               (void __user *)regs->cp0_badvaddr);
+                       set_fs(seg);
+
+                       return;
+               }
+
+               goto sigbus;
+       }
+
+       if (unaligned_action == UNALIGNED_ACTION_SHOW)
+               show_registers(regs);
+       pc = (unsigned int __user *)exception_epc(regs);
+
        seg = get_fs();
        if (!user_mode(regs))
                set_fs(KERNEL_DS);
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
new file mode 100644 (file)
index 0000000..51617e4
--- /dev/null
@@ -0,0 +1,31 @@
+KVM/MIPS Trap & Emulate Release Notes
+=====================================
+
+(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
+    Malta Board with FPGA based 34K
+    Sigma Designs TangoX board with a 24K based 8654 SoC.
+    Malta Board with 74K @ 1GHz
+
+(2) Both Guest kernel and Guest Userspace execute in UM.
+    Guest User address space:   0x00000000 -> 0x40000000
+    Guest Kernel Unmapped:      0x40000000 -> 0x60000000
+    Guest Kernel Mapped:        0x60000000 -> 0x80000000
+
+    Guest Usermode virtual memory is limited to 1GB.
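+
+    For illustration only, a hypothetical helper (not part of the KVM/MIPS
+    sources) could classify a guest virtual address against this layout:
+
+        static inline int kvm_guest_va_region(unsigned long gva)
+        {
+                if (gva < 0x40000000UL)
+                        return 0;       /* Guest user address space */
+                if (gva < 0x60000000UL)
+                        return 1;       /* Guest kernel, unmapped */
+                if (gva < 0x80000000UL)
+                        return 2;       /* Guest kernel, mapped */
+                return -1;              /* outside the guest map */
+        }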
+
+(3) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K.
+    Note that due to cache aliasing issues, 4K page sizes are NOT supported.
+
+(4) No HugeTLB Support
+    Both the host kernel and Guest kernel should have the page size set to 16K.
+    This will be implemented in a future release.
+
+(5) KVM/MIPS does not have support for SMP Guests
+    Linux-3.7-rc2-based SMP guest hangs due to the following code sequence in the generated TLB handlers:
+       LL/TLBP/SC.  Since the TLBP instruction causes a trap, the reservation gets cleared
+       when we ERET back to the guest. This causes the guest to hang in an infinite loop.
+       This will be fixed in a future release.
+
+(6) Use Host FPU
+    Currently KVM/MIPS emulates a 24K CPU without an FPU.
+    This will be fixed in a future release.
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644 (file)
index 0000000..2c15590
--- /dev/null
@@ -0,0 +1,49 @@
+#
+# KVM configuration
+#
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       depends on HAVE_KVM
+       ---help---
+         Say Y here to get to see options for using your Linux host to run
+         other operating systems inside virtual machines (guests).
+         This option alone does not add any kernel code.
+
+         If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+       tristate "Kernel-based Virtual Machine (KVM) support"
+       depends on HAVE_KVM
+       select PREEMPT_NOTIFIERS
+       select ANON_INODES
+       select KVM_MMIO
+       ---help---
+         Support for hosting Guest kernels.
+         Currently supported on MIPS32 processors.
+
+config KVM_MIPS_DYN_TRANS
+       bool "KVM/MIPS: Dynamic binary translation to reduce traps"
+       depends on KVM
+       ---help---
+         When running in Trap & Emulate mode, patch privileged
+         instructions to reduce the number of traps.
+
+         If unsure, say Y.
+
+config KVM_MIPS_DEBUG_COP0_COUNTERS
+       bool "Maintain counters for COP0 accesses"
+       depends on KVM
+       ---help---
+         Maintain statistics for Guest COP0 accesses.
+         A histogram of COP0 accesses is printed when the VM is
+         shut down.
+
+         If unsure, say N.
+
+source "drivers/vhost/Kconfig"
+
+endif # VIRTUALIZATION
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644 (file)
index 0000000..78d87bb
--- /dev/null
@@ -0,0 +1,13 @@
+# Makefile for KVM support for MIPS
+#
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
+
+kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
+           kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
+           kvm_mips_dyntrans.o kvm_trap_emul.o
+
+obj-$(CONFIG_KVM)      += kvm.o
+obj-y                  += kvm_cb.o kvm_tlb.o
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c
new file mode 100644 (file)
index 0000000..313c2e3
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kvm_host.h>
+
+struct kvm_mips_callbacks *kvm_mips_callbacks;
+EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
new file mode 100644 (file)
index 0000000..dca2aa6
--- /dev/null
@@ -0,0 +1,650 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Main entry point for the guest, exception handling.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asm-offsets.h>
+
+
+#define _C_LABEL(x)     x
+#define MIPSX(name)     mips32_ ## name
+#define CALLFRAME_SIZ   32
+
+/*
+ * VECTOR
+ *  exception vector entrypoint
+ */
+#define VECTOR(x, regmask)      \
+    .ent    _C_LABEL(x),0;      \
+    EXPORT(x);
+
+#define VECTOR_END(x)      \
+    EXPORT(x);
+
+/* Overload, Danger Will Robinson!! */
+#define PT_HOST_ASID        PT_BVADDR
+#define PT_HOST_USERLOCAL   PT_EPC
+
+#define CP0_DDATA_LO        $28,3
+#define CP0_EBASE           $15,1
+
+#define CP0_INTCTL          $12,1
+#define CP0_SRSCTL          $12,2
+#define CP0_SRSMAP          $12,3
+#define CP0_HWRENA          $7,0
+
+/* Resume Flags */
+#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_HOST             RESUME_FLAG_HOST
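+
+/*
+ * The exit handler's return value comes back in v0: the low bits are tested
+ * against RESUME_HOST after the jalr below, and __kvm_mips_return_to_host
+ * recovers the error code with an arithmetic shift right by two.
+ */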
+
+/*
+ * __kvm_mips_vcpu_run: entry point to the guest
+ * a0: run
+ * a1: vcpu
+ */
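+/*
+ * For reference, the C side (kvm_arch_vcpu_ioctl_run() in kvm_mips.c) calls
+ * this with interrupts disabled, bracketed by kvm_guest_enter()/kvm_guest_exit():
+ *
+ *     r = __kvm_mips_vcpu_run(run, vcpu);
+ */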
+
+FEXPORT(__kvm_mips_vcpu_run)
+    .set    push
+    .set    noreorder
+    .set    noat
+
+    /* k0/k1 not being used in host kernel context */
+       addiu           k1,sp, -PT_SIZE
+    LONG_S         $0, PT_R0(k1)
+    LONG_S             $1, PT_R1(k1)
+    LONG_S             $2, PT_R2(k1)
+    LONG_S             $3, PT_R3(k1)
+
+    LONG_S             $4, PT_R4(k1)
+    LONG_S             $5, PT_R5(k1)
+    LONG_S             $6, PT_R6(k1)
+    LONG_S             $7, PT_R7(k1)
+
+    LONG_S             $8,  PT_R8(k1)
+    LONG_S             $9,  PT_R9(k1)
+    LONG_S             $10, PT_R10(k1)
+    LONG_S             $11, PT_R11(k1)
+    LONG_S             $12, PT_R12(k1)
+    LONG_S             $13, PT_R13(k1)
+    LONG_S             $14, PT_R14(k1)
+    LONG_S             $15, PT_R15(k1)
+    LONG_S             $16, PT_R16(k1)
+    LONG_S             $17, PT_R17(k1)
+
+    LONG_S             $18, PT_R18(k1)
+    LONG_S             $19, PT_R19(k1)
+    LONG_S             $20, PT_R20(k1)
+    LONG_S             $21, PT_R21(k1)
+    LONG_S             $22, PT_R22(k1)
+    LONG_S             $23, PT_R23(k1)
+    LONG_S             $24, PT_R24(k1)
+    LONG_S             $25, PT_R25(k1)
+
+       /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
+
+    LONG_S             $28, PT_R28(k1)
+    LONG_S             $29, PT_R29(k1)
+    LONG_S             $30, PT_R30(k1)
+    LONG_S             $31, PT_R31(k1)
+
+    /* Save hi/lo */
+       mflo            v0
+       LONG_S          v0, PT_LO(k1)
+       mfhi            v1
+       LONG_S          v1, PT_HI(k1)
+
+       /* Save host status */
+       mfc0            v0, CP0_STATUS
+       LONG_S          v0, PT_STATUS(k1)
+
+       /* Save host ASID, shove it into the BVADDR location */
+       mfc0            v1,CP0_ENTRYHI
+       andi            v1, 0xff
+       LONG_S          v1, PT_HOST_ASID(k1)
+
+    /* Save DDATA_LO, will be used to store pointer to vcpu */
+    mfc0        v1, CP0_DDATA_LO
+    LONG_S      v1, PT_HOST_USERLOCAL(k1)
+
+    /* DDATA_LO has pointer to vcpu */
+    mtc0        a1,CP0_DDATA_LO
+
+    /* Offset into vcpu->arch */
+       addiu           k1, a1, VCPU_HOST_ARCH
+
+    /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
+    LONG_S      sp, VCPU_HOST_STACK(k1)
+
+    /* Save the kernel gp as well */
+    LONG_S      gp, VCPU_HOST_GP(k1)
+
+       /* Setup status register for running the guest in UM, interrupts are disabled */
+       li                      k0,(ST0_EXL | KSU_USER| ST0_BEV)
+       mtc0            k0,CP0_STATUS
+    ehb
+
+    /* load up the new EBASE */
+    LONG_L      k0, VCPU_GUEST_EBASE(k1)
+    mtc0        k0,CP0_EBASE
+
+    /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
+     * but make sure that timer interrupts are enabled
+     */
+    li          k0,(ST0_EXL | KSU_USER | ST0_IE)
+    andi        v0, v0, ST0_IM
+    or          k0, k0, v0
+    mtc0        k0,CP0_STATUS
+    ehb
+
+
+       /* Set Guest EPC */
+       LONG_L          t0, VCPU_PC(k1)
+       mtc0            t0, CP0_EPC
+
+FEXPORT(__kvm_mips_load_asid)
+    /* Set the ASID for the Guest Kernel */
+    sll         t0, t0, 1                       /* with kseg0 @ 0x40000000, kernel */
+                                                /* addresses shift to 0x80000000 */
+    bltz        t0, 1f                          /* If kernel */
+       addiu       t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+    addiu       t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+    /* t1: contains the base of the ASID array, need to get the cpu id  */
+    LONG_L      t2, TI_CPU($28)             /* smp_processor_id */
+    sll         t2, t2, 2                   /* x4 */
+    addu        t3, t1, t2
+    LONG_L      k0, (t3)
+    andi        k0, k0, 0xff
+       mtc0            k0,CP0_ENTRYHI
+    ehb
+
+    /* Disable RDHWR access */
+    mtc0    zero,  CP0_HWRENA
+
+    /* Now load up the Guest Context from VCPU */
+    LONG_L             $1, VCPU_R1(k1)
+    LONG_L             $2, VCPU_R2(k1)
+    LONG_L             $3, VCPU_R3(k1)
+
+    LONG_L             $4, VCPU_R4(k1)
+    LONG_L             $5, VCPU_R5(k1)
+    LONG_L             $6, VCPU_R6(k1)
+    LONG_L             $7, VCPU_R7(k1)
+
+    LONG_L             $8,  VCPU_R8(k1)
+    LONG_L             $9,  VCPU_R9(k1)
+    LONG_L             $10, VCPU_R10(k1)
+    LONG_L             $11, VCPU_R11(k1)
+    LONG_L             $12, VCPU_R12(k1)
+    LONG_L             $13, VCPU_R13(k1)
+    LONG_L             $14, VCPU_R14(k1)
+    LONG_L             $15, VCPU_R15(k1)
+    LONG_L             $16, VCPU_R16(k1)
+    LONG_L             $17, VCPU_R17(k1)
+    LONG_L             $18, VCPU_R18(k1)
+    LONG_L             $19, VCPU_R19(k1)
+    LONG_L             $20, VCPU_R20(k1)
+    LONG_L             $21, VCPU_R21(k1)
+    LONG_L             $22, VCPU_R22(k1)
+    LONG_L             $23, VCPU_R23(k1)
+    LONG_L             $24, VCPU_R24(k1)
+    LONG_L             $25, VCPU_R25(k1)
+
+    /* k0/k1 loaded up later */
+
+    LONG_L             $28, VCPU_R28(k1)
+    LONG_L             $29, VCPU_R29(k1)
+    LONG_L             $30, VCPU_R30(k1)
+    LONG_L             $31, VCPU_R31(k1)
+
+    /* Restore hi/lo */
+       LONG_L          k0, VCPU_LO(k1)
+       mtlo            k0
+
+       LONG_L          k0, VCPU_HI(k1)
+       mthi            k0
+
+FEXPORT(__kvm_mips_load_k0k1)
+       /* Restore the guest's k0/k1 registers */
+    LONG_L             k0, VCPU_R26(k1)
+    LONG_L             k1, VCPU_R27(k1)
+
+    /* Jump to guest */
+       eret
+       .set    pop
+
+VECTOR(MIPSX(exception), unknown)
+/*
+ * Find out what mode we came from and jump to the proper handler.
+ */
+    .set    push
+       .set    noat
+    .set    noreorder
+    mtc0    k0, CP0_ERROREPC    #01: Save guest k0
+    ehb                         #02:
+
+    mfc0    k0, CP0_EBASE       #02: Get EBASE
+    srl     k0, k0, 10          #03: Get rid of CPUNum
+    sll     k0, k0, 10          #04
+    LONG_S  k1, 0x3000(k0)      #05: Save k1 @ offset 0x3000
+    addiu   k0, k0, 0x2000      #06: Exception handler is installed @ offset 0x2000
+       j       k0                                      #07: jump to the function
+       nop                                             #08: branch delay slot
+       .set    pop
+VECTOR_END(MIPSX(exceptionEnd))
+.end MIPSX(exception)
+
+/*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ *
+ */
+NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
+    .set    push
+    .set    noat
+    .set    noreorder
+
+    /* Get the VCPU pointer from DDATA_LO */
+    mfc0        k1, CP0_DDATA_LO
+       addiu           k1, k1, VCPU_HOST_ARCH
+
+    /* Start saving Guest context to VCPU */
+    LONG_S  $0, VCPU_R0(k1)
+    LONG_S  $1, VCPU_R1(k1)
+    LONG_S  $2, VCPU_R2(k1)
+    LONG_S  $3, VCPU_R3(k1)
+    LONG_S  $4, VCPU_R4(k1)
+    LONG_S  $5, VCPU_R5(k1)
+    LONG_S  $6, VCPU_R6(k1)
+    LONG_S  $7, VCPU_R7(k1)
+    LONG_S  $8, VCPU_R8(k1)
+    LONG_S  $9, VCPU_R9(k1)
+    LONG_S  $10, VCPU_R10(k1)
+    LONG_S  $11, VCPU_R11(k1)
+    LONG_S  $12, VCPU_R12(k1)
+    LONG_S  $13, VCPU_R13(k1)
+    LONG_S  $14, VCPU_R14(k1)
+    LONG_S  $15, VCPU_R15(k1)
+    LONG_S  $16, VCPU_R16(k1)
+    LONG_S  $17, VCPU_R17(k1)
+    LONG_S  $18, VCPU_R18(k1)
+    LONG_S  $19, VCPU_R19(k1)
+    LONG_S  $20, VCPU_R20(k1)
+    LONG_S  $21, VCPU_R21(k1)
+    LONG_S  $22, VCPU_R22(k1)
+    LONG_S  $23, VCPU_R23(k1)
+    LONG_S  $24, VCPU_R24(k1)
+    LONG_S  $25, VCPU_R25(k1)
+
+    /* Guest k0/k1 saved later */
+
+    LONG_S  $28, VCPU_R28(k1)
+    LONG_S  $29, VCPU_R29(k1)
+    LONG_S  $30, VCPU_R30(k1)
+    LONG_S  $31, VCPU_R31(k1)
+
+    /* We need to save hi/lo and restore them on
+     * the way out
+     */
+    mfhi    t0
+    LONG_S  t0, VCPU_HI(k1)
+
+    mflo    t0
+    LONG_S  t0, VCPU_LO(k1)
+
+    /* Finally save guest k0/k1 to VCPU */
+    mfc0    t0, CP0_ERROREPC
+    LONG_S  t0, VCPU_R26(k1)
+
+    /* Get GUEST k1 and save it in VCPU */
+    la      t1, ~0x2ff
+    mfc0    t0, CP0_EBASE
+    and     t0, t0, t1
+    LONG_L  t0, 0x3000(t0)
+    LONG_S  t0, VCPU_R27(k1)
+
+    /* Now that context has been saved, we can use other registers */
+
+    /* Restore vcpu */
+    mfc0        a1, CP0_DDATA_LO
+    move        s1, a1
+
+   /* Restore run (vcpu->run) */
+    LONG_L      a0, VCPU_RUN(a1)
+    /* Save pointer to run in s0, will be saved by the compiler */
+    move        s0, a0
+
+
+    /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
+    mfc0    k0,CP0_EPC
+    LONG_S  k0, VCPU_PC(k1)
+
+    mfc0    k0, CP0_BADVADDR
+    LONG_S  k0, VCPU_HOST_CP0_BADVADDR(k1)
+
+    mfc0    k0, CP0_CAUSE
+    LONG_S  k0, VCPU_HOST_CP0_CAUSE(k1)
+
+    mfc0    k0, CP0_ENTRYHI
+    LONG_S  k0, VCPU_HOST_ENTRYHI(k1)
+
+    /* Now restore the host state just enough to run the handlers */
+
+    /* Switch EBASE to the one used by Linux */
+    /* load up the host EBASE */
+    mfc0        v0, CP0_STATUS
+
+    .set at
+       or          k0, v0, ST0_BEV
+    .set noat
+
+    mtc0        k0, CP0_STATUS
+    ehb
+
+    LONG_L      k0, VCPU_HOST_EBASE(k1)
+    mtc0        k0,CP0_EBASE
+
+
+    /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+    .set at
+       and         v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
+    or          v0, v0, ST0_CU0
+    .set noat
+    mtc0        v0, CP0_STATUS
+    ehb
+
+    /* Load up host GP */
+    LONG_L  gp, VCPU_HOST_GP(k1)
+
+    /* Need a stack before we can jump to "C" */
+    LONG_L  sp, VCPU_HOST_STACK(k1)
+
+    /* Saved host state */
+    addiu   sp,sp, -PT_SIZE
+
+    /* XXXKYMA do we need to load the host ASID, maybe not because the
+     * kernel entries are marked GLOBAL, need to verify
+     */
+
+    /* Restore host DDATA_LO */
+    LONG_L      k0, PT_HOST_USERLOCAL(sp)
+    mtc0        k0, CP0_DDATA_LO
+
+    /* Restore RDHWR access */
+    la      k0, 0x2000000F
+    mtc0    k0,  CP0_HWRENA
+
+    /* Jump to handler */
+FEXPORT(__kvm_mips_jump_to_handler)
+    /* XXXKYMA: not sure if this is safe, how large is the stack?? */
+    /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
+    la          t9,kvm_mips_handle_exit
+    jalr.hb     t9
+    addiu       sp,sp, -CALLFRAME_SIZ           /* BD Slot */
+
+    /* Return from handler; make sure interrupts are disabled */
+    di
+    ehb
+
+    /* XXXKYMA: k0/k1 could have been blown away if we processed an exception
+     * while we were handling the exception from the guest, reload k1
+     */
+    move        k1, s1
+       addiu           k1, k1, VCPU_HOST_ARCH
+
+    /* Check return value, should tell us if we are returning to the host (handle I/O etc)
+     * or resuming the guest
+     */
+    andi        t0, v0, RESUME_HOST
+    bnez        t0, __kvm_mips_return_to_host
+    nop
+
+__kvm_mips_return_to_guest:
+    /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
+    mtc0        s1, CP0_DDATA_LO
+
+    /* Load up the Guest EBASE to minimize the window where BEV is set */
+    LONG_L      t0, VCPU_GUEST_EBASE(k1)
+
+    /* Switch EBASE back to the one used by KVM */
+    mfc0        v1, CP0_STATUS
+    .set at
+       or          k0, v1, ST0_BEV
+    .set noat
+    mtc0        k0, CP0_STATUS
+    ehb
+    mtc0        t0,CP0_EBASE
+
+    /* Setup status register for running guest in UM */
+    .set at
+    or     v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+    and     v1, v1, ~ST0_CU0
+    .set noat
+    mtc0    v1, CP0_STATUS
+    ehb
+
+
+       /* Set Guest EPC */
+       LONG_L          t0, VCPU_PC(k1)
+       mtc0            t0, CP0_EPC
+
+    /* Set the ASID for the Guest Kernel */
+    sll         t0, t0, 1                       /* with kseg0 @ 0x40000000, kernel */
+                                                /* addresses shift to 0x80000000 */
+    bltz        t0, 1f                          /* If kernel */
+       addiu       t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+    addiu       t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+    /* t1: contains the base of the ASID array, need to get the cpu id  */
+    LONG_L      t2, TI_CPU($28)             /* smp_processor_id */
+    sll         t2, t2, 2                   /* x4 */
+    addu        t3, t1, t2
+    LONG_L      k0, (t3)
+    andi        k0, k0, 0xff
+       mtc0            k0,CP0_ENTRYHI
+    ehb
+
+    /* Disable RDHWR access */
+    mtc0    zero,  CP0_HWRENA
+
+    /* load the guest context from VCPU and return */
+    LONG_L  $0, VCPU_R0(k1)
+    LONG_L  $1, VCPU_R1(k1)
+    LONG_L  $2, VCPU_R2(k1)
+    LONG_L  $3, VCPU_R3(k1)
+    LONG_L  $4, VCPU_R4(k1)
+    LONG_L  $5, VCPU_R5(k1)
+    LONG_L  $6, VCPU_R6(k1)
+    LONG_L  $7, VCPU_R7(k1)
+    LONG_L  $8, VCPU_R8(k1)
+    LONG_L  $9, VCPU_R9(k1)
+    LONG_L  $10, VCPU_R10(k1)
+    LONG_L  $11, VCPU_R11(k1)
+    LONG_L  $12, VCPU_R12(k1)
+    LONG_L  $13, VCPU_R13(k1)
+    LONG_L  $14, VCPU_R14(k1)
+    LONG_L  $15, VCPU_R15(k1)
+    LONG_L  $16, VCPU_R16(k1)
+    LONG_L  $17, VCPU_R17(k1)
+    LONG_L  $18, VCPU_R18(k1)
+    LONG_L  $19, VCPU_R19(k1)
+    LONG_L  $20, VCPU_R20(k1)
+    LONG_L  $21, VCPU_R21(k1)
+    LONG_L  $22, VCPU_R22(k1)
+    LONG_L  $23, VCPU_R23(k1)
+    LONG_L  $24, VCPU_R24(k1)
+    LONG_L  $25, VCPU_R25(k1)
+
+    /* k0/k1 loaded later */
+    LONG_L  $28, VCPU_R28(k1)
+    LONG_L  $29, VCPU_R29(k1)
+    LONG_L  $30, VCPU_R30(k1)
+    LONG_L  $31, VCPU_R31(k1)
+
+FEXPORT(__kvm_mips_skip_guest_restore)
+    LONG_L  k0, VCPU_HI(k1)
+    mthi    k0
+
+    LONG_L  k0, VCPU_LO(k1)
+    mtlo    k0
+
+    LONG_L  k0, VCPU_R26(k1)
+    LONG_L  k1, VCPU_R27(k1)
+
+    eret
+
+__kvm_mips_return_to_host:
+    /* EBASE is already pointing to Linux */
+    LONG_L  k1, VCPU_HOST_STACK(k1)
+       addiu   k1,k1, -PT_SIZE
+
+    /* Restore host DDATA_LO */
+    LONG_L      k0, PT_HOST_USERLOCAL(k1)
+    mtc0        k0, CP0_DDATA_LO
+
+    /* Restore host ASID */
+    LONG_L      k0, PT_HOST_ASID(k1)
+    andi        k0, 0xff
+    mtc0        k0,CP0_ENTRYHI
+    ehb
+
+    /* Load context saved on the host stack */
+    LONG_L  $0, PT_R0(k1)
+    LONG_L  $1, PT_R1(k1)
+
+    /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code  */
+    sra     k0, v0, 2
+    move    $2, k0
+
+    LONG_L  $3, PT_R3(k1)
+    LONG_L  $4, PT_R4(k1)
+    LONG_L  $5, PT_R5(k1)
+    LONG_L  $6, PT_R6(k1)
+    LONG_L  $7, PT_R7(k1)
+    LONG_L  $8, PT_R8(k1)
+    LONG_L  $9, PT_R9(k1)
+    LONG_L  $10, PT_R10(k1)
+    LONG_L  $11, PT_R11(k1)
+    LONG_L  $12, PT_R12(k1)
+    LONG_L  $13, PT_R13(k1)
+    LONG_L  $14, PT_R14(k1)
+    LONG_L  $15, PT_R15(k1)
+    LONG_L  $16, PT_R16(k1)
+    LONG_L  $17, PT_R17(k1)
+    LONG_L  $18, PT_R18(k1)
+    LONG_L  $19, PT_R19(k1)
+    LONG_L  $20, PT_R20(k1)
+    LONG_L  $21, PT_R21(k1)
+    LONG_L  $22, PT_R22(k1)
+    LONG_L  $23, PT_R23(k1)
+    LONG_L  $24, PT_R24(k1)
+    LONG_L  $25, PT_R25(k1)
+
+    /* Host k0/k1 were not saved */
+
+    LONG_L  $28, PT_R28(k1)
+    LONG_L  $29, PT_R29(k1)
+    LONG_L  $30, PT_R30(k1)
+
+    LONG_L  k0, PT_HI(k1)
+    mthi    k0
+
+    LONG_L  k0, PT_LO(k1)
+    mtlo    k0
+
+    /* Restore RDHWR access */
+    la      k0, 0x2000000F
+    mtc0    k0,  CP0_HWRENA
+
+
+    /* Restore RA, which is the address we will return to */
+    LONG_L  ra, PT_R31(k1)
+    j       ra
+    nop
+
+    .set    pop
+VECTOR_END(MIPSX(GuestExceptionEnd))
+.end MIPSX(GuestException)
+
+MIPSX(exceptions):
+       ####
+       ##### The exception handlers.
+       #####
+       .word _C_LABEL(MIPSX(GuestException))   #  0
+       .word _C_LABEL(MIPSX(GuestException))   #  1
+       .word _C_LABEL(MIPSX(GuestException))   #  2
+       .word _C_LABEL(MIPSX(GuestException))   #  3
+       .word _C_LABEL(MIPSX(GuestException))   #  4
+       .word _C_LABEL(MIPSX(GuestException))   #  5
+       .word _C_LABEL(MIPSX(GuestException))   #  6
+       .word _C_LABEL(MIPSX(GuestException))   #  7
+       .word _C_LABEL(MIPSX(GuestException))   #  8
+       .word _C_LABEL(MIPSX(GuestException))   #  9
+       .word _C_LABEL(MIPSX(GuestException))   # 10
+       .word _C_LABEL(MIPSX(GuestException))   # 11
+       .word _C_LABEL(MIPSX(GuestException))   # 12
+       .word _C_LABEL(MIPSX(GuestException))   # 13
+       .word _C_LABEL(MIPSX(GuestException))   # 14
+       .word _C_LABEL(MIPSX(GuestException))   # 15
+       .word _C_LABEL(MIPSX(GuestException))   # 16
+       .word _C_LABEL(MIPSX(GuestException))   # 17
+       .word _C_LABEL(MIPSX(GuestException))   # 18
+       .word _C_LABEL(MIPSX(GuestException))   # 19
+       .word _C_LABEL(MIPSX(GuestException))   # 20
+       .word _C_LABEL(MIPSX(GuestException))   # 21
+       .word _C_LABEL(MIPSX(GuestException))   # 22
+       .word _C_LABEL(MIPSX(GuestException))   # 23
+       .word _C_LABEL(MIPSX(GuestException))   # 24
+       .word _C_LABEL(MIPSX(GuestException))   # 25
+       .word _C_LABEL(MIPSX(GuestException))   # 26
+       .word _C_LABEL(MIPSX(GuestException))   # 27
+       .word _C_LABEL(MIPSX(GuestException))   # 28
+       .word _C_LABEL(MIPSX(GuestException))   # 29
+       .word _C_LABEL(MIPSX(GuestException))   # 30
+       .word _C_LABEL(MIPSX(GuestException))   # 31
+
+
+/* This routine makes changes to the instruction stream effective to the hardware.
+ * It should be called after the instruction stream is written.
+ * On return, the new instructions are effective.
+ * Inputs:
+ * a0 = Start address of new instruction stream
+ * a1 = Size, in bytes, of new instruction stream
+ */
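+
+/* For reference, the C code in kvm_mips.c invokes this as
+ *     mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
+ * after copying the exception handlers into the newly allocated gebase area.
+ */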
+
+#define HW_SYNCI_Step       $1
+LEAF(MIPSX(SyncICache))
+    .set    push
+       .set    mips32r2
+    beq     a1, zero, 20f
+    nop
+    addu    a1, a0, a1
+    rdhwr   v0, HW_SYNCI_Step
+    beq     v0, zero, 20f
+    nop
+
+10:
+    synci   0(a0)
+    addu    a0, a0, v0
+    sltu    v1, a0, a1
+    bne     v1, zero, 10b
+    nop
+    sync
+20:
+    jr.hb   ra
+    nop
+    .set pop
+END(MIPSX(SyncICache))
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
new file mode 100644 (file)
index 0000000..e0dad02
--- /dev/null
@@ -0,0 +1,958 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#ifndef VECTORSPACING
+#define VECTORSPACING 0x100    /* for EI/VI mode */
+#endif
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+       { "wait", VCPU_STAT(wait_exits) },
+       { "cache", VCPU_STAT(cache_exits) },
+       { "signal", VCPU_STAT(signal_exits) },
+       { "interrupt", VCPU_STAT(int_exits) },
+       { "cop_unusable", VCPU_STAT(cop_unusable_exits) },
+       { "tlbmod", VCPU_STAT(tlbmod_exits) },
+       { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
+       { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
+       { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
+       { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
+       { "syscall", VCPU_STAT(syscall_exits) },
+       { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
+       { "break_inst", VCPU_STAT(break_inst_exits) },
+       { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
+       { "halt_wakeup", VCPU_STAT(halt_wakeup) },
+       {NULL}
+};
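+
+/*
+ * The entries above are picked up by the generic KVM code (virt/kvm/kvm_main.c)
+ * and exposed as debugfs files, typically under /sys/kernel/debug/kvm/.
+ */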
+
+static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+       int i;
+       for_each_possible_cpu(i) {
+               vcpu->arch.guest_kernel_asid[i] = 0;
+               vcpu->arch.guest_user_asid[i] = 0;
+       }
+       return 0;
+}
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+       return gfn;
+}
+
+/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
+ * are "runnable" if interrupts are pending
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+       return !!(vcpu->arch.pending_exceptions);
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
+int kvm_arch_hardware_enable(void *garbage)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+       int *r = (int *)rtn;
+       *r = 0;
+       return;
+}
+
+static void kvm_mips_init_tlbs(struct kvm *kvm)
+{
+       unsigned long wired;
+
+       /* Add a wired entry to the TLB; it is used to map the commpage to the Guest kernel */
+       wired = read_c0_wired();
+       write_c0_wired(wired + 1);
+       mtc0_tlbw_hazard();
+       kvm->arch.commpage_tlb = wired;
+
+       kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
+                 kvm->arch.commpage_tlb);
+}
+
+static void kvm_mips_init_vm_percpu(void *arg)
+{
+       struct kvm *kvm = (struct kvm *)arg;
+
+       kvm_mips_init_tlbs(kvm);
+       kvm_mips_callbacks->vm_init(kvm);
+
+}
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+       if (atomic_inc_return(&kvm_mips_instance) == 1) {
+               kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
+                        __func__);
+               on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
+       }
+
+
+       return 0;
+}
+
+void kvm_mips_free_vcpus(struct kvm *kvm)
+{
+       unsigned int i;
+       struct kvm_vcpu *vcpu;
+
+       /* Put the pages we reserved for the guest pmap */
+       for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
+               if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
+                       kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+       }
+
+       if (kvm->arch.guest_pmap)
+               kfree(kvm->arch.guest_pmap);
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               kvm_arch_vcpu_free(vcpu);
+       }
+
+       mutex_lock(&kvm->lock);
+
+       for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+               kvm->vcpus[i] = NULL;
+
+       atomic_set(&kvm->online_vcpus, 0);
+
+       mutex_unlock(&kvm->lock);
+}
+
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
+static void kvm_mips_uninit_tlbs(void *arg)
+{
+       /* Restore wired count */
+       write_c0_wired(0);
+       mtc0_tlbw_hazard();
+       /* Clear out all the TLBs */
+       kvm_local_flush_tlb_all();
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+       kvm_mips_free_vcpus(kvm);
+
+       /* If this is the last instance, restore wired count */
+       if (atomic_dec_return(&kvm_mips_instance) == 0) {
+               kvm_info("%s: last KVM instance, restoring TLB parameters\n",
+                        __func__);
+               on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
+       }
+}
+
+long
+kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       return -EINVAL;
+}
+
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+       return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                struct kvm_memory_slot *memslot,
+                                struct kvm_userspace_memory_region *mem,
+                                enum kvm_mr_change change)
+{
+       return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                                struct kvm_userspace_memory_region *mem,
+                                const struct kvm_memory_slot *old,
+                                enum kvm_mr_change change)
+{
+       unsigned long npages = 0;
+       int i, err = 0;
+
+       kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
+                 __func__, kvm, mem->slot, mem->guest_phys_addr,
+                 mem->memory_size, mem->userspace_addr);
+
+       /* Setup Guest PMAP table */
+       if (!kvm->arch.guest_pmap) {
+               if (mem->slot == 0)
+                       npages = mem->memory_size >> PAGE_SHIFT;
+
+               if (npages) {
+                       kvm->arch.guest_pmap_npages = npages;
+                       kvm->arch.guest_pmap =
+                           kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
+
+                       if (!kvm->arch.guest_pmap) {
+                               kvm_err("Failed to allocate guest PMAP");
+                               err = -ENOMEM;
+                               goto out;
+                       }
+
+                       kvm_info
+                           ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
+                            npages, kvm->arch.guest_pmap);
+
+                       /* Now setup the page table */
+                       for (i = 0; i < npages; i++) {
+                               kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
+                       }
+               }
+       }
+out:
+       return;
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot)
+{
+}
+
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+       extern char mips32_exception[], mips32_exceptionEnd[];
+       extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+       int err, size, offset;
+       void *gebase;
+       int i;
+
+       struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+
+       if (!vcpu) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = kvm_vcpu_init(vcpu, kvm, id);
+
+       if (err)
+               goto out_free_cpu;
+
+       kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+
+       /* Allocate space for host mode exception handlers that handle
+        * guest mode exits
+        */
+       if (cpu_has_veic || cpu_has_vint) {
+               size = 0x200 + VECTORSPACING * 64;
+       } else {
+               size = 0x200;
+       }
+
+       /* Save Linux EBASE */
+       vcpu->arch.host_ebase = (void *)read_c0_ebase();
+
+       gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
+
+       if (!gebase) {
+               err = -ENOMEM;
+               goto out_free_cpu;
+       }
+       kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+                ALIGN(size, PAGE_SIZE), gebase);
+
+       /* Save new ebase */
+       vcpu->arch.guest_ebase = gebase;
+
+       /* Copy L1 Guest Exception handler to correct offset */
+
+       /* TLB Refill, EXL = 0 */
+       memcpy(gebase, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+
+       /* General Exception Entry point */
+       memcpy(gebase + 0x180, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+
+       /* For vectored interrupts poke the exception code @ all offsets 0-7 */
+       for (i = 0; i < 8; i++) {
+               kvm_debug("L1 Vectored handler @ %p\n",
+                         gebase + 0x200 + (i * VECTORSPACING));
+               memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
+                      mips32_exceptionEnd - mips32_exception);
+       }
+
+       /* General handler, relocate to unmapped space for sanity's sake */
+       offset = 0x2000;
+       kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
+                gebase + offset,
+                mips32_GuestExceptionEnd - mips32_GuestException);
+
+       memcpy(gebase + offset, mips32_GuestException,
+              mips32_GuestExceptionEnd - mips32_GuestException);
+
+       /* Invalidate the icache for these ranges */
+       mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
+
+       /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
+       vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
+
+       if (!vcpu->arch.kseg0_commpage) {
+               err = -ENOMEM;
+               goto out_free_gebase;
+       }
+
+       kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
+       kvm_mips_commpage_init(vcpu);
+
+       /* Init */
+       vcpu->arch.last_sched_cpu = -1;
+
+       /* Start off the timer */
+       kvm_mips_emulate_count(vcpu);
+
+       return vcpu;
+
+out_free_gebase:
+       kfree(gebase);
+
+out_free_cpu:
+       kfree(vcpu);
+
+out:
+       return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+       hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+       kvm_vcpu_uninit(vcpu);
+
+       kvm_mips_dump_stats(vcpu);
+
+       kfree(vcpu->arch.guest_ebase);
+       kfree(vcpu->arch.kseg0_commpage);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_arch_vcpu_free(vcpu);
+}
+
+int
+kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                   struct kvm_guest_debug *dbg)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int r = 0;
+       sigset_t sigsaved;
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+       if (vcpu->mmio_needed) {
+               if (!vcpu->mmio_is_write)
+                       kvm_mips_complete_mmio_load(vcpu, run);
+               vcpu->mmio_needed = 0;
+       }
+
+       /* Check if we have any exceptions/interrupts pending */
+       kvm_mips_deliver_interrupts(vcpu,
+                                   kvm_read_c0_guest_cause(vcpu->arch.cop0));
+
+       local_irq_disable();
+       kvm_guest_enter();
+
+       r = __kvm_mips_vcpu_run(run, vcpu);
+
+       kvm_guest_exit();
+       local_irq_enable();
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       return r;
+}
+
+int
+kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+       struct kvm_vcpu *dvcpu = NULL;
+
+       if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+               kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
+                         (int)intr);
+
+       if (irq->cpu == -1)
+               dvcpu = vcpu;
+       else
+               dvcpu = vcpu->kvm->vcpus[irq->cpu];
+
+       if (intr == 2 || intr == 3 || intr == 4) {
+               kvm_mips_callbacks->queue_io_int(dvcpu, irq);
+
+       } else if (intr == -2 || intr == -3 || intr == -4) {
+               kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
+       } else {
+               kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
+                       irq->cpu, irq->irq);
+               return -EINVAL;
+       }
+
+       dvcpu->arch.wait = 0;
+
+       if (waitqueue_active(&dvcpu->wq)) {
+               wake_up_interruptible(&dvcpu->wq);
+       }
+
+       return 0;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                               struct kvm_mp_state *mp_state)
+{
+       return -EINVAL;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                               struct kvm_mp_state *mp_state)
+{
+       return -EINVAL;
+}
+
+long
+kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       long r;
+
+       switch (ioctl) {
+       case KVM_NMI:
+               /* Treat the NMI as a CPU reset */
+               r = kvm_mips_reset_vcpu(vcpu);
+               break;
+       case KVM_INTERRUPT:
+               {
+                       struct kvm_mips_interrupt irq;
+                       r = -EFAULT;
+                       if (copy_from_user(&irq, argp, sizeof(irq)))
+                               goto out;
+
+                       kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+                                 irq.irq);
+
+                       r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+                       break;
+               }
+       default:
+               r = -EINVAL;
+       }
+
+out:
+       return r;
+}
+
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+       struct kvm_memory_slot *memslot;
+       unsigned long ga, ga_end;
+       int is_dirty = 0;
+       int r;
+       unsigned long n;
+
+       mutex_lock(&kvm->slots_lock);
+
+       r = kvm_get_dirty_log(kvm, log, &is_dirty);
+       if (r)
+               goto out;
+
+       /* If nothing is dirty, don't bother messing with page tables. */
+       if (is_dirty) {
+               memslot = &kvm->memslots->memslots[log->slot];
+
+               ga = memslot->base_gfn << PAGE_SHIFT;
+               ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+               printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
+                      ga_end);
+
+               n = kvm_dirty_bitmap_bytes(memslot);
+               memset(memslot->dirty_bitmap, 0, n);
+       }
+
+       r = 0;
+out:
+       mutex_unlock(&kvm->slots_lock);
+       return r;
+
+}
+
+long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       long r;
+
+       switch (ioctl) {
+       default:
+               r = -EINVAL;
+       }
+
+       return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+       int ret;
+
+       if (kvm_mips_callbacks) {
+               kvm_err("kvm: module already exists\n");
+               return -EEXIST;
+       }
+
+       ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
+
+       return ret;
+}
+
+void kvm_arch_exit(void)
+{
+       kvm_mips_callbacks = NULL;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       return -ENOTSUPP;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+       int r;
+
+       switch (ext) {
+       case KVM_CAP_COALESCED_MMIO:
+               r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+               break;
+       default:
+               r = 0;
+               break;
+       }
+       return r;
+
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return kvm_mips_pending_timer(vcpu);
+}
+
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+{
+       int i;
+       struct mips_coproc *cop0;
+
+       if (!vcpu)
+               return -1;
+
+       printk("VCPU Register Dump:\n");
+       printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
+       printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+       for (i = 0; i < 32; i += 4) {
+               printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+                      vcpu->arch.gprs[i],
+                      vcpu->arch.gprs[i + 1],
+                      vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
+       }
+       printk("\thi: 0x%08lx\n", vcpu->arch.hi);
+       printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
+
+       cop0 = vcpu->arch.cop0;
+       printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+              kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
+
+       printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       for (i = 0; i < 32; i++)
+               vcpu->arch.gprs[i] = regs->gprs[i];
+
+       vcpu->arch.hi = regs->hi;
+       vcpu->arch.lo = regs->lo;
+       vcpu->arch.pc = regs->pc;
+
+       return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       for (i = 0; i < 32; i++)
+               regs->gprs[i] = vcpu->arch.gprs[i];
+
+       regs->hi = vcpu->arch.hi;
+       regs->lo = vcpu->arch.lo;
+       regs->pc = vcpu->arch.pc;
+
+       return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
+}
+
+void kvm_mips_comparecount_func(unsigned long data)
+{
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+       kvm_mips_callbacks->queue_timer_int(vcpu);
+
+       vcpu->arch.wait = 0;
+       if (waitqueue_active(&vcpu->wq)) {
+               wake_up_interruptible(&vcpu->wq);
+       }
+}
+
+/*
+ * low level hrtimer wake routine.
+ */
+enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+       struct kvm_vcpu *vcpu;
+
+       vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+       kvm_mips_comparecount_func((unsigned long) vcpu);
+       hrtimer_forward_now(&vcpu->arch.comparecount_timer,
+                           ktime_set(0, MS_TO_NS(10)));
+       return HRTIMER_RESTART;
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       kvm_mips_callbacks->vcpu_init(vcpu);
+       hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+                    HRTIMER_MODE_REL);
+       vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+       kvm_mips_init_shadow_tlb(vcpu);
+       return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+       return;
+}
+
+int
+kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
+{
+       return 0;
+}
+
+/* Initial guest state */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       return kvm_mips_callbacks->vcpu_setup(vcpu);
+}
+
+static
+void kvm_mips_set_c0_status(void)
+{
+       uint32_t status = read_c0_status();
+
+       if (cpu_has_fpu)
+               status |= (ST0_CU1);
+
+       if (cpu_has_dsp)
+               status |= (ST0_MX);
+
+       write_c0_status(status);
+       ehb();
+}
+
+/*
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       uint32_t cause = vcpu->arch.host_cp0_cause;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       /* Set a default exit reason */
+       run->exit_reason = KVM_EXIT_UNKNOWN;
+       run->ready_for_interrupt_injection = 1;
+
+       /*
+        * Set the appropriate status bits based on host CPU features,
+        * before we hit the scheduler.
+        */
+       kvm_mips_set_c0_status();
+
+       local_irq_enable();
+
+       kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
+                       cause, opc, run, vcpu);
+
+       /*
+        * Do a privilege check: if the guest was in user mode, most of these
+        * exit conditions end up causing an exception to be delivered to the
+        * guest kernel.
+        */
+       er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+       if (er == EMULATE_PRIV_FAIL) {
+               goto skip_emul;
+       } else if (er == EMULATE_FAIL) {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               goto skip_emul;
+       }
+
+       switch (exccode) {
+       case T_INT:
+               kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
+
+               ++vcpu->stat.int_exits;
+               trace_kvm_exit(vcpu, INT_EXITS);
+
+               if (need_resched()) {
+                       cond_resched();
+               }
+
+               ret = RESUME_GUEST;
+               break;
+
+       case T_COP_UNUSABLE:
+               kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
+
+               ++vcpu->stat.cop_unusable_exits;
+               trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
+               ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
+               /* XXXKYMA: Might need to return to user space */
+               if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
+                       ret = RESUME_HOST;
+               }
+               break;
+
+       case T_TLB_MOD:
+               ++vcpu->stat.tlbmod_exits;
+               trace_kvm_exit(vcpu, TLBMOD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
+               break;
+
+       case T_TLB_ST_MISS:
+               kvm_debug
+                   ("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+                    badvaddr);
+
+               ++vcpu->stat.tlbmiss_st_exits;
+               trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
+               break;
+
+       case T_TLB_LD_MISS:
+               kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+                         cause, opc, badvaddr);
+
+               ++vcpu->stat.tlbmiss_ld_exits;
+               trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
+               break;
+
+       case T_ADDR_ERR_ST:
+               ++vcpu->stat.addrerr_st_exits;
+               trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
+               break;
+
+       case T_ADDR_ERR_LD:
+               ++vcpu->stat.addrerr_ld_exits;
+               trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
+               break;
+
+       case T_SYSCALL:
+               ++vcpu->stat.syscall_exits;
+               trace_kvm_exit(vcpu, SYSCALL_EXITS);
+               ret = kvm_mips_callbacks->handle_syscall(vcpu);
+               break;
+
+       case T_RES_INST:
+               ++vcpu->stat.resvd_inst_exits;
+               trace_kvm_exit(vcpu, RESVD_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_res_inst(vcpu);
+               break;
+
+       case T_BREAK:
+               ++vcpu->stat.break_inst_exits;
+               trace_kvm_exit(vcpu, BREAK_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_break(vcpu);
+               break;
+
+       default:
+               kvm_err
+                   ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
+                    exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+                    kvm_read_c0_guest_status(vcpu->arch.cop0));
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               break;
+
+       }
+
+skip_emul:
+       local_irq_disable();
+
+       if (er == EMULATE_DONE && !(ret & RESUME_HOST))
+               kvm_mips_deliver_interrupts(vcpu, cause);
+
+       if (!(ret & RESUME_HOST)) {
+               /* Only check for signals if not already exiting to userspace  */
+               if (signal_pending(current)) {
+                       run->exit_reason = KVM_EXIT_INTR;
+                       ret = (-EINTR << 2) | RESUME_HOST;
+                       ++vcpu->stat.signal_exits;
+                       trace_kvm_exit(vcpu, SIGNAL_EXITS);
+               }
+       }
+
+       return ret;
+}
+
+int __init kvm_mips_init(void)
+{
+       int ret;
+
+       ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+       if (ret)
+               return ret;
+
+       /*
+        * On MIPS, kernel modules are executed from "mapped space", which
+        * requires TLBs. The TLB handling code is statically linked with the
+        * rest of the kernel (kvm_tlb.c) to avoid the possibility of double
+        * faulting. The issue is that the TLB code references routines that
+        * are part of the KVM module, which are only available once the
+        * module is loaded.
+        */
+       kvm_mips_gfn_to_pfn = gfn_to_pfn;
+       kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
+       kvm_mips_is_error_pfn = is_error_pfn;
+
+       pr_info("KVM/MIPS Initialized\n");
+       return 0;
+}
+
+void __exit kvm_mips_exit(void)
+{
+       kvm_exit();
+
+       kvm_mips_gfn_to_pfn = NULL;
+       kvm_mips_release_pfn_clean = NULL;
+       kvm_mips_is_error_pfn = NULL;
+
+       pr_info("KVM/MIPS unloaded\n");
+}
+
+module_init(kvm_mips_init);
+module_exit(kvm_mips_exit);
+
+EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
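A minimal, hypothetical sketch of the userspace side of the ioctls added above
(KVM_RUN, whose exit path is completed in kvm_arch_vcpu_ioctl_run(), and
KVM_INTERRUPT, routed through kvm_vcpu_ioctl_interrupt()). It is not part of
the patch; it assumes the standard KVM userspace API from <linux/kvm.h> and the
struct kvm_mips_interrupt layout introduced by this series, and it omits all
error handling.

    /* Hypothetical userspace run loop; illustrative only. */
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    static void run_vcpu(int kvm_fd, int vcpu_fd)
    {
            int sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
            struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                                       MAP_SHARED, vcpu_fd, 0);

            for (;;) {
                    ioctl(vcpu_fd, KVM_RUN, 0);

                    switch (run->exit_reason) {
                    case KVM_EXIT_MMIO:
                            /* Emulate the device access here; on the next
                             * KVM_RUN, kvm_arch_vcpu_ioctl_run() finishes a
                             * pending load via kvm_mips_complete_mmio_load().
                             */
                            break;
                    case KVM_EXIT_IRQ_WINDOW_OPEN:
                    case KVM_EXIT_INTR: {
                            /* Queue a guest I/O interrupt on IP2 (irq 2); a
                             * negative irq number would dequeue it, mirroring
                             * kvm_vcpu_ioctl_interrupt() above.
                             */
                            struct kvm_mips_interrupt irq = {
                                    .cpu = -1, .irq = 2
                            };

                            ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
                            break;
                    }
                    default:
                            return;
                    }
            }
    }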
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
new file mode 100644 (file)
index 0000000..a4a8c85
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: commpage: mapped into guest kernel space
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+       struct mips_coproc cop0;        /* COP0 state is mapped into Guest kernel via commpage */
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
new file mode 100644 (file)
index 0000000..3873b1e
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* commpage, currently used for Virtual COP0 registers.
+* Mapped into the guest kernel @ 0x0.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_comm.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+
+       memset(page, 0, sizeof(struct kvm_mips_commpage));
+
+       /* Specific init values for fields */
+       vcpu->arch.cop0 = &page->cop0;
+       memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
+
+       return;
+}
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
new file mode 100644 (file)
index 0000000..96528e2
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+
+#include "kvm_mips_comm.h"
+
+#define SYNCI_TEMPLATE  0x041f0000
+#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
+#define SYNCI_OFFSET(x) ((x) & 0xffff)
+
+#define LW_TEMPLATE     0x8c000000
+#define CLEAR_TEMPLATE  0x00000020
+#define SW_TEMPLATE     0xac000000
+
+int
+kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+                          struct kvm_vcpu *vcpu)
+{
+       int result = 0;
+       unsigned long kseg0_opc;
+       uint32_t synci_inst = 0x0;
+
+       /* Replace the CACHE instruction, with a NOP */
+       kseg0_opc =
+           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                      (vcpu, (unsigned long) opc));
+       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+       mips32_SyncICache(kseg0_opc, 32);
+
+       return result;
+}
+
+/*
+ * Address-based CACHE instructions are transformed into SYNCIs. This is a
+ * little heavy-handed for plain D-cache invalidates, but it avoids an
+ * expensive trap.
+ */
+int
+kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+                       struct kvm_vcpu *vcpu)
+{
+       int result = 0;
+       unsigned long kseg0_opc;
+       uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
+
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       synci_inst |= (base << 21);
+       synci_inst |= offset;
+
+       kseg0_opc =
+           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                      (vcpu, (unsigned long) opc));
+       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+       mips32_SyncICache(kseg0_opc, 32);
+
+       return result;
+}
+
+int
+kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       int32_t rt, rd, sel;
+       uint32_t mfc0_inst;
+       unsigned long kseg0_opc, flags;
+
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+
+       if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+               mfc0_inst = CLEAR_TEMPLATE;
+               mfc0_inst |= ((rt & 0x1f) << 16);
+       } else {
+               mfc0_inst = LW_TEMPLATE;
+               mfc0_inst |= ((rt & 0x1f) << 16);
+               mfc0_inst |=
+                   offsetof(struct mips_coproc,
+                            reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
+                                                     cop0);
+       }
+
+       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               kseg0_opc =
+                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                              (vcpu, (unsigned long) opc));
+               memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
+               mips32_SyncICache(kseg0_opc, 32);
+       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
+               mips32_SyncICache((unsigned long) opc, 32);
+               local_irq_restore(flags);
+       } else {
+               kvm_err("%s: Invalid address: %p\n", __func__, opc);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+int
+kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       int32_t rt, rd, sel;
+       uint32_t mtc0_inst = SW_TEMPLATE;
+       unsigned long kseg0_opc, flags;
+
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+
+       mtc0_inst |= ((rt & 0x1f) << 16);
+       mtc0_inst |=
+           offsetof(struct mips_coproc,
+                    reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
+
+       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               kseg0_opc =
+                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                              (vcpu, (unsigned long) opc));
+               memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
+               mips32_SyncICache(kseg0_opc, 32);
+       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
+               mips32_SyncICache((unsigned long) opc, 32);
+               local_irq_restore(flags);
+       } else {
+               kvm_err("%s: Invalid address: %p\n", __func__, opc);
+               return -EFAULT;
+       }
+
+       return 0;
+}
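A rough, standalone illustration (not from the patch) of the bit packing used
by the translators above: the rewritten MTC0 becomes an SW whose base-register
field is left as $zero, with the 16-bit offset selecting the virtual COP0
register slot inside the guest-mapped commpage, so the guest updates emulated
CP0 state with an ordinary store instead of trapping. The SW_TEMPLATE value is
taken from this file; the offset is treated as an opaque input.

    /* Sketch only; mirrors the encoding in kvm_mips_trans_mtc0() above. */
    #include <stdint.h>

    #define SW_TEMPLATE 0xac000000          /* sw rt, offset($zero) */

    static uint32_t encode_commpage_store(unsigned int rt, uint16_t offset)
    {
            uint32_t insn = SW_TEMPLATE;

            insn |= (uint32_t)(rt & 0x1f) << 16;    /* rt field, bits 20:16 */
            insn |= offset;                         /* 16-bit offset field  */

            return insn;
    }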
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
new file mode 100644 (file)
index 0000000..2b2bac9
--- /dev/null
@@ -0,0 +1,1826 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Instruction/Exception emulation
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <linux/random.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-info.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/inst.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#include "trace.h"
+
+/*
+ * Compute the return address and do branch simulation, if required.
+ * This function should only be called when the faulting instruction is
+ * in a branch delay slot (i.e. when Cause.BD is set).
+ */
+unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
+       unsigned long instpc)
+{
+       unsigned int dspcontrol;
+       union mips_instruction insn;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       long epc = instpc;
+       long nextpc = KVM_INVALID_INST;
+
+       if (epc & 3)
+               goto unaligned;
+
+       /*
+        * Read the instruction
+        */
+       insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+
+       if (insn.word == KVM_INVALID_INST)
+               return KVM_INVALID_INST;
+
+       switch (insn.i_format.opcode) {
+               /*
+                * jr and jalr are in r_format format.
+                */
+       case spec_op:
+               switch (insn.r_format.func) {
+               case jalr_op:
+                       arch->gprs[insn.r_format.rd] = epc + 8;
+                       /* Fall through */
+               case jr_op:
+                       nextpc = arch->gprs[insn.r_format.rs];
+                       break;
+               }
+               break;
+
+               /*
+                * This group contains:
+                * bltz_op, bgez_op, bltzl_op, bgezl_op,
+                * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+                */
+       case bcond_op:
+               switch (insn.i_format.rt) {
+               case bltz_op:
+               case bltzl_op:
+                       if ((long)arch->gprs[insn.i_format.rs] < 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bgez_op:
+               case bgezl_op:
+                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bltzal_op:
+               case bltzall_op:
+                       arch->gprs[31] = epc + 8;
+                       if ((long)arch->gprs[insn.i_format.rs] < 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bgezal_op:
+               case bgezall_op:
+                       arch->gprs[31] = epc + 8;
+                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+               case bposge32_op:
+                       if (!cpu_has_dsp)
+                               goto sigill;
+
+                       dspcontrol = rddsp(0x01);
+
+                       if (dspcontrol >= 32) {
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       } else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+               }
+               break;
+
+               /*
+                * These are unconditional and in j_format.
+                */
+       case jal_op:
+               arch->gprs[31] = instpc + 8;
+               /* Fall through */
+       case j_op:
+               epc += 4;
+               epc >>= 28;
+               epc <<= 28;
+               epc |= (insn.j_format.target << 2);
+               nextpc = epc;
+               break;
+
+               /*
+                * These are conditional and in i_format.
+                */
+       case beq_op:
+       case beql_op:
+               if (arch->gprs[insn.i_format.rs] ==
+                   arch->gprs[insn.i_format.rt])
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case bne_op:
+       case bnel_op:
+               if (arch->gprs[insn.i_format.rs] !=
+                   arch->gprs[insn.i_format.rt])
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case blez_op:           /* not really i_format */
+       case blezl_op:
+               /* rt field assumed to be zero */
+               if ((long)arch->gprs[insn.i_format.rs] <= 0)
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case bgtz_op:
+       case bgtzl_op:
+               /* rt field assumed to be zero */
+               if ((long)arch->gprs[insn.i_format.rs] > 0)
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+               /*
+                * And now the FPA/cp1 branch instructions.
+                */
+       case cop1_op:
+               printk("%s: unsupported cop1_op\n", __func__);
+               break;
+       }
+
+       return nextpc;
+
+unaligned:
+       printk("%s: unaligned epc\n", __func__);
+       return nextpc;
+
+sigill:
+       printk("%s: DSP branch but not DSP ASE\n", __func__);
+       return nextpc;
+}
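+
+/*
+ * Illustration (not part of the original patch): for a taken conditional
+ * branch the code above computes  new PC = epc + 4 + (simmediate << 2).
+ * For example, a beq at epc = 0x80001000 with simmediate = 0x0010 resumes
+ * at 0x80001044 when taken, and at epc + 8 = 0x80001008 (just past the
+ * delay slot) when not taken.
+ */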
+
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+       unsigned long branch_pc;
+       enum emulation_result er = EMULATE_DONE;
+
+       if (cause & CAUSEF_BD) {
+               branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
+               if (branch_pc == KVM_INVALID_INST) {
+                       er = EMULATE_FAIL;
+               } else {
+                       vcpu->arch.pc = branch_pc;
+                       kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+               }
+       } else
+               vcpu->arch.pc += 4;
+
+       kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+
+       return er;
+}
+
+/*
+ * Every time the Compare register is written to, we need to decide when to
+ * fire the timer that represents timer ticks to the guest.
+ */
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+
+       /* If COUNT is enabled */
+       if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
+               hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+               hrtimer_start(&vcpu->arch.comparecount_timer,
+                             ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
+       } else {
+               hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+
+       if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+               kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+                         kvm_read_c0_guest_epc(cop0));
+               kvm_clear_c0_guest_status(cop0, ST0_EXL);
+               vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+
+       } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+               kvm_clear_c0_guest_status(cop0, ST0_ERL);
+               vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+       } else {
+               printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+                      vcpu->arch.pc);
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+
+       kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
+                 vcpu->arch.pending_exceptions);
+
+       ++vcpu->stat.wait_exits;
+       trace_kvm_exit(vcpu, WAIT_EXITS);
+       if (!vcpu->arch.pending_exceptions) {
+               vcpu->arch.wait = 1;
+               kvm_vcpu_block(vcpu);
+
+               /*
+                * Once we are runnable, definitely go off to user space to
+                * check if any I/O interrupts are pending.
+                */
+               if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+                       clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+                       vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+               }
+       }
+
+       return er;
+}
+
+/*
+ * XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so
+ * that we can catch it if things ever change.
+ */
+enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_FAIL;
+       uint32_t pc = vcpu->arch.pc;
+
+       printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+       return er;
+}
+
+/* Write Guest TLB Entry @ Index */
+enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int index = kvm_read_c0_guest_index(cop0);
+       enum emulation_result er = EMULATE_DONE;
+       struct kvm_mips_tlb *tlb = NULL;
+       uint32_t pc = vcpu->arch.pc;
+
+       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+               printk("%s: illegal index: %d\n", __func__, index);
+               printk
+                   ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+                    pc, index, kvm_read_c0_guest_entryhi(cop0),
+                    kvm_read_c0_guest_entrylo0(cop0),
+                    kvm_read_c0_guest_entrylo1(cop0),
+                    kvm_read_c0_guest_pagemask(cop0));
+               index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
+       }
+
+       tlb = &vcpu->arch.guest_tlb[index];
+
+       /*
+        * Probe the shadow host TLB for the entry being overwritten; if one
+        * matches, invalidate it.
+        */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+       kvm_debug
+           ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+            pc, index, kvm_read_c0_guest_entryhi(cop0),
+            kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
+            kvm_read_c0_guest_pagemask(cop0));
+
+       return er;
+}
+
+/* Write Guest TLB Entry @ Random Index */
+enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+       struct kvm_mips_tlb *tlb = NULL;
+       uint32_t pc = vcpu->arch.pc;
+       int index;
+
+#if 1
+       get_random_bytes(&index, sizeof(index));
+       index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
+#else
+       index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
+#endif
+
+       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+               printk("%s: illegal index: %d\n", __func__, index);
+               return EMULATE_FAIL;
+       }
+
+       tlb = &vcpu->arch.guest_tlb[index];
+
+       /*
+        * Probe the shadow host TLB for the entry being overwritten; if one
+        * matches, invalidate it.
+        */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+       kvm_debug
+           ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+            pc, index, kvm_read_c0_guest_entryhi(cop0),
+            kvm_read_c0_guest_entrylo0(cop0),
+            kvm_read_c0_guest_entrylo1(cop0));
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       long entryhi = kvm_read_c0_guest_entryhi(cop0);
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t pc = vcpu->arch.pc;
+       int index = -1;
+
+       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+
+       kvm_write_c0_guest_index(cop0, index);
+
+       kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+                 index);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
+                    struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+       int32_t rt, rd, copz, sel, co_bit, op;
+       uint32_t pc = vcpu->arch.pc;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL) {
+               return er;
+       }
+
+       copz = (inst >> 21) & 0x1f;
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+       co_bit = (inst >> 25) & 1;
+
+       /* Verify that the register is valid */
+       if (rd > MIPS_CP0_DESAVE) {
+               printk("Invalid rd: %d\n", rd);
+               er = EMULATE_FAIL;
+               goto done;
+       }
+
+       if (co_bit) {
+               op = (inst) & 0xff;
+
+               switch (op) {
+               case tlbr_op:   /*  Read indexed TLB entry  */
+                       er = kvm_mips_emul_tlbr(vcpu);
+                       break;
+               case tlbwi_op:  /*  Write indexed  */
+                       er = kvm_mips_emul_tlbwi(vcpu);
+                       break;
+               case tlbwr_op:  /*  Write random  */
+                       er = kvm_mips_emul_tlbwr(vcpu);
+                       break;
+               case tlbp_op:   /* TLB Probe */
+                       er = kvm_mips_emul_tlbp(vcpu);
+                       break;
+               case rfe_op:
+                       printk("!!!COP0_RFE!!!\n");
+                       break;
+               case eret_op:
+                       er = kvm_mips_emul_eret(vcpu);
+                       goto dont_update_pc;
+                       break;
+               case wait_op:
+                       er = kvm_mips_emul_wait(vcpu);
+                       break;
+               }
+       } else {
+               switch (copz) {
+               case mfc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[rd][sel]++;
+#endif
+                       /* Get reg */
+                       if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+                               /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
+                               vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
+                       } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+                               vcpu->arch.gprs[rt] = 0x0;
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+                       } else {
+                               vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+                       }
+
+                       kvm_debug
+                           ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
+                            pc, rd, sel, rt, vcpu->arch.gprs[rt]);
+
+                       break;
+
+               case dmfc_op:
+                       vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+                       break;
+
+               case mtc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[rd][sel]++;
+#endif
+                       if ((rd == MIPS_CP0_TLB_INDEX)
+                           && (vcpu->arch.gprs[rt] >=
+                               KVM_MIPS_GUEST_TLB_SIZE)) {
+                               printk("Invalid TLB Index: %ld",
+                                      vcpu->arch.gprs[rt]);
+                               er = EMULATE_FAIL;
+                               break;
+                       }
+#define C0_EBASE_CORE_MASK 0xff
+                       if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
+                               /* Preserve CORE number */
+                               kvm_change_c0_guest_ebase(cop0,
+                                                         ~(C0_EBASE_CORE_MASK),
+                                                         vcpu->arch.gprs[rt]);
+                               printk("MTCz, cop0->reg[EBASE]: %#lx\n",
+                                      kvm_read_c0_guest_ebase(cop0));
+                       } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
+                               uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
+                               if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
+                                   &&
+                                   (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
+                                     != nasid)) {
+
+                                       kvm_debug
+                                           ("MTCz, change ASID from %#lx to %#lx\n",
+                                            ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
+                                            ASID_MASK(vcpu->arch.gprs[rt]));
+
+                                       /* Blow away the shadow host TLBs */
+                                       kvm_mips_flush_host_tlb(1);
+                               }
+                               kvm_write_c0_guest_entryhi(cop0,
+                                                          vcpu->arch.gprs[rt]);
+                       }
+                       /* Are we writing to COUNT */
+                       else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+                               /*
+                                * Linux doesn't seem to write into COUNT;
+                                * silently ignore such writes for now rather
+                                * than failing the emulation.
+                                */
+                               /*er = EMULATE_FAIL; */
+                               goto done;
+                       } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
+                               kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
+                                         pc, kvm_read_c0_guest_compare(cop0),
+                                         vcpu->arch.gprs[rt]);
+
+                               /* If we are writing to COMPARE */
+                               /* Clear pending timer interrupt, if any */
+                               kvm_mips_callbacks->dequeue_timer_int(vcpu);
+                               kvm_write_c0_guest_compare(cop0,
+                                                          vcpu->arch.gprs[rt]);
+                       } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+                               kvm_write_c0_guest_status(cop0,
+                                                         vcpu->arch.gprs[rt]);
+                               /* Make sure that CU1 and NMI bits are never set */
+                               kvm_clear_c0_guest_status(cop0,
+                                                         (ST0_CU1 | ST0_NMI));
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+                       } else {
+                               cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+                       }
+
+                       kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
+                                 rd, sel, cop0->reg[rd][sel]);
+                       break;
+
+               case dmtc_op:
+                       printk
+                           ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+                            vcpu->arch.pc, rt, rd, sel);
+                       er = EMULATE_FAIL;
+                       break;
+
+               case mfmcz_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[MIPS_CP0_STATUS][0]++;
+#endif
+                       if (rt != 0) {
+                               vcpu->arch.gprs[rt] =
+                                   kvm_read_c0_guest_status(cop0);
+                       }
+                       /* EI */
+                       if (inst & 0x20) {
+                               kvm_debug("[%#lx] mfmcz_op: EI\n",
+                                         vcpu->arch.pc);
+                               kvm_set_c0_guest_status(cop0, ST0_IE);
+                       } else {
+                               kvm_debug("[%#lx] mfmcz_op: DI\n",
+                                         vcpu->arch.pc);
+                               kvm_clear_c0_guest_status(cop0, ST0_IE);
+                       }
+
+                       break;
+
+               case wrpgpr_op:
+                       {
+                               uint32_t css =
+                                   cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+                               uint32_t pss =
+                                   (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
+                               /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
+                               if (css || pss) {
+                                       er = EMULATE_FAIL;
+                                       break;
+                               }
+                               kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
+                                         vcpu->arch.gprs[rt]);
+                               vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
+                       }
+                       break;
+               default:
+                       printk
+                           ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+                            vcpu->arch.pc, copz);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+       }
+
+done:
+       /*
+        * Rollback PC only if emulation was unsuccessful
+        */
+       if (er == EMULATE_FAIL) {
+               vcpu->arch.pc = curr_pc;
+       }
+
+dont_update_pc:
+       /*
+        * This is for special instructions whose emulation
+        * updates the PC, so do not overwrite the PC under
+        * any circumstances
+        */
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+                      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DO_MMIO;
+       int32_t op, base, rt, offset;
+       uint32_t bytes;
+       void *data = run->mmio.data;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       rt = (inst >> 16) & 0x1f;
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       op = (inst >> 26) & 0x3f;
+
+       switch (op) {
+       case sb_op:
+               bytes = 1;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(u8 *) data = vcpu->arch.gprs[rt];
+               kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
+                         *(uint8_t *) data);
+
+               break;
+
+       case sw_op:
+               bytes = 4;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(uint32_t *) data = vcpu->arch.gprs[rt];
+
+               kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+                         vcpu->arch.gprs[rt], *(uint32_t *) data);
+               break;
+
+       case sh_op:
+               bytes = 2;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(uint16_t *) data = vcpu->arch.gprs[rt];
+
+               kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+                         vcpu->arch.gprs[rt], *(uint32_t *) data);
+               break;
+
+       default:
+               printk("Store not yet supported\n");
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       /*
+        * Rollback PC if emulation was unsuccessful
+        */
+       if (er == EMULATE_FAIL) {
+               vcpu->arch.pc = curr_pc;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+                     struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DO_MMIO;
+       int32_t op, base, rt, offset;
+       uint32_t bytes;
+
+       rt = (inst >> 16) & 0x1f;
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       op = (inst >> 26) & 0x3f;
+
+       vcpu->arch.pending_load_cause = cause;
+       vcpu->arch.io_gpr = rt;
+
+       switch (op) {
+       case lw_op:
+               bytes = 4;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 0;
+               break;
+
+       case lh_op:
+       case lhu_op:
+               bytes = 2;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 0;
+
+               if (op == lh_op)
+                       vcpu->mmio_needed = 2;
+               else
+                       vcpu->mmio_needed = 1;
+
+               break;
+
+       case lbu_op:
+       case lb_op:
+               bytes = 1;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_is_write = 0;
+
+               if (op == lb_op)
+                       vcpu->mmio_needed = 2;
+               else
+                       vcpu->mmio_needed = 1;
+
+               break;
+
+       default:
+               printk("Load not yet supported\n");
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       return er;
+}
+
+int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
+{
+       unsigned long offset = (va & ~PAGE_MASK);
+       struct kvm *kvm = vcpu->kvm;
+       unsigned long pa;
+       gfn_t gfn;
+       pfn_t pfn;
+
+       gfn = va >> PAGE_SHIFT;
+
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               return -1;
+       }
+       pfn = kvm->arch.guest_pmap[gfn];
+       pa = (pfn << PAGE_SHIFT) | offset;
+
+       printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
+
+       mips32_SyncICache(CKSEG0ADDR(pa), 32);
+       return 0;
+}
+
+#define MIPS_CACHE_OP_INDEX_INV         0x0
+#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
+#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
+#define MIPS_CACHE_OP_IMP               0x3
+#define MIPS_CACHE_OP_HIT_INV           0x4
+#define MIPS_CACHE_OP_FILL_WB_INV       0x5
+#define MIPS_CACHE_OP_HIT_HB            0x6
+#define MIPS_CACHE_OP_FETCH_LOCK        0x7
+
+#define MIPS_CACHE_ICACHE               0x0
+#define MIPS_CACHE_DCACHE               0x1
+#define MIPS_CACHE_SEC                  0x3
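+
+/*
+ * Layout of the CACHE instruction as decoded by kvm_mips_emulate_cache()
+ * below: bits 20:18 select one of the MIPS_CACHE_OP_* operations and bits
+ * 17:16 select one of the MIPS_CACHE_* caches.
+ *
+ *  31     26 25    21 20    16 15               0
+ * +---------+--------+--------+------------------+
+ * |  CACHE  |  base  |   op   |      offset      |
+ * +---------+--------+--------+------------------+
+ *              (op = operation[20:18] | cache[17:16])
+ */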
+
+enum emulation_result
+kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
+                      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       extern void (*r4k_blast_dcache) (void);
+       extern void (*r4k_blast_icache) (void);
+       enum emulation_result er = EMULATE_DONE;
+       int32_t offset, cache, op_inst, op, base;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long va;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to roll back the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       base = (inst >> 21) & 0x1f;
+       op_inst = (inst >> 16) & 0x1f;
+       offset = inst & 0xffff;
+       cache = (inst >> 16) & 0x3;
+       op = (inst >> 18) & 0x7;
+
+       va = arch->gprs[base] + offset;
+
+       kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                 cache, op, base, arch->gprs[base], offset);
+
+       /*
+        * Index Invalidate ops are issued by Linux on startup to invalidate
+        * the caches entirely by stepping through all the ways/indexes;
+        * satisfy them by blasting the whole host cache instead.
+        */
+       if (op == MIPS_CACHE_OP_INDEX_INV) {
+               kvm_debug
+                   ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                    vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
+                    arch->gprs[base], offset);
+
+               if (cache == MIPS_CACHE_DCACHE)
+                       r4k_blast_dcache();
+               else if (cache == MIPS_CACHE_ICACHE)
+                       r4k_blast_icache();
+               else {
+                       printk("%s: unsupported CACHE INDEX operation\n",
+                              __func__);
+                       return EMULATE_FAIL;
+               }
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               kvm_mips_trans_cache_index(inst, opc, vcpu);
+#endif
+               goto done;
+       }
+
+       preempt_disable();
+       if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+
+               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
+                       kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+               }
+       } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+                  KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+               int index;
+
+               /* If an entry already exists then skip */
+               if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
+                       goto skip_fault;
+               }
+
+               /*
+                * If the address is not in the guest TLB, give the guest a
+                * fault; the resulting handler will do the right thing.
+                */
+               index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
+                                                 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+
+               if (index < 0) {
+                       vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
+                       vcpu->arch.host_cp0_badvaddr = va;
+                       er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
+                                                        vcpu);
+                       preempt_enable();
+                       goto dont_update_pc;
+               } else {
+                       struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+                       /* If the entry is not valid, set up a TLB invalid exception for the guest */
+                       if (!TLB_IS_VALID(*tlb, va)) {
+                               er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
+                                                               run, vcpu);
+                               preempt_enable();
+                               goto dont_update_pc;
+                       } else {
+                               /* We fault an entry from the guest tlb to the shadow host TLB */
+                               kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+                                                                    NULL,
+                                                                    NULL);
+                       }
+               }
+       } else {
+               printk
+                   ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                    cache, op, base, arch->gprs[base], offset);
+               er = EMULATE_FAIL;
+               preempt_enable();
+               goto dont_update_pc;
+
+       }
+
+skip_fault:
+       /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
+       if (cache == MIPS_CACHE_DCACHE
+           && (op == MIPS_CACHE_OP_FILL_WB_INV
+               || op == MIPS_CACHE_OP_HIT_INV)) {
+               flush_dcache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               /* Replace the CACHE instruction with a SYNCI; not identical, but it avoids a trap */
+               kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+       } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
+               flush_dcache_line(va);
+               flush_icache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               /* Replace the CACHE instruction, with a SYNCI */
+               kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+       } else {
+               printk
+                   ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                    cache, op, base, arch->gprs[base], offset);
+               er = EMULATE_FAIL;
+               preempt_enable();
+               goto dont_update_pc;
+       }
+
+       preempt_enable();
+
+      dont_update_pc:
+       /*
+        * Rollback PC
+        */
+       vcpu->arch.pc = curr_pc;
+      done:
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+                     struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t inst;
+
+       /*
+        *  Fetch the instruction.
+        */
+       if (cause & CAUSEF_BD) {
+               opc += 1;
+       }
+
+       inst = kvm_get_inst(opc, vcpu);
+
+       switch (((union mips_instruction)inst).r_format.opcode) {
+       case cop0_op:
+               er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+               break;
+       case sb_op:
+       case sh_op:
+       case sw_op:
+               er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+               break;
+       case lb_op:
+       case lbu_op:
+       case lhu_op:
+       case lh_op:
+       case lw_op:
+               er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+               break;
+
+       case cache_op:
+               ++vcpu->stat.cache_exits;
+               trace_kvm_exit(vcpu, CACHE_EXITS);
+               er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+               break;
+
+       default:
+               printk("Instruction emulation not supported (%p/%#x)\n", opc,
+                      inst);
+               kvm_arch_vcpu_dump_regs(vcpu);
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       return er;
+}
+
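+/*
+ * The kvm_mips_emulate_*() helpers below deliver exceptions to the guest
+ * using a common pattern: when guest Status.EXL is clear, the current PC is
+ * saved into the guest EPC, EXL is set and Cause.BD records whether the
+ * faulting instruction sat in a branch delay slot; the ExcCode field of
+ * Cause is then updated and the PC is pointed at the guest exception
+ * vector, normally KVM_GUEST_KSEG0 + 0x180 (the TLB refill helpers use
+ * + 0x0 when EXL was clear).
+ */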
+enum emulation_result
+kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
+                        struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_SYSCALL << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               printk("Trying to deliver SYSCALL when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
+                           struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               /* set pc to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x0;
+
+       } else {
+               kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
+                          struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi =
+               (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+
+               /* set pc to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
+                           struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x0;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
+                          struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+/* TLBMOD: store into address matching TLB with Dirty bit off */
+enum emulation_result
+kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+                      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+
+#ifdef DEBUG
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+               ASID_MASK(kvm_read_c0_guest_entryhi(vcpu->arch.cop0));
+       int index;
+
+       /*
+        * If the address is not in the guest TLB, then we are in trouble.
+        */
+       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+       if (index < 0) {
+               /* XXXKYMA Invalidate and retry */
+               kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
+               kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
+                    __func__, entryhi);
+               kvm_mips_dump_guest_tlbs(vcpu);
+               kvm_mips_dump_host_tlbs();
+               return EMULATE_FAIL;
+       }
+#endif
+
+       er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
+                         arch->pc);
+
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
+                        struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+       }
+
+       arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
+       kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_RES_INST << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_err("Trying to deliver RI when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_BREAK << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               printk("Trying to deliver BP when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+/*
+ * ll/sc, rdhwr, sync emulation
+ */
+
+#define OPCODE 0xfc000000
+#define BASE   0x03e00000
+#define RT     0x001f0000
+#define OFFSET 0x0000ffff
+#define LL     0xc0000000
+#define SC     0xe0000000
+#define SPEC0  0x00000000
+#define SPEC3  0x7c000000
+#define RD     0x0000f800
+#define FUNC   0x0000003f
+#define SYNC   0x0000000f
+#define RDHWR  0x0000003b
+
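+/*
+ * kvm_mips_handle_ri() below only emulates RDHWR (SPEC3 major opcode,
+ * function field 0x3b), which reaches us as a Reserved Instruction
+ * exception.  The rd field selects the hardware register: 0 (CPU number),
+ * 1 (SYNCI step), 2 (Count), 3 (Count resolution) and 29 (UserLocal) are
+ * handled here; anything else, and any non-RDHWR instruction, fails the
+ * emulation.
+ */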
+enum emulation_result
+kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+                  struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long curr_pc;
+       uint32_t inst;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to roll back the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       /*
+        *  Fetch the instruction.
+        */
+       if (cause & CAUSEF_BD)
+               opc += 1;
+
+       inst = kvm_get_inst(opc, vcpu);
+
+       if (inst == KVM_INVALID_INST) {
+               printk("%s: Cannot get inst @ %p\n", __func__, opc);
+               return EMULATE_FAIL;
+       }
+
+       if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+               int rd = (inst & RD) >> 11;
+               int rt = (inst & RT) >> 16;
+               switch (rd) {
+               case 0: /* CPU number */
+                       arch->gprs[rt] = 0;
+                       break;
+               case 1: /* SYNCI length */
+                       arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
+                                            current_cpu_data.icache.linesz);
+                       break;
+               case 2: /* Read count register */
+                       printk("RDHWR: Count register\n");
+                       arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
+                       break;
+               case 3: /* Count register resolution */
+                       switch (current_cpu_data.cputype) {
+                       case CPU_20KC:
+                       case CPU_25KF:
+                               arch->gprs[rt] = 1;
+                               break;
+                       default:
+                               arch->gprs[rt] = 2;
+                       }
+                       break;
+               case 29:
+#if 1
+                       arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
+#else
+                       /* UserLocal not implemented */
+                       er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+#endif
+                       break;
+
+               default:
+                       printk("RDHWR not supported\n");
+                       er = EMULATE_FAIL;
+                       break;
+               }
+       } else {
+               printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
+               er = EMULATE_FAIL;
+       }
+
+       /*
+        * Rollback PC only if emulation was unsuccessful
+        */
+       if (er == EMULATE_FAIL) {
+               vcpu->arch.pc = curr_pc;
+       }
+       return er;
+}
+
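+/*
+ * Complete an MMIO load once userspace has filled in run->mmio.data.  The
+ * in-kernel emulation (kvm_mips_emulate_load()) set up run->mmio and exited
+ * to userspace; on re-entry this helper advances the PC past the load using
+ * the saved pending_load_cause and copies the data into the destination
+ * register recorded in io_gpr, sign-extending when mmio_needed == 2 (lb/lh)
+ * and zero-extending otherwise.
+ */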
+enum emulation_result
+kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long curr_pc;
+
+       if (run->mmio.len > sizeof(*gpr)) {
+               printk("Bad MMIO length: %d\n", run->mmio.len);
+               er = EMULATE_FAIL;
+               goto done;
+       }
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to roll back the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, vcpu->arch.pending_load_cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       switch (run->mmio.len) {
+       case 4:
+               *gpr = *(int32_t *) run->mmio.data;
+               break;
+
+       case 2:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(int16_t *) run->mmio.data;
+               else
+                       *gpr = *(uint16_t *) run->mmio.data;
+
+               break;
+       case 1:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(int8_t *) run->mmio.data;
+               else
+                       *gpr = *(u8 *) run->mmio.data;
+               break;
+       }
+
+       if (vcpu->arch.pending_load_cause & CAUSEF_BD)
+               kvm_debug
+                   ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+                    vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+                    vcpu->mmio_needed);
+
+done:
+       return er;
+}
+
+static enum emulation_result
+kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
+                    struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (exccode << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+               kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+
+               kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
+                         exccode, kvm_read_c0_guest_epc(cop0),
+                         kvm_read_c0_guest_badvaddr(cop0));
+       } else {
+               printk("Trying to deliver EXC when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
+                        struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+
+       int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+
+       if (usermode) {
+               switch (exccode) {
+               case T_INT:
+               case T_SYSCALL:
+               case T_BREAK:
+               case T_RES_INST:
+                       break;
+
+               case T_COP_UNUSABLE:
+                       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
+                               er = EMULATE_PRIV_FAIL;
+                       break;
+
+               case T_TLB_MOD:
+                       break;
+
+               case T_TLB_LD_MISS:
+                       /* If we are accessing Guest kernel space, send an address error exception to the guest */
+                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+                               printk("%s: LD MISS @ %#lx\n", __func__,
+                                      badvaddr);
+                               cause &= ~0xff;
+                               cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
+                               er = EMULATE_PRIV_FAIL;
+                       }
+                       break;
+
+               case T_TLB_ST_MISS:
+                       /* If we are accessing Guest kernel space, send an address error exception to the guest */
+                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+                               printk("%s: ST MISS @ %#lx\n", __func__,
+                                      badvaddr);
+                               cause &= ~0xff;
+                               cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
+                               er = EMULATE_PRIV_FAIL;
+                       }
+                       break;
+
+               case T_ADDR_ERR_ST:
+                       printk("%s: address error ST @ %#lx\n", __func__,
+                              badvaddr);
+                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+                               cause &= ~0xff;
+                               cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
+                       }
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               case T_ADDR_ERR_LD:
+                       printk("%s: address error LD @ %#lx\n", __func__,
+                              badvaddr);
+                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+                               cause &= ~0xff;
+                               cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
+                       }
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               default:
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               }
+       }
+
+       if (er == EMULATE_PRIV_FAIL) {
+               kvm_mips_emulate_exc(cause, opc, run, vcpu);
+       }
+       return er;
+}
+
+/* User Address (UA) fault; this can happen if
+ * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
+ *     case we pass on the fault to the guest kernel and let it handle it.
+ * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
+ *     case we inject the TLB from the Guest TLB into the shadow host TLB
+ */
+enum emulation_result
+kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       unsigned long va = vcpu->arch.host_cp0_badvaddr;
+       int index;
+
+       kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
+                 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+
+       /*
+        * KVM would not have got the exception if this entry was valid in the
+        * shadow host TLB.  Check the guest TLB; if the entry is not there,
+        * send the guest an exception.  The guest exception handler should
+        * then inject an entry into the guest TLB.
+        */
+       index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
+                       ASID_MASK(kvm_read_c0_guest_entryhi(vcpu->arch.cop0)));
+       if (index < 0) {
+               if (exccode == T_TLB_LD_MISS) {
+                       er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+               } else if (exccode == T_TLB_ST_MISS) {
+                       er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+               } else {
+                       printk("%s: invalid exc code: %d\n", __func__, exccode);
+                       er = EMULATE_FAIL;
+               }
+       } else {
+               struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+
+               /* If the entry is not valid, set up a TLB invalid exception for the guest */
+               if (!TLB_IS_VALID(*tlb, va)) {
+                       if (exccode == T_TLB_LD_MISS) {
+                               er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
+                                                               vcpu);
+                       } else if (exccode == T_TLB_ST_MISS) {
+                               er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
+                                                               vcpu);
+                       } else {
+                               printk("%s: invalid exc code: %d\n", __func__,
+                                      exccode);
+                               er = EMULATE_FAIL;
+                       }
+               } else {
+#ifdef DEBUG
+                       kvm_debug
+                           ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
+                            tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+                       /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
+                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+                                                            NULL);
+               }
+       }
+
+       return er;
+}
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
new file mode 100644 (file)
index 0000000..1e5de16
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Interrupt delivery
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+       set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+       clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+       /*
+        * Set the Cause bits to reflect the pending timer interrupt;
+        * the EXC code will be set when the interrupt is actually
+        * delivered:
+        */
+       kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+
+       /* Queue up an INT exception for the core */
+       kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+
+}
+
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+       kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+       kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
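+/*
+ * I/O interrupts are identified by the irq number passed in: 2, 3 and 4 set
+ * C_IRQ0, C_IRQ1 and C_IRQ2 in the guest Cause register and queue the
+ * MIPS_EXC_INT_IO, MIPS_EXC_INT_IPI_1 and MIPS_EXC_INT_IPI_2 priorities;
+ * the negated values -2, -3 and -4 are used by the dequeue callback below
+ * to clear them again.
+ */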
+void
+kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+
+       /*
+        * Set the Cause bits to reflect the pending IO interrupt;
+        * the EXC code will be set when the interrupt is actually
+        * delivered:
+        */
+       switch (intr) {
+       case 2:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+               /* Queue up an INT exception for the core */
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
+               break;
+
+       case 3:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+               break;
+
+       case 4:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+               break;
+
+       default:
+               break;
+       }
+
+}
+
+void
+kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+                          struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+       switch (intr) {
+       case -2:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
+               break;
+
+       case -3:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+               break;
+
+       case -4:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+               break;
+
+       default:
+               break;
+       }
+
+}
+
+/* Deliver the interrupt of the corresponding priority, if possible. */
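+/*
+ * An interrupt can only be delivered when the guest has interrupts enabled
+ * (Status.IE set, EXL and ERL clear) and the matching IE_IRQx bit is
+ * unmasked in Status.  Delivery then follows the usual exception pattern
+ * with ExcCode = T_INT, and the PC is sent to KVM_GUEST_KSEG0 + 0x200 when
+ * Cause.IV is set, + 0x180 otherwise.
+ */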
+int
+kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                       uint32_t cause)
+{
+       int allowed = 0;
+       uint32_t exccode;
+
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       switch (priority) {
+       case MIPS_EXC_INT_TIMER:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IO:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IPI_1:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IPI_2:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       /* Are we allowed to deliver the interrupt ??? */
+       if (allowed) {
+
+               if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+                       /* save old pc */
+                       kvm_write_c0_guest_epc(cop0, arch->pc);
+                       kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+                       if (cause & CAUSEF_BD)
+                               kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+                       else
+                               kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+                       kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
+
+               } else
+                       kvm_err("Trying to deliver interrupt when EXL is already set\n");
+
+               kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
+                                         (exccode << CAUSEB_EXCCODE));
+
+               /* XXXSL Set PC to the interrupt exception entry point */
+               if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
+                       arch->pc = KVM_GUEST_KSEG0 + 0x200;
+               else
+                       arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+               clear_bit(priority, &vcpu->arch.pending_exceptions);
+       }
+
+       return allowed;
+}
+
+int
+kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                     uint32_t cause)
+{
+       return 1;
+}
+
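+/*
+ * Two passes over the pending bitmaps: interrupts queued for clearing
+ * (pending_exceptions_clr) are run through the irq_clear callback first,
+ * then pending interrupts are run through irq_deliver.  Each pass scans
+ * priorities from the lowest set bit upwards and stops after the first
+ * successful callback unless the *_ALL_AT_ONCE knobs are set.
+ */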
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+       unsigned long *pending = &vcpu->arch.pending_exceptions;
+       unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
+       unsigned int priority;
+
+       if (!(*pending) && !(*pending_clr))
+               return;
+
+       priority = __ffs(*pending_clr);
+       while (priority <= MIPS_EXC_MAX) {
+               if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
+                       if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
+                               break;
+               }
+
+               priority = find_next_bit(pending_clr,
+                                        BITS_PER_BYTE * sizeof(*pending_clr),
+                                        priority + 1);
+       }
+
+       priority = __ffs(*pending);
+       while (priority <= MIPS_EXC_MAX) {
+               if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
+                       if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
+                               break;
+               }
+
+               priority = find_next_bit(pending,
+                                        BITS_PER_BYTE * sizeof(*pending),
+                                        priority + 1);
+       }
+
+}
+
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
+}
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
new file mode 100644 (file)
index 0000000..20da7d2
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Interrupts
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+/*
+ * MIPS exception priorities: exceptions (including interrupts) are queued up
+ * for the guest in the order specified by their priorities.
+ */
+
+#define MIPS_EXC_RESET              0
+#define MIPS_EXC_SRESET             1
+#define MIPS_EXC_DEBUG_ST           2
+#define MIPS_EXC_DEBUG              3
+#define MIPS_EXC_DDB                4
+#define MIPS_EXC_NMI                5
+#define MIPS_EXC_MCHK               6
+#define MIPS_EXC_INT_TIMER          7
+#define MIPS_EXC_INT_IO             8
+#define MIPS_EXC_EXECUTE            9
+#define MIPS_EXC_INT_IPI_1          10
+#define MIPS_EXC_INT_IPI_2          11
+#define MIPS_EXC_MAX                12
+/* XXXSL More to follow */
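+/*
+ * These priorities double as bit numbers in vcpu->arch.pending_exceptions
+ * (see kvm_mips_queue_irq() and kvm_mips_deliver_interrupts()), so lower
+ * numbered exceptions are scanned first.
+ */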
+
+#define C_TI        (_ULCAST_(1) << 30)
+
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE   (0)
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+                             struct kvm_mips_interrupt *irq);
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+                               struct kvm_mips_interrupt *irq);
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                           uint32_t cause);
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                         uint32_t cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
new file mode 100644 (file)
index 0000000..86d3b4c
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+/*
+ * Define opcode values not defined in <asm/inst.h>
+ */
+
+#ifndef __KVM_MIPS_OPCODE_H__
+#define __KVM_MIPS_OPCODE_H__
+
+/* COP0 Ops */
+#define     mfmcz_op         0x0b      /*  01011  */
+#define     wrpgpr_op        0x0e      /*  01110  */
+
+/*  COP0 opcodes (only if COP0 and CO=1):  */
+#define     wait_op               0x20 /*  100000  */
+
+#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
new file mode 100644 (file)
index 0000000..075904b
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: COP0 access histogram
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/kvm_host.h>
+
+char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
+       "WAIT",
+       "CACHE",
+       "Signal",
+       "Interrupt",
+       "COP0/1 Unusable",
+       "TLB Mod",
+       "TLB Miss (LD)",
+       "TLB Miss (ST)",
+       "Address Err (ST)",
+       "Address Error (LD)",
+       "System Call",
+       "Reserved Inst",
+       "Break Inst",
+       "D-Cache Flushes",
+};
+
+char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
+       "Index",
+       "Random",
+       "EntryLo0",
+       "EntryLo1",
+       "Context",
+       "PG Mask",
+       "Wired",
+       "HWREna",
+       "BadVAddr",
+       "Count",
+       "EntryHI",
+       "Compare",
+       "Status",
+       "Cause",
+       "EXC PC",
+       "PRID",
+       "Config",
+       "LLAddr",
+       "Watch Lo",
+       "Watch Hi",
+       "X Context",
+       "Reserved",
+       "Impl Dep",
+       "Debug",
+       "DEPC",
+       "PerfCnt",
+       "ErrCtl",
+       "CacheErr",
+       "TagLo",
+       "TagHi",
+       "ErrorEPC",
+       "DESAVE"
+};
+
+int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+       int i, j;
+
+       printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+       for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
+               for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
+                       if (vcpu->arch.cop0->stat[i][j])
+                               printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+                                      vcpu->arch.cop0->stat[i][j]);
+               }
+       }
+#endif
+
+       return 0;
+}
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
new file mode 100644 (file)
index 0000000..89511a9
--- /dev/null
@@ -0,0 +1,928 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
+* TLB handlers run from KSEG0
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#define KVM_GUEST_PC_TLB    0
+#define KVM_GUEST_SP_TLB    1
+
+#define PRIx64 "llx"
+
+/* Invalidate a TLB entry by loading a unique CKSEG0 address into EntryHi */
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
+atomic_t kvm_mips_instance;
+EXPORT_SYMBOL(kvm_mips_instance);
+
+/* These function pointers are initialized once the KVM module is loaded */
+pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
+EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
+
+void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
+
+bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_is_error_pfn);
+
+uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+       return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
+}
+
+
+uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+       return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
+}
+
+inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
+{
+       return vcpu->kvm->arch.commpage_tlb;
+}
+
+
+/*
+ * Structure defining a TLB entry data set.
+ */
+
+void kvm_mips_dump_host_tlbs(void)
+{
+       unsigned long old_entryhi;
+       unsigned long old_pagemask;
+       struct kvm_mips_tlb tlb;
+       unsigned long flags;
+       int i;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       printk("HOST TLBs:\n");
+       printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
+
+       for (i = 0; i < current_cpu_data.tlbsize; i++) {
+               write_c0_index(i);
+               mtc0_tlbw_hazard();
+
+               tlb_read();
+               tlbw_use_hazard();
+
+               tlb.tlb_hi = read_c0_entryhi();
+               tlb.tlb_lo0 = read_c0_entrylo0();
+               tlb.tlb_lo1 = read_c0_entrylo1();
+               tlb.tlb_mask = read_c0_pagemask();
+
+               printk("TLB%c%3d Hi 0x%08lx ",
+                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                      i, tlb.tlb_hi);
+               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo0 >> 3) & 7);
+               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+       local_irq_restore(flags);
+}
+
+void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_mips_tlb tlb;
+       int i;
+
+       printk("Guest TLBs:\n");
+       printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
+
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               tlb = vcpu->arch.guest_tlb[i];
+               printk("TLB%c%3d Hi 0x%08lx ",
+                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                      i, tlb.tlb_hi);
+               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo0 >> 3) & 7);
+               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+}
+
+void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
+{
+       int i;
+       volatile struct kvm_mips_tlb tlb;
+
+       printk("Shadow TLBs:\n");
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
+               printk("TLB%c%3d Hi 0x%08lx ",
+                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                      i, tlb.tlb_hi);
+               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo0 >> 3) & 7);
+               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+}
+
+static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+{
+       pfn_t pfn;
+
+       if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+               return;
+
+       pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
+
+       if (kvm_mips_is_error_pfn(pfn)) {
+               panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+       }
+
+       kvm->arch.guest_pmap[gfn] = pfn;
+       return;
+}
+
+/* Translate guest KSEG0 addresses to Host PA */
+unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+       unsigned long gva)
+{
+       gfn_t gfn;
+       uint32_t offset = gva & ~PAGE_MASK;
+       struct kvm *kvm = vcpu->kvm;
+
+       if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
+               kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
+                       __builtin_return_address(0), gva);
+               return KVM_INVALID_PAGE;
+       }
+
+       gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
+                       gva);
+               return KVM_INVALID_PAGE;
+       }
+       kvm_mips_map_page(vcpu->kvm, gfn);
+       return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+}
+
+/* XXXKYMA: Must be called with interrupts disabled */
+/* set flush_dcache_mask == 0 if no dcache flush required */
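+/*
+ * Write an entry into the host TLB: probe for an existing entry matching
+ * entryhi and overwrite it in place, otherwise pick a random index.  When
+ * flush_dcache_mask is non-zero the D-cache is flushed for whichever of
+ * entrylo0/entrylo1 is valid, and the previous EntryHi (ASID) is restored
+ * before returning.
+ */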
+int
+kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+       unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
+{
+       unsigned long flags;
+       unsigned long old_entryhi;
+       volatile int idx;
+
+       local_irq_save(flags);
+
+
+       old_entryhi = read_c0_entryhi();
+       write_c0_entryhi(entryhi);
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       if (idx > current_cpu_data.tlbsize) {
+               kvm_err("%s: Invalid Index: %d\n", __func__, idx);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+
+       if (idx < 0) {
+               idx = read_c0_random() % current_cpu_data.tlbsize;
+               write_c0_index(idx);
+               mtc0_tlbw_hazard();
+       }
+       write_c0_entrylo0(entrylo0);
+       write_c0_entrylo1(entrylo1);
+       mtc0_tlbw_hazard();
+
+       tlb_write_indexed();
+       tlbw_use_hazard();
+
+#ifdef DEBUG
+       if (debug) {
+               kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
+                         "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+                         vcpu->arch.pc, idx, read_c0_entryhi(),
+                         read_c0_entrylo0(), read_c0_entrylo1());
+       }
+#endif
+
+       /* Flush D-cache */
+       if (flush_dcache_mask) {
+               if (entrylo0 & MIPS3_PG_V) {
+                       ++vcpu->stat.flush_dcache_exits;
+                       flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
+               }
+               if (entrylo1 & MIPS3_PG_V) {
+                       ++vcpu->stat.flush_dcache_exits;
+                       flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
+                               (0x1 << PAGE_SHIFT));
+               }
+       }
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+       local_irq_restore(flags);
+       return 0;
+}
+
+
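+/*
+ * Handle a host TLB miss on a guest KSEG0 address: map the even/odd guest
+ * page pair containing @badvaddr and write both halves into the host TLB
+ * under the guest kernel ASID.
+ */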
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+       struct kvm_vcpu *vcpu)
+{
+       gfn_t gfn;
+       pfn_t pfn0, pfn1;
+       unsigned long vaddr = 0;
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       int even;
+       struct kvm *kvm = vcpu->kvm;
+       const int flush_dcache_mask = 0;
+
+
+       if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+               kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+
+       gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+                       gfn, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+       even = !(gfn & 0x1);
+       vaddr = badvaddr & (PAGE_MASK << 1);
+
+       kvm_mips_map_page(vcpu->kvm, gfn);
+       kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
+
+       if (even) {
+               pfn0 = kvm->arch.guest_pmap[gfn];
+               pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
+       } else {
+               pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
+               pfn1 = kvm->arch.guest_pmap[gfn];
+       }
+
+       entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+                       (0x1 << 1);
+       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+                       (0x1 << 1);
+
+       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+                                      flush_dcache_mask);
+}
+
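+/*
+ * Install the host TLB entry that maps the guest commpage: only the even
+ * (lo0) half is valid and it points at vcpu->arch.kseg0_commpage.  The
+ * entry is written at the index returned by kvm_mips_get_commpage_asid().
+ */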
+int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+       struct kvm_vcpu *vcpu)
+{
+       pfn_t pfn0, pfn1;
+       unsigned long flags, old_entryhi = 0, vaddr = 0;
+       unsigned long entrylo0 = 0, entrylo1 = 0;
+
+
+       pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
+       pfn1 = 0;
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+                       (0x1 << 1);
+       entrylo1 = 0;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       vaddr = badvaddr & (PAGE_MASK << 1);
+       write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
+       mtc0_tlbw_hazard();
+       write_c0_entrylo0(entrylo0);
+       mtc0_tlbw_hazard();
+       write_c0_entrylo1(entrylo1);
+       mtc0_tlbw_hazard();
+       write_c0_index(kvm_mips_get_commpage_asid(vcpu));
+       mtc0_tlbw_hazard();
+       tlb_write_indexed();
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+#ifdef DEBUG
+       kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+            vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
+            read_c0_entrylo0(), read_c0_entrylo1());
+#endif
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+       local_irq_restore(flags);
+
+       return 0;
+}
+
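+/*
+ * Mirror a guest TLB entry into the host TLB: translate the guest PFNs in
+ * @tlb to host PFNs via the guest physical map and write the result under
+ * the appropriate guest (kernel or user) ASID.  The resulting host
+ * physical addresses are optionally returned through @hpa0/@hpa1.
+ */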
+int
+kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+       struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
+{
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       struct kvm *kvm = vcpu->kvm;
+       pfn_t pfn0, pfn1;
+
+
+       if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+               pfn0 = 0;
+               pfn1 = 0;
+       } else {
+               kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
+               kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
+
+               pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
+               pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
+       }
+
+       if (hpa0)
+               *hpa0 = pfn0 << PAGE_SHIFT;
+
+       if (hpa1)
+               *hpa1 = pfn1 << PAGE_SHIFT;
+
+       /* Get attributes from the Guest TLB */
+       entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+                       kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+                       (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+                       (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+
+#ifdef DEBUG
+       kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+                 tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+
+       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+                                      tlb->tlb_mask);
+}
+
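+/*
+ * Search the software-maintained guest TLB for an entry matching @entryhi
+ * (VPN2 plus ASID, honouring the page mask and global bit).  Returns the
+ * matching index, or -1 if there is no match.
+ */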
+int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
+{
+       int i;
+       int index = -1;
+       struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
+
+
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
+                       (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
+                       index = i;
+                       break;
+               }
+       }
+
+#ifdef DEBUG
+       if (index >= 0)
+               kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
+                         __func__, entryhi, index, tlb[index].tlb_lo0,
+                         tlb[index].tlb_lo1);
+#endif
+
+       return index;
+}
+
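+/*
+ * Probe the hardware TLB for @vaddr under the guest's current kernel or
+ * user ASID.  Returns the probed index (negative if not present).
+ */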
+int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
+{
+       unsigned long old_entryhi, flags;
+       volatile int idx;
+
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+
+       if (KVM_GUEST_KERNEL_MODE(vcpu))
+               write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
+       else {
+               write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+       }
+
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+#ifdef DEBUG
+       kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
+#endif
+
+       return idx;
+}
+
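+/*
+ * Probe the hardware TLB for @va under the guest user ASID and, if a
+ * matching entry is found, overwrite it with a unique invalid entry.
+ */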
+int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
+{
+       int idx;
+       unsigned long flags, old_entryhi;
+
+       local_irq_save(flags);
+
+
+       old_entryhi = read_c0_entryhi();
+
+       write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       if (idx >= current_cpu_data.tlbsize)
+               BUG();
+
+       if (idx >= 0) {
+               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+               mtc0_tlbw_hazard();
+
+               write_c0_entrylo0(0);
+               mtc0_tlbw_hazard();
+
+               write_c0_entrylo1(0);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               mtc0_tlbw_hazard();
+       }
+
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+#ifdef DEBUG
+       if (idx >= 0) {
+               kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
+                         (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
+       }
+#endif
+
+       return 0;
+}
+
+/* XXXKYMA: Fix this -- Guest USER/KERNEL no longer share the same ASID */
+int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
+{
+       unsigned long flags, old_entryhi;
+
+       if (index >= current_cpu_data.tlbsize)
+               BUG();
+
+       local_irq_save(flags);
+
+
+       old_entryhi = read_c0_entryhi();
+
+       write_c0_entryhi(UNIQUE_ENTRYHI(index));
+       mtc0_tlbw_hazard();
+
+       write_c0_index(index);
+       mtc0_tlbw_hazard();
+
+       write_c0_entrylo0(0);
+       mtc0_tlbw_hazard();
+
+       write_c0_entrylo1(0);
+       mtc0_tlbw_hazard();
+
+       tlb_write_indexed();
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+       return 0;
+}
+
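+/*
+ * Invalidate every entry in the host TLB.  With @skip_kseg0 set, entries
+ * whose EntryHi lies in guest KSEG0 are preserved so that guest kernel
+ * mappings survive the flush.
+ */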
+void kvm_mips_flush_host_tlb(int skip_kseg0)
+{
+       unsigned long flags;
+       unsigned long old_entryhi, entryhi;
+       unsigned long old_pagemask;
+       int entry = 0;
+       int maxentry = current_cpu_data.tlbsize;
+
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       /* Blast 'em all away. */
+       for (entry = 0; entry < maxentry; entry++) {
+
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+
+               if (skip_kseg0) {
+                       tlb_read();
+                       tlbw_use_hazard();
+
+                       entryhi = read_c0_entryhi();
+
+                       /* Don't blow away guest kernel entries */
+                       if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
+                               continue;
+                       }
+               }
+
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+               mtc0_tlbw_hazard();
+               write_c0_entrylo0(0);
+               mtc0_tlbw_hazard();
+               write_c0_entrylo1(0);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               mtc0_tlbw_hazard();
+       }
+
+       tlbw_use_hazard();
+
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+}
+
+void
+kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+                       struct kvm_vcpu *vcpu)
+{
+       unsigned long asid = asid_cache(cpu);
+
+       if (!(ASID_MASK(ASID_INC(asid)))) {
+               if (cpu_has_vtag_icache) {
+                       flush_icache_all();
+               }
+
+               kvm_local_flush_tlb_all();      /* start new asid cycle */
+
+               if (!asid)      /* fix version if needed */
+                       asid = ASID_FIRST_VERSION;
+       }
+
+       cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
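+/* Snapshot the hardware TLB into this vcpu's per-cpu shadow TLB copy. */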
+void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       unsigned long old_entryhi;
+       unsigned long old_pagemask;
+       int entry = 0;
+       int cpu = smp_processor_id();
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+               tlb_read();
+               tlbw_use_hazard();
+
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
+       }
+
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+
+       local_irq_restore(flags);
+
+}
+
+void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       unsigned long old_ctx;
+       int entry;
+       int cpu = smp_processor_id();
+
+       local_irq_save(flags);
+
+       old_ctx = read_c0_entryhi();
+
+       for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+               write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
+               mtc0_tlbw_hazard();
+               write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
+               write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               tlbw_use_hazard();
+       }
+
+       tlbw_use_hazard();
+       write_c0_entryhi(old_ctx);
+       mtc0_tlbw_hazard();
+       local_irq_restore(flags);
+}
+
+
+void kvm_local_flush_tlb_all(void)
+{
+       unsigned long flags;
+       unsigned long old_ctx;
+       int entry = 0;
+
+       local_irq_save(flags);
+       /* Save old context and create impossible VPN2 value */
+       old_ctx = read_c0_entryhi();
+       write_c0_entrylo0(0);
+       write_c0_entrylo1(0);
+
+       /* Blast 'em all away. */
+       while (entry < current_cpu_data.tlbsize) {
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+               tlb_write_indexed();
+               entry++;
+       }
+       tlbw_use_hazard();
+       write_c0_entryhi(old_ctx);
+       mtc0_tlbw_hazard();
+
+       local_irq_restore(flags);
+}
+
+void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
+{
+       int cpu, entry;
+
+       for_each_possible_cpu(cpu) {
+               for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
+                           UNIQUE_ENTRYHI(entry);
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
+                           read_c0_pagemask();
+#ifdef DEBUG
+                       kvm_debug
+                           ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
+                            cpu, entry,
+                            vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
+                            vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
+                            vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+#endif
+               }
+       }
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       unsigned long flags;
+       int newasid = 0;
+
+#ifdef DEBUG
+       kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+#endif
+
+       /* Allocate new kernel and user ASIDs if needed */
+
+       local_irq_save(flags);
+
+       if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
+           ASID_VERSION_MASK) {
+               kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+               vcpu->arch.guest_kernel_asid[cpu] =
+                   vcpu->arch.guest_kernel_mm.context.asid[cpu];
+               kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+               vcpu->arch.guest_user_asid[cpu] =
+                   vcpu->arch.guest_user_mm.context.asid[cpu];
+               newasid++;
+
+               kvm_info("[%d]: cpu_context: %#lx\n", cpu,
+                        cpu_context(cpu, current->mm));
+               kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+                        cpu, vcpu->arch.guest_kernel_asid[cpu]);
+               kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+                        vcpu->arch.guest_user_asid[cpu]);
+       }
+
+       if (vcpu->arch.last_sched_cpu != cpu) {
+               kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
+                        vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+       }
+
+       /* Only reload shadow host TLB if new ASIDs haven't been allocated */
+#if 0
+       if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
+               kvm_mips_flush_host_tlb(0);
+               kvm_shadow_tlb_load(vcpu);
+       }
+#endif
+
+       if (!newasid) {
+               /* If we preempted while the guest was executing, then reload the pre-empted ASID */
+               if (current->flags & PF_VCPU) {
+                       write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
+                       ehb();
+               }
+       } else {
+               /* New ASIDs were allocated for the VM */
+
+               /* Were we in guest context? If so, the pre-empted ASID is
+                * no longer valid and we need to set it to what it should
+                * be based on the mode of the Guest (Kernel/User).
+                */
+               if (current->flags & PF_VCPU) {
+                       if (KVM_GUEST_KERNEL_MODE(vcpu))
+                               write_c0_entryhi(ASID_MASK(vcpu->arch.
+                                                guest_kernel_asid[cpu]));
+                       else
+                               write_c0_entryhi(ASID_MASK(vcpu->arch.
+                                                guest_user_asid[cpu]));
+                       ehb();
+               }
+       }
+
+       local_irq_restore(flags);
+
+}
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       uint32_t cpu;
+
+       local_irq_save(flags);
+
+       cpu = smp_processor_id();
+
+
+       vcpu->arch.preempt_entryhi = read_c0_entryhi();
+       vcpu->arch.last_sched_cpu = cpu;
+
+#if 0
+       if ((atomic_read(&kvm_mips_instance) > 1)) {
+               kvm_shadow_tlb_put(vcpu);
+       }
+#endif
+
+       if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+            ASID_VERSION_MASK)) {
+               kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
+                         cpu_context(cpu, current->mm));
+               drop_mmu_context(current->mm, cpu);
+       }
+       write_c0_entryhi(cpu_asid(cpu, current->mm));
+       ehb();
+
+       local_irq_restore(flags);
+}
+
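+/*
+ * Fetch the guest instruction at @opc, faulting the mapping into the host
+ * TLB from the guest TLB if required.  Returns KVM_INVALID_INST on
+ * failure.
+ */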
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long paddr, flags;
+       uint32_t inst;
+       int index;
+
+       if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
+           KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
+               if (index >= 0) {
+                       inst = *(opc);
+               } else {
+                       index = kvm_mips_guest_tlb_lookup(vcpu,
+                                       ((unsigned long) opc & VPN2_MASK) |
+                                       ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+                       if (index < 0) {
+                               kvm_err
+                                   ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
+                                    __func__, opc, vcpu, read_c0_entryhi());
+                               kvm_mips_dump_host_tlbs();
+                               local_irq_restore(flags);
+                               return KVM_INVALID_INST;
+                       }
+                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+                                                            &vcpu->arch.
+                                                            guest_tlb[index],
+                                                            NULL, NULL);
+                       inst = *(opc);
+               }
+               local_irq_restore(flags);
+       } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               paddr =
+                   kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
+                                                        (unsigned long) opc);
+               inst = *(uint32_t *) CKSEG0ADDR(paddr);
+       } else {
+               kvm_err("%s: illegal address: %p\n", __func__, opc);
+               return KVM_INVALID_INST;
+       }
+
+       return inst;
+}
+
+EXPORT_SYMBOL(kvm_local_flush_tlb_all);
+EXPORT_SYMBOL(kvm_shadow_tlb_put);
+EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
+EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
+EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
+EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
+EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
+EXPORT_SYMBOL(kvm_shadow_tlb_load);
+EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
+EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
+EXPORT_SYMBOL(kvm_get_inst);
+EXPORT_SYMBOL(kvm_arch_vcpu_load);
+EXPORT_SYMBOL(kvm_arch_vcpu_put);
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
new file mode 100644 (file)
index 0000000..466aeef
--- /dev/null
@@ -0,0 +1,482 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+
+static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
+{
+       gpa_t gpa;
+       uint32_t kseg = KSEGX(gva);
+
+       if ((kseg == CKSEG0) || (kseg == CKSEG1))
+               gpa = CPHYSADDR(gva);
+       else {
+               printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+               kvm_mips_dump_host_tlbs();
+               gpa = KVM_INVALID_ADDR;
+       }
+
+#ifdef DEBUG
+       kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
+#endif
+
+       return gpa;
+}
+
+
+static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
+               er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+       else
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+
+       switch (er) {
+       case EMULATE_DONE:
+               ret = RESUME_GUEST;
+               break;
+
+       case EMULATE_FAIL:
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               break;
+
+       case EMULATE_WAIT:
+               run->exit_reason = KVM_EXIT_INTR;
+               ret = RESUME_HOST;
+               break;
+
+       default:
+               BUG();
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+           || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+               kvm_debug
+                   ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+#endif
+               er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
+
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               /* XXXKYMA: The guest kernel does not expect to get this fault when we are not
+                * using HIGHMEM. Need to address this in a HIGHMEM kernel
+                */
+               printk
+                   ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       } else {
+               printk
+                   ("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+           && KVM_GUEST_KERNEL_MODE(vcpu)) {
+               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+               kvm_debug
+                   ("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+#endif
+               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               /* All KSEG0 faults are handled by KVM, as the guest kernel does not
+                * expect to ever get them
+                */
+               if (kvm_mips_handle_kseg0_tlb_fault
+                   (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               kvm_err
+                   ("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+           && KVM_GUEST_KERNEL_MODE(vcpu)) {
+               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+               kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n",
+                         vcpu->arch.pc, badvaddr);
+#endif
+
+               /* User Address (UA) fault, this could happen if
+                * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
+                *     case we pass on the fault to the guest kernel and let it handle it.
+                * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
+                *     case we inject the TLB from the Guest TLB into the shadow host TLB
+                */
+
+               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               if (kvm_mips_handle_kseg0_tlb_fault
+                   (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               printk
+                   ("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KVM_GUEST_KERNEL_MODE(vcpu)
+           && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+#ifdef DEBUG
+               kvm_debug("Emulate Store to MMIO space\n");
+#endif
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+               if (er == EMULATE_FAIL) {
+                       printk("Emulate Store to MMIO space failed\n");
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               } else {
+                       run->exit_reason = KVM_EXIT_MMIO;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               printk
+                   ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
+#ifdef DEBUG
+               kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
+#endif
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+               if (er == EMULATE_FAIL) {
+                       printk("Emulate Load from MMIO space failed\n");
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               } else {
+                       run->exit_reason = KVM_EXIT_MMIO;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               printk
+                   ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               er = EMULATE_FAIL;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int
+kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
+       kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
+       kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
+       kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
+       kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
+
+       kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
+       kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
+       kvm_write_c0_guest_pagemask(cop0,
+                                   regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
+       kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
+       kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
+
+       return 0;
+}
+
+static int
+kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
+       regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
+       regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
+       regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
+       regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
+
+       regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
+       regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
+       regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
+           kvm_read_c0_guest_pagemask(cop0);
+       regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
+       regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
+
+       regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
+       regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
+       regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
+       regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
+       regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
+
+       return 0;
+}
+
+static int kvm_trap_emul_vm_init(struct kvm *kvm)
+{
+       return 0;
+}
+
+static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t config1;
+       int vcpu_id = vcpu->vcpu_id;
+
+       /* Arch specific stuff: set up the config registers so that the
+        * guest comes up as expected.  For now we simulate a
+        * MIPS 24kc.
+        */
+       kvm_write_c0_guest_prid(cop0, 0x00019300);
+       kvm_write_c0_guest_config(cop0,
+                                 MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+                                 (MMU_TYPE_R4000 << CP0C0_MT));
+
+       /* Read the cache characteristics from the host Config1 Register */
+       config1 = (read_c0_config1() & ~0x7f);
+
+       /* Set up MMU size */
+       config1 &= ~(0x3f << 25);
+       config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
+
+       /* We unset some bits that we aren't emulating */
+       config1 &=
+           ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
+             (1 << CP0C1_WR) | (1 << CP0C1_CA));
+       kvm_write_c0_guest_config1(cop0, config1);
+
+       kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
+       /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
+       kvm_write_c0_guest_config3(cop0,
+                                  MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
+                                                                      CP0C3_ULRI));
+
+       /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
+       kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
+
+       /* Set up IntCtl defaults, compatibility mode for timer interrupts (HW5) */
+       kvm_write_c0_guest_intctl(cop0, 0xFC000000);
+
+       /* Put the vcpu id into the CPUNum field of the EBase register to handle SMP guests */
+       kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+
+       return 0;
+}
+
+static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+       /* exit handlers */
+       .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
+       .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
+       .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
+       .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
+       .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
+       .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
+       .handle_syscall = kvm_trap_emul_handle_syscall,
+       .handle_res_inst = kvm_trap_emul_handle_res_inst,
+       .handle_break = kvm_trap_emul_handle_break,
+
+       .vm_init = kvm_trap_emul_vm_init,
+       .vcpu_init = kvm_trap_emul_vcpu_init,
+       .vcpu_setup = kvm_trap_emul_vcpu_setup,
+       .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
+       .queue_timer_int = kvm_mips_queue_timer_int_cb,
+       .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
+       .queue_io_int = kvm_mips_queue_io_int_cb,
+       .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
+       .irq_deliver = kvm_mips_irq_deliver_cb,
+       .irq_clear = kvm_mips_irq_clear_cb,
+       .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
+       .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
+};
+
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+{
+       *install_callbacks = &kvm_trap_emul_callbacks;
+       return 0;
+}
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
new file mode 100644 (file)
index 0000000..bc9e0f4
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
+/*
+ * Tracepoints for VM exits
+ */
+extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
+
+TRACE_EVENT(kvm_exit,
+           TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+           TP_ARGS(vcpu, reason),
+           TP_STRUCT__entry(
+                       __field(struct kvm_vcpu *, vcpu)
+                       __field(unsigned int, reason)
+           ),
+
+           TP_fast_assign(
+                       __entry->vcpu = vcpu;
+                       __entry->reason = reason;
+           ),
+
+           TP_printk("[%s]PC: 0x%08lx",
+                     kvm_mips_exit_types_str[__entry->reason],
+                     __entry->vcpu->arch.pc)
+);
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index a64daee740ee0414ee6a8db474b32893ae51ab1d..3b2a1e78a54369fc43bf052288f3fc7acaac5ae4 100644 (file)
@@ -19,7 +19,7 @@
  */
 void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(__mips_set_bit);
  */
 void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(__mips_clear_bit);
  */
 void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(__mips_change_bit);
 int __mips_test_and_set_bit(unsigned long nr,
                            volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -112,7 +112,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit);
 int __mips_test_and_set_bit_lock(unsigned long nr,
                                 volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
  */
 int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -162,7 +162,7 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
  */
 int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
index 32b9f21bfd8562f37d8e51e1ad23908c320ad3e8..8a12d00908e024ab3559681955182f7f216c0155 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
+#include <asm/mmu_context.h>
 
 static inline const char *msk2str(unsigned int mask)
 {
@@ -55,7 +56,7 @@ static void dump_tlb(int first, int last)
        s_pagemask = read_c0_pagemask();
        s_entryhi = read_c0_entryhi();
        s_index = read_c0_index();
-       asid = s_entryhi & 0xff;
+       asid = ASID_MASK(s_entryhi);
 
        for (i = first; i <= last; i++) {
                write_c0_index(i);
@@ -85,7 +86,7 @@ static void dump_tlb(int first, int last)
 
                        printk("va=%0*lx asid=%02lx\n",
                               width, (entryhi & ~0x1fffUL),
-                              entryhi & 0xff);
+                              ASID_MASK(entryhi));
                        printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
                               width,
                               (entrylo0 << 6) & PAGE_MASK, c0,
index 053d3b0b0317b18d848b2b2419025ebb1f23db31..0580194e7402aa6508c579e2ff925833fd421948 100644 (file)
@@ -5,7 +5,8 @@
  *
  * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2007 by Maciej W. Rozycki
+ * Copyright (C) 2011, 2012 MIPS Technologies, Inc.
  */
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #define LONG_S_R sdr
 #endif
 
+#ifdef CONFIG_CPU_MICROMIPS
+#define STORSIZE (LONGSIZE * 2)
+#define STORMASK (STORSIZE - 1)
+#define FILL64RG t8
+#define FILLPTRG t7
+#undef  LONG_S
+#define LONG_S LONG_SP
+#else
+#define STORSIZE LONGSIZE
+#define STORMASK LONGMASK
+#define FILL64RG a1
+#define FILLPTRG t0
+#endif
+
 #define EX(insn,reg,addr,handler)                      \
 9:     insn    reg, addr;                              \
        .section __ex_table,"a";                        \
        .previous
 
        .macro  f_fill64 dst, offset, val, fixup
-       EX(LONG_S, \val, (\offset +  0 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  1 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  2 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  3 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  4 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  5 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  6 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  7 * LONGSIZE)(\dst), \fixup)
-#if LONGSIZE == 4
-       EX(LONG_S, \val, (\offset +  8 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  9 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 11 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 12 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  0 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  1 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  2 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  3 * STORSIZE)(\dst), \fixup)
+#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS))
+       EX(LONG_S, \val, (\offset +  4 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  5 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  6 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  7 * STORSIZE)(\dst), \fixup)
+#endif
+#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4))
+       EX(LONG_S, \val, (\offset +  8 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  9 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup)
 #endif
        .endm
 
@@ -71,16 +88,20 @@ LEAF(memset)
 1:
 
 FEXPORT(__bzero)
-       sltiu           t0, a2, LONGSIZE        /* very small region? */
+       sltiu           t0, a2, STORSIZE        /* very small region? */
        bnez            t0, .Lsmall_memset
-        andi           t0, a0, LONGMASK        /* aligned? */
+        andi           t0, a0, STORMASK        /* aligned? */
 
+#ifdef CONFIG_CPU_MICROMIPS
+       move            t8, a1                  /* used by 'swp' instruction */
+       move            t9, a1
+#endif
 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        beqz            t0, 1f
-        PTR_SUBU       t0, LONGSIZE            /* alignment in bytes */
+        PTR_SUBU       t0, STORSIZE            /* alignment in bytes */
 #else
        .set            noat
-       li              AT, LONGSIZE
+       li              AT, STORSIZE
        beqz            t0, 1f
         PTR_SUBU       t0, AT                  /* alignment in bytes */
        .set            at
@@ -99,24 +120,27 @@ FEXPORT(__bzero)
 1:     ori             t1, a2, 0x3f            /* # of full blocks */
        xori            t1, 0x3f
        beqz            t1, .Lmemset_partial    /* no block to fill */
-        andi           t0, a2, 0x40-LONGSIZE
+        andi           t0, a2, 0x40-STORSIZE
 
        PTR_ADDU        t1, a0                  /* end address */
        .set            reorder
 1:     PTR_ADDIU       a0, 64
        R10KCBARRIER(0(ra))
-       f_fill64 a0, -64, a1, .Lfwd_fixup
+       f_fill64 a0, -64, FILL64RG, .Lfwd_fixup
        bne             t1, a0, 1b
        .set            noreorder
 
 .Lmemset_partial:
        R10KCBARRIER(0(ra))
        PTR_LA          t1, 2f                  /* where to start */
+#ifdef CONFIG_CPU_MICROMIPS
+       LONG_SRL        t7, t0, 1
+#endif
 #if LONGSIZE == 4
-       PTR_SUBU        t1, t0
+       PTR_SUBU        t1, FILLPTRG
 #else
        .set            noat
-       LONG_SRL                AT, t0, 1
+       LONG_SRL        AT, FILLPTRG, 1
        PTR_SUBU        t1, AT
        .set            at
 #endif
@@ -126,9 +150,9 @@ FEXPORT(__bzero)
        .set            push
        .set            noreorder
        .set            nomacro
-       f_fill64 a0, -64, a1, .Lpartial_fixup   /* ... but first do longs ... */
+       f_fill64 a0, -64, FILL64RG, .Lpartial_fixup     /* ... but first do longs ... */
 2:     .set            pop
-       andi            a2, LONGMASK            /* At most one long to go */
+       andi            a2, STORMASK            /* At most one long to go */
 
        beqz            a2, 1f
         PTR_ADDU       a0, a2                  /* What's left */
@@ -169,7 +193,7 @@ FEXPORT(__bzero)
 
 .Lpartial_fixup:
        PTR_L           t0, TI_TASK($28)
-       andi            a2, LONGMASK
+       andi            a2, STORMASK
        LONG_L          t0, THREAD_BUADDR(t0)
        LONG_ADDU       a2, t1
        jr              ra
@@ -177,4 +201,4 @@ FEXPORT(__bzero)
 
 .Llast_fixup:
        jr              ra
-        andi           v1, a2, LONGMASK
+        andi           v1, a2, STORMASK
index cd160be3ce4dc97b934b566b4c69197ceb07b342..6807f7172eaf46f2841301d64882484f11be861f 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/compiler.h>
 #include <linux/preempt.h>
 #include <linux/export.h>
+#include <linux/stringify.h>
 
 #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
 
  *
  * Workaround: mask EXL bit of the result or place a nop before mfc0.
  */
-__asm__(
-       "       .macro  arch_local_irq_disable\n"
+notrace void arch_local_irq_disable(void)
+{
+       preempt_disable();
+
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    noat                                            \n"
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -52,108 +56,98 @@ __asm__(
        "       .set    noreorder                                       \n"
        "       mtc0    $1,$12                                          \n"
 #endif
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : /* no outputs */
+       : /* no inputs */
+       : "memory");
 
-notrace void arch_local_irq_disable(void)
-{
-       preempt_disable();
-       __asm__ __volatile__(
-               "arch_local_irq_disable"
-               : /* no outputs */
-               : /* no inputs */
-               : "memory");
        preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_disable);
 
 
-__asm__(
-       "       .macro  arch_local_irq_save result                      \n"
+notrace unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags;
+
+       preempt_disable();
+
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
 #ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    \\result, $2, 1                                 \n"
-       "       ori     $1, \\result, 0x400                             \n"
+       "       mfc0    %[flags], $2, 1                         \n"
+       "       ori     $1, %[flags], 0x400                             \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $2, 1                                       \n"
-       "       andi    \\result, \\result, 0x400                       \n"
+       "       andi    %[flags], %[flags], 0x400                       \n"
 #elif defined(CONFIG_CPU_MIPSR2)
        /* see irqflags.h for inline function */
 #else
-       "       mfc0    \\result, $12                                   \n"
-       "       ori     $1, \\result, 0x1f                              \n"
+       "       mfc0    %[flags], $12                                   \n"
+       "       ori     $1, %[flags], 0x1f                              \n"
        "       xori    $1, 0x1f                                        \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $12                                         \n"
 #endif
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : [flags] "=r" (flags)
+       : /* no inputs */
+       : "memory");
 
-notrace unsigned long arch_local_irq_save(void)
-{
-       unsigned long flags;
-       preempt_disable();
-       asm volatile("arch_local_irq_save\t%0"
-                    : "=r" (flags)
-                    : /* no inputs */
-                    : "memory");
        preempt_enable();
+
        return flags;
 }
 EXPORT_SYMBOL(arch_local_irq_save);
 
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+       unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC kernel needs to do a software replay of queued
+        * IPIs, at the cost of branch and call overhead on each
+        * local_irq_restore()
+        */
+       if (unlikely(!(flags & 0x0400)))
+               smtc_ipi_replay();
+#endif
+       preempt_disable();
 
-__asm__(
-       "       .macro  arch_local_irq_restore flags                    \n"
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    noreorder                                       \n"
        "       .set    noat                                            \n"
 #ifdef CONFIG_MIPS_MT_SMTC
-       "mfc0   $1, $2, 1                                               \n"
-       "andi   \\flags, 0x400                                          \n"
-       "ori    $1, 0x400                                               \n"
-       "xori   $1, 0x400                                               \n"
-       "or     \\flags, $1                                             \n"
-       "mtc0   \\flags, $2, 1                                          \n"
+       "       mfc0    $1, $2, 1                                       \n"
+       "       andi    %[flags], 0x400                                 \n"
+       "       ori     $1, 0x400                                       \n"
+       "       xori    $1, 0x400                                       \n"
+       "       or      %[flags], $1                                    \n"
+       "       mtc0    %[flags], $2, 1                                 \n"
 #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
        /* see irqflags.h for inline function */
 #elif defined(CONFIG_CPU_MIPSR2)
        /* see irqflags.h for inline function */
 #else
        "       mfc0    $1, $12                                         \n"
-       "       andi    \\flags, 1                                      \n"
+       "       andi    %[flags], 1                                     \n"
        "       ori     $1, 0x1f                                        \n"
        "       xori    $1, 0x1f                                        \n"
-       "       or      \\flags, $1                                     \n"
-       "       mtc0    \\flags, $12                                    \n"
+       "       or      %[flags], $1                                    \n"
+       "       mtc0    %[flags], $12                                   \n"
 #endif
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : [flags] "=r" (__tmp1)
+       : "0" (flags)
+       : "memory");
 
-notrace void arch_local_irq_restore(unsigned long flags)
-{
-       unsigned long __tmp1;
-
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC kernel needs to do a software replay of queued
-        * IPIs, at the cost of branch and call overhead on each
-        * local_irq_restore()
-        */
-       if (unlikely(!(flags & 0x0400)))
-               smtc_ipi_replay();
-#endif
-       preempt_disable();
-       __asm__ __volatile__(
-               "arch_local_irq_restore\t%0"
-               : "=r" (__tmp1)
-               : "0" (flags)
-               : "memory");
        preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
@@ -164,11 +158,36 @@ notrace void __arch_local_irq_restore(unsigned long flags)
        unsigned long __tmp1;
 
        preempt_disable();
+
        __asm__ __volatile__(
-               "arch_local_irq_restore\t%0"
-               : "=r" (__tmp1)
-               : "0" (flags)
-               : "memory");
+       "       .set    push                                            \n"
+       "       .set    noreorder                                       \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    $1, $2, 1                                       \n"
+       "       andi    %[flags], 0x400                                 \n"
+       "       ori     $1, 0x400                                       \n"
+       "       xori    $1, 0x400                                       \n"
+       "       or      %[flags], $1                                    \n"
+       "       mtc0    %[flags], $2, 1                                 \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+       /* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+       /* see irqflags.h for inline function */
+#else
+       "       mfc0    $1, $12                                         \n"
+       "       andi    %[flags], 1                                     \n"
+       "       ori     $1, 0x1f                                        \n"
+       "       xori    $1, 0x1f                                        \n"
+       "       or      %[flags], $1                                    \n"
+       "       mtc0    %[flags], $12                                   \n"
+#endif
+       "       " __stringify(__irq_disable_hazard) "                   \n"
+       "       .set    pop                                             \n"
+       : [flags] "=r" (__tmp1)
+       : "0" (flags)
+       : "memory");
+
        preempt_enable();
 }
 EXPORT_SYMBOL(__arch_local_irq_restore);
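The hunks above replace the old file-scope asm(".macro arch_local_irq_save ...") definitions with ordinary C functions that use GCC extended asm with named operands, so the flags value is tied directly to a C local and the hazard barrier comes from __stringify(__irq_disable_hazard). A minimal sketch of that pattern, assuming a MIPS target, GCC-style extended asm and a hypothetical helper name:

static inline unsigned long read_c0_status_sketch(void)
{
	unsigned long flags;

	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	noat					\n"
	"	mfc0	%[flags], $12				\n" /* CP0 Status */
	"	.set	pop					\n"
	: [flags] "=r" (flags)	/* named output operand */
	: /* no inputs */
	: "memory");

	return flags;
}

The named [flags] operand plays the role of the \result argument of the removed assembler macros, which is what lets the notrace wrappers above keep the value in a normal local variable between preempt_disable() and preempt_enable().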
index 91615c2ef0cf969baeff215ca3d8a627e3851d2f..8327698b99377e0e78c74a7bf0bef19e657fbd53 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 
 #include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
@@ -21,7 +22,7 @@ static void dump_tlb(int first, int last)
        unsigned int asid;
        unsigned long entryhi, entrylo0;
 
-       asid = read_c0_entryhi() & 0xfc0;
+       asid = ASID_MASK(read_c0_entryhi());
 
        for (i = first; i <= last; i++) {
                write_c0_index(i<<8);
@@ -35,7 +36,7 @@ static void dump_tlb(int first, int last)
 
                /* Unused entries have a virtual address of KSEG0.  */
                if ((entryhi & 0xffffe000) != 0x80000000
-                   && (entryhi & 0xfc0) == asid) {
+                   && (ASID_MASK(entryhi) == asid)) {
                        /*
                         * Only print entries in use
                         */
@@ -44,7 +45,7 @@ static void dump_tlb(int first, int last)
                        printk("va=%08lx asid=%08lx"
                               "  [pa=%06lx n=%d d=%d v=%d g=%d]",
                               (entryhi & 0xffffe000),
-                              entryhi & 0xfc0,
+                              ASID_MASK(entryhi),
                               entrylo0 & PAGE_MASK,
                               (entrylo0 & (1 << 11)) ? 1 : 0,
                               (entrylo0 & (1 << 10)) ? 1 : 0,
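The dump_tlb() change above drops the hard-coded 0xfc0 mask in favour of the ASID_MASK() helper now pulled in via <asm/mmu_context.h>. Purely as an illustration of the idea (this is not the kernel's actual definition), centralising the extraction amounts to:

/* Hypothetical sketch only: pull the ASID field out of EntryHi. */
#define EXAMPLE_ASID_MASK(entryhi)	((entryhi) & 0xfc0)

so a core with a differently placed or sized ASID field only needs the one definition changed instead of every caller that used to open-code the mask.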
index fdbb970f670d84ed2bafcbd42cc4e9eb46b96156..e362dcdc69d1617486ee5627f055ffc3075b5548 100644 (file)
@@ -3,8 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle
- * Copyright (c) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2011 MIPS Technologies, Inc.
  */
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
@@ -28,9 +29,9 @@ LEAF(__strlen_user_asm)
 
 FEXPORT(__strlen_user_nocheck_asm)
        move            v0, a0
-1:     EX(lb, t0, (v0), .Lfault)
+1:     EX(lbu, v1, (v0), .Lfault)
        PTR_ADDIU       v0, 1
-       bnez            t0, 1b
+       bnez            v1, 1b
        PTR_SUBU        v0, a0
        jr              ra
        END(__strlen_user_asm)
index bad5394875031ecb08e668a53b5bbca70649de95..92870b6b53eaeee044a0424ef1cdd16870ec445a 100644 (file)
@@ -3,7 +3,8 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1996, 1999 by Ralf Baechle
+ * Copyright (C) 1996, 1999 by Ralf Baechle
+ * Copyright (C) 2011 MIPS Technologies, Inc.
  */
 #include <linux/errno.h>
 #include <asm/asm.h>
@@ -33,26 +34,27 @@ LEAF(__strncpy_from_user_asm)
        bnez            v0, .Lfault
 
 FEXPORT(__strncpy_from_user_nocheck_asm)
-       move            v0, zero
-       move            v1, a1
        .set            noreorder
-1:     EX(lbu, t0, (v1), .Lfault)
+       move            t0, zero
+       move            v1, a1
+1:     EX(lbu, v0, (v1), .Lfault)
        PTR_ADDIU       v1, 1
        R10KCBARRIER(0(ra))
-       beqz            t0, 2f
-        sb             t0, (a0)
-       PTR_ADDIU       v0, 1
-       .set            reorder
-       PTR_ADDIU       a0, 1
-       bne             v0, a2, 1b
-2:     PTR_ADDU        t0, a1, v0
-       xor             t0, a1
-       bltz            t0, .Lfault
+       beqz            v0, 2f
+        sb             v0, (a0)
+       PTR_ADDIU       t0, 1
+       bne             t0, a2, 1b
+        PTR_ADDIU      a0, 1
+2:     PTR_ADDU        v0, a1, t0
+       xor             v0, a1
+       bltz            v0, .Lfault
+        nop
        jr              ra                      # return n
+        move           v0, t0
        END(__strncpy_from_user_asm)
 
-.Lfault:       li              v0, -EFAULT
-       jr              ra
+.Lfault: jr            ra
+         li            v0, -EFAULT
 
        .section        __ex_table,"a"
        PTR             1b, .Lfault
index beea03c8c0ce823130ab25b421f6874b5a2a13b8..fcacea5e61f1e685891e8e4af7ec8f701c596fd2 100644 (file)
@@ -35,7 +35,7 @@ FEXPORT(__strnlen_user_nocheck_asm)
        PTR_ADDU        a1, a0                  # stop pointer
 1:     beq             v0, a1, 1f              # limit reached?
        EX(lb, t0, (v0), .Lfault)
-       PTR_ADD       v0, 1
+       PTR_ADDIU       v0, 1
        bnez            t0, 1b
 1:     PTR_SUBU        v0, a0
        jr              ra
index afb5a0bcf7a5a524c4fe75450be559e8402c68d4..f03771900813cb69c9ddd5f579841350a8c48786 100644 (file)
@@ -45,6 +45,7 @@
 #include <asm/signal.h>
 #include <asm/mipsregs.h>
 #include <asm/fpu_emulator.h>
+#include <asm/fpu.h>
 #include <asm/uaccess.h>
 #include <asm/branch.h>
 
@@ -81,6 +82,11 @@ DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
 /* Determine rounding mode from the RM bits of the FCSR */
 #define modeindex(v) ((v) & FPU_CSR_RM)
 
+/* microMIPS bitfields */
+#define MM_POOL32A_MINOR_MASK  0x3f
+#define MM_POOL32A_MINOR_SHIFT 0x6
+#define MM_MIPS32_COND_FC      0x30
+
 /* Convert Mips rounding mode (0..3) to IEEE library modes. */
 static const unsigned char ieee_rm[4] = {
        [FPU_CSR_RN] = IEEE754_RN,
@@ -110,6 +116,556 @@ static const unsigned int fpucondbit[8] = {
 };
 #endif
 
+/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
+static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
+
+/* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */
+static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0};
+static const int sdps_format[] = {16, 17, 22, 0, 0, 0, 0, 0};
+static const int dwl_format[] = {17, 20, 21, 0, 0, 0, 0, 0};
+static const int swl_format[] = {16, 20, 21, 0, 0, 0, 0, 0};
+
+/*
+ * This function translates a 32-bit microMIPS instruction
+ * into a 32-bit MIPS32 instruction. Returns 0 on success
+ * and SIGILL otherwise.
+ */
+static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
+{
+       union mips_instruction insn = *insn_ptr;
+       union mips_instruction mips32_insn = insn;
+       int func, fmt, op;
+
+       switch (insn.mm_i_format.opcode) {
+       case mm_ldc132_op:
+               mips32_insn.mm_i_format.opcode = ldc1_op;
+               mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+               mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+               break;
+       case mm_lwc132_op:
+               mips32_insn.mm_i_format.opcode = lwc1_op;
+               mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+               mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+               break;
+       case mm_sdc132_op:
+               mips32_insn.mm_i_format.opcode = sdc1_op;
+               mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+               mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+               break;
+       case mm_swc132_op:
+               mips32_insn.mm_i_format.opcode = swc1_op;
+               mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+               mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+               break;
+       case mm_pool32i_op:
+               /* NOTE: the branch offset is shifted left by 1 in microMIPS mode. */
+               if ((insn.mm_i_format.rt == mm_bc1f_op) ||
+                   (insn.mm_i_format.rt == mm_bc1t_op)) {
+                       mips32_insn.fb_format.opcode = cop1_op;
+                       mips32_insn.fb_format.bc = bc_op;
+                       mips32_insn.fb_format.flag =
+                               (insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
+               } else
+                       return SIGILL;
+               break;
+       case mm_pool32f_op:
+               switch (insn.mm_fp0_format.func) {
+               case mm_32f_01_op:
+               case mm_32f_11_op:
+               case mm_32f_02_op:
+               case mm_32f_12_op:
+               case mm_32f_41_op:
+               case mm_32f_51_op:
+               case mm_32f_42_op:
+               case mm_32f_52_op:
+                       op = insn.mm_fp0_format.func;
+                       if (op == mm_32f_01_op)
+                               func = madd_s_op;
+                       else if (op == mm_32f_11_op)
+                               func = madd_d_op;
+                       else if (op == mm_32f_02_op)
+                               func = nmadd_s_op;
+                       else if (op == mm_32f_12_op)
+                               func = nmadd_d_op;
+                       else if (op == mm_32f_41_op)
+                               func = msub_s_op;
+                       else if (op == mm_32f_51_op)
+                               func = msub_d_op;
+                       else if (op == mm_32f_42_op)
+                               func = nmsub_s_op;
+                       else
+                               func = nmsub_d_op;
+                       mips32_insn.fp6_format.opcode = cop1x_op;
+                       mips32_insn.fp6_format.fr = insn.mm_fp6_format.fr;
+                       mips32_insn.fp6_format.ft = insn.mm_fp6_format.ft;
+                       mips32_insn.fp6_format.fs = insn.mm_fp6_format.fs;
+                       mips32_insn.fp6_format.fd = insn.mm_fp6_format.fd;
+                       mips32_insn.fp6_format.func = func;
+                       break;
+               case mm_32f_10_op:
+                       func = -1;      /* Invalid */
+                       op = insn.mm_fp5_format.op & 0x7;
+                       if (op == mm_ldxc1_op)
+                               func = ldxc1_op;
+                       else if (op == mm_sdxc1_op)
+                               func = sdxc1_op;
+                       else if (op == mm_lwxc1_op)
+                               func = lwxc1_op;
+                       else if (op == mm_swxc1_op)
+                               func = swxc1_op;
+
+                       if (func != -1) {
+                               mips32_insn.r_format.opcode = cop1x_op;
+                               mips32_insn.r_format.rs =
+                                       insn.mm_fp5_format.base;
+                               mips32_insn.r_format.rt =
+                                       insn.mm_fp5_format.index;
+                               mips32_insn.r_format.rd = 0;
+                               mips32_insn.r_format.re = insn.mm_fp5_format.fd;
+                               mips32_insn.r_format.func = func;
+                       } else
+                               return SIGILL;
+                       break;
+               case mm_32f_40_op:
+                       op = -1;        /* Invalid */
+                       if (insn.mm_fp2_format.op == mm_fmovt_op)
+                               op = 1;
+                       else if (insn.mm_fp2_format.op == mm_fmovf_op)
+                               op = 0;
+                       if (op != -1) {
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp2_format.fmt];
+                               mips32_insn.fp0_format.ft =
+                                       (insn.mm_fp2_format.cc<<2) + op;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp2_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp2_format.fd;
+                               mips32_insn.fp0_format.func = fmovc_op;
+                       } else
+                               return SIGILL;
+                       break;
+               case mm_32f_60_op:
+                       func = -1;      /* Invalid */
+                       if (insn.mm_fp0_format.op == mm_fadd_op)
+                               func = fadd_op;
+                       else if (insn.mm_fp0_format.op == mm_fsub_op)
+                               func = fsub_op;
+                       else if (insn.mm_fp0_format.op == mm_fmul_op)
+                               func = fmul_op;
+                       else if (insn.mm_fp0_format.op == mm_fdiv_op)
+                               func = fdiv_op;
+                       if (func != -1) {
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp0_format.fmt];
+                               mips32_insn.fp0_format.ft =
+                                       insn.mm_fp0_format.ft;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp0_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp0_format.fd;
+                               mips32_insn.fp0_format.func = func;
+                       } else
+                               return SIGILL;
+                       break;
+               case mm_32f_70_op:
+                       func = -1;      /* Invalid */
+                       if (insn.mm_fp0_format.op == mm_fmovn_op)
+                               func = fmovn_op;
+                       else if (insn.mm_fp0_format.op == mm_fmovz_op)
+                               func = fmovz_op;
+                       if (func != -1) {
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp0_format.fmt];
+                               mips32_insn.fp0_format.ft =
+                                       insn.mm_fp0_format.ft;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp0_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp0_format.fd;
+                               mips32_insn.fp0_format.func = func;
+                       } else
+                               return SIGILL;
+                       break;
+               case mm_32f_73_op:    /* POOL32FXF */
+                       switch (insn.mm_fp1_format.op) {
+                       case mm_movf0_op:
+                       case mm_movf1_op:
+                       case mm_movt0_op:
+                       case mm_movt1_op:
+                               if ((insn.mm_fp1_format.op & 0x7f) ==
+                                   mm_movf0_op)
+                                       op = 0;
+                               else
+                                       op = 1;
+                               mips32_insn.r_format.opcode = spec_op;
+                               mips32_insn.r_format.rs = insn.mm_fp4_format.fs;
+                               mips32_insn.r_format.rt =
+                                       (insn.mm_fp4_format.cc << 2) + op;
+                               mips32_insn.r_format.rd = insn.mm_fp4_format.rt;
+                               mips32_insn.r_format.re = 0;
+                               mips32_insn.r_format.func = movc_op;
+                               break;
+                       case mm_fcvtd0_op:
+                       case mm_fcvtd1_op:
+                       case mm_fcvts0_op:
+                       case mm_fcvts1_op:
+                               if ((insn.mm_fp1_format.op & 0x7f) ==
+                                   mm_fcvtd0_op) {
+                                       func = fcvtd_op;
+                                       fmt = swl_format[insn.mm_fp3_format.fmt];
+                               } else {
+                                       func = fcvts_op;
+                                       fmt = dwl_format[insn.mm_fp3_format.fmt];
+                               }
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt = fmt;
+                               mips32_insn.fp0_format.ft = 0;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp3_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp3_format.rt;
+                               mips32_insn.fp0_format.func = func;
+                               break;
+                       case mm_fmov0_op:
+                       case mm_fmov1_op:
+                       case mm_fabs0_op:
+                       case mm_fabs1_op:
+                       case mm_fneg0_op:
+                       case mm_fneg1_op:
+                               if ((insn.mm_fp1_format.op & 0x7f) ==
+                                   mm_fmov0_op)
+                                       func = fmov_op;
+                               else if ((insn.mm_fp1_format.op & 0x7f) ==
+                                        mm_fabs0_op)
+                                       func = fabs_op;
+                               else
+                                       func = fneg_op;
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp3_format.fmt];
+                               mips32_insn.fp0_format.ft = 0;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp3_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp3_format.rt;
+                               mips32_insn.fp0_format.func = func;
+                               break;
+                       case mm_ffloorl_op:
+                       case mm_ffloorw_op:
+                       case mm_fceill_op:
+                       case mm_fceilw_op:
+                       case mm_ftruncl_op:
+                       case mm_ftruncw_op:
+                       case mm_froundl_op:
+                       case mm_froundw_op:
+                       case mm_fcvtl_op:
+                       case mm_fcvtw_op:
+                               if (insn.mm_fp1_format.op == mm_ffloorl_op)
+                                       func = ffloorl_op;
+                               else if (insn.mm_fp1_format.op == mm_ffloorw_op)
+                                       func = ffloor_op;
+                               else if (insn.mm_fp1_format.op == mm_fceill_op)
+                                       func = fceill_op;
+                               else if (insn.mm_fp1_format.op == mm_fceilw_op)
+                                       func = fceil_op;
+                               else if (insn.mm_fp1_format.op == mm_ftruncl_op)
+                                       func = ftruncl_op;
+                               else if (insn.mm_fp1_format.op == mm_ftruncw_op)
+                                       func = ftrunc_op;
+                               else if (insn.mm_fp1_format.op == mm_froundl_op)
+                                       func = froundl_op;
+                               else if (insn.mm_fp1_format.op == mm_froundw_op)
+                                       func = fround_op;
+                               else if (insn.mm_fp1_format.op == mm_fcvtl_op)
+                                       func = fcvtl_op;
+                               else
+                                       func = fcvtw_op;
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sd_format[insn.mm_fp1_format.fmt];
+                               mips32_insn.fp0_format.ft = 0;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp1_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp1_format.rt;
+                               mips32_insn.fp0_format.func = func;
+                               break;
+                       case mm_frsqrt_op:
+                       case mm_fsqrt_op:
+                       case mm_frecip_op:
+                               if (insn.mm_fp1_format.op == mm_frsqrt_op)
+                                       func = frsqrt_op;
+                               else if (insn.mm_fp1_format.op == mm_fsqrt_op)
+                                       func = fsqrt_op;
+                               else
+                                       func = frecip_op;
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp1_format.fmt];
+                               mips32_insn.fp0_format.ft = 0;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp1_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp1_format.rt;
+                               mips32_insn.fp0_format.func = func;
+                               break;
+                       case mm_mfc1_op:
+                       case mm_mtc1_op:
+                       case mm_cfc1_op:
+                       case mm_ctc1_op:
+                               if (insn.mm_fp1_format.op == mm_mfc1_op)
+                                       op = mfc_op;
+                               else if (insn.mm_fp1_format.op == mm_mtc1_op)
+                                       op = mtc_op;
+                               else if (insn.mm_fp1_format.op == mm_cfc1_op)
+                                       op = cfc_op;
+                               else
+                                       op = ctc_op;
+                               mips32_insn.fp1_format.opcode = cop1_op;
+                               mips32_insn.fp1_format.op = op;
+                               mips32_insn.fp1_format.rt =
+                                       insn.mm_fp1_format.rt;
+                               mips32_insn.fp1_format.fs =
+                                       insn.mm_fp1_format.fs;
+                               mips32_insn.fp1_format.fd = 0;
+                               mips32_insn.fp1_format.func = 0;
+                               break;
+                       default:
+                               return SIGILL;
+                               break;
+                       }
+                       break;
+               case mm_32f_74_op:      /* c.cond.fmt */
+                       mips32_insn.fp0_format.opcode = cop1_op;
+                       mips32_insn.fp0_format.fmt =
+                               sdps_format[insn.mm_fp4_format.fmt];
+                       mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt;
+                       mips32_insn.fp0_format.fs = insn.mm_fp4_format.fs;
+                       mips32_insn.fp0_format.fd = insn.mm_fp4_format.cc << 2;
+                       mips32_insn.fp0_format.func =
+                               insn.mm_fp4_format.cond | MM_MIPS32_COND_FC;
+                       break;
+               default:
+                       return SIGILL;
+                       break;
+               }
+               break;
+       default:
+               return SIGILL;
+               break;
+       }
+
+       *insn_ptr = mips32_insn;
+       return 0;
+}
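cop1Emulate() further down calls this helper to rewrite a fetched microMIPS encoding in place before handing it to the common MIPS32 emulation path. A hedged sketch of that calling pattern (hypothetical wrapper name):

/* Rewrite *ir from microMIPS to MIPS32 in place, or report SIGILL. */
static int convert_mm_fpu_insn(union mips_instruction *ir)
{
	if (microMIPS32_to_MIPS32(ir) == SIGILL)
		return SIGILL;	/* not a recognised microMIPS FPU encoding */
	return 0;		/* *ir now holds the MIPS32 encoding */
}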
+
+int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+                    unsigned long *contpc)
+{
+       union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+       int bc_false = 0;
+       unsigned int fcr31;
+       unsigned int bit;
+
+       switch (insn.mm_i_format.opcode) {
+       case mm_pool32a_op:
+               if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
+                   mm_pool32axf_op) {
+                       switch (insn.mm_i_format.simmediate >>
+                               MM_POOL32A_MINOR_SHIFT) {
+                       case mm_jalr_op:
+                       case mm_jalrhb_op:
+                       case mm_jalrs_op:
+                       case mm_jalrshb_op:
+                               if (insn.mm_i_format.rt != 0)   /* Not mm_jr */
+                                       regs->regs[insn.mm_i_format.rt] =
+                                               regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               dec_insn.next_pc_inc;
+                               *contpc = regs->regs[insn.mm_i_format.rs];
+                               return 1;
+                               break;
+                       }
+               }
+               break;
+       case mm_pool32i_op:
+               switch (insn.mm_i_format.rt) {
+               case mm_bltzals_op:
+               case mm_bltzal_op:
+                       regs->regs[31] = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       /* Fall through */
+               case mm_bltz_op:
+                       if ((long)regs->regs[insn.mm_i_format.rs] < 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               case mm_bgezals_op:
+               case mm_bgezal_op:
+                       regs->regs[31] = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       /* Fall through */
+               case mm_bgez_op:
+                       if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               case mm_blez_op:
+                       if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               case mm_bgtz_op:
+                       if ((long)regs->regs[insn.mm_i_format.rs] > 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               case mm_bc2f_op:
+               case mm_bc1f_op:
+                       bc_false = 1;
+                       /* Fall through */
+               case mm_bc2t_op:
+               case mm_bc1t_op:
+                       preempt_disable();
+                       if (is_fpu_owner())
+                               asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+                       else
+                               fcr31 = current->thread.fpu.fcr31;
+                       preempt_enable();
+
+                       if (bc_false)
+                               fcr31 = ~fcr31;
+
+                       bit = (insn.mm_i_format.rs >> 2);
+                       bit += (bit != 0);
+                       bit += 23;
+                       if (fcr31 & (1 << bit))
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc + dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               }
+               break;
+       case mm_pool16c_op:
+               switch (insn.mm_i_format.rt) {
+               case mm_jalr16_op:
+               case mm_jalrs16_op:
+                       regs->regs[31] = regs->cp0_epc +
+                               dec_insn.pc_inc + dec_insn.next_pc_inc;
+                       /* Fall through */
+               case mm_jr16_op:
+                       *contpc = regs->regs[insn.mm_i_format.rs];
+                       return 1;
+                       break;
+               }
+               break;
+       case mm_beqz16_op:
+               if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.mm_b1_format.simmediate << 1);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc + dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case mm_bnez16_op:
+               if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.mm_b1_format.simmediate << 1);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc + dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case mm_b16_op:
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                        (insn.mm_b0_format.simmediate << 1);
+               return 1;
+               break;
+       case mm_beq32_op:
+               if (regs->regs[insn.mm_i_format.rs] ==
+                   regs->regs[insn.mm_i_format.rt])
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.mm_i_format.simmediate << 1);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case mm_bne32_op:
+               if (regs->regs[insn.mm_i_format.rs] !=
+                   regs->regs[insn.mm_i_format.rt])
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.mm_i_format.simmediate << 1);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc + dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case mm_jalx32_op:
+               regs->regs[31] = regs->cp0_epc +
+                       dec_insn.pc_inc + dec_insn.next_pc_inc;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc;
+               *contpc >>= 28;
+               *contpc <<= 28;
+               *contpc |= (insn.j_format.target << 2);
+               return 1;
+               break;
+       case mm_jals32_op:
+       case mm_jal32_op:
+               regs->regs[31] = regs->cp0_epc +
+                       dec_insn.pc_inc + dec_insn.next_pc_inc;
+               /* Fall through */
+       case mm_j32_op:
+               *contpc = regs->cp0_epc + dec_insn.pc_inc;
+               *contpc >>= 27;
+               *contpc <<= 27;
+               *contpc |= (insn.j_format.target << 1);
+               set_isa16_mode(*contpc);
+               return 1;
+               break;
+       }
+       return 0;
+}
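In the mm_bc1f/mm_bc1t cases above, the condition-code number taken from the rs field is turned into an FCSR bit position: FCC0 sits at bit 23 and FCC1..FCC7 at bits 25..31, which is exactly what the sequence bit += (bit != 0); bit += 23; computes and what the fpucondbit[] table earlier in this file encodes. The same mapping as a standalone sketch (hypothetical function name):

/* Map FP condition code 0..7 to its bit position in the FCSR. */
static inline unsigned int fcc_to_fcsr_bit(unsigned int cc)
{
	return cc ? cc + 24 : 23;	/* FCC0 = bit 23, FCCn = bit 24 + n */
}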
 
 /*
  * Redundant with logic already in kernel/branch.c,
@@ -117,53 +673,177 @@ static const unsigned int fpucondbit[8] = {
  * a single subroutine should be used across both
  * modules.
  */
-static int isBranchInstr(mips_instruction * i)
+static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+                        unsigned long *contpc)
 {
-       switch (MIPSInst_OPCODE(*i)) {
+       union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+       unsigned int fcr31;
+       unsigned int bit = 0;
+
+       switch (insn.i_format.opcode) {
        case spec_op:
-               switch (MIPSInst_FUNC(*i)) {
+               switch (insn.r_format.func) {
                case jalr_op:
+                       regs->regs[insn.r_format.rd] =
+                               regs->cp0_epc + dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       /* Fall through */
                case jr_op:
+                       *contpc = regs->regs[insn.r_format.rs];
                        return 1;
+                       break;
                }
                break;
-
        case bcond_op:
-               switch (MIPSInst_RT(*i)) {
+               switch (insn.i_format.rt) {
+               case bltzal_op:
+               case bltzall_op:
+                       regs->regs[31] = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       /* Fall through */
                case bltz_op:
-               case bgez_op:
                case bltzl_op:
-               case bgezl_op:
-               case bltzal_op:
+                       if ((long)regs->regs[insn.i_format.rs] < 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.i_format.simmediate << 2);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
                case bgezal_op:
-               case bltzall_op:
                case bgezall_op:
+                       regs->regs[31] = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       /* Fall through */
+               case bgez_op:
+               case bgezl_op:
+                       if ((long)regs->regs[insn.i_format.rs] >= 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.i_format.simmediate << 2);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
                        return 1;
+                       break;
                }
                break;
-
-       case j_op:
-       case jal_op:
        case jalx_op:
+               set_isa16_mode(bit);
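+               /* Fall through */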
+       case jal_op:
+               regs->regs[31] = regs->cp0_epc +
+                       dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+               /* Fall through */
+       case j_op:
+               *contpc = regs->cp0_epc + dec_insn.pc_inc;
+               *contpc >>= 28;
+               *contpc <<= 28;
+               *contpc |= (insn.j_format.target << 2);
+               /* Set microMIPS mode bit: XOR for jalx. */
+               *contpc ^= bit;
+               return 1;
+               break;
        case beq_op:
-       case bne_op:
-       case blez_op:
-       case bgtz_op:
        case beql_op:
+               if (regs->regs[insn.i_format.rs] ==
+                   regs->regs[insn.i_format.rt])
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case bne_op:
        case bnel_op:
+               if (regs->regs[insn.i_format.rs] !=
+                   regs->regs[insn.i_format.rt])
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case blez_op:
        case blezl_op:
+               if ((long)regs->regs[insn.i_format.rs] <= 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case bgtz_op:
        case bgtzl_op:
+               if ((long)regs->regs[insn.i_format.rs] > 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
                return 1;
-
+               break;
        case cop0_op:
        case cop1_op:
        case cop2_op:
        case cop1x_op:
-               if (MIPSInst_RS(*i) == bc_op)
-                       return 1;
+               if (insn.i_format.rs == bc_op) {
+                       preempt_disable();
+                       if (is_fpu_owner())
+                               asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+                       else
+                               fcr31 = current->thread.fpu.fcr31;
+                       preempt_enable();
+
+                       bit = (insn.i_format.rt >> 2);
+                       bit += (bit != 0);
+                       bit += 23;
+                       switch (insn.i_format.rt & 3) {
+                       case 0: /* bc1f */
+                       case 2: /* bc1fl */
+                               if (~fcr31 & (1 << bit))
+                                       *contpc = regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               (insn.i_format.simmediate << 2);
+                               else
+                                       *contpc = regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               dec_insn.next_pc_inc;
+                               return 1;
+                               break;
+                       case 1: /* bc1t */
+                       case 3: /* bc1tl */
+                               if (fcr31 & (1 << bit))
+                                       *contpc = regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               (insn.i_format.simmediate << 2);
+                               else
+                                       *contpc = regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               dec_insn.next_pc_inc;
+                               return 1;
+                               break;
+                       }
+               }
                break;
        }
-
        return 0;
 }
 
@@ -210,26 +890,23 @@ static inline int cop1_64bit(struct pt_regs *xcp)
  */
 
 static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
-                      void *__user *fault_addr)
+               struct mm_decoded_insn dec_insn, void *__user *fault_addr)
 {
        mips_instruction ir;
-       unsigned long emulpc, contpc;
+       unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
        unsigned int cond;
-
-       if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
-               MIPS_FPU_EMU_INC_STATS(errors);
-               *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-               return SIGBUS;
-       }
-       if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
-               MIPS_FPU_EMU_INC_STATS(errors);
-               *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-               return SIGSEGV;
-       }
+       int pc_inc;
 
        /* XXX NEC Vr54xx bug workaround */
-       if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir))
-               xcp->cp0_cause &= ~CAUSEF_BD;
+       if (xcp->cp0_cause & CAUSEF_BD) {
+               if (dec_insn.micro_mips_mode) {
+                       if (!mm_isBranchInstr(xcp, dec_insn, &contpc))
+                               xcp->cp0_cause &= ~CAUSEF_BD;
+               } else {
+                       if (!isBranchInstr(xcp, dec_insn, &contpc))
+                               xcp->cp0_cause &= ~CAUSEF_BD;
+               }
+       }
 
        if (xcp->cp0_cause & CAUSEF_BD) {
                /*
@@ -244,32 +921,33 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                 * Linux MIPS branch emulator operates on context, updating the
                 * cp0_epc.
                 */
-               emulpc = xcp->cp0_epc + 4;      /* Snapshot emulation target */
+               ir = dec_insn.next_insn;  /* process delay slot instr */
+               pc_inc = dec_insn.next_pc_inc;
+       } else {
+               ir = dec_insn.insn;       /* process current instr */
+               pc_inc = dec_insn.pc_inc;
+       }
 
-               if (__compute_return_epc(xcp) < 0) {
-#ifdef CP1DBG
-                       printk("failed to emulate branch at %p\n",
-                               (void *) (xcp->cp0_epc));
-#endif
+       /*
+        * Since microMIPS FPU instructions are a subset of MIPS32 FPU
+        * instructions, we convert microMIPS FPU instructions into
+        * MIPS32 instructions so that we can reuse all of the
+        * FPU emulation code.
+        *
+        * NOTE: We cannot do this for branch instructions since they
+        *       are not a subset. Example: Cannot emulate a 16-bit
+        *       aligned target address with a MIPS32 instruction.
+        */
+       if (dec_insn.micro_mips_mode) {
+               /*
+                * If the next instruction is a 16-bit instruction, it
+                * cannot be an FPU instruction. This can happen since
+                * we may be called for non-FPU instructions.
+                */
+               if ((pc_inc == 2) ||
+                       (microMIPS32_to_MIPS32((union mips_instruction *)&ir)
+                        == SIGILL))
                        return SIGILL;
-               }
-               if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) {
-                       MIPS_FPU_EMU_INC_STATS(errors);
-                       *fault_addr = (mips_instruction __user *)emulpc;
-                       return SIGBUS;
-               }
-               if (__get_user(ir, (mips_instruction __user *) emulpc)) {
-                       MIPS_FPU_EMU_INC_STATS(errors);
-                       *fault_addr = (mips_instruction __user *)emulpc;
-                       return SIGSEGV;
-               }
-               /* __compute_return_epc() will have updated cp0_epc */
-               contpc = xcp->cp0_epc;
-               /* In order not to confuse ptrace() et al, tweak context */
-               xcp->cp0_epc = emulpc - 4;
-       } else {
-               emulpc = xcp->cp0_epc;
-               contpc = xcp->cp0_epc + 4;
        }
 
       emul:
@@ -474,22 +1152,35 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                                /* branch taken: emulate dslot
                                 * instruction
                                 */
-                               xcp->cp0_epc += 4;
-                               contpc = (xcp->cp0_epc +
-                                       (MIPSInst_SIMM(ir) << 2));
-
-                               if (!access_ok(VERIFY_READ, xcp->cp0_epc,
-                                              sizeof(mips_instruction))) {
-                                       MIPS_FPU_EMU_INC_STATS(errors);
-                                       *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-                                       return SIGBUS;
-                               }
-                               if (__get_user(ir,
-                                   (mips_instruction __user *) xcp->cp0_epc)) {
-                                       MIPS_FPU_EMU_INC_STATS(errors);
-                                       *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-                                       return SIGSEGV;
-                               }
+                               xcp->cp0_epc += dec_insn.pc_inc;
+
+                               contpc = MIPSInst_SIMM(ir);
+                               ir = dec_insn.next_insn;
+                               if (dec_insn.micro_mips_mode) {
+                                       contpc = (xcp->cp0_epc + (contpc << 1));
+
+                                       /* If 16-bit instruction, not FPU. */
+                                       if ((dec_insn.next_pc_inc == 2) ||
+                                               (microMIPS32_to_MIPS32((union mips_instruction *)&ir) == SIGILL)) {
+
+                                               /*
+                                                * This instruction will be
+                                                * stored on the stack as a
+                                                * 32-bit word, so pad the
+                                                * 16-bit instruction with a
+                                                * NOP16 in the low halfword.
+                                                */
+                                               if (dec_insn.next_pc_inc == 2)
+                                                       ir = (ir & (~0xffff)) | MM_NOP16;
+
+                                               /*
+                                                * Single step the non-CP1
+                                                * instruction in the dslot.
+                                                */
+                                               return mips_dsemul(xcp, ir, contpc);
+                                       }
+                               } else
+                                       contpc = (xcp->cp0_epc + (contpc << 2));
 
                                switch (MIPSInst_OPCODE(ir)) {
                                case lwc1_op:
@@ -525,8 +1216,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                                         * branch likely nullifies
                                         * dslot if not taken
                                         */
-                                       xcp->cp0_epc += 4;
-                                       contpc += 4;
+                                       xcp->cp0_epc += dec_insn.pc_inc;
+                                       contpc += dec_insn.pc_inc;
                                        /*
                                         * else continue & execute
                                         * dslot as normal insn
@@ -1313,25 +2004,75 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
        int has_fpu, void *__user *fault_addr)
 {
        unsigned long oldepc, prevepc;
-       mips_instruction insn;
+       struct mm_decoded_insn dec_insn;
+       u16 instr[4];
+       u16 *instr_ptr;
        int sig = 0;
 
        oldepc = xcp->cp0_epc;
        do {
                prevepc = xcp->cp0_epc;
 
-               if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
-                       MIPS_FPU_EMU_INC_STATS(errors);
-                       *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-                       return SIGBUS;
-               }
-               if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
-                       MIPS_FPU_EMU_INC_STATS(errors);
-                       *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-                       return SIGSEGV;
+               if (get_isa16_mode(prevepc) && cpu_has_mmips) {
+                       /*
+                        * Get next 2 microMIPS instructions and convert them
+                        * into 32-bit instructions.
+                        */
+                       if ((get_user(instr[0], (u16 __user *)msk_isa16_mode(xcp->cp0_epc))) ||
+                           (get_user(instr[1], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 2))) ||
+                           (get_user(instr[2], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 4))) ||
+                           (get_user(instr[3], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 6)))) {
+                               MIPS_FPU_EMU_INC_STATS(errors);
+                               return SIGBUS;
+                       }
+                       instr_ptr = instr;
+
+                       /* Get first instruction. */
+                       if (mm_insn_16bit(*instr_ptr)) {
+                               /* Duplicate the half-word. */
+                               dec_insn.insn = (*instr_ptr << 16) |
+                                       (*instr_ptr);
+                               /* 16-bit instruction. */
+                               dec_insn.pc_inc = 2;
+                               instr_ptr += 1;
+                       } else {
+                               dec_insn.insn = (*instr_ptr << 16) |
+                                       *(instr_ptr+1);
+                               /* 32-bit instruction. */
+                               dec_insn.pc_inc = 4;
+                               instr_ptr += 2;
+                       }
+                       /* Get second instruction. */
+                       if (mm_insn_16bit(*instr_ptr)) {
+                               /* Duplicate the half-word. */
+                               dec_insn.next_insn = (*instr_ptr << 16) |
+                                       (*instr_ptr);
+                               /* 16-bit instruction. */
+                               dec_insn.next_pc_inc = 2;
+                       } else {
+                               dec_insn.next_insn = (*instr_ptr << 16) |
+                                       *(instr_ptr+1);
+                               /* 32-bit instruction. */
+                               dec_insn.next_pc_inc = 4;
+                       }
+                       dec_insn.micro_mips_mode = 1;
+               } else {
+                       if ((get_user(dec_insn.insn,
+                           (mips_instruction __user *) xcp->cp0_epc)) ||
+                           (get_user(dec_insn.next_insn,
+                           (mips_instruction __user *)(xcp->cp0_epc+4)))) {
+                               MIPS_FPU_EMU_INC_STATS(errors);
+                               return SIGBUS;
+                       }
+                       dec_insn.pc_inc = 4;
+                       dec_insn.next_pc_inc = 4;
+                       dec_insn.micro_mips_mode = 0;
                }
-               if (insn == 0)
-                       xcp->cp0_epc += 4;      /* skip nops */
+
+               if ((dec_insn.insn == 0) ||
+                  ((dec_insn.pc_inc == 2) &&
+                  ((dec_insn.insn & 0xffff) == MM_NOP16)))
+                       xcp->cp0_epc += dec_insn.pc_inc;        /* Skip NOPs */
                else {
                        /*
                         * The 'ieee754_csr' is an alias of
@@ -1341,7 +2082,7 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                         */
                        /* convert to ieee library modes */
                        ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
-                       sig = cop1Emulate(xcp, ctx, fault_addr);
+                       sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr);
                        /* revert to mips rounding mode */
                        ieee754_csr.rm = mips_rm[ieee754_csr.rm];
                }
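
The microMIPS branch above fetches up to four 16-bit halfwords and packs them into the 32-bit fields of struct mm_decoded_insn, duplicating a lone 16-bit instruction into both halves so the decoder downstream always sees a full word. A standalone sketch of that packing step (plain user-space C; is_16bit() is a stub standing in for the kernel's mm_insn_16bit() test, not its real implementation):

#include <stdint.h>
#include <stdio.h>

struct decoded {
        uint32_t insn;       /* first instruction, packed into 32 bits  */
        uint32_t next_insn;  /* following instruction, packed likewise  */
        int pc_inc;          /* bytes consumed by the first instruction */
        int next_pc_inc;     /* bytes consumed by the second one        */
};

static int is_16bit(uint16_t hw)
{
        /* Placeholder test: treat a clear top bit as "16-bit encoding". */
        return (hw & 0x8000) == 0;
}

static void pack(const uint16_t hw[4], struct decoded *d)
{
        const uint16_t *p = hw;

        if (is_16bit(*p)) {
                d->insn = ((uint32_t)*p << 16) | *p;   /* duplicate halfword */
                d->pc_inc = 2;
                p += 1;
        } else {
                d->insn = ((uint32_t)p[0] << 16) | p[1];
                d->pc_inc = 4;
                p += 2;
        }

        if (is_16bit(*p)) {
                d->next_insn = ((uint32_t)*p << 16) | *p;
                d->next_pc_inc = 2;
        } else {
                d->next_insn = ((uint32_t)p[0] << 16) | p[1];
                d->next_pc_inc = 4;
        }
}

int main(void)
{
        uint16_t hw[4] = { 0x0c00, 0x8001, 0x2345, 0x0000 };
        struct decoded d;

        pack(hw, &d);
        printf("insn=%08x (+%d)  next=%08x (+%d)\n",
               d.insn, d.pc_inc, d.next_insn, d.next_pc_inc);
        return 0;
}
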
index 384a3b0091ea33e507fe063c0b8b43a5ab714823..7ea622ab8dad32df6bf1fbd8d07bffdbf48c3f72 100644 (file)
@@ -55,7 +55,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
        struct emuframe __user *fr;
        int err;
 
-       if (ir == 0) {          /* a nop is easy */
+       if ((get_isa16_mode(regs->cp0_epc) && ((ir >> 16) == MM_NOP16)) ||
+               (ir == 0)) {
+               /* NOP is easy */
                regs->cp0_epc = cpc;
                regs->cp0_cause &= ~CAUSEF_BD;
                return 0;
@@ -91,8 +93,16 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
        if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
                return SIGBUS;
 
-       err = __put_user(ir, &fr->emul);
-       err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
+       if (get_isa16_mode(regs->cp0_epc)) {
+               err = __put_user(ir >> 16, (u16 __user *)(&fr->emul));
+               err |= __put_user(ir & 0xffff, (u16 __user *)((long)(&fr->emul) + 2));
+               err |= __put_user(BREAK_MATH >> 16, (u16 __user *)(&fr->badinst));
+               err |= __put_user(BREAK_MATH & 0xffff, (u16 __user *)((long)(&fr->badinst) + 2));
+       } else {
+               err = __put_user(ir, &fr->emul);
+               err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
+       }
+
        err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie);
        err |= __put_user(cpc, &fr->epc);
 
@@ -101,7 +111,8 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
                return SIGBUS;
        }
 
-       regs->cp0_epc = (unsigned long) &fr->emul;
+       regs->cp0_epc = ((unsigned long) &fr->emul) |
+               get_isa16_mode(regs->cp0_epc);
 
        flush_cache_sigtramp((unsigned long)&fr->badinst);
 
@@ -114,9 +125,10 @@ int do_dsemulret(struct pt_regs *xcp)
        unsigned long epc;
        u32 insn, cookie;
        int err = 0;
+       u16 instr[2];
 
        fr = (struct emuframe __user *)
-               (xcp->cp0_epc - sizeof(mips_instruction));
+               (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction));
 
        /*
         * If we can't even access the area, something is very wrong, but we'll
@@ -131,7 +143,13 @@ int do_dsemulret(struct pt_regs *xcp)
         *  - Is the instruction pointed to by the EPC a BREAK_MATH?
         *  - Is the following memory word the BD_COOKIE?
         */
-       err = __get_user(insn, &fr->badinst);
+       if (get_isa16_mode(xcp->cp0_epc)) {
+               err = __get_user(instr[0], (u16 __user *)(&fr->badinst));
+               err |= __get_user(instr[1], (u16 __user *)((long)(&fr->badinst) + 2));
+               insn = (instr[0] << 16) | instr[1];
+       } else {
+               err = __get_user(insn, &fr->badinst);
+       }
        err |= __get_user(cookie, &fr->cookie);
 
        if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) {
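
In microMIPS mode the emulation frame above is written with two 16-bit stores per word, high halfword at the lower address, and the ISA bit of the old EPC is OR-ed back into the frame address so the CPU resumes in microMIPS mode. A minimal user-space model of those two steps (BREAK_MATH's value below is a placeholder, and get_isa16_mode() is assumed to be bit 0 of the EPC):

#include <stdint.h>
#include <stdio.h>

#define FAKE_BREAK_MATH 0x0000000du     /* placeholder, not the real encoding */

/* Store one 32-bit word as two halfword writes, high half at the lower
 * address, mirroring the __put_user() pair used for the microMIPS frame. */
static void store_halves(uint16_t *dst, uint32_t word)
{
        dst[0] = word >> 16;
        dst[1] = word & 0xffff;
}

/* Carry the ISA (microMIPS) bit of the old EPC over to the frame address
 * the CPU will resume at. */
static uintptr_t with_isa_bit(uintptr_t frame_addr, uintptr_t old_epc)
{
        return frame_addr | (old_epc & 1);
}

int main(void)
{
        uint16_t emul[2], badinst[2];
        uint32_t ir = 0x46200832u;          /* arbitrary instruction word */
        uintptr_t old_epc = 0x00400125u;    /* bit 0 set: microMIPS mode  */

        store_halves(emul, ir);
        store_halves(badinst, FAKE_BREAK_MATH);

        printf("emul: %04x %04x  badinst: %04x %04x\n",
               emul[0], emul[1], badinst[0], badinst[1]);
        printf("resume at %#lx\n",
               (unsigned long)with_isa_bit(0x7fff0000u, old_epc));
        return 0;
}
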
index 1dcec30ad1c43b7c5e6e06c61f5c46cf04df7eac..e87aae1f2e802835d9a2730b5378d2bd29b8b54f 100644 (file)
@@ -4,7 +4,7 @@
 
 obj-y                          += cache.o dma-default.o extable.o fault.o \
                                   gup.o init.o mmap.o page.o page-funcs.o \
-                                  tlbex.o tlbex-fault.o uasm.o
+                                  tlbex.o tlbex-fault.o uasm-mips.o
 
 obj-$(CONFIG_32BIT)            += ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)            += pgtable-64.o
@@ -22,3 +22,5 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
 obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
 obj-$(CONFIG_MIPS_CPU_SCACHE)  += sc-mips.o
+
+obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o
index 2078915eacb9c9ff740ec2daa1a65274ad1de7a9..21813beec7a56f8c17ff493ca6000324168850fe 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/war.h>
 #include <asm/cacheflush.h> /* for run_uncached() */
 #include <asm/traps.h>
+#include <asm/dma-coherence.h>
 
 /*
  * Special Variant of smp_call_function for use by cache functions:
@@ -136,7 +137,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
                r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
 }
 
-static void (* r4k_blast_dcache)(void);
+void (* r4k_blast_dcache)(void);
+EXPORT_SYMBOL(r4k_blast_dcache);
 
 static void __cpuinit r4k_blast_dcache_setup(void)
 {
@@ -264,7 +266,8 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
                r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
 }
 
-static void (* r4k_blast_icache)(void);
+void (* r4k_blast_icache)(void);
+EXPORT_SYMBOL(r4k_blast_icache);
 
 static void __cpuinit r4k_blast_icache_setup(void)
 {
@@ -1377,20 +1380,6 @@ static void __cpuinit coherency_setup(void)
        }
 }
 
-#if defined(CONFIG_DMA_NONCOHERENT)
-
-static int __cpuinitdata coherentio;
-
-static int __init setcoherentio(char *str)
-{
-       coherentio = 1;
-
-       return 0;
-}
-
-early_param("coherentio", setcoherentio);
-#endif
-
 static void __cpuinit r4k_cache_error_setup(void)
 {
        extern char __weak except_vec2_generic;
@@ -1472,9 +1461,14 @@ void __cpuinit r4k_cache_init(void)
 
        build_clear_page();
        build_copy_page();
-#if !defined(CONFIG_MIPS_CMP)
+
+       /*
+        * We want to run CMP kernels on core with and without coherent
+        * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
+        * or not to flush caches.
+        */
        local_r4k___flush_cache_all(NULL);
-#endif
+
        coherency_setup();
        board_cache_error_setup = r4k_cache_error_setup;
 }
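
r4k_blast_dcache and r4k_blast_icache above are function pointers bound once during cache setup to a worker that matches the detected cache line size; this diff only makes them globally visible with EXPORT_SYMBOL. The pattern in miniature, with invented line sizes and workers:

#include <stdio.h>

/* Workers for the line sizes this sketch pretends to support. */
static void blast_16(void) { puts("flush whole dcache, 16-byte lines"); }
static void blast_32(void) { puts("flush whole dcache, 32-byte lines"); }
static void blast_64(void) { puts("flush whole dcache, 64-byte lines"); }

/* The published hook: bound once, called through the pointer ever after. */
void (*blast_dcache)(void);

static void blast_dcache_setup(int line_size)
{
        switch (line_size) {
        case 16: blast_dcache = blast_16; break;
        case 32: blast_dcache = blast_32; break;
        default: blast_dcache = blast_64; break;
        }
}

int main(void)
{
        blast_dcache_setup(32);  /* done once, e.g. from cache init      */
        blast_dcache();          /* later callers just use the pointer   */
        return 0;
}
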
index 07cec4407b0c00cf073f406738256bd363913dcf..5aeb3eb0b72f87b5f108a6d287517e53d038be2a 100644 (file)
@@ -48,6 +48,7 @@ void (*flush_icache_all)(void);
 
 EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
 EXPORT_SYMBOL(flush_data_cache_page);
+EXPORT_SYMBOL(flush_icache_all);
 
 #ifdef CONFIG_DMA_NONCOHERENT
 
index f9ef83829a523fd65803b3f5b2c8fdfe77e31a8f..caf92ecb37d6966639c47e6eb277d74cb42f2012 100644 (file)
 
 #include <dma-coherence.h>
 
+int coherentio = 0;    /* User defined DMA coherency from command line. */
+EXPORT_SYMBOL_GPL(coherentio);
+int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
+
+static int __init setcoherentio(char *str)
+{
+       coherentio = 1;
+       pr_info("Hardware DMA cache coherency (command line)\n");
+       return 0;
+}
+early_param("coherentio", setcoherentio);
+
+static int __init setnocoherentio(char *str)
+{
+       coherentio = 0;
+       pr_info("Software DMA cache coherency (command line)\n");
+       return 0;
+}
+early_param("nocoherentio", setnocoherentio);
+
 static inline struct page *dma_addr_to_page(struct device *dev,
        dma_addr_t dma_addr)
 {
@@ -115,7 +135,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 
                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
-                       ret = UNCAC_ADDR(ret);
+                       if (!hw_coherentio)
+                               ret = UNCAC_ADDR(ret);
                }
        }
 
@@ -142,7 +163,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 
        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 
-       if (!plat_device_is_coherent(dev))
+       if (!plat_device_is_coherent(dev) && !hw_coherentio)
                addr = CAC_ADDR(addr);
 
        free_pages(addr, get_order(size));
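
The new coherentio and hw_coherentio flags above decide whether mips_dma_alloc_coherent() hands back an uncached alias: with software-maintained coherency the caller must use UNCAC_ADDR(), while a platform that keeps I/O coherent in hardware can return the cached address directly. A small decision-table model (the names and the uncached-alias arithmetic are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for UNCAC_ADDR(): pretend uncached aliases live
 * at a fixed offset above the cached mapping. */
#define FAKE_UNCACHED_OFFSET 0x20000000ul

static uintptr_t coherent_alloc_addr(uintptr_t cached_addr,
                                     int device_is_coherent,
                                     int hw_coherentio)
{
        if (device_is_coherent)
                return cached_addr;     /* nothing to maintain at all */

        /* Software coherency: the cache was written back and invalidated,
         * and unless the platform reports hardware-coherent I/O the caller
         * gets the uncached alias so CPU and device never disagree. */
        if (!hw_coherentio)
                return cached_addr + FAKE_UNCACHED_OFFSET;

        return cached_addr;
}

int main(void)
{
        uintptr_t buf = 0x80100000ul;

        printf("non-coherent dev, sw coherency: %#lx\n",
               (unsigned long)coherent_alloc_addr(buf, 0, 0));
        printf("non-coherent dev, hw coherency: %#lx\n",
               (unsigned long)coherent_alloc_addr(buf, 0, 1));
        return 0;
}
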
index a29fba55b53e79c8d7399d67f101356379f20c73..4eb8dcfaf1ce1953760059faf151d54fe135d6a4 100644 (file)
@@ -247,6 +247,11 @@ void __cpuinit build_clear_page(void)
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
+       static atomic_t run_once = ATOMIC_INIT(0);
+
+       if (atomic_xchg(&run_once, 1)) {
+               return;
+       }
 
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
@@ -389,6 +394,11 @@ void __cpuinit build_copy_page(void)
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
+       static atomic_t run_once = ATOMIC_INIT(0);
+
+       if (atomic_xchg(&run_once, 1)) {
+               return;
+       }
 
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
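
build_clear_page() and build_copy_page() above gain a guard so that repeated calls, e.g. from per-CPU init paths, emit the generated template code only once; an atomic exchange makes the check race-free. The same idiom in portable C11 (names are this sketch's own):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int run_once = 0;

static void build_templates(void)
{
        /* First caller sees 0 and proceeds; everyone else sees 1 and bails. */
        if (atomic_exchange(&run_once, 1))
                return;

        puts("generating page-clear/copy templates (done exactly once)");
}

int main(void)
{
        build_templates();   /* does the work   */
        build_templates();   /* silently no-ops */
        return 0;
}
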
index a63d1ed0827fefe36520b2d21877b5bd6a6767f4..4a13c150f31b18d3317c9e0e7e12ffa39bb561e4 100644 (file)
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
 #endif
 
        local_irq_save(flags);
-       old_ctx = read_c0_entryhi() & ASID_MASK;
+       old_ctx = ASID_MASK(read_c0_entryhi());
        write_c0_entrylo0(0);
        entry = r3k_have_wired_reg ? read_c0_wired() : 8;
        for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 #ifdef DEBUG_TLB
                printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
-                       cpu_context(cpu, mm) & ASID_MASK, start, end);
+                       ASID_MASK(cpu_context(cpu, mm)), start, end);
 #endif
                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                if (size <= current_cpu_data.tlbsize) {
-                       int oldpid = read_c0_entryhi() & ASID_MASK;
-                       int newpid = cpu_context(cpu, mm) & ASID_MASK;
+                       int oldpid = ASID_MASK(read_c0_entryhi());
+                       int newpid = ASID_MASK(cpu_context(cpu, mm));
 
                        start &= PAGE_MASK;
                        end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 #ifdef DEBUG_TLB
                printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
 #endif
-               newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
+               newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
                page &= PAGE_MASK;
                local_irq_save(flags);
-               oldpid = read_c0_entryhi() & ASID_MASK;
+               oldpid = ASID_MASK(read_c0_entryhi());
                write_c0_entryhi(page | newpid);
                BARRIER;
                tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
        if (current->active_mm != vma->vm_mm)
                return;
 
-       pid = read_c0_entryhi() & ASID_MASK;
+       pid = ASID_MASK(read_c0_entryhi());
 
 #ifdef DEBUG_TLB
-       if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+       if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
                printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
                       (cpu_context(cpu, vma->vm_mm)), pid);
        }
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 
                local_irq_save(flags);
                /* Save old context and create impossible VPN2 value */
-               old_ctx = read_c0_entryhi() & ASID_MASK;
+               old_ctx = ASID_MASK(read_c0_entryhi());
                old_pagemask = read_c0_pagemask();
                w = read_c0_wired();
                write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 #endif
 
                local_irq_save(flags);
-               old_ctx = read_c0_entryhi() & ASID_MASK;
+               old_ctx = ASID_MASK(read_c0_entryhi());
                write_c0_entrylo0(entrylo0);
                write_c0_entryhi(entryhi);
                write_c0_index(wired);
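
Every 'entryhi & ASID_MASK' in the R3000 TLB code above becomes 'ASID_MASK(entryhi)': the mask moves from a bare constant to a function-style macro, so the same call sites can serve CPUs with different ASID layouts and, together with the tlbex.c changes further down in this diff, allow the value to be fixed up at boot. Schematically, as an illustration of the macro shape rather than the real header:

#include <stdio.h>

/* Old shape: a bare constant, hard-wired at compile time. */
#define ASID_MASK_OLD           0xfc0

/* New shape: a function-style macro taking the register value; the 0xfc0
 * below is only this sketch's example width. */
#define ASID_MASK(entryhi)      ((entryhi) & 0xfc0)

int main(void)
{
        unsigned long entryhi = 0x12345fff;

        printf("old: %#lx  new: %#lx\n",
               entryhi & ASID_MASK_OLD, ASID_MASK(entryhi));
        return 0;
}
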
index 493131c81a29b9a1c45bf44a000596ccdf17035f..09653b290d53356517607ac51388a09412e3d033 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
+#include <linux/module.h>
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
@@ -94,6 +95,7 @@ void local_flush_tlb_all(void)
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
 }
+EXPORT_SYMBOL(local_flush_tlb_all);
 
 /* All entries common to a mm share an asid.  To effectively flush
    these entries, we just bump the asid. */
@@ -285,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 
        ENTER_CRITICAL(flags);
 
-       pid = read_c0_entryhi() & ASID_MASK;
+       pid = ASID_MASK(read_c0_entryhi());
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
index 91c2499f806a25809259a0b9682667ce2d7f31d5..122f9207f49e7f58871cda1bb681f73370fd1286 100644 (file)
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        if (current->active_mm != vma->vm_mm)
                return;
 
-       pid = read_c0_entryhi() & ASID_MASK;
+       pid = ASID_MASK(read_c0_entryhi());
 
        local_irq_save(flags);
        address &= PAGE_MASK;
index 820e6612d744e199f379419103ddc507bd398696..4d46d37875765a3d3024bdee0d043fae9b7a23f7 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/init.h>
 #include <linux/cache.h>
 
+#include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/war.h>
@@ -305,6 +306,78 @@ static struct uasm_reloc relocs[128] __cpuinitdata;
 static int check_for_high_segbits __cpuinitdata;
 #endif
 
+static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
+                                       unsigned int i_const)
+{
+       unsigned int **p;
+
+       for (p = start; p < stop; p++) {
+#ifndef CONFIG_CPU_MICROMIPS
+               unsigned int *ip;
+
+               ip = *p;
+               *ip = (*ip & 0xffff0000) | i_const;
+#else
+               unsigned short *ip;
+
+               ip = ((unsigned short *)((unsigned int)*p - 1));
+               if ((*ip & 0xf000) == 0x4000) {
+                       *ip &= 0xfff1;
+                       *ip |= (i_const << 1);
+               } else if ((*ip & 0xf000) == 0x6000) {
+                       *ip &= 0xfff1;
+                       *ip |= ((i_const >> 2) << 1);
+               } else {
+                       ip++;
+                       *ip = i_const;
+               }
+#endif
+               local_flush_icache_range((unsigned long)ip,
+                                        (unsigned long)ip + sizeof(*ip));
+       }
+}
+
+#define asid_insn_fixup(section, const)                                        \
+do {                                                                   \
+       extern unsigned int *__start_ ## section;                       \
+       extern unsigned int *__stop_ ## section;                        \
+       insn_fixup(&__start_ ## section, &__stop_ ## section, const);   \
+} while(0)
+
+/*
+ * Caller is assumed to flush the caches before the first context switch.
+ */
+static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
+                                unsigned int version_mask,
+                                unsigned int first_version)
+{
+       extern asmlinkage void handle_ri_rdhwr_vivt(void);
+       unsigned long *vivt_exc;
+
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * Worst case optimised microMIPS addiu instructions support
+        * only a 3-bit immediate value.
+        */
+       if(inc > 7)
+               panic("Invalid ASID increment value!");
+#endif
+       asid_insn_fixup(__asid_inc, inc);
+       asid_insn_fixup(__asid_mask, mask);
+       asid_insn_fixup(__asid_version_mask, version_mask);
+       asid_insn_fixup(__asid_first_version, first_version);
+
+       /* Patch up the 'handle_ri_rdhwr_vivt' handler. */
+       vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
+#ifdef CONFIG_CPU_MICROMIPS
+       vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
+#endif
+       vivt_exc++;
+       *vivt_exc = (*vivt_exc & ~mask) | mask;
+
+       current_cpu_data.asid_cache = first_version;
+}
+
 static int check_for_high_segbits __cpuinitdata;
 
 static unsigned int kscratch_used_mask __cpuinitdata;
@@ -1458,17 +1531,17 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
+u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned;
 
 static void __cpuinit build_r4000_setup_pgd(void)
 {
        const int a0 = 4;
        const int a1 = 5;
-       u32 *p = tlbmiss_handler_setup_pgd;
+       u32 *p = tlbmiss_handler_setup_pgd_array;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
 
-       memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
+       memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
 
@@ -1496,15 +1569,15 @@ static void __cpuinit build_r4000_setup_pgd(void)
                uasm_i_jr(&p, 31);
                UASM_i_MTC0(&p, a0, 31, pgd_reg);
        }
-       if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
-               panic("tlbmiss_handler_setup_pgd space exceeded");
+       if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
+               panic("tlbmiss_handler_setup_pgd_array space exceeded");
        uasm_resolve_relocs(relocs, labels);
-       pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
-                (unsigned int)(p - tlbmiss_handler_setup_pgd));
+       pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n",
+                (unsigned int)(p - tlbmiss_handler_setup_pgd_array));
 
        dump_handler("tlbmiss_handler",
-                    tlbmiss_handler_setup_pgd,
-                    ARRAY_SIZE(tlbmiss_handler_setup_pgd));
+                    tlbmiss_handler_setup_pgd_array,
+                    ARRAY_SIZE(tlbmiss_handler_setup_pgd_array));
 }
 #endif
 
@@ -2030,6 +2103,13 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 
        uasm_l_nopage_tlbl(&l, p);
        build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+       if ((unsigned long)tlb_do_page_fault_0 & 1) {
+               uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
+               uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
+               uasm_i_jr(&p, K0);
+       } else
+#endif
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
        uasm_i_nop(&p);
 
@@ -2077,6 +2157,13 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
 
        uasm_l_nopage_tlbs(&l, p);
        build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+       if ((unsigned long)tlb_do_page_fault_1 & 1) {
+               uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+               uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+               uasm_i_jr(&p, K0);
+       } else
+#endif
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);
 
@@ -2125,6 +2212,13 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
 
        uasm_l_nopage_tlbm(&l, p);
        build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+       if ((unsigned long)tlb_do_page_fault_1 & 1) {
+               uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+               uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+               uasm_i_jr(&p, K0);
+       } else
+#endif
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);
 
@@ -2162,8 +2256,12 @@ void __cpuinit build_tlb_refill_handler(void)
        case CPU_TX3922:
        case CPU_TX3927:
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-               build_r3000_tlb_refill_handler();
+               setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
+               if (cpu_has_local_ebase)
+                       build_r3000_tlb_refill_handler();
                if (!run_once) {
+                       if (!cpu_has_local_ebase)
+                               build_r3000_tlb_refill_handler();
                        build_r3000_tlb_load_handler();
                        build_r3000_tlb_store_handler();
                        build_r3000_tlb_modify_handler();
@@ -2184,6 +2282,11 @@ void __cpuinit build_tlb_refill_handler(void)
                break;
 
        default:
+#ifndef CONFIG_MIPS_MT_SMTC
+               setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
+#else
+               setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
+#endif
                if (!run_once) {
                        scratch_reg = allocate_kscratch();
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -2192,9 +2295,12 @@ void __cpuinit build_tlb_refill_handler(void)
                        build_r4000_tlb_load_handler();
                        build_r4000_tlb_store_handler();
                        build_r4000_tlb_modify_handler();
+                       if (!cpu_has_local_ebase)
+                               build_r4000_tlb_refill_handler();
                        run_once++;
                }
-               build_r4000_tlb_refill_handler();
+               if (cpu_has_local_ebase)
+                       build_r4000_tlb_refill_handler();
        }
 }
 
@@ -2207,7 +2313,7 @@ void __cpuinit flush_tlb_handlers(void)
        local_flush_icache_range((unsigned long)handle_tlbm,
                           (unsigned long)handle_tlbm + sizeof(handle_tlbm));
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-       local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
-                          (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm));
+       local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array,
+                          (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm));
 #endif
 }
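
setup_asid() and insn_fixup() above rewrite the immediate field of already-emitted instructions at boot: each __asid_* section collects the addresses of instructions whose low 16 bits must hold the ASID increment, mask or version constant, and insn_fixup() splices the chosen value in and flushes the icache over the patched word. The gist, modelled on plain 32-bit classic-MIPS words held in an ordinary array (no microMIPS special cases and no real linker-section plumbing):

#include <stdint.h>
#include <stdio.h>

/* Patch the 16-bit immediate of each recorded instruction word.
 * In the kernel the pointers come from a linker section and the icache is
 * flushed afterwards; here they are just array slots. */
static void insn_fixup(uint32_t **start, uint32_t **stop, uint16_t imm)
{
        for (uint32_t **p = start; p < stop; p++)
                **p = (**p & 0xffff0000u) | imm;
}

int main(void)
{
        /* Two andi-style instruction words whose immediate is meant to
         * receive the ASID mask at boot. */
        uint32_t code[2] = { 0x30420000u, 0x308a0000u };
        uint32_t *fixups[2] = { &code[0], &code[1] };

        insn_fixup(&fixups[0], &fixups[2], 0x00ff);   /* e.g. 8-bit ASIDs */

        printf("%08x %08x\n", code[0], code[1]);      /* ...00ff ...00ff  */
        return 0;
}
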
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
new file mode 100644 (file)
index 0000000..162ee6d
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
+ * Copyright (C) 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013   MIPS Technologies, Inc.  All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#define UASM_ISA       _UASM_ISA_MICROMIPS
+#include <asm/uasm.h>
+
+#define RS_MASK                0x1f
+#define RS_SH          16
+#define RT_MASK                0x1f
+#define RT_SH          21
+#define SCIMM_MASK     0x3ff
+#define SCIMM_SH       16
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f)                                    \
+       ((a) << OP_SH                                           \
+        | (b) << RT_SH                                         \
+        | (c) << RS_SH                                         \
+        | (d) << RD_SH                                         \
+        | (e) << RE_SH                                         \
+        | (f) << FUNC_SH)
+
+/* Define these when we are not the ISA the kernel is being compiled with. */
+#ifndef CONFIG_CPU_MICROMIPS
+#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
+#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
+#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
+#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
+#endif
+
+#include "uasm.c"
+
+static struct insn insn_table_MM[] __uasminitdata = {
+       { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
+       { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+       { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
+       { insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+       { insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_beql, 0, 0 },
+       { insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM },
+       { insn_bgezl, 0, 0 },
+       { insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM },
+       { insn_bltzl, 0, 0 },
+       { insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
+       { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
+       { insn_daddu, 0, 0 },
+       { insn_daddiu, 0, 0 },
+       { insn_dmfc0, 0, 0 },
+       { insn_dmtc0, 0, 0 },
+       { insn_dsll, 0, 0 },
+       { insn_dsll32, 0, 0 },
+       { insn_dsra, 0, 0 },
+       { insn_dsrl, 0, 0 },
+       { insn_dsrl32, 0, 0 },
+       { insn_drotr, 0, 0 },
+       { insn_drotr32, 0, 0 },
+       { insn_dsubu, 0, 0 },
+       { insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 },
+       { insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE },
+       { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },
+       { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },
+       { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM },
+       { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
+       { insn_ld, 0, 0 },
+       { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
+       { insn_lld, 0, 0 },
+       { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
+       { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+       { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD },
+       { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
+       { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
+       { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+       { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM },
+       { insn_rfe, 0, 0 },
+       { insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM },
+       { insn_scd, 0, 0 },
+       { insn_sd, 0, 0 },
+       { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
+       { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
+       { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD },
+       { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
+       { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
+       { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+       { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
+       { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
+       { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
+       { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
+       { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
+       { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+       { insn_dins, 0, 0 },
+       { insn_dinsm, 0, 0 },
+       { insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
+       { insn_bbit0, 0, 0 },
+       { insn_bbit1, 0, 0 },
+       { insn_lwx, 0, 0 },
+       { insn_ldx, 0, 0 },
+       { insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __uasminit u32 build_bimm(s32 arg)
+{
+       WARN(arg > 0xffff || arg < -0x10000,
+            KERN_WARNING "Micro-assembler field overflow\n");
+
+       WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+       return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
+}
+
+static inline __uasminit u32 build_jimm(u32 arg)
+{
+
+       WARN(arg & ~((JIMM_MASK << 2) | 1),
+            KERN_WARNING "Micro-assembler field overflow\n");
+
+       return (arg >> 1) & JIMM_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+{
+       struct insn *ip = NULL;
+       unsigned int i;
+       va_list ap;
+       u32 op;
+
+       for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++)
+               if (insn_table_MM[i].opcode == opc) {
+                       ip = &insn_table_MM[i];
+                       break;
+               }
+
+       if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+               panic("Unsupported Micro-assembler instruction %d", opc);
+
+       op = ip->match;
+       va_start(ap, opc);
+       if (ip->fields & RS) {
+               if (opc == insn_mfc0 || opc == insn_mtc0)
+                       op |= build_rt(va_arg(ap, u32));
+               else
+                       op |= build_rs(va_arg(ap, u32));
+       }
+       if (ip->fields & RT) {
+               if (opc == insn_mfc0 || opc == insn_mtc0)
+                       op |= build_rs(va_arg(ap, u32));
+               else
+                       op |= build_rt(va_arg(ap, u32));
+       }
+       if (ip->fields & RD)
+               op |= build_rd(va_arg(ap, u32));
+       if (ip->fields & RE)
+               op |= build_re(va_arg(ap, u32));
+       if (ip->fields & SIMM)
+               op |= build_simm(va_arg(ap, s32));
+       if (ip->fields & UIMM)
+               op |= build_uimm(va_arg(ap, u32));
+       if (ip->fields & BIMM)
+               op |= build_bimm(va_arg(ap, s32));
+       if (ip->fields & JIMM)
+               op |= build_jimm(va_arg(ap, u32));
+       if (ip->fields & FUNC)
+               op |= build_func(va_arg(ap, u32));
+       if (ip->fields & SET)
+               op |= build_set(va_arg(ap, u32));
+       if (ip->fields & SCIMM)
+               op |= build_scimm(va_arg(ap, u32));
+       va_end(ap);
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+       **buf = ((op & 0xffff) << 16) | (op >> 16);
+#else
+       **buf = op;
+#endif
+       (*buf)++;
+}
+
+static inline void __uasminit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+       long laddr = (long)lab->addr;
+       long raddr = (long)rel->addr;
+
+       switch (rel->type) {
+       case R_MIPS_PC16:
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+               *rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
+#else
+               *rel->addr |= build_bimm(laddr - (raddr + 4));
+#endif
+               break;
+
+       default:
+               panic("Unsupported Micro-assembler relocation %d",
+                     rel->type);
+       }
+}
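
Two details of the new microMIPS back end above are worth isolating: a 32-bit microMIPS opcode is streamed as two halfwords with the high halfword first, so on a little-endian kernel build_insn() stores the word halfword-swapped, and branch displacements are halfword-granular, so build_bimm() shifts the byte offset by 1 instead of 2. A self-contained sketch of the halfword swap (the opcode value is arbitrary):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of the little-endian store in the microMIPS build_insn(): the
 * assembled 32-bit opcode is halfword-swapped before being written, so that
 * in memory the high halfword (which carries the major opcode) still
 * precedes the low halfword, as the microMIPS stream requires. */
static uint32_t mm_swap_for_le(uint32_t op)
{
        return ((op & 0xffff) << 16) | (op >> 16);
}

int main(void)
{
        uint32_t op = 0xfc1a2b3cu;          /* arbitrary 32-bit opcode */
        uint32_t stored = mm_swap_for_le(op);
        uint16_t halves[2];

        /* On a little-endian host this reproduces the instruction stream. */
        memcpy(halves, &stored, sizeof(stored));
        printf("in memory: %04x %04x (major-opcode half first on LE)\n",
               halves[0], halves[1]);
        return 0;
}
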
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
new file mode 100644 (file)
index 0000000..5fcdd8f
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
+ * Copyright (C) 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#define UASM_ISA       _UASM_ISA_CLASSIC
+#include <asm/uasm.h>
+
+#define RS_MASK                0x1f
+#define RS_SH          21
+#define RT_MASK                0x1f
+#define RT_SH          16
+#define SCIMM_MASK     0xfffff
+#define SCIMM_SH       6
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f)                                    \
+       ((a) << OP_SH                                           \
+        | (b) << RS_SH                                         \
+        | (c) << RT_SH                                         \
+        | (d) << RD_SH                                         \
+        | (e) << RE_SH                                         \
+        | (f) << FUNC_SH)
+
+/* Define these when we are not the ISA the kernel is being compiled with. */
+#ifdef CONFIG_CPU_MICROMIPS
+#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
+#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
+#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
+#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
+#endif
+
+#include "uasm.c"
+
+static struct insn insn_table[] __uasminitdata = {
+       { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+       { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
+       { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+       { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
+       { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
+       { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
+       { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
+       { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
+       { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+       { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
+       { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
+       { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
+       { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
+       { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
+       { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
+       { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
+       { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
+       { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
+       { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
+       { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
+       { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
+       { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
+       { insn_eret,  M(cop0_op, cop_op, 0, 0, 0, eret_op),  0 },
+       { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
+       { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
+       { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
+       { insn_jal,  M(jal_op, 0, 0, 0, 0, 0),  JIMM },
+       { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
+       { insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
+       { insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
+       { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
+       { insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
+       { insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET},
+       { insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
+       { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
+       { insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
+       { insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
+       { insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
+       { insn_scd,  M(scd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
+       { insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
+       { insn_srl,  M(spec_op, 0, 0, 0, 0, srl_op),  RT | RD | RE },
+       { insn_subu,  M(spec_op, 0, 0, 0, 0, subu_op),  RS | RT | RD },
+       { insn_sw,  M(sw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
+       { insn_tlbp,  M(cop0_op, cop_op, 0, 0, 0, tlbp_op),  0 },
+       { insn_tlbr,  M(cop0_op, cop_op, 0, 0, 0, tlbr_op),  0 },
+       { insn_tlbwi,  M(cop0_op, cop_op, 0, 0, 0, tlbwi_op),  0 },
+       { insn_tlbwr,  M(cop0_op, cop_op, 0, 0, 0, tlbwr_op),  0 },
+       { insn_xori,  M(xori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
+       { insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
+       { insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __uasminit u32 build_bimm(s32 arg)
+{
+       WARN(arg > 0x1ffff || arg < -0x20000,
+            KERN_WARNING "Micro-assembler field overflow\n");
+
+       WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+       return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
+}
+
+static inline __uasminit u32 build_jimm(u32 arg)
+{
+       WARN(arg & ~(JIMM_MASK << 2),
+            KERN_WARNING "Micro-assembler field overflow\n");
+
+       return (arg >> 2) & JIMM_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+{
+       struct insn *ip = NULL;
+       unsigned int i;
+       va_list ap;
+       u32 op;
+
+       for (i = 0; insn_table[i].opcode != insn_invalid; i++)
+               if (insn_table[i].opcode == opc) {
+                       ip = &insn_table[i];
+                       break;
+               }
+
+       if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+               panic("Unsupported Micro-assembler instruction %d", opc);
+
+       op = ip->match;
+       va_start(ap, opc);
+       if (ip->fields & RS)
+               op |= build_rs(va_arg(ap, u32));
+       if (ip->fields & RT)
+               op |= build_rt(va_arg(ap, u32));
+       if (ip->fields & RD)
+               op |= build_rd(va_arg(ap, u32));
+       if (ip->fields & RE)
+               op |= build_re(va_arg(ap, u32));
+       if (ip->fields & SIMM)
+               op |= build_simm(va_arg(ap, s32));
+       if (ip->fields & UIMM)
+               op |= build_uimm(va_arg(ap, u32));
+       if (ip->fields & BIMM)
+               op |= build_bimm(va_arg(ap, s32));
+       if (ip->fields & JIMM)
+               op |= build_jimm(va_arg(ap, u32));
+       if (ip->fields & FUNC)
+               op |= build_func(va_arg(ap, u32));
+       if (ip->fields & SET)
+               op |= build_set(va_arg(ap, u32));
+       if (ip->fields & SCIMM)
+               op |= build_scimm(va_arg(ap, u32));
+       va_end(ap);
+
+       **buf = op;
+       (*buf)++;
+}
+
+static inline void __uasminit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+       long laddr = (long)lab->addr;
+       long raddr = (long)rel->addr;
+
+       switch (rel->type) {
+       case R_MIPS_PC16:
+               *rel->addr |= build_bimm(laddr - (raddr + 4));
+               break;
+
+       default:
+               panic("Unsupported Micro-assembler relocation %d",
+                     rel->type);
+       }
+}
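
Comparing the two new back ends: classic MIPS branch displacements count 32-bit words, so build_bimm() here shifts the byte offset right by 2 and tolerates roughly +/-128 KiB, while the microMIPS variant earlier in this diff shifts by 1 and tolerates roughly +/-64 KiB. A side-by-side sketch of just that encoding step (it assumes arithmetic right shift for negative offsets, as the kernel code does):

#include <stdint.h>
#include <stdio.h>

/* Classic MIPS: branch offsets are counted in 32-bit instruction words. */
static uint32_t bimm_classic(int32_t byte_off)
{
        return ((byte_off < 0) ? (1u << 15) : 0) | ((byte_off >> 2) & 0x7fff);
}

/* microMIPS: branch offsets are counted in 16-bit halfwords. */
static uint32_t bimm_micromips(int32_t byte_off)
{
        return ((byte_off < 0) ? (1u << 15) : 0) | ((byte_off >> 1) & 0x7fff);
}

int main(void)
{
        int32_t off = -8;   /* branch 8 bytes backwards */

        printf("classic:   %#06x\n", bimm_classic(off));
        printf("micromips: %#06x\n", bimm_micromips(off));
        return 0;
}
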
index 942ff6c2eba27ac256d812c64a1b5752b164f712..7eb5e4355d25c467fd1241ffda14715dfc69c1c7 100644 (file)
  * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
  * Copyright (C) 2005, 2007  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
  */
 
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <asm/inst.h>
-#include <asm/elf.h>
-#include <asm/bugs.h>
-#include <asm/uasm.h>
-
 enum fields {
        RS = 0x001,
        RT = 0x002,
@@ -37,10 +29,6 @@ enum fields {
 
 #define OP_MASK                0x3f
 #define OP_SH          26
-#define RS_MASK                0x1f
-#define RS_SH          21
-#define RT_MASK                0x1f
-#define RT_SH          16
 #define RD_MASK                0x1f
 #define RD_SH          11
 #define RE_MASK                0x1f
@@ -53,8 +41,6 @@ enum fields {
 #define FUNC_SH                0
 #define SET_MASK       0x7
 #define SET_SH         0
-#define SCIMM_MASK     0xfffff
-#define SCIMM_SH       6
 
 enum opcode {
        insn_invalid,
@@ -77,85 +63,6 @@ struct insn {
        enum fields fields;
 };
 
-/* This macro sets the non-variable bits of an instruction. */
-#define M(a, b, c, d, e, f)                                    \
-       ((a) << OP_SH                                           \
-        | (b) << RS_SH                                         \
-        | (c) << RT_SH                                         \
-        | (d) << RD_SH                                         \
-        | (e) << RE_SH                                         \
-        | (f) << FUNC_SH)
-
-static struct insn insn_table[] __uasminitdata = {
-       { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
-       { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
-       { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
-       { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
-       { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
-       { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
-       { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
-       { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
-       { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
-       { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
-       { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
-       { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
-       { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
-       { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
-       { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
-       { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
-       { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
-       { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
-       { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
-       { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
-       { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
-       { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
-       { insn_eret,  M(cop0_op, cop_op, 0, 0, 0, eret_op),  0 },
-       { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
-       { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
-       { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
-       { insn_jal,  M(jal_op, 0, 0, 0, 0, 0),  JIMM },
-       { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
-       { insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
-       { insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
-       { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
-       { insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
-       { insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET},
-       { insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
-       { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
-       { insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
-       { insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
-       { insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
-       { insn_scd,  M(scd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
-       { insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
-       { insn_srl,  M(spec_op, 0, 0, 0, 0, srl_op),  RT | RD | RE },
-       { insn_subu,  M(spec_op, 0, 0, 0, 0, subu_op),  RS | RT | RD },
-       { insn_sw,  M(sw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
-       { insn_tlbp,  M(cop0_op, cop_op, 0, 0, 0, tlbp_op),  0 },
-       { insn_tlbr,  M(cop0_op, cop_op, 0, 0, 0, tlbr_op),  0 },
-       { insn_tlbwi,  M(cop0_op, cop_op, 0, 0, 0, tlbwi_op),  0 },
-       { insn_tlbwr,  M(cop0_op, cop_op, 0, 0, 0, tlbwr_op),  0 },
-       { insn_xori,  M(xori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
-       { insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
-       { insn_invalid, 0, 0 }
-};
-
-#undef M
-
 static inline __uasminit u32 build_rs(u32 arg)
 {
        WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -199,24 +106,6 @@ static inline __uasminit u32 build_uimm(u32 arg)
        return arg & IMM_MASK;
 }
 
-static inline __uasminit u32 build_bimm(s32 arg)
-{
-       WARN(arg > 0x1ffff || arg < -0x20000,
-            KERN_WARNING "Micro-assembler field overflow\n");
-
-       WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
-
-       return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
-}
-
-static inline __uasminit u32 build_jimm(u32 arg)
-{
-       WARN(arg & ~(JIMM_MASK << 2),
-            KERN_WARNING "Micro-assembler field overflow\n");
-
-       return (arg >> 2) & JIMM_MASK;
-}
-
 static inline __uasminit u32 build_scimm(u32 arg)
 {
        WARN(arg & ~SCIMM_MASK,
@@ -239,55 +128,7 @@ static inline __uasminit u32 build_set(u32 arg)
        return arg & SET_MASK;
 }
 
-/*
- * The order of opcode arguments is implicitly left to right,
- * starting with RS and ending with FUNC or IMM.
- */
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
-{
-       struct insn *ip = NULL;
-       unsigned int i;
-       va_list ap;
-       u32 op;
-
-       for (i = 0; insn_table[i].opcode != insn_invalid; i++)
-               if (insn_table[i].opcode == opc) {
-                       ip = &insn_table[i];
-                       break;
-               }
-
-       if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
-               panic("Unsupported Micro-assembler instruction %d", opc);
-
-       op = ip->match;
-       va_start(ap, opc);
-       if (ip->fields & RS)
-               op |= build_rs(va_arg(ap, u32));
-       if (ip->fields & RT)
-               op |= build_rt(va_arg(ap, u32));
-       if (ip->fields & RD)
-               op |= build_rd(va_arg(ap, u32));
-       if (ip->fields & RE)
-               op |= build_re(va_arg(ap, u32));
-       if (ip->fields & SIMM)
-               op |= build_simm(va_arg(ap, s32));
-       if (ip->fields & UIMM)
-               op |= build_uimm(va_arg(ap, u32));
-       if (ip->fields & BIMM)
-               op |= build_bimm(va_arg(ap, s32));
-       if (ip->fields & JIMM)
-               op |= build_jimm(va_arg(ap, u32));
-       if (ip->fields & FUNC)
-               op |= build_func(va_arg(ap, u32));
-       if (ip->fields & SET)
-               op |= build_set(va_arg(ap, u32));
-       if (ip->fields & SCIMM)
-               op |= build_scimm(va_arg(ap, u32));
-       va_end(ap);
-
-       **buf = op;
-       (*buf)++;
-}
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...);
 
 #define I_u1u2u3(op)                                   \
 Ip_u1u2u3(op)                                          \
@@ -445,7 +286,7 @@ I_u3u1u2(_ldx)
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #include <asm/octeon/octeon.h>
-void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
+void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
                            unsigned int c)
 {
        if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -457,21 +298,21 @@ void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
        else
                build_insn(buf, insn_pref, c, a, b);
 }
-UASM_EXPORT_SYMBOL(uasm_i_pref);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref));
 #else
 I_u2s3u1(_pref)
 #endif
 
 /* Handle labels. */
-void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
 {
        (*lab)->addr = addr;
        (*lab)->lab = lid;
        (*lab)++;
 }
-UASM_EXPORT_SYMBOL(uasm_build_label);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
 
-int __uasminit uasm_in_compat_space_p(long addr)
+int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
 {
        /* Is this address in 32bit compat space? */
 #ifdef CONFIG_64BIT
@@ -480,7 +321,7 @@ int __uasminit uasm_in_compat_space_p(long addr)
        return 1;
 #endif
 }
-UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
 
 static int __uasminit uasm_rel_highest(long val)
 {
@@ -500,77 +341,66 @@ static int __uasminit uasm_rel_higher(long val)
 #endif
 }
 
-int __uasminit uasm_rel_hi(long val)
+int __uasminit ISAFUNC(uasm_rel_hi)(long val)
 {
        return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
 }
-UASM_EXPORT_SYMBOL(uasm_rel_hi);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
 
-int __uasminit uasm_rel_lo(long val)
+int __uasminit ISAFUNC(uasm_rel_lo)(long val)
 {
        return ((val & 0xffff) ^ 0x8000) - 0x8000;
 }
-UASM_EXPORT_SYMBOL(uasm_rel_lo);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
 
-void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
 {
-       if (!uasm_in_compat_space_p(addr)) {
-               uasm_i_lui(buf, rs, uasm_rel_highest(addr));
+       if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
+               ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
                if (uasm_rel_higher(addr))
-                       uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
-               if (uasm_rel_hi(addr)) {
-                       uasm_i_dsll(buf, rs, rs, 16);
-                       uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr));
-                       uasm_i_dsll(buf, rs, rs, 16);
+                       ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr));
+               if (ISAFUNC(uasm_rel_hi(addr))) {
+                       ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
+                       ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
+                                       ISAFUNC(uasm_rel_hi)(addr));
+                       ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
                } else
-                       uasm_i_dsll32(buf, rs, rs, 0);
+                       ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0);
        } else
-               uasm_i_lui(buf, rs, uasm_rel_hi(addr));
+               ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr)));
 }
-UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
+UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
 
-void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
 {
-       UASM_i_LA_mostly(buf, rs, addr);
-       if (uasm_rel_lo(addr)) {
-               if (!uasm_in_compat_space_p(addr))
-                       uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr));
+       ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
+       if (ISAFUNC(uasm_rel_lo(addr))) {
+               if (!ISAFUNC(uasm_in_compat_space_p)(addr))
+                       ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
+                                       ISAFUNC(uasm_rel_lo(addr)));
                else
-                       uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
+                       ISAFUNC(uasm_i_addiu)(buf, rs, rs,
+                                       ISAFUNC(uasm_rel_lo(addr)));
        }
 }
-UASM_EXPORT_SYMBOL(UASM_i_LA);
+UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
 
 /* Handle relocations. */
 void __uasminit
-uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
+ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
 {
        (*rel)->addr = addr;
        (*rel)->type = R_MIPS_PC16;
        (*rel)->lab = lid;
        (*rel)++;
 }
-UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
 
 static inline void __uasminit
-__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
-{
-       long laddr = (long)lab->addr;
-       long raddr = (long)rel->addr;
-
-       switch (rel->type) {
-       case R_MIPS_PC16:
-               *rel->addr |= build_bimm(laddr - (raddr + 4));
-               break;
-
-       default:
-               panic("Unsupported Micro-assembler relocation %d",
-                     rel->type);
-       }
-}
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
 
 void __uasminit
-uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
 {
        struct uasm_label *l;
 
@@ -579,40 +409,40 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
                        if (rel->lab == l->lab)
                                __resolve_relocs(rel, l);
 }
-UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
 
 void __uasminit
-uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
+ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
 {
        for (; rel->lab != UASM_LABEL_INVALID; rel++)
                if (rel->addr >= first && rel->addr < end)
                        rel->addr += off;
 }
-UASM_EXPORT_SYMBOL(uasm_move_relocs);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
 
 void __uasminit
-uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
+ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off)
 {
        for (; lab->lab != UASM_LABEL_INVALID; lab++)
                if (lab->addr >= first && lab->addr < end)
                        lab->addr += off;
 }
-UASM_EXPORT_SYMBOL(uasm_move_labels);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
 
 void __uasminit
-uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
+ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
                  u32 *end, u32 *target)
 {
        long off = (long)(target - first);
 
        memcpy(target, first, (end - first) * sizeof(u32));
 
-       uasm_move_relocs(rel, first, end, off);
-       uasm_move_labels(lab, first, end, off);
+       ISAFUNC(uasm_move_relocs(rel, first, end, off));
+       ISAFUNC(uasm_move_labels(lab, first, end, off));
 }
-UASM_EXPORT_SYMBOL(uasm_copy_handler);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
 
-int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
 {
        for (; rel->lab != UASM_LABEL_INVALID; rel++) {
                if (rel->addr == addr
@@ -623,88 +453,88 @@ int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
 
        return 0;
 }
-UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
 
 /* Convenience functions for labeled branches. */
 void __uasminit
-uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bltz(p, reg, 0);
+       ISAFUNC(uasm_i_bltz)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bltz);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
 
 void __uasminit
-uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
+ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_b(p, 0);
+       ISAFUNC(uasm_i_b)(p, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_b);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
 
 void __uasminit
-uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_beqz(p, reg, 0);
+       ISAFUNC(uasm_i_beqz)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_beqz);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
 
 void __uasminit
-uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_beqzl(p, reg, 0);
+       ISAFUNC(uasm_i_beqzl)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_beqzl);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
 
 void __uasminit
-uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
        unsigned int reg2, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bne(p, reg1, reg2, 0);
+       ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bne);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
 
 void __uasminit
-uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bnez(p, reg, 0);
+       ISAFUNC(uasm_i_bnez)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bnez);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
 
 void __uasminit
-uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bgezl(p, reg, 0);
+       ISAFUNC(uasm_i_bgezl)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bgezl);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
 
 void __uasminit
-uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bgez(p, reg, 0);
+       ISAFUNC(uasm_i_bgez)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bgez);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
 
 void __uasminit
-uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
              unsigned int bit, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bbit0(p, reg, bit, 0);
+       ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bbit0);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
 
 void __uasminit
-uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
              unsigned int bit, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bbit1(p, reg, bit, 0);
+       ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bbit1);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));
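
Every rename in these hunks funnels through the ISAFUNC() wrapper so the same micro-assembler source can be built once per ISA. A self-contained sketch of the token-pasting idea (the macro bodies and the BUILD_MICROMIPS switch are assumptions, not the kernel's uasm.h):

#include <stdio.h>

#ifdef BUILD_MICROMIPS			/* hypothetical compile-time switch */
#define ISAFUNC(x) mm_##x		/* microMIPS build emits mm_-prefixed symbols */
#else
#define ISAFUNC(x) x			/* classic MIPS build keeps the plain names */
#endif

void ISAFUNC(emit_nop)(void)		/* expands to emit_nop or mm_emit_nop */
{
	puts("nop emitted");
}

int main(void)
{
	ISAFUNC(emit_nop)();		/* call sites are rewritten the same way */
	return 0;
}
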
index 6079ef33b5f0670e0e3f4de945bd287a821a7381..0388fc8b5613430152a0f097c7e7d36e15aebdc6 100644 (file)
@@ -5,9 +5,8 @@
 # Copyright (C) 2008 Wind River Systems, Inc.
 #   written by Ralf Baechle <ralf@linux-mips.org>
 #
-obj-y                          := malta-amon.o malta-cmdline.o \
-                                  malta-display.o malta-init.o malta-int.o \
-                                  malta-memory.o malta-platform.o \
+obj-y                          := malta-amon.o malta-display.o malta-init.o \
+                                  malta-int.o malta-memory.o malta-platform.o \
                                   malta-reset.o malta-setup.o malta-time.o
 
 obj-$(CONFIG_EARLY_PRINTK)     += malta-console.o
index 5b548b5a4fcf1f5bf116d6dd59e4c42755193c4c..2cc72c9b38e3ba508a1d9b6596da4403f67e6539 100644 (file)
@@ -3,5 +3,9 @@
 #
 platform-$(CONFIG_MIPS_MALTA)  += mti-malta/
 cflags-$(CONFIG_MIPS_MALTA)    += -I$(srctree)/arch/mips/include/asm/mach-malta
-load-$(CONFIG_MIPS_MALTA)      += 0xffffffff80100000
+ifdef CONFIG_KVM_GUEST
+    load-$(CONFIG_MIPS_MALTA)  += 0x0000000040100000
+else
+    load-$(CONFIG_MIPS_MALTA)  += 0xffffffff80100000
+endif
 all-$(CONFIG_MIPS_MALTA)       := $(COMPRESSION_FNAME).bin
diff --git a/arch/mips/mti-malta/malta-cmdline.c b/arch/mips/mti-malta/malta-cmdline.c
deleted file mode 100644 (file)
index 5576a30..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Kernel command line creation using the prom monitor (YAMON) argc/argv.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-
-extern int prom_argc;
-extern int *_prom_argv;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension.
- */
-#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
-
-char * __init prom_getcmdline(void)
-{
-       return &(arcs_cmdline[0]);
-}
-
-
-void  __init prom_init_cmdline(void)
-{
-       char *cp;
-       int actr;
-
-       actr = 1; /* Always ignore argv[0] */
-
-       cp = &(arcs_cmdline[0]);
-       while(actr < prom_argc) {
-               strcpy(cp, prom_argv(actr));
-               cp += strlen(prom_argv(actr));
-               *cp++ = ' ';
-               actr++;
-       }
-       if (cp != &(arcs_cmdline[0])) {
-               /* get rid of trailing space */
-               --cp;
-               *cp = '\0';
-       }
-}
index 9bc58a24e80a637307e13058eab6d54eacf91c88..d4f807191ecd74694075ba3ca2748ff0b80bcbb1 100644 (file)
@@ -1,28 +1,20 @@
 /*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
  * Display routines for display messages in MIPS boards ascii display.
+ *
+ * Copyright (C) 1999,2000,2012  MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ *          Steven J. Hill <sjhill@mips.com>
  */
-
 #include <linux/compiler.h>
 #include <linux/timer.h>
-#include <asm/io.h>
+#include <linux/io.h>
+
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 
 extern const char display_string[];
 static unsigned int display_count;
@@ -36,11 +28,11 @@ void mips_display_message(const char *str)
        if (unlikely(display == NULL))
                display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int));
 
-       for (i = 0; i <= 14; i=i+2) {
-                if (*str)
-                        __raw_writel(*str++, display + i);
-                else
-                        __raw_writel(' ', display + i);
+       for (i = 0; i <= 14; i += 2) {
+               if (*str)
+                       __raw_writel(*str++, display + i);
+               else
+                       __raw_writel(' ', display + i);
        }
 }
 
index c2cbce9e435e8ffc74d25a9cfff61acbc38dc1b4..ff8caffd3266ea0a1deaaa1ee1093eac50b600b9 100644 (file)
@@ -1,54 +1,28 @@
 /*
- * Copyright (C) 1999, 2000, 2004, 2005         MIPS Technologies, Inc.
- *     All rights reserved.
- *     Authors: Carsten Langgaard <carstenl@mips.com>
- *              Maciej W. Rozycki <macro@mips.com>
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
  * PROM library initialisation code.
+ *
+ * Copyright (C) 1999,2000,2004,2005,2012  MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ *         Maciej W. Rozycki <macro@mips.com>
+ *          Steven J. Hill <sjhill@mips.com>
  */
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 
-#include <asm/bootinfo.h>
-#include <asm/gt64120.h>
-#include <asm/io.h>
 #include <asm/cacheflush.h>
 #include <asm/smp-ops.h>
 #include <asm/traps.h>
-
+#include <asm/fw/fw.h>
 #include <asm/gcmpregs.h>
-#include <asm/mips-boards/prom.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/bonito64.h>
-#include <asm/mips-boards/msc01_pci.h>
-
 #include <asm/mips-boards/malta.h>
 
-int prom_argc;
-int *_prom_argv, *_prom_envp;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension, if running in 64-bit mode.
- */
-#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
-
-int init_debug;
-
 static int mips_revision_corid;
 int mips_revision_sconid;
 
@@ -62,74 +36,6 @@ unsigned long _pcictrl_gt64120;
 /* MIPS System controller register base */
 unsigned long _pcictrl_msc;
 
-char *prom_getenv(char *envname)
-{
-       /*
-        * Return a pointer to the given environment variable.
-        * In 64-bit mode: we're using 64-bit pointers, but all pointers
-        * in the PROM structures are only 32-bit, so we need some
-        * workarounds, if we are running in 64-bit mode.
-        */
-       int i, index=0;
-
-       i = strlen(envname);
-
-       while (prom_envp(index)) {
-               if(strncmp(envname, prom_envp(index), i) == 0) {
-                       return(prom_envp(index+1));
-               }
-               index += 2;
-       }
-
-       return NULL;
-}
-
-static inline unsigned char str2hexnum(unsigned char c)
-{
-       if (c >= '0' && c <= '9')
-               return c - '0';
-       if (c >= 'a' && c <= 'f')
-               return c - 'a' + 10;
-       return 0; /* foo */
-}
-
-static inline void str2eaddr(unsigned char *ea, unsigned char *str)
-{
-       int i;
-
-       for (i = 0; i < 6; i++) {
-               unsigned char num;
-
-               if((*str == '.') || (*str == ':'))
-                       str++;
-               num = str2hexnum(*str++) << 4;
-               num |= (str2hexnum(*str++));
-               ea[i] = num;
-       }
-}
-
-int get_ethernet_addr(char *ethernet_addr)
-{
-       char *ethaddr_str;
-
-       ethaddr_str = prom_getenv("ethaddr");
-       if (!ethaddr_str) {
-               printk("ethaddr not set in boot prom\n");
-               return -1;
-       }
-       str2eaddr(ethernet_addr, ethaddr_str);
-
-       if (init_debug > 1) {
-               int i;
-               printk("get_ethernet_addr: ");
-               for (i=0; i<5; i++)
-                       printk("%02x:", (unsigned char)*(ethernet_addr+i));
-               printk("%02x\n", *(ethernet_addr+i));
-       }
-
-       return 0;
-}
-
 #ifdef CONFIG_SERIAL_8250_CONSOLE
 static void __init console_config(void)
 {
@@ -138,17 +44,23 @@ static void __init console_config(void)
        char parity = '\0', bits = '\0', flow = '\0';
        char *s;
 
-       if ((strstr(prom_getcmdline(), "console=")) == NULL) {
-               s = prom_getenv("modetty0");
+       if ((strstr(fw_getcmdline(), "console=")) == NULL) {
+               s = fw_getenv("modetty0");
                if (s) {
                        while (*s >= '0' && *s <= '9')
                                baud = baud*10 + *s++ - '0';
-                       if (*s == ',') s++;
-                       if (*s) parity = *s++;
-                       if (*s == ',') s++;
-                       if (*s) bits = *s++;
-                       if (*s == ',') s++;
-                       if (*s == 'h') flow = 'r';
+                       if (*s == ',')
+                               s++;
+                       if (*s)
+                               parity = *s++;
+                       if (*s == ',')
+                               s++;
+                       if (*s)
+                               bits = *s++;
+                       if (*s == ',')
+                               s++;
+                       if (*s == 'h')
+                               flow = 'r';
                }
                if (baud == 0)
                        baud = 38400;
@@ -158,8 +70,9 @@ static void __init console_config(void)
                        bits = '8';
                if (flow == '\0')
                        flow = 'r';
-               sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow);
-               strcat(prom_getcmdline(), console_string);
+               sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
+                       parity, bits, flow);
+               strcat(fw_getcmdline(), console_string);
                pr_info("Config serial console:%s\n", console_string);
        }
 }
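
console_config() above rewrites YAMON's modetty0 setting ("baud,parity,bits,flow") into a Linux console= option; the same parse pulled out into a standalone sketch (the sample string and the ttyS0 default are assumptions):

#include <stdio.h>

int main(void)
{
	const char *s = "38400,n,8,hw";		/* hypothetical modetty0 value */
	char out[40];
	int baud = 0;
	char parity = '\0', bits = '\0', flow = '\0';

	while (*s >= '0' && *s <= '9')
		baud = baud * 10 + *s++ - '0';
	if (*s == ',')
		s++;
	if (*s)
		parity = *s++;
	if (*s == ',')
		s++;
	if (*s)
		bits = *s++;
	if (*s == ',')
		s++;
	if (*s == 'h')
		flow = 'r';			/* hardware flow control -> RTS/CTS */

	if (baud == 0)
		baud = 38400;
	if (parity != 'n' && parity != 'o' && parity != 'e')
		parity = 'n';
	if (bits != '7' && bits != '8')
		bits = '8';
	if (flow == '\0')
		flow = 'r';

	snprintf(out, sizeof(out), "console=ttyS0,%d%c%c%c", baud, parity, bits, flow);
	puts(out);				/* -> console=ttyS0,38400n8r */
	return 0;
}
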
@@ -193,10 +106,6 @@ extern struct plat_smp_ops msmtc_smp_ops;
 
 void __init prom_init(void)
 {
-       prom_argc = fw_arg0;
-       _prom_argv = (int *) fw_arg1;
-       _prom_envp = (int *) fw_arg2;
-
        mips_display_message("LINUX");
 
        /*
@@ -306,7 +215,7 @@ void __init prom_init(void)
        case MIPS_REVISION_SCON_SOCIT:
        case MIPS_REVISION_SCON_ROCIT:
                _pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000);
-       mips_pci_controller:
+mips_pci_controller:
                mb();
                MSC_READ(MSC01_PCI_CFG, data);
                MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT);
@@ -348,13 +257,13 @@ void __init prom_init(void)
        default:
                /* Unknown system controller */
                mips_display_message("SC Error");
-               while (1);   /* We die here... */
+               while (1);      /* We die here... */
        }
        board_nmi_handler_setup = mips_nmi_setup;
        board_ejtag_handler_setup = mips_ejtag_setup;
 
-       prom_init_cmdline();
-       prom_meminit();
+       fw_init_cmdline();
+       fw_meminit();
 #ifdef CONFIG_SERIAL_8250_CONSOLE
        console_config();
 #endif
index e364af70e6cf5cc255a9be51ca569d7cb911dbf0..0a1339ac3ec8f8e6c3d9be6c4dc3b272d7044f98 100644 (file)
@@ -47,7 +47,6 @@
 #include <asm/setup.h>
 
 int gcmp_present = -1;
-int gic_present;
 static unsigned long _msc01_biu_base;
 static unsigned long _gcmp_base;
 static unsigned int ipi_map[NR_CPUS];
@@ -134,6 +133,9 @@ static void malta_ipi_irqdispatch(void)
 {
        int irq;
 
+       if (gic_compare_int())
+               do_IRQ(MIPS_GIC_IRQ_BASE);
+
        irq = gic_get_int();
        if (irq < 0)
                return;  /* interrupt has already been cleared */
index f3d43aa023a9c9215dc06334204c1f82e122ef73..1f73d63e92a765d3ab1d829244a19e57dab8bd8e 100644 (file)
@@ -1,73 +1,45 @@
 /*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
  * PROM library functions for acquiring/using memory descriptors given to
  * us from the YAMON.
+ *
+ * Copyright (C) 1999,2000,2012  MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ *          Steven J. Hill <sjhill@mips.com>
  */
 #include <linux/init.h>
-#include <linux/mm.h>
 #include <linux/bootmem.h>
-#include <linux/pfn.h>
 #include <linux/string.h>
 
 #include <asm/bootinfo.h>
-#include <asm/page.h>
 #include <asm/sections.h>
+#include <asm/fw/fw.h>
 
-#include <asm/mips-boards/prom.h>
-
-/*#define DEBUG*/
-
-enum yamon_memtypes {
-       yamon_dontuse,
-       yamon_prom,
-       yamon_free,
-};
-static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
-
-#ifdef DEBUG
-static char *mtypes[3] = {
-       "Dont use memory",
-       "YAMON PROM memory",
-       "Free memory",
-};
-#endif
+static fw_memblock_t mdesc[FW_MAX_MEMBLOCKS];
 
 /* determined physical memory size, not overridden by command line args         */
 unsigned long physical_memsize = 0L;
 
-static struct prom_pmemblock * __init prom_getmdesc(void)
+fw_memblock_t * __init fw_getmdesc(void)
 {
-       char *memsize_str;
+       char *memsize_str, *ptr;
        unsigned int memsize;
-       char *ptr;
        static char cmdline[COMMAND_LINE_SIZE] __initdata;
+       long val;
+       int tmp;
 
        /* otherwise look in the environment */
-       memsize_str = prom_getenv("memsize");
+       memsize_str = fw_getenv("memsize");
        if (!memsize_str) {
-               printk(KERN_WARNING
-                      "memsize not set in boot prom, set to default (32Mb)\n");
+               pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
                physical_memsize = 0x02000000;
        } else {
-#ifdef DEBUG
-               pr_debug("prom_memsize = %s\n", memsize_str);
-#endif
-               physical_memsize = simple_strtol(memsize_str, NULL, 0);
+               tmp = kstrtol(memsize_str, 0, &val);
+               physical_memsize = (unsigned long)val;
        }
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
@@ -90,11 +62,11 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
 
        memset(mdesc, 0, sizeof(mdesc));
 
-       mdesc[0].type = yamon_dontuse;
+       mdesc[0].type = fw_dontuse;
        mdesc[0].base = 0x00000000;
        mdesc[0].size = 0x00001000;
 
-       mdesc[1].type = yamon_prom;
+       mdesc[1].type = fw_code;
        mdesc[1].base = 0x00001000;
        mdesc[1].size = 0x000ef000;
 
@@ -105,55 +77,45 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
         * This means that this area can't be used as DMA memory for PCI
         * devices.
         */
-       mdesc[2].type = yamon_dontuse;
+       mdesc[2].type = fw_dontuse;
        mdesc[2].base = 0x000f0000;
        mdesc[2].size = 0x00010000;
 
-       mdesc[3].type = yamon_dontuse;
+       mdesc[3].type = fw_dontuse;
        mdesc[3].base = 0x00100000;
-       mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - mdesc[3].base;
+       mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
+               mdesc[3].base;
 
-       mdesc[4].type = yamon_free;
+       mdesc[4].type = fw_free;
        mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
        mdesc[4].size = memsize - mdesc[4].base;
 
        return &mdesc[0];
 }
 
-static int __init prom_memtype_classify(unsigned int type)
+static int __init fw_memtype_classify(unsigned int type)
 {
        switch (type) {
-       case yamon_free:
+       case fw_free:
                return BOOT_MEM_RAM;
-       case yamon_prom:
+       case fw_code:
                return BOOT_MEM_ROM_DATA;
        default:
                return BOOT_MEM_RESERVED;
        }
 }
 
-void __init prom_meminit(void)
+void __init fw_meminit(void)
 {
-       struct prom_pmemblock *p;
+       fw_memblock_t *p;
 
-#ifdef DEBUG
-       pr_debug("YAMON MEMORY DESCRIPTOR dump:\n");
-       p = prom_getmdesc();
-       while (p->size) {
-               int i = 0;
-               pr_debug("[%d,%p]: base<%08lx> size<%08lx> type<%s>\n",
-                        i, p, p->base, p->size, mtypes[p->type]);
-               p++;
-               i++;
-       }
-#endif
-       p = prom_getmdesc();
+       p = fw_getmdesc();
 
        while (p->size) {
                long type;
                unsigned long base, size;
 
-               type = prom_memtype_classify(p->type);
+               type = fw_memtype_classify(p->type);
                base = p->base;
                size = p->size;
 
@@ -172,7 +134,7 @@ void __init prom_free_prom_memory(void)
                        continue;
 
                addr = boot_mem_map.map[i].addr;
-               free_init_pages("prom memory",
+               free_init_pages("YAMON memory",
                                addr, addr + boot_mem_map.map[i].size);
        }
 }
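
fw_meminit() above simply walks the descriptor table and hands each block to the boot memory map with a type derived from fw_memtype_classify(). A hedged, self-contained sketch of that walk (the enum, the layout and the kernel-image size are illustrative, not the real fw_memblock_t):

#include <stdio.h>

enum blk_type { BLK_DONTUSE, BLK_CODE, BLK_FREE };

struct memblock {
	enum blk_type type;
	unsigned long base, size;
};

static const char *classify(enum blk_type t)
{
	switch (t) {
	case BLK_FREE:	return "RAM";
	case BLK_CODE:	return "ROM data";
	default:	return "reserved";
	}
}

int main(void)
{
	/* roughly the 32MB default layout built above: exception page,
	 * YAMON code, YAMON data, kernel image (size is a guess), free RAM */
	struct memblock mdesc[] = {
		{ BLK_DONTUSE, 0x00000000, 0x00001000 },
		{ BLK_CODE,    0x00001000, 0x000ef000 },
		{ BLK_DONTUSE, 0x000f0000, 0x00010000 },
		{ BLK_DONTUSE, 0x00100000, 0x00400000 },
		{ BLK_FREE,    0x00500000, 0x01b00000 },
		{ BLK_DONTUSE, 0, 0 },
	};

	for (struct memblock *p = mdesc; p->size; p++)
		printf("%08lx-%08lx %s\n", p->base,
		       p->base + p->size - 1, classify(p->type));
	return 0;
}
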
index 200f64df2c9b492bf6ec948ff03bbfee864f59fa..c72a069367819d1ca8c91532862dbfbd890de1b1 100644 (file)
 #include <linux/screen_info.h>
 #include <linux/time.h>
 
-#include <asm/bootinfo.h>
+#include <asm/fw/fw.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 #include <asm/mips-boards/malta.h>
 #include <asm/mips-boards/maltaint.h>
 #include <asm/dma.h>
 #include <asm/traps.h>
+#include <asm/gcmpregs.h>
 #ifdef CONFIG_VT
 #include <linux/console.h>
 #endif
@@ -105,6 +105,66 @@ static void __init fd_activate(void)
 }
 #endif
 
+static int __init plat_enable_iocoherency(void)
+{
+       int supported = 0;
+       if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
+               if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
+                       BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
+                       pr_info("Enabled Bonito CPU coherency\n");
+                       supported = 1;
+               }
+               if (strstr(fw_getcmdline(), "iobcuncached")) {
+                       BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
+                       BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
+                               ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
+                                 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
+                       pr_info("Disabled Bonito IOBC coherency\n");
+               } else {
+                       BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
+                       BONITO_PCIMEMBASECFG |=
+                               (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
+                                BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
+                       pr_info("Enabled Bonito IOBC coherency\n");
+               }
+       } else if (gcmp_niocu() != 0) {
+               /* Nothing special needs to be done to enable coherency */
+               pr_info("CMP IOCU detected\n");
+               if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
+                       pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
+                       return 0;
+               }
+               supported = 1;
+       }
+       hw_coherentio = supported;
+       return supported;
+}
+
+static void __init plat_setup_iocoherency(void)
+{
+#ifdef CONFIG_DMA_NONCOHERENT
+       /*
+        * Kernel has been configured with software coherency
+        * but we might choose to turn it off and use hardware
+        * coherency instead.
+        */
+       if (plat_enable_iocoherency()) {
+               if (coherentio == 0)
+                       pr_info("Hardware DMA cache coherency disabled\n");
+               else
+                       pr_info("Hardware DMA cache coherency enabled\n");
+       } else {
+               if (coherentio == 1)
+                       pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
+               else
+                       pr_info("Software DMA cache coherency enabled\n");
+       }
+#else
+       if (!plat_enable_iocoherency())
+               panic("Hardware DMA cache coherency not supported!");
+#endif
+}
+
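
plat_setup_iocoherency() above reduces to a small decision between what plat_enable_iocoherency() probed and the coherentio command-line override. A condensed sketch of that decision outside the kernel (the strings paraphrase the pr_info() messages; coherentio semantics assumed as 0 = forced off, 1 = forced on):

#include <stdio.h>

static const char *dma_mode(int hw_supported, int coherentio)
{
	if (hw_supported)
		return coherentio == 0 ? "hardware coherency present but disabled"
				       : "hardware DMA cache coherency";
	return coherentio == 1 ? "software coherency (hardware mode requested, unsupported)"
			       : "software DMA cache coherency";
}

int main(void)
{
	printf("%s\n", dma_mode(1, 1));		/* IOCU/Bonito coherency found */
	printf("%s\n", dma_mode(0, 0));		/* plain non-coherent platform */
	return 0;
}
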
 #ifdef CONFIG_BLK_DEV_IDE
 static void __init pci_clock_check(void)
 {
@@ -115,16 +175,15 @@ static void __init pci_clock_check(void)
                33, 20, 25, 30, 12, 16, 37, 10
        };
        int pciclock = pciclocks[jmpr];
-       char *argptr = prom_getcmdline();
+       char *argptr = fw_getcmdline();
 
        if (pciclock != 33 && !strstr(argptr, "idebus=")) {
-               printk(KERN_WARNING "WARNING: PCI clock is %dMHz, "
-                               "setting idebus\n", pciclock);
+               pr_warn("WARNING: PCI clock is %dMHz, setting idebus\n",
+                       pciclock);
                argptr += strlen(argptr);
                sprintf(argptr, " idebus=%d", pciclock);
                if (pciclock < 20 || pciclock > 66)
-                       printk(KERN_WARNING "WARNING: IDE timing "
-                                       "calculations will be incorrect\n");
+                       pr_warn("WARNING: IDE timing calculations will be incorrect\n");
        }
 }
 #endif
@@ -153,31 +212,31 @@ static void __init bonito_quirks_setup(void)
 {
        char *argptr;
 
-       argptr = prom_getcmdline();
+       argptr = fw_getcmdline();
        if (strstr(argptr, "debug")) {
                BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE;
-               printk(KERN_INFO "Enabled Bonito debug mode\n");
+               pr_info("Enabled Bonito debug mode\n");
        } else
                BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
 
 #ifdef CONFIG_DMA_COHERENT
        if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
                BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
-               printk(KERN_INFO "Enabled Bonito CPU coherency\n");
+               pr_info("Enabled Bonito CPU coherency\n");
 
-               argptr = prom_getcmdline();
+               argptr = fw_getcmdline();
                if (strstr(argptr, "iobcuncached")) {
                        BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
                        BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
                                ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
                                        BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
-                       printk(KERN_INFO "Disabled Bonito IOBC coherency\n");
+                       pr_info("Disabled Bonito IOBC coherency\n");
                } else {
                        BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
                        BONITO_PCIMEMBASECFG |=
                                (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
                                        BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
-                       printk(KERN_INFO "Enabled Bonito IOBC coherency\n");
+                       pr_info("Enabled Bonito IOBC coherency\n");
                }
        } else
                panic("Hardware DMA cache coherency not supported");
@@ -207,6 +266,8 @@ void __init plat_mem_setup(void)
        if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
                bonito_quirks_setup();
 
+       plat_setup_iocoherency();
+
 #ifdef CONFIG_BLK_DEV_IDE
        pci_clock_check();
 #endif
index a144b89cf9ba299e5f9fb2bcea5fd179f5ceb413..0ad305f75802bc64250ad01be6e76b1826ea95a2 100644 (file)
 #include <asm/gic.h>
 
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
-
 #include <asm/mips-boards/maltaint.h>
 
 unsigned long cpu_khz;
-int gic_frequency;
 
 static int mips_cpu_timer_irq;
 static int mips_cpu_perf_irq;
@@ -74,7 +71,24 @@ static void __init estimate_frequencies(void)
 {
        unsigned long flags;
        unsigned int count, start;
+#ifdef CONFIG_IRQ_GIC
        unsigned int giccount = 0, gicstart = 0;
+#endif
+
+#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ)
+       unsigned int prid = read_c0_prid() & 0xffff00;
+
+       /*
+        * XXXKYMA: hardwire the CPU frequency to Host Freq/4
+        */
+       count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3;
+       if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
+           (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
+               count *= 2;
+
+       mips_hpt_frequency = count;
+       return;
+#endif
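
The KVM-guest branch above derives the guest timer rate from the host clock at build time; a quick arithmetic check of the >> 3 followed by the doubling (assuming a hypothetical 1000 MHz CONFIG_KVM_HOST_FREQ):

#include <stdio.h>

int main(void)
{
	unsigned int host_mhz = 1000;			 /* stand-in for CONFIG_KVM_HOST_FREQ */
	unsigned int count = (host_mhz * 1000000u) >> 3; /* host/8 = 125000000 */

	count *= 2;					 /* non-20Kc/25Kf cores: host/4 = 250000000 */
	printf("mips_hpt_frequency = %u Hz\n", count);
	return 0;
}
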
 
        local_irq_save(flags);
 
@@ -84,26 +98,32 @@ static void __init estimate_frequencies(void)
 
        /* Initialize counters. */
        start = read_c0_count();
+#ifdef CONFIG_IRQ_GIC
        if (gic_present)
                GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart);
+#endif
 
        /* Read counter exactly on falling edge of update flag. */
        while (CMOS_READ(RTC_REG_A) & RTC_UIP);
        while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
 
        count = read_c0_count();
+#ifdef CONFIG_IRQ_GIC
        if (gic_present)
                GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount);
+#endif
 
        local_irq_restore(flags);
 
        count -= start;
-       if (gic_present)
-               giccount -= gicstart;
-
        mips_hpt_frequency = count;
-       if (gic_present)
+
+#ifdef CONFIG_IRQ_GIC
+       if (gic_present) {
+               giccount -= gicstart;
                gic_frequency = giccount;
+       }
+#endif
 }
 
 void read_persistent_clock(struct timespec *ts)
@@ -159,24 +179,27 @@ void __init plat_time_init(void)
            (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
                freq *= 2;
        freq = freqround(freq, 5000);
-       pr_debug("CPU frequency %d.%02d MHz\n", freq/1000000,
+       printk("CPU frequency %d.%02d MHz\n", freq/1000000,
               (freq%1000000)*100/1000000);
        cpu_khz = freq / 1000;
 
-       if (gic_present) {
-               freq = freqround(gic_frequency, 5000);
-               pr_debug("GIC frequency %d.%02d MHz\n", freq/1000000,
-                      (freq%1000000)*100/1000000);
-               gic_clocksource_init(gic_frequency);
-       } else
-               init_r4k_clocksource();
+       mips_scroll_message();
 
 #ifdef CONFIG_I8253
        /* Only Malta has a PIT. */
        setup_pit_timer();
 #endif
 
-       mips_scroll_message();
+#ifdef CONFIG_IRQ_GIC
+       if (gic_present) {
+               freq = freqround(gic_frequency, 5000);
+               printk("GIC frequency %d.%02d MHz\n", freq/1000000,
+                      (freq%1000000)*100/1000000);
+#ifdef CONFIG_CSRC_GIC
+               gic_clocksource_init(gic_frequency);
+#endif
+       }
+#endif
 
        plat_perf_setup();
 }
index 10ec701ce6c72e8cd056fd8ac6fb55b19608d8c8..be114209217cc5a13fe8531365ba5454b12c530f 100644 (file)
@@ -8,10 +8,10 @@
 # Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 # Steven J. Hill <sjhill@mips.com>
 #
-obj-y                          := sead3-lcd.o sead3-cmdline.o \
-                                  sead3-display.o sead3-init.o sead3-int.o \
-                                  sead3-mtd.o sead3-net.o sead3-platform.o \
-                                  sead3-reset.o sead3-setup.o sead3-time.o
+obj-y                          := sead3-lcd.o sead3-display.o sead3-init.o \
+                                  sead3-int.o sead3-mtd.o sead3-net.o \
+                                  sead3-platform.o sead3-reset.o \
+                                  sead3-setup.o sead3-time.o
 
 obj-y                          += sead3-i2c-dev.o sead3-i2c.o \
                                   sead3-pic32-i2c-drv.o sead3-pic32-bus.o \
index 322148c353ed65065bd146f05de3ce27198382ca..0a168c948b01a9e593c5c4cc7cce7679a86c618b 100644 (file)
@@ -34,33 +34,15 @@ static void sead3_fled_set(struct led_classdev *led_cdev,
 static struct led_classdev sead3_pled = {
        .name           = "sead3::pled",
        .brightness_set = sead3_pled_set,
+       .flags          = LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev sead3_fled = {
        .name           = "sead3::fled",
        .brightness_set = sead3_fled_set,
+       .flags          = LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int sead3_led_suspend(struct platform_device *dev,
-               pm_message_t state)
-{
-       led_classdev_suspend(&sead3_pled);
-       led_classdev_suspend(&sead3_fled);
-       return 0;
-}
-
-static int sead3_led_resume(struct platform_device *dev)
-{
-       led_classdev_resume(&sead3_pled);
-       led_classdev_resume(&sead3_fled);
-       return 0;
-}
-#else
-#define sead3_led_suspend NULL
-#define sead3_led_resume NULL
-#endif
-
 static int sead3_led_probe(struct platform_device *pdev)
 {
        int ret;
@@ -86,8 +68,6 @@ static int sead3_led_remove(struct platform_device *pdev)
 static struct platform_driver sead3_led_driver = {
        .probe          = sead3_led_probe,
        .remove         = sead3_led_remove,
-       .suspend        = sead3_led_suspend,
-       .resume         = sead3_led_resume,
        .driver         = {
                .name           = DRVNAME,
                .owner          = THIS_MODULE,
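
The hand-rolled suspend/resume hooks removed above become redundant once each led_classdev carries LED_CORE_SUSPENDRESUME, since the LED core then suspends and resumes the class device itself. A minimal sketch of a driver leaning on that flag (names are hypothetical, error handling trimmed):

#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static void demo_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	/* write value to the board's LED register here */
}

static struct led_classdev demo_led = {
	.name		= "demo::led",
	.brightness_set	= demo_led_set,
	.flags		= LED_CORE_SUSPENDRESUME,	/* core handles PM transitions */
};

static int demo_led_probe(struct platform_device *pdev)
{
	return led_classdev_register(&pdev->dev, &demo_led);
}

static int demo_led_remove(struct platform_device *pdev)
{
	led_classdev_unregister(&demo_led);
	return 0;
}

static struct platform_driver demo_led_driver = {
	.probe	= demo_led_probe,
	.remove	= demo_led_remove,
	.driver	= {
		.name	= "demo-led",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(demo_led_driver);

MODULE_LICENSE("GPL");
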
diff --git a/arch/mips/mti-sead3/sead3-cmdline.c b/arch/mips/mti-sead3/sead3-cmdline.c
deleted file mode 100644 (file)
index a2e6cec..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-
-extern int prom_argc;
-extern int *_prom_argv;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension.
- */
-#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
-
-char * __init prom_getcmdline(void)
-{
-       return &(arcs_cmdline[0]);
-}
-
-void  __init prom_init_cmdline(void)
-{
-       char *cp;
-       int actr;
-
-       actr = 1; /* Always ignore argv[0] */
-
-       cp = &(arcs_cmdline[0]);
-       while (actr < prom_argc) {
-               strcpy(cp, prom_argv(actr));
-               cp += strlen(prom_argv(actr));
-               *cp++ = ' ';
-               actr++;
-       }
-       if (cp != &(arcs_cmdline[0])) {
-               /* get rid of trailing space */
-               --cp;
-               *cp = '\0';
-       }
-}
index 2ddef19a9adc891b898188f8fae38e692309073a..031f47d6977006451f514f3aa1b72d0219f1821c 100644 (file)
@@ -26,7 +26,7 @@ static inline void serial_out(int offset, int value, unsigned int base_addr)
        __raw_writel(value, PORT(base_addr, offset));
 }
 
-void __init prom_init_early_console(char port)
+void __init fw_init_early_console(char port)
 {
        console_port = port;
 }
index e389326cfa423add03614ec7baa10b1ed529c7e2..94875991907bd4c3bfda8ac32b825a3513e7a4e8 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/timer.h>
 #include <linux/io.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 
 static unsigned int display_count;
 static unsigned int max_display_count;
index f95abaa1aa5db6a06b4d8b80696c99120ed48711..bfbd17b120a21d637888a65db614df206de41043 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
-
-extern void prom_init_early_console(char port);
+#include <asm/fw/fw.h>
 
 extern char except_vec_nmi;
 extern char except_vec_ejtag_debug;
 
-int prom_argc;
-int *_prom_argv, *_prom_envp;
-
-#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
-
-char *prom_getenv(char *envname)
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+static void __init console_config(void)
 {
-       /*
-        * Return a pointer to the given environment variable.
-        * In 64-bit mode: we're using 64-bit pointers, but all pointers
-        * in the PROM structures are only 32-bit, so we need some
-        * workarounds, if we are running in 64-bit mode.
-        */
-       int i, index = 0;
-
-       i = strlen(envname);
-
-       while (prom_envp(index)) {
-               if (strncmp(envname, prom_envp(index), i) == 0)
-                       return prom_envp(index+1);
-               index += 2;
+       char console_string[40];
+       int baud = 0;
+       char parity = '\0', bits = '\0', flow = '\0';
+       char *s;
+
+       if ((strstr(fw_getcmdline(), "console=")) == NULL) {
+               s = fw_getenv("modetty0");
+               if (s) {
+                       while (*s >= '0' && *s <= '9')
+                               baud = baud*10 + *s++ - '0';
+                       if (*s == ',')
+                               s++;
+                       if (*s)
+                               parity = *s++;
+                       if (*s == ',')
+                               s++;
+                       if (*s)
+                               bits = *s++;
+                       if (*s == ',')
+                               s++;
+                       if (*s == 'h')
+                               flow = 'r';
+               }
+               if (baud == 0)
+                       baud = 38400;
+               if (parity != 'n' && parity != 'o' && parity != 'e')
+                       parity = 'n';
+               if (bits != '7' && bits != '8')
+                       bits = '8';
+               if (flow == '\0')
+                       flow = 'r';
+               sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
+                       parity, bits, flow);
+               strcat(fw_getcmdline(), console_string);
        }
-
-       return NULL;
 }
+#endif
 
 static void __init mips_nmi_setup(void)
 {
@@ -52,7 +65,41 @@ static void __init mips_nmi_setup(void)
        base = cpu_has_veic ?
                (void *)(CAC_BASE + 0xa80) :
                (void *)(CAC_BASE + 0x380);
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * Decrement the exception vector address by one for microMIPS.
+        */
+       memcpy(base, (&except_vec_nmi - 1), 0x80);
+
+       /*
+        * This is a hack. We do not know if the boot loader was built with
+        * microMIPS instructions or not. If it was not, the NMI exception
+        * code at 0x80000a80 will be taken in MIPS32 mode. The hand coded
+        * assembly below forces us into microMIPS mode if we are a pure
+        * microMIPS kernel. The assembly instructions are:
+        *
+        *  3C1A8000   lui       k0,0x8000
+        *  375A0381   ori       k0,k0,0x381
+        *  03400008   jr        k0
+        *  00000000   nop
+        *
+        * The mode switch occurs by jumping to the unaligned exception
+        * vector address at 0x80000381 which would have been 0x80000380
+        * in MIPS32 mode. The jump to the unaligned address transitions
+        * us into microMIPS mode.
+        */
+       if (!cpu_has_veic) {
+               void *base2 = (void *)(CAC_BASE + 0xa80);
+               *((unsigned int *)base2) = 0x3c1a8000;
+               *((unsigned int *)base2 + 1) = 0x375a0381;
+               *((unsigned int *)base2 + 2) = 0x03400008;
+               *((unsigned int *)base2 + 3) = 0x00000000;
+               flush_icache_range((unsigned long)base2,
+                       (unsigned long)base2 + 0x10);
+       }
+#else
        memcpy(base, &except_vec_nmi, 0x80);
+#endif
        flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
 }
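
The hack above relies on the ISA-mode bit: on a microMIPS-capable core, bit 0 of an indirect jump target selects the instruction set, so the trampoline aims at vector|1. A two-line illustration with the address from the comment:

#include <stdio.h>

int main(void)
{
	unsigned long vec = 0x80000380UL;	/* NMI handler, MIPS32 entry */

	printf("MIPS32 target:    0x%08lx\n", vec);
	printf("microMIPS target: 0x%08lx\n", vec | 1UL);	/* bit 0 set */
	return 0;
}
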
 
@@ -63,29 +110,40 @@ static void __init mips_ejtag_setup(void)
        base = cpu_has_veic ?
                (void *)(CAC_BASE + 0xa00) :
                (void *)(CAC_BASE + 0x300);
+#ifdef CONFIG_CPU_MICROMIPS
+       /* Deja vu... */
+       memcpy(base, (&except_vec_ejtag_debug - 1), 0x80);
+       if (!cpu_has_veic) {
+               void *base2 = (void *)(CAC_BASE + 0xa00);
+               *((unsigned int *)base2) = 0x3c1a8000;
+               *((unsigned int *)base2 + 1) = 0x375a0301;
+               *((unsigned int *)base2 + 2) = 0x03400008;
+               *((unsigned int *)base2 + 3) = 0x00000000;
+               flush_icache_range((unsigned long)base2,
+                       (unsigned long)base2 + 0x10);
+       }
+#else
        memcpy(base, &except_vec_ejtag_debug, 0x80);
+#endif
        flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
 }
 
 void __init prom_init(void)
 {
-       prom_argc = fw_arg0;
-       _prom_argv = (int *) fw_arg1;
-       _prom_envp = (int *) fw_arg2;
-
        board_nmi_handler_setup = mips_nmi_setup;
        board_ejtag_handler_setup = mips_ejtag_setup;
 
-       prom_init_cmdline();
+       fw_init_cmdline();
 #ifdef CONFIG_EARLY_PRINTK
-       if ((strstr(prom_getcmdline(), "console=ttyS0")) != NULL)
-               prom_init_early_console(0);
-       else if ((strstr(prom_getcmdline(), "console=ttyS1")) != NULL)
-               prom_init_early_console(1);
+       if ((strstr(fw_getcmdline(), "console=ttyS0")) != NULL)
+               fw_init_early_console(0);
+       else if ((strstr(fw_getcmdline(), "console=ttyS1")) != NULL)
+               fw_init_early_console(1);
 #endif
 #ifdef CONFIG_SERIAL_8250_CONSOLE
-       if ((strstr(prom_getcmdline(), "console=")) == NULL)
-               strcat(prom_getcmdline(), " console=ttyS0,38400n8r");
+       if ((strstr(fw_getcmdline(), "console=")) == NULL)
+               strcat(fw_getcmdline(), " console=ttyS0,38400n8r");
+       console_config();
 #endif
 }
 
index e26e08274fc5b32a898ccdc40c8c346ec2a9d7d8..6a560ac03def0db5f2a794529998cf5e39ad1887 100644 (file)
@@ -20,7 +20,6 @@
 #define SEAD_CONFIG_BASE               0x1b100110
 #define SEAD_CONFIG_SIZE               4
 
-int gic_present;
 static unsigned long sead3_config_reg;
 
 /*
index f012fd164ceedc9e2aed9a94ebb30cc2bdc602c2..b5059dc899f44d2616a4c72c873fe82819474c09 100644 (file)
 #include <linux/bootmem.h>
 
 #include <asm/mips-boards/generic.h>
-#include <asm/prom.h>
-
-int coherentio;                /* 0 => no DMA cache coherency (may be set by user) */
-int hw_coherentio;     /* 0 => no HW DMA cache coherency (reflects real HW) */
 
 const char *get_system_type(void)
 {
index 239e4e32757fc566f8fb96b4366806531ba87aa7..96b42eb9b5e221cccdc4cf861fe48f5455cb4d0f 100644 (file)
@@ -11,7 +11,6 @@
 #include <asm/time.h>
 #include <asm/irq.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 
 unsigned long cpu_khz;
 
index 3c05bf9e280ae5e66ea1e8ae3cfc96179a610d8b..e0873a31ebaace03336502d8bf57087fca3375b9 100644 (file)
@@ -2,13 +2,22 @@ if NLM_XLP_BOARD || NLM_XLR_BOARD
 
 if NLM_XLP_BOARD
 config DT_XLP_EVP
-       bool "Built-in device tree for XLP EVP/SVP boards"
+       bool "Built-in device tree for XLP EVP boards"
        default y
        help
-         Add an FDT blob for XLP EVP and SVP boards into the kernel.
+         Add an FDT blob for XLP EVP boards into the kernel.
          This DTB will be used if the firmware does not pass in a DTB
-          pointer to the kernel.  The corresponding DTS file is at
-          arch/mips/netlogic/dts/xlp_evp.dts
+         pointer to the kernel.  The corresponding DTS file is at
+         arch/mips/netlogic/dts/xlp_evp.dts
+
+config DT_XLP_SVP
+       bool "Built-in device tree for XLP SVP boards"
+       default y
+       help
+         Add an FDT blob for XLP SVP boards into the kernel.
+         This DTB will be used if the firmware does not pass in a DTB
+         pointer to the kernel.  The corresponding DTS file is at
+         arch/mips/netlogic/dts/xlp_svp.dts
 
 config NLM_MULTINODE
        bool "Support for multi-chip boards"
index 2bb95dcfe20addf631ec84d15267563c30fe26e2..ffba52489bef7b52a1dcb996cb911c277359a45d 100644 (file)
@@ -148,8 +148,7 @@ void nlm_cpus_done(void)
 int nlm_cpu_ready[NR_CPUS];
 unsigned long nlm_next_gp;
 unsigned long nlm_next_sp;
-
-cpumask_t phys_cpu_present_map;
+static cpumask_t phys_cpu_present_mask;
 
 void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
 {
@@ -169,11 +168,12 @@ void __init nlm_smp_setup(void)
 {
        unsigned int boot_cpu;
        int num_cpus, i, ncore;
+       char buf[64];
 
        boot_cpu = hard_smp_processor_id();
-       cpumask_clear(&phys_cpu_present_map);
+       cpumask_clear(&phys_cpu_present_mask);
 
-       cpumask_set_cpu(boot_cpu, &phys_cpu_present_map);
+       cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask);
        __cpu_number_map[boot_cpu] = 0;
        __cpu_logical_map[0] = boot_cpu;
        set_cpu_possible(0, true);
@@ -185,7 +185,7 @@ void __init nlm_smp_setup(void)
                 * it is only set for ASPs (see smpboot.S)
                 */
                if (nlm_cpu_ready[i]) {
-                       cpumask_set_cpu(i, &phys_cpu_present_map);
+                       cpumask_set_cpu(i, &phys_cpu_present_mask);
                        __cpu_number_map[i] = num_cpus;
                        __cpu_logical_map[num_cpus] = i;
                        set_cpu_possible(num_cpus, true);
@@ -193,16 +193,19 @@ void __init nlm_smp_setup(void)
                }
        }
 
+       cpumask_scnprintf(buf, ARRAY_SIZE(buf), &phys_cpu_present_mask);
+       pr_info("Physical CPU mask: %s\n", buf);
+       cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask);
+       pr_info("Possible CPU mask: %s\n", buf);
+
        /* check with the cores we have woken up */
        for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
                ncore += hweight32(nlm_get_node(i)->coremask);
 
-       pr_info("Phys CPU present map: %lx, possible map %lx\n",
-               (unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
-               (unsigned long)cpumask_bits(cpu_possible_mask)[0]);
-
        pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
                nlm_threads_per_core, num_cpus);
+
+       /* switch NMI handler to boot CPUs */
        nlm_set_nmi_handler(nlm_boot_secondary_cpus);
 }
 
index d117d46413aa29d9b03806935c3d206f0e4f6b29..aecb6fa9a9c33968a187c872672d835c5ee39845 100644 (file)
@@ -1 +1,2 @@
 obj-$(CONFIG_DT_XLP_EVP) := xlp_evp.dtb.o
+obj-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb.o
index 7628b5464fc76161bf84f5192221b2e9716c1478..e14f4230806427bfb481d4e46d7a757cd2c82114 100644 (file)
@@ -20,7 +20,7 @@
                #address-cells = <2>;
                #size-cells = <1>;
                compatible = "simple-bus";
-               ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
+               ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
                          1 0  0 0x16000000  0x01000000>; // GBU chipselects
 
                serial0: serial@30000 {
diff --git a/arch/mips/netlogic/dts/xlp_svp.dts b/arch/mips/netlogic/dts/xlp_svp.dts
new file mode 100644 (file)
index 0000000..8af4bdb
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * XLP3XX Device Tree Source for SVP boards
+ */
+
+/dts-v1/;
+/ {
+       model = "netlogic,XLP-SVP";
+       compatible = "netlogic,xlp";
+       #address-cells = <2>;
+       #size-cells = <2>;
+
+       memory {
+               device_type = "memory";
+               reg =  <0 0x00100000 0 0x0FF00000       // 255M at 1M
+                       0 0x20000000 0 0xa0000000       // 2560M at 512M
+                       0 0xe0000000 0 0x40000000>;
+       };
+
+       soc {
+               #address-cells = <2>;
+               #size-cells = <1>;
+               compatible = "simple-bus";
+               ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
+                         1 0  0 0x16000000  0x01000000>; // GBU chipselects
+
+               serial0: serial@30000 {
+                       device_type = "serial";
+                       compatible = "ns16550";
+                       reg = <0 0x30100 0xa00>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clock-frequency = <133333333>;
+                       interrupt-parent = <&pic>;
+                       interrupts = <17>;
+               };
+               serial1: serial@31000 {
+                       device_type = "serial";
+                       compatible = "ns16550";
+                       reg = <0 0x31100 0xa00>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clock-frequency = <133333333>;
+                       interrupt-parent = <&pic>;
+                       interrupts = <18>;
+               };
+               i2c0: ocores@32000 {
+                       compatible = "opencores,i2c-ocores";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0 0x32100 0xa00>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clock-frequency = <32000000>;
+                       interrupt-parent = <&pic>;
+                       interrupts = <30>;
+               };
+               i2c1: ocores@33000 {
+                       compatible = "opencores,i2c-ocores";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0 0x33100 0xa00>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clock-frequency = <32000000>;
+                       interrupt-parent = <&pic>;
+                       interrupts = <31>;
+
+                       rtc@68 {
+                               compatible = "dallas,ds1374";
+                               reg = <0x68>;
+                       };
+
+                       dtt@4c {
+                               compatible = "national,lm90";
+                               reg = <0x4c>;
+                       };
+               };
+               pic: pic@4000 {
+                       interrupt-controller;
+                       #address-cells = <0>;
+                       #interrupt-cells = <1>;
+                       reg = <0 0x4000 0x200>;
+               };
+
+               nor_flash@1,0 {
+                       compatible = "cfi-flash";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       bank-width = <2>;
+                       reg = <1 0 0x1000000>;
+
+                       partition@0 {
+                               label = "x-loader";
+                               reg = <0x0 0x100000>; /* 1M */
+                               read-only;
+                       };
+
+                       partition@100000 {
+                               label = "u-boot";
+                               reg = <0x100000 0x100000>; /* 1M */
+                       };
+
+                       partition@200000 {
+                               label = "kernel";
+                               reg = <0x200000 0x500000>; /* 5M */
+                       };
+
+                       partition@700000 {
+                               label = "rootfs";
+                               reg = <0x700000 0x800000>; /* 8M */
+                       };
+
+                       partition@f00000 {
+                               label = "env";
+                               reg = <0xf00000 0x100000>; /* 1M */
+                               read-only;
+                       };
+               };
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
+       };
+};
index c68fd4026104b7ed4db04d21ae8d86753fa71266..87560e4db35f148e87028b6ce34fcaf1485177bf 100644 (file)
@@ -61,43 +61,61 @@ void nlm_node_init(int node)
 
 int nlm_irq_to_irt(int irq)
 {
-       if (!PIC_IRQ_IS_IRT(irq))
-               return -1;
+       uint64_t pcibase;
+       int devoff, irt;
 
        switch (irq) {
        case PIC_UART_0_IRQ:
-               return PIC_IRT_UART_0_INDEX;
+               devoff = XLP_IO_UART0_OFFSET(0);
+               break;
        case PIC_UART_1_IRQ:
-               return PIC_IRT_UART_1_INDEX;
-       case PIC_PCIE_LINK_0_IRQ:
-              return PIC_IRT_PCIE_LINK_0_INDEX;
-       case PIC_PCIE_LINK_1_IRQ:
-              return PIC_IRT_PCIE_LINK_1_INDEX;
-       case PIC_PCIE_LINK_2_IRQ:
-              return PIC_IRT_PCIE_LINK_2_INDEX;
-       case PIC_PCIE_LINK_3_IRQ:
-              return PIC_IRT_PCIE_LINK_3_INDEX;
+               devoff = XLP_IO_UART1_OFFSET(0);
+               break;
        case PIC_EHCI_0_IRQ:
-              return PIC_IRT_EHCI_0_INDEX;
+               devoff = XLP_IO_USB_EHCI0_OFFSET(0);
+               break;
        case PIC_EHCI_1_IRQ:
-              return PIC_IRT_EHCI_1_INDEX;
+               devoff = XLP_IO_USB_EHCI1_OFFSET(0);
+               break;
        case PIC_OHCI_0_IRQ:
-              return PIC_IRT_OHCI_0_INDEX;
+               devoff = XLP_IO_USB_OHCI0_OFFSET(0);
+               break;
        case PIC_OHCI_1_IRQ:
-              return PIC_IRT_OHCI_1_INDEX;
+               devoff = XLP_IO_USB_OHCI1_OFFSET(0);
+               break;
        case PIC_OHCI_2_IRQ:
-              return PIC_IRT_OHCI_2_INDEX;
+               devoff = XLP_IO_USB_OHCI2_OFFSET(0);
+               break;
        case PIC_OHCI_3_IRQ:
-              return PIC_IRT_OHCI_3_INDEX;
+               devoff = XLP_IO_USB_OHCI3_OFFSET(0);
+               break;
        case PIC_MMC_IRQ:
-              return PIC_IRT_MMC_INDEX;
+               devoff = XLP_IO_SD_OFFSET(0);
+               break;
        case PIC_I2C_0_IRQ:
-               return PIC_IRT_I2C_0_INDEX;
+               devoff = XLP_IO_I2C0_OFFSET(0);
+               break;
        case PIC_I2C_1_IRQ:
-               return PIC_IRT_I2C_1_INDEX;
+               devoff = XLP_IO_I2C1_OFFSET(0);
+               break;
        default:
-               return -1;
+               devoff = 0;
+               break;
        }
+
+       if (devoff != 0) {
+               pcibase = nlm_pcicfg_base(devoff);
+               irt = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG) & 0xffff;
+               /* HW bug, I2C 1 irt entry is off by one */
+               if (irq == PIC_I2C_1_IRQ)
+                       irt = irt + 1;
+       } else if (irq >= PIC_PCIE_LINK_0_IRQ && irq <= PIC_PCIE_LINK_3_IRQ) {
+               /* HW bug, PCI IRT entries are bad on early silicon, fix */
+               irt = PIC_IRT_PCIE_LINK_INDEX(irq - PIC_PCIE_LINK_0_IRQ);
+       } else {
+               irt = -1;
+       }
+       return irt;
 }
 
 unsigned int nlm_get_core_frequency(int node, int core)
index 4894d62043ac3639ce7844069995466ced103ea0..af319143b59187a4918805f737e2dcdb480845e7 100644 (file)
@@ -56,7 +56,7 @@ uint64_t nlm_io_base;
 struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
 cpumask_t nlm_cpumask = CPU_MASK_CPU0;
 unsigned int nlm_threads_per_core;
-extern u32 __dtb_start[];
+extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_start[];
 
 static void nlm_linux_exit(void)
 {
@@ -82,8 +82,24 @@ void __init plat_mem_setup(void)
         * 64-bit, so convert pointer.
         */
        fdtp = (void *)(long)fw_arg0;
-       if (!fdtp)
-               fdtp = __dtb_start;
+       if (!fdtp) {
+               switch (current_cpu_data.processor_id & 0xff00) {
+#ifdef CONFIG_DT_XLP_SVP
+               case PRID_IMP_NETLOGIC_XLP3XX:
+                       fdtp = __dtb_xlp_svp_begin;
+                       break;
+#endif
+#ifdef CONFIG_DT_XLP_EVP
+               case PRID_IMP_NETLOGIC_XLP8XX:
+                       fdtp = __dtb_xlp_evp_begin;
+                       break;
+#endif
+               default:
+                       /* Pick a built-in if any, and hope for the best */
+                       fdtp = __dtb_start;
+                       break;
+               }
+       }
        fdtp = phys_to_virt(__pa(fdtp));
        early_init_devtree(fdtp);
 }
index 1d0b66c62fd1beed8f4f601a380170bcb16c17df..9c401dd78337384d1d95050c26ad3b38935d3f50 100644 (file)
 #include <asm/netlogic/haldefs.h>
 #include <asm/netlogic/xlp-hal/iomap.h>
 #include <asm/netlogic/xlp-hal/xlp.h>
-#include <asm/netlogic/xlp-hal/usb.h>
+
+/*
+ * USB glue logic registers, used only during initialization
+ */
+#define USB_CTL_0                      0x01
+#define USB_PHY_0                      0x0A
+#define USB_PHY_RESET                  0x01
+#define USB_PHY_PORT_RESET_0           0x10
+#define USB_PHY_PORT_RESET_1           0x20
+#define USB_CONTROLLER_RESET           0x01
+#define USB_INT_STATUS                 0x0E
+#define USB_INT_EN                     0x0F
+#define USB_PHY_INTERRUPT_EN           0x01
+#define USB_OHCI_INTERRUPT_EN          0x02
+#define USB_OHCI_INTERRUPT1_EN         0x04
+#define USB_OHCI_INTERRUPT2_EN         0x08
+#define USB_CTRL_INTERRUPT_EN          0x10
+
+#define nlm_read_usb_reg(b, r)                 nlm_read_reg(b, r)
+#define nlm_write_usb_reg(b, r, v)             nlm_write_reg(b, r, v)
+#define nlm_get_usb_pcibase(node, inst)                \
+       nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
+#define nlm_get_usb_regbase(node, inst)                \
+       (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
 
 static void nlm_usb_intr_en(int node, int port)
 {
@@ -99,23 +122,23 @@ static void nlm_usb_fixup_final(struct pci_dev *dev)
        dev->dev.coherent_dma_mask      = DMA_BIT_MASK(64);
        switch (dev->devfn) {
        case 0x10:
-              dev->irq = PIC_EHCI_0_IRQ;
-              break;
+               dev->irq = PIC_EHCI_0_IRQ;
+               break;
        case 0x11:
-              dev->irq = PIC_OHCI_0_IRQ;
-              break;
+               dev->irq = PIC_OHCI_0_IRQ;
+               break;
        case 0x12:
-              dev->irq = PIC_OHCI_1_IRQ;
-              break;
+               dev->irq = PIC_OHCI_1_IRQ;
+               break;
        case 0x13:
-              dev->irq = PIC_EHCI_1_IRQ;
-              break;
+               dev->irq = PIC_EHCI_1_IRQ;
+               break;
        case 0x14:
-              dev->irq = PIC_OHCI_2_IRQ;
-              break;
+               dev->irq = PIC_OHCI_2_IRQ;
+               break;
        case 0x15:
-              dev->irq = PIC_OHCI_3_IRQ;
-              break;
+               dev->irq = PIC_OHCI_3_IRQ;
+               break;
        }
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI,
index 1fd361462c030b7fefd06c676f54129ff4fe63f6..e4b1140cdae060dca0de8bfdd6a5985b1429de58 100644 (file)
@@ -41,7 +41,7 @@ static int (*save_perf_irq)(void);
  * first hardware thread in the core for setup and init.
  * Skip CPUs with non-zero hardware thread id (4 hwt per core)
  */
-#ifdef CONFIG_CPU_XLR
+#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
 #define oprofile_skip_cpu(c)   ((cpu_logical_map(c) & 0x3) != 0)
 #else
 #define oprofile_skip_cpu(c)   0
index 412ec025cf55643d528ca28116669a519f8ee182..18517dd0f7090987fe182df3d2a4dbe4e62842cf 100644 (file)
@@ -366,9 +366,9 @@ static int ar71xx_pci_probe(struct platform_device *pdev)
        if (!res)
                return -EINVAL;
 
-       apc->cfg_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (!apc->cfg_base)
-               return -ENOMEM;
+       apc->cfg_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(apc->cfg_base))
+               return PTR_ERR(apc->cfg_base);
 
        apc->irq = platform_get_irq(pdev, 0);
        if (apc->irq < 0)
index 8a0700d448fe45c65cb9063e37e7453dc1e05931..65ec032fa0b442367c93b70cf8599fb5bd03fdd1 100644 (file)
@@ -365,25 +365,25 @@ static int ar724x_pci_probe(struct platform_device *pdev)
        if (!res)
                return -EINVAL;
 
-       apc->ctrl_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (apc->ctrl_base == NULL)
-               return -EBUSY;
+       apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(apc->ctrl_base))
+               return PTR_ERR(apc->ctrl_base);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
        if (!res)
                return -EINVAL;
 
-       apc->devcfg_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (!apc->devcfg_base)
-               return -EBUSY;
+       apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(apc->devcfg_base))
+               return PTR_ERR(apc->devcfg_base);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
        if (!res)
                return -EINVAL;
 
-       apc->crp_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (apc->crp_base == NULL)
-               return -EBUSY;
+       apc->crp_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(apc->crp_base))
+               return PTR_ERR(apc->crp_base);
 
        apc->irq = platform_get_irq(pdev, 0);
        if (apc->irq < 0)
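
For reference, the probe() pattern both conversions above arrive at looks like
the minimal sketch below (not part of this commit; the driver name and resource
index are illustrative). devm_ioremap_resource() combines the
request_mem_region() and ioremap() steps and reports failure as an ERR_PTR()
value, so callers propagate PTR_ERR() instead of hand-picking -EBUSY or -ENOMEM:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        /*
         * devm_ioremap_resource() validates the resource, requests the
         * memory region and maps it; any failure comes back as an
         * ERR_PTR() code that is simply passed up to the caller.
         */
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* device-specific setup would continue here */
        return 0;
}
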
index 88e781c6b5bab274b53f76c55b7f51c2356b1bc6..2eb954239bc5bbc3b1edac6325be44f287d76235 100644 (file)
@@ -121,11 +121,17 @@ void __iomem *pci_iospace_start;
 static void __init bcm63xx_reset_pcie(void)
 {
        u32 val;
+       u32 reg;
 
        /* enable SERDES */
-       val = bcm_misc_readl(MISC_SERDES_CTRL_REG);
+       if (BCMCPU_IS_6328())
+               reg = MISC_SERDES_CTRL_6328_REG;
+       else
+               reg = MISC_SERDES_CTRL_6362_REG;
+
+       val = bcm_misc_readl(reg);
        val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN;
-       bcm_misc_writel(val, MISC_SERDES_CTRL_REG);
+       bcm_misc_writel(val, reg);
 
        /* reset the PCIe core */
        bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1);
@@ -330,6 +336,7 @@ static int __init bcm63xx_pci_init(void)
 
        switch (bcm63xx_get_cpu_id()) {
        case BCM6328_CPU_ID:
+       case BCM6362_CPU_ID:
                return bcm63xx_register_pcie();
        case BCM6348_CPU_ID:
        case BCM6358_CPU_ID:
index 5bd9d8f468cc3582ab02114ff272b833d8893565..a01baff52cae0ab1ad652574843049c6cf640097 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 
-#include <asm/mips-boards/prom.h>
 #include <asm/mips-boards/generic.h>
 #include <asm/mach-powertv/asic.h>
 
+#include "init.h"
+
 static int *_prom_envp;
 unsigned long _prom_memsize;
 
index b194c34ca9660eda535d77cc758f79868cdbbc82..c1a8bd0dbe4b38984958f4fbb498e392b0cda8eb 100644 (file)
@@ -23,4 +23,6 @@
 #ifndef _POWERTV_INIT_H
 #define _POWERTV_INIT_H
 extern unsigned long _prom_memsize;
+extern void prom_meminit(void);
+extern char *prom_getenv(char *name);
 #endif
index 6e5f1bdc59b5fa77215803d8e919b55afe3c081d..bc2f3ca22b413a27ed8f245495d5c8a38374d8d2 100644 (file)
@@ -29,7 +29,6 @@
 #include <asm/page.h>
 #include <asm/sections.h>
 
-#include <asm/mips-boards/prom.h>
 #include <asm/mach-powertv/asic.h>
 #include <asm/mach-powertv/ioremap.h>
 
index 820b8480f2223850f56169e6570e50fed0ae8de3..24689bff1039fe522e50e6d70c0eb0ab725e7849 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/bootinfo.h>
 #include <asm/irq.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 #include <asm/dma.h>
 #include <asm/asm.h>
 #include <asm/traps.h>
index a0b0197cab0aa205cdebbbb9e31fe6381da5aa07..026e823d871d34e8952ba22b064e59cd4b778fda 100644 (file)
@@ -6,12 +6,23 @@ choice
        help
          Select Ralink MIPS SoC type.
 
+       config SOC_RT288X
+               bool "RT288x"
+
        config SOC_RT305X
                bool "RT305x"
                select USB_ARCH_HAS_HCD
                select USB_ARCH_HAS_OHCI
                select USB_ARCH_HAS_EHCI
 
+       config SOC_RT3883
+               bool "RT3883"
+               select USB_ARCH_HAS_OHCI
+               select USB_ARCH_HAS_EHCI
+
+       config SOC_MT7620
+               bool "MT7620"
+
 endchoice
 
 choice
@@ -23,10 +34,22 @@ choice
        config DTB_RT_NONE
                bool "None"
 
+       config DTB_RT2880_EVAL
+               bool "RT2880 eval kit"
+               depends on SOC_RT288X
+
        config DTB_RT305X_EVAL
                bool "RT305x eval kit"
                depends on SOC_RT305X
 
+       config DTB_RT3883_EVAL
+               bool "RT3883 eval kit"
+               depends on SOC_RT3883
+
+       config DTB_MT7620A_EVAL
+               bool "MT7620A eval kit"
+               depends on SOC_MT7620
+
 endchoice
 
 endif
index 939757f0e71f61024bdc32d50102af1eb50c9c65..38cf1a880aaac21ca6b0e38b3561e1832aa5e4c3 100644 (file)
@@ -8,7 +8,10 @@
 
 obj-y := prom.o of.o reset.o clk.o irq.o
 
+obj-$(CONFIG_SOC_RT288X) += rt288x.o
 obj-$(CONFIG_SOC_RT305X) += rt305x.o
+obj-$(CONFIG_SOC_RT3883) += rt3883.o
+obj-$(CONFIG_SOC_MT7620) += mt7620.o
 
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 
index 6babd65765e608a6d49a51a50a627da0938e23b8..cda4b6645c50587da40f9ad9ffa0357d27fe74ab 100644 (file)
@@ -4,7 +4,25 @@
 core-$(CONFIG_RALINK)          += arch/mips/ralink/
 cflags-$(CONFIG_RALINK)                += -I$(srctree)/arch/mips/include/asm/mach-ralink
 
+#
+# Ralink RT288x
+#
+load-$(CONFIG_SOC_RT288X)      += 0xffffffff88000000
+cflags-$(CONFIG_SOC_RT288X)    += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt288x
+
 #
 # Ralink RT305x
 #
 load-$(CONFIG_SOC_RT305X)      += 0xffffffff80000000
+cflags-$(CONFIG_SOC_RT305X)    += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt305x
+
+#
+# Ralink RT3883
+#
+load-$(CONFIG_SOC_RT3883)      += 0xffffffff80000000
+cflags-$(CONFIG_SOC_RT3883)    += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt3883
+
+#
+# Ralink MT7620
+#
+load-$(CONFIG_SOC_MT7620)      += 0xffffffff80000000
index 300990313e1b3acdd85486c1a8b743c4f8e4b653..83144c3fc5acc32c0b619b7b7f4ac9949a4e4e1e 100644 (file)
@@ -22,13 +22,22 @@ struct ralink_pinmux {
        struct ralink_pinmux_grp *mode;
        struct ralink_pinmux_grp *uart;
        int uart_shift;
+       u32 uart_mask;
        void (*wdt_reset)(void);
+       struct ralink_pinmux_grp *pci;
+       int pci_shift;
+       u32 pci_mask;
 };
-extern struct ralink_pinmux gpio_pinmux;
+extern struct ralink_pinmux rt_gpio_pinmux;
 
 struct ralink_soc_info {
        unsigned char sys_type[RAMIPS_SYS_TYPE_LEN];
        unsigned char *compatible;
+
+       unsigned long mem_base;
+       unsigned long mem_size;
+       unsigned long mem_size_min;
+       unsigned long mem_size_max;
 };
 extern struct ralink_soc_info soc_info;
 
index 1a69fb300955391b30f56e64e93850769774d1aa..18194fa93e8065f0916c90ce706ae2cb4e072ae4 100644 (file)
@@ -1 +1,4 @@
+obj-$(CONFIG_DTB_RT2880_EVAL) := rt2880_eval.dtb.o
 obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o
+obj-$(CONFIG_DTB_RT3883_EVAL) := rt3883_eval.dtb.o
+obj-$(CONFIG_DTB_MT7620A_EVAL) := mt7620a_eval.dtb.o
diff --git a/arch/mips/ralink/dts/mt7620a.dtsi b/arch/mips/ralink/dts/mt7620a.dtsi
new file mode 100644 (file)
index 0000000..08bf24f
--- /dev/null
@@ -0,0 +1,58 @@
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "ralink,mt7620a-soc";
+
+       cpus {
+               cpu@0 {
+                       compatible = "mips,mips24KEc";
+               };
+       };
+
+       cpuintc: cpuintc@0 {
+               #address-cells = <0>;
+               #interrupt-cells = <1>;
+               interrupt-controller;
+               compatible = "mti,cpu-interrupt-controller";
+       };
+
+       palmbus@10000000 {
+               compatible = "palmbus";
+               reg = <0x10000000 0x200000>;
+               ranges = <0x0 0x10000000 0x1FFFFF>;
+
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               sysc@0 {
+                       compatible = "ralink,mt7620a-sysc";
+                       reg = <0x0 0x100>;
+               };
+
+               intc: intc@200 {
+                       compatible = "ralink,mt7620a-intc", "ralink,rt2880-intc";
+                       reg = <0x200 0x100>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <2>;
+               };
+
+               memc@300 {
+                       compatible = "ralink,mt7620a-memc", "ralink,rt3050-memc";
+                       reg = <0x300 0x100>;
+               };
+
+               uartlite@c00 {
+                       compatible = "ralink,mt7620a-uart", "ralink,rt2880-uart", "ns16550a";
+                       reg = <0xc00 0x100>;
+
+                       interrupt-parent = <&intc>;
+                       interrupts = <12>;
+
+                       reg-shift = <2>;
+               };
+       };
+};
diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts
new file mode 100644 (file)
index 0000000..35eb874
--- /dev/null
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/include/ "mt7620a.dtsi"
+
+/ {
+       compatible = "ralink,mt7620a-eval-board", "ralink,mt7620a-soc";
+       model = "Ralink MT7620A evaluation board";
+
+       memory@0 {
+               reg = <0x0 0x2000000>;
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,57600";
+       };
+};
diff --git a/arch/mips/ralink/dts/rt2880.dtsi b/arch/mips/ralink/dts/rt2880.dtsi
new file mode 100644 (file)
index 0000000..182afde
--- /dev/null
@@ -0,0 +1,58 @@
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "ralink,rt2880-soc";
+
+       cpus {
+               cpu@0 {
+                       compatible = "mips,mips4KEc";
+               };
+       };
+
+       cpuintc: cpuintc@0 {
+               #address-cells = <0>;
+               #interrupt-cells = <1>;
+               interrupt-controller;
+               compatible = "mti,cpu-interrupt-controller";
+       };
+
+       palmbus@300000 {
+               compatible = "palmbus";
+               reg = <0x300000 0x200000>;
+               ranges = <0x0 0x300000 0x1FFFFF>;
+
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               sysc@0 {
+                       compatible = "ralink,rt2880-sysc";
+                       reg = <0x0 0x100>;
+               };
+
+               intc: intc@200 {
+                       compatible = "ralink,rt2880-intc";
+                       reg = <0x200 0x100>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <2>;
+               };
+
+               memc@300 {
+                       compatible = "ralink,rt2880-memc";
+                       reg = <0x300 0x100>;
+               };
+
+               uartlite@c00 {
+                       compatible = "ralink,rt2880-uart", "ns16550a";
+                       reg = <0xc00 0x100>;
+
+                       interrupt-parent = <&intc>;
+                       interrupts = <8>;
+
+                       reg-shift = <2>;
+               };
+       };
+};
diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts
new file mode 100644 (file)
index 0000000..322d700
--- /dev/null
@@ -0,0 +1,46 @@
+/dts-v1/;
+
+/include/ "rt2880.dtsi"
+
+/ {
+       compatible = "ralink,rt2880-eval-board", "ralink,rt2880-soc";
+       model = "Ralink RT2880 evaluation board";
+
+       memory@0 {
+               reg = <0x8000000 0x2000000>;
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,57600";
+       };
+
+       cfi@1f000000 {
+               compatible = "cfi-flash";
+               reg = <0x1f000000 0x400000>;
+
+               bank-width = <2>;
+               device-width = <2>;
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               partition@0 {
+                       label = "uboot";
+                       reg = <0x0 0x30000>;
+                       read-only;
+               };
+               partition@30000 {
+                       label = "uboot-env";
+                       reg = <0x30000 0x10000>;
+                       read-only;
+               };
+               partition@40000 {
+                       label = "calibration";
+                       reg = <0x40000 0x10000>;
+                       read-only;
+               };
+               partition@50000 {
+                       label = "linux";
+                       reg = <0x50000 0x3b0000>;
+               };
+       };
+};
index 069d0660e1ddaf844922ba6dffb7e7a838f7284d..ef7da1e227e61309c990548fa6ed93b259fe0040 100644 (file)
@@ -1,7 +1,7 @@
 / {
        #address-cells = <1>;
        #size-cells = <1>;
-       compatible = "ralink,rt3050-soc", "ralink,rt3052-soc";
+       compatible = "ralink,rt3050-soc", "ralink,rt3052-soc", "ralink,rt3350-soc";
 
        cpus {
                cpu@0 {
@@ -9,10 +9,6 @@
                };
        };
 
-       chosen {
-               bootargs = "console=ttyS0,57600 init=/init";
-       };
-
        cpuintc: cpuintc@0 {
                #address-cells = <0>;
                #interrupt-cells = <1>;
@@ -23,7 +19,7 @@
        palmbus@10000000 {
                compatible = "palmbus";
                reg = <0x10000000 0x200000>;
-                ranges = <0x0 0x10000000 0x1FFFFF>;
+               ranges = <0x0 0x10000000 0x1FFFFF>;
 
                #address-cells = <1>;
                #size-cells = <1>;
                        reg = <0x0 0x100>;
                };
 
-               timer@100 {
-                       compatible = "ralink,rt3052-wdt", "ralink,rt2880-wdt";
-                       reg = <0x100 0x100>;
-               };
-
                intc: intc@200 {
                        compatible = "ralink,rt3052-intc", "ralink,rt2880-intc";
                        reg = <0x200 0x100>;
                        reg = <0x300 0x100>;
                };
 
-               gpio0: gpio@600 {
-                       compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-                       reg = <0x600 0x34>;
-
-                       gpio-controller;
-                       #gpio-cells = <2>;
-
-                       ralink,ngpio = <24>;
-                       ralink,regs = [ 00 04 08 0c
-                                       20 24 28 2c
-                                       30 34 ];
-               };
-
-               gpio1: gpio@638 {
-                       compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-                       reg = <0x638 0x24>;
-
-                       gpio-controller;
-                       #gpio-cells = <2>;
-
-                       ralink,ngpio = <16>;
-                       ralink,regs = [ 00 04 08 0c
-                                       10 14 18 1c
-                                       20 24 ];
-               };
-
-               gpio2: gpio@660 {
-                       compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-                       reg = <0x660 0x24>;
-
-                       gpio-controller;
-                       #gpio-cells = <2>;
-
-                       ralink,ngpio = <12>;
-                       ralink,regs = [ 00 04 08 0c
-                                       10 14 18 1c
-                                       20 24 ];
-               };
-
                uartlite@c00 {
                        compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a";
                        reg = <0xc00 0x100>;
index 148a590bc4194784c369acef68c76b73d9c2240d..c18c9a84f4c4ee5794088f133332d3a391b834b7 100644 (file)
@@ -1,10 +1,8 @@
 /dts-v1/;
 
-/include/ "rt3050.dtsi"
+#include "rt3050.dtsi"
 
 / {
-       #address-cells = <1>;
-       #size-cells = <1>;
        compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc";
        model = "Ralink RT3052 evaluation board";
 
                reg = <0x0 0x2000000>;
        };
 
-       palmbus@10000000 {
-               sysc@0 {
-                       ralink,pinmmux = "uartlite", "spi";
-                       ralink,uartmux = "gpio";
-                       ralink,wdtmux = <0>;
-               };
+       chosen {
+               bootargs = "console=ttyS0,57600";
        };
 
        cfi@1f000000 {
diff --git a/arch/mips/ralink/dts/rt3883.dtsi b/arch/mips/ralink/dts/rt3883.dtsi
new file mode 100644 (file)
index 0000000..3b131dd
--- /dev/null
@@ -0,0 +1,58 @@
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "ralink,rt3883-soc";
+
+       cpus {
+               cpu@0 {
+                       compatible = "mips,mips74Kc";
+               };
+       };
+
+       cpuintc: cpuintc@0 {
+               #address-cells = <0>;
+               #interrupt-cells = <1>;
+               interrupt-controller;
+               compatible = "mti,cpu-interrupt-controller";
+       };
+
+       palmbus@10000000 {
+               compatible = "palmbus";
+               reg = <0x10000000 0x200000>;
+               ranges = <0x0 0x10000000 0x1FFFFF>;
+
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               sysc@0 {
+                       compatible = "ralink,rt3883-sysc", "ralink,rt3050-sysc";
+                       reg = <0x0 0x100>;
+               };
+
+               intc: intc@200 {
+                       compatible = "ralink,rt3883-intc", "ralink,rt2880-intc";
+                       reg = <0x200 0x100>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <2>;
+               };
+
+               memc@300 {
+                       compatible = "ralink,rt3883-memc", "ralink,rt3050-memc";
+                       reg = <0x300 0x100>;
+               };
+
+               uartlite@c00 {
+                       compatible = "ralink,rt3883-uart", "ralink,rt2880-uart", "ns16550a";
+                       reg = <0xc00 0x100>;
+
+                       interrupt-parent = <&intc>;
+                       interrupts = <12>;
+
+                       reg-shift = <2>;
+               };
+       };
+};
diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts
new file mode 100644 (file)
index 0000000..2fa6b33
--- /dev/null
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/include/ "rt3883.dtsi"
+
+/ {
+       compatible = "ralink,rt3883-eval-board", "ralink,rt3883-soc";
+       model = "Ralink RT3883 evaluation board";
+
+       memory@0 {
+               reg = <0x0 0x2000000>;
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,57600";
+       };
+};
index c4ae47eb24abba22390902acd90e8f0430325162..b46d0419d09b52a3864e7f3f1bc0e2767ed97d4f 100644 (file)
 
 #include <asm/addrspace.h>
 
+#ifdef CONFIG_SOC_RT288X
+#define EARLY_UART_BASE         0x300c00
+#else
 #define EARLY_UART_BASE         0x10000c00
+#endif
 
 #define UART_REG_RX             0x00
 #define UART_REG_TX             0x04
index 6d054c5ec9ab03bf83950d152fdc173dbd0d8b56..320b1f1043fff108854ac7bab56ae1cf37e5699f 100644 (file)
@@ -31,6 +31,7 @@
 #define INTC_INT_GLOBAL                BIT(31)
 
 #define RALINK_CPU_IRQ_INTC    (MIPS_CPU_IRQ_BASE + 2)
+#define RALINK_CPU_IRQ_PCI     (MIPS_CPU_IRQ_BASE + 4)
 #define RALINK_CPU_IRQ_FE      (MIPS_CPU_IRQ_BASE + 5)
 #define RALINK_CPU_IRQ_WIFI    (MIPS_CPU_IRQ_BASE + 6)
 #define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7)
@@ -104,6 +105,9 @@ asmlinkage void plat_irq_dispatch(void)
        else if (pending & STATUSF_IP6)
                do_IRQ(RALINK_CPU_IRQ_WIFI);
 
+       else if (pending & STATUSF_IP4)
+               do_IRQ(RALINK_CPU_IRQ_PCI);
+
        else if (pending & STATUSF_IP2)
                do_IRQ(RALINK_CPU_IRQ_INTC);
 
@@ -162,6 +166,7 @@ static int __init intc_of_init(struct device_node *node,
        irq_set_chained_handler(irq, ralink_intc_irq_handler);
        irq_set_handler_data(irq, domain);
 
+       /* tell the kernel which irq is used for performance monitoring */
        cp0_perfcount_irq = irq_create_mapping(domain, 9);
 
        return 0;
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
new file mode 100644 (file)
index 0000000..0018b1a
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/mt7620.h>
+
+#include "common.h"
+
+/* does the board have sdram or ddram */
+static int dram_type;
+
+/* the pll dividers */
+static u32 mt7620_clk_divider[] = { 2, 3, 4, 8 };
+
+static struct ralink_pinmux_grp mode_mux[] = {
+       {
+               .name = "i2c",
+               .mask = MT7620_GPIO_MODE_I2C,
+               .gpio_first = 1,
+               .gpio_last = 2,
+       }, {
+               .name = "spi",
+               .mask = MT7620_GPIO_MODE_SPI,
+               .gpio_first = 3,
+               .gpio_last = 6,
+       }, {
+               .name = "uartlite",
+               .mask = MT7620_GPIO_MODE_UART1,
+               .gpio_first = 15,
+               .gpio_last = 16,
+       }, {
+               .name = "wdt",
+               .mask = MT7620_GPIO_MODE_WDT,
+               .gpio_first = 17,
+               .gpio_last = 17,
+       }, {
+               .name = "mdio",
+               .mask = MT7620_GPIO_MODE_MDIO,
+               .gpio_first = 22,
+               .gpio_last = 23,
+       }, {
+               .name = "rgmii1",
+               .mask = MT7620_GPIO_MODE_RGMII1,
+               .gpio_first = 24,
+               .gpio_last = 35,
+       }, {
+               .name = "spi refclk",
+               .mask = MT7620_GPIO_MODE_SPI_REF_CLK,
+               .gpio_first = 37,
+               .gpio_last = 39,
+       }, {
+               .name = "jtag",
+               .mask = MT7620_GPIO_MODE_JTAG,
+               .gpio_first = 40,
+               .gpio_last = 44,
+       }, {
+               /* shared lines with jtag */
+               .name = "ephy",
+               .mask = MT7620_GPIO_MODE_EPHY,
+               .gpio_first = 40,
+               .gpio_last = 44,
+       }, {
+               .name = "nand",
+               .mask = MT7620_GPIO_MODE_JTAG,
+               .gpio_first = 45,
+               .gpio_last = 59,
+       }, {
+               .name = "rgmii2",
+               .mask = MT7620_GPIO_MODE_RGMII2,
+               .gpio_first = 60,
+               .gpio_last = 71,
+       }, {
+               .name = "wled",
+               .mask = MT7620_GPIO_MODE_WLED,
+               .gpio_first = 72,
+               .gpio_last = 72,
+       }, {0}
+};
+
+static struct ralink_pinmux_grp uart_mux[] = {
+       {
+               .name = "uartf",
+               .mask = MT7620_GPIO_MODE_UARTF,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "pcm uartf",
+               .mask = MT7620_GPIO_MODE_PCM_UARTF,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "pcm i2s",
+               .mask = MT7620_GPIO_MODE_PCM_I2S,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "i2s uartf",
+               .mask = MT7620_GPIO_MODE_I2S_UARTF,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "pcm gpio",
+               .mask = MT7620_GPIO_MODE_PCM_GPIO,
+               .gpio_first = 11,
+               .gpio_last = 14,
+       }, {
+               .name = "gpio uartf",
+               .mask = MT7620_GPIO_MODE_GPIO_UARTF,
+               .gpio_first = 7,
+               .gpio_last = 10,
+       }, {
+               .name = "gpio i2s",
+               .mask = MT7620_GPIO_MODE_GPIO_I2S,
+               .gpio_first = 7,
+               .gpio_last = 10,
+       }, {
+               .name = "gpio",
+               .mask = MT7620_GPIO_MODE_GPIO,
+       }, {0}
+};
+
+struct ralink_pinmux rt_gpio_pinmux = {
+       .mode = mode_mux,
+       .uart = uart_mux,
+       .uart_shift = MT7620_GPIO_MODE_UART0_SHIFT,
+       .uart_mask = MT7620_GPIO_MODE_UART0_MASK,
+};
+
+void __init ralink_clk_init(void)
+{
+       unsigned long cpu_rate, sys_rate;
+       u32 c0 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG0);
+       u32 c1 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG1);
+       u32 swconfig = (c0 >> CPLL_SW_CONFIG_SHIFT) & CPLL_SW_CONFIG_MASK;
+       u32 cpu_clk = (c1 >> CPLL_CPU_CLK_SHIFT) & CPLL_CPU_CLK_MASK;
+
+       if (cpu_clk) {
+               cpu_rate = 480000000;
+       } else if (!swconfig) {
+               cpu_rate = 600000000;
+       } else {
+               u32 m = (c0 >> CPLL_MULT_RATIO_SHIFT) & CPLL_MULT_RATIO;
+               u32 d = (c0 >> CPLL_DIV_RATIO_SHIFT) & CPLL_DIV_RATIO;
+
+               cpu_rate = ((40 * (m + 24)) / mt7620_clk_divider[d]) * 1000000;
+       }
+
+       if (dram_type == SYSCFG0_DRAM_TYPE_SDRAM)
+               sys_rate = cpu_rate / 4;
+       else
+               sys_rate = cpu_rate / 3;
+
+       ralink_clk_add("cpu", cpu_rate);
+       ralink_clk_add("10000100.timer", 40000000);
+       ralink_clk_add("10000500.uart", 40000000);
+       ralink_clk_add("10000c00.uartlite", 40000000);
+}
+
+void __init ralink_of_remap(void)
+{
+       rt_sysc_membase = plat_of_remap_node("ralink,mt7620a-sysc");
+       rt_memc_membase = plat_of_remap_node("ralink,mt7620a-memc");
+
+       if (!rt_sysc_membase || !rt_memc_membase)
+               panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE);
+       unsigned char *name = NULL;
+       u32 n0;
+       u32 n1;
+       u32 rev;
+       u32 cfg0;
+
+       n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+       n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+
+       if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) {
+               name = "MT7620N";
+               soc_info->compatible = "ralink,mt7620n-soc";
+       } else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) {
+               name = "MT7620A";
+               soc_info->compatible = "ralink,mt7620a-soc";
+       } else {
+               panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+       }
+
+       rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
+
+       snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+               "Ralink %s ver:%u eco:%u",
+               name,
+               (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
+               (rev & CHIP_REV_ECO_MASK));
+
+       cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0);
+       dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK;
+
+       switch (dram_type) {
+       case SYSCFG0_DRAM_TYPE_SDRAM:
+               soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN;
+               soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX;
+               break;
+
+       case SYSCFG0_DRAM_TYPE_DDR1:
+               soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
+               soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
+               break;
+
+       case SYSCFG0_DRAM_TYPE_DDR2:
+               soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
+               soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
+               break;
+       default:
+               BUG();
+       }
+       soc_info->mem_base = MT7620_DRAM_BASE;
+}
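
Plugging illustrative values into the CPLL arithmetic above: with the 40 MHz
reference clock, a multiplier field m = 6 and divider index d = 0 (divider 2)
give 40 * (6 + 24) / 2 = 600 MHz, matching the fixed 600 MHz fast path taken
when swconfig is zero; m = 12 with d = 1 (divider 3) gives 40 * 36 / 3 =
480 MHz, the same rate selected outright when the CPU clock bit is set. The
system clock then follows as cpu_rate / 4 for SDRAM boards and cpu_rate / 3
otherwise.
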
index 4165e70775be52dbf5df31d344204aa1b5ba40de..fb1569580def7c25a03a7f1348315c4e93e92e3c 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/init.h>
+#include <linux/sizes.h>
 #include <linux/of_fdt.h>
 #include <linux/kernel.h>
 #include <linux/bootmem.h>
@@ -85,6 +86,14 @@ void __init plat_mem_setup(void)
         * parsed resulting in our memory appearing
         */
        __dt_setup_arch(&__dtb_start);
+
+       if (soc_info.mem_size)
+               add_memory_region(soc_info.mem_base, soc_info.mem_size,
+                                 BOOT_MEM_RAM);
+       else
+               detect_memory_region(soc_info.mem_base,
+                                    soc_info.mem_size_min * SZ_1M,
+                                    soc_info.mem_size_max * SZ_1M);
 }
 
 static int __init plat_of_setup(void)
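
The mem_size_min/mem_size_max fields filled in by the SoC-specific
prom_soc_init() routines are expressed in megabytes, which is why
plat_mem_setup() scales them by SZ_1M before handing them to
detect_memory_region(); when the exact size is known, as rt5350_get_mem_size()
reads it from SYSCFG0, add_memory_region() is used directly. As a units check
with an illustrative size, a 32 MB part corresponds to 32 * SZ_1M = 0x2000000
bytes, the same value the eval-board device trees above declare in their
memory nodes.
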
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
new file mode 100644 (file)
index 0000000..f87de1a
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt288x.h>
+
+#include "common.h"
+
+static struct ralink_pinmux_grp mode_mux[] = {
+       {
+               .name = "i2c",
+               .mask = RT2880_GPIO_MODE_I2C,
+               .gpio_first = 1,
+               .gpio_last = 2,
+       }, {
+               .name = "spi",
+               .mask = RT2880_GPIO_MODE_SPI,
+               .gpio_first = 3,
+               .gpio_last = 6,
+       }, {
+               .name = "uartlite",
+               .mask = RT2880_GPIO_MODE_UART0,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "jtag",
+               .mask = RT2880_GPIO_MODE_JTAG,
+               .gpio_first = 17,
+               .gpio_last = 21,
+       }, {
+               .name = "mdio",
+               .mask = RT2880_GPIO_MODE_MDIO,
+               .gpio_first = 22,
+               .gpio_last = 23,
+       }, {
+               .name = "sdram",
+               .mask = RT2880_GPIO_MODE_SDRAM,
+               .gpio_first = 24,
+               .gpio_last = 39,
+       }, {
+               .name = "pci",
+               .mask = RT2880_GPIO_MODE_PCI,
+               .gpio_first = 40,
+               .gpio_last = 71,
+       }, {0}
+};
+
+static void rt288x_wdt_reset(void)
+{
+       u32 t;
+
+       /* enable WDT reset output on pin SRAM_CS_N */
+       t = rt_sysc_r32(SYSC_REG_CLKCFG);
+       t |= CLKCFG_SRAM_CS_N_WDT;
+       rt_sysc_w32(t, SYSC_REG_CLKCFG);
+}
+
+struct ralink_pinmux rt_gpio_pinmux = {
+       .mode = mode_mux,
+       .wdt_reset = rt288x_wdt_reset,
+};
+
+void __init ralink_clk_init(void)
+{
+       unsigned long cpu_rate;
+       u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+       t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK);
+
+       switch (t) {
+       case SYSTEM_CONFIG_CPUCLK_250:
+               cpu_rate = 250000000;
+               break;
+       case SYSTEM_CONFIG_CPUCLK_266:
+               cpu_rate = 266666667;
+               break;
+       case SYSTEM_CONFIG_CPUCLK_280:
+               cpu_rate = 280000000;
+               break;
+       case SYSTEM_CONFIG_CPUCLK_300:
+               cpu_rate = 300000000;
+               break;
+       }
+
+       ralink_clk_add("cpu", cpu_rate);
+       ralink_clk_add("300100.timer", cpu_rate / 2);
+       ralink_clk_add("300120.watchdog", cpu_rate / 2);
+       ralink_clk_add("300500.uart", cpu_rate / 2);
+       ralink_clk_add("300c00.uartlite", cpu_rate / 2);
+       ralink_clk_add("400000.ethernet", cpu_rate / 2);
+}
+
+void __init ralink_of_remap(void)
+{
+       rt_sysc_membase = plat_of_remap_node("ralink,rt2880-sysc");
+       rt_memc_membase = plat_of_remap_node("ralink,rt2880-memc");
+
+       if (!rt_sysc_membase || !rt_memc_membase)
+               panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE);
+       const char *name;
+       u32 n0;
+       u32 n1;
+       u32 id;
+
+       n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+       n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+       id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
+
+       if (n0 == RT2880_CHIP_NAME0 && n1 == RT2880_CHIP_NAME1) {
+               soc_info->compatible = "ralink,rt2880-soc";
+               name = "RT2880";
+       } else {
+               panic("rt288x: unknown SoC, n0:%08x n1:%08x", n0, n1);
+       }
+
+       snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+               "Ralink %s id:%u rev:%u",
+               name,
+               (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
+               (id & CHIP_ID_REV_MASK));
+
+       soc_info->mem_base = RT2880_SDRAM_BASE;
+       soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
+       soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
+}
index 0a4bbdcf59d97d9d82b01fc8eb8ab9fffd5eba91..ca7ee3a33790fc074ed4788f8fb1e1412babe187 100644 (file)
@@ -22,7 +22,7 @@
 
 enum rt305x_soc_type rt305x_soc;
 
-struct ralink_pinmux_grp mode_mux[] = {
+static struct ralink_pinmux_grp mode_mux[] = {
        {
                .name = "i2c",
                .mask = RT305X_GPIO_MODE_I2C,
@@ -61,7 +61,7 @@ struct ralink_pinmux_grp mode_mux[] = {
        }, {0}
 };
 
-struct ralink_pinmux_grp uart_mux[] = {
+static struct ralink_pinmux_grp uart_mux[] = {
        {
                .name = "uartf",
                .mask = RT305X_GPIO_MODE_UARTF,
@@ -91,19 +91,19 @@ struct ralink_pinmux_grp uart_mux[] = {
                .name = "gpio uartf",
                .mask = RT305X_GPIO_MODE_GPIO_UARTF,
                .gpio_first = RT305X_GPIO_7,
-               .gpio_last = RT305X_GPIO_14,
+               .gpio_last = RT305X_GPIO_10,
        }, {
                .name = "gpio i2s",
                .mask = RT305X_GPIO_MODE_GPIO_I2S,
                .gpio_first = RT305X_GPIO_7,
-               .gpio_last = RT305X_GPIO_14,
+               .gpio_last = RT305X_GPIO_10,
        }, {
                .name = "gpio",
                .mask = RT305X_GPIO_MODE_GPIO,
        }, {0}
 };
 
-void rt305x_wdt_reset(void)
+static void rt305x_wdt_reset(void)
 {
        u32 t;
 
@@ -114,16 +114,53 @@ void rt305x_wdt_reset(void)
        rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
 }
 
-struct ralink_pinmux gpio_pinmux = {
+struct ralink_pinmux rt_gpio_pinmux = {
        .mode = mode_mux,
        .uart = uart_mux,
        .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT,
+       .uart_mask = RT305X_GPIO_MODE_UART0_MASK,
        .wdt_reset = rt305x_wdt_reset,
 };
 
+static unsigned long rt5350_get_mem_size(void)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
+       unsigned long ret;
+       u32 t;
+
+       t = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG);
+       t = (t >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) &
+               RT5350_SYSCFG0_DRAM_SIZE_MASK;
+
+       switch (t) {
+       case RT5350_SYSCFG0_DRAM_SIZE_2M:
+               ret = 2;
+               break;
+       case RT5350_SYSCFG0_DRAM_SIZE_8M:
+               ret = 8;
+               break;
+       case RT5350_SYSCFG0_DRAM_SIZE_16M:
+               ret = 16;
+               break;
+       case RT5350_SYSCFG0_DRAM_SIZE_32M:
+               ret = 32;
+               break;
+       case RT5350_SYSCFG0_DRAM_SIZE_64M:
+               ret = 64;
+               break;
+       default:
+               panic("rt5350: invalid DRAM size: %u", t);
+               break;
+       }
+
+       return ret;
+}
+
 void __init ralink_clk_init(void)
 {
        unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate;
+       unsigned long wmac_rate = 40000000;
+
        u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
 
        if (soc_is_rt305x() || soc_is_rt3350()) {
@@ -176,11 +213,21 @@ void __init ralink_clk_init(void)
                BUG();
        }
 
+       if (soc_is_rt3352() || soc_is_rt5350()) {
+               u32 val = rt_sysc_r32(RT3352_SYSC_REG_SYSCFG0);
+
+               if (!(val & RT3352_CLKCFG0_XTAL_SEL))
+                       wmac_rate = 20000000;
+       }
+
        ralink_clk_add("cpu", cpu_rate);
        ralink_clk_add("10000b00.spi", sys_rate);
        ralink_clk_add("10000100.timer", wdt_rate);
+       ralink_clk_add("10000120.watchdog", wdt_rate);
        ralink_clk_add("10000500.uart", uart_rate);
        ralink_clk_add("10000c00.uartlite", uart_rate);
+       ralink_clk_add("10100000.ethernet", sys_rate);
+       ralink_clk_add("10180000.wmac", wmac_rate);
 }
 
 void __init ralink_of_remap(void)
@@ -239,4 +286,15 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
                name,
                (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
                (id & CHIP_ID_REV_MASK));
+
+       soc_info->mem_base = RT305X_SDRAM_BASE;
+       if (soc_is_rt5350()) {
+               soc_info->mem_size = rt5350_get_mem_size();
+       } else if (soc_is_rt305x() || soc_is_rt3350()) {
+               soc_info->mem_size_min = RT305X_MEM_SIZE_MIN;
+               soc_info->mem_size_max = RT305X_MEM_SIZE_MAX;
+       } else if (soc_is_rt3352()) {
+               soc_info->mem_size_min = RT3352_MEM_SIZE_MIN;
+               soc_info->mem_size_max = RT3352_MEM_SIZE_MAX;
+       }
 }
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
new file mode 100644 (file)
index 0000000..b474ac2
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt3883.h>
+
+#include "common.h"
+
+static struct ralink_pinmux_grp mode_mux[] = {
+       {
+               .name = "i2c",
+               .mask = RT3883_GPIO_MODE_I2C,
+               .gpio_first = RT3883_GPIO_I2C_SD,
+               .gpio_last = RT3883_GPIO_I2C_SCLK,
+       }, {
+               .name = "spi",
+               .mask = RT3883_GPIO_MODE_SPI,
+               .gpio_first = RT3883_GPIO_SPI_CS0,
+               .gpio_last = RT3883_GPIO_SPI_MISO,
+       }, {
+               .name = "uartlite",
+               .mask = RT3883_GPIO_MODE_UART1,
+               .gpio_first = RT3883_GPIO_UART1_TXD,
+               .gpio_last = RT3883_GPIO_UART1_RXD,
+       }, {
+               .name = "jtag",
+               .mask = RT3883_GPIO_MODE_JTAG,
+               .gpio_first = RT3883_GPIO_JTAG_TDO,
+               .gpio_last = RT3883_GPIO_JTAG_TCLK,
+       }, {
+               .name = "mdio",
+               .mask = RT3883_GPIO_MODE_MDIO,
+               .gpio_first = RT3883_GPIO_MDIO_MDC,
+               .gpio_last = RT3883_GPIO_MDIO_MDIO,
+       }, {
+               .name = "ge1",
+               .mask = RT3883_GPIO_MODE_GE1,
+               .gpio_first = RT3883_GPIO_GE1_TXD0,
+               .gpio_last = RT3883_GPIO_GE1_RXCLK,
+       }, {
+               .name = "ge2",
+               .mask = RT3883_GPIO_MODE_GE2,
+               .gpio_first = RT3883_GPIO_GE2_TXD0,
+               .gpio_last = RT3883_GPIO_GE2_RXCLK,
+       }, {
+               .name = "pci",
+               .mask = RT3883_GPIO_MODE_PCI,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "lna a",
+               .mask = RT3883_GPIO_MODE_LNA_A,
+               .gpio_first = RT3883_GPIO_LNA_PE_A0,
+               .gpio_last = RT3883_GPIO_LNA_PE_A2,
+       }, {
+               .name = "lna g",
+               .mask = RT3883_GPIO_MODE_LNA_G,
+               .gpio_first = RT3883_GPIO_LNA_PE_G0,
+               .gpio_last = RT3883_GPIO_LNA_PE_G2,
+       }, {0}
+};
+
+static struct ralink_pinmux_grp uart_mux[] = {
+       {
+               .name = "uartf",
+               .mask = RT3883_GPIO_MODE_UARTF,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "pcm uartf",
+               .mask = RT3883_GPIO_MODE_PCM_UARTF,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "pcm i2s",
+               .mask = RT3883_GPIO_MODE_PCM_I2S,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "i2s uartf",
+               .mask = RT3883_GPIO_MODE_I2S_UARTF,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "pcm gpio",
+               .mask = RT3883_GPIO_MODE_PCM_GPIO,
+               .gpio_first = RT3883_GPIO_11,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "gpio uartf",
+               .mask = RT3883_GPIO_MODE_GPIO_UARTF,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_10,
+       }, {
+               .name = "gpio i2s",
+               .mask = RT3883_GPIO_MODE_GPIO_I2S,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_10,
+       }, {
+               .name = "gpio",
+               .mask = RT3883_GPIO_MODE_GPIO,
+       }, {0}
+};
+
+static struct ralink_pinmux_grp pci_mux[] = {
+       {
+               .name = "pci-dev",
+               .mask = 0,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "pci-host2",
+               .mask = 1,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "pci-host1",
+               .mask = 2,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "pci-fnc",
+               .mask = 3,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "pci-gpio",
+               .mask = 7,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {0}
+};
+
+static void rt3883_wdt_reset(void)
+{
+       u32 t;
+
+       /* enable WDT reset output on GPIO 2 */
+       t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
+       t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT;
+       rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
+}
+
+struct ralink_pinmux rt_gpio_pinmux = {
+       .mode = mode_mux,
+       .uart = uart_mux,
+       .uart_shift = RT3883_GPIO_MODE_UART0_SHIFT,
+       .uart_mask = RT3883_GPIO_MODE_UART0_MASK,
+       .wdt_reset = rt3883_wdt_reset,
+       .pci = pci_mux,
+       .pci_shift = RT3883_GPIO_MODE_PCI_SHIFT,
+       .pci_mask = RT3883_GPIO_MODE_PCI_MASK,
+};
+
+void __init ralink_clk_init(void)
+{
+       unsigned long cpu_rate, sys_rate;
+       u32 syscfg0;
+       u32 clksel;
+       u32 ddr2;
+
+       syscfg0 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG0);
+       clksel = ((syscfg0 >> RT3883_SYSCFG0_CPUCLK_SHIFT) &
+               RT3883_SYSCFG0_CPUCLK_MASK);
+       ddr2 = syscfg0 & RT3883_SYSCFG0_DRAM_TYPE_DDR2;
+
+       switch (clksel) {
+       case RT3883_SYSCFG0_CPUCLK_250:
+               cpu_rate = 250000000;
+               sys_rate = (ddr2) ? 125000000 : 83000000;
+               break;
+       case RT3883_SYSCFG0_CPUCLK_384:
+               cpu_rate = 384000000;
+               sys_rate = (ddr2) ? 128000000 : 96000000;
+               break;
+       case RT3883_SYSCFG0_CPUCLK_480:
+               cpu_rate = 480000000;
+               sys_rate = (ddr2) ? 160000000 : 120000000;
+               break;
+       case RT3883_SYSCFG0_CPUCLK_500:
+               cpu_rate = 500000000;
+               sys_rate = (ddr2) ? 166000000 : 125000000;
+               break;
+       }
+
+       ralink_clk_add("cpu", cpu_rate);
+       ralink_clk_add("10000100.timer", sys_rate);
+       ralink_clk_add("10000120.watchdog", sys_rate);
+       ralink_clk_add("10000500.uart", 40000000);
+       ralink_clk_add("10000b00.spi", sys_rate);
+       ralink_clk_add("10000c00.uartlite", 40000000);
+       ralink_clk_add("10100000.ethernet", sys_rate);
+}
+
+void __init ralink_of_remap(void)
+{
+       rt_sysc_membase = plat_of_remap_node("ralink,rt3883-sysc");
+       rt_memc_membase = plat_of_remap_node("ralink,rt3883-memc");
+
+       if (!rt_sysc_membase || !rt_memc_membase)
+               panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE);
+       const char *name;
+       u32 n0;
+       u32 n1;
+       u32 id;
+
+       n0 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID0_3);
+       n1 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID4_7);
+       id = __raw_readl(sysc + RT3883_SYSC_REG_REVID);
+
+       if (n0 == RT3883_CHIP_NAME0 && n1 == RT3883_CHIP_NAME1) {
+               soc_info->compatible = "ralink,rt3883-soc";
+               name = "RT3883";
+       } else {
+               panic("rt3883: unknown SoC, n0:%08x n1:%08x", n0, n1);
+       }
+
+       snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+               "Ralink %s ver:%u eco:%u",
+               name,
+               (id >> RT3883_REVID_VER_ID_SHIFT) & RT3883_REVID_VER_ID_MASK,
+               (id & RT3883_REVID_ECO_ID_MASK));
+
+       soc_info->mem_base = RT3883_SDRAM_BASE;
+       soc_info->mem_size_min = RT3883_MEM_SIZE_MIN;
+       soc_info->mem_size_max = RT3883_MEM_SIZE_MAX;
+}
index 1d1919a44e88c350685866d8420b24fa9e168dc9..7a53b1e28a93dfa512da4e5a6dff6f49e9ae9baa 100644 (file)
@@ -114,7 +114,7 @@ void __init replicate_kernel_text()
  * data structures on the first couple of pages of the first slot of each
  * node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
  */
-pfn_t node_getfirstfree(cnodeid_t cnode)
+unsigned long node_getfirstfree(cnodeid_t cnode)
 {
        unsigned long loadbase = REP_BASE;
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
index 5f2bddb1860e11c1b32035593b3376b758c37570..1230f56429d7334ea418f41921e709a9b737586a 100644 (file)
@@ -255,14 +255,14 @@ static void __init dump_topology(void)
        }
 }
 
-static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
+static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot)
 {
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
 
-       return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
+       return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
 }
 
-static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
+static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
 {
        nasid_t nasid;
        lboard_t *brd;
@@ -353,7 +353,7 @@ static void __init mlreset(void)
 
 static void __init szmem(void)
 {
-       pfn_t slot_psize, slot0sz = 0, nodebytes;       /* Hack to detect problem configs */
+       unsigned long slot_psize, slot0sz = 0, nodebytes;       /* Hack to detect problem configs */
        int slot;
        cnodeid_t node;
 
@@ -390,10 +390,10 @@ static void __init szmem(void)
 
 static void __init node_mem_init(cnodeid_t node)
 {
-       pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
-       pfn_t slot_freepfn = node_getfirstfree(node);
+       unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
+       unsigned long slot_freepfn = node_getfirstfree(node);
        unsigned long bootmap_size;
-       pfn_t start_pfn, end_pfn;
+       unsigned long start_pfn, end_pfn;
 
        get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
@@ -467,7 +467,7 @@ void __init paging_init(void)
        pagetable_init();
 
        for_each_online_node(node) {
-               pfn_t start_pfn, end_pfn;
+               unsigned long start_pfn, end_pfn;
 
                get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
index fff58ac176f36328914c06801234f1f31c38389c..2e21b761cb9c771586797a50f5eda5666368a81e 100644 (file)
@@ -69,7 +69,7 @@ static void rt_set_mode(enum clock_event_mode mode,
        /* Nothing to do ...  */
 }
 
-int rt_timer_irq;
+unsigned int rt_timer_irq;
 
 static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
 static DEFINE_PER_CPU(char [11], hub_rt_name);
index f517e08e7f0d4727b771db06429875b140180722..a134ff4da12ee1e4ad39a78e514328f578573ca3 100644 (file)
@@ -59,11 +59,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
        current->comm, current->pid, r20);
     return -ENOSYS;
 }
-
-asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi,
-                                        u32 mask_lo, int fd,
-                                        const char __user *pathname)
-{
-       return sys_fanotify_mark(fan_fd, flags, ((u64)mask_hi << 32) | mask_lo,
-                                fd, pathname);
-}
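
This hunk and the powerpc, s390 and sparc hunks below drop the
per-architecture fanotify_mark compat wrappers in favour of the generic
compat_sys_fanotify_mark (the s390 syscall table below is switched over to
it). Each wrapper only rebuilt the 64-bit event mask from two 32-bit syscall
arguments; as a worked example with illustrative values, mask_hi = 0x00000001
and mask_lo = 0x00000008 combine as ((u64)0x1 << 32) | 0x8 =
0x0000000100000008.
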
index 3fe5259e2fea872be5da2e333427a935f8a012d0..915fbb4fc2fe4e7534dbf78eb39317013b03b014 100644 (file)
@@ -150,7 +150,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
        CURRENT_THREAD_INFO(r11, r1)
        ld      r10,TI_FLAGS(r11)
        andi.   r11,r10,_TIF_SYSCALL_T_OR_A
-       bne-    syscall_dotrace
+       bne     syscall_dotrace
 .Lsyscall_dotrace_cont:
        cmpldi  0,r0,NR_syscalls
        bge-    syscall_enosys
index cd6e19d263b32ca403f49fa4409ca3fab6f88411..8a285876aef8a8e667091b5560ec1aa8756f5bc7 100644 (file)
@@ -126,11 +126,3 @@ asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
 
        return sys_sync_file_range(fd, offset, nbytes, flags);
 }
-
-asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags,
-                                        unsigned mask_hi, unsigned mask_lo,
-                                        int dfd, const char __user *pathname)
-{
-       u64 mask = ((u64)mask_hi << 32) | mask_lo;
-       return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
-}
index 2d72d9e96c153b360ae5eb13d68c7d0370b9139e..9cb1b975b3532f3cc6237ab1cd168146f99a9904 100644 (file)
@@ -793,10 +793,6 @@ ENTRY(sys32_stime_wrapper)
        llgtr   %r2,%r2                 # long *
        jg      compat_sys_stime        # branch to system call
 
-ENTRY(sys32_sysctl_wrapper)
-       llgtr   %r2,%r2                 # struct compat_sysctl_args *
-       jg      compat_sys_sysctl
-
 ENTRY(sys32_fstat64_wrapper)
        llgfr   %r2,%r2                 # unsigned long
        llgtr   %r3,%r3                 # struct stat64 *
@@ -1349,15 +1345,6 @@ ENTRY(sys_fanotify_init_wrapper)
        llgfr   %r3,%r3                 # unsigned int
        jg      sys_fanotify_init       # branch to system call
 
-ENTRY(sys_fanotify_mark_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgfr   %r3,%r3                 # unsigned int
-       sllg    %r4,%r4,32              # get high word of 64bit mask
-       lr      %r4,%r5                 # get low word of 64bit mask
-       llgfr   %r5,%r6                 # unsigned int
-       llgt    %r6,164(%r15)           # char *
-       jg      sys_fanotify_mark       # branch to system call
-
 ENTRY(sys_prlimit64_wrapper)
        lgfr    %r2,%r2                 # pid_t
        llgfr   %r3,%r3                 # unsigned int
index 9f214e992eed2bbbdfbd2bb77a278a92de2dddf5..913410bd74a335c0693637ccb69e49cb89fd63c1 100644 (file)
@@ -157,7 +157,7 @@ SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper)               /* 145 */
 SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper)
 SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper)
 SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper)
-SYSCALL(sys_sysctl,sys_sysctl,sys32_sysctl_wrapper)
+SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl)
 SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper)               /* 150 */
 SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper)
 SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper)
@@ -341,7 +341,7 @@ SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev)
 SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
 SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
 SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper)
-SYSCALL(sys_fanotify_mark,sys_fanotify_mark,sys_fanotify_mark_wrapper)
+SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark)
 SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper)
 SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */
 SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at)
index 2e680b5245c9b4cc0b2b8941414e11ce540b424d..f7c72b6efc27556cd2e2de7a74539b1ba21831ce 100644 (file)
@@ -239,15 +239,6 @@ do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
        nop
        nop
 
-       .globl          sys32_fanotify_mark
-sys32_fanotify_mark:
-       sethi           %hi(sys_fanotify_mark), %g1
-       sllx            %o2, 32, %o2
-       or              %o2, %o3, %o2
-       mov             %o4, %o3
-       jmpl            %g1 + %lo(sys_fanotify_mark), %g0
-        mov            %o5, %o4
-
        .section        __ex_table,"a"
        .align          4
        .word           1b, __retl_efault, 2b, __retl_efault
index 8fd9320802153c5d4ece190b8dd535b8a3747679..6d81597064b6b5c7efa0861fc6c5c230e3c4984f 100644 (file)
@@ -84,7 +84,7 @@ sys_call_table32:
        .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/        .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
        .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
-/*330*/        .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
+/*330*/        .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
        .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module
 
index cfe79c9529b353f2d132cd004e336b578f2faee4..f9e86253931407bcf851c6e84f0c007480ce6cf1 100644 (file)
 #include <asm/syscalls.h>
 #include <asm/cacheflush.h>
 
-/* Note: used by the compat code even in 64-bit Linux. */
-SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
-               unsigned long, prot, unsigned long, flags,
-               unsigned long, fd, unsigned long, off_4k)
-{
-       return sys_mmap_pgoff(addr, len, prot, flags, fd,
-                             off_4k);
-}
-
 /* Provide the actual syscall number to call mapping. */
 #undef __SYSCALL
 #define __SYSCALL(nr, call)    [nr] = (call),
 
+#define sys_mmap2 sys_mmap_pgoff
 /* Note that we don't include <linux/unistd.h> but <asm/unistd.h> */
 void *sys_call_table[__NR_syscalls] = {
        [0 ... __NR_syscalls-1] = sys_ni_syscall,
index 4e4907c67d92d0940270de5c7c55c731681afaf5..8e0ceecdc95790d7a53eb3fe5bc1c3867bcb9e7f 100644 (file)
@@ -243,12 +243,3 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
        return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
                             ((u64)len_hi << 32) | len_lo);
 }
-
-asmlinkage long sys32_fanotify_mark(int fanotify_fd, unsigned int flags,
-                                   u32 mask_lo, u32 mask_hi,
-                                   int fd, const char  __user *pathname)
-{
-       return sys_fanotify_mark(fanotify_fd, flags,
-                                ((u64)mask_hi << 32) | mask_lo,
-                                fd, pathname);
-}
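All of the per-architecture fanotify_mark wrappers removed in this series did the same job: rebuild the 64-bit event mask from the two 32-bit halves a compat caller passes in, then call sys_fanotify_mark, which is why a single generic compat_sys_fanotify_mark can replace them. A stand-alone sketch of that split/recombine step; the helper and variable names are invented, and the real argument order differs per ABI, as the parisc and x86 versions above show:

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper, not part of any kernel API. */
static uint64_t combine_mask(uint32_t mask_hi, uint32_t mask_lo)
{
        return ((uint64_t)mask_hi << 32) | mask_lo;
}

int main(void)
{
        uint64_t mask = 0x123456789abcdef0ULL;
        uint32_t hi = mask >> 32, lo = (uint32_t)mask;

        /* A 32-bit caller passes the mask as two registers; the compat
         * entry point recombines them exactly like the removed wrappers. */
        printf("recombined: %#llx\n",
               (unsigned long long)combine_mask(hi, lo));
        return 0;
}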
index 0ef202e232d642e97f9bb2c5a389a35ef10cc99d..82c34ee25a651760c9950ce6c54625896fd9ea2f 100644 (file)
@@ -50,9 +50,6 @@ asmlinkage long sys32_fallocate(int, int, unsigned,
 asmlinkage long sys32_sigreturn(void);
 asmlinkage long sys32_rt_sigreturn(void);
 
-asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
-                                   const char __user *);
-
 #endif /* CONFIG_COMPAT */
 
 #endif /* _ASM_X86_SYS_IA32_H */
index 5f87b35fd2ef5d6a9cc4eb6e7cdf1c822d4e679d..2917a6452c496e20d254589f789e8ad0f499f6dc 100644 (file)
@@ -37,8 +37,8 @@ asmlinkage long sys_get_thread_area(struct user_desc __user *);
 unsigned long sys_sigreturn(void);
 
 /* kernel/vm86_32.c */
-int sys_vm86old(struct vm86_struct __user *);
-int sys_vm86(unsigned long, unsigned long);
+asmlinkage long sys_vm86old(struct vm86_struct __user *);
+asmlinkage long sys_vm86(unsigned long, unsigned long);
 
 #else /* CONFIG_X86_32 */
 
index b3a4866661c5c426efb0504294a3c7228ccd107a..2af848dfa75424b7798924dd9a91524af7f088e3 100644 (file)
 #define MSR_CORE_C6_RESIDENCY          0x000003fd
 #define MSR_CORE_C7_RESIDENCY          0x000003fe
 #define MSR_PKG_C2_RESIDENCY           0x0000060d
+#define MSR_PKG_C8_RESIDENCY           0x00000630
+#define MSR_PKG_C9_RESIDENCY           0x00000631
+#define MSR_PKG_C10_RESIDENCY          0x00000632
 
 /* Run Time Average Power Limiting (RAPL) Interface */
 
index 1cf5766dde169448e109101e981632bb6a6407a5..e8edcf52e06911fe5446e543f40368ea69114225 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/capability.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
+#include <linux/syscalls.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
@@ -48,7 +49,6 @@
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 #include <asm/irq.h>
-#include <asm/syscalls.h>
 
 /*
  * Known problems:
@@ -202,36 +202,32 @@ out:
 static int do_vm86_irq_handling(int subfunction, int irqnumber);
 static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
 
-int sys_vm86old(struct vm86_struct __user *v86)
+SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
 {
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
                                         * This remains on the stack until we
                                         * return to 32 bit user space.
                                         */
-       struct task_struct *tsk;
-       int tmp, ret = -EPERM;
+       struct task_struct *tsk = current;
+       int tmp;
 
-       tsk = current;
        if (tsk->thread.saved_sp0)
-               goto out;
+               return -EPERM;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, vm86plus) -
                                       sizeof(info.regs));
-       ret = -EFAULT;
        if (tmp)
-               goto out;
+               return -EFAULT;
        memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
        info.regs32 = current_pt_regs();
        tsk->thread.vm86_info = v86;
        do_sys_vm86(&info, tsk);
-       ret = 0;        /* we never return here */
-out:
-       return ret;
+       return 0;       /* we never return here */
 }
 
 
-int sys_vm86(unsigned long cmd, unsigned long arg)
+SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
 {
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
@@ -239,7 +235,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
                                         * return to 32 bit user space.
                                         */
        struct task_struct *tsk;
-       int tmp, ret;
+       int tmp;
        struct vm86plus_struct __user *v86;
 
        tsk = current;
@@ -248,8 +244,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
        case VM86_FREE_IRQ:
        case VM86_GET_IRQ_BITS:
        case VM86_GET_AND_RESET_IRQ:
-               ret = do_vm86_irq_handling(cmd, (int)arg);
-               goto out;
+               return do_vm86_irq_handling(cmd, (int)arg);
        case VM86_PLUS_INSTALL_CHECK:
                /*
                 * NOTE: on old vm86 stuff this will return the error
@@ -257,28 +252,23 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
                 *  interpreted as (invalid) address to vm86_struct.
                 *  So the installation check works.
                 */
-               ret = 0;
-               goto out;
+               return 0;
        }
 
        /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
-       ret = -EPERM;
        if (tsk->thread.saved_sp0)
-               goto out;
+               return -EPERM;
        v86 = (struct vm86plus_struct __user *)arg;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, regs32) -
                                       sizeof(info.regs));
-       ret = -EFAULT;
        if (tmp)
-               goto out;
+               return -EFAULT;
        info.regs32 = current_pt_regs();
        info.vm86plus.is_vm86pus = 1;
        tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
        do_sys_vm86(&info, tsk);
-       ret = 0;        /* we never return here */
-out:
-       return ret;
+       return 0;       /* we never return here */
 }
 
 
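The vm86 conversion above swaps the open-coded int sys_vm86*() definitions for SYSCALL_DEFINEn and replaces the ret/goto bookkeeping with direct returns. A minimal sketch of that shape, assuming a hypothetical syscall name (a real syscall would also need wiring into the syscall table):

#include <linux/syscalls.h>
#include <linux/errno.h>

/* Hypothetical syscall used only to show the shape of the conversion. */
SYSCALL_DEFINE2(demo_op, unsigned long, cmd, unsigned long, arg)
{
        if (!arg)
                return -EINVAL; /* early return replaces "ret = -EINVAL; goto out;" */

        /* ... do the actual work ... */
        return 0;
}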
index 8e517bba6a7c9434099139ecbb82a5017ca49cbf..8db0010ed150d5c1fa1d0b6aa96668587c2d1004 100644 (file)
@@ -60,6 +60,7 @@
 #define OpGS              25ull  /* GS */
 #define OpMem8            26ull  /* 8-bit zero extended memory operand */
 #define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
+#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
 
 #define OpBits             5  /* Width of operand field */
 #define OpMask             ((1ull << OpBits) - 1)
 #define SrcImmUByte (OpImmUByte << SrcShift)
 #define SrcImmU     (OpImmU << SrcShift)
 #define SrcSI       (OpSI << SrcShift)
+#define SrcXLat     (OpXLat << SrcShift)
 #define SrcImmFAddr (OpImmFAddr << SrcShift)
 #define SrcMemFAddr (OpMemFAddr << SrcShift)
 #define SrcAcc      (OpAcc << SrcShift)
@@ -533,6 +535,9 @@ FOP_SETCC(setle)
 FOP_SETCC(setnle)
 FOP_END;
 
+FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
+FOP_END;
+
 #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex)                 \
        do {                                                            \
                unsigned long _tmp;                                     \
@@ -2996,6 +3001,28 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
+static int em_aam(struct x86_emulate_ctxt *ctxt)
+{
+       u8 al, ah;
+
+       if (ctxt->src.val == 0)
+               return emulate_de(ctxt);
+
+       al = ctxt->dst.val & 0xff;
+       ah = al / ctxt->src.val;
+       al %= ctxt->src.val;
+
+       ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
+
+       /* Set PF, ZF, SF */
+       ctxt->src.type = OP_IMM;
+       ctxt->src.val = 0;
+       ctxt->src.bytes = 1;
+       fastop(ctxt, em_or);
+
+       return X86EMUL_CONTINUE;
+}
+
 static int em_aad(struct x86_emulate_ctxt *ctxt)
 {
        u8 al = ctxt->dst.val & 0xff;
@@ -3936,7 +3963,10 @@ static const struct opcode opcode_table[256] = {
        /* 0xD0 - 0xD7 */
        G(Src2One | ByteOp, group2), G(Src2One, group2),
        G(Src2CL | ByteOp, group2), G(Src2CL, group2),
-       N, I(DstAcc | SrcImmByte | No64, em_aad), N, N,
+       I(DstAcc | SrcImmUByte | No64, em_aam),
+       I(DstAcc | SrcImmUByte | No64, em_aad),
+       F(DstAcc | ByteOp | No64, em_salc),
+       I(DstAcc | SrcXLat | ByteOp, em_mov),
        /* 0xD8 - 0xDF */
        N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
        /* 0xE0 - 0xE7 */
@@ -4198,6 +4228,16 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                op->val = 0;
                op->count = 1;
                break;
+       case OpXLat:
+               op->type = OP_MEM;
+               op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+               op->addr.mem.ea =
+                       register_address(ctxt,
+                               reg_read(ctxt, VCPU_REGS_RBX) +
+                               (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
+               op->addr.mem.seg = seg_override(ctxt);
+               op->val = 0;
+               break;
        case OpImmFAddr:
                op->type = OP_IMM;
                op->addr.mem.ea = ctxt->_eip;
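For reference, em_aam above follows the documented AAM behaviour: the immediate byte is a divisor, the quotient of AL goes to AH, the remainder stays in AL, and a zero immediate raises #DE. A small user-space model of just that arithmetic, with plain variables standing in for the registers:

#include <stdint.h>
#include <stdio.h>

/* Model of AAM: divide AL by an 8-bit immediate.
 * Returns 0 on success, -1 to stand in for the #DE the emulator injects. */
static int aam(uint8_t *al, uint8_t *ah, uint8_t imm)
{
        if (imm == 0)
                return -1;      /* emulate_de() in the real code */
        *ah = *al / imm;
        *al = *al % imm;
        return 0;
}

int main(void)
{
        uint8_t al = 123, ah = 0;

        if (aam(&al, &ah, 10) == 0)
                printf("AH=%u AL=%u\n", ah, al);        /* AH=12 AL=3 */
        return 0;
}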
index 25a791ed21c88057697eb06339f7066d8cfa63e2..260a9193955538b4fea743045b2f964b2736b24e 100644 (file)
@@ -5434,6 +5434,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                        return 0;
                }
 
+               if (vcpu->arch.halt_request) {
+                       vcpu->arch.halt_request = 0;
+                       ret = kvm_emulate_halt(vcpu);
+                       goto out;
+               }
+
                if (signal_pending(current))
                        goto out;
                if (need_resched())
index 05a8b1a2300df0997d116e753506bf6e6ffa1fe2..094b5d96ab1468c1875a2a2a5e682245f2909db9 100644 (file)
@@ -555,6 +555,25 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
+static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+       if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
+                       !vcpu->guest_xcr0_loaded) {
+               /* kvm_set_xcr() also depends on this */
+               xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+               vcpu->guest_xcr0_loaded = 1;
+       }
+}
+
+static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->guest_xcr0_loaded) {
+               if (vcpu->arch.xcr0 != host_xcr0)
+                       xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+               vcpu->guest_xcr0_loaded = 0;
+       }
+}
+
 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
        u64 xcr0;
@@ -571,8 +590,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
                return 1;
        if (xcr0 & ~host_xcr0)
                return 1;
+       kvm_put_guest_xcr0(vcpu);
        vcpu->arch.xcr0 = xcr0;
-       vcpu->guest_xcr0_loaded = 0;
        return 0;
 }
 
@@ -5614,25 +5633,6 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
        }
 }
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
-{
-       if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
-                       !vcpu->guest_xcr0_loaded) {
-               /* kvm_set_xcr() also depends on this */
-               xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
-               vcpu->guest_xcr0_loaded = 1;
-       }
-}
-
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
-{
-       if (vcpu->guest_xcr0_loaded) {
-               if (vcpu->arch.xcr0 != host_xcr0)
-                       xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
-               vcpu->guest_xcr0_loaded = 0;
-       }
-}
-
 static void process_nmi(struct kvm_vcpu *vcpu)
 {
        unsigned limit = 2;
index 4a9be6ddf05437e66ab8c94862a9b3e8a30e4373..48e8461057ba9346442aa5c72d6772b190dcf4c9 100644 (file)
@@ -295,11 +295,10 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
                        int pos;
                        u32 table_offset, bir;
 
-                       pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-
+                       pos = dev->msix_cap;
                        pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
                                              &table_offset);
-                       bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
+                       bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
 
                        map_irq.table_base = pci_resource_start(dev, bir);
                        map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
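The Xen MSI hunk reads the MSI-X capability through the cached dev->msix_cap and masks the table dword with PCI_MSIX_TABLE_BIR rather than the old flags mask. A hedged sketch of decoding that dword; PCI_MSIX_TABLE_OFFSET is assumed here to be the companion mask for the offset bits and is not part of this patch:

#include <linux/pci.h>

/* Sketch only: split the MSI-X table dword into BIR and offset.
 * 'dev' is a struct pci_dev whose msix_cap was set by the PCI core. */
static void demo_msix_table_location(struct pci_dev *dev)
{
        u32 table_offset;
        u8 bir;
        resource_size_t base;

        pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
                              &table_offset);
        bir  = (u8)(table_offset & PCI_MSIX_TABLE_BIR);         /* which BAR */
        base = pci_resource_start(dev, bir) +
               (table_offset & PCI_MSIX_TABLE_OFFSET);          /* assumed mask */
        (void)base;
}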

index d0d59bfbccce01e2e019ffd64b7d57e3ddad9622..aabfb8380a1c6cf91e5af8aaed9dd30bd088d9de 100644 (file)
 336    i386    perf_event_open         sys_perf_event_open
 337    i386    recvmmsg                sys_recvmmsg                    compat_sys_recvmmsg
 338    i386    fanotify_init           sys_fanotify_init
-339    i386    fanotify_mark           sys_fanotify_mark               sys32_fanotify_mark
+339    i386    fanotify_mark           sys_fanotify_mark               compat_sys_fanotify_mark
 340    i386    prlimit64               sys_prlimit64
 341    i386    name_to_handle_at       sys_name_to_handle_at
 342    i386    open_by_handle_at       sys_open_by_handle_at           compat_sys_open_by_handle_at
index 53d4f680c9b59f498d41148325cd5a96ef1723e3..a492be2635ac048a527d856b796431ccac72d9fd 100644 (file)
 
 EXPORT_SYMBOL_GPL(hypercall_page);
 
+/*
+ * Pointer to the xen_vcpu_info structure or
+ * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
+ * and xen_vcpu_setup for details. By default it points to shared_info->vcpu_info
+ * but if the hypervisor supports VCPUOP_register_vcpu_info then it can point
+ * to xen_vcpu_info. The pointer is used in __xen_evtchn_do_upcall to
+ * acknowledge pending events.
+ * Also more subtly it is used by the patched version of irq enable/disable
+ * e.g. xen_irq_enable_direct and xen_iret in PV mode.
+ *
+ * The desire to be able to do those mask/unmask operations as a single
+ * instruction by using the per-cpu offset held in %gs is the real reason
+ * vcpu info is in a per-cpu pointer and the original reason for this
+ * hypercall.
+ *
+ */
 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+
+/*
+ * Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info
+ * hypercall. This can be used both in PV and PVHVM mode. The structure
+ * overrides the default per_cpu(xen_vcpu, cpu) value.
+ */
 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
@@ -157,6 +179,21 @@ static void xen_vcpu_setup(int cpu)
 
        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
 
+       /*
+        * This path is called twice on PVHVM - first during bootup via
+        * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
+        * hotplugged: cpu_up -> xen_hvm_cpu_notify.
+        * As we can only do the VCPUOP_register_vcpu_info once, let's
+        * not overwrite its result.
+        *
+        * For PV it is called during restore (xen_vcpu_restore) and bootup
+        * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
+        * use this function.
+        */
+       if (xen_hvm_domain()) {
+               if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
+                       return;
+       }
        if (cpu < MAX_VIRT_CPUS)
                per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
 
@@ -172,7 +209,12 @@ static void xen_vcpu_setup(int cpu)
 
        /* Check to see if the hypervisor will put the vcpu_info
           structure where we want it, which allows direct access via
-          a percpu-variable. */
+          a percpu-variable.
+          N.B. This hypercall can _only_ be called once per CPU. Subsequent
+          calls will error out with -EINVAL. This is because the
+          hypervisor has no unregister variant and this hypercall does not
+          allow overwriting info.mfn and info.offset.
+        */
        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
 
        if (err) {
@@ -387,6 +429,9 @@ static void __init xen_init_cpuid_mask(void)
                cpuid_leaf1_edx_mask &=
                        ~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
                          (1 << X86_FEATURE_ACPI));  /* disable ACPI */
+
+       cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));
+
        ax = 1;
        cx = 0;
        xen_cpuid(&ax, &bx, &cx, &dx);
@@ -1603,6 +1648,9 @@ void __ref xen_hvm_init_shared_info(void)
         * online but xen_hvm_init_shared_info is run at resume time too and
         * in that case multiple vcpus might be online. */
        for_each_online_cpu(cpu) {
+               /* Leave it to be NULL. */
+               if (cpu >= MAX_VIRT_CPUS)
+                       continue;
                per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
        }
 }
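Condensing the xen_vcpu_setup changes above: VCPUOP_register_vcpu_info only succeeds once per VCPU, so the PVHVM path must bail out if the per-cpu pointer already refers to xen_vcpu_info. A trimmed sketch of that control flow using the same identifiers as the patch (not a drop-in; includes and error handling omitted):

/* Condensed sketch of the one-shot registration guard added above. */
static void demo_vcpu_setup(int cpu)
{
        /* PVHVM: this runs both at boot and on CPU hotplug; if this CPU
         * already points at its xen_vcpu_info page, the hypercall has
         * been made and must not be repeated. */
        if (xen_hvm_domain() &&
            per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
                return;

        if (cpu < MAX_VIRT_CPUS)
                per_cpu(xen_vcpu, cpu) =
                        &HYPERVISOR_shared_info->vcpu_info[cpu];

        /* VCPUOP_register_vcpu_info: only the first call per CPU succeeds;
         * later calls return -EINVAL because there is no unregister. */
}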
index 8b54603ce81613fee6783a6d3869b0021878feb4..3002ec1bb71a27d193ce4b1e041f8b3a3c5b49fa 100644 (file)
@@ -364,7 +364,7 @@ void __cpuinit xen_init_lock_cpu(int cpu)
        int irq;
        const char *name;
 
-       WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
+       WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
             cpu, per_cpu(lock_kicker_irq, cpu));
 
        /*
index 3a8f7e6db2950fc175759e558c7663ae84d3702f..e7e92429d10f9e36d80d48e5f8a0004e4eb0284e 100644 (file)
@@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
 {
        struct drm_crtc *crtc;
 
+       /* Locking is currently fubar in the panic handler. */
+       if (oops_in_progress)
+               return;
+
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
                WARN_ON(!mutex_is_locked(&crtc->mutex));
 
@@ -246,6 +250,7 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
        else
                return "unknown";
 }
+EXPORT_SYMBOL(drm_get_connector_status_name);
 
 /**
  * drm_mode_object_get - allocate a new modeset identifier
index e974f9309b72697d3c9918ab979c8469d548c25d..ed1334e27c33283442fceb604892e55666bdfe2c 100644 (file)
@@ -121,6 +121,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
                connector->helper_private;
        int count = 0;
        int mode_flags = 0;
+       bool verbose_prune = true;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
                        drm_get_connector_name(connector));
@@ -149,6 +150,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
                        connector->base.id, drm_get_connector_name(connector));
                drm_mode_connector_update_edid_property(connector, NULL);
+               verbose_prune = false;
                goto prune;
        }
 
@@ -182,7 +184,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
        }
 
 prune:
-       drm_mode_prune_invalid(dev, &connector->modes, true);
+       drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
 
        if (list_empty(&connector->modes))
                return 0;
@@ -1005,12 +1007,20 @@ static void output_poll_execute(struct work_struct *work)
                        continue;
 
                connector->status = connector->funcs->detect(connector, false);
-               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
-                             connector->base.id,
-                             drm_get_connector_name(connector),
-                             old_status, connector->status);
-               if (old_status != connector->status)
+               if (old_status != connector->status) {
+                       const char *old, *new;
+
+                       old = drm_get_connector_status_name(old_status);
+                       new = drm_get_connector_status_name(connector->status);
+
+                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
+                                     "status updated from %s to %s\n",
+                                     connector->base.id,
+                                     drm_get_connector_name(connector),
+                                     old, new);
+
                        changed = true;
+               }
        }
 
        mutex_unlock(&dev->mode_config.mutex);
@@ -1083,10 +1093,11 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
                old_status = connector->status;
 
                connector->status = connector->funcs->detect(connector, false);
-               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
                              connector->base.id,
                              drm_get_connector_name(connector),
-                             old_status, connector->status);
+                             drm_get_connector_status_name(old_status),
+                             drm_get_connector_status_name(connector->status));
                if (old_status != connector->status)
                        changed = true;
        }
index 8d4f29075af5bd24f0b03461cac4d35c511530dd..9cc247f555028f41046cbacd57a6b31cdf8f5e20 100644 (file)
@@ -57,7 +57,7 @@ static int drm_version(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 
 #define DRM_IOCTL_DEF(ioctl, _func, _flags) \
-       [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+       [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
 
 /** Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
@@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp,
 {
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev;
-       const struct drm_ioctl_desc *ioctl;
+       const struct drm_ioctl_desc *ioctl = NULL;
        drm_ioctl_t *func;
        unsigned int nr = DRM_IOCTL_NR(cmd);
        int retcode = -EINVAL;
@@ -392,11 +392,6 @@ long drm_ioctl(struct file *filp,
        atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
        ++file_priv->ioctl_count;
 
-       DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
-                 task_pid_nr(current), cmd, nr,
-                 (long)old_encode_dev(file_priv->minor->device),
-                 file_priv->authenticated);
-
        if ((nr >= DRM_CORE_IOCTL_COUNT) &&
            ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
                goto err_i1;
@@ -417,6 +412,11 @@ long drm_ioctl(struct file *filp,
        } else
                goto err_i1;
 
+       DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+                 task_pid_nr(current),
+                 (long)old_encode_dev(file_priv->minor->device),
+                 file_priv->authenticated, ioctl->name);
+
        /* Do not trust userspace, use our own definition */
        func = ioctl->func;
        /* is there a local override? */
@@ -471,6 +471,12 @@ long drm_ioctl(struct file *filp,
        }
 
       err_i1:
+       if (!ioctl)
+               DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+                         task_pid_nr(current),
+                         (long)old_encode_dev(file_priv->minor->device),
+                         file_priv->authenticated, cmd, nr);
+
        if (kdata != stack_kdata)
                kfree(kdata);
        atomic_dec(&dev->ioctl_count);
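Recording .name = #ioctl in the table is what lets the debug lines above print the ioctl's symbolic name instead of raw cmd/nr values. A small stand-alone illustration of the same stringify-into-a-table trick, with invented structure and macro names:

#include <stdio.h>

struct demo_ioctl_desc {
        unsigned int cmd;
        const char *name;
};

/* '#x' turns the macro argument into a string literal at compile time. */
#define DEMO_IOCTL_DEF(x) { .cmd = (x), .name = #x }

enum { DEMO_IOCTL_VERSION = 0x01, DEMO_IOCTL_GET_MAGIC = 0x02 };

static const struct demo_ioctl_desc demo_ioctls[] = {
        DEMO_IOCTL_DEF(DEMO_IOCTL_VERSION),
        DEMO_IOCTL_DEF(DEMO_IOCTL_GET_MAGIC),
};

int main(void)
{
        for (unsigned i = 0; i < sizeof(demo_ioctls) / sizeof(demo_ioctls[0]); i++)
                printf("cmd=%#x  %s\n", demo_ioctls[i].cmd, demo_ioctls[i].name);
        return 0;
}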
index 48c52f7df4e63affac527e1f86e06188a03e2021..0cfb60f5476655edc097ca71c648d11544f3357e 100644 (file)
@@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
                         struct i2c_adapter *adap,
                         const struct i2c_board_info *info)
 {
-       char modalias[sizeof(I2C_MODULE_PREFIX)
-                     + I2C_NAME_SIZE];
        struct module *module = NULL;
        struct i2c_client *client;
        struct drm_i2c_encoder_driver *encoder_drv;
        int err = 0;
 
-       snprintf(modalias, sizeof(modalias),
-                "%s%s", I2C_MODULE_PREFIX, info->type);
-       request_module(modalias);
+       request_module("%s%s", I2C_MODULE_PREFIX, info->type);
 
        client = i2c_new_device(adap, info);
        if (!client) {
index db1e2d6f90d7221d713c04b70560d85a08ad6732..07cf99cc886283aedf9feee690d71ee6fbe2fb77 100644 (file)
@@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 EXPORT_SYMBOL(drm_mm_debug_table);
 
 #if defined(CONFIG_DEBUG_FS)
-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
 {
-       struct drm_mm_node *entry;
-       unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;
 
-       hole_start = drm_mm_hole_node_start(&mm->head_node);
-       hole_end = drm_mm_hole_node_end(&mm->head_node);
-       hole_size = hole_end - hole_start;
-       if (hole_size)
+       if (entry->hole_follows) {
+               hole_start = drm_mm_hole_node_start(entry);
+               hole_end = drm_mm_hole_node_end(entry);
+               hole_size = hole_end - hole_start;
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                hole_start, hole_end, hole_size);
-       total_free += hole_size;
+               return hole_size;
+       }
+
+       return 0;
+}
+
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+{
+       struct drm_mm_node *entry;
+       unsigned long total_used = 0, total_free = 0, total = 0;
+
+       total_free += drm_mm_dump_hole(m, &mm->head_node);
 
        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
                                entry->start, entry->start + entry->size,
                                entry->size);
                total_used += entry->size;
-               if (entry->hole_follows) {
-                       hole_start = drm_mm_hole_node_start(entry);
-                       hole_end = drm_mm_hole_node_end(entry);
-                       hole_size = hole_end - hole_start;
-                       seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
-                                       hole_start, hole_end, hole_size);
-                       total_free += hole_size;
-               }
+               total_free += drm_mm_dump_hole(m, entry);
        }
        total = total_free + total_used;
 
index faa79df0264802e985719da65be37ee369a87a69..a371ff865a887755b81de57663a819429a34ed4c 100644 (file)
@@ -1143,6 +1143,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
                                was_digit = false;
                        } else
                                goto done;
+                       break;
                case '0' ... '9':
                        was_digit = true;
                        break;
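The drm_modes.c change adds the break that was missing before the '0' ... '9' case, stopping the previous case from falling through into the digit handling. A tiny stand-alone reproduction of that bug class (names invented; the case range is the same GCC/Clang extension the driver uses):

#include <stdio.h>
#include <stdbool.h>

static bool classify(char c)
{
        bool was_digit = false;

        switch (c) {
        case 'x':
                was_digit = false;
                /* Without a break here, 'x' would fall through into the
                 * digit case below and flip was_digit back to true. */
                break;
        case '0' ... '9':       /* GCC/Clang case-range extension */
                was_digit = true;
                break;
        default:
                break;
        }
        return was_digit;
}

int main(void)
{
        printf("'x' digit? %d\n", classify('x'));       /* 0 */
        printf("'7' digit? %d\n", classify('7'));       /* 1 */
        return 0;
}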
index 6be940effefd1cf3dd69262e826c9cc999e4c26c..6165535d15f07af8969a74f075994d31bfbae47e 100644 (file)
@@ -1045,6 +1045,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        if (timeout) {
                struct timespec sleep_time = timespec_sub(now, before);
                *timeout = timespec_sub(*timeout, sleep_time);
+               if (!timespec_valid(timeout)) /* i.e. negative time remains */
+                       set_normalized_timespec(timeout, 0, 0);
        }
 
        switch (end) {
@@ -1053,8 +1055,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        case -ERESTARTSYS: /* Signal */
                return (int)end;
        case 0: /* Timeout */
-               if (timeout)
-                       set_normalized_timespec(timeout, 0, 0);
                return -ETIME;
        default: /* Completed */
                WARN_ON(end < 0); /* We're not aware of other errors */
@@ -2377,10 +2377,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        mutex_unlock(&dev->struct_mutex);
 
        ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
-       if (timeout) {
-               WARN_ON(!timespec_valid(timeout));
+       if (timeout)
                args->timeout_ns = timespec_to_ns(timeout);
-       }
        return ret;
 
 out:
index dca614de71b6a34189bb40fa0a99cc0f60fcb6ec..bdb0d7717bc77937dce3c4de563f1e7066c7bb2e 100644 (file)
@@ -709,15 +709,6 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
        return snb_gmch_ctl << 25; /* 32 MB units */
 }
 
-static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
-{
-       static const int stolen_decoder[] = {
-               0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
-       snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
-       snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
-       return stolen_decoder[snb_gmch_ctl] << 20;
-}
-
 static int gen6_gmch_probe(struct drm_device *dev,
                           size_t *gtt_total,
                           size_t *stolen,
@@ -747,11 +738,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
 
-       if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
-               *stolen = gen7_get_stolen_size(snb_gmch_ctl);
-       else
-               *stolen = gen6_get_stolen_size(snb_gmch_ctl);
-
+       *stolen = gen6_get_stolen_size(snb_gmch_ctl);
        *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
 
        /* For Modern GENs the PTEs and register space are split in the BAR */
index 83f9c26e1adbf7b9451e02303545eb1b076b03fa..2d6b62e42daf324478ea64bc49e6e722330c3e2b 100644 (file)
@@ -46,8 +46,6 @@
 #define    SNB_GMCH_GGMS_MASK  0x3
 #define    SNB_GMCH_GMS_SHIFT   3 /* Graphics Mode Select */
 #define    SNB_GMCH_GMS_MASK    0x1f
-#define    IVB_GMCH_GMS_SHIFT   4
-#define    IVB_GMCH_GMS_MASK    0xf
 
 
 /* PCI config space */
index 26a0a570f92e0eba28fa443b28176252a7bb577d..fb961bb81903c95550289845a846bd0308c448d7 100644 (file)
@@ -1265,6 +1265,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
+               if (port != PORT_A)
+                       intel_dp_stop_link_train(intel_dp);
        }
 }
 
@@ -1326,6 +1328,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
        } else if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+               if (port == PORT_A)
+                       intel_dp_stop_link_train(intel_dp);
+
                ironlake_edp_backlight_on(intel_dp);
        }
 
index fb2fbc1e08b9ba0048b17c02badb8fd8ed29d089..3d704b706a8d42d974e70f4fa3e095fbcbcdb910 100644 (file)
@@ -702,6 +702,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
+       if (is_edp(intel_dp) && dev_priv->edp.bpp)
+               bpp = min_t(int, bpp, dev_priv->edp.bpp);
+
        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(target_clock, bpp);
 
@@ -739,6 +742,7 @@ found:
        intel_dp->link_bw = bws[clock];
        intel_dp->lane_count = lane_count;
        adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+       pipe_config->pipe_bpp = bpp;
        pipe_config->pixel_target_clock = target_clock;
 
        DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
@@ -751,20 +755,6 @@ found:
                               target_clock, adjusted_mode->clock,
                               &pipe_config->dp_m_n);
 
-       /*
-        * XXX: We have a strange regression where using the vbt edp bpp value
-        * for the link bw computation results in black screens, the panel only
-        * works when we do the computation at the usual 24bpp (but still
-        * requires us to use 18bpp). Until that's fully debugged, stay
-        * bug-for-bug compatible with the old code.
-        */
-       if (is_edp(intel_dp) && dev_priv->edp.bpp) {
-               DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n",
-                             bpp, dev_priv->edp.bpp);
-               bpp = min_t(int, bpp, dev_priv->edp.bpp);
-       }
-       pipe_config->pipe_bpp = bpp;
-
        return true;
 }
 
@@ -1389,6 +1379,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        ironlake_edp_panel_on(intel_dp);
        ironlake_edp_panel_vdd_off(intel_dp, true);
        intel_dp_complete_link_train(intel_dp);
+       intel_dp_stop_link_train(intel_dp);
        ironlake_edp_backlight_on(intel_dp);
 }
 
@@ -1711,10 +1702,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
        int ret;
-       uint32_t temp;
 
        if (HAS_DDI(dev)) {
-               temp = I915_READ(DP_TP_CTL(port));
+               uint32_t temp = I915_READ(DP_TP_CTL(port));
 
                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -1724,18 +1714,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
-
-                       if (port != PORT_A) {
-                               temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
-                               I915_WRITE(DP_TP_CTL(port), temp);
-
-                               if (wait_for((I915_READ(DP_TP_STATUS(port)) &
-                                             DP_TP_STATUS_IDLE_DONE), 1))
-                                       DRM_ERROR("Timed out waiting for DP idle patterns\n");
-
-                               temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
-                       }
-
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
 
                        break;
@@ -1811,6 +1789,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
        return true;
 }
 
+static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum port port = intel_dig_port->port;
+       uint32_t val;
+
+       if (!HAS_DDI(dev))
+               return;
+
+       val = I915_READ(DP_TP_CTL(port));
+       val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+       val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+       I915_WRITE(DP_TP_CTL(port), val);
+
+       /*
+        * On PORT_A we can have only eDP in SST mode. There the only reason
+        * we need to set idle transmission mode is to work around a HW issue
+        * where we enable the pipe while not in idle link-training mode.
+        * In this case there is a requirement to wait for a minimum number of
+        * idle patterns to be sent.
+        */
+       if (port == PORT_A)
+               return;
+
+       if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
+                    1))
+               DRM_ERROR("Timed out waiting for DP idle patterns\n");
+}
+
 /* Enable corresponding port and start training pattern 1 */
 void
 intel_dp_start_link_train(struct intel_dp *intel_dp)
@@ -1953,10 +1962,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                ++tries;
        }
 
+       intel_dp_set_idle_link_train(intel_dp);
+
+       intel_dp->DP = DP;
+
        if (channel_eq)
                DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
 
-       intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
+}
+
+void intel_dp_stop_link_train(struct intel_dp *intel_dp)
+{
+       intel_dp_set_link_train(intel_dp, intel_dp->DP,
+                               DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
@@ -2164,6 +2182,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
                              drm_get_encoder_name(&intel_encoder->base));
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
+               intel_dp_stop_link_train(intel_dp);
        }
 }
 
index b5b6d19e6dd3ff30293799980d1efafef6540812..624a9e6b8d718ebe64a3aa41019137b673b3b296 100644 (file)
@@ -499,6 +499,7 @@ extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
 extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
 extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
index 0e19e575a1b41e456cbe3e82877502fa1906c875..6b7c3ca2c035e5514c2c333877966ff1cb3d3cfd 100644 (file)
@@ -262,10 +262,22 @@ void intel_fbdev_fini(struct drm_device *dev)
 void intel_fbdev_set_suspend(struct drm_device *dev, int state)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       if (!dev_priv->fbdev)
+       struct intel_fbdev *ifbdev = dev_priv->fbdev;
+       struct fb_info *info;
+
+       if (!ifbdev)
                return;
 
-       fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
+       info = ifbdev->helper.fbdev;
+
+       /* On resume from hibernation: If the object is shmemfs backed, it has
+        * been restored from swap. If the object is stolen however, it will be
+        * full of whatever garbage was left in there.
+        */
+       if (!state && ifbdev->ifb.obj->stolen)
+               memset_io(info->screen_base, 0, info->screen_size);
+
+       fb_set_suspend(info, state);
 }
 
 MODULE_LICENSE("GPL and additional rights");
index de3b0dc5658bcf0c84081ec23ee1cc0add9d93c5..aa01128ff192cc6c5860c881a9aa8a71cb756fd7 100644 (file)
@@ -1301,17 +1301,17 @@ static void valleyview_update_wm(struct drm_device *dev)
 
        vlv_update_drain_latency(dev);
 
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &valleyview_wm_info, latency_ns,
                            &valleyview_cursor_wm_info, latency_ns,
                            &planea_wm, &cursora_wm))
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &valleyview_wm_info, latency_ns,
                            &valleyview_cursor_wm_info, latency_ns,
                            &planeb_wm, &cursorb_wm))
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
 
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1357,17 +1357,17 @@ static void g4x_update_wm(struct drm_device *dev)
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;
 
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planea_wm, &cursora_wm))
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planeb_wm, &cursorb_wm))
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
 
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1716,7 +1716,7 @@ static void ironlake_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
@@ -1727,10 +1727,10 @@ static void ironlake_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
        }
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
@@ -1741,7 +1741,7 @@ static void ironlake_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
        }
 
        /*
@@ -1801,7 +1801,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1812,10 +1812,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
        }
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1826,7 +1826,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
        }
 
        /*
@@ -1904,7 +1904,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1915,10 +1915,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
        }
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1929,10 +1929,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
        }
 
-       if (g4x_compute_wm0(dev, 2,
+       if (g4x_compute_wm0(dev, PIPE_C,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1943,7 +1943,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 3;
+               enabled |= 1 << PIPE_C;
        }
 
        /*
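The watermark hunks switch the 'enabled' bookkeeping to per-pipe bits; notably the old Ivybridge code did enabled |= 3 for pipe C, which marks pipes A and B instead. A stand-alone check of why that matters once the single-plane path computes ffs(enabled) - 1, with user-space ffs() standing in for the kernel helper:

#include <stdio.h>
#include <strings.h>    /* ffs() */

enum pipe { PIPE_A, PIPE_B, PIPE_C };

int main(void)
{
        unsigned int old_style = 0, new_style = 0;

        /* Only pipe C is active. */
        old_style |= 3;                 /* old code: sets the A and B bits */
        new_style |= 1 << PIPE_C;       /* fixed code: sets only the C bit */

        printf("old picks pipe %d, new picks pipe %d\n",
               ffs(old_style) - 1, ffs(new_style) - 1);         /* 0 vs 2 */
        return 0;
}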
index f9889658329bfd1aa613cc595a143511e9f94386..77b8a45fb10a3c872aa86304f22abf16266a0249 100644 (file)
@@ -46,29 +46,26 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
 
 static inline void mga_wait_vsync(struct mga_device *mdev)
 {
-       unsigned int count = 0;
+       unsigned long timeout = jiffies + HZ/10;
        unsigned int status = 0;
 
        do {
                status = RREG32(MGAREG_Status);
-               count++;
-       } while ((status & 0x08) && (count < 250000));
-       count = 0;
+       } while ((status & 0x08) && time_before(jiffies, timeout));
+       timeout = jiffies + HZ/10;
        status = 0;
        do {
                status = RREG32(MGAREG_Status);
-               count++;
-       } while (!(status & 0x08) && (count < 250000));
+       } while (!(status & 0x08) && time_before(jiffies, timeout));
 }
 
 static inline void mga_wait_busy(struct mga_device *mdev)
 {
-       unsigned int count = 0;
+       unsigned long timeout = jiffies + HZ;
        unsigned int status = 0;
        do {
                status = RREG8(MGAREG_Status + 2);
-               count++;
-       } while ((status & 0x01) && (count < 500000));
+       } while ((status & 0x01) && time_before(jiffies, timeout));
 }
 
 /*
@@ -189,12 +186,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-               WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+               WREG8(DAC_DATA, tmp);
 
                WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_REMHEADCTL_CLKDIS;
-               WREG_DAC(MGA1064_REMHEADCTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                /* select PLL Set C */
                tmp = RREG8(MGAREG_MEM_MISC_READ);
@@ -204,7 +201,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                udelay(500);
 
@@ -212,7 +209,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_VREF_CTL);
                tmp = RREG8(DAC_DATA);
                tmp &= ~0x04;
-               WREG_DAC(MGA1064_VREF_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                udelay(50);
 
@@ -236,13 +233,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
                tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
                tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
-               WREG_DAC(MGA1064_REMHEADCTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                /* reset dotclock rate bit */
                WREG8(MGAREG_SEQ_INDEX, 1);
@@ -253,7 +250,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                vcount = RREG8(MGAREG_VCOUNT);
 
@@ -318,7 +315,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-       WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+       WREG8(DAC_DATA, tmp);
 
        tmp = RREG8(MGAREG_MEM_MISC_READ);
        tmp |= 0x3 << 2;
@@ -326,12 +323,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 
        WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
        tmp = RREG8(DAC_DATA);
-       WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
+       WREG8(DAC_DATA, tmp & ~0x40);
 
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
        WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
@@ -342,7 +339,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        udelay(500);
 
@@ -350,11 +347,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
        tmp = RREG8(DAC_DATA);
        tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
        tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
        tmp = RREG8(DAC_DATA);
-       WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
+       WREG8(DAC_DATA, tmp | 0x40);
 
        tmp = RREG8(MGAREG_MEM_MISC_READ);
        tmp |= (0x3 << 2);
@@ -363,7 +360,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        return 0;
 }
@@ -416,7 +413,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-               WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+               WREG8(DAC_DATA, tmp);
 
                tmp = RREG8(MGAREG_MEM_MISC_READ);
                tmp |= 0x3 << 2;
@@ -425,7 +422,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                udelay(500);
 
@@ -439,13 +436,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
                tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
                tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                vcount = RREG8(MGAREG_VCOUNT);
 
@@ -515,12 +512,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-       WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+       WREG8(DAC_DATA, tmp);
 
        WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
        tmp = RREG8(DAC_DATA);
        tmp |= MGA1064_REMHEADCTL_CLKDIS;
-       WREG_DAC(MGA1064_REMHEADCTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        tmp = RREG8(MGAREG_MEM_MISC_READ);
        tmp |= (0x3<<2) | 0xc0;
@@ -530,7 +527,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
        tmp = RREG8(DAC_DATA);
        tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
        tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        udelay(500);
 
@@ -657,12 +654,26 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
        WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
 }
 
-
+/*
+   This is how the framebuffer base address is stored in g200 cards:
+   * Assume @offset is the gpu_addr variable of the framebuffer object
+   * Then addr is the number of _pixels_ (not bytes) from the start of
+     VRAM to the first pixel we want to display. (divided by 2 for 32bit
+     framebuffers)
+   * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
+   addr<20> -> CRTCEXT0<6>
+   addr<19-16> -> CRTCEXT0<3-0>
+   addr<15-8> -> CRTCC<7-0>
+   addr<7-0> -> CRTCD<7-0>
+   CRTCEXT0 has to be programmed last to trigger an update and make the
+   new addr variable take effect.
+ */
 void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
 {
        struct mga_device *mdev = crtc->dev->dev_private;
        u32 addr;
        int count;
+       u8 crtcext0;
 
        while (RREG8(0x1fda) & 0x08);
        while (!(RREG8(0x1fda) & 0x08));
@@ -670,10 +681,17 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
        count = RREG8(MGAREG_VCOUNT) + 2;
        while (RREG8(MGAREG_VCOUNT) < count);
 
-       addr = offset >> 2;
+       WREG8(MGAREG_CRTCEXT_INDEX, 0);
+       crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
+       crtcext0 &= 0xB0;
+       addr = offset / 8;
+       /* Can't store addresses any higher than that...
+          but we also don't have more than 16MB of memory, so it should be fine. */
+       WARN_ON(addr > 0x1fffff);
+       crtcext0 |= (!!(addr & (1<<20)))<<6;
        WREG_CRT(0x0d, (u8)(addr & 0xff));
        WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
-       WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
+       WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
 }
 
 
@@ -829,11 +847,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 
 
        for (i = 0; i < sizeof(dacvalue); i++) {
-               if ((i <= 0x03) ||
-                   (i == 0x07) ||
-                   (i == 0x0b) ||
-                   (i == 0x0f) ||
-                   ((i >= 0x13) && (i <= 0x17)) ||
+               if ((i <= 0x17) ||
                    (i == 0x1b) ||
                    (i == 0x1c) ||
                    ((i >= 0x1f) && (i <= 0x29)) ||
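
The bit packing described in the new mga_set_start_address() comment can be checked in isolation. Below is a minimal userspace sketch, not driver code; the register names are only labels and the offset value is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned offset = 0x00900000;           /* hypothetical gpu_addr, in bytes */
        uint32_t addr = offset / 8;             /* same scaling as the new code */

        uint8_t crtcd = addr & 0xff;                      /* addr<7-0>   -> CRTCD<7-0>    */
        uint8_t crtcc = (addr >> 8) & 0xff;               /* addr<15-8>  -> CRTCC<7-0>    */
        uint8_t ext0  = ((addr >> 16) & 0xf) |            /* addr<19-16> -> CRTCEXT0<3-0> */
                        ((!!(addr & (1u << 20))) << 6);   /* addr<20>    -> CRTCEXT0<6>   */

        printf("CRTCD=0x%02x CRTCC=0x%02x CRTCEXT0(addr bits)=0x%02x\n",
               crtcd, crtcc, ext0);
        return 0;
}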
index 6961bbeab3edb0c9a75767865c87bc2a6a6b4386..264f550999406f7b924be6634917e3826e80a161 100644 (file)
@@ -1685,6 +1685,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
@@ -2341,7 +2342,7 @@ struct hid_device *hid_allocate_device(void)
 
        init_waitqueue_head(&hdev->debug_wait);
        INIT_LIST_HEAD(&hdev->debug_list);
-       mutex_init(&hdev->debug_list_lock);
+       spin_lock_init(&hdev->debug_list_lock);
        sema_init(&hdev->driver_lock, 1);
        sema_init(&hdev->driver_input_lock, 1);
 
index 7e56cb3855e329696abea711445bffe588d101f6..8453214ec3767d6c55aefe2bb99466d426904403 100644 (file)
@@ -579,15 +579,16 @@ void hid_debug_event(struct hid_device *hdev, char *buf)
 {
        int i;
        struct hid_debug_list *list;
+       unsigned long flags;
 
-       mutex_lock(&hdev->debug_list_lock);
+       spin_lock_irqsave(&hdev->debug_list_lock, flags);
        list_for_each_entry(list, &hdev->debug_list, node) {
                for (i = 0; i < strlen(buf); i++)
                        list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
                                buf[i];
                list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
         }
-       mutex_unlock(&hdev->debug_list_lock);
+       spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
 
        wake_up_interruptible(&hdev->debug_wait);
 }
@@ -977,6 +978,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
 {
        int err = 0;
        struct hid_debug_list *list;
+       unsigned long flags;
 
        if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) {
                err = -ENOMEM;
@@ -992,9 +994,9 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
        file->private_data = list;
        mutex_init(&list->read_mutex);
 
-       mutex_lock(&list->hdev->debug_list_lock);
+       spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
        list_add_tail(&list->node, &list->hdev->debug_list);
-       mutex_unlock(&list->hdev->debug_list_lock);
+       spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
 
 out:
        return err;
@@ -1088,10 +1090,11 @@ static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
 static int hid_debug_events_release(struct inode *inode, struct file *file)
 {
        struct hid_debug_list *list = file->private_data;
+       unsigned long flags;
 
-       mutex_lock(&list->hdev->debug_list_lock);
+       spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
        list_del(&list->node);
-       mutex_unlock(&list->hdev->debug_list_lock);
+       spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
        kfree(list->hid_debug_buf);
        kfree(list);
 
index 9b0efb0083feaf4a1f258226aad0426c7b961721..d1649119211277275840a1bee984ed93de28c025 100644 (file)
@@ -18,7 +18,8 @@
 
 #include "hid-ids.h"
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+    (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
 #define SRWS1_NUMBER_LEDS 15
 struct steelseries_srws1_data {
        __u16 led_state;
@@ -107,7 +108,8 @@ static __u8 steelseries_srws1_rdesc_fixed[] = {
 0xC0                /*  End Collection                      */
 };
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+    (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
 static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds)
 {
        struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
@@ -370,7 +372,8 @@ MODULE_DEVICE_TABLE(hid, steelseries_srws1_devices);
 static struct hid_driver steelseries_srws1_driver = {
        .name = "steelseries_srws1",
        .id_table = steelseries_srws1_devices,
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+    (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
        .probe = steelseries_srws1_probe,
        .remove = steelseries_srws1_remove,
 #endif
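
The widened guard above presumably matters because a built-in driver cannot call symbols exported by a module: with CONFIG_LEDS_CLASS=m and CONFIG_HID_STEELSERIES=y the old defined()-based check still compiled the LED hooks in. A small standalone model of the new condition's truth table (it mimics the semantics only, not the kernel's kconfig.h macros):

#include <stdbool.h>
#include <stdio.h>

/* IS_BUILTIN(CONFIG_LEDS_CLASS) ||
 * (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) */
static bool leds_hooks_enabled(bool leds_builtin, bool leds_module,
                               bool steelseries_module)
{
        return leds_builtin || (leds_module && steelseries_module);
}

int main(void)
{
        /* the case the patch closes: LEDS_CLASS=m, HID_STEELSERIES=y */
        printf("leds=m, steelseries=y -> %d (old check: 1)\n",
               leds_hooks_enabled(false, true, false));
        printf("leds=m, steelseries=m -> %d\n", leds_hooks_enabled(false, true, true));
        printf("leds=y, steelseries=y -> %d\n", leds_hooks_enabled(true, false, false));
        return 0;
}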
index 0e8fab1913dfd8737ede3b7d9f332aee0861169e..fa6964d8681a0d126fcf7c4845896b3f06298f8f 100644 (file)
@@ -272,6 +272,27 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
                .exit_latency = 166,
                .target_residency = 500,
                .enter = &intel_idle },
+       {
+               .name = "C8-HSW",
+               .desc = "MWAIT 0x40",
+               .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 300,
+               .target_residency = 900,
+               .enter = &intel_idle },
+       {
+               .name = "C9-HSW",
+               .desc = "MWAIT 0x50",
+               .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 600,
+               .target_residency = 1800,
+               .enter = &intel_idle },
+       {
+               .name = "C10-HSW",
+               .desc = "MWAIT 0x60",
+               .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 2600,
+               .target_residency = 7700,
+               .enter = &intel_idle },
        {
                .enter = NULL }
 };
index 699187ab380099a32d926a83dde84d96e7124e85..5b9ac32801c7604b25c64804fb888aa1ebbbceab 100644 (file)
@@ -1002,6 +1002,7 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
                        kill_guest(&lg->cpus[0],
                                   "Cannot populate switcher mapping");
                }
+               lg->pgdirs[pgdir].last_host_cpu = -1;
        }
 }
 
index c6083132c4b8ccaf21c7addb61cf8596165bf915..0387e05cdb98b9708bde55143cd8a5cba853a2fa 100644 (file)
@@ -319,6 +319,9 @@ static void __cache_size_refresh(void)
 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
                               enum data_mode *data_mode)
 {
+       unsigned noio_flag;
+       void *ptr;
+
        if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
                *data_mode = DATA_MODE_SLAB;
                return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
        }
 
        *data_mode = DATA_MODE_VMALLOC;
-       return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+       /*
+        * __vmalloc allocates the data pages and auxiliary structures with
+        * gfp_flags that were specified, but pagetables are always allocated
+        * with GFP_KERNEL, no matter what was specified as gfp_mask.
+        *
+        * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
+        * all allocations done by this process (including pagetables) are done
+        * as if GFP_NOIO was specified.
+        */
+
+       if (gfp_mask & __GFP_NORETRY)
+               noio_flag = memalloc_noio_save();
+
+       ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+       if (gfp_mask & __GFP_NORETRY)
+               memalloc_noio_restore(noio_flag);
+
+       return ptr;
 }
 
 /*
index 83e995fece88c1330335c85a23ef42b2ed54a1f4..1af7255bbffb547aa67db8b1f38bf99b2ff87094 100644 (file)
@@ -1044,7 +1044,7 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
                                 struct dm_cache_statistics *stats)
 {
        down_read(&cmd->root_lock);
-       memcpy(stats, &cmd->stats, sizeof(*stats));
+       *stats = cmd->stats;
        up_read(&cmd->root_lock);
 }
 
@@ -1052,7 +1052,7 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
                                 struct dm_cache_statistics *stats)
 {
        down_write(&cmd->root_lock);
-       memcpy(&cmd->stats, stats, sizeof(*stats));
+       cmd->stats = *stats;
        up_write(&cmd->root_lock);
 }
 
index 558bdfdabf5f2da39e5bef487dc5008a2399b659..33369ca9614f978ae0481e0d79677c980660f42d 100644 (file)
@@ -130,8 +130,8 @@ struct dm_cache_policy {
         *
         * Must not block.
         *
-        * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK
-        * would be typical).
+        * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
+        * (-EWOULDBLOCK would be typical).
         */
        int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
 
index 10744091e6cabb5f6bb55e59709eb4ba0350a98b..df44b60e66f289880c177e98d78f1aa04ce08e7a 100644 (file)
@@ -205,7 +205,7 @@ struct per_bio_data {
        /*
         * writethrough fields.  These MUST remain at the end of this
         * structure and the 'cache' member must be the first as it
-        * is used to determine the offsetof the writethrough fields.
+        * is used to determine the offset of the writethrough fields.
         */
        struct cache *cache;
        dm_cblock_t cblock;
@@ -393,7 +393,7 @@ static int get_cell(struct cache *cache,
        return r;
 }
 
- /*----------------------------------------------------------------*/
+/*----------------------------------------------------------------*/
 
 static bool is_dirty(struct cache *cache, dm_cblock_t b)
 {
@@ -419,6 +419,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
 }
 
 /*----------------------------------------------------------------*/
+
 static bool block_size_is_power_of_two(struct cache *cache)
 {
        return cache->sectors_per_block_shift >= 0;
@@ -667,7 +668,7 @@ static void writethrough_endio(struct bio *bio, int err)
 
        /*
         * We can't issue this bio directly, since we're in interrupt
-        * context.  So it get's put on a bio list for processing by the
+        * context.  So it gets put on a bio list for processing by the
         * worker thread.
         */
        defer_writethrough_bio(pb->cache, bio);
@@ -1445,6 +1446,7 @@ static void do_worker(struct work_struct *ws)
 static void do_waker(struct work_struct *ws)
 {
        struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
+       policy_tick(cache->policy);
        wake_worker(cache);
        queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
 }
@@ -1809,7 +1811,37 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
 
 static struct kmem_cache *migration_cache;
 
-static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
+#define NOT_CORE_OPTION 1
+
+static int process_config_option(struct cache *cache, const char *key, const char *value)
+{
+       unsigned long tmp;
+
+       if (!strcasecmp(key, "migration_threshold")) {
+               if (kstrtoul(value, 10, &tmp))
+                       return -EINVAL;
+
+               cache->migration_threshold = tmp;
+               return 0;
+       }
+
+       return NOT_CORE_OPTION;
+}
+
+static int set_config_value(struct cache *cache, const char *key, const char *value)
+{
+       int r = process_config_option(cache, key, value);
+
+       if (r == NOT_CORE_OPTION)
+               r = policy_set_config_value(cache->policy, key, value);
+
+       if (r)
+               DMWARN("bad config value for %s: %s", key, value);
+
+       return r;
+}
+
+static int set_config_values(struct cache *cache, int argc, const char **argv)
 {
        int r = 0;
 
@@ -1819,12 +1851,9 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
        }
 
        while (argc) {
-               r = policy_set_config_value(p, argv[0], argv[1]);
-               if (r) {
-                       DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
-                              argv[0], argv[1]);
-                       return r;
-               }
+               r = set_config_value(cache, argv[0], argv[1]);
+               if (r)
+                       break;
 
                argc -= 2;
                argv += 2;
@@ -1836,8 +1865,6 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
                               char **error)
 {
-       int r;
-
        cache->policy = dm_cache_policy_create(ca->policy_name,
                                               cache->cache_size,
                                               cache->origin_sectors,
@@ -1847,14 +1874,7 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
                return -ENOMEM;
        }
 
-       r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
-       if (r) {
-               *error = "Error setting cache policy's config values";
-               dm_cache_policy_destroy(cache->policy);
-               cache->policy = NULL;
-       }
-
-       return r;
+       return 0;
 }
 
 /*
@@ -1886,7 +1906,7 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
        return discard_block_size;
 }
 
-#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
+#define DEFAULT_MIGRATION_THRESHOLD 2048
 
 static int cache_create(struct cache_args *ca, struct cache **result)
 {
@@ -1911,7 +1931,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        ti->discards_supported = true;
        ti->discard_zeroes_data_unsupported = true;
 
-       memcpy(&cache->features, &ca->features, sizeof(cache->features));
+       cache->features = ca->features;
        ti->per_bio_data_size = get_per_bio_data_size(cache);
 
        cache->callbacks.congested_fn = cache_is_congested;
@@ -1948,7 +1968,15 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        r = create_cache_policy(cache, ca, error);
        if (r)
                goto bad;
+
        cache->policy_nr_args = ca->policy_argc;
+       cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
+
+       r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
+       if (r) {
+               *error = "Error setting cache policy's config values";
+               goto bad;
+       }
 
        cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
                                     ca->block_size, may_format,
@@ -1967,10 +1995,10 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        INIT_LIST_HEAD(&cache->quiesced_migrations);
        INIT_LIST_HEAD(&cache->completed_migrations);
        INIT_LIST_HEAD(&cache->need_commit_migrations);
-       cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
        atomic_set(&cache->nr_migrations, 0);
        init_waitqueue_head(&cache->migration_wait);
 
+       r = -ENOMEM;
        cache->nr_dirty = 0;
        cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
        if (!cache->dirty_bitset) {
@@ -2517,23 +2545,6 @@ err:
        DMEMIT("Error");
 }
 
-#define NOT_CORE_OPTION 1
-
-static int process_config_option(struct cache *cache, char **argv)
-{
-       unsigned long tmp;
-
-       if (!strcasecmp(argv[0], "migration_threshold")) {
-               if (kstrtoul(argv[1], 10, &tmp))
-                       return -EINVAL;
-
-               cache->migration_threshold = tmp;
-               return 0;
-       }
-
-       return NOT_CORE_OPTION;
-}
-
 /*
  * Supports <key> <value>.
  *
@@ -2541,17 +2552,12 @@ static int process_config_option(struct cache *cache, char **argv)
  */
 static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
 {
-       int r;
        struct cache *cache = ti->private;
 
        if (argc != 2)
                return -EINVAL;
 
-       r = process_config_option(cache, argv);
-       if (r == NOT_CORE_OPTION)
-               return policy_set_config_value(cache->policy, argv[0], argv[1]);
-
-       return r;
+       return set_config_value(cache, argv[0], argv[1]);
 }
 
 static int cache_iterate_devices(struct dm_target *ti,
@@ -2609,7 +2615,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
        .name = "cache",
-       .version = {1, 1, 0},
+       .version = {1, 1, 1},
        .module = THIS_MODULE,
        .ctr = cache_ctr,
        .dtr = cache_dtr,
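
The refactor above routes both constructor arguments and runtime messages through a single set_config_value() helper: core options such as migration_threshold are tried first, and anything unrecognised falls through to the cache policy. A userspace model of that dispatch follows; migration_threshold is the real key from the diff, the other key and the policy stub are placeholders.

#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

#define NOT_CORE_OPTION 1

static unsigned long migration_threshold = 2048;        /* new default, in sectors */

static int process_config_option(const char *key, const char *value)
{
        if (!strcasecmp(key, "migration_threshold")) {
                migration_threshold = strtoul(value, NULL, 10);
                return 0;
        }
        return NOT_CORE_OPTION;
}

/* stand-in for the policy's own option parser */
static int policy_set_config_value(const char *key, const char *value)
{
        printf("policy option: %s=%s\n", key, value);
        return 0;
}

static int set_config_value(const char *key, const char *value)
{
        int r = process_config_option(key, value);

        if (r == NOT_CORE_OPTION)
                r = policy_set_config_value(key, value);
        if (r)
                fprintf(stderr, "bad config value for %s: %s\n", key, value);
        return r;
}

int main(void)
{
        set_config_value("migration_threshold", "204800");     /* core option */
        set_config_value("some_policy_option", "512");          /* falls through */
        printf("migration_threshold is now %lu sectors\n", migration_threshold);
        return 0;
}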
index 51bb81676be37d2ff4520557c70f83bda488845a..bdf26f5bd326011595f8b9801cc2e408af6842cc 100644 (file)
@@ -907,6 +907,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
+       ti->num_write_same_bios = 1;
 
        return 0;
 
index c0e07026a8d136f084958800192c12236ffd37e2..c434e5aab2dfc9e6a63ca7700e5ac1c1deacd025 100644 (file)
@@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
+               r = -ENOMEM;
                goto bad_pending_pool;
        }
 
index ea5e878a30b93b1f974d449738fc46b55d5eaeba..d907ca6227cec3519e46ac95e146656b342125c2 100644 (file)
@@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
 static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
        struct stripe_c *sc;
-       sector_t width;
+       sector_t width, tmp_len;
        uint32_t stripes;
        uint32_t chunk_size;
        int r;
@@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        width = ti->len;
-       if (sector_div(width, chunk_size)) {
+       if (sector_div(width, stripes)) {
                ti->error = "Target length not divisible by "
-                   "chunk size";
+                   "number of stripes";
                return -EINVAL;
        }
 
-       if (sector_div(width, stripes)) {
+       tmp_len = width;
+       if (sector_div(tmp_len, chunk_size)) {
                ti->error = "Target length not divisible by "
-                   "number of stripes";
+                   "chunk size";
                return -EINVAL;
        }
 
index e50dad0c65f4759d535b5cf81b0258a3b27066b5..1ff252ab7d46a1ed55fb98a93426acfb764998dd 100644 (file)
@@ -1442,7 +1442,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
                        return false;
 
                if (!ti->type->iterate_devices ||
-                   !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
+                   ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
                        return false;
        }
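
The dropped '!' above fixes an inverted test: iterate_devices() returns nonzero when the callback flags any underlying device, and only in that case should WRITE SAME support be ruled out. A standalone model of the corrected logic, with a stub iterator in place of the real device-mapper one:

#include <stdbool.h>
#include <stdio.h>

/* stub: nonzero if the callback returns nonzero for any device */
static int iterate_devices(const bool *not_capable, int n, int (*fn)(bool))
{
        for (int i = 0; i < n; i++)
                if (fn(not_capable[i]))
                        return 1;
        return 0;
}

static int device_not_write_same_capable(bool flagged)
{
        return flagged ? 1 : 0;
}

int main(void)
{
        bool all_ok[]  = { false, false };      /* every device supports WRITE SAME */
        bool one_bad[] = { false, true };       /* one device does not */

        /* corrected check: supported only when the iterator flags nothing */
        printf("all capable   -> supported=%d\n",
               !iterate_devices(all_ok, 2, device_not_write_same_capable));
        printf("one incapable -> supported=%d\n",
               !iterate_devices(one_bad, 2, device_not_write_same_capable));
        return 0;
}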
 
index 00cee02f8fc9b299788941c9b4147abdfc56c83c..60bce435f4fa1443c2994bd483e70ea096c7aa92 100644 (file)
@@ -1645,12 +1645,12 @@ int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
        return r;
 }
 
-static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
 {
        int r;
        dm_block_t old_count;
 
-       r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count);
+       r = dm_sm_get_nr_blocks(sm, &old_count);
        if (r)
                return r;
 
@@ -1658,11 +1658,11 @@ static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
                return 0;
 
        if (new_count < old_count) {
-               DMERR("cannot reduce size of data device");
+               DMERR("cannot reduce size of space map");
                return -EINVAL;
        }
 
-       return dm_sm_extend(pmd->data_sm, new_count - old_count);
+       return dm_sm_extend(sm, new_count - old_count);
 }
 
 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
@@ -1671,7 +1671,19 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
 
        down_write(&pmd->root_lock);
        if (!pmd->fail_io)
-               r = __resize_data_dev(pmd, new_count);
+               r = __resize_space_map(pmd->data_sm, new_count);
+       up_write(&pmd->root_lock);
+
+       return r;
+}
+
+int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+{
+       int r = -EINVAL;
+
+       down_write(&pmd->root_lock);
+       if (!pmd->fail_io)
+               r = __resize_space_map(pmd->metadata_sm, new_count);
        up_write(&pmd->root_lock);
 
        return r;
@@ -1684,3 +1696,17 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
        dm_bm_set_read_only(pmd->bm);
        up_write(&pmd->root_lock);
 }
+
+int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+                                       dm_block_t threshold,
+                                       dm_sm_threshold_fn fn,
+                                       void *context)
+{
+       int r;
+
+       down_write(&pmd->root_lock);
+       r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
+       up_write(&pmd->root_lock);
+
+       return r;
+}
index 0cecc3702885fb57452c013f83679fc437a393ca..845ebbe589a9e0a00505bab0150df48331233928 100644 (file)
@@ -8,6 +8,7 @@
 #define DM_THIN_METADATA_H
 
 #include "persistent-data/dm-block-manager.h"
+#include "persistent-data/dm-space-map.h"
 
 #define THIN_METADATA_BLOCK_SIZE 4096
 
@@ -185,6 +186,7 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
  * blocks would be lost.
  */
 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
+int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
 
 /*
  * Flicks the underlying block manager into read only mode, so you know
@@ -192,6 +194,11 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
  */
 void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
 
+int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+                                       dm_block_t threshold,
+                                       dm_sm_threshold_fn fn,
+                                       void *context);
+
 /*----------------------------------------------------------------*/
 
 #endif
index 004ad1652b73477dae0194b957842434cf48c38d..759cffc45cabce8414c883f329b52fd56513bccf 100644 (file)
@@ -922,7 +922,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
                return r;
 
        if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
-               DMWARN("%s: reached low water mark, sending event.",
+               DMWARN("%s: reached low water mark for data device: sending event.",
                       dm_device_name(pool->pool_md));
                spin_lock_irqsave(&pool->lock, flags);
                pool->low_water_triggered = 1;
@@ -1281,6 +1281,10 @@ static void process_bio_fail(struct thin_c *tc, struct bio *bio)
        bio_io_error(bio);
 }
 
+/*
+ * FIXME: should we also commit due to size of transaction, measured in
+ * metadata blocks?
+ */
 static int need_commit_due_to_time(struct pool *pool)
 {
        return jiffies < pool->last_commit_jiffies ||
@@ -1909,6 +1913,56 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
        return r;
 }
 
+static void metadata_low_callback(void *context)
+{
+       struct pool *pool = context;
+
+       DMWARN("%s: reached low water mark for metadata device: sending event.",
+              dm_device_name(pool->pool_md));
+
+       dm_table_event(pool->ti->table);
+}
+
+static sector_t get_metadata_dev_size(struct block_device *bdev)
+{
+       sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+       char buffer[BDEVNAME_SIZE];
+
+       if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
+               DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
+                      bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
+               metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
+       }
+
+       return metadata_dev_size;
+}
+
+static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
+{
+       sector_t metadata_dev_size = get_metadata_dev_size(bdev);
+
+       sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+
+       return metadata_dev_size;
+}
+
+/*
+ * When a metadata threshold is crossed a dm event is triggered, and
+ * userland should respond by growing the metadata device.  We could let
+ * userland set the threshold, like we do with the data threshold, but I'm
+ * not sure they know enough to do this well.
+ */
+static dm_block_t calc_metadata_threshold(struct pool_c *pt)
+{
+       /*
+        * 4M is ample for all ops with the possible exception of thin
+        * device deletion which is harmless if it fails (just retry the
+        * delete after you've grown the device).
+        */
+       dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
+       return min((dm_block_t)1024ULL /* 4M */, quarter);
+}
+
 /*
  * thin-pool <metadata dev> <data dev>
  *          <data block size (sectors)>
@@ -1931,8 +1985,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
        unsigned long block_size;
        dm_block_t low_water_blocks;
        struct dm_dev *metadata_dev;
-       sector_t metadata_dev_size;
-       char b[BDEVNAME_SIZE];
+       fmode_t metadata_mode;
 
        /*
         * FIXME Remove validation from scope of lock.
@@ -1944,19 +1997,32 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                r = -EINVAL;
                goto out_unlock;
        }
+
        as.argc = argc;
        as.argv = argv;
 
-       r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
+       /*
+        * Set default pool features.
+        */
+       pool_features_init(&pf);
+
+       dm_consume_args(&as, 4);
+       r = parse_pool_features(&as, &pf, ti);
+       if (r)
+               goto out_unlock;
+
+       metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
+       r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
        if (r) {
                ti->error = "Error opening metadata block device";
                goto out_unlock;
        }
 
-       metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
-       if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
-               DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
-                      bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
+       /*
+        * Run for the side-effect of possibly issuing a warning if the
+        * device is too big.
+        */
+       (void) get_metadata_dev_size(metadata_dev->bdev);
 
        r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
        if (r) {
@@ -1979,16 +2045,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                goto out;
        }
 
-       /*
-        * Set default pool features.
-        */
-       pool_features_init(&pf);
-
-       dm_consume_args(&as, 4);
-       r = parse_pool_features(&as, &pf, ti);
-       if (r)
-               goto out;
-
        pt = kzalloc(sizeof(*pt), GFP_KERNEL);
        if (!pt) {
                r = -ENOMEM;
@@ -2040,6 +2096,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
        }
        ti->private = pt;
 
+       r = dm_pool_register_metadata_threshold(pt->pool->pmd,
+                                               calc_metadata_threshold(pt),
+                                               metadata_low_callback,
+                                               pool);
+       if (r)
+               goto out_free_pt;
+
        pt->callbacks.congested_fn = pool_is_congested;
        dm_table_add_target_callbacks(ti->table, &pt->callbacks);
 
@@ -2079,18 +2142,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
        return r;
 }
 
-/*
- * Retrieves the number of blocks of the data device from
- * the superblock and compares it to the actual device size,
- * thus resizing the data device in case it has grown.
- *
- * This both copes with opening preallocated data devices in the ctr
- * being followed by a resume
- * -and-
- * calling the resume method individually after userspace has
- * grown the data device in reaction to a table event.
- */
-static int pool_preresume(struct dm_target *ti)
+static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
 {
        int r;
        struct pool_c *pt = ti->private;
@@ -2098,12 +2150,7 @@ static int pool_preresume(struct dm_target *ti)
        sector_t data_size = ti->len;
        dm_block_t sb_data_size;
 
-       /*
-        * Take control of the pool object.
-        */
-       r = bind_control_target(pool, ti);
-       if (r)
-               return r;
+       *need_commit = false;
 
        (void) sector_div(data_size, pool->sectors_per_block);
 
@@ -2114,7 +2161,7 @@ static int pool_preresume(struct dm_target *ti)
        }
 
        if (data_size < sb_data_size) {
-               DMERR("pool target too small, is %llu blocks (expected %llu)",
+               DMERR("pool target (%llu blocks) too small: expected %llu",
                      (unsigned long long)data_size, sb_data_size);
                return -EINVAL;
 
@@ -2122,17 +2169,90 @@ static int pool_preresume(struct dm_target *ti)
                r = dm_pool_resize_data_dev(pool->pmd, data_size);
                if (r) {
                        DMERR("failed to resize data device");
-                       /* FIXME Stricter than necessary: Rollback transaction instead here */
                        set_pool_mode(pool, PM_READ_ONLY);
                        return r;
                }
 
-               (void) commit_or_fallback(pool);
+               *need_commit = true;
        }
 
        return 0;
 }
 
+static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
+{
+       int r;
+       struct pool_c *pt = ti->private;
+       struct pool *pool = pt->pool;
+       dm_block_t metadata_dev_size, sb_metadata_dev_size;
+
+       *need_commit = false;
+
+       metadata_dev_size = get_metadata_dev_size(pool->md_dev);
+
+       r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
+       if (r) {
+               DMERR("failed to retrieve data device size");
+               return r;
+       }
+
+       if (metadata_dev_size < sb_metadata_dev_size) {
+               DMERR("metadata device (%llu sectors) too small: expected %llu",
+                     metadata_dev_size, sb_metadata_dev_size);
+               return -EINVAL;
+
+       } else if (metadata_dev_size > sb_metadata_dev_size) {
+               r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
+               if (r) {
+                       DMERR("failed to resize metadata device");
+                       return r;
+               }
+
+               *need_commit = true;
+       }
+
+       return 0;
+}
+
+/*
+ * Retrieves the number of blocks of the data device from
+ * the superblock and compares it to the actual device size,
+ * thus resizing the data device in case it has grown.
+ *
+ * This both copes with opening preallocated data devices in the ctr
+ * being followed by a resume
+ * -and-
+ * calling the resume method individually after userspace has
+ * grown the data device in reaction to a table event.
+ */
+static int pool_preresume(struct dm_target *ti)
+{
+       int r;
+       bool need_commit1, need_commit2;
+       struct pool_c *pt = ti->private;
+       struct pool *pool = pt->pool;
+
+       /*
+        * Take control of the pool object.
+        */
+       r = bind_control_target(pool, ti);
+       if (r)
+               return r;
+
+       r = maybe_resize_data_dev(ti, &need_commit1);
+       if (r)
+               return r;
+
+       r = maybe_resize_metadata_dev(ti, &need_commit2);
+       if (r)
+               return r;
+
+       if (need_commit1 || need_commit2)
+               (void) commit_or_fallback(pool);
+
+       return 0;
+}
+
 static void pool_resume(struct dm_target *ti)
 {
        struct pool_c *pt = ti->private;
@@ -2549,7 +2669,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 7, 0},
+       .version = {1, 8, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
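
For a sense of scale, calc_metadata_threshold() above caps the low-space threshold at 1024 metadata blocks (4 MiB with the 4096-byte THIN_METADATA_BLOCK_SIZE) or a quarter of the metadata device, whichever is smaller. A quick arithmetic check using a hypothetical 16 GiB metadata device:

#include <stdio.h>

int main(void)
{
        unsigned long long dev_sectors = 16ULL << 21;   /* 16 GiB in 512-byte sectors */
        unsigned long long blocks = dev_sectors / 8;    /* 4096-byte metadata blocks */
        unsigned long long quarter = blocks / 4;
        unsigned long long threshold = quarter < 1024 ? quarter : 1024;

        printf("blocks=%llu quarter=%llu threshold=%llu (1024 blocks == 4 MiB)\n",
               blocks, quarter, threshold);
        return 0;
}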
index f6d29e614ab728c521b0ebc1f8a7f320e5099deb..e735a6d5a793dfff9c71330fd7555469d5a58501 100644 (file)
@@ -248,7 +248,8 @@ static struct dm_space_map ops = {
        .new_block = sm_disk_new_block,
        .commit = sm_disk_commit,
        .root_size = sm_disk_root_size,
-       .copy_root = sm_disk_copy_root
+       .copy_root = sm_disk_copy_root,
+       .register_threshold_callback = NULL
 };
 
 struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
index 906cf3df71af33365e460cbca0a82b0077fc6abf..1c959684caef7512fafd3c1a5505c29c798a9c11 100644 (file)
 
 /*----------------------------------------------------------------*/
 
+/*
+ * An edge triggered threshold.
+ */
+struct threshold {
+       bool threshold_set;
+       bool value_set;
+       dm_block_t threshold;
+       dm_block_t current_value;
+       dm_sm_threshold_fn fn;
+       void *context;
+};
+
+static void threshold_init(struct threshold *t)
+{
+       t->threshold_set = false;
+       t->value_set = false;
+}
+
+static void set_threshold(struct threshold *t, dm_block_t value,
+                         dm_sm_threshold_fn fn, void *context)
+{
+       t->threshold_set = true;
+       t->threshold = value;
+       t->fn = fn;
+       t->context = context;
+}
+
+static bool below_threshold(struct threshold *t, dm_block_t value)
+{
+       return t->threshold_set && value <= t->threshold;
+}
+
+static bool threshold_already_triggered(struct threshold *t)
+{
+       return t->value_set && below_threshold(t, t->current_value);
+}
+
+static void check_threshold(struct threshold *t, dm_block_t value)
+{
+       if (below_threshold(t, value) &&
+           !threshold_already_triggered(t))
+               t->fn(t->context);
+
+       t->value_set = true;
+       t->current_value = value;
+}
+
+/*----------------------------------------------------------------*/
+
 /*
  * Space map interface.
  *
@@ -54,6 +103,8 @@ struct sm_metadata {
        unsigned allocated_this_transaction;
        unsigned nr_uncommitted;
        struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
+
+       struct threshold threshold;
 };
 
 static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
@@ -144,12 +195,6 @@ static void sm_metadata_destroy(struct dm_space_map *sm)
        kfree(smm);
 }
 
-static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
-{
-       DMERR("doesn't support extend");
-       return -EINVAL;
-}
-
 static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
 {
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
@@ -335,9 +380,19 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
 
 static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
 {
+       dm_block_t count;
+       struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
        int r = sm_metadata_new_block_(sm, b);
        if (r)
                DMERR("unable to allocate new metadata block");
+
+       r = sm_metadata_get_nr_free(sm, &count);
+       if (r)
+               DMERR("couldn't get free block count");
+
+       check_threshold(&smm->threshold, count);
+
        return r;
 }
 
@@ -357,6 +412,18 @@ static int sm_metadata_commit(struct dm_space_map *sm)
        return 0;
 }
 
+static int sm_metadata_register_threshold_callback(struct dm_space_map *sm,
+                                                  dm_block_t threshold,
+                                                  dm_sm_threshold_fn fn,
+                                                  void *context)
+{
+       struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+       set_threshold(&smm->threshold, threshold, fn, context);
+
+       return 0;
+}
+
 static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
 {
        *result = sizeof(struct disk_sm_root);
@@ -382,6 +449,8 @@ static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t
        return 0;
 }
 
+static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);
+
 static struct dm_space_map ops = {
        .destroy = sm_metadata_destroy,
        .extend = sm_metadata_extend,
@@ -395,7 +464,8 @@ static struct dm_space_map ops = {
        .new_block = sm_metadata_new_block,
        .commit = sm_metadata_commit,
        .root_size = sm_metadata_root_size,
-       .copy_root = sm_metadata_copy_root
+       .copy_root = sm_metadata_copy_root,
+       .register_threshold_callback = sm_metadata_register_threshold_callback
 };
 
 /*----------------------------------------------------------------*/
@@ -410,7 +480,7 @@ static void sm_bootstrap_destroy(struct dm_space_map *sm)
 
 static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
 {
-       DMERR("boostrap doesn't support extend");
+       DMERR("bootstrap doesn't support extend");
 
        return -EINVAL;
 }
@@ -450,7 +520,7 @@ static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
 static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
                                  uint32_t count)
 {
-       DMERR("boostrap doesn't support set_count");
+       DMERR("bootstrap doesn't support set_count");
 
        return -EINVAL;
 }
@@ -491,7 +561,7 @@ static int sm_bootstrap_commit(struct dm_space_map *sm)
 
 static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
 {
-       DMERR("boostrap doesn't support root_size");
+       DMERR("bootstrap doesn't support root_size");
 
        return -EINVAL;
 }
@@ -499,7 +569,7 @@ static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
 static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
                                  size_t max)
 {
-       DMERR("boostrap doesn't support copy_root");
+       DMERR("bootstrap doesn't support copy_root");
 
        return -EINVAL;
 }
@@ -517,11 +587,42 @@ static struct dm_space_map bootstrap_ops = {
        .new_block = sm_bootstrap_new_block,
        .commit = sm_bootstrap_commit,
        .root_size = sm_bootstrap_root_size,
-       .copy_root = sm_bootstrap_copy_root
+       .copy_root = sm_bootstrap_copy_root,
+       .register_threshold_callback = NULL
 };
 
 /*----------------------------------------------------------------*/
 
+static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+       int r, i;
+       enum allocation_event ev;
+       struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+       dm_block_t old_len = smm->ll.nr_blocks;
+
+       /*
+        * Flick into a mode where all blocks get allocated in the new area.
+        */
+       smm->begin = old_len;
+       memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+
+       /*
+        * Extend.
+        */
+       r = sm_ll_extend(&smm->ll, extra_blocks);
+
+       /*
+        * Switch back to normal behaviour.
+        */
+       memcpy(&smm->sm, &ops, sizeof(smm->sm));
+       for (i = old_len; !r && i < smm->begin; i++)
+               r = sm_ll_inc(&smm->ll, i, &ev);
+
+       return r;
+}
+
+/*----------------------------------------------------------------*/
+
 struct dm_space_map *dm_sm_metadata_init(void)
 {
        struct sm_metadata *smm;
@@ -549,6 +650,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
        smm->nr_uncommitted = 0;
+       threshold_init(&smm->threshold);
 
        memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
 
@@ -590,6 +692,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
        smm->nr_uncommitted = 0;
+       threshold_init(&smm->threshold);
 
        memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
        return 0;
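
The threshold added to the metadata space map above is edge-triggered: the callback fires when the free-block count first drops to or below the threshold, not on every subsequent allocation. A userspace model of that behaviour, mirroring check_threshold() from the diff:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long block_t;

struct threshold {
        bool threshold_set, value_set;
        block_t threshold, current_value;
        void (*fn)(void *context);
        void *context;
};

static bool below_threshold(struct threshold *t, block_t value)
{
        return t->threshold_set && value <= t->threshold;
}

static void check_threshold(struct threshold *t, block_t value)
{
        bool already_triggered = t->value_set && below_threshold(t, t->current_value);

        if (below_threshold(t, value) && !already_triggered)
                t->fn(t->context);

        t->value_set = true;
        t->current_value = value;
}

static void low_space(void *context)
{
        (void)context;
        puts("metadata space map is running low");
}

int main(void)
{
        struct threshold t = { .threshold_set = true, .threshold = 3, .fn = low_space };
        block_t free_blocks;

        for (free_blocks = 5; free_blocks > 0; free_blocks--)
                check_threshold(&t, free_blocks);       /* warning printed exactly once */
        return 0;
}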
index 1cbfc6b1638a05a42535fe80c4e76afa0e39a3b0..3e6d1153b7c4b898b5a1355ad8bcc43cac2de4f4 100644 (file)
@@ -9,6 +9,8 @@
 
 #include "dm-block-manager.h"
 
+typedef void (*dm_sm_threshold_fn)(void *context);
+
 /*
  * struct dm_space_map keeps a record of how many times each block in a device
  * is referenced.  It needs to be fixed on disk as part of the transaction.
@@ -59,6 +61,15 @@ struct dm_space_map {
         */
        int (*root_size)(struct dm_space_map *sm, size_t *result);
        int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
+
+       /*
+        * You can register one threshold callback which is edge-triggered
+        * when the free space in the space map drops below the threshold.
+        */
+       int (*register_threshold_callback)(struct dm_space_map *sm,
+                                          dm_block_t threshold,
+                                          dm_sm_threshold_fn fn,
+                                          void *context);
 };
 
 /*----------------------------------------------------------------*/
@@ -131,4 +142,16 @@ static inline int dm_sm_copy_root(struct dm_space_map *sm, void *copy_to_here_le
        return sm->copy_root(sm, copy_to_here_le, len);
 }
 
+static inline int dm_sm_register_threshold_callback(struct dm_space_map *sm,
+                                                   dm_block_t threshold,
+                                                   dm_sm_threshold_fn fn,
+                                                   void *context)
+{
+       if (sm->register_threshold_callback)
+               return sm->register_threshold_callback(sm, threshold, fn, context);
+
+       return -EINVAL;
+}
+
+
 #endif /* _LINUX_DM_SPACE_MAP_H */
index a3a851e49321542ac84659f4f3315219163430ae..18c0d8d1ddf779dbba8f09d7b898b7b687bd0a74 100644 (file)
@@ -68,12 +68,6 @@ MODULE_LICENSE("Dual MPL/GPL");
 
 #if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B)
 
-/* The RPX series use SLOT_B */
-#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
-#define CONFIG_PCMCIA_SLOT_B
-#define CONFIG_BD_IS_MHZ
-#endif
-
 /* The ADS board use SLOT_A */
 #ifdef CONFIG_ADS
 #define CONFIG_PCMCIA_SLOT_A
@@ -253,81 +247,6 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev);
 
 #define PCMCIA_BMT_LIMIT (15*4)        /* Bus Monitor Timeout value */
 
-/* ------------------------------------------------------------------------- */
-/* board specific stuff:                                                     */
-/* voltage_set(), hardware_enable() and hardware_disable()                   */
-/* ------------------------------------------------------------------------- */
-/* RPX Boards from Embedded Planet                                           */
-
-#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
-
-/* The RPX boards seems to have it's bus monitor timeout set to 6*8 clocks.
- * SYPCR is write once only, therefore must the slowest memory be faster
- * than the bus monitor or we will get a machine check due to the bus timeout.
- */
-
-#define PCMCIA_BOARD_MSG "RPX CLASSIC or RPX LITE"
-
-#undef PCMCIA_BMT_LIMIT
-#define PCMCIA_BMT_LIMIT (6*8)
-
-static int voltage_set(int slot, int vcc, int vpp)
-{
-       u32 reg = 0;
-
-       switch (vcc) {
-       case 0:
-               break;
-       case 33:
-               reg |= BCSR1_PCVCTL4;
-               break;
-       case 50:
-               reg |= BCSR1_PCVCTL5;
-               break;
-       default:
-               return 1;
-       }
-
-       switch (vpp) {
-       case 0:
-               break;
-       case 33:
-       case 50:
-               if (vcc == vpp)
-                       reg |= BCSR1_PCVCTL6;
-               else
-                       return 1;
-               break;
-       case 120:
-               reg |= BCSR1_PCVCTL7;
-       default:
-               return 1;
-       }
-
-       if (!((vcc == 50) || (vcc == 0)))
-               return 1;
-
-       /* first, turn off all power */
-
-       out_be32(((u32 *) RPX_CSR_ADDR),
-                in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 |
-                                                    BCSR1_PCVCTL5 |
-                                                    BCSR1_PCVCTL6 |
-                                                    BCSR1_PCVCTL7));
-
-       /* enable new powersettings */
-
-       out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg);
-
-       return 0;
-}
-
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_)        /* No hardware to enable */
-#define hardware_disable(_slot_)       /* No hardware to disable */
-
-#endif                         /* CONFIG_RPXCLASSIC */
-
 /* FADS Boards from Motorola                                               */
 
 #if defined(CONFIG_FADS)
@@ -419,65 +338,6 @@ static inline int voltage_set(int slot, int vcc, int vpp)
 
 #endif
 
-/* ------------------------------------------------------------------------- */
-/* Motorola MBX860                                                           */
-
-#if defined(CONFIG_MBX)
-
-#define PCMCIA_BOARD_MSG "MBX"
-
-static int voltage_set(int slot, int vcc, int vpp)
-{
-       u8 reg = 0;
-
-       switch (vcc) {
-       case 0:
-               break;
-       case 33:
-               reg |= CSR2_VCC_33;
-               break;
-       case 50:
-               reg |= CSR2_VCC_50;
-               break;
-       default:
-               return 1;
-       }
-
-       switch (vpp) {
-       case 0:
-               break;
-       case 33:
-       case 50:
-               if (vcc == vpp)
-                       reg |= CSR2_VPP_VCC;
-               else
-                       return 1;
-               break;
-       case 120:
-               if ((vcc == 33) || (vcc == 50))
-                       reg |= CSR2_VPP_12;
-               else
-                       return 1;
-       default:
-               return 1;
-       }
-
-       /* first, turn off all power */
-       out_8((u8 *) MBX_CSR2_ADDR,
-             in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
-
-       /* enable new powersettings */
-       out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg);
-
-       return 0;
-}
-
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_)        /* No hardware to enable */
-#define hardware_disable(_slot_)       /* No hardware to disable */
-
-#endif                         /* CONFIG_MBX */
-
 #if defined(CONFIG_PRxK)
 #include <asm/cpld.h>
 extern volatile fpga_pc_regs *fpga_pc;
index 3338437b559b7e696e305b08e54ab1d874e67b4d..85772616efbf02b82c61af7e0846baed894c273e 100644 (file)
@@ -781,4 +781,12 @@ config APPLE_GMUX
          graphics as well as the backlight. Currently only backlight
          control is supported by the driver.
 
+config PVPANIC
+       tristate "pvpanic device support"
+       depends on ACPI
+       ---help---
+         This driver provides support for the pvpanic device.  pvpanic is
+         a paravirtualized device provided by QEMU; it lets a virtual machine
+         (guest) communicate panic events to the host.
+
 endif # X86_PLATFORM_DEVICES
index ace2b38942fe27f9cc02697f0f9543ca5eeff595..ef0ec746f78c49e5f6bf01fe28099ebbbee08318 100644 (file)
@@ -51,3 +51,5 @@ obj-$(CONFIG_INTEL_OAKTRAIL)  += intel_oaktrail.o
 obj-$(CONFIG_SAMSUNG_Q10)      += samsung-q10.o
 obj-$(CONFIG_APPLE_GMUX)       += apple-gmux.o
 obj-$(CONFIG_CHROMEOS_LAPTOP)  += chromeos_laptop.o
+
+obj-$(CONFIG_PVPANIC)           += pvpanic.o
index 210b5b872125e8aef1d751e56968f953686c4904..8fcb41e18b9cd6ad508001d33d86665f4955816c 100644 (file)
@@ -171,6 +171,15 @@ static struct dmi_system_id asus_quirks[] = {
                },
                .driver_data = &quirk_asus_x401u,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "ASUSTeK COMPUTER INC. X75A",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X75A"),
+               },
+               .driver_data = &quirk_asus_x401u,
+       },
        {},
 };
 
index fa3ee6209572976b71d623585ecfb026842d5645..1134119521ac2e4da9708703d8b4252ac807ce15 100644 (file)
@@ -284,6 +284,7 @@ static void __init parse_da_table(const struct dmi_header *dm)
 {
        /* Final token is a terminator, so we don't want to copy it */
        int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1;
+       struct calling_interface_token *new_da_tokens;
        struct calling_interface_structure *table =
                container_of(dm, struct calling_interface_structure, header);
 
@@ -296,12 +297,13 @@ static void __init parse_da_table(const struct dmi_header *dm)
        da_command_address = table->cmdIOAddress;
        da_command_code = table->cmdIOCode;
 
-       da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
-                            sizeof(struct calling_interface_token),
-                            GFP_KERNEL);
+       new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
+                                sizeof(struct calling_interface_token),
+                                GFP_KERNEL);
 
-       if (!da_tokens)
+       if (!new_da_tokens)
                return;
+       da_tokens = new_da_tokens;
 
        memcpy(da_tokens+da_num_tokens, table->tokens,
               sizeof(struct calling_interface_token) * tokens);
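
The krealloc() fix above is the usual grow-in-place idiom: assign the result to a temporary so the original allocation is neither leaked nor lost if the call fails. The same pattern in plain userspace C, with realloc() standing in for krealloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        size_t n = 4, extra = 4;
        int *tokens = calloc(n, sizeof(*tokens));
        int *new_tokens;

        if (!tokens)
                return 1;

        new_tokens = realloc(tokens, (n + extra) * sizeof(*tokens));
        if (!new_tokens) {
                /* tokens is still valid here; the buggy version overwrote it
                 * with NULL, leaking the old buffer and losing its contents */
                free(tokens);
                return 1;
        }
        tokens = new_tokens;
        memset(tokens + n, 0, extra * sizeof(*tokens));

        printf("resized to %zu entries\n", n + extra);
        free(tokens);
        return 0;
}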
index 3f945457f71c218116e7f8eb9d4a7400fcdc8b62..bcf8cc6b5537cca1a1c3ad016d700ca3f30104a4 100644 (file)
@@ -34,6 +34,14 @@ MODULE_LICENSE("GPL");
 #define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4"
 #define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8"
 
+struct dell_wmi_event {
+       u16     length;
+       /* 0x000: A hot key pressed or an event occurred
+        * 0x00F: A sequence of hot keys are pressed */
+       u16     type;
+       u16     event[];
+};
+
 static const char *dell_wmi_aio_guids[] = {
        EVENT_GUID1,
        EVENT_GUID2,
@@ -46,15 +54,41 @@ MODULE_ALIAS("wmi:"EVENT_GUID2);
 static const struct key_entry dell_wmi_aio_keymap[] = {
        { KE_KEY, 0xc0, { KEY_VOLUMEUP } },
        { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } },
+       { KE_KEY, 0xe030, { KEY_VOLUMEUP } },
+       { KE_KEY, 0xe02e, { KEY_VOLUMEDOWN } },
+       { KE_KEY, 0xe020, { KEY_MUTE } },
+       { KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } },
+       { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
+       { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
+       { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },
        { KE_END, 0 }
 };
 
 static struct input_dev *dell_wmi_aio_input_dev;
 
+/*
+ * The new WMI event data format will follow the dell_wmi_event structure
+ * So, we will check if the buffer matches the format
+ */
+static bool dell_wmi_aio_event_check(u8 *buffer, int length)
+{
+       struct dell_wmi_event *event = (struct dell_wmi_event *)buffer;
+
+       if (event == NULL || length < 6)
+               return false;
+
+       if ((event->type == 0 || event->type == 0xf) &&
+                       event->length >= 2)
+               return true;
+
+       return false;
+}
+
 static void dell_wmi_aio_notify(u32 value, void *context)
 {
        struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
+       struct dell_wmi_event *event;
        acpi_status status;
 
        status = wmi_get_event_data(value, &response);
@@ -65,7 +99,7 @@ static void dell_wmi_aio_notify(u32 value, void *context)
 
        obj = (union acpi_object *)response.pointer;
        if (obj) {
-               unsigned int scancode;
+               unsigned int scancode = 0;
 
                switch (obj->type) {
                case ACPI_TYPE_INTEGER:
@@ -75,13 +109,22 @@ static void dell_wmi_aio_notify(u32 value, void *context)
                                scancode, 1, true);
                        break;
                case ACPI_TYPE_BUFFER:
-                       /* Broken machines return the scancode in a buffer */
-                       if (obj->buffer.pointer && obj->buffer.length > 0) {
-                               scancode = obj->buffer.pointer[0];
+                       if (dell_wmi_aio_event_check(obj->buffer.pointer,
+                                               obj->buffer.length)) {
+                               event = (struct dell_wmi_event *)
+                                       obj->buffer.pointer;
+                               scancode = event->event[0];
+                       } else {
+                               /* Broken machines return the scancode in a
+                                  buffer */
+                               if (obj->buffer.pointer &&
+                                               obj->buffer.length > 0)
+                                       scancode = obj->buffer.pointer[0];
+                       }
+                       if (scancode)
                                sparse_keymap_report_event(
                                        dell_wmi_aio_input_dev,
                                        scancode, 1, true);
-                       }
                        break;
                }
        }
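The dell_wmi_event layout added above (a u16 length, a u16 type, then a flexible array of u16 scancodes) is validated before it is trusted: the ACPI buffer must be at least 6 bytes (header plus one event word), the type must be 0x000 or 0x00F, and the embedded length must cover at least one entry. A standalone sketch of that check, using the same constants but made-up input rather than a real WMI buffer:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct dell_wmi_event {
            uint16_t length;        /* event length field; driver requires >= 2 */
            uint16_t type;          /* 0x000: hot key/event, 0x00F: key sequence */
            uint16_t event[];       /* scancodes */
    };

    static bool event_check(const uint8_t *buffer, size_t length)
    {
            const struct dell_wmi_event *event =
                    (const struct dell_wmi_event *)buffer;

            if (!buffer || length < 6)      /* header + one event word */
                    return false;

            return (event->type == 0x000 || event->type == 0x00f) &&
                   event->length >= 2;
    }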
index 1a779bbfb87d2b8de61e964734981f4f9a6c891e..8df0c5a21be27d7a2044b06b8ee21bc8fab90359 100644 (file)
@@ -71,6 +71,14 @@ enum hp_wmi_event_ids {
        HPWMI_WIRELESS = 5,
        HPWMI_CPU_BATTERY_THROTTLE = 6,
        HPWMI_LOCK_SWITCH = 7,
+       HPWMI_LID_SWITCH = 8,
+       HPWMI_SCREEN_ROTATION = 9,
+       HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A,
+       HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B,
+       HPWMI_PROXIMITY_SENSOR = 0x0C,
+       HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D,
+       HPWMI_PEAKSHIFT_PERIOD = 0x0F,
+       HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
 };
 
 struct bios_args {
@@ -536,6 +544,22 @@ static void hp_wmi_notify(u32 value, void *context)
                break;
        case HPWMI_LOCK_SWITCH:
                break;
+       case HPWMI_LID_SWITCH:
+               break;
+       case HPWMI_SCREEN_ROTATION:
+               break;
+       case HPWMI_COOLSENSE_SYSTEM_MOBILE:
+               break;
+       case HPWMI_COOLSENSE_SYSTEM_HOT:
+               break;
+       case HPWMI_PROXIMITY_SENSOR:
+               break;
+       case HPWMI_BACKLIT_KB_BRIGHTNESS:
+               break;
+       case HPWMI_PEAKSHIFT_PERIOD:
+               break;
+       case HPWMI_BATTERY_CHARGE_PERIOD:
+               break;
        default:
                pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
                break;
index e64a7a870d42146c910ffb39b955c29a11a88080..a8e43cf70fac96309d7b689265b7c2b6ba2a19db 100644 (file)
@@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct device *dev)
 
 static int lis3lv02d_resume(struct device *dev)
 {
-       return lis3lv02d_poweron(&lis3_dev);
+       lis3lv02d_poweron(&lis3_dev);
+       return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
index 17f00b8dc5cbcb841540f07c7330775bcb5393df..89c4519d48ac80d2a54f42ceabf7a48b792e871c 100644 (file)
@@ -640,7 +640,8 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
        for (bit = 0; bit < 16; bit++) {
                if (test_bit(bit, &value)) {
                        switch (bit) {
-                       case 6:
+                       case 0: /* Z580 */
+                       case 6: /* Z570 */
                                /* Thermal Management button */
                                ideapad_input_report(priv, 65);
                                break;
@@ -648,6 +649,9 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
                                /* OneKey Theater button */
                                ideapad_input_report(priv, 64);
                                break;
+                       default:
+                               pr_info("Unknown special button: %lu\n", bit);
+                               break;
                        }
                }
        }
diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c
new file mode 100644 (file)
index 0000000..47ae0c4
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ *  pvpanic.c - pvpanic Device Support
+ *
+ *  Copyright (C) 2013 Fujitsu.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
+MODULE_DESCRIPTION("pvpanic device driver");
+MODULE_LICENSE("GPL");
+
+static int pvpanic_add(struct acpi_device *device);
+static int pvpanic_remove(struct acpi_device *device);
+
+static const struct acpi_device_id pvpanic_device_ids[] = {
+       { "QEMU0001", 0 },
+       { "", 0 },
+};
+MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids);
+
+#define PVPANIC_PANICKED       (1 << 0)
+
+static u16 port;
+
+static struct acpi_driver pvpanic_driver = {
+       .name =         "pvpanic",
+       .class =        "QEMU",
+       .ids =          pvpanic_device_ids,
+       .ops =          {
+                               .add =          pvpanic_add,
+                               .remove =       pvpanic_remove,
+                       },
+       .owner =        THIS_MODULE,
+};
+
+static void
+pvpanic_send_event(unsigned int event)
+{
+       outb(event, port);
+}
+
+static int
+pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
+                    void *unused)
+{
+       pvpanic_send_event(PVPANIC_PANICKED);
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block pvpanic_panic_nb = {
+       .notifier_call = pvpanic_panic_notify,
+};
+
+
+static acpi_status
+pvpanic_walk_resources(struct acpi_resource *res, void *context)
+{
+       switch (res->type) {
+       case ACPI_RESOURCE_TYPE_END_TAG:
+               return AE_OK;
+
+       case ACPI_RESOURCE_TYPE_IO:
+               port = res->data.io.minimum;
+               return AE_OK;
+
+       default:
+               return AE_ERROR;
+       }
+}
+
+static int pvpanic_add(struct acpi_device *device)
+{
+       acpi_status status;
+       u64 ret;
+
+       status = acpi_evaluate_integer(device->handle, "_STA", NULL,
+                                      &ret);
+
+       if (ACPI_FAILURE(status) || (ret & 0x0B) != 0x0B)
+               return -ENODEV;
+
+       acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+                           pvpanic_walk_resources, NULL);
+
+       if (!port)
+               return -ENODEV;
+
+       atomic_notifier_chain_register(&panic_notifier_list,
+                                      &pvpanic_panic_nb);
+
+       return 0;
+}
+
+static int pvpanic_remove(struct acpi_device *device)
+{
+
+       atomic_notifier_chain_unregister(&panic_notifier_list,
+                                        &pvpanic_panic_nb);
+       return 0;
+}
+
+module_acpi_driver(pvpanic_driver);
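The new pvpanic driver is deliberately small: it matches the QEMU0001 ACPI device, pulls the I/O port out of _CRS, registers on the panic notifier chain, and on panic writes the PVPANIC_PANICKED bit to that port so the hypervisor can log or react to the guest crash. For illustration only, the guest-side write looks roughly like this from user space (assuming x86, root privileges, and QEMU's usual port 0x505 — the driver itself gets the port from ACPI instead of hard-coding it):

    #include <stdio.h>
    #include <sys/io.h>

    #define PVPANIC_PANICKED        (1 << 0)

    int main(void)
    {
            unsigned short port = 0x505;    /* assumed; the driver probes _CRS */

            if (ioperm(port, 1, 1)) {       /* gain access to the port */
                    perror("ioperm");
                    return 1;
            }
            outb(PVPANIC_PANICKED, port);   /* signal "guest panicked" */
            return 0;
    }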
index 5f770059fd4d3aa3115140fd5afcc7a7832eca53..1a90b62a71c66400268cad23c7d6690b1eec30d0 100644 (file)
@@ -176,10 +176,7 @@ static int __init samsungq10_init(void)
                                                   samsungq10_probe,
                                                   NULL, 0, NULL, 0);
 
-       if (IS_ERR(samsungq10_device))
-               return PTR_ERR(samsungq10_device);
-
-       return 0;
+       return PTR_RET(samsungq10_device);
 }
 
 static void __exit samsungq10_exit(void)
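PTR_RET() collapses the removed IS_ERR()/PTR_ERR() branch into a single call: it returns the encoded error when the pointer is an error pointer and 0 otherwise. Its behaviour is roughly the helper below (a sketch, not the exact definition from <linux/err.h>):

    #include <linux/err.h>

    /* Roughly what PTR_RET(ptr) evaluates to. */
    static inline int ptr_ret_sketch(const void *ptr)
    {
            if (IS_ERR(ptr))
                    return PTR_ERR(ptr);
            return 0;
    }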
index d544e3aaf76141754b622aada806b04050fe2886..2ac045f27f10112aa467b867a9b97a9a1790c189 100644 (file)
@@ -1255,6 +1255,11 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
                        real_ev = __sony_nc_gfx_switch_status_get();
                        break;
 
+               case 0x015B:
+                       /* Hybrid GFX switching SVS151290S */
+                       ev_type = GFX_SWITCH;
+                       real_ev = __sony_nc_gfx_switch_status_get();
+                       break;
                default:
                        dprintk("Unknown event 0x%x for handle 0x%x\n",
                                        event, handle);
@@ -1353,6 +1358,7 @@ static void sony_nc_function_setup(struct acpi_device *device,
                        break;
                case 0x0128:
                case 0x0146:
+               case 0x015B:
                        result = sony_nc_gfx_switch_setup(pf_device, handle);
                        if (result)
                                pr_err("couldn't set up GFX Switch status (%d)\n",
@@ -1375,6 +1381,7 @@ static void sony_nc_function_setup(struct acpi_device *device,
                case 0x0143:
                case 0x014b:
                case 0x014c:
+               case 0x0163:
                        result = sony_nc_kbd_backlight_setup(pf_device, handle);
                        if (result)
                                pr_err("couldn't set up keyboard backlight function (%d)\n",
@@ -1426,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
                        break;
                case 0x0128:
                case 0x0146:
+               case 0x015B:
                        sony_nc_gfx_switch_cleanup(pd);
                        break;
                case 0x0131:
@@ -1439,6 +1447,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
                case 0x0143:
                case 0x014b:
                case 0x014c:
+               case 0x0163:
                        sony_nc_kbd_backlight_cleanup(pd);
                        break;
                default:
@@ -1485,6 +1494,7 @@ static void sony_nc_function_resume(void)
                case 0x0143:
                case 0x014b:
                case 0x014c:
+               case 0x0163:
                        sony_nc_kbd_backlight_resume();
                        break;
                default:
@@ -2390,7 +2400,9 @@ static int __sony_nc_gfx_switch_status_get(void)
 {
        unsigned int result;
 
-       if (sony_call_snc_handle(gfxs_ctl->handle, 0x0100, &result))
+       if (sony_call_snc_handle(gfxs_ctl->handle,
+                               gfxs_ctl->handle == 0x015B ? 0x0000 : 0x0100,
+                               &result))
                return -EIO;
 
        switch (gfxs_ctl->handle) {
@@ -2400,6 +2412,12 @@ static int __sony_nc_gfx_switch_status_get(void)
                 */
                return result & 0x1 ? SPEED : STAMINA;
                break;
+       case 0x015B:
+               /* 0: discrete GFX (speed)
+                * 1: integrated GFX (stamina)
+                */
+               return result & 0x1 ? STAMINA : SPEED;
+               break;
        case 0x0128:
                /* it's a more elaborated bitmask, for now:
                 * 2: integrated GFX (stamina)
index db95c547c09d6781d68dca561abe230c5d0f57ce..86af29f53bbebefec7d58ca0c595c3852e6fdf79 100644 (file)
@@ -1353,6 +1353,8 @@ config SCSI_LPFC
        tristate "Emulex LightPulse Fibre Channel Support"
        depends on PCI && SCSI
        select SCSI_FC_ATTRS
+       select GENERIC_CSUM
+       select CRC_T10DIF
        help
           This lpfc driver supports the Emulex LightPulse
           Family of Fibre Channel PCI host adapters.
index 64136c56e7065052bf24d0aa53e500f69f3957d0..33072388ea166a0cce3a2019782b4f64e5a91e33 100644 (file)
@@ -84,7 +84,7 @@ static void asd_set_ddb_type(struct domain_device *dev)
        struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
        int ddb = (int) (unsigned long) dev->lldd_dev;
 
-       if (dev->dev_type == SATA_PM_PORT)
+       if (dev->dev_type == SAS_SATA_PM_PORT)
                asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT);
        else if (dev->tproto)
                asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET);
@@ -116,7 +116,7 @@ void asd_set_dmamode(struct domain_device *dev)
        int ddb = (int) (unsigned long) dev->lldd_dev;
        u32 qdepth = 0;
 
-       if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) {
+       if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) {
                if (ata_id_has_ncq(ata_dev->id))
                        qdepth = ata_id_queue_depth(ata_dev->id);
                asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
@@ -140,8 +140,8 @@ static int asd_init_sata(struct domain_device *dev)
        int ddb = (int) (unsigned long) dev->lldd_dev;
 
        asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
-       if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
-           dev->dev_type == SATA_PM_PORT) {
+       if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
+           dev->dev_type == SAS_SATA_PM_PORT) {
                struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
                        dev->frame_rcvd;
                asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
@@ -174,7 +174,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
        asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
        if (dev->port->oob_mode != SATA_OOB_MODE) {
                flags |= OPEN_REQUIRED;
-               if ((dev->dev_type == SATA_DEV) ||
+               if ((dev->dev_type == SAS_SATA_DEV) ||
                    (dev->tproto & SAS_PROTOCOL_STP)) {
                        struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
                        if (rps_resp->frame_type == SMP_RESPONSE &&
@@ -188,8 +188,8 @@ static int asd_init_target_ddb(struct domain_device *dev)
                } else {
                        flags |= CONCURRENT_CONN_SUPP;
                        if (!dev->parent &&
-                           (dev->dev_type == EDGE_DEV ||
-                            dev->dev_type == FANOUT_DEV))
+                           (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                            dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE))
                                asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
                                                       4);
                        else
@@ -198,7 +198,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
                        asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
                }
        }
-       if (dev->dev_type == SATA_PM)
+       if (dev->dev_type == SAS_SATA_PM)
                flags |= SATA_MULTIPORT;
        asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
 
@@ -211,7 +211,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
        asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
        asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
 
-       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+       if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
                i = asd_init_sata(dev);
                if (i < 0) {
                        asd_free_ddb(asd_ha, ddb);
@@ -219,7 +219,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
                }
        }
 
-       if (dev->dev_type == SAS_END_DEV) {
+       if (dev->dev_type == SAS_END_DEVICE) {
                struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
                if (rdev->I_T_nexus_loss_timeout > 0)
                        asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
@@ -328,10 +328,10 @@ int asd_dev_found(struct domain_device *dev)
 
        spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
        switch (dev->dev_type) {
-       case SATA_PM:
+       case SAS_SATA_PM:
                res = asd_init_sata_pm_ddb(dev);
                break;
-       case SATA_PM_PORT:
+       case SAS_SATA_PM_PORT:
                res = asd_init_sata_pm_port_ddb(dev);
                break;
        default:
index 81b736c76fffab7bf40be168ff1ae3fb2388faad..4df867e07b2022df773ce0f7f0115e2b7bf4a5e7 100644 (file)
@@ -74,7 +74,7 @@ static void asd_init_phy_identify(struct asd_phy *phy)
 
        memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
 
-       phy->identify_frame->dev_type = SAS_END_DEV;
+       phy->identify_frame->dev_type = SAS_END_DEVICE;
        if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
                phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
        if (phy->sas_phy.role & PHY_ROLE_TARGET)
index cf9040933da6497ca500454274fe163c50ec324d..d4c35df3d4ae600f9e5d0d7a02ed547f4c8ff905 100644 (file)
@@ -184,7 +184,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
        struct sas_phy *phy = sas_get_local_phy(dev);
        /* Standard mandates link reset for ATA  (type 0) and
         * hard reset for SSP (type 1) */
-       int reset_type = (dev->dev_type == SATA_DEV ||
+       int reset_type = (dev->dev_type == SAS_SATA_DEV ||
                          (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
 
        asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
index f1733dfa3ae24489b44b3f6bee92568589c7ade1..777e7c0bbb4b327d6624a2d771a78fa5033d432c 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
index 5c87768c109c0fa9a8fc0dda6f8bcb6c357ca083..e66aa7c11a8a21e041f8956c6ed0f758401c8420 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -155,6 +155,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
        uint16_t status = 0, addl_status = 0, wrb_num = 0;
        struct be_mcc_wrb *temp_wrb;
        struct be_cmd_req_hdr *ioctl_hdr;
+       struct be_cmd_resp_hdr *ioctl_resp_hdr;
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
        if (beiscsi_error(phba))
@@ -204,6 +205,12 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                            ioctl_hdr->subsystem,
                            ioctl_hdr->opcode,
                            status, addl_status);
+
+               if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+                       ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
+                       if (ioctl_resp_hdr->response_length)
+                               goto release_mcc_tag;
+               }
                rc = -EAGAIN;
        }
 
@@ -267,6 +274,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
+       struct be_cmd_resp_hdr *resp_hdr;
 
        be_dws_le_to_cpu(compl, 4);
 
@@ -284,6 +292,11 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
                            hdr->subsystem, hdr->opcode,
                            compl_status, extd_status);
 
+               if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+                       resp_hdr = (struct be_cmd_resp_hdr *) hdr;
+                       if (resp_hdr->response_length)
+                               return 0;
+               }
                return -EBUSY;
        }
        return 0;
@@ -335,30 +348,26 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
 void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
                struct be_async_event_link_state *evt)
 {
-       switch (evt->port_link_status) {
-       case ASYNC_EVENT_LINK_DOWN:
+       if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
+           ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+            (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
+               phba->state = BE_ADAPTER_LINK_DOWN;
+
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Link Down on Physical Port %d\n",
+                           "BC_%d : Link Down on Port %d\n",
                            evt->physical_port);
 
-               phba->state |= BE_ADAPTER_LINK_DOWN;
                iscsi_host_for_each_session(phba->shost,
                                            be2iscsi_fail_session);
-               break;
-       case ASYNC_EVENT_LINK_UP:
+       } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
+                   ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+                    (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
                phba->state = BE_ADAPTER_UP;
+
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Link UP on Physical Port %d\n",
-                           evt->physical_port);
-               break;
-       default:
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Unexpected Async Notification %d on"
-                           "Physical Port %d\n",
-                           evt->port_link_status,
+                           "BC_%d : Link UP on Port %d\n",
                            evt->physical_port);
        }
 }
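The reworked handler above no longer switches on the raw status: a physical ASYNC_EVENT_LINK_DOWN, or a logical link event (ASYNC_EVENT_LOGICAL) carrying a real port_fault, is treated as link down; an ASYNC_EVENT_LINK_UP, or a logical event with no fault, brings the adapter up. Stripped of the driver plumbing, the link-down side of that decision reduces to something like this (constants as defined later in this patch; the beiscsi state updates and logging are omitted):

    #include <stdbool.h>
    #include <stdint.h>

    #define ASYNC_EVENT_LINK_DOWN           0x0
    #define ASYNC_EVENT_LINK_UP             0x1
    #define ASYNC_EVENT_LOGICAL             0x2
    #define BEISCSI_PHY_LINK_FAULT_NONE     0x00

    /* True when the async event should be handled as "link down". */
    static bool link_is_down(uint8_t port_link_status, uint8_t port_fault)
    {
            if (port_link_status == ASYNC_EVENT_LINK_DOWN)
                    return true;
            return (port_link_status & ASYNC_EVENT_LOGICAL) &&
                   port_fault != BEISCSI_PHY_LINK_FAULT_NONE;
    }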
@@ -479,7 +488,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
 {
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-       int wait = 0;
+       uint32_t wait = 0;
        u32 ready;
 
        do {
@@ -527,6 +536,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
        struct be_mcc_compl *compl = &mbox->compl;
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 
+       status = be_mbox_db_ready_wait(ctrl);
+       if (status)
+               return status;
+
        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val |= MPU_MAILBOX_DB_HI_MASK;
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -580,6 +593,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
        struct be_mcc_compl *compl = &mbox->compl;
        struct be_ctrl_info *ctrl = &phba->ctrl;
 
+       status = be_mbox_db_ready_wait(ctrl);
+       if (status)
+               return status;
+
        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -732,6 +749,16 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
        return status;
 }
 
+/**
+ * be_cmd_fw_initialize()- Initialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW initialize pattern for the function.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ **/
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
 {
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -762,6 +789,47 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
        return status;
 }
 
+/**
+ * be_cmd_fw_uninit()- Uninitialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW uninitialize pattern for the function.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ **/
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
+{
+       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+       int status;
+       u8 *endian_check;
+
+       spin_lock(&ctrl->mbox_lock);
+       memset(wrb, 0, sizeof(*wrb));
+
+       endian_check = (u8 *) wrb;
+       *endian_check++ = 0xFF;
+       *endian_check++ = 0xAA;
+       *endian_check++ = 0xBB;
+       *endian_check++ = 0xFF;
+       *endian_check++ = 0xFF;
+       *endian_check++ = 0xCC;
+       *endian_check++ = 0xDD;
+       *endian_check = 0xFF;
+
+       be_dws_cpu_to_le(wrb, sizeof(*wrb));
+
+       status = be_mbox_notify(ctrl);
+       if (status)
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BC_%d : be_cmd_fw_uninit Failed\n");
+
+       spin_unlock(&ctrl->mbox_lock);
+       return status;
+}
+
 int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *cq, struct be_queue_info *eq,
                          bool sol_evts, bool no_delay, int coalesce_wm)
@@ -783,20 +851,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                        OPCODE_COMMON_CQ_CREATE, sizeof(*req));
 
        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
-       if (chip_skh_r(ctrl->pdev)) {
-               req->hdr.version = MBX_CMD_VER2;
-               req->page_size = 1;
-               AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
-                             ctxt, coalesce_wm);
-               AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
-                             ctxt, no_delay);
-               AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
-                             __ilog2_u32(cq->len / 256));
-               AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
-               AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
-               AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
-               AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
-       } else {
+       if (is_chip_be2_be3r(phba)) {
                AMAP_SET_BITS(struct amap_cq_context, coalescwm,
                              ctxt, coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
@@ -809,6 +864,19 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
                              PCI_FUNC(ctrl->pdev->devfn));
+       } else {
+               req->hdr.version = MBX_CMD_VER2;
+               req->page_size = 1;
+               AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+                             ctxt, coalesce_wm);
+               AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
+                             ctxt, no_delay);
+               AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+                             __ilog2_u32(cq->len / 256));
+               AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
+               AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
        }
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -949,6 +1017,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_defq_create_req *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &dq->dma_mem;
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        void *ctxt = &req->context;
        int status;
 
@@ -961,17 +1030,36 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
                           OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
 
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
-                     1);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
-                     PCI_FUNC(ctrl->pdev->devfn));
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
-                     be_encoded_q_len(length / sizeof(struct phys_addr)));
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
-                     ctxt, entry_size);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
-                     cq->id);
+
+       if (is_chip_be2_be3r(phba)) {
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             rx_pdid, ctxt, 0);
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             rx_pdid_valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             ring_size, ctxt,
+                             be_encoded_q_len(length /
+                             sizeof(struct phys_addr)));
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             default_buffer_size, ctxt, entry_size);
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             cq_id_recv, ctxt, cq->id);
+       } else {
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             rx_pdid, ctxt, 0);
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             rx_pdid_valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             ring_size, ctxt,
+                             be_encoded_q_len(length /
+                             sizeof(struct phys_addr)));
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             default_buffer_size, ctxt, entry_size);
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             cq_id_recv, ctxt, cq->id);
+       }
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
index 23397d51ac54f5f9550f15b3b4571ed8342ee762..99073086dfe06522621e57b4e8a8e2d968f7c1c7 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -52,6 +52,10 @@ struct be_mcc_wrb {
 
 /* Completion Status */
 #define MCC_STATUS_SUCCESS 0x0
+#define MCC_STATUS_FAILED 0x1
+#define MCC_STATUS_ILLEGAL_REQUEST 0x2
+#define MCC_STATUS_ILLEGAL_FIELD 0x3
+#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
 
 #define CQE_STATUS_COMPL_MASK 0xFFFF
 #define CQE_STATUS_COMPL_SHIFT 0       /* bits 0 - 15 */
@@ -118,7 +122,8 @@ struct be_async_event_trailer {
 
 enum {
        ASYNC_EVENT_LINK_DOWN = 0x0,
-       ASYNC_EVENT_LINK_UP = 0x1
+       ASYNC_EVENT_LINK_UP = 0x1,
+       ASYNC_EVENT_LOGICAL = 0x2
 };
 
 /**
@@ -130,6 +135,9 @@ struct be_async_event_link_state {
        u8 port_link_status;
        u8 port_duplex;
        u8 port_speed;
+#define BEISCSI_PHY_LINK_FAULT_NONE    0x00
+#define BEISCSI_PHY_LINK_FAULT_LOCAL   0x01
+#define BEISCSI_PHY_LINK_FAULT_REMOTE  0x02
        u8 port_fault;
        u8 rsvd0[7];
        struct be_async_event_trailer trailer;
@@ -697,6 +705,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                        uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
 /*ISCSI Functuions */
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
 
 struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
 struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
@@ -751,6 +760,18 @@ struct amap_be_default_pdu_context {
        u8 rsvd4[32];           /* dword 3 */
 } __packed;
 
+struct amap_default_pdu_context_ext {
+       u8 rsvd0[16];   /* dword 0 */
+       u8 ring_size[4];    /* dword 0 */
+       u8 rsvd1[12];   /* dword 0 */
+       u8 rsvd2[22];   /* dword 1 */
+       u8 rx_pdid[9];  /* dword 1 */
+       u8 rx_pdid_valid;   /* dword 1 */
+       u8 default_buffer_size[16]; /* dword 2 */
+       u8 cq_id_recv[16];  /* dword 2 */
+       u8 rsvd3[32];   /* dword 3 */
+} __packed;
+
 struct be_defq_create_req {
        struct be_cmd_req_hdr hdr;
        u16 num_pages;
@@ -896,7 +917,7 @@ struct amap_it_dmsg_cqe_v2 {
  * stack to notify the
  * controller of a posted Work Request Block
  */
-#define DB_WRB_POST_CID_MASK           0x3FF   /* bits 0 - 9 */
+#define DB_WRB_POST_CID_MASK           0xFFFF  /* bits 0 - 15 */
 #define DB_DEF_PDU_WRB_INDEX_MASK      0xFF    /* bits 0 - 7 */
 
 #define DB_DEF_PDU_WRB_INDEX_SHIFT     16
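Widening DB_WRB_POST_CID_MASK to 16 bits lets the full connection-id range be posted in the work-request doorbell. The masks and shift pack the CID and WRB index into one 32-bit doorbell word, roughly as in the sketch below (the driver's actual doorbell helper is outside this hunk, so treat the packing as an assumption):

    #include <stdint.h>

    #define DB_WRB_POST_CID_MASK            0xFFFF  /* bits 0 - 15 */
    #define DB_DEF_PDU_WRB_INDEX_MASK       0xFF
    #define DB_DEF_PDU_WRB_INDEX_SHIFT      16

    static uint32_t wrb_doorbell(uint16_t cid, uint8_t wrb_index)
    {
            uint32_t val = 0;

            val |= cid & DB_WRB_POST_CID_MASK;
            val |= (uint32_t)(wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
                    << DB_DEF_PDU_WRB_INDEX_SHIFT;
            return val;
    }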
index 9014690fe841f08e8993bcfd2153cba785f6135e..ef36be003f67a49f0d37cf1ed1136c87894bd942 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -161,7 +161,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
                                struct beiscsi_conn *beiscsi_conn,
                                unsigned int cid)
 {
-       if (phba->conn_table[cid]) {
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+       if (phba->conn_table[cri_index]) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BS_%d : Connection table already occupied. Detected clash\n");
 
@@ -169,9 +171,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
        } else {
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                            "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
-                           cid, beiscsi_conn);
+                           cri_index, beiscsi_conn);
 
-               phba->conn_table[cid] = beiscsi_conn;
+               phba->conn_table[cri_index] = beiscsi_conn;
        }
        return 0;
 }
@@ -990,9 +992,27 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
 static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
 {
        struct beiscsi_hba *phba = beiscsi_ep->phba;
+       struct beiscsi_conn *beiscsi_conn;
 
        beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
        beiscsi_ep->phba = NULL;
+       phba->ep_array[BE_GET_CRI_FROM_CID
+                      (beiscsi_ep->ep_cid)] = NULL;
+
+       /**
+        * Check if any connection resource allocated by driver
+        * is to be freed. This case occurs when target redirection
+        * or connection retry is done.
+        **/
+       if (!beiscsi_ep->conn)
+               return;
+
+       beiscsi_conn = beiscsi_ep->conn;
+       if (beiscsi_conn->login_in_progress) {
+               beiscsi_free_mgmt_task_handles(beiscsi_conn,
+                                              beiscsi_conn->task);
+               beiscsi_conn->login_in_progress = 0;
+       }
 }
 
 /**
@@ -1009,7 +1029,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
 {
        struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
        struct beiscsi_hba *phba = beiscsi_ep->phba;
-       struct be_mcc_wrb *wrb;
        struct tcp_connect_and_offload_out *ptcpcnct_out;
        struct be_dma_mem nonemb_cmd;
        unsigned int tag;
@@ -1029,15 +1048,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                    "BS_%d : In beiscsi_open_conn, ep_cid=%d\n",
                    beiscsi_ep->ep_cid);
 
-       phba->ep_array[beiscsi_ep->ep_cid -
-                      phba->fw_config.iscsi_cid_start] = ep;
-       if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
-                                 phba->params.cxns_per_ctrl * 2)) {
-
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : Failed in allocate iscsi cid\n");
-               goto free_ep;
-       }
+       phba->ep_array[BE_GET_CRI_FROM_CID
+                      (beiscsi_ep->ep_cid)] = ep;
 
        beiscsi_ep->cid_vld = 0;
        nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -1049,24 +1061,24 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                            "BS_%d : Failed to allocate memory for"
                            " mgmt_open_connection\n");
 
-               beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
+               beiscsi_free_ep(beiscsi_ep);
                return -ENOMEM;
        }
        nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in);
        memset(nonemb_cmd.va, 0, nonemb_cmd.size);
        tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
-       if (!tag) {
+       if (tag <= 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BS_%d : mgmt_open_connection Failed for cid=%d\n",
                            beiscsi_ep->ep_cid);
 
-               beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
+               beiscsi_free_ep(beiscsi_ep);
                return -EAGAIN;
        }
 
-       ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+       ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
        if (ret) {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -1074,10 +1086,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
 
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
-               goto free_ep;
+               beiscsi_free_ep(beiscsi_ep);
+               return -EBUSY;
        }
 
-       ptcpcnct_out = embedded_payload(wrb);
+       ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
        beiscsi_ep = ep->dd_data;
        beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
        beiscsi_ep->cid_vld = 1;
@@ -1087,10 +1100,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
        pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
        return 0;
-
-free_ep:
-       beiscsi_free_ep(beiscsi_ep);
-       return -EBUSY;
 }
 
 /**
@@ -1119,6 +1128,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
                return ERR_PTR(ret);
        }
 
+       if (beiscsi_error(phba)) {
+               ret = -EIO;
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BS_%d : The FW state Not Stable!!!\n");
+               return ERR_PTR(ret);
+       }
+
        if (phba->state != BE_ADAPTER_UP) {
                ret = -EBUSY;
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
@@ -1201,8 +1217,10 @@ static int beiscsi_close_conn(struct  beiscsi_endpoint *beiscsi_ep, int flag)
 static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
                                      unsigned int cid)
 {
-       if (phba->conn_table[cid])
-               phba->conn_table[cid] = NULL;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+       if (phba->conn_table[cri_index])
+               phba->conn_table[cri_index] = NULL;
        else {
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                            "BS_%d : Connection table Not occupied.\n");
index 38eab7232159dd7ec412e798a3a521cc8bab862f..31ddc84943989b94eaa4682b91d1a8461c7d066c 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
index 4e2733d2300365761e7dba5df79e3a1b606c67e2..d24a2867bc21cb885506f9d35d0fbe397e27400a 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -153,10 +153,14 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
 
 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
+DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
+DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
 struct device_attribute *beiscsi_attrs[] = {
        &dev_attr_beiscsi_log_enable,
        &dev_attr_beiscsi_drvr_ver,
        &dev_attr_beiscsi_adapter_family,
+       &dev_attr_beiscsi_fw_ver,
+       &dev_attr_beiscsi_active_cid_count,
        NULL,
 };
 
@@ -702,7 +706,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
                                    + BE2_TMFS
                                    + BE2_NOPOUT_REQ));
        phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
-       phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
+       phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
        phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
        phba->params.num_sge_per_io = BE2_SGE;
        phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
@@ -1032,7 +1036,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba,
 static unsigned int
 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
                          struct beiscsi_hba *phba,
-                         unsigned short cid,
                          struct pdu_base *ppdu,
                          unsigned long pdu_len,
                          void *pbuffer, unsigned long buf_len)
@@ -1144,9 +1147,10 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
        struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       pwrb_context = &phwi_ctrlr->wrb_context[cid];
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        if (pwrb_context->wrb_handles_available >= 2) {
                pwrb_handle = pwrb_context->pwrb_handle_base[
                                            pwrb_context->alloc_index];
@@ -1322,8 +1326,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
        hdr->t2retain = 0;
        hdr->flags = csol_cqe->i_flags;
        hdr->response = csol_cqe->i_resp;
-       hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
-       hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1);
+       hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+       hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+                                    csol_cqe->cmd_wnd - 1);
 
        hdr->dlength[0] = 0;
        hdr->dlength[1] = 0;
@@ -1346,9 +1351,9 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
        hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
        hdr->flags = csol_cqe->i_flags;
        hdr->response = csol_cqe->i_resp;
-       hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
-       hdr->max_cmdsn = (csol_cqe->exp_cmdsn +
-                         csol_cqe->cmd_wnd - 1);
+       hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+       hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+                                    csol_cqe->cmd_wnd - 1);
 
        hdr->itt = io_task->libiscsi_itt;
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
@@ -1363,35 +1368,29 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
        struct hwi_controller *phwi_ctrlr;
        struct iscsi_task *task;
        struct beiscsi_io_task *io_task;
-       struct iscsi_conn *conn = beiscsi_conn->conn;
-       struct iscsi_session *session = conn->session;
-       uint16_t wrb_index, cid;
+       uint16_t wrb_index, cid, cri_index;
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       if (chip_skh_r(phba->pcidev)) {
-               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+       if (is_chip_be2_be3r(phba)) {
+               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
                                          wrb_idx, psol);
-               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
                                    cid, psol);
        } else {
-               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
                                          wrb_idx, psol);
-               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
                                    cid, psol);
        }
 
-       pwrb_context = &phwi_ctrlr->wrb_context[
-                       cid - phba->fw_config.iscsi_cid_start];
+       cri_index = BE_GET_CRI_FROM_CID(cid);
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
        task = pwrb_handle->pio_handle;
 
        io_task = task->dd_data;
-       spin_lock_bh(&phba->mgmt_sgl_lock);
-       free_mgmt_sgl_handle(phba, io_task->psgl_handle);
-       spin_unlock_bh(&phba->mgmt_sgl_lock);
-       spin_lock_bh(&session->lock);
-       free_wrb_handle(phba, pwrb_context, pwrb_handle);
-       spin_unlock_bh(&session->lock);
+       memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
+       iscsi_put_task(task);
 }
 
 static void
@@ -1406,8 +1405,8 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
        hdr = (struct iscsi_nopin *)task->hdr;
        hdr->flags = csol_cqe->i_flags;
        hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
-       hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn +
-                        csol_cqe->cmd_wnd - 1);
+       hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+                                    csol_cqe->cmd_wnd - 1);
 
        hdr->opcode = ISCSI_OP_NOOP_IN;
        hdr->itt = io_task->libiscsi_itt;
@@ -1418,7 +1417,26 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
                struct sol_cqe *psol,
                struct common_sol_cqe *csol_cqe)
 {
-       if (chip_skh_r(phba->pcidev)) {
+       if (is_chip_be2_be3r(phba)) {
+               csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                   i_exp_cmd_sn, psol);
+               csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                 i_res_cnt, psol);
+               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                 i_cmd_wnd, psol);
+               csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                   wrb_index, psol);
+               csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
+                                             cid, psol);
+               csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                hw_sts, psol);
+               csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                i_resp, psol);
+               csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+                                               i_sts, psol);
+               csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                 i_flags, psol);
+       } else {
                csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                    i_exp_cmd_sn, psol);
                csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
@@ -1429,7 +1447,7 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
                                              cid, psol);
                csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                 hw_sts, psol);
-               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
+               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                  i_cmd_wnd, psol);
                if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                  cmd_cmpl, psol))
@@ -1445,25 +1463,6 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
                if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                  o, psol))
                        csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
-       } else {
-               csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                   i_exp_cmd_sn, psol);
-               csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                 i_res_cnt, psol);
-               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                 i_cmd_wnd, psol);
-               csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                   wrb_index, psol);
-               csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
-                                             cid, psol);
-               csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                hw_sts, psol);
-               csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                i_resp, psol);
-               csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
-                                               i_sts, psol);
-               csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                 i_flags, psol);
        }
 }
 
@@ -1480,14 +1479,15 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct iscsi_session *session = conn->session;
        struct common_sol_cqe csol_cqe = {0};
+       uint16_t cri_index = 0;
 
        phwi_ctrlr = phba->phwi_ctrlr;
 
        /* Copy the elements to a common structure */
        adapter_get_sol_cqe(phba, psol, &csol_cqe);
 
-       pwrb_context = &phwi_ctrlr->wrb_context[
-                       csol_cqe.cid - phba->fw_config.iscsi_cid_start];
+       cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 
        pwrb_handle = pwrb_context->pwrb_handle_basestd[
                      csol_cqe.wrb_index];
@@ -1561,15 +1561,15 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
        unsigned char is_header = 0;
        unsigned int index, dpl;
 
-       if (chip_skh_r(phba->pcidev)) {
-               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+       if (is_chip_be2_be3r(phba)) {
+               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
                                    dpl, pdpdu_cqe);
-               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
                                      index, pdpdu_cqe);
        } else {
-               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
                                    dpl, pdpdu_cqe);
-               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
                                      index, pdpdu_cqe);
        }
 
@@ -1613,8 +1613,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
 
        WARN_ON(!pasync_handle);
 
-       pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
-                                            phba->fw_config.iscsi_cid_start;
+       pasync_handle->cri =
+                       BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
        pasync_handle->is_header = is_header;
        pasync_handle->buffer_len = dpl;
        *pcq_index = index;
@@ -1856,8 +1856,6 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
        }
 
        status = beiscsi_process_async_pdu(beiscsi_conn, phba,
-                                          (beiscsi_conn->beiscsi_conn_cid -
-                                           phba->fw_config.iscsi_cid_start),
                                            phdr, hdr_len, pfirst_buffer,
                                            offset);
 
@@ -2011,6 +2009,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
        unsigned int num_processed = 0;
        unsigned int tot_nump = 0;
        unsigned short code = 0, cid = 0;
+       uint16_t cri_index = 0;
        struct beiscsi_conn *beiscsi_conn;
        struct beiscsi_endpoint *beiscsi_ep;
        struct iscsi_endpoint *ep;
@@ -2028,7 +2027,9 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                         32] & CQE_CODE_MASK);
 
                 /* Get the CID */
-               if (chip_skh_r(phba->pcidev)) {
+               if (is_chip_be2_be3r(phba)) {
+                       cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+               } else {
                        if ((code == DRIVERMSG_NOTIFY) ||
                            (code == UNSOL_HDR_NOTIFY) ||
                            (code == UNSOL_DATA_NOTIFY))
@@ -2038,10 +2039,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                         else
                                 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                     cid, sol);
-                  } else
-                        cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+               }
 
-               ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
+               cri_index = BE_GET_CRI_FROM_CID(cid);
+               ep = phba->ep_array[cri_index];
                beiscsi_ep = ep->dd_data;
                beiscsi_conn = beiscsi_ep->conn;
 
@@ -2191,7 +2192,7 @@ void beiscsi_process_all_cqs(struct work_struct *work)
 
 static int be_iopoll(struct blk_iopoll *iop, int budget)
 {
-       static unsigned int ret;
+       unsigned int ret;
        struct beiscsi_hba *phba;
        struct be_eq_obj *pbe_eq;
 
@@ -2416,11 +2417,11 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
                /* Check for the data_count */
                dsp_value = (task->data_count) ? 1 : 0;
 
-               if (chip_skh_r(phba->pcidev))
-                       AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
+               if (is_chip_be2_be3r(phba))
+                       AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
                                      pwrb, dsp_value);
                else
-                       AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
+                       AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
                                      pwrb, dsp_value);
 
                /* Map addr only if there is data_count */
@@ -2538,8 +2539,9 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 
 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
 {
-       struct be_mem_descriptor *mem_descr;
        dma_addr_t bus_add;
+       struct hwi_controller *phwi_ctrlr;
+       struct be_mem_descriptor *mem_descr;
        struct mem_array *mem_arr, *mem_arr_orig;
        unsigned int i, j, alloc_size, curr_alloc_size;
 
@@ -2547,9 +2549,18 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
        if (!phba->phwi_ctrlr)
                return -ENOMEM;
 
+       /* Allocate memory for wrb_context */
+       phwi_ctrlr = phba->phwi_ctrlr;
+       phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
+                                         phba->params.cxns_per_ctrl,
+                                         GFP_KERNEL);
+       if (!phwi_ctrlr->wrb_context)
+               return -ENOMEM;
+
        phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
                                 GFP_KERNEL);
        if (!phba->init_mem) {
+               kfree(phwi_ctrlr->wrb_context);
                kfree(phba->phwi_ctrlr);
                return -ENOMEM;
        }
@@ -2558,6 +2569,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
                               GFP_KERNEL);
        if (!mem_arr_orig) {
                kfree(phba->init_mem);
+               kfree(phwi_ctrlr->wrb_context);
                kfree(phba->phwi_ctrlr);
                return -ENOMEM;
        }
@@ -2628,6 +2640,7 @@ free_mem:
        }
        kfree(mem_arr_orig);
        kfree(phba->init_mem);
+       kfree(phba->phwi_ctrlr->wrb_context);
        kfree(phba->phwi_ctrlr);
        return -ENOMEM;
 }
@@ -2666,6 +2679,7 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba)
 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 {
        struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
+       struct hwi_context_memory *phwi_ctxt;
        struct wrb_handle *pwrb_handle = NULL;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_wrb_context *pwrb_context;
@@ -2680,7 +2694,18 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
        mem_descr_wrb += HWI_MEM_WRB;
        phwi_ctrlr = phba->phwi_ctrlr;
 
-       for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+       /* Allocate memory for WRBQ */
+       phwi_ctxt = phwi_ctrlr->phwi_ctxt;
+       phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
+                                    phba->fw_config.iscsi_cid_count,
+                                    GFP_KERNEL);
+       if (!phwi_ctxt->be_wrbq) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : WRBQ Mem Alloc Failed\n");
+               return -ENOMEM;
+       }
+
+       for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
                pwrb_context = &phwi_ctrlr->wrb_context[index];
                pwrb_context->pwrb_handle_base =
                                kzalloc(sizeof(struct wrb_handle *) *
@@ -2723,7 +2748,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
                }
        }
        idx = 0;
-       for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+       for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
                pwrb_context = &phwi_ctrlr->wrb_context[index];
                if (!num_cxn_wrb) {
                        pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
@@ -2752,7 +2777,7 @@ init_wrb_hndl_failed:
        return -ENOMEM;
 }
 
-static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
+static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 {
        struct hwi_controller *phwi_ctrlr;
        struct hba_parameters *p = &phba->params;
@@ -2770,6 +2795,15 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
        memset(pasync_ctx, 0, sizeof(*pasync_ctx));
 
+       pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) *
+                                         phba->fw_config.iscsi_cid_count,
+                                         GFP_KERNEL);
+       if (!pasync_ctx->async_entry) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
+               return -ENOMEM;
+       }
+
        pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
        pasync_ctx->buffer_size = p->defpdu_hdr_sz;
 
@@ -2934,6 +2968,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        pasync_ctx->async_header.ep_read_ptr = -1;
        pasync_ctx->async_data.host_write_ptr = 0;
        pasync_ctx->async_data.ep_read_ptr = -1;
+
+       return 0;
 }
 
 static int
@@ -3293,6 +3329,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
        void *wrb_vaddr;
        struct be_dma_mem sgl;
        struct be_mem_descriptor *mem_descr;
+       struct hwi_wrb_context *pwrb_context;
        int status;
 
        idx = 0;
@@ -3351,8 +3388,9 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
                        kfree(pwrb_arr);
                        return status;
                }
-               phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
-                                                                  id;
+               pwrb_context = &phwi_ctrlr->wrb_context[i];
+               pwrb_context->cid = phwi_context->be_wrbq[i].id;
+               BE_SET_CID_TO_CRI(i, pwrb_context->cid);
        }
        kfree(pwrb_arr);
        return 0;
@@ -3365,7 +3403,7 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
        struct hwi_wrb_context *pwrb_context;
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+       for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
                pwrb_context = &phwi_ctrlr->wrb_context[index];
                kfree(pwrb_context->pwrb_handle_base);
                kfree(pwrb_context->pwrb_handle_basestd);
@@ -3394,6 +3432,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
+       struct hwi_async_pdu_context *pasync_ctx;
        int i, eq_num;
 
        phwi_ctrlr = phba->phwi_ctrlr;
@@ -3403,6 +3442,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
                if (q->created)
                        beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
        }
+       kfree(phwi_context->be_wrbq);
        free_wrb_handles(phba);
 
        q = &phwi_context->be_def_hdrq;
@@ -3430,6 +3470,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
                        beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
        }
        be_mcc_queues_destroy(phba);
+
+       pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
+       kfree(pasync_ctx->async_entry);
+       be_cmd_fw_uninit(ctrl);
 }
 
 static int be_mcc_queues_create(struct beiscsi_hba *phba,
@@ -3607,7 +3651,12 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
        if (beiscsi_init_wrb_handle(phba))
                return -ENOMEM;
 
-       hwi_init_async_pdu_ctx(phba);
+       if (hwi_init_async_pdu_ctx(phba)) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_async_pdu_ctx failed\n");
+               return -ENOMEM;
+       }
+
        if (hwi_init_port(phba) != 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : hwi_init_controller failed\n");
@@ -3637,6 +3686,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
                mem_descr++;
        }
        kfree(phba->init_mem);
+       kfree(phba->phwi_ctrlr->wrb_context);
        kfree(phba->phwi_ctrlr);
 }
 
@@ -3769,7 +3819,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 
 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 {
-       int i, new_cid;
+       int i;
 
        phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
                                  GFP_KERNEL);
@@ -3780,19 +3830,33 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
                return -ENOMEM;
        }
        phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
-                                phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
+                                phba->params.cxns_per_ctrl, GFP_KERNEL);
        if (!phba->ep_array) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : Failed to allocate memory in "
                            "hba_setup_cid_tbls\n");
                kfree(phba->cid_array);
+               phba->cid_array = NULL;
                return -ENOMEM;
        }
-       new_cid = phba->fw_config.iscsi_cid_start;
-       for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
-               phba->cid_array[i] = new_cid;
-               new_cid += 2;
+
+       phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
+                                  phba->params.cxns_per_ctrl, GFP_KERNEL);
+       if (!phba->conn_table) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Failed to allocate memory in "
+                           "hba_setup_cid_tbls\n");
+
+               kfree(phba->cid_array);
+               kfree(phba->ep_array);
+               phba->cid_array = NULL;
+               phba->ep_array = NULL;
+               return -ENOMEM;
        }
+
+       for (i = 0; i < phba->params.cxns_per_ctrl; i++)
+               phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid;
+
        phba->avlbl_cids = phba->params.cxns_per_ctrl;
        return 0;
 }
@@ -4062,6 +4126,53 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
        kfree(phba->eh_sgl_hndl_base);
        kfree(phba->cid_array);
        kfree(phba->ep_array);
+       kfree(phba->conn_table);
+}
+
+/**
+ * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
+ * @beiscsi_conn: ptr to the conn to be cleaned up
+ * @task: ptr to iscsi_task resource to be freed.
+ *
+ * Free driver mgmt resources bound to the CXN.
+ **/
+void
+beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
+                               struct iscsi_task *task)
+{
+       struct beiscsi_io_task *io_task;
+       struct beiscsi_hba *phba = beiscsi_conn->phba;
+       struct hwi_wrb_context *pwrb_context;
+       struct hwi_controller *phwi_ctrlr;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(
+                               beiscsi_conn->beiscsi_conn_cid);
+
+       phwi_ctrlr = phba->phwi_ctrlr;
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+
+       io_task = task->dd_data;
+
+       if (io_task->pwrb_handle) {
+               memset(io_task->pwrb_handle->pwrb, 0,
+                      sizeof(struct iscsi_wrb));
+               free_wrb_handle(phba, pwrb_context,
+                               io_task->pwrb_handle);
+               io_task->pwrb_handle = NULL;
+       }
+
+       if (io_task->psgl_handle) {
+               spin_lock_bh(&phba->mgmt_sgl_lock);
+               free_mgmt_sgl_handle(phba,
+                                    io_task->psgl_handle);
+               io_task->psgl_handle = NULL;
+               spin_unlock_bh(&phba->mgmt_sgl_lock);
+       }
+
+       if (io_task->mtask_addr)
+               pci_unmap_single(phba->pcidev,
+                                io_task->mtask_addr,
+                                io_task->mtask_data_count,
+                                PCI_DMA_TODEVICE);
 }
 
 /**
@@ -4078,10 +4189,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
        struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(
+                            beiscsi_conn->beiscsi_conn_cid);
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
-                       - phba->fw_config.iscsi_cid_start];
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 
        if (io_task->cmd_bhs) {
                pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
@@ -4103,27 +4215,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
                        io_task->psgl_handle = NULL;
                }
        } else {
-               if (!beiscsi_conn->login_in_progress) {
-                       if (io_task->pwrb_handle) {
-                               free_wrb_handle(phba, pwrb_context,
-                                               io_task->pwrb_handle);
-                               io_task->pwrb_handle = NULL;
-                       }
-                       if (io_task->psgl_handle) {
-                               spin_lock(&phba->mgmt_sgl_lock);
-                               free_mgmt_sgl_handle(phba,
-                                                    io_task->psgl_handle);
-                               spin_unlock(&phba->mgmt_sgl_lock);
-                               io_task->psgl_handle = NULL;
-                       }
-                       if (io_task->mtask_addr) {
-                               pci_unmap_single(phba->pcidev,
-                                                io_task->mtask_addr,
-                                                io_task->mtask_data_count,
-                                                PCI_DMA_TODEVICE);
-                               io_task->mtask_addr = 0;
-                       }
-               }
+               if (!beiscsi_conn->login_in_progress)
+                       beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
        }
 }
 
@@ -4146,15 +4239,14 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
        beiscsi_cleanup_task(task);
        spin_unlock_bh(&session->lock);
 
-       pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
-                                      phba->fw_config.iscsi_cid_start));
+       pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
 
        /* Check for the adapter family */
-       if (chip_skh_r(phba->pcidev))
-               beiscsi_offload_cxn_v2(params, pwrb_handle);
-       else
+       if (is_chip_be2_be3r(phba))
                beiscsi_offload_cxn_v0(params, pwrb_handle,
                                       phba->init_mem);
+       else
+               beiscsi_offload_cxn_v2(params, pwrb_handle);
 
        be_dws_le_to_cpu(pwrb_handle->pwrb,
                         sizeof(struct iscsi_target_context_update_wrb));
@@ -4194,6 +4286,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
        itt_t itt;
+       uint16_t cri_index = 0;
        struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
        dma_addr_t paddr;
 
@@ -4223,8 +4316,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                        goto free_hndls;
                }
                io_task->pwrb_handle = alloc_wrb_handle(phba,
-                                       beiscsi_conn->beiscsi_conn_cid -
-                                       phba->fw_config.iscsi_cid_start);
+                                       beiscsi_conn->beiscsi_conn_cid);
                if (!io_task->pwrb_handle) {
                        beiscsi_log(phba, KERN_ERR,
                                    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4236,6 +4328,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
        } else {
                io_task->scsi_cmnd = NULL;
                if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
+                       beiscsi_conn->task = task;
                        if (!beiscsi_conn->login_in_progress) {
                                spin_lock(&phba->mgmt_sgl_lock);
                                io_task->psgl_handle = (struct sgl_handle *)
@@ -4257,8 +4350,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                                                        io_task->psgl_handle;
                                io_task->pwrb_handle =
                                        alloc_wrb_handle(phba,
-                                       beiscsi_conn->beiscsi_conn_cid -
-                                       phba->fw_config.iscsi_cid_start);
+                                       beiscsi_conn->beiscsi_conn_cid);
                                if (!io_task->pwrb_handle) {
                                        beiscsi_log(phba, KERN_ERR,
                                                    BEISCSI_LOG_IO |
@@ -4278,7 +4370,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                                io_task->pwrb_handle =
                                                beiscsi_conn->plogin_wrb_handle;
                        }
-                       beiscsi_conn->task = task;
                } else {
                        spin_lock(&phba->mgmt_sgl_lock);
                        io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
@@ -4295,8 +4386,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                        }
                        io_task->pwrb_handle =
                                        alloc_wrb_handle(phba,
-                                       beiscsi_conn->beiscsi_conn_cid -
-                                       phba->fw_config.iscsi_cid_start);
+                                       beiscsi_conn->beiscsi_conn_cid);
                        if (!io_task->pwrb_handle) {
                                beiscsi_log(phba, KERN_ERR,
                                            BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4324,12 +4414,13 @@ free_io_hndls:
 free_mgmt_hndls:
        spin_lock(&phba->mgmt_sgl_lock);
        free_mgmt_sgl_handle(phba, io_task->psgl_handle);
+       io_task->psgl_handle = NULL;
        spin_unlock(&phba->mgmt_sgl_lock);
 free_hndls:
        phwi_ctrlr = phba->phwi_ctrlr;
-       pwrb_context = &phwi_ctrlr->wrb_context[
-                       beiscsi_conn->beiscsi_conn_cid -
-                       phba->fw_config.iscsi_cid_start];
+       cri_index = BE_GET_CRI_FROM_CID(
+                       beiscsi_conn->beiscsi_conn_cid);
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        if (io_task->pwrb_handle)
                free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
        io_task->pwrb_handle = NULL;
@@ -4351,7 +4442,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
        unsigned int doorbell = 0;
 
        pwrb = io_task->pwrb_handle->pwrb;
-       memset(pwrb, 0, sizeof(*pwrb));
 
        io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
        io_task->bhs_len = sizeof(struct be_cmd_bhs);
@@ -4465,19 +4555,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
        pwrb = io_task->pwrb_handle->pwrb;
        memset(pwrb, 0, sizeof(*pwrb));
 
-       if (chip_skh_r(phba->pcidev)) {
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
-                             be32_to_cpu(task->cmdsn));
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
-                             io_task->pwrb_handle->wrb_index);
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
-                             io_task->psgl_handle->sgl_index);
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
-                             task->data_count);
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
-                             io_task->pwrb_handle->nxt_wrb_index);
-               pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
-       } else {
+       if (is_chip_be2_be3r(phba)) {
                AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
                              be32_to_cpu(task->cmdsn));
                AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
@@ -4489,6 +4567,18 @@ static int beiscsi_mtask(struct iscsi_task *task)
                AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
                              io_task->pwrb_handle->nxt_wrb_index);
                pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
+       } else {
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
+                             be32_to_cpu(task->cmdsn));
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
+                             io_task->pwrb_handle->wrb_index);
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
+                             io_task->psgl_handle->sgl_index);
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
+                             task->data_count);
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
+                             io_task->pwrb_handle->nxt_wrb_index);
+               pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
        }
 
 
@@ -4501,19 +4591,19 @@ static int beiscsi_mtask(struct iscsi_task *task)
        case ISCSI_OP_NOOP_OUT:
                if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
                        ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
-                       if (chip_skh_r(phba->pcidev))
-                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+                       if (is_chip_be2_be3r(phba))
+                               AMAP_SET_BITS(struct amap_iscsi_wrb,
                                              dmsg, pwrb, 1);
                        else
-                               AMAP_SET_BITS(struct amap_iscsi_wrb,
+                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
                                              dmsg, pwrb, 1);
                } else {
                        ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
-                       if (chip_skh_r(phba->pcidev))
-                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+                       if (is_chip_be2_be3r(phba))
+                               AMAP_SET_BITS(struct amap_iscsi_wrb,
                                              dmsg, pwrb, 0);
                        else
-                               AMAP_SET_BITS(struct amap_iscsi_wrb,
+                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
                                              dmsg, pwrb, 0);
                }
                hwi_write_buffer(pwrb, task);
@@ -4540,9 +4630,9 @@ static int beiscsi_mtask(struct iscsi_task *task)
        }
 
        /* Set the task type */
-       io_task->wrb_type = (chip_skh_r(phba->pcidev)) ?
-               AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) :
-               AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb);
+       io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
+               AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
+               AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
 
        doorbell |= cid & DB_WRB_POST_CID_MASK;
        doorbell |= (io_task->pwrb_handle->wrb_index &
@@ -4834,6 +4924,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        case OC_SKH_ID1:
                phba->generation = BE_GEN4;
                phba->iotask_fn = beiscsi_iotask_v2;
+               break;
        default:
                phba->generation = 0;
        }
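
The allocation paths changed above now size every per-connection table from the firmware-reported connection count and unwind all earlier allocations on failure (wrb_context, init_mem, ep_array, conn_table). A minimal userspace sketch of that allocate-and-unwind pattern, with made-up names rather than the driver's structures:

#include <stdlib.h>

struct conn_tables {
        unsigned short *cid_array;      /* pool of free connection IDs */
        void **ep_array;                /* endpoint per connection     */
        void **conn_table;              /* driver conn per connection  */
};

/* Allocate all three tables for 'cxns' connections, or none of them. */
static int conn_tables_alloc(struct conn_tables *t, unsigned int cxns)
{
        t->cid_array = calloc(cxns, sizeof(*t->cid_array));
        if (!t->cid_array)
                return -1;

        t->ep_array = calloc(cxns, sizeof(*t->ep_array));
        if (!t->ep_array)
                goto free_cid;

        t->conn_table = calloc(cxns, sizeof(*t->conn_table));
        if (!t->conn_table)
                goto free_ep;

        return 0;

free_ep:
        free(t->ep_array);
        t->ep_array = NULL;
free_cid:
        free(t->cid_array);
        t->cid_array = NULL;
        return -1;
}

int main(void)
{
        struct conn_tables t = { 0 };

        if (conn_tables_alloc(&t, 128))
                return 1;
        free(t.conn_table);
        free(t.ep_array);
        free(t.cid_array);
        return 0;
}
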
index 5946577d79d6579f6f4fa4ff8c8eaf156caf9ac5..2c06ef3c02aca49974f6bee2487f09b98c2b1d4d 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -36,7 +36,7 @@
 
 #include "be.h"
 #define DRV_NAME               "be2iscsi"
-#define BUILD_STR              "10.0.272.0"
+#define BUILD_STR              "10.0.467.0"
 #define BE_NAME                        "Emulex OneConnect" \
                                "Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC               BE_NAME " " "Driver"
@@ -66,8 +66,9 @@
 
 #define MAX_CPUS               64
 #define BEISCSI_MAX_NUM_CPUS   7
-#define OC_SKH_MAX_NUM_CPUS    63
+#define OC_SKH_MAX_NUM_CPUS    31
 
+#define BEISCSI_VER_STRLEN 32
 
 #define BEISCSI_SGLIST_ELEMENTS        30
 
@@ -265,7 +266,9 @@ struct invalidate_command_table {
        unsigned short cid;
 } __packed;
 
-#define chip_skh_r(pdev)       (pdev->device == OC_SKH_ID1)
+#define chip_be2(phba)      (phba->generation == BE_GEN2)
+#define chip_be3_r(phba)    (phba->generation == BE_GEN3)
+#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
 struct beiscsi_hba {
        struct hba_parameters params;
        struct hwi_controller *phwi_ctrlr;
@@ -304,10 +307,15 @@ struct beiscsi_hba {
        unsigned short avlbl_cids;
        unsigned short cid_alloc;
        unsigned short cid_free;
-       struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2];
        struct list_head hba_queue;
+#define BE_MAX_SESSION 2048
+#define BE_SET_CID_TO_CRI(cri_index, cid) \
+                         (phba->cid_to_cri_map[cid] = cri_index)
+#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
+       unsigned short cid_to_cri_map[BE_MAX_SESSION];
        unsigned short *cid_array;
        struct iscsi_endpoint **ep_array;
+       struct beiscsi_conn **conn_table;
        struct iscsi_boot_kset *boot_kset;
        struct Scsi_Host *shost;
        struct iscsi_iface *ipv4_iface;
@@ -339,6 +347,7 @@ struct beiscsi_hba {
        struct delayed_work beiscsi_hw_check_task;
 
        u8 mac_address[ETH_ALEN];
+       char fw_ver_str[BEISCSI_VER_STRLEN];
        char wq_name[20];
        struct workqueue_struct *wq;    /* The actual work queue */
        struct be_ctrl_info ctrl;
@@ -563,7 +572,7 @@ struct hwi_async_pdu_context {
         * This is a varying size list! Do not add anything
         * after this entry!!
         */
-       struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2];
+       struct hwi_async_entry *async_entry;
 };
 
 #define PDUCQE_CODE_MASK       0x0000003F
@@ -749,6 +758,8 @@ void
 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
 
 void beiscsi_process_all_cqs(struct work_struct *work);
+void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
+                                    struct iscsi_task *task);
 
 static inline bool beiscsi_error(struct beiscsi_hba *phba)
 {
@@ -933,7 +944,7 @@ struct hwi_controller {
        struct sgl_handle *psgl_handle_base;
        unsigned int wrb_mem_index;
 
-       struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2];
+       struct hwi_wrb_context *wrb_context;
        struct mcc_wrb *pmcc_wrb_base;
        struct be_ring default_pdu_hdr;
        struct be_ring default_pdu_data;
@@ -970,9 +981,7 @@ struct hwi_context_memory {
        struct be_queue_info be_def_hdrq;
        struct be_queue_info be_def_dataq;
 
-       struct be_queue_info be_wrbq[BE2_MAX_SESSIONS];
-       struct be_mcc_wrb_context *pbe_mcc_context;
-
+       struct be_queue_info *be_wrbq;
        struct hwi_async_pdu_context *pasync_ctx;
 };
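
BE_SET_CID_TO_CRI()/BE_GET_CRI_FROM_CID() above replace the old `cid - iscsi_cid_start` arithmetic: when the WRB rings are created, the CID the firmware assigned to ring i is recorded against driver index i, and the I/O paths look that index up from the CID. A small userspace sketch of the same table (sizes and names illustrative, not the driver's):

#include <assert.h>

#define MAX_SESSION 2048                     /* cf. BE_MAX_SESSION */

static unsigned short cid_to_cri_map[MAX_SESSION];

/* Filled while creating the WRB rings, cf. BE_SET_CID_TO_CRI(). */
static void set_cid_to_cri(unsigned short cri_index, unsigned short cid)
{
        cid_to_cri_map[cid] = cri_index;
}

/* Used on the I/O fast path, cf. BE_GET_CRI_FROM_CID(). */
static unsigned short get_cri_from_cid(unsigned short cid)
{
        return cid_to_cri_map[cid];
}

int main(void)
{
        /* CIDs no longer need to be a contiguous, even-numbered range. */
        set_cid_to_cri(0, 17);
        set_cid_to_cri(1, 42);
        assert(get_cri_from_cid(42) == 1);
        return 0;
}
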
 
index 55cc9902263dd84bc047fadb429ea9a45ababa50..245a9595a93a131449fd660a7b7d8bd0e9e7f3b5 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -368,6 +368,8 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
                            "BM_%d : phba->fw_config.iscsi_features = %d\n",
                            phba->fw_config.iscsi_features);
+               memcpy(phba->fw_ver_str, resp->params.hba_attribs.
+                      firmware_version_string, BEISCSI_VER_STRLEN);
        } else
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BG_%d :  Failed in mgmt_check_supported_fw\n");
@@ -1259,6 +1261,45 @@ beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr,
        return snprintf(buf, PAGE_SIZE, BE_NAME "\n");
 }
 
+/**
+ * beiscsi_fw_ver_disp()- Display Firmware Version
+ * @dev: ptr to device, not used.
+ * @attr: device attribute, not used.
+ * @buf: buffer that receives the formatted firmware version string
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
+                    char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str);
+}
+
+/**
+ * beiscsi_active_cid_disp()- Display Sessions Active
+ * @dev: ptr to device, not used.
+ * @attr: device attribute, not used.
+ * @buf: buffer that receives the formatted count of active sessions
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                      (phba->params.cxns_per_ctrl - phba->avlbl_cids));
+}
+
 /**
  * beiscsi_adap_family_disp()- Display adapter family.
  * @dev: ptr to device to get priv structure
index 2e4968add799f7e91ab53e38edd21cd5e74eaa44..04af7e74fe4884a54a773f0471b264412de726d7 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -156,25 +156,25 @@ union invalidate_commands_params {
 } __packed;
 
 struct mgmt_hba_attributes {
-       u8 flashrom_version_string[32];
-       u8 manufacturer_name[32];
+       u8 flashrom_version_string[BEISCSI_VER_STRLEN];
+       u8 manufacturer_name[BEISCSI_VER_STRLEN];
        u32 supported_modes;
        u8 seeprom_version_lo;
        u8 seeprom_version_hi;
        u8 rsvd0[2];
        u32 fw_cmd_data_struct_version;
        u32 ep_fw_data_struct_version;
-       u32 future_reserved[12];
+       u8 ncsi_version_string[12];
        u32 default_extended_timeout;
-       u8 controller_model_number[32];
+       u8 controller_model_number[BEISCSI_VER_STRLEN];
        u8 controller_description[64];
-       u8 controller_serial_number[32];
-       u8 ip_version_string[32];
-       u8 firmware_version_string[32];
-       u8 bios_version_string[32];
-       u8 redboot_version_string[32];
-       u8 driver_version_string[32];
-       u8 fw_on_flash_version_string[32];
+       u8 controller_serial_number[BEISCSI_VER_STRLEN];
+       u8 ip_version_string[BEISCSI_VER_STRLEN];
+       u8 firmware_version_string[BEISCSI_VER_STRLEN];
+       u8 bios_version_string[BEISCSI_VER_STRLEN];
+       u8 redboot_version_string[BEISCSI_VER_STRLEN];
+       u8 driver_version_string[BEISCSI_VER_STRLEN];
+       u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN];
        u32 functionalities_supported;
        u16 max_cdblength;
        u8 asic_revision;
@@ -190,7 +190,8 @@ struct mgmt_hba_attributes {
        u32 firmware_post_status;
        u32 hba_mtu[8];
        u8 iscsi_features;
-       u8 future_u8[3];
+       u8 asic_generation;
+       u8 future_u8[2];
        u32 future_u32[3];
 } __packed;
 
@@ -207,7 +208,7 @@ struct mgmt_controller_attributes {
        u64 unique_identifier;
        u8 netfilters;
        u8 rsvd0[3];
-       u8 future_u32[4];
+       u32 future_u32[4];
 } __packed;
 
 struct be_mgmt_controller_attributes {
@@ -311,6 +312,12 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
 ssize_t beiscsi_drvr_ver_disp(struct device *dev,
                               struct device_attribute *attr, char *buf);
 
+ssize_t beiscsi_fw_ver_disp(struct device *dev,
+                            struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_active_cid_disp(struct device *dev,
+                                struct device_attribute *attr, char *buf);
+
 ssize_t beiscsi_adap_family_disp(struct device *dev,
                                  struct device_attribute *attr, char *buf);
 
index 11596b2c4702ae952c7063ab93494a0d5f9bb59f..08b22a901c2590e3aca12ebcfe4484e143c54460 100644 (file)
@@ -2,7 +2,7 @@
 #define _BNX2FC_H_
 /* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME            "bnx2fc"
-#define BNX2FC_VERSION         "1.0.13"
+#define BNX2FC_VERSION         "1.0.14"
 
 #define PFX                    "bnx2fc: "
 
+#define BCM_CHIP_LEN           16
+
 #define BNX2X_DOORBELL_PCI_BAR         2
 
 #define BNX2FC_MAX_BD_LEN              0xffff
@@ -241,6 +243,8 @@ struct bnx2fc_hba {
        int wait_for_link_down;
        int num_ofld_sess;
        struct list_head vports;
+
+       char chip_num[BCM_CHIP_LEN];
 };
 
 struct bnx2fc_interface {
index bdbbb13b8534c2318464b1b9a803c554af2b17ac..b1c9a4f8caee852f2cbf16b3735a7f7d912bb014 100644 (file)
@@ -3,7 +3,7 @@
  * This file contains helper routines that handle ELS requests
  * and responses.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 7dffec1e57158469328e670df896767227261ae7..69ac55495c1d7479d75f79b13fdb22659875b95c 100644 (file)
@@ -3,7 +3,7 @@
  * cnic modules to create FCoE instances, send/receive non-offloaded
  * FIP/FCoE packets, listen to link events etc.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME                "bnx2fc"
 #define DRV_MODULE_VERSION     BNX2FC_VERSION
-#define DRV_MODULE_RELDATE     "Dec 21, 2012"
+#define DRV_MODULE_RELDATE     "Mar 08, 2013"
 
 
 static char version[] =
@@ -679,6 +679,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
 {
        struct fcoe_port *port = lport_priv(lport);
        struct bnx2fc_interface *interface = port->priv;
+       struct bnx2fc_hba *hba = interface->hba;
        struct Scsi_Host *shost = lport->host;
        int rc = 0;
 
@@ -699,8 +700,9 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
        }
        if (!lport->vport)
                fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
-       sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
-               BNX2FC_NAME, BNX2FC_VERSION,
+       snprintf(fc_host_symbolic_name(lport->host), 256,
+                "%s (Broadcom %s) v%s over %s",
+               BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION,
                interface->netdev->name);
 
        return 0;
@@ -1656,23 +1658,60 @@ mem_err:
 static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
 {
        struct cnic_dev *cnic;
+       struct pci_dev *pdev;
 
        if (!hba->cnic) {
                printk(KERN_ERR PFX "cnic is NULL\n");
                return -ENODEV;
        }
        cnic = hba->cnic;
-       hba->pcidev = cnic->pcidev;
-       if (hba->pcidev)
-               pci_dev_get(hba->pcidev);
+       pdev = hba->pcidev = cnic->pcidev;
+       if (!hba->pcidev)
+               return -ENODEV;
 
+       switch (pdev->device) {
+       case PCI_DEVICE_ID_NX2_57710:
+               strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57711:
+               strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57712:
+       case PCI_DEVICE_ID_NX2_57712_MF:
+       case PCI_DEVICE_ID_NX2_57712_VF:
+               strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57800:
+       case PCI_DEVICE_ID_NX2_57800_MF:
+       case PCI_DEVICE_ID_NX2_57800_VF:
+               strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57810:
+       case PCI_DEVICE_ID_NX2_57810_MF:
+       case PCI_DEVICE_ID_NX2_57810_VF:
+               strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57840:
+       case PCI_DEVICE_ID_NX2_57840_MF:
+       case PCI_DEVICE_ID_NX2_57840_VF:
+       case PCI_DEVICE_ID_NX2_57840_2_20:
+       case PCI_DEVICE_ID_NX2_57840_4_10:
+               strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN);
+               break;
+       default:
+               pr_err(PFX "Unknown device id 0x%x\n", pdev->device);
+               break;
+       }
+       pci_dev_get(hba->pcidev);
        return 0;
 }
 
 static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
 {
-       if (hba->pcidev)
+       if (hba->pcidev) {
+               hba->chip_num[0] = '\0';
                pci_dev_put(hba->pcidev);
+       }
        hba->pcidev = NULL;
 }
 
index 50510ffe1bf59444b2dcf28cd8c1345b41b3269a..c0d035a8f8f9e79a08a0b313742c2f7b0009ca6f 100644 (file)
@@ -2,7 +2,7 @@
  * This file contains the code that low level functions that interact
  * with 57712 FCoE firmware.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -126,7 +126,11 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
        fcoe_init3.error_bit_map_lo = 0xffffffff;
        fcoe_init3.error_bit_map_hi = 0xffffffff;
 
-       fcoe_init3.perf_config = 1;
+       /*
+        * enable both cached connection and cached tasks
+        * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
+        */
+       fcoe_init3.perf_config = 3;
 
        kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
        kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
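
The new comment documents perf_config as two independent enable bits rather than an enumeration. A tiny sketch of how the value 3 used above is composed; the symbolic names are illustrative, the firmware interface itself only takes the raw number:

#include <stdint.h>
#include <stdio.h>

#define PERF_CFG_CACHED_CONN   0x1      /* 1 = cached connection */
#define PERF_CFG_CACHED_TASKS  0x2      /* 2 = cached tasks      */

int main(void)
{
        uint32_t perf_config = PERF_CFG_CACHED_CONN | PERF_CFG_CACHED_TASKS;

        printf("perf_config = %u\n", perf_config);      /* prints 3 */
        return 0;
}
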
index 723a9a8ba5ee28f48de2e67854205f5749287cee..575142e92d9c5219b10fb286d23a12ee1580db58 100644 (file)
@@ -1,7 +1,7 @@
 /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
  * IO manager and SCSI IO processing.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1270,8 +1270,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 
        spin_lock_bh(&tgt->tgt_lock);
        io_req->wait_for_comp = 0;
-       if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
-                                   &io_req->req_flags))) {
+       if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
+               BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
+               rc = SUCCESS;
+       } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+                                     &io_req->req_flags))) {
                /* Let the scsi-ml try to recover this command */
                printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
                       io_req->xid);
index c57a3bb8a9fbfe1944e796a67df7cf81efec23c8..4d93177dfb530c4446d959bd97fb71cd5dee86d1 100644 (file)
@@ -2,7 +2,7 @@
  * Handles operations such as session offload/upload etc, and manages
  * session resources such as connection id and qp resources.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 0f9c04175b11d4894f34157bf8177d6956c2f9c7..372a67d122d38fc161743c105db229c534d26a89 100644 (file)
@@ -114,7 +114,7 @@ struct csio_lnode_stats {
        uint32_t        n_rnode_match;  /* matched rnode */
        uint32_t        n_dev_loss_tmo; /* Device loss timeout */
        uint32_t        n_fdmi_err;     /* fdmi err */
-       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO];  /* fw events */
+       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO + 1];      /* fw events */
        enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT];   /* State m/c events */
        uint32_t        n_rnode_alloc;  /* rnode allocated */
        uint32_t        n_rnode_free;   /* rnode freed */
index 65940096a80d8426eb438392bf544ecee1d5a1e1..43343422122202c9614d4b76539c5a5d20e2fab6 100644 (file)
@@ -63,7 +63,7 @@ struct csio_rnode_stats {
        uint32_t        n_err_nomem;    /* error nomem */
        uint32_t        n_evt_unexp;    /* unexpected event */
        uint32_t        n_evt_drop;     /* unexpected event */
-       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO];  /* fw events */
+       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO + 1];      /* fw events */
        enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT];  /* State m/c events */
        uint32_t        n_lun_rst;      /* Number of resets of
                                         * of LUNs under this
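
Both csio hunks fix the same off-by-one: the n_evt_fw[] counters are indexed by firmware event codes up to and including PROTO_ERR_IMPL_LOGO, so the array must have PROTO_ERR_IMPL_LOGO + 1 elements. A standalone illustration of the rule, with made-up names:

#include <stdio.h>

enum fw_evt { EVT_FIRST = 0, EVT_LAST = 7 };    /* stand-in for PROTO_ERR_IMPL_LOGO */

static unsigned int n_evt_fw[EVT_LAST + 1];     /* EVT_LAST is a valid index */

int main(void)
{
        n_evt_fw[EVT_LAST]++;                   /* in bounds only because of the +1 */
        printf("%u\n", n_evt_fw[EVT_LAST]);
        return 0;
}
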
index 98436c3630359f7e348cb496ed4a719575675e2d..b6d1f92ed33cc17e67ff4cbe1b3c424f5f75bd0e 100644 (file)
@@ -38,7 +38,7 @@
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.5.0.2"
+#define DRV_VERSION            "1.5.0.22"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
@@ -192,6 +192,18 @@ enum fnic_state {
 
 struct mempool;
 
+enum fnic_evt {
+       FNIC_EVT_START_VLAN_DISC = 1,
+       FNIC_EVT_START_FCF_DISC = 2,
+       FNIC_EVT_MAX,
+};
+
+struct fnic_event {
+       struct list_head list;
+       struct fnic *fnic;
+       enum fnic_evt event;
+};
+
 /* Per-instance private data structure */
 struct fnic {
        struct fc_lport *lport;
@@ -254,6 +266,18 @@ struct fnic {
        struct sk_buff_head frame_queue;
        struct sk_buff_head tx_queue;
 
+       /*** FIP related data members  -- start ***/
+       void (*set_vlan)(struct fnic *, u16 vlan);
+       struct work_struct      fip_frame_work;
+       struct sk_buff_head     fip_frame_queue;
+       struct timer_list       fip_timer;
+       struct list_head        vlans;
+       spinlock_t              vlans_lock;
+
+       struct work_struct      event_work;
+       struct list_head        evlist;
+       /*** FIP related data members  -- end ***/
+
        /* copy work queue cache line section */
        ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
        /* completion queue cache line section */
@@ -278,6 +302,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
 }
 
 extern struct workqueue_struct *fnic_event_queue;
+extern struct workqueue_struct *fnic_fip_queue;
 extern struct device_attribute *fnic_attrs[];
 
 void fnic_clear_intr_mode(struct fnic *fnic);
@@ -289,6 +314,7 @@ int fnic_send(struct fc_lport *, struct fc_frame *);
 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
 void fnic_handle_frame(struct work_struct *work);
 void fnic_handle_link(struct work_struct *work);
+void fnic_handle_event(struct work_struct *work);
 int fnic_rq_cmpl_handler(struct fnic *fnic, int);
 int fnic_alloc_rq_frame(struct vnic_rq *rq);
 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
@@ -321,6 +347,12 @@ void fnic_handle_link_event(struct fnic *fnic);
 
 int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
 
+void fnic_handle_fip_frame(struct work_struct *work);
+void fnic_handle_fip_event(struct fnic *fnic);
+void fnic_fcoe_reset_vlans(struct fnic *fnic);
+void fnic_fcoe_evlist_free(struct fnic *fnic);
+extern void fnic_handle_fip_timer(struct fnic *fnic);
+
 static inline int
 fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
 {
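
struct fnic_event together with the new evlist/event_work members implements a small producer/consumer queue: events are allocated atomically, linked onto the adapter's list under its lock, and drained from workqueue context (see fnic_event_enq() and fnic_handle_event() in the next file). A rough userspace analogue of that flow; the locking primitive and list handling here are simplified stand-ins, not the driver's:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

enum evt { EVT_START_VLAN_DISC = 1, EVT_START_FCF_DISC = 2 };

struct event {
        struct event *next;
        enum evt what;
};

static struct event *evlist;                    /* cf. fnic->evlist */
static pthread_mutex_t evlock = PTHREAD_MUTEX_INITIALIZER;

/* Producer side, cf. fnic_event_enq(): allocate, link, kick the worker. */
static void event_enq(enum evt what)
{
        struct event *ev = malloc(sizeof(*ev));

        if (!ev)
                return;                         /* dropped, like a failed GFP_ATOMIC */
        ev->what = what;
        pthread_mutex_lock(&evlock);
        ev->next = evlist;                      /* the driver uses list_add_tail() */
        evlist = ev;
        pthread_mutex_unlock(&evlock);
        /* the driver would now schedule_work(&fnic->event_work) */
}

/* Worker side, cf. fnic_handle_event(): drain the list under the lock. */
static void handle_events(void)
{
        pthread_mutex_lock(&evlock);
        while (evlist) {
                struct event *ev = evlist;

                evlist = ev->next;
                printf("servicing event %d\n", ev->what);
                free(ev);
        }
        pthread_mutex_unlock(&evlock);
}

int main(void)
{
        event_enq(EVT_START_VLAN_DISC);
        handle_events();
        return 0;
}
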
index 483eb9dbe66366377c5dd4f049c15a160ca2a500..006fa92a02df0b4178f2ce88c73dffd8635cb5fc 100644 (file)
 #include <scsi/libfc.h>
 #include "fnic_io.h"
 #include "fnic.h"
+#include "fnic_fip.h"
 #include "cq_enet_desc.h"
 #include "cq_exch_desc.h"
 
+static u8 fcoe_all_fcfs[ETH_ALEN];
+struct workqueue_struct *fnic_fip_queue;
 struct workqueue_struct *fnic_event_queue;
 
 static void fnic_set_eth_mode(struct fnic *);
+static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
+static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
+static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
+static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
 
 void fnic_handle_link(struct work_struct *work)
 {
@@ -69,6 +77,11 @@ void fnic_handle_link(struct work_struct *work)
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link down\n");
                                fcoe_ctlr_link_down(&fnic->ctlr);
+                               if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+                                       /* start FCoE VLAN discovery */
+                                       fnic_fcoe_send_vlan_req(fnic);
+                                       return;
+                               }
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link up\n");
                                fcoe_ctlr_link_up(&fnic->ctlr);
@@ -79,6 +92,11 @@ void fnic_handle_link(struct work_struct *work)
        } else if (fnic->link_status) {
                /* DOWN -> UP */
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+                       /* start FCoE VLAN discovery */
+                       fnic_fcoe_send_vlan_req(fnic);
+                       return;
+               }
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
                fcoe_ctlr_link_up(&fnic->ctlr);
        } else {
@@ -128,6 +146,441 @@ void fnic_handle_frame(struct work_struct *work)
        }
 }
 
+void fnic_fcoe_evlist_free(struct fnic *fnic)
+{
+       struct fnic_event *fevt = NULL;
+       struct fnic_event *next = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (list_empty(&fnic->evlist)) {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               return;
+       }
+
+       list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
+               list_del(&fevt->list);
+               kfree(fevt);
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+void fnic_handle_event(struct work_struct *work)
+{
+       struct fnic *fnic = container_of(work, struct fnic, event_work);
+       struct fnic_event *fevt = NULL;
+       struct fnic_event *next = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (list_empty(&fnic->evlist)) {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               return;
+       }
+
+       list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
+               if (fnic->stop_rx_link_events) {
+                       list_del(&fevt->list);
+                       kfree(fevt);
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       return;
+               }
+               /*
+                * If we're in a transitional state, just re-queue and return.
+                * The queue will be serviced when we get to a stable state.
+                */
+               if (fnic->state != FNIC_IN_FC_MODE &&
+                   fnic->state != FNIC_IN_ETH_MODE) {
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       return;
+               }
+
+               list_del(&fevt->list);
+               switch (fevt->event) {
+               case FNIC_EVT_START_VLAN_DISC:
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       fnic_fcoe_send_vlan_req(fnic);
+                       spin_lock_irqsave(&fnic->fnic_lock, flags);
+                       break;
+               case FNIC_EVT_START_FCF_DISC:
+                       FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                                 "Start FCF Discovery\n");
+                       fnic_fcoe_start_fcf_disc(fnic);
+                       break;
+               default:
+                       FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                                 "Unknown event 0x%x\n", fevt->event);
+                       break;
+               }
+               kfree(fevt);
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+/**
+ * Check if the received FIP FLOGI frame is rejected
+ * @fip: The FCoE controller that received the frame
+ * @skb: The received FIP frame
+ *
+ * Returns non-zero if the frame is rejected with an unsupported command
+ * and insufficient resources ELS explanation.
+ */
+static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
+                                        struct sk_buff *skb)
+{
+       struct fc_lport *lport = fip->lp;
+       struct fip_header *fiph;
+       struct fc_frame_header *fh = NULL;
+       struct fip_desc *desc;
+       struct fip_encaps *els;
+       enum fip_desc_type els_dtype = 0;
+       u16 op;
+       u8 els_op;
+       u8 sub;
+
+       size_t els_len = 0;
+       size_t rlen;
+       size_t dlen = 0;
+
+       if (skb_linearize(skb))
+               return 0;
+
+       if (skb->len < sizeof(*fiph))
+               return 0;
+
+       fiph = (struct fip_header *)skb->data;
+       op = ntohs(fiph->fip_op);
+       sub = fiph->fip_subcode;
+
+       if (op != FIP_OP_LS)
+               return 0;
+
+       if (sub != FIP_SC_REP)
+               return 0;
+
+       rlen = ntohs(fiph->fip_dl_len) * 4;
+       if (rlen + sizeof(*fiph) > skb->len)
+               return 0;
+
+       desc = (struct fip_desc *)(fiph + 1);
+       dlen = desc->fip_dlen * FIP_BPW;
+
+       if (desc->fip_dtype == FIP_DT_FLOGI) {
+
+               shost_printk(KERN_DEBUG, lport->host,
+                         " FIP TYPE FLOGI: fab name:%llx "
+                         "vfid:%d map:%x\n",
+                         fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
+                         fip->sel_fcf->fc_map);
+               if (dlen < sizeof(*els) + sizeof(*fh) + 1)
+                       return 0;
+
+               els_len = dlen - sizeof(*els);
+               els = (struct fip_encaps *)desc;
+               fh = (struct fc_frame_header *)(els + 1);
+               els_dtype = desc->fip_dtype;
+
+               if (!fh)
+                       return 0;
+
+               /*
+                * ELS command code, reason and explanation should be = Reject,
+                * unsupported command and insufficient resource
+                */
+               els_op = *(u8 *)(fh + 1);
+               if (els_op == ELS_LS_RJT) {
+                       shost_printk(KERN_INFO, lport->host,
+                                 "Flogi Request Rejected by Switch\n");
+                       return 1;
+               }
+               shost_printk(KERN_INFO, lport->host,
+                               "Flogi Request Accepted by Switch\n");
+       }
+       return 0;
+}
+
+static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
+{
+       struct fcoe_ctlr *fip = &fnic->ctlr;
+       struct sk_buff *skb;
+       char *eth_fr;
+       int fr_len;
+       struct fip_vlan *vlan;
+       u64 vlan_tov;
+
+       fnic_fcoe_reset_vlans(fnic);
+       fnic->set_vlan(fnic, 0);
+       FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                 "Sending VLAN request...\n");
+       skb = dev_alloc_skb(sizeof(struct fip_vlan));
+       if (!skb)
+               return;
+
+       fr_len = sizeof(*vlan);
+       eth_fr = (char *)skb->data;
+       vlan = (struct fip_vlan *)eth_fr;
+
+       memset(vlan, 0, sizeof(*vlan));
+       memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+       memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
+       vlan->eth.h_proto = htons(ETH_P_FIP);
+
+       vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+       vlan->fip.fip_op = htons(FIP_OP_VLAN);
+       vlan->fip.fip_subcode = FIP_SC_VL_REQ;
+       vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
+
+       vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
+       vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
+       memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
+
+       vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
+       vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
+       put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
+
+       skb_put(skb, sizeof(*vlan));
+       skb->protocol = htons(ETH_P_FIP);
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       fip->send(fip, skb);
+
+       /* set a timer so that we can retry if there is no response */
+       vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
+       mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
+}
+
+static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
+{
+       struct fcoe_ctlr *fip = &fnic->ctlr;
+       struct fip_header *fiph;
+       struct fip_desc *desc;
+       u16 vid;
+       size_t rlen;
+       size_t dlen;
+       struct fcoe_vlan *vlan;
+       u64 sol_time;
+       unsigned long flags;
+
+       FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                 "Received VLAN response...\n");
+
+       fiph = (struct fip_header *) skb->data;
+
+       FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                 "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
+                 ntohs(fiph->fip_op), fiph->fip_subcode);
+
+       rlen = ntohs(fiph->fip_dl_len) * 4;
+       fnic_fcoe_reset_vlans(fnic);
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       desc = (struct fip_desc *)(fiph + 1);
+       while (rlen > 0) {
+               dlen = desc->fip_dlen * FIP_BPW;
+               switch (desc->fip_dtype) {
+               case FIP_DT_VLAN:
+                       vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
+                       shost_printk(KERN_INFO, fnic->lport->host,
+                                 "process_vlan_resp: FIP VLAN %d\n", vid);
+                       vlan = kmalloc(sizeof(*vlan),
+                                      GFP_ATOMIC);
+                       if (!vlan) {
+                               /* retry from timer */
+                               spin_unlock_irqrestore(&fnic->vlans_lock,
+                                                       flags);
+                               goto out;
+                       }
+                       memset(vlan, 0, sizeof(struct fcoe_vlan));
+                       vlan->vid = vid & 0x0fff;
+                       vlan->state = FIP_VLAN_AVAIL;
+                       list_add_tail(&vlan->list, &fnic->vlans);
+                       break;
+               }
+               desc = (struct fip_desc *)((char *)desc + dlen);
+               rlen -= dlen;
+       }
+
+       /* any VLAN descriptors present ? */
+       if (list_empty(&fnic->vlans)) {
+               /* retry from timer */
+               FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                         "No VLAN descriptors in FIP VLAN response\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               goto out;
+       }
+
+       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       fnic->set_vlan(fnic, vlan->vid);
+       vlan->state = FIP_VLAN_SENT; /* sent now */
+       vlan->sol_count++;
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+       /* start the solicitation */
+       fcoe_ctlr_link_up(fip);
+
+       sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+       mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+out:
+       return;
+}
+
+static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
+{
+       unsigned long flags;
+       struct fcoe_vlan *vlan;
+       u64 sol_time;
+
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       fnic->set_vlan(fnic, vlan->vid);
+       vlan->state = FIP_VLAN_SENT; /* sent now */
+       vlan->sol_count = 1;
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+       /* start the solicitation */
+       fcoe_ctlr_link_up(&fnic->ctlr);
+
+       sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+       mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+}
+
+static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
+{
+       unsigned long flags;
+       struct fcoe_vlan *fvlan;
+
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       if (list_empty(&fnic->vlans)) {
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               return -EINVAL;
+       }
+
+       fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       if (fvlan->state == FIP_VLAN_USED) {
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               return 0;
+       }
+
+       if (fvlan->state == FIP_VLAN_SENT) {
+               fvlan->state = FIP_VLAN_USED;
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               return 0;
+       }
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+       return -EINVAL;
+}
+
+static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
+{
+       struct fnic_event *fevt;
+       unsigned long flags;
+
+       fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
+       if (!fevt)
+               return;
+
+       fevt->fnic = fnic;
+       fevt->event = ev;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       list_add_tail(&fevt->list, &fnic->evlist);
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+       schedule_work(&fnic->event_work);
+}
+
+static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
+{
+       struct fip_header *fiph;
+       int ret = 1;
+       u16 op;
+       u8 sub;
+
+       if (!skb || !(skb->data))
+               return -1;
+
+       if (skb_linearize(skb))
+               goto drop;
+
+       fiph = (struct fip_header *)skb->data;
+       op = ntohs(fiph->fip_op);
+       sub = fiph->fip_subcode;
+
+       if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
+               goto drop;
+
+       if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
+               goto drop;
+
+       if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
+               if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
+                       goto drop;
+               /* pass it on to fcoe */
+               ret = 1;
+       } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
+               /* set the vlan as used */
+               fnic_fcoe_process_vlan_resp(fnic, skb);
+               ret = 0;
+       } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
+               /* received CVL request, restart vlan disc */
+               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+               /* pass it on to fcoe */
+               ret = 1;
+       }
+drop:
+       return ret;
+}
+
+void fnic_handle_fip_frame(struct work_struct *work)
+{
+       struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
+       unsigned long flags;
+       struct sk_buff *skb;
+       struct ethhdr *eh;
+
+       while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
+               spin_lock_irqsave(&fnic->fnic_lock, flags);
+               if (fnic->stop_rx_link_events) {
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       dev_kfree_skb(skb);
+                       return;
+               }
+               /*
+                * If we're in a transitional state, just re-queue and return.
+                * The queue will be serviced when we get to a stable state.
+                */
+               if (fnic->state != FNIC_IN_FC_MODE &&
+                   fnic->state != FNIC_IN_ETH_MODE) {
+                       skb_queue_head(&fnic->fip_frame_queue, skb);
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       return;
+               }
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               eh = (struct ethhdr *)skb->data;
+               if (eh->h_proto == htons(ETH_P_FIP)) {
+                       skb_pull(skb, sizeof(*eh));
+                       if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
+                               dev_kfree_skb(skb);
+                               continue;
+                       }
+                       /*
+                        * If there are FLOGI rejects, clear all
+                        * FCFs and restart from scratch.
+                        */
+                       if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
+                               shost_printk(KERN_INFO, fnic->lport->host,
+                                         "Trigger a Link down - VLAN Disc\n");
+                               fcoe_ctlr_link_down(&fnic->ctlr);
+                               /* start FCoE VLAN discovery */
+                               fnic_fcoe_send_vlan_req(fnic);
+                               dev_kfree_skb(skb);
+                               continue;
+                       }
+                       fcoe_ctlr_recv(&fnic->ctlr, skb);
+                       continue;
+               }
+       }
+}
+
 /**
  * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
  * @fnic:      fnic instance.
@@ -150,8 +603,14 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
                skb_reset_mac_header(skb);
        }
        if (eh->h_proto == htons(ETH_P_FIP)) {
-               skb_pull(skb, sizeof(*eh));
-               fcoe_ctlr_recv(&fnic->ctlr, skb);
+               if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
+                       printk(KERN_ERR "Dropped FIP frame, as firmware "
+                                       "uses non-FIP mode; enable FIP "
+                                       "using UCSM\n");
+                       goto drop;
+               }
+               skb_queue_tail(&fnic->fip_frame_queue, skb);
+               queue_work(fnic_fip_queue, &fnic->fip_frame_work);
                return 1;               /* let caller know packet was used */
        }
        if (eh->h_proto != htons(ETH_P_FCOE))
@@ -720,3 +1179,104 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
 }
+
+void fnic_fcoe_reset_vlans(struct fnic *fnic)
+{
+       unsigned long flags;
+       struct fcoe_vlan *vlan;
+       struct fcoe_vlan *next;
+
+       /*
+        * Indicate a link down to fcoe so that all FCFs are freed.
+        * This might not be required since we already did it before
+        * sending the VLAN discovery request.
+        */
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       if (!list_empty(&fnic->vlans)) {
+               list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
+                       list_del(&vlan->list);
+                       kfree(vlan);
+               }
+       }
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+}
+
+void fnic_handle_fip_timer(struct fnic *fnic)
+{
+       unsigned long flags;
+       struct fcoe_vlan *vlan;
+       u64 sol_time;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (fnic->stop_rx_link_events) {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               return;
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+       if (fnic->ctlr.mode == FIP_ST_NON_FIP)
+               return;
+
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       if (list_empty(&fnic->vlans)) {
+               /* no vlans available, try again */
+               FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                         "Start VLAN Discovery\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+               return;
+       }
+
+       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       shost_printk(KERN_DEBUG, fnic->lport->host,
+                 "fip_timer: vlan %d state %d sol_count %d\n",
+                 vlan->vid, vlan->state, vlan->sol_count);
+       switch (vlan->state) {
+       case FIP_VLAN_USED:
+               FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                         "FIP VLAN is selected for FC transaction\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               break;
+       case FIP_VLAN_FAILED:
+               /* if all vlans are in failed state, restart vlan disc */
+               FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                         "Start VLAN Discovery\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+               break;
+       case FIP_VLAN_SENT:
+               if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
+                       /*
+                        * No response on this VLAN; remove it from the
+                        * list and try the next one.
+                        */
+                       shost_printk(KERN_INFO, fnic->lport->host,
+                                 "Dequeue this VLAN ID %d from list\n",
+                                 vlan->vid);
+                       list_del(&vlan->list);
+                       kfree(vlan);
+                       vlan = NULL;
+                       if (list_empty(&fnic->vlans)) {
+                               /* we exhausted all vlans, restart vlan disc */
+                               spin_unlock_irqrestore(&fnic->vlans_lock,
+                                                       flags);
+                               shost_printk(KERN_INFO, fnic->lport->host,
+                                         "fip_timer: vlan list empty, "
+                                         "trigger vlan disc\n");
+                               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+                               return;
+                       }
+                       /* check the next vlan */
+                       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
+                                                       list);
+                       fnic->set_vlan(fnic, vlan->vid);
+                       vlan->state = FIP_VLAN_SENT; /* sent now */
+               }
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               vlan->sol_count++;
+               sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+               mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+               break;
+       }
+}
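
The VLAN-response handler added above walks the FIP descriptor list purely by its length fields: the header's fip_dl_len and each descriptor's fip_dlen are counted in 4-byte words (FIP_BPW), and fnic_fcoe_handle_fip_frame() drops frames whose declared length exceeds the skb. A minimal userspace sketch of that walk, using simplified stand-ins for the kernel's FIP structures (the field layout here is illustrative, not the on-wire struct fip_header) and an assumed value for the FIP_DT_VLAN type code:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FIP_BPW      4    /* FIP lengths count 4-byte words */
#define FIP_DT_VLAN  14   /* VLAN descriptor type (assumed value) */

struct fip_hdr_model {            /* simplified stand-in for struct fip_header */
	uint16_t fip_op;
	uint8_t  fip_subcode;
	uint8_t  fip_resvd;
	uint16_t fip_dl_len;      /* descriptor list length, in words, big-endian */
	uint16_t fip_flags;
};

struct fip_desc_model {           /* simplified stand-in for struct fip_desc */
	uint8_t fip_dtype;
	uint8_t fip_dlen;         /* this descriptor's length, in words */
};

static void walk_vlan_descs(const uint8_t *frame, size_t frame_len)
{
	const struct fip_hdr_model *fiph = (const struct fip_hdr_model *)frame;
	size_t rlen = (size_t)ntohs(fiph->fip_dl_len) * FIP_BPW;
	const uint8_t *p = frame + sizeof(*fiph);

	if (sizeof(*fiph) + rlen > frame_len)
		return;                           /* truncated frame, drop it */

	while (rlen >= sizeof(struct fip_desc_model)) {
		const struct fip_desc_model *desc = (const struct fip_desc_model *)p;
		size_t dlen = (size_t)desc->fip_dlen * FIP_BPW;

		if (dlen == 0 || dlen > rlen)
			return;                   /* malformed descriptor, stop */
		if (desc->fip_dtype == FIP_DT_VLAN && dlen >= 4) {
			uint16_t vid;

			/* the VLAN id follows the 2 descriptor header bytes */
			memcpy(&vid, p + 2, sizeof(vid));
			printf("FIP VLAN %u\n", ntohs(vid) & 0x0fff);
		}
		p += dlen;
		rlen -= dlen;
	}
}

int main(void)
{
	uint8_t frame[sizeof(struct fip_hdr_model) + 4] = { 0 };
	struct fip_hdr_model *fiph = (struct fip_hdr_model *)frame;
	uint16_t vid = htons(700);

	fiph->fip_dl_len = htons(1);              /* one 4-byte VLAN descriptor */
	frame[sizeof(*fiph) + 0] = FIP_DT_VLAN;
	frame[sizeof(*fiph) + 1] = 1;             /* length in words */
	memcpy(frame + sizeof(*fiph) + 2, &vid, sizeof(vid));

	walk_vlan_descs(frame, sizeof(frame));
	return 0;
}

Built on its own, this prints "FIP VLAN 700" for the hand-built response frame in main(); the guard against a zero or oversized fip_dlen is the sketch's own safety check, not part of the driver loop above.
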
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
new file mode 100644 (file)
index 0000000..87e74c2
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _FNIC_FIP_H_
+#define _FNIC_FIP_H_
+
+
+#define FCOE_CTLR_START_DELAY    2000    /* ms after first adv. to choose FCF */
+#define FCOE_CTLR_FIPVLAN_TOV    2000    /* ms after FIP VLAN disc */
+#define FCOE_CTLR_MAX_SOL        8
+
+#define FINC_MAX_FLOGI_REJECTS   8
+
+/*
+ * FIP_DT_VLAN descriptor.
+ */
+struct fip_vlan_desc {
+       struct fip_desc fd_desc;
+       __be16 fd_vlan;
+} __attribute__((packed));
+
+struct vlan {
+       __be16 vid;
+       __be16 type;
+};
+
+/*
+ * VLAN entry.
+ */
+struct fcoe_vlan {
+       struct list_head list;
+       u16 vid;                /* vlan ID */
+       u16 sol_count;          /* no. of sols sent */
+       u16 state;              /* state */
+};
+
+enum fip_vlan_state {
+       FIP_VLAN_AVAIL  = 0,    /* don't do anything */
+       FIP_VLAN_SENT   = 1,    /* sent */
+       FIP_VLAN_USED   = 2,    /* in use */
+       FIP_VLAN_FAILED = 3,    /* failed to respond */
+};
+
+struct fip_vlan {
+       struct ethhdr eth;
+       struct fip_header fip;
+       struct {
+               struct fip_mac_desc mac;
+               struct fip_wwn_desc wwnn;
+       } desc;
+};
+
+#endif  /* _FNIC_FIP_H_ */
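
fnic_handle_fip_timer() above always works on the first entry of fnic->vlans: a VLAN already in use stops the retry logic, a failed VLAN restarts discovery, and a solicited VLAN is retried until FCOE_CTLR_MAX_SOL attempts have gone unanswered, after which the next candidate is tried. A compact model of that policy, assuming a plain array in place of the kernel list and leaving out locking and the actual solicitation I/O:

#include <stdbool.h>
#include <stddef.h>

enum vlan_state { VLAN_AVAIL, VLAN_SENT, VLAN_USED, VLAN_FAILED };

#define MAX_SOL 8                        /* mirrors FCOE_CTLR_MAX_SOL */

struct vlan_entry {
	unsigned short vid;
	unsigned short sol_count;
	enum vlan_state state;
};

/*
 * One timer tick over the candidate list (head first, like the driver's
 * list_first_entry()).  Returns true when VLAN discovery must be
 * restarted, i.e. the list is empty, exhausted, or marked failed.
 */
static bool fip_timer_tick(struct vlan_entry *list, size_t *n)
{
	struct vlan_entry *v;
	size_t i;

	if (*n == 0)
		return true;                     /* nothing learned yet */

	v = &list[0];
	switch (v->state) {
	case VLAN_USED:
		return false;                    /* FC traffic already running */
	case VLAN_FAILED:
		return true;                     /* give up, start over */
	case VLAN_SENT:
		if (v->sol_count >= MAX_SOL) {
			for (i = 1; i < *n; i++) /* drop the head, shift the rest */
				list[i - 1] = list[i];
			if (--(*n) == 0)
				return true;     /* exhausted every candidate */
			v = &list[0];
			v->state = VLAN_SENT;    /* solicit on the next VLAN */
		}
		v->sol_count++;                  /* re-arm the solicitation */
		return false;
	default:
		return false;
	}
}

In the driver, the "restart" outcome becomes an FNIC_EVT_START_VLAN_DISC event queued through fnic_event_enq(), and the "retry" outcome re-arms fip_timer with FCOE_CTLR_START_DELAY.
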
index d601ac543c52bd1f24b50a760c240ab2c374f62c..5f09d1814d2685e86b06d9c6d697659d06cade07 100644 (file)
@@ -39,6 +39,7 @@
 #include "vnic_intr.h"
 #include "vnic_stats.h"
 #include "fnic_io.h"
+#include "fnic_fip.h"
 #include "fnic.h"
 
 #define PCI_DEVICE_ID_CISCO_FNIC       0x0045
@@ -292,6 +293,13 @@ static void fnic_notify_timer(unsigned long data)
                  round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
 }
 
+static void fnic_fip_notify_timer(unsigned long data)
+{
+       struct fnic *fnic = (struct fnic *)data;
+
+       fnic_handle_fip_timer(fnic);
+}
+
 static void fnic_notify_timer_start(struct fnic *fnic)
 {
        switch (vnic_dev_get_intr_mode(fnic->vdev)) {
@@ -403,6 +411,12 @@ static u8 *fnic_get_mac(struct fc_lport *lport)
        return fnic->data_src_addr;
 }
 
+static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
+{
+       u16 old_vlan;
+       old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+}
+
 static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct Scsi_Host *host;
@@ -620,7 +634,29 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
                vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
                vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
+               fnic->set_vlan = fnic_set_vlan;
                fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
+               setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
+                                                       (unsigned long)fnic);
+               spin_lock_init(&fnic->vlans_lock);
+               INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
+               INIT_WORK(&fnic->event_work, fnic_handle_event);
+               skb_queue_head_init(&fnic->fip_frame_queue);
+               spin_lock_irqsave(&fnic_list_lock, flags);
+               if (!fnic_fip_queue) {
+                       fnic_fip_queue =
+                               create_singlethread_workqueue("fnic_fip_q");
+                       if (!fnic_fip_queue) {
+                               spin_unlock_irqrestore(&fnic_list_lock, flags);
+                               printk(KERN_ERR PFX "fnic FIP work queue "
+                                                "create failed\n");
+                               err = -ENOMEM;
+                               goto err_out_free_max_pool;
+                       }
+               }
+               spin_unlock_irqrestore(&fnic_list_lock, flags);
+               INIT_LIST_HEAD(&fnic->evlist);
+               INIT_LIST_HEAD(&fnic->vlans);
        } else {
                shost_printk(KERN_INFO, fnic->lport->host,
                             "firmware uses non-FIP mode\n");
@@ -807,6 +843,13 @@ static void fnic_remove(struct pci_dev *pdev)
        skb_queue_purge(&fnic->frame_queue);
        skb_queue_purge(&fnic->tx_queue);
 
+       if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+               del_timer_sync(&fnic->fip_timer);
+               skb_queue_purge(&fnic->fip_frame_queue);
+               fnic_fcoe_reset_vlans(fnic);
+               fnic_fcoe_evlist_free(fnic);
+       }
+
        /*
         * Log off the fabric. This stops all remote ports, dns port,
         * logs off the fabric. This flushes all rport, disc, lport work
@@ -889,8 +932,8 @@ static int __init fnic_init_module(void)
        len = sizeof(struct fnic_sgl_list);
        fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
                ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
-                SLAB_HWCACHE_ALIGN,
-                NULL);
+                 SLAB_HWCACHE_ALIGN,
+                 NULL);
        if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
                printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
                err = -ENOMEM;
@@ -951,6 +994,10 @@ static void __exit fnic_cleanup_module(void)
 {
        pci_unregister_driver(&fnic_driver);
        destroy_workqueue(fnic_event_queue);
+       if (fnic_fip_queue) {
+               flush_workqueue(fnic_fip_queue);
+               destroy_workqueue(fnic_fip_queue);
+       }
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
        kmem_cache_destroy(fnic_io_req_cache);
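
fnic_probe() above creates one shared "fnic_fip_q" workqueue the first time a FIP-capable adapter is probed, and fnic_cleanup_module() flushes and destroys it on unload. A small userspace analogue of that create-once pattern, assuming a pthread mutex where the driver uses fnic_list_lock and a dummy struct in place of the real workqueue type:

#include <pthread.h>
#include <stdlib.h>

struct workqueue { int unused; };         /* stand-in for the real workqueue type */

static struct workqueue *fip_wq;
static pthread_mutex_t fip_wq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Create the shared queue the first time it is needed; later callers reuse it. */
static struct workqueue *get_fip_wq(void)
{
	struct workqueue *wq;

	pthread_mutex_lock(&fip_wq_lock);
	if (!fip_wq)
		fip_wq = calloc(1, sizeof(*fip_wq));
	wq = fip_wq;
	pthread_mutex_unlock(&fip_wq_lock);
	return wq;                        /* NULL means the allocation failed */
}

/* Module-exit analogue: tear the queue down if it was ever created. */
static void put_fip_wq(void)
{
	pthread_mutex_lock(&fip_wq_lock);
	free(fip_wq);
	fip_wq = NULL;
	pthread_mutex_unlock(&fip_wq_lock);
}
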
index b576be734e2e04a342cd031f75c146096bc1c2f5..9795d6f3e1974e47e98f6d28ef80f1e4f282e528 100644 (file)
@@ -584,6 +584,16 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
        return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
 }
 
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
+{
+       u64 a0 = new_default_vlan, a1 = 0;
+       int wait = 1000;
+       int old_vlan = 0;
+
+       old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
+       return (u16)old_vlan;
+}
+
 int vnic_dev_link_status(struct vnic_dev *vdev)
 {
        if (vdev->linkstatus)
index f9935a8a5a0932e14b4f896627ebe59567f8a1d9..40d4195f562bad4f40d406d3e28e48a018b6c213 100644 (file)
@@ -148,6 +148,8 @@ int vnic_dev_disable(struct vnic_dev *vdev);
 int vnic_dev_open(struct vnic_dev *vdev, int arg);
 int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
 int vnic_dev_init(struct vnic_dev *vdev, int arg);
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev,
+                               u16 new_default_vlan);
 int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
 int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
 void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
index 7c9ccbd4134b722dad20643314fa719fdcffb89c..3e2fcbda6aedad54315aac57e07664df171306db 100644 (file)
@@ -196,6 +196,73 @@ enum vnic_devcmd_cmd {
 
        /* undo initialize of virtual link */
        CMD_DEINIT              = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+
+       /* check fw capability of a cmd:
+        * in:  (u32)a0=cmd
+        * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+       CMD_CAPABILITY      = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
+
+       /* persistent binding info
+        * in:  (u64)a0=paddr of arg
+        *      (u32)a1=CMD_PERBI_XXX */
+       CMD_PERBI       = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
+
+       /* Interrupt Assert Register functionality
+        * in: (u16)a0=interrupt number to assert
+        */
+       CMD_IAR         = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+       /* initiate hangreset, like softreset after hang detected */
+       CMD_HANG_RESET      = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+       /* hangreset status:
+        *    out: a0=0 reset complete, a0=1 reset in progress */
+       CMD_HANG_RESET_STATUS   = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+       /*
+        * Set hw ingress packet vlan rewrite mode:
+        * in:  (u32)a0=new vlan rewrite mode
+        * out: (u32)a0=old vlan rewrite mode */
+       CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+       /*
+        * in:  (u16)a0=bdf of target vnic
+        *      (u32)a1=cmd to proxy
+        *      a2-a15=args to cmd in a1
+        * out: (u32)a0=status of proxied cmd
+        *      a1-a15=out args of proxied cmd */
+       CMD_PROXY_BY_BDF =  _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+       /*
+        * As for BY_BDF except a0 is index of hvnlink subordinate vnic
+        * or SR-IOV virtual vnic
+        */
+       CMD_PROXY_BY_INDEX =    _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+       /*
+        * For HPP toggle:
+        * adapter-info-get
+        * in:  (u64)a0=physical address of buffer passed in from caller.
+        *      (u16)a1=size of buffer specified in a0.
+        * out: (u64)a0=physical address of buffer passed in from caller.
+        *      (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+        *              0 if no VIF-CONFIG-INFO TLV was ever received. */
+       CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+
+       /*
+        * INT13 API: (u64)a0=paddr to vnic_int13_params struct
+        *            (u32)a1=INT13_CMD_xxx
+        */
+       CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+       /*
+        * Set default vlan:
+        * in: (u16)a0=new default vlan
+        *     (u16)a1=zero for overriding vlan with param a0,
+        *             non-zero for resetting vlan to the default
+        * out: (u16)a0=old default vlan
+        */
+       CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46)
 };
 
 /* flags for CMD_OPEN */
index cc82d0f322b6cb30fd3fdaf1fd5e49665302dece..4e31caa21ddfba379036b6a7b4c085a5f916549f 100644 (file)
@@ -2179,7 +2179,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
                return 0;
        }
 
-       if (vhost->state == IBMVFC_ACTIVE) {
+       if (vhost->logged_in) {
                evt = ibmvfc_get_event(vhost);
                ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
 
@@ -2190,7 +2190,12 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
                tmf->common.length = sizeof(*tmf);
                tmf->scsi_id = rport->port_id;
                int_to_scsilun(sdev->lun, &tmf->lun);
-               tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+               if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS))
+                       type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
+               if (vhost->state == IBMVFC_ACTIVE)
+                       tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+               else
+                       tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID);
                tmf->cancel_key = (unsigned long)sdev->hostdata;
                tmf->my_cancel_key = (unsigned long)starget->hostdata;
 
@@ -2327,7 +2332,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
        timeout = wait_for_completion_timeout(&evt->comp, timeout);
 
        if (!timeout) {
-               rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+               rc = ibmvfc_cancel_all(sdev, 0);
                if (!rc) {
                        rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
                        if (rc == SUCCESS)
@@ -2383,24 +2388,30 @@ out:
  * @cmd:       scsi command to abort
  *
  * Returns:
- *     SUCCESS / FAILED
+ *     SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
        struct ibmvfc_host *vhost = shost_priv(sdev->host);
-       int cancel_rc, abort_rc;
+       int cancel_rc, block_rc;
        int rc = FAILED;
 
        ENTER;
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        ibmvfc_wait_while_resetting(vhost);
-       cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
-       abort_rc = ibmvfc_abort_task_set(sdev);
+       if (block_rc != FAST_IO_FAIL) {
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+               ibmvfc_abort_task_set(sdev);
+       } else
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
 
-       if (!cancel_rc && !abort_rc)
+       if (!cancel_rc)
                rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
+       if (block_rc == FAST_IO_FAIL && rc != FAILED)
+               rc = FAST_IO_FAIL;
+
        LEAVE;
        return rc;
 }
@@ -2410,28 +2421,46 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
  * @cmd:       scsi command struct
  *
  * Returns:
- *     SUCCESS / FAILED
+ *     SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
        struct ibmvfc_host *vhost = shost_priv(sdev->host);
-       int cancel_rc, reset_rc;
+       int cancel_rc, block_rc, reset_rc = 0;
        int rc = FAILED;
 
        ENTER;
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        ibmvfc_wait_while_resetting(vhost);
-       cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
-       reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
+       if (block_rc != FAST_IO_FAIL) {
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
+               reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
+       } else
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
 
        if (!cancel_rc && !reset_rc)
                rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
+       if (block_rc == FAST_IO_FAIL && rc != FAILED)
+               rc = FAST_IO_FAIL;
+
        LEAVE;
        return rc;
 }
 
+/**
+ * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
+ * @sdev:      scsi device struct
+ * @data:      return code
+ *
+ **/
+static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
+{
+       unsigned long *rc = data;
+       *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
+}
+
 /**
  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
  * @sdev:      scsi device struct
@@ -2449,26 +2478,33 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
  * @cmd:       scsi command struct
  *
  * Returns:
- *     SUCCESS / FAILED
+ *     SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
        struct ibmvfc_host *vhost = shost_priv(sdev->host);
        struct scsi_target *starget = scsi_target(sdev);
-       int reset_rc;
+       int block_rc;
+       int reset_rc = 0;
        int rc = FAILED;
        unsigned long cancel_rc = 0;
 
        ENTER;
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        ibmvfc_wait_while_resetting(vhost);
-       starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
-       reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+       if (block_rc != FAST_IO_FAIL) {
+               starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
+               reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+       } else
+               starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
 
        if (!cancel_rc && !reset_rc)
                rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
 
+       if (block_rc == FAST_IO_FAIL && rc != FAILED)
+               rc = FAST_IO_FAIL;
+
        LEAVE;
        return rc;
 }
@@ -2480,12 +2516,16 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
  **/
 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-       int rc;
+       int rc, block_rc;
        struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
 
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        dev_err(vhost->dev, "Resetting connection due to error recovery\n");
        rc = ibmvfc_issue_fc_host_lip(vhost->host);
+
+       if (block_rc == FAST_IO_FAIL)
+               return FAST_IO_FAIL;
+
        return rc ? FAILED : SUCCESS;
 }
 
@@ -2509,8 +2549,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
                dev_rport = starget_to_rport(scsi_target(sdev));
                if (dev_rport != rport)
                        continue;
-               ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
-               ibmvfc_abort_task_set(sdev);
+               ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
        }
 
        rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
index 3be8af624e6fce888ab2b148c66321a2ce30558c..017a5290e8c12ba5d27c89a390c4ed6490e05000 100644 (file)
@@ -29,8 +29,8 @@
 #include "viosrp.h"
 
 #define IBMVFC_NAME    "ibmvfc"
-#define IBMVFC_DRIVER_VERSION          "1.0.10"
-#define IBMVFC_DRIVER_DATE             "(August 24, 2012)"
+#define IBMVFC_DRIVER_VERSION          "1.0.11"
+#define IBMVFC_DRIVER_DATE             "(April 12, 2013)"
 
 #define IBMVFC_DEFAULT_TIMEOUT 60
 #define IBMVFC_ADISC_CANCEL_TIMEOUT    45
@@ -208,10 +208,10 @@ struct ibmvfc_npiv_login_resp {
        u16 error;
        u32 flags;
 #define IBMVFC_NATIVE_FC               0x01
-#define IBMVFC_CAN_FLUSH_ON_HALT       0x08
        u32 reserved;
        u64 capabilities;
 #define IBMVFC_CAN_FLUSH_ON_HALT       0x08
+#define IBMVFC_CAN_SUPPRESS_ABTS       0x10
        u32 max_cmds;
        u32 scsi_id_sz;
        u64 max_dma_len;
@@ -351,6 +351,7 @@ struct ibmvfc_tmf {
 #define IBMVFC_TMF_LUN_RESET           0x10
 #define IBMVFC_TMF_TGT_RESET           0x20
 #define IBMVFC_TMF_LUA_VALID           0x40
+#define IBMVFC_TMF_SUPPRESS_ABTS       0x80
        u32 cancel_key;
        u32 my_cancel_key;
        u32 pad;
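
The ibmvfc changes above all follow the same shape: the value returned by fc_block_scsi_eh() decides whether a handler attempts a full cancel plus abort/reset, or only a cancel with IBMVFC_TMF_SUPPRESS_ABTS (and that flag is honoured only when the NPIV login response advertised IBMVFC_CAN_SUPPRESS_ABTS); FAST_IO_FAIL is then propagated unless the handler outcome was FAILED. A self-contained sketch of that decision logic, with local stand-in constants rather than the driver's definitions:

#include <stdbool.h>

/* Local stand-ins; the numeric values are illustrative, not the kernel's. */
enum eh_rc { EH_SUCCESS, EH_FAST_IO_FAIL, EH_FAILED };

#define TMF_ABORT_TASK_SET  0x02
#define TMF_SUPPRESS_ABTS   0x80

struct eh_plan {
	unsigned int cancel_flags;   /* flags for the cancel TMF */
	bool issue_abort;            /* also send an abort task set / reset? */
};

/*
 * Pick the cancel strategy from the fc_block_scsi_eh() result and the
 * firmware's "can suppress ABTS" capability bit.
 */
static struct eh_plan plan_cancel(enum eh_rc block_rc, bool can_suppress_abts)
{
	struct eh_plan p;

	if (block_rc != EH_FAST_IO_FAIL) {
		p.cancel_flags = TMF_ABORT_TASK_SET;    /* normal path */
		p.issue_abort = true;
	} else {
		/* fast-fail path: cancel only, no ABTS on the wire */
		p.cancel_flags = can_suppress_abts ? TMF_SUPPRESS_ABTS : 0;
		p.issue_abort = false;
	}
	return p;
}

/* FAST_IO_FAIL overrides a successful outcome but never hides a failure. */
static enum eh_rc eh_result(enum eh_rc block_rc, enum eh_rc rc)
{
	if (block_rc == EH_FAST_IO_FAIL && rc != EH_FAILED)
		return EH_FAST_IO_FAIL;
	return rc;
}
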
index 2197b57fb2251f541e0bfc0eab626ce670b66504..82a3c1ec8706600473c4daedd33b248d2c94518f 100644 (file)
@@ -4777,7 +4777,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
        ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
-       if (!ioa_cfg->in_reset_reload) {
+       if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
                dev_err(&ioa_cfg->pdev->dev,
                        "Adapter being reset as a result of error recovery.\n");
@@ -6421,7 +6421,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
 {
        u32 ioadl_flags = 0;
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
-       struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+       struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
        struct ipr_ioadl64_desc *last_ioadl64 = NULL;
        int len = qc->nbytes;
        struct scatterlist *sg;
@@ -6441,7 +6441,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
        ioarcb->ioadl_len =
                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
        ioarcb->u.sis64_addr_data.data_ioadl_addr =
-               cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
+               cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
 
        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                ioadl64->flags = cpu_to_be32(ioadl_flags);
@@ -6739,6 +6739,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
 {
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       int i;
 
        ENTER;
        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
@@ -6750,6 +6751,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
 
        ioa_cfg->in_reset_reload = 0;
        ioa_cfg->reset_retries = 0;
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               spin_lock(&ioa_cfg->hrrq[i]._lock);
+               ioa_cfg->hrrq[i].ioa_is_dead = 1;
+               spin_unlock(&ioa_cfg->hrrq[i]._lock);
+       }
+       wmb();
+
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        wake_up_all(&ioa_cfg->reset_wait_q);
        LEAVE;
@@ -8651,7 +8659,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
                ioa_cfg->sdt_state = ABORT_DUMP;
-       ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
+       ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
        ioa_cfg->in_ioa_bringdown = 1;
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
index 21a6ff1ed5c6b1afb49c38d191372bc59533780d..a1fb840596ef08cb129c8301dd81a34d24c73fd5 100644 (file)
@@ -552,7 +552,7 @@ struct ipr_ioarcb_ata_regs {        /* 22 bytes */
        u8 hob_lbam;
        u8 hob_lbah;
        u8 ctl;
-}__attribute__ ((packed, aligned(4)));
+}__attribute__ ((packed, aligned(2)));
 
 struct ipr_ioadl_desc {
        __be32 flags_and_data_len;
index c3aa6c5457b9c386e7237db6201dfb1f8603b83c..96a26f454673cf8b9cad6abd38ee9a28104c6a95 100644 (file)
@@ -1085,7 +1085,7 @@ static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *s
        struct isci_host *ihost = idev->owning_port->owning_controller;
        struct domain_device *dev = idev->domain_dev;
 
-       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
+       if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
                sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
        } else if (dev_is_expander(dev)) {
                sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
@@ -1098,7 +1098,7 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm
        struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
        struct domain_device *dev = idev->domain_dev;
 
-       if (dev->dev_type == SAS_END_DEV) {
+       if (dev->dev_type == SAS_END_DEVICE) {
                struct isci_host *ihost = idev->owning_port->owning_controller;
 
                isci_remote_device_not_ready(ihost, idev,
index 7674caae1d88bac82520c65b0cd6004e19db626f..47a013fffae73c1d416b68fc6b81dc5274bf2c46 100644 (file)
@@ -297,7 +297,7 @@ static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_conte
 
 static inline bool dev_is_expander(struct domain_device *dev)
 {
-       return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
+       return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE;
 }
 
 static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
index 9594ab62702b1786d51d7ac80f7e10b365b6ce61..e3e3bcbd5a9fcf4245bf9d28b53c0274ca642c2d 100644 (file)
@@ -2978,7 +2978,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
        /* all unaccelerated request types (non ssp or ncq) handled with
         * substates
         */
-       if (!task && dev->dev_type == SAS_END_DEV) {
+       if (!task && dev->dev_type == SAS_END_DEVICE) {
                state = SCI_REQ_TASK_WAIT_TC_COMP;
        } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
                state = SCI_REQ_SMP_WAIT_RESP;
@@ -3101,7 +3101,7 @@ sci_io_request_construct(struct isci_host *ihost,
        if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
                return SCI_FAILURE_INVALID_REMOTE_DEVICE;
 
-       if (dev->dev_type == SAS_END_DEV)
+       if (dev->dev_type == SAS_END_DEVICE)
                /* pass */;
        else if (dev_is_sata(dev))
                memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
@@ -3125,7 +3125,7 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost,
        /* Build the common part of the request */
        sci_general_request_construct(ihost, idev, ireq);
 
-       if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
+       if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
                set_bit(IREQ_TMF, &ireq->flags);
                memset(ireq->tc, 0, sizeof(struct scu_task_context));
 
index b6f19a1db780f7303abc9796c19553b058c89ada..9bb020ac089cddf6b665e8a52f728c95621ece34 100644 (file)
@@ -250,7 +250,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
        }
 
        /* XXX convert to get this from task->tproto like other drivers */
-       if (dev->dev_type == SAS_END_DEV) {
+       if (dev->dev_type == SAS_END_DEVICE) {
                isci_tmf->proto = SAS_PROTOCOL_SSP;
                status = sci_task_request_construct_ssp(ireq);
                if (status != SCI_SUCCESS)
index bdb81cda84013f0d132aa1972bd409dcb9903f6b..161c98efade9b9f290c04588e4638df0f3c421ac 100644 (file)
@@ -285,14 +285,14 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
        if (phy->attached_tproto & SAS_PROTOCOL_STP)
                dev->tproto = phy->attached_tproto;
        if (phy->attached_sata_dev)
-               dev->tproto |= SATA_DEV;
+               dev->tproto |= SAS_SATA_DEV;
 
-       if (phy->attached_dev_type == SATA_PENDING)
-               dev->dev_type = SATA_PENDING;
+       if (phy->attached_dev_type == SAS_SATA_PENDING)
+               dev->dev_type = SAS_SATA_PENDING;
        else {
                int res;
 
-               dev->dev_type = SATA_DEV;
+               dev->dev_type = SAS_SATA_DEV;
                res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
                                              &dev->sata_dev.rps_resp);
                if (res) {
@@ -314,7 +314,7 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
        int res;
 
        /* we weren't pending, so successfully end the reset sequence now */
-       if (dev->dev_type != SATA_PENDING)
+       if (dev->dev_type != SAS_SATA_PENDING)
                return 1;
 
        /* hmmm, if this succeeds do we need to repost the domain_device to the
@@ -348,9 +348,9 @@ static int smp_ata_check_ready(struct ata_link *link)
                return 0;
 
        switch (ex_phy->attached_dev_type) {
-       case SATA_PENDING:
+       case SAS_SATA_PENDING:
                return 0;
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                if (ex_phy->attached_sata_dev)
                        return sas_ata_clear_pending(dev, ex_phy);
        default:
@@ -631,7 +631,7 @@ static void sas_get_ata_command_set(struct domain_device *dev)
        struct dev_to_host_fis *fis =
                (struct dev_to_host_fis *) dev->frame_rcvd;
 
-       if (dev->dev_type == SATA_PENDING)
+       if (dev->dev_type == SAS_SATA_PENDING)
                return;
 
        if ((fis->sector_count == 1 && /* ATA */
@@ -797,7 +797,7 @@ int sas_discover_sata(struct domain_device *dev)
 {
        int res;
 
-       if (dev->dev_type == SATA_PM)
+       if (dev->dev_type == SAS_SATA_PM)
                return -ENODEV;
 
        sas_get_ata_command_set(dev);
index a0c3003e0c7d2f6b5e1a89147ab29d58b1609aaa..62b58d38ce2e63beda6d1ba4d9876c9af2e51008 100644 (file)
 void sas_init_dev(struct domain_device *dev)
 {
        switch (dev->dev_type) {
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
                break;
-       case EDGE_DEV:
-       case FANOUT_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                INIT_LIST_HEAD(&dev->ex_dev.children);
                mutex_init(&dev->ex_dev.cmd_mutex);
                break;
@@ -93,9 +93,9 @@ static int sas_get_port_device(struct asd_sas_port *port)
                if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
                    fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
                    && (fis->device & ~0x10) == 0)
-                       dev->dev_type = SATA_PM;
+                       dev->dev_type = SAS_SATA_PM;
                else
-                       dev->dev_type = SATA_DEV;
+                       dev->dev_type = SAS_SATA_DEV;
                dev->tproto = SAS_PROTOCOL_SATA;
        } else {
                struct sas_identify_frame *id =
@@ -109,21 +109,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
 
        dev->port = port;
        switch (dev->dev_type) {
-       case SATA_DEV:
+       case SAS_SATA_DEV:
                rc = sas_ata_init(dev);
                if (rc) {
                        rphy = NULL;
                        break;
                }
                /* fall through */
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                rphy = sas_end_device_alloc(port->port);
                break;
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(port->port,
                                          SAS_EDGE_EXPANDER_DEVICE);
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(port->port,
                                          SAS_FANOUT_EXPANDER_DEVICE);
                break;
@@ -156,7 +156,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
        dev->rphy = rphy;
        get_device(&dev->rphy->dev);
 
-       if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
+       if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE)
                list_add_tail(&dev->disco_list_node, &port->disco_list);
        else {
                spin_lock_irq(&port->dev_list_lock);
@@ -315,7 +315,7 @@ void sas_free_device(struct kref *kref)
        dev->phy = NULL;
 
        /* remove the phys and ports, everything else should be gone */
-       if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV)
+       if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                kfree(dev->ex_dev.ex_phy);
 
        if (dev_is_sata(dev) && dev->sata_dev.ap) {
@@ -343,7 +343,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
        spin_unlock_irq(&port->dev_list_lock);
 
        spin_lock_irq(&ha->lock);
-       if (dev->dev_type == SAS_END_DEV &&
+       if (dev->dev_type == SAS_END_DEVICE &&
            !list_empty(&dev->ssp_dev.eh_list_node)) {
                list_del_init(&dev->ssp_dev.eh_list_node);
                ha->eh_active--;
@@ -457,15 +457,15 @@ static void sas_discover_domain(struct work_struct *work)
                    task_pid_nr(current));
 
        switch (dev->dev_type) {
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                error = sas_discover_end_dev(dev);
                break;
-       case EDGE_DEV:
-       case FANOUT_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                error = sas_discover_root_expander(dev);
                break;
-       case SATA_DEV:
-       case SATA_PM:
+       case SAS_SATA_DEV:
+       case SAS_SATA_PM:
 #ifdef CONFIG_SCSI_SAS_ATA
                error = sas_discover_sata(dev);
                break;
index f42b0e15410f8a52e0a570664b04547d459e7aec..446b85110a1fc0a69b07e42bb3ecc7144d76d1ce 100644 (file)
@@ -183,21 +183,21 @@ static char sas_route_char(struct domain_device *dev, struct ex_phy *phy)
        }
 }
 
-static enum sas_dev_type to_dev_type(struct discover_resp *dr)
+static enum sas_device_type to_dev_type(struct discover_resp *dr)
 {
        /* This is detecting a failure to transmit initial dev to host
         * FIS as described in section J.5 of sas-2 r16
         */
-       if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev &&
+       if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev &&
            dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
-               return SATA_PENDING;
+               return SAS_SATA_PENDING;
        else
                return dr->attached_dev_type;
 }
 
 static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
 {
-       enum sas_dev_type dev_type;
+       enum sas_device_type dev_type;
        enum sas_linkrate linkrate;
        u8 sas_addr[SAS_ADDR_SIZE];
        struct smp_resp *resp = rsp;
@@ -238,7 +238,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
        /* Handle vacant phy - rest of dr data is not valid so skip it */
        if (phy->phy_state == PHY_VACANT) {
                memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
-               phy->attached_dev_type = NO_DEVICE;
+               phy->attached_dev_type = SAS_PHY_UNUSED;
                if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
                        phy->phy_id = phy_id;
                        goto skip;
@@ -259,7 +259,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
        /* help some expanders that fail to zero sas_address in the 'no
         * device' case
         */
-       if (phy->attached_dev_type == NO_DEVICE ||
+       if (phy->attached_dev_type == SAS_PHY_UNUSED ||
            phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
                memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
        else
@@ -292,13 +292,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
 
  out:
        switch (phy->attached_dev_type) {
-       case SATA_PENDING:
+       case SAS_SATA_PENDING:
                type = "stp pending";
                break;
-       case NO_DEVICE:
+       case SAS_PHY_UNUSED:
                type = "no device";
                break;
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                if (phy->attached_iproto) {
                        if (phy->attached_tproto)
                                type = "host+target";
@@ -311,8 +311,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
                                type = "ssp";
                }
                break;
-       case EDGE_DEV:
-       case FANOUT_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                type = "smp";
                break;
        default:
@@ -833,7 +833,7 @@ static struct domain_device *sas_ex_discover_end_dev(
        } else
 #endif
          if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
-               child->dev_type = SAS_END_DEV;
+               child->dev_type = SAS_END_DEVICE;
                rphy = sas_end_device_alloc(phy->port);
                /* FIXME: error handling */
                if (unlikely(!rphy))
@@ -932,11 +932,11 @@ static struct domain_device *sas_ex_discover_expander(
 
 
        switch (phy->attached_dev_type) {
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(phy->port,
                                          SAS_EDGE_EXPANDER_DEVICE);
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(phy->port,
                                          SAS_FANOUT_EXPANDER_DEVICE);
                break;
@@ -1013,7 +1013,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
        if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr))
                sas_ex_disable_port(dev, ex_phy->attached_sas_addr);
 
-       if (ex_phy->attached_dev_type == NO_DEVICE) {
+       if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) {
                if (ex_phy->routing_attr == DIRECT_ROUTING) {
                        memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
                        sas_configure_routing(dev, ex_phy->attached_sas_addr);
@@ -1022,10 +1022,10 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
        } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN)
                return 0;
 
-       if (ex_phy->attached_dev_type != SAS_END_DEV &&
-           ex_phy->attached_dev_type != FANOUT_DEV &&
-           ex_phy->attached_dev_type != EDGE_DEV &&
-           ex_phy->attached_dev_type != SATA_PENDING) {
+       if (ex_phy->attached_dev_type != SAS_END_DEVICE &&
+           ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
+           ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+           ex_phy->attached_dev_type != SAS_SATA_PENDING) {
                SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
                            "phy 0x%x\n", ex_phy->attached_dev_type,
                            SAS_ADDR(dev->sas_addr),
@@ -1049,11 +1049,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
        }
 
        switch (ex_phy->attached_dev_type) {
-       case SAS_END_DEV:
-       case SATA_PENDING:
+       case SAS_END_DEVICE:
+       case SAS_SATA_PENDING:
                child = sas_ex_discover_end_dev(dev, phy_id);
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
                        SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
                                    "attached to ex %016llx phy 0x%x\n",
@@ -1067,7 +1067,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
                        memcpy(dev->port->disc.fanout_sas_addr,
                               ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
                /* fallthrough */
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                child = sas_ex_discover_expander(dev, phy_id);
                break;
        default:
@@ -1111,8 +1111,8 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
                    phy->phy_state == PHY_NOT_PRESENT)
                        continue;
 
-               if ((phy->attached_dev_type == EDGE_DEV ||
-                    phy->attached_dev_type == FANOUT_DEV) &&
+               if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                    phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) &&
                    phy->routing_attr == SUBTRACTIVE_ROUTING) {
 
                        memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
@@ -1130,8 +1130,8 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
        u8 sub_addr[8] = {0, };
 
        list_for_each_entry(child, &ex->children, siblings) {
-               if (child->dev_type != EDGE_DEV &&
-                   child->dev_type != FANOUT_DEV)
+               if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+                   child->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
                        continue;
                if (sub_addr[0] == 0) {
                        sas_find_sub_addr(child, sub_addr);
@@ -1208,7 +1208,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
        int i;
        u8  *sub_sas_addr = NULL;
 
-       if (dev->dev_type != EDGE_DEV)
+       if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE)
                return 0;
 
        for (i = 0; i < ex->num_phys; i++) {
@@ -1218,8 +1218,8 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
                    phy->phy_state == PHY_NOT_PRESENT)
                        continue;
 
-               if ((phy->attached_dev_type == FANOUT_DEV ||
-                    phy->attached_dev_type == EDGE_DEV) &&
+               if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
+                    phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) &&
                    phy->routing_attr == SUBTRACTIVE_ROUTING) {
 
                        if (!sub_sas_addr)
@@ -1245,8 +1245,8 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
                                                 struct ex_phy *child_phy)
 {
        static const char *ex_type[] = {
-               [EDGE_DEV] = "edge",
-               [FANOUT_DEV] = "fanout",
+               [SAS_EDGE_EXPANDER_DEVICE] = "edge",
+               [SAS_FANOUT_EXPANDER_DEVICE] = "fanout",
        };
        struct domain_device *parent = child->parent;
 
@@ -1321,8 +1321,8 @@ static int sas_check_parent_topology(struct domain_device *child)
        if (!child->parent)
                return 0;
 
-       if (child->parent->dev_type != EDGE_DEV &&
-           child->parent->dev_type != FANOUT_DEV)
+       if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+           child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
                return 0;
 
        parent_ex = &child->parent->ex_dev;
@@ -1341,8 +1341,8 @@ static int sas_check_parent_topology(struct domain_device *child)
                child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
 
                switch (child->parent->dev_type) {
-               case EDGE_DEV:
-                       if (child->dev_type == FANOUT_DEV) {
+               case SAS_EDGE_EXPANDER_DEVICE:
+                       if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                                if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
                                    child_phy->routing_attr != TABLE_ROUTING) {
                                        sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1366,7 +1366,7 @@ static int sas_check_parent_topology(struct domain_device *child)
                                }
                        }
                        break;
-               case FANOUT_DEV:
+               case SAS_FANOUT_EXPANDER_DEVICE:
                        if (parent_phy->routing_attr != TABLE_ROUTING ||
                            child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
                                sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1619,8 +1619,8 @@ static int sas_ex_level_discovery(struct asd_sas_port *port, const int level)
        struct domain_device *dev;
 
        list_for_each_entry(dev, &port->dev_list, dev_list_node) {
-               if (dev->dev_type == EDGE_DEV ||
-                   dev->dev_type == FANOUT_DEV) {
+               if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                   dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        struct sas_expander_device *ex =
                                rphy_to_expander_device(dev->rphy);
 
@@ -1720,7 +1720,7 @@ static int sas_get_phy_change_count(struct domain_device *dev,
 }
 
 static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
-                                   u8 *sas_addr, enum sas_dev_type *type)
+                                   u8 *sas_addr, enum sas_device_type *type)
 {
        int res;
        struct smp_resp *disc_resp;
@@ -1849,7 +1849,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
                        SAS_DPRINTK("Expander phys DID NOT change\n");
        }
        list_for_each_entry(ch, &ex->children, siblings) {
-               if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
+               if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        res = sas_find_bcast_dev(ch, src_dev);
                        if (*src_dev)
                                return res;
@@ -1866,8 +1866,8 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi
 
        list_for_each_entry_safe(child, n, &ex->children, siblings) {
                set_bit(SAS_DEV_GONE, &child->state);
-               if (child->dev_type == EDGE_DEV ||
-                   child->dev_type == FANOUT_DEV)
+               if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                   child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                        sas_unregister_ex_tree(port, child);
                else
                        sas_unregister_dev(port, child);
@@ -1887,8 +1887,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
                        if (SAS_ADDR(child->sas_addr) ==
                            SAS_ADDR(phy->attached_sas_addr)) {
                                set_bit(SAS_DEV_GONE, &child->state);
-                               if (child->dev_type == EDGE_DEV ||
-                                   child->dev_type == FANOUT_DEV)
+                               if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                                   child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                                        sas_unregister_ex_tree(parent->port, child);
                                else
                                        sas_unregister_dev(parent->port, child);
@@ -1916,8 +1916,8 @@ static int sas_discover_bfs_by_root_level(struct domain_device *root,
        int res = 0;
 
        list_for_each_entry(child, &ex_root->children, siblings) {
-               if (child->dev_type == EDGE_DEV ||
-                   child->dev_type == FANOUT_DEV) {
+               if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                   child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        struct sas_expander_device *ex =
                                rphy_to_expander_device(child->rphy);
 
@@ -1970,8 +1970,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
        list_for_each_entry(child, &dev->ex_dev.children, siblings) {
                if (SAS_ADDR(child->sas_addr) ==
                    SAS_ADDR(ex_phy->attached_sas_addr)) {
-                       if (child->dev_type == EDGE_DEV ||
-                           child->dev_type == FANOUT_DEV)
+                       if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                           child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                                res = sas_discover_bfs_by_root(child);
                        break;
                }
@@ -1979,16 +1979,16 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
        return res;
 }
 
-static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old)
+static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old)
 {
        if (old == new)
                return true;
 
        /* treat device directed resets as flutter, if we went
-        * SAS_END_DEV to SATA_PENDING the link needs recovery
+        * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery
         */
-       if ((old == SATA_PENDING && new == SAS_END_DEV) ||
-           (old == SAS_END_DEV && new == SATA_PENDING))
+       if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) ||
+           (old == SAS_END_DEVICE && new == SAS_SATA_PENDING))
                return true;
 
        return false;
@@ -1998,7 +1998,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
 {
        struct expander_device *ex = &dev->ex_dev;
        struct ex_phy *phy = &ex->ex_phy[phy_id];
-       enum sas_dev_type type = NO_DEVICE;
+       enum sas_device_type type = SAS_PHY_UNUSED;
        u8 sas_addr[8];
        int res;
 
@@ -2032,7 +2032,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
 
                sas_ex_phy_discover(dev, phy_id);
 
-               if (ata_dev && phy->attached_dev_type == SATA_PENDING)
+               if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
                        action = ", needs recovery";
                SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
                            SAS_ADDR(dev->sas_addr), phy_id, action);
index 1de67964e5a1e39ffa10151c79489078826e2536..7e7ba83f0a2139c565f692fa93d0b3f5954abcce 100644 (file)
@@ -131,16 +131,16 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
        rphy->identify.initiator_port_protocols = dev->iproto;
        rphy->identify.target_port_protocols = dev->tproto;
        switch (dev->dev_type) {
-       case SATA_DEV:
+       case SAS_SATA_DEV:
                /* FIXME: need sata device type */
-       case SAS_END_DEV:
-       case SATA_PENDING:
+       case SAS_END_DEVICE:
+       case SAS_SATA_PENDING:
                rphy->identify.device_type = SAS_END_DEVICE;
                break;
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE;
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE;
                break;
        default:
index 1398b714c01836ee3789199cf3cd627ac4fa4021..d3c5297c6c89e242d845d16142f7a309ed4ef773 100644 (file)
@@ -69,7 +69,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
                        continue;
                }
 
-               if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
+               if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        dev->ex_dev.ex_change_count = -1;
                        for (i = 0; i < dev->ex_dev.num_phys; i++) {
                                struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
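
The libsas hunks above are mechanical fallout from folding the old internal enum sas_dev_type into the transport-class enum sas_device_type, so the identify mapping in sas_fill_in_rphy now largely passes values straight through. A minimal standalone sketch of the resulting mapping (simplified enum, not the kernel header; rphy_identify_type and its default branch are assumptions for illustration):

enum sas_device_type {
        SAS_PHY_UNUSED,
        SAS_END_DEVICE,
        SAS_EDGE_EXPANDER_DEVICE,
        SAS_FANOUT_EXPANDER_DEVICE,
        SAS_SATA_DEV,
        SAS_SATA_PENDING,
};

/* Collapse the SATA variants onto SAS_END_DEVICE; the expander types map
 * onto themselves now that the internal names match the transport class. */
static enum sas_device_type rphy_identify_type(enum sas_device_type t)
{
        switch (t) {
        case SAS_SATA_DEV:
        case SAS_SATA_PENDING:
        case SAS_END_DEVICE:
                return SAS_END_DEVICE;
        case SAS_EDGE_EXPANDER_DEVICE:
        case SAS_FANOUT_EXPANDER_DEVICE:
                return t;
        default:
                return SAS_PHY_UNUSED;  /* assumption for the sketch */
        }
}
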
index 7706c99ec8bbb1e8c70b051113f789cb6f97c1dd..bcc56cac4fd8f358d53084b598c167d060a82c19 100644 (file)
@@ -46,10 +46,15 @@ struct lpfc_sli2_slim;
 #define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128      /* sg element count per scsi
                cmnd for menlo needs nearly twice as for firmware
                downloads using bsg */
-#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
+
+#define LPFC_MIN_SG_SLI4_BUF_SZ        0x800   /* based on LPFC_DEFAULT_SG_SEG_CNT */
+#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SG_SEG_CNT_DIF 512    /* sg element count per scsi cmnd  */
 #define LPFC_MAX_SG_SEG_CNT    4096    /* sg element count per scsi cmnd */
+#define LPFC_MAX_SGL_SEG_CNT   512     /* SGL element count per scsi cmnd */
+#define LPFC_MAX_BPL_SEG_CNT   4096    /* BPL element count per scsi cmnd */
+
 #define LPFC_MAX_SGE_SIZE       0x80000000 /* Maximum data allowed in a SGE */
-#define LPFC_MAX_PROT_SG_SEG_CNT 4096  /* prot sg element count per scsi cmd*/
 #define LPFC_IOCB_LIST_CNT     2250    /* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
 #define LPFC_VNAME_LEN         100     /* vport symbolic name length */
@@ -66,8 +71,10 @@ struct lpfc_sli2_slim;
  * queue depths when there are driver resource error or Firmware
  * resource error.
  */
-#define QUEUE_RAMP_DOWN_INTERVAL       (1 * HZ)   /* 1 Second */
-#define QUEUE_RAMP_UP_INTERVAL         (300 * HZ) /* 5 minutes */
+/* 1 Second */
+#define QUEUE_RAMP_DOWN_INTERVAL       (msecs_to_jiffies(1000 * 1))
+/* 5 minutes */
+#define QUEUE_RAMP_UP_INTERVAL         (msecs_to_jiffies(1000 * 300))
 
 /* Number of exchanges reserved for discovery to complete */
 #define LPFC_DISC_IOCB_BUFF_COUNT 20
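
A recurring change across the lpfc hunks in this merge is replacing raw "N * HZ" timer arithmetic with msecs_to_jiffies(), which states the unit explicitly and is correct for any CONFIG_HZ. A minimal kernel-style sketch of the pattern (arm_ramp_down_timer is a hypothetical helper, not part of the patch):

#include <linux/jiffies.h>
#include <linux/timer.h>

static void arm_ramp_down_timer(struct timer_list *t)
{
        /* fire one second from now; equivalent to the old "jiffies + 1 * HZ",
         * but the unit is explicit and independent of CONFIG_HZ */
        mod_timer(t, jiffies + msecs_to_jiffies(1000));
}

The same conversion appears below for wait_event_interruptible_timeout() timeouts and time_after() comparisons.
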
@@ -671,6 +678,7 @@ struct lpfc_hba {
        uint32_t lmt;
 
        uint32_t fc_topology;   /* link topology, from LINK INIT */
+       uint32_t fc_topology_changed;   /* link topology changed since last LINK INIT */
 
        struct lpfc_stats fc_stat;
 
@@ -701,9 +709,11 @@ struct lpfc_hba {
        uint32_t cfg_poll_tmo;
        uint32_t cfg_use_msi;
        uint32_t cfg_fcp_imax;
+       uint32_t cfg_fcp_cpu_map;
        uint32_t cfg_fcp_wq_count;
        uint32_t cfg_fcp_eq_count;
        uint32_t cfg_fcp_io_channel;
+       uint32_t cfg_total_seg_cnt;
        uint32_t cfg_sg_seg_cnt;
        uint32_t cfg_prot_sg_seg_cnt;
        uint32_t cfg_sg_dma_buf_size;
@@ -804,8 +814,10 @@ struct lpfc_hba {
        uint64_t bg_reftag_err_cnt;
 
        /* fastpath list. */
-       spinlock_t scsi_buf_list_lock;
-       struct list_head lpfc_scsi_buf_list;
+       spinlock_t scsi_buf_list_get_lock;  /* SCSI buf alloc list lock */
+       spinlock_t scsi_buf_list_put_lock;  /* SCSI buf free list lock */
+       struct list_head lpfc_scsi_buf_list_get;
+       struct list_head lpfc_scsi_buf_list_put;
        uint32_t total_scsi_bufs;
        struct list_head lpfc_iocb_list;
        uint32_t total_iocbq_bufs;
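
The single lpfc_scsi_buf_list protected by one lock becomes a get/put pair so that the submission path (allocating SCSI buffers) and the completion path (returning them) normally take different locks; only when the get list runs dry are both locks held to splice the put list across, as the later hunks in lpfc_init.c show. A minimal sketch of the pattern under assumed names (struct buf_pool, pool_get, pool_put; the real lists live in struct lpfc_hba):

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_buf {
        struct list_head list;
};

struct buf_pool {
        spinlock_t get_lock;            /* submission side */
        spinlock_t put_lock;            /* completion side */
        struct list_head get_list;
        struct list_head put_list;
};

static struct my_buf *pool_get(struct buf_pool *p)
{
        struct my_buf *b = NULL;
        unsigned long flags;

        spin_lock_irqsave(&p->get_lock, flags);
        if (list_empty(&p->get_list)) {
                /* refill; lock order is always get_lock then put_lock */
                spin_lock(&p->put_lock);
                list_splice_init(&p->put_list, &p->get_list);
                spin_unlock(&p->put_lock);
        }
        if (!list_empty(&p->get_list)) {
                b = list_first_entry(&p->get_list, struct my_buf, list);
                list_del(&b->list);
        }
        spin_unlock_irqrestore(&p->get_lock, flags);
        return b;
}

static void pool_put(struct buf_pool *p, struct my_buf *b)
{
        unsigned long flags;

        spin_lock_irqsave(&p->put_lock, flags);
        list_add_tail(&b->list, &p->put_list);
        spin_unlock_irqrestore(&p->put_lock, flags);
}

In steady state a concurrent get and put never touch the same lock, which is the point of the split.
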
index 9290713af253fd740454bbee6d2b6807f64f4b93..3c5625b8b1f4e83814f878c590f27377f4aaecbf 100644 (file)
@@ -674,6 +674,9 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
        int i;
        int rc;
 
+       if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+               return 0;
+
        init_completion(&online_compl);
        rc = lpfc_workq_post_event(phba, &status, &online_compl,
                              LPFC_EVT_OFFLINE_PREP);
@@ -741,7 +744,8 @@ lpfc_selective_reset(struct lpfc_hba *phba)
        int status = 0;
        int rc;
 
-       if (!phba->cfg_enable_hba_reset)
+       if ((!phba->cfg_enable_hba_reset) ||
+           (phba->pport->fc_flag & FC_OFFLINE_MODE))
                return -EACCES;
 
        status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -895,6 +899,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
                pci_disable_sriov(pdev);
                phba->cfg_sriov_nr_virtfn = 0;
        }
+
        status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 
        if (status != 0)
@@ -2801,6 +2806,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
                lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                        "3054 lpfc_topology changed from %d to %d\n",
                        prev_val, val);
+               if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
+                       phba->fc_topology_changed = 1;
                err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
                if (err) {
                        phba->cfg_topology = prev_val;
@@ -3792,6 +3799,141 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
 static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
                   lpfc_fcp_imax_show, lpfc_fcp_imax_store);
 
+/**
+ * lpfc_fcp_cpu_map_show - Display the current driver CPU to IRQ vector mapping
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the CPU to IRQ vector mapping.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
+                     char *buf)
+{
+       struct Scsi_Host  *shost = class_to_shost(dev);
+       struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+       struct lpfc_hba   *phba = vport->phba;
+       struct lpfc_vector_map_info *cpup;
+       int  idx, len = 0;
+
+       if ((phba->sli_rev != LPFC_SLI_REV4) ||
+           (phba->intr_type != MSIX))
+               return len;
+
+       switch (phba->cfg_fcp_cpu_map) {
+       case 0:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "fcp_cpu_map: No mapping (%d)\n",
+                               phba->cfg_fcp_cpu_map);
+               return len;
+       case 1:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "fcp_cpu_map: HBA centric mapping (%d): "
+                               "%d online CPUs\n",
+                               phba->cfg_fcp_cpu_map,
+                               phba->sli4_hba.num_online_cpu);
+               break;
+       case 2:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "fcp_cpu_map: Driver centric mapping (%d): "
+                               "%d online CPUs\n",
+                               phba->cfg_fcp_cpu_map,
+                               phba->sli4_hba.num_online_cpu);
+               break;
+       }
+
+       cpup = phba->sli4_hba.cpu_map;
+       for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+               if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
+                       len += snprintf(buf + len, PAGE_SIZE-len,
+                                       "CPU %02d io_chan %02d "
+                                       "physid %d coreid %d\n",
+                                       idx, cpup->channel_id, cpup->phys_id,
+                                       cpup->core_id);
+               else
+                       len += snprintf(buf + len, PAGE_SIZE-len,
+                                       "CPU %02d io_chan %02d "
+                                       "physid %d coreid %d IRQ %d\n",
+                                       idx, cpup->channel_id, cpup->phys_id,
+                                       cpup->core_id, cpup->irq);
+
+               cpup++;
+       }
+       return len;
+}
+
+/**
+ * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: requested mapping value (store is not implemented).
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL  - Not implemented yet.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
+                      const char *buf, size_t count)
+{
+       int status = -EINVAL;
+       return status;
+}
+
+/*
+# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
+# for the HBA.
+#
+# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
+#      0 - Do not affinitize IRQ vectors
+#      1 - Affinitize HBA vectors with respect to each HBA
+#          (start with CPU0 for each HBA)
+#      2 - Affinitize HBA vectors with respect to the entire driver
+#          (round robin through all CPUs across all HBAs)
+*/
+static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_cpu_map,
+                "Defines how to map CPUs to IRQ vectors per HBA");
+
+/**
+ * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
+ * @phba: lpfc_hba pointer.
+ * @val: lpfc_fcp_cpu_map module parameter value.
+ *
+ * Description:
+ * If val is in a valid range [0-2], then affinitize the adapter's
+ * MSIX vectors.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
+{
+       if (phba->sli_rev != LPFC_SLI_REV4) {
+               phba->cfg_fcp_cpu_map = 0;
+               return 0;
+       }
+
+       if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
+               phba->cfg_fcp_cpu_map = val;
+               return 0;
+       }
+
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "3326 fcp_cpu_map: %d out of range, using default\n",
+                       val);
+       phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+
+       return 0;
+}
+
+static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
+                  lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
+
 /*
 # lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
 # Value range is [2,3]. Default value is 3.
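
The mapping code itself is not part of this hunk, so the following is only an illustrative sketch of what "driver centric" (mode 2) assignment could look like: I/O channels are round-robined over every online CPU, continuing where the previous HBA stopped, while "HBA centric" (mode 1) restarts at CPU0 for each adapter. The names (map_io_channels, next_cpu) are hypothetical.

/* next_cpu persists across HBAs for mode 2 (assumption for the sketch) */
static unsigned int next_cpu;

static void map_io_channels(unsigned int *chan_to_cpu, unsigned int nr_chan,
                            unsigned int nr_online_cpus, int hba_centric)
{
        unsigned int ch;

        if (hba_centric)
                next_cpu = 0;           /* mode 1: start at CPU0 per HBA */

        for (ch = 0; ch < nr_chan; ch++) {
                chan_to_cpu[ch] = next_cpu;
                next_cpu = (next_cpu + 1) % nr_online_cpus;
        }
}
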
@@ -4009,12 +4151,11 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 #       0  = disabled (default)
 #       1  = enabled
 # Value range is [0,1]. Default value is 0.
+#
+# This feature is under investigation and may be supported in the future.
 */
 unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
 
-module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
-MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
-
 /*
 # lpfc_prot_mask: i
 #      - Bit mask of host protection capabilities used to register with the
@@ -4071,16 +4212,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery,
 
 /*
  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 256. The default value is
+ * This value can be set to values between 64 and 4096. The default value is
  * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
  * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * Because of the additional overhead involved in setting up T10-DIF,
+ * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
+ * and will be limited to 512 if BlockGuard is enabled under SLI3.
  */
 LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
            LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
 
-LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
-               LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
-               "Max Protection Scatter Gather Segment Count");
+/*
+ * This parameter will be deprecated; the driver cannot limit the
+ * protection data s/g list.
+ */
+LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
+           LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
+           "Max Protection Scatter Gather Segment Count");
 
 struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_bg_info,
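
As a back-of-the-envelope check of the limits described above, assuming 4 KiB pages and one page per scatter/gather element, the largest I/O the midlayer can build is roughly sg_seg_cnt * PAGE_SIZE:

#include <stdio.h>

int main(void)
{
        const unsigned long page_kib = 4;       /* 4 KiB pages assumed */
        const unsigned long cnts[] = { 64, 128, 512, 4096 };
        unsigned int i;

        for (i = 0; i < sizeof(cnts) / sizeof(cnts[0]); i++)
                printf("%4lu segments -> %6lu KiB max I/O\n",
                       cnts[i], cnts[i] * page_kib);
        return 0;
}

/* 64 -> 256 KiB (default), 128 -> 512 KiB (SLI4 BlockGuard cap),
 * 512 -> 2048 KiB (SLI3 BlockGuard cap), 4096 -> 16384 KiB (upper bound). */
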
@@ -4141,6 +4289,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_poll_tmo,
        &dev_attr_lpfc_use_msi,
        &dev_attr_lpfc_fcp_imax,
+       &dev_attr_lpfc_fcp_cpu_map,
        &dev_attr_lpfc_fcp_wq_count,
        &dev_attr_lpfc_fcp_eq_count,
        &dev_attr_lpfc_fcp_io_channel,
@@ -5123,6 +5272,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
        lpfc_use_msi_init(phba, lpfc_use_msi);
        lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+       lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
        lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
        lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
        lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
index 8886668920049e685cb9b5d3572cb1426dbea526..094be2cad65bba821e9050a48bb2602dfd65f067 100644 (file)
@@ -219,26 +219,35 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
        unsigned int transfer_bytes, bytes_copied = 0;
        unsigned int sg_offset, dma_offset;
        unsigned char *dma_address, *sg_address;
-       struct scatterlist *sgel;
        LIST_HEAD(temp_list);
-
+       struct sg_mapping_iter miter;
+       unsigned long flags;
+       unsigned int sg_flags = SG_MITER_ATOMIC;
+       bool sg_valid;
 
        list_splice_init(&dma_buffers->list, &temp_list);
        list_add(&dma_buffers->list, &temp_list);
        sg_offset = 0;
-       sgel = bsg_buffers->sg_list;
+       if (to_buffers)
+               sg_flags |= SG_MITER_FROM_SG;
+       else
+               sg_flags |= SG_MITER_TO_SG;
+       sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
+                      sg_flags);
+       local_irq_save(flags);
+       sg_valid = sg_miter_next(&miter);
        list_for_each_entry(mp, &temp_list, list) {
                dma_offset = 0;
-               while (bytes_to_transfer && sgel &&
+               while (bytes_to_transfer && sg_valid &&
                       (dma_offset < LPFC_BPL_SIZE)) {
                        dma_address = mp->virt + dma_offset;
                        if (sg_offset) {
                                /* Continue previous partial transfer of sg */
-                               sg_address = sg_virt(sgel) + sg_offset;
-                               transfer_bytes = sgel->length - sg_offset;
+                               sg_address = miter.addr + sg_offset;
+                               transfer_bytes = miter.length - sg_offset;
                        } else {
-                               sg_address = sg_virt(sgel);
-                               transfer_bytes = sgel->length;
+                               sg_address = miter.addr;
+                               transfer_bytes = miter.length;
                        }
                        if (bytes_to_transfer < transfer_bytes)
                                transfer_bytes = bytes_to_transfer;
@@ -252,12 +261,14 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
                        sg_offset += transfer_bytes;
                        bytes_to_transfer -= transfer_bytes;
                        bytes_copied += transfer_bytes;
-                       if (sg_offset >= sgel->length) {
+                       if (sg_offset >= miter.length) {
                                sg_offset = 0;
-                               sgel = sg_next(sgel);
+                               sg_valid = sg_miter_next(&miter);
                        }
                }
        }
+       sg_miter_stop(&miter);
+       local_irq_restore(flags);
        list_del_init(&dma_buffers->list);
        list_splice(&temp_list, &dma_buffers->list);
        return bytes_copied;
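
lpfc_bsg_copy_data above switches from walking the scatterlist by hand with sg_virt()/sg_next() to the sg_mapping_iter API (sg_miter_start/next/stop), which maps each element for the caller and tracks the current address and length. A minimal kernel-style sketch of the same iterator pattern, copying an sg list into a flat buffer (sg_copy_to_flat is a hypothetical helper, not part of the patch):

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static size_t sg_copy_to_flat(struct scatterlist *sgl, unsigned int nents,
                              void *dst, size_t len)
{
        struct sg_mapping_iter miter;
        size_t copied = 0;

        /* SG_MITER_FROM_SG: the sg list is the source of the copy */
        sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);

        while (copied < len && sg_miter_next(&miter)) {
                size_t chunk = min(miter.length, len - copied);

                memcpy(dst + copied, miter.addr, chunk);
                copied += chunk;
        }

        sg_miter_stop(&miter);
        return copied;
}

The driver's version also passes SG_MITER_ATOMIC and brackets the walk with local_irq_save()/local_irq_restore(), as shown in the hunk above, and copies to or from a chain of DMA buffers rather than one flat destination.
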
@@ -471,6 +482,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
        cmdiocbq->context1 = dd_data;
        cmdiocbq->context2 = cmp;
        cmdiocbq->context3 = bmp;
+       cmdiocbq->context_un.ndlp = ndlp;
        dd_data->type = TYPE_IOCB;
        dd_data->set_job = job;
        dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -1508,6 +1520,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
        ctiocb->context1 = dd_data;
        ctiocb->context2 = cmp;
        ctiocb->context3 = bmp;
+       ctiocb->context_un.ndlp = ndlp;
        ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
 
        dd_data->type = TYPE_IOCB;
@@ -2576,7 +2589,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
        evt->wait_time_stamp = jiffies;
        time_left = wait_event_interruptible_timeout(
                evt->wq, !list_empty(&evt->events_to_see),
-               ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+               msecs_to_jiffies(1000 *
+                       ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
        if (list_empty(&evt->events_to_see))
                ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
        else {
@@ -3151,7 +3165,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
        evt->waiting = 1;
        time_left = wait_event_interruptible_timeout(
                evt->wq, !list_empty(&evt->events_to_see),
-               ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+               msecs_to_jiffies(1000 *
+                       ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
        evt->waiting = 0;
        if (list_empty(&evt->events_to_see)) {
                rc = (time_left) ? -EINTR : -ETIMEDOUT;
index 7631893ae00511c49033f3445e45f8b164d47d46..d41456e5f8149e3fc09d82086554d20ef9921ce2 100644 (file)
@@ -470,3 +470,4 @@ int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
 void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
 uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
 int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
+void lpfc_sli4_offline_eratt(struct lpfc_hba *);
index 7bff3a19af56880dba0fdca4e203676d52c4e916..ae1a07c57caef681d5d54222e39b41b67e70643f 100644 (file)
@@ -1811,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
                if (init_utsname()->nodename[0] != '\0')
                        lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
                else
-                       mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+                       mod_timer(&vport->fc_fdmitmo, jiffies +
+                                 msecs_to_jiffies(1000 * 60));
        }
        return;
 }
index bbed8471bf0b81fce50390bb9b0ec6b7a68ba146..3cae0a92e8bd0001142c7a175102de1e0ab01dc6 100644 (file)
@@ -29,6 +29,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
@@ -238,7 +239,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 
                icmd->un.elsreq64.remoteID = did;               /* DID */
                icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
-               icmd->ulpTimeout = phba->fc_ratov * 2;
+               if (elscmd == ELS_CMD_FLOGI)
+                       icmd->ulpTimeout = FF_DEF_RATOV * 2;
+               else
+                       icmd->ulpTimeout = phba->fc_ratov * 2;
        } else {
                icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
                icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
@@ -308,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
                /* Xmit ELS command <elsCmd> to remote NPORT <did> */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                                 "0116 Xmit ELS command x%x to remote "
-                                "NPORT x%x I/O tag: x%x, port state: x%x\n",
+                                "NPORT x%x I/O tag: x%x, port state:x%x"
+                                " fc_flag:x%x\n",
                                 elscmd, did, elsiocb->iotag,
-                                vport->port_state);
+                                vport->port_state,
+                                vport->fc_flag);
        } else {
                /* Xmit ELS response <elsCmd> to remote NPORT <did> */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                                 "0117 Xmit ELS response x%x to remote "
-                                "NPORT x%x I/O tag: x%x, size: x%x\n",
+                                "NPORT x%x I/O tag: x%x, size: x%x "
+                                "port_state x%x fc_flag x%x\n",
                                 elscmd, ndlp->nlp_DID, elsiocb->iotag,
-                                cmdSize);
+                                cmdSize, vport->port_state,
+                                vport->fc_flag);
        }
        return elsiocb;
 
@@ -909,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_PT2PT;
        spin_unlock_irq(shost->host_lock);
+       /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+       if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
+               lpfc_unregister_fcf_prep(phba);
+
+               /* The FC_VFI_REGISTERED flag will get cleared in the cmpl
+                * handler for unreg_vfi, but if we don't force the
+                * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
+                * built with the update bit set instead of just the vp bit to
+                * change the Nport ID.  We need to have the vp set and the
+                * Upd cleared on topology changes.
+                */
+               spin_lock_irq(shost->host_lock);
+               vport->fc_flag &= ~FC_VFI_REGISTERED;
+               spin_unlock_irq(shost->host_lock);
+               phba->fc_topology_changed = 0;
+               lpfc_issue_reg_vfi(vport);
+       }
 
        /* Start discovery - this should just do CLEAR_LA */
        lpfc_disc_start(vport);
@@ -1030,9 +1055,19 @@ stop_rr_fcf_flogi:
                        vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
                if ((phba->sli_rev == LPFC_SLI_REV4) &&
                    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
-                    (vport->fc_prevDID != vport->fc_myDID))) {
-                       if (vport->fc_flag & FC_VFI_REGISTERED)
-                               lpfc_sli4_unreg_all_rpis(vport);
+                    (vport->fc_prevDID != vport->fc_myDID) ||
+                       phba->fc_topology_changed)) {
+                       if (vport->fc_flag & FC_VFI_REGISTERED) {
+                               if (phba->fc_topology_changed) {
+                                       lpfc_unregister_fcf_prep(phba);
+                                       spin_lock_irq(shost->host_lock);
+                                       vport->fc_flag &= ~FC_VFI_REGISTERED;
+                                       spin_unlock_irq(shost->host_lock);
+                                       phba->fc_topology_changed = 0;
+                               } else {
+                                       lpfc_sli4_unreg_all_rpis(vport);
+                               }
+                       }
                        lpfc_issue_reg_vfi(vport);
                        lpfc_nlp_put(ndlp);
                        goto out;
@@ -1054,10 +1089,11 @@ stop_rr_fcf_flogi:
 
        /* FLOGI completes successfully */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                        "0101 FLOGI completes successfully "
-                        "Data: x%x x%x x%x x%x\n",
+                        "0101 FLOGI completes successfully, I/O tag:x%x, "
+                        "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
                         irsp->un.ulpWord[4], sp->cmn.e_d_tov,
-                        sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
+                        sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
+                        vport->port_state, vport->fc_flag);
 
        if (vport->port_state == LPFC_FLOGI) {
                /*
@@ -5047,6 +5083,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        struct ls_rjt stat;
        uint32_t cmd, did;
        int rc;
+       uint32_t fc_flag = 0;
+       uint32_t port_state = 0;
 
        cmd = *lp++;
        sp = (struct serv_parm *) lp;
@@ -5113,16 +5151,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                         * will be.
                         */
                        vport->fc_myDID = PT2PT_LocalID;
-               }
+               } else
+                       vport->fc_myDID = PT2PT_RemoteID;
 
                /*
                 * The vport state should go to LPFC_FLOGI only
                 * AFTER we issue a FLOGI, not receive one.
                 */
                spin_lock_irq(shost->host_lock);
+               fc_flag = vport->fc_flag;
+               port_state = vport->port_state;
                vport->fc_flag |= FC_PT2PT;
                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+               vport->port_state = LPFC_FLOGI;
                spin_unlock_irq(shost->host_lock);
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                                "3311 Rcv Flogi PS x%x new PS x%x "
+                                "fc_flag x%x new fc_flag x%x\n",
+                                port_state, vport->port_state,
+                                fc_flag, vport->fc_flag);
 
                /*
                 * We temporarily set fc_myDID to make it look like we are
@@ -6241,7 +6288,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
        }
 
        if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
-               mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+               mod_timer(&vport->els_tmofunc,
+                         jiffies + msecs_to_jiffies(1000 * timeout));
 }
 
 /**
@@ -6612,7 +6660,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        /* ELS command <elsCmd> received from NPORT <did> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "0112 ELS command x%x received from NPORT x%x "
-                        "Data: x%x\n", cmd, did, vport->port_state);
+                        "Data: x%x x%x x%x x%x\n",
+                       cmd, did, vport->port_state, vport->fc_flag,
+                       vport->fc_myDID, vport->fc_prevDID);
        switch (cmd) {
        case ELS_CMD_PLOGI:
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6621,6 +6671,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
                phba->fc_stat.elsRcvPLOGI++;
                ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+               if (phba->sli_rev == LPFC_SLI_REV4 &&
+                   (phba->pport->fc_flag & FC_PT2PT)) {
+                       vport->fc_prevDID = vport->fc_myDID;
+                       /* Our DID needs to be updated before registering
+                        * the vfi. This is done in lpfc_rcv_plogi but
+                        * that is called after the reg_vfi.
+                        */
+                       vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
+                       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                                        "3312 Remote port assigned DID x%x "
+                                        "%x\n", vport->fc_myDID,
+                                        vport->fc_prevDID);
+               }
 
                lpfc_send_els_event(vport, ndlp, payload);
 
@@ -6630,6 +6693,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        rjt_exp = LSEXP_NOTHING_MORE;
                        break;
                }
+               shost = lpfc_shost_from_vport(vport);
                if (vport->port_state < LPFC_DISC_AUTH) {
                        if (!(phba->pport->fc_flag & FC_PT2PT) ||
                                (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6641,9 +6705,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                         * another NPort and the other side has initiated
                         * the PLOGI before responding to our FLOGI.
                         */
+                       if (phba->sli_rev == LPFC_SLI_REV4 &&
+                           (phba->fc_topology_changed ||
+                            vport->fc_myDID != vport->fc_prevDID)) {
+                               lpfc_unregister_fcf_prep(phba);
+                               spin_lock_irq(shost->host_lock);
+                               vport->fc_flag &= ~FC_VFI_REGISTERED;
+                               spin_unlock_irq(shost->host_lock);
+                               phba->fc_topology_changed = 0;
+                               lpfc_issue_reg_vfi(vport);
+                       }
                }
 
-               shost = lpfc_shost_from_vport(vport);
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
                spin_unlock_irq(shost->host_lock);
@@ -7002,8 +7075,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
        spin_lock_irq(shost->host_lock);
        if (vport->fc_flag & FC_DISC_DELAYED) {
                spin_unlock_irq(shost->host_lock);
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                               "3334 Delay fc port discovery for %d seconds\n",
+                               phba->fc_ratov);
                mod_timer(&vport->delayed_disc_tmo,
-                       jiffies + HZ * phba->fc_ratov);
+                       jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
                return;
        }
        spin_unlock_irq(shost->host_lock);
@@ -7287,7 +7363,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
                return;
 
        shost = lpfc_shost_from_vport(phba->pport);
-       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);
@@ -7791,7 +7867,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
        blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
        /* Start a timer to unblock fabric iocbs after 100ms */
        if (!blocked)
-               mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
+               mod_timer(&phba->fabric_block_timer,
+                         jiffies + msecs_to_jiffies(100));
 
        return;
 }
index 326e05a65a7314d7648a16f0097485bb5d439a0b..0f6e2548f35d7b7b03d870b813b5c1726bfcc48d 100644 (file)
@@ -160,11 +160,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
        if (!list_empty(&evtp->evt_listp))
                return;
 
+       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+
        spin_lock_irq(&phba->hbalock);
        /* We need to hold the node by incrementing the reference
         * count until this queued work is done
         */
-       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
        if (evtp->evt_arg1) {
                evtp->evt = LPFC_EVT_DEV_LOSS;
                list_add_tail(&evtp->evt_listp, &phba->work_list);
@@ -1008,9 +1009,6 @@ lpfc_linkup(struct lpfc_hba *phba)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_linkup_port(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);
-       if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
-           (phba->sli_rev < LPFC_SLI_REV4))
-               lpfc_issue_clear_la(phba, phba->pport);
 
        return 0;
 }
@@ -1436,7 +1434,8 @@ lpfc_register_fcf(struct lpfc_hba *phba)
        if (phba->fcf.fcf_flag & FCF_REGISTERED) {
                phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
                phba->hba_flag &= ~FCF_TS_INPROG;
-               if (phba->pport->port_state != LPFC_FLOGI) {
+               if (phba->pport->port_state != LPFC_FLOGI &&
+                   phba->pport->fc_flag & FC_FABRIC) {
                        phba->hba_flag |= FCF_RR_INPROG;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_initial_flogi(phba->pport);
@@ -2270,8 +2269,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                                spin_unlock_irq(&phba->hbalock);
                                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                                "2836 New FCF matches in-use "
-                                               "FCF (x%x)\n",
-                                               phba->fcf.current_rec.fcf_indx);
+                                               "FCF (x%x), port_state:x%x, "
+                                               "fc_flag:x%x\n",
+                                               phba->fcf.current_rec.fcf_indx,
+                                               phba->pport->port_state,
+                                               phba->pport->fc_flag);
                                goto out;
                        } else
                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
@@ -2796,7 +2798,19 @@ void
 lpfc_issue_init_vpi(struct lpfc_vport *vport)
 {
        LPFC_MBOXQ_t *mboxq;
-       int rc;
+       int rc, vpi;
+
+       if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
+               vpi = lpfc_alloc_vpi(vport->phba);
+               if (!vpi) {
+                       lpfc_printf_vlog(vport, KERN_ERR,
+                                        LOG_MBOX,
+                                        "3303 Failed to obtain vport vpi\n");
+                       lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+                       return;
+               }
+               vport->vpi = vpi;
+       }
 
        mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq) {
@@ -2894,9 +2908,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                goto out_free_mem;
        }
 
-       /* If the VFI is already registered, there is nothing else to do */
+       /* If the VFI is already registered, there is nothing else to do
+        * Unless this was a VFI update and we are in PT2PT mode, then
+        * we should drop through to set the port state to ready.
+        */
        if (vport->fc_flag & FC_VFI_REGISTERED)
-               goto out_free_mem;
+               if (!(phba->sli_rev == LPFC_SLI_REV4 &&
+                     vport->fc_flag & FC_PT2PT))
+                       goto out_free_mem;
 
        /* The VPI is implicitly registered when the VFI is registered */
        spin_lock_irq(shost->host_lock);
@@ -2913,6 +2932,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                goto out_free_mem;
        }
 
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                        "3313 cmpl reg vfi  port_state:%x fc_flag:%x myDid:%x "
+                        "alpacnt:%d LinkState:%x topology:%x\n",
+                        vport->port_state, vport->fc_flag, vport->fc_myDID,
+                        vport->phba->alpa_map[0],
+                        phba->link_state, phba->fc_topology);
+
        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
                /*
                 * For private loop or for NPort pt2pt,
@@ -2925,7 +2951,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                        /* Use loop map to make discovery list */
                        lpfc_disc_list_loopmap(vport);
                        /* Start discovery */
-                       lpfc_disc_start(vport);
+                       if (vport->fc_flag & FC_PT2PT)
+                               vport->port_state = LPFC_VPORT_READY;
+                       else
+                               lpfc_disc_start(vport);
                } else {
                        lpfc_start_fdiscs(phba);
                        lpfc_do_scr_ns_plogi(phba, vport);
@@ -3007,6 +3036,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
                break;
        }
 
+       if (phba->fc_topology &&
+           phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                               "3314 Topology changed was 0x%x is 0x%x\n",
+                               phba->fc_topology,
+                               bf_get(lpfc_mbx_read_top_topology, la));
+               phba->fc_topology_changed = 1;
+       }
+
        phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
 
@@ -4235,7 +4273,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
                        tmo, vport->port_state, vport->fc_flag);
        }
 
-       mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
+       mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_DISC_TMO;
        spin_unlock_irq(shost->host_lock);
@@ -4949,8 +4987,12 @@ lpfc_disc_start(struct lpfc_vport *vport)
        uint32_t clear_la_pending;
        int did_changed;
 
-       if (!lpfc_is_link_up(phba))
+       if (!lpfc_is_link_up(phba)) {
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                                "3315 Link is not up %x\n",
+                                phba->link_state);
                return;
+       }
 
        if (phba->link_state == LPFC_CLEAR_LA)
                clear_la_pending = 1;
@@ -4983,11 +5025,13 @@ lpfc_disc_start(struct lpfc_vport *vport)
        if (num_sent)
                return;
 
-       /* Register the VPI for SLI3, NON-NPIV only. */
+       /* Register the VPI for SLI3, NPIV only. */
        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            !(vport->fc_flag & FC_PT2PT) &&
            !(vport->fc_flag & FC_RSCN_MODE) &&
            (phba->sli_rev < LPFC_SLI_REV4)) {
+               if (vport->port_type == LPFC_PHYSICAL_PORT)
+                       lpfc_issue_clear_la(phba, vport);
                lpfc_issue_reg_vpi(phba, vport);
                return;
        }
@@ -5410,7 +5454,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        if (vport->cfg_fdmi_on == 1)
                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
        else
-               mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+               mod_timer(&vport->fc_fdmitmo,
+                         jiffies + msecs_to_jiffies(1000 * 60));
 
        /* decrement the node reference count held for this callback
         * function.
@@ -5855,7 +5900,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
        struct lpfc_vport **vports;
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host *shost;
-       int i, rc;
+       int i = 0, rc;
 
        /* Unregister RPIs */
        if (lpfc_fcf_inuse(phba))
@@ -5883,6 +5928,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
                        spin_unlock_irq(shost->host_lock);
                }
        lpfc_destroy_vport_work_array(phba, vports);
+       if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
+               ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+               if (ndlp)
+                       lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+               lpfc_cleanup_pending_mbox(phba->pport);
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       lpfc_sli4_unreg_all_rpis(phba->pport);
+               lpfc_mbx_unreg_vpi(phba->pport);
+               shost = lpfc_shost_from_vport(phba->pport);
+               spin_lock_irq(shost->host_lock);
+               phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+               phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
+               spin_unlock_irq(shost->host_lock);
+       }
 
        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_all_cmd(phba);
index e8c47603170370ca0d2418bb3e4d759b377d60e3..83700c18f4688a872894aec6da35d7c46058580a 100644 (file)
@@ -1667,6 +1667,7 @@ enum lpfc_protgrp_type {
 #define        BG_OP_IN_CSUM_OUT_CSUM          0x5
 #define        BG_OP_IN_CRC_OUT_CSUM           0x6
 #define        BG_OP_IN_CSUM_OUT_CRC           0x7
+#define        BG_OP_RAW_MODE                  0x8
 
 struct lpfc_pde5 {
        uint32_t word0;
index 1dd2f6f0a1272f4717e6063449879b55b06ce4b0..713a4613ec3ac1c22829fa9d150d6c9767ae3d80 100644 (file)
@@ -200,6 +200,11 @@ struct lpfc_sli_intf {
 #define LPFC_MAX_IMAX          5000000
 #define LPFC_DEF_IMAX          50000
 
+#define LPFC_MIN_CPU_MAP       0
+#define LPFC_MAX_CPU_MAP       2
+#define LPFC_HBA_CPU_MAP       1
+#define LPFC_DRIVER_CPU_MAP    2  /* Default */
+
 /* PORT_CAPABILITIES constants. */
 #define LPFC_MAX_SUPPORTED_PAGES       8
 
@@ -621,7 +626,7 @@ struct lpfc_register {
 #define lpfc_sliport_status_rdy_SHIFT  23
 #define lpfc_sliport_status_rdy_MASK   0x1
 #define lpfc_sliport_status_rdy_WORD   word0
-#define MAX_IF_TYPE_2_RESETS   1000
+#define MAX_IF_TYPE_2_RESETS           6
 
 #define LPFC_CTL_PORT_CTL_OFFSET       0x408
 #define lpfc_sliport_ctrl_end_SHIFT    30
index 90b8b0515e23d7c0ee0f309d640c5fb6e94e8988..cb465b253910b9972eb48639d4527162a7a0b0b9 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/firmware.h>
 #include <linux/miscdevice.h>
+#include <linux/percpu.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -58,6 +59,9 @@ char *_dump_buf_dif;
 unsigned long _dump_buf_dif_order;
 spinlock_t _dump_buf_lock;
 
+/* Used when mapping IRQ vectors in a driver centric manner */
+uint16_t lpfc_used_cpu[LPFC_MAX_CPU];
+
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -541,13 +545,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 
        /* Set up ring-0 (ELS) timer */
        timeout = phba->fc_ratov * 2;
-       mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+       mod_timer(&vport->els_tmofunc,
+                 jiffies + msecs_to_jiffies(1000 * timeout));
        /* Set up heart beat (HB) timer */
-       mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+       mod_timer(&phba->hb_tmofunc,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
        /* Set up error attention (ERATT) polling timer */
-       mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+       mod_timer(&phba->eratt_poll,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
 
        if (phba->hba_flag & LINK_DISABLED) {
                lpfc_printf_log(phba,
@@ -908,9 +915,9 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
                psb->pCmd = NULL;
                psb->status = IOSTAT_SUCCESS;
        }
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-       list_splice(&aborts, &phba->lpfc_scsi_buf_list);
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+       list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
        return 0;
 }
 
@@ -1021,7 +1028,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
                !(phba->link_state == LPFC_HBA_ERROR) &&
                !(phba->pport->load_flag & FC_UNLOADING))
                mod_timer(&phba->hb_tmofunc,
-                       jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                         jiffies +
+                         msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        return;
 }
 
@@ -1064,15 +1072,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 
        spin_lock_irq(&phba->pport->work_port_lock);
 
-       if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
-               jiffies)) {
+       if (time_after(phba->last_completion_time +
+                       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+                       jiffies)) {
                spin_unlock_irq(&phba->pport->work_port_lock);
                if (!phba->hb_outstanding)
                        mod_timer(&phba->hb_tmofunc,
-                               jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
                else
                        mod_timer(&phba->hb_tmofunc,
-                               jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                return;
        }
        spin_unlock_irq(&phba->pport->work_port_lock);
@@ -1104,7 +1115,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                if (!pmboxq) {
                                        mod_timer(&phba->hb_tmofunc,
                                                 jiffies +
-                                                HZ * LPFC_HB_MBOX_INTERVAL);
+                                                msecs_to_jiffies(1000 *
+                                                LPFC_HB_MBOX_INTERVAL));
                                        return;
                                }
 
@@ -1120,7 +1132,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                                        phba->mbox_mem_pool);
                                        mod_timer(&phba->hb_tmofunc,
                                                jiffies +
-                                               HZ * LPFC_HB_MBOX_INTERVAL);
+                                               msecs_to_jiffies(1000 *
+                                               LPFC_HB_MBOX_INTERVAL));
                                        return;
                                }
                                phba->skipped_hb = 0;
@@ -1136,7 +1149,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                phba->skipped_hb = jiffies;
 
                        mod_timer(&phba->hb_tmofunc,
-                                 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                                jiffies +
+                                msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                        return;
                } else {
                        /*
@@ -1150,7 +1164,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                        jiffies_to_msecs(jiffies
                                                 - phba->last_completion_time));
                        mod_timer(&phba->hb_tmofunc,
-                                 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                }
        }
 }
@@ -1191,7 +1206,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
  * This routine is called to bring a SLI4 HBA offline when HBA hardware error
  * other than Port Error 6 has been detected.
  **/
-static void
+void
 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
 {
        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
@@ -2633,6 +2648,7 @@ lpfc_online(struct lpfc_hba *phba)
        struct lpfc_vport *vport;
        struct lpfc_vport **vports;
        int i;
+       bool vpis_cleared = false;
 
        if (!phba)
                return 0;
@@ -2656,6 +2672,10 @@ lpfc_online(struct lpfc_hba *phba)
                        lpfc_unblock_mgmt_io(phba);
                        return 1;
                }
+               spin_lock_irq(&phba->hbalock);
+               if (!phba->sli4_hba.max_cfg_param.vpi_used)
+                       vpis_cleared = true;
+               spin_unlock_irq(&phba->hbalock);
        } else {
                if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
                        lpfc_unblock_mgmt_io(phba);
@@ -2672,8 +2692,13 @@ lpfc_online(struct lpfc_hba *phba)
                        vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-                       if (phba->sli_rev == LPFC_SLI_REV4)
+                       if (phba->sli_rev == LPFC_SLI_REV4) {
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+                               if ((vpis_cleared) &&
+                                   (vports[i]->port_type !=
+                                       LPFC_PHYSICAL_PORT))
+                                       vports[i]->vpi = 0;
+                       }
                        spin_unlock_irq(shost->host_lock);
                }
                lpfc_destroy_vport_work_array(phba, vports);
@@ -2833,16 +2858,30 @@ lpfc_scsi_free(struct lpfc_hba *phba)
        struct lpfc_iocbq *io, *io_next;
 
        spin_lock_irq(&phba->hbalock);
+
        /* Release all the lpfc_scsi_bufs maintained by this host. */
-       spin_lock(&phba->scsi_buf_list_lock);
-       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
+
+       spin_lock(&phba->scsi_buf_list_put_lock);
+       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
+                                list) {
                list_del(&sb->list);
                pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
                              sb->dma_handle);
                kfree(sb);
                phba->total_scsi_bufs--;
        }
-       spin_unlock(&phba->scsi_buf_list_lock);
+       spin_unlock(&phba->scsi_buf_list_put_lock);
+
+       spin_lock(&phba->scsi_buf_list_get_lock);
+       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
+                                list) {
+               list_del(&sb->list);
+               pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+                             sb->dma_handle);
+               kfree(sb);
+               phba->total_scsi_bufs--;
+       }
+       spin_unlock(&phba->scsi_buf_list_get_lock);
 
        /* Release all the lpfc_iocbq entries maintained by this host. */
        list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2978,9 +3017,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                        phba->sli4_hba.scsi_xri_cnt,
                        phba->sli4_hba.scsi_xri_max);
 
-       spin_lock_irq(&phba->scsi_buf_list_lock);
-       list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list);
-       spin_unlock_irq(&phba->scsi_buf_list_lock);
+       spin_lock_irq(&phba->scsi_buf_list_get_lock);
+       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
+       list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
+       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
                /* max scsi xri shrinked below the allocated scsi buffers */
@@ -2994,9 +3036,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                                      psb->dma_handle);
                        kfree(psb);
                }
-               spin_lock_irq(&phba->scsi_buf_list_lock);
+               spin_lock_irq(&phba->scsi_buf_list_get_lock);
                phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
-               spin_unlock_irq(&phba->scsi_buf_list_lock);
+               spin_unlock_irq(&phba->scsi_buf_list_get_lock);
        }
 
        /* update xris associated to remaining allocated scsi buffers */
@@ -3014,9 +3056,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                psb->cur_iocbq.sli4_lxritag = lxri;
                psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
        }
-       spin_lock_irq(&phba->scsi_buf_list_lock);
-       list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
-       spin_unlock_irq(&phba->scsi_buf_list_lock);
+       spin_lock_irq(&phba->scsi_buf_list_get_lock);
+       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        return 0;
 
@@ -3197,14 +3242,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
                stat = 1;
                goto finished;
        }
-       if (time >= 30 * HZ) {
+       if (time >= msecs_to_jiffies(30 * 1000)) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0461 Scanning longer than 30 "
                                "seconds.  Continuing initialization\n");
                stat = 1;
                goto finished;
        }
-       if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
+       if (time >= msecs_to_jiffies(15 * 1000) &&
+           phba->link_state <= LPFC_LINK_DOWN) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0465 Link down longer than 15 "
                                "seconds.  Continuing initialization\n");
@@ -3216,7 +3262,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
                goto finished;
        if (vport->num_disc_nodes || vport->fc_prli_sent)
                goto finished;
-       if (vport->fc_map_cnt == 0 && time < 2 * HZ)
+       if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
                goto finished;
        if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
                goto finished;
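
For reference, the timeout changes above are a notation change: 30 * HZ and msecs_to_jiffies(30 * 1000) both describe a 30-second bound in jiffies, the new form simply stating the interval in milliseconds instead of relying on HZ being read as ticks per second. A stand-alone illustration; the HZ value and the simplified conversion below are assumptions, not the kernel's implementation:

    #include <stdio.h>

    #define HZ 250                          /* illustrative CONFIG_HZ value */

    /* simplified conversion for the case where HZ divides 1000 evenly;
     * the kernel's msecs_to_jiffies() also handles rounding and overflow */
    static unsigned long msecs_to_jiffies_demo(unsigned int ms)
    {
            return ms / (1000 / HZ);
    }

    int main(void)
    {
            printf("30 * HZ                 = %d jiffies\n", 30 * HZ);
            printf("msecs_to_jiffies(30000) = %lu jiffies\n",
                   msecs_to_jiffies_demo(30 * 1000));
            return 0;
    }

Both lines print 7500 jiffies with the assumed HZ, which is the same 30-second bound either way.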
@@ -4215,7 +4261,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
                         * If there are other active VLinks present,
                         * re-instantiate the Vlink using FDISC.
                         */
-                       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+                       mod_timer(&ndlp->nlp_delayfunc,
+                                 jiffies + msecs_to_jiffies(1000));
                        shost = lpfc_shost_from_vport(vport);
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -4707,23 +4754,52 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
                return -ENOMEM;
 
        /*
-        * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+        * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
         * used to create the sg_dma_buf_pool must be dynamically calculated.
-        * 2 segments are added since the IOCB needs a command and response bde.
         */
-       phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
-               sizeof(struct fcp_rsp) +
-                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
 
+       /* Initialize the host templates with the configured values. */
+       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+       /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
        if (phba->cfg_enable_bg) {
-               phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
-               phba->cfg_sg_dma_buf_size +=
-                       phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
+               /*
+                * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
+                * the FCP rsp, and a BDE for each. Since we have no control
+                * over how many protection data segments the SCSI Layer
+                * will hand us (i.e. there could be one for every block
+                * in the IO), we just allocate enough BDEs to accommodate
+                * our max amount and we need to limit lpfc_sg_seg_cnt to
+                * minimize the risk of running out.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) +
+                       (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
+
+               if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
+                       phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
+
+               /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
+               phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+       } else {
+               /*
+                * The scsi_buf for a regular I/O will hold the FCP cmnd,
+                * the FCP rsp, a BDE for each, and a BDE for up to
+                * cfg_sg_seg_cnt data segments.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) +
+                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+
+               /* Total BDEs in BPL for scsi_sg_list */
+               phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
        }
 
-       /* Also reinitialize the host templates with new values. */
-       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+                       "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
+                       phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+                       phba->cfg_total_seg_cnt);
 
        phba->max_vpi = LPFC_MAX_VPI;
        /* This will be set to correct value after config_port mbox */
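
A compact stand-alone sketch of the two sizing formulas introduced above; the structure sizes and the LPFC_MAX_SG_SEG_CNT stand-in below are illustrative assumptions, not the driver's actual values:

    #include <stdio.h>

    /* Illustrative stand-ins for the driver's header constants; the real
     * values come from sizeof() on the lpfc structures and lpfc.h limits. */
    #define FCP_CMND_SZ        32
    #define FCP_RSP_SZ         64
    #define ULP_BDE64_SZ       12
    #define MAX_SG_SEG_CNT     4096        /* stand-in for LPFC_MAX_SG_SEG_CNT */

    static int sg_dma_buf_size(int sg_seg_cnt, int enable_bg)
    {
            if (enable_bg)
                    /* BlockGuard: reserve the worst-case number of BDEs */
                    return FCP_CMND_SZ + FCP_RSP_SZ +
                           MAX_SG_SEG_CNT * ULP_BDE64_SZ;
            /* regular I/O: one BDE per data segment plus cmnd and rsp BDEs */
            return FCP_CMND_SZ + FCP_RSP_SZ +
                   (sg_seg_cnt + 2) * ULP_BDE64_SZ;
    }

    int main(void)
    {
            printf("non-BG, 64 segments:   %d bytes\n", sg_dma_buf_size(64, 0));
            printf("BlockGuard worst case: %d bytes\n", sg_dma_buf_size(64, 1));
            return 0;
    }

The BlockGuard branch sizes the pool for the maximum BDE count because the number of protection segments is outside the driver's control, exactly as the comment in the hunk explains.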
@@ -4789,13 +4865,13 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
 static int
 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 {
+       struct lpfc_vector_map_info *cpup;
        struct lpfc_sli *psli;
        LPFC_MBOXQ_t *mboxq;
-       int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
+       int rc, i, hbq_count, max_buf_size;
        uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
        struct lpfc_mqe *mqe;
-       int longs, sli_family;
-       int sges_per_segment;
+       int longs;
 
        /* Before proceed, wait for POST done and device ready */
        rc = lpfc_sli4_post_status_check(phba);
@@ -4863,11 +4939,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
 
-       /* With BlockGuard we can have multiple SGEs per Data Segemnt */
-       sges_per_segment = 1;
-       if (phba->cfg_enable_bg)
-               sges_per_segment = 2;
-
        /*
         * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
         * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
@@ -4878,43 +4949,71 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                        sizeof(struct lpfc_sli_ring), GFP_KERNEL);
        if (!phba->sli.ring)
                return -ENOMEM;
+
        /*
-        * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+        * No matter what family our adapter is in, we are
+        * limited to 2 pages (512 SGEs) for our SGL.
+        * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
+        */
+       max_buf_size = (2 * SLI4_PAGE_SIZE);
+       if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
+               phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+
+       /*
+        * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
         * used to create the sg_dma_buf_pool must be dynamically calculated.
-        * 2 segments are added since the IOCB needs a command and response bde.
-        * To insure that the scsi sgl does not cross a 4k page boundary only
-        * sgl sizes of must be a power of 2.
         */
-       buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
-                   (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
-                   sizeof(struct sli4_sge)));
-
-       sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
-       max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
-       switch (sli_family) {
-       case LPFC_SLI_INTF_FAMILY_BE2:
-       case LPFC_SLI_INTF_FAMILY_BE3:
-               /* There is a single hint for BE - 2 pages per BPL. */
-               if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
-                   LPFC_SLI_INTF_SLI_HINT1_1)
-                       max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
-               break;
-       case LPFC_SLI_INTF_FAMILY_LNCR_A0:
-       case LPFC_SLI_INTF_FAMILY_LNCR_B0:
-       default:
-               break;
+
+       if (phba->cfg_enable_bg) {
+               /*
+                * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
+                * the FCP rsp, and an SGE for each. Since we have no control
+                * over how many protection data segments the SCSI Layer
+                * will hand us (i.e. there could be one for every block
+                * in the IO), we just allocate enough SGEs to accommodate
+                * our max amount and we need to limit lpfc_sg_seg_cnt to
+                * minimize the risk of running out.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) + max_buf_size;
+
+               /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
+               phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
+
+               if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
+                       phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+       } else {
+               /*
+                * The scsi_buf for a regular I/O will hold the FCP cmnd,
+                * the FCP rsp, a SGE for each, and a SGE for up to
+                * cfg_sg_seg_cnt data segments.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) +
+                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
+
+               /* Total SGEs for scsi_sg_list */
+               phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+               /*
+                * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
+                * to post 1 page for the SGL.
+                */
        }
 
-       for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
-            dma_buf_size < max_buf_size && buf_size > dma_buf_size;
-            dma_buf_size = dma_buf_size << 1)
-               ;
-       if (dma_buf_size == max_buf_size)
-               phba->cfg_sg_seg_cnt = (dma_buf_size -
-                       sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
-                       (2 * sizeof(struct sli4_sge))) /
-                               sizeof(struct sli4_sge);
-       phba->cfg_sg_dma_buf_size = dma_buf_size;
+       /* Initialize the host templates with the updated values. */
+       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+       if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
+               phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
+       else
+               phba->cfg_sg_dma_buf_size =
+                       SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+                       "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
+                       phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+                       phba->cfg_total_seg_cnt);
 
        /* Initialize buffer queue management fields */
        hbq_count = lpfc_sli_hbq_count();
@@ -5104,6 +5203,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                goto out_free_fcp_eq_hdl;
        }
 
+       phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
+                                        phba->sli4_hba.num_present_cpu),
+                                        GFP_KERNEL);
+       if (!phba->sli4_hba.cpu_map) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3327 Failed allocate memory for msi-x "
+                               "interrupt vector mapping\n");
+               rc = -ENOMEM;
+               goto out_free_msix;
+       }
+       /* Initialize io channels for round robin */
+       cpup = phba->sli4_hba.cpu_map;
+       rc = 0;
+       for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+               cpup->channel_id = rc;
+               rc++;
+               if (rc >= phba->cfg_fcp_io_channel)
+                       rc = 0;
+       }
+
        /*
         * Enable sr-iov virtual functions if supported and configured
         * through the module parameter.
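
The new loop above seeds a default IO channel for every present CPU by cycling through the configured channel count, so CPU i ends up on channel i modulo cfg_fcp_io_channel. The same assignment reduced to a stand-alone sketch with illustrative counts:

    #include <stdio.h>

    int main(void)
    {
            int nr_cpu = 8, nr_chan = 3;    /* illustrative counts */
            int chan[8];
            int cpu, next = 0;

            for (cpu = 0; cpu < nr_cpu; cpu++) {
                    chan[cpu] = next;
                    if (++next >= nr_chan)
                            next = 0;       /* wrap around the channels */
            }

            for (cpu = 0; cpu < nr_cpu; cpu++)
                    printf("cpu %d -> io channel %d\n", cpu, chan[cpu]);
            return 0;
    }

In the driver this only provides the fallback mapping; lpfc_sli4_set_affinity() later overrides it for CPUs that receive an MSI-X vector.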
@@ -5123,6 +5242,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
        return 0;
 
+out_free_msix:
+       kfree(phba->sli4_hba.msix_entries);
 out_free_fcp_eq_hdl:
        kfree(phba->sli4_hba.fcp_eq_hdl);
 out_free_fcf_rr_bmask:
@@ -5152,6 +5273,11 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 {
        struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
 
+       /* Free memory allocated for msi-x interrupt vector to CPU mapping */
+       kfree(phba->sli4_hba.cpu_map);
+       phba->sli4_hba.num_present_cpu = 0;
+       phba->sli4_hba.num_online_cpu = 0;
+
        /* Free memory allocated for msi-x interrupt vector entries */
        kfree(phba->sli4_hba.msix_entries);
 
@@ -5260,8 +5386,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
        init_waitqueue_head(&phba->work_waitq);
 
        /* Initialize the scsi buffer list used by driver for scsi IO */
-       spin_lock_init(&phba->scsi_buf_list_lock);
-       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+       spin_lock_init(&phba->scsi_buf_list_get_lock);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+       spin_lock_init(&phba->scsi_buf_list_put_lock);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 
        /* Initialize the fabric iocb list */
        INIT_LIST_HEAD(&phba->fabric_iocb_list);
@@ -6696,6 +6824,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
        int cfg_fcp_io_channel;
        uint32_t cpu;
        uint32_t i = 0;
+       uint32_t j = 0;
 
 
        /*
@@ -6706,15 +6835,21 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
        /* Sanity check on HBA EQ parameters */
        cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
 
-       /* It doesn't make sense to have more io channels then CPUs */
-       for_each_online_cpu(cpu) {
-               i++;
+       /* It doesn't make sense to have more io channels than online CPUs */
+       for_each_present_cpu(cpu) {
+               if (cpu_online(cpu))
+                       i++;
+               j++;
        }
+       phba->sli4_hba.num_online_cpu = i;
+       phba->sli4_hba.num_present_cpu = j;
+
        if (i < cfg_fcp_io_channel) {
                lpfc_printf_log(phba,
                                KERN_ERR, LOG_INIT,
                                "3188 Reducing IO channels to match number of "
-                               "CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
+                               "online CPUs: from %d to %d\n",
+                               cfg_fcp_io_channel, i);
                cfg_fcp_io_channel = i;
        }
 
@@ -7743,8 +7878,13 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
 
 out:
        /* Catch the not-ready port failure after a port reset. */
-       if (num_resets >= MAX_IF_TYPE_2_RESETS)
+       if (num_resets >= MAX_IF_TYPE_2_RESETS) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3317 HBA not functional: IP Reset Failed "
+                               "after (%d) retries, try: "
+                               "echo fw_reset > board_mode\n", num_resets);
                rc = -ENODEV;
+       }
 
        return rc;
 }
@@ -8208,6 +8348,269 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
        return;
 }
 
+/**
+ * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Find next available CPU to use for IRQ to CPU affinity.
+ */
+static int
+lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
+{
+       struct lpfc_vector_map_info *cpup;
+       int cpu;
+
+       cpup = phba->sli4_hba.cpu_map;
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+               /* CPU must be online */
+               if (cpu_online(cpu)) {
+                       if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
+                           (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
+                           (cpup->phys_id == phys_id)) {
+                               return cpu;
+                       }
+               }
+               cpup++;
+       }
+
+       /*
+        * If we get here, we have used ALL CPUs for the specific
+        * phys_id. Now we need to clear out lpfc_used_cpu and start
+        * reusing CPUs.
+        */
+
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+               if (lpfc_used_cpu[cpu] == phys_id)
+                       lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
+       }
+
+       cpup = phba->sli4_hba.cpu_map;
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+               /* CPU must be online */
+               if (cpu_online(cpu)) {
+                       if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
+                           (cpup->phys_id == phys_id)) {
+                               return cpu;
+                       }
+               }
+               cpup++;
+       }
+       return LPFC_VECTOR_MAP_EMPTY;
+}
+
+/**
+ * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
+ * @phba:      pointer to lpfc hba data structure.
+ * @vectors:   number of HBA vectors
+ *
+ * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
+ * affinity across multiple physical CPUs (NUMA nodes).
+ * In addition, this routine will assign an IO channel for each CPU
+ * to use when issuing I/Os.
+ */
+static int
+lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
+{
+       int i, idx, saved_chann, used_chann, cpu, phys_id;
+       int max_phys_id, num_io_channel, first_cpu;
+       struct lpfc_vector_map_info *cpup;
+#ifdef CONFIG_X86
+       struct cpuinfo_x86 *cpuinfo;
+#endif
+       struct cpumask *mask;
+       uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
+
+       /* If there is no mapping, just return */
+       if (!phba->cfg_fcp_cpu_map)
+               return 1;
+
+       /* Init cpu_map array */
+       memset(phba->sli4_hba.cpu_map, 0xff,
+              (sizeof(struct lpfc_vector_map_info) *
+               phba->sli4_hba.num_present_cpu));
+
+       max_phys_id = 0;
+       phys_id = 0;
+       num_io_channel = 0;
+       first_cpu = LPFC_VECTOR_MAP_EMPTY;
+
+       /* Update CPU map with physical id and core id of each CPU */
+       cpup = phba->sli4_hba.cpu_map;
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+#ifdef CONFIG_X86
+               cpuinfo = &cpu_data(cpu);
+               cpup->phys_id = cpuinfo->phys_proc_id;
+               cpup->core_id = cpuinfo->cpu_core_id;
+#else
+               /* No distinction between CPUs for other platforms */
+               cpup->phys_id = 0;
+               cpup->core_id = 0;
+#endif
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3328 CPU physid %d coreid %d\n",
+                               cpup->phys_id, cpup->core_id);
+
+               if (cpup->phys_id > max_phys_id)
+                       max_phys_id = cpup->phys_id;
+               cpup++;
+       }
+
+       /* Now associate the HBA vectors with specific CPUs */
+       for (idx = 0; idx < vectors; idx++) {
+               cpup = phba->sli4_hba.cpu_map;
+               cpu = lpfc_find_next_cpu(phba, phys_id);
+               if (cpu == LPFC_VECTOR_MAP_EMPTY) {
+
+                       /* Try for all phys_id's */
+                       for (i = 1; i < max_phys_id; i++) {
+                               phys_id++;
+                               if (phys_id > max_phys_id)
+                                       phys_id = 0;
+                               cpu = lpfc_find_next_cpu(phba, phys_id);
+                               if (cpu == LPFC_VECTOR_MAP_EMPTY)
+                                       continue;
+                               goto found;
+                       }
+
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3329 Cannot set affinity: "
+                                       "Error mapping vector %d (%d)\n",
+                                       idx, vectors);
+                       return 0;
+               }
+found:
+               cpup += cpu;
+               if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
+                       lpfc_used_cpu[cpu] = phys_id;
+
+               /* Associate vector with selected CPU */
+               cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
+
+               /* Associate IO channel with selected CPU */
+               cpup->channel_id = idx;
+               num_io_channel++;
+
+               if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
+                       first_cpu = cpu;
+
+               /* Now affinitize to the selected CPU */
+               mask = &cpup->maskbits;
+               cpumask_clear(mask);
+               cpumask_set_cpu(cpu, mask);
+               i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
+                                         vector, mask);
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3330 Set Affinity: CPU %d channel %d "
+                               "irq %d (%x)\n",
+                               cpu, cpup->channel_id,
+                               phba->sli4_hba.msix_entries[idx].vector, i);
+
+               /* Spread vector mapping across multiple physical CPU nodes */
+               phys_id++;
+               if (phys_id > max_phys_id)
+                       phys_id = 0;
+       }
+
+       /*
+        * Finally fill in the IO channel for any remaining CPUs.
+        * At this point, all IO channels have been assigned to a specific
+        * MSIx vector, mapped to a specific CPU.
+        * Base the remaining IO channel assignments on the IO channels
+        * already assigned to other CPUs on the same phys_id.
+        */
+       for (i = 0; i <= max_phys_id; i++) {
+               /*
+                * If there are no io channels already mapped to
+                * this phys_id, just round robin through the io_channels.
+                * Setup chann[] for round robin.
+                */
+               for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+                       chann[idx] = idx;
+
+               saved_chann = 0;
+               used_chann = 0;
+
+               /*
+                * First build a list of IO channels already assigned
+                * to this phys_id before reassigning the same IO
+                * channels to the remaining CPUs.
+                */
+               cpup = phba->sli4_hba.cpu_map;
+               cpu = first_cpu;
+               cpup += cpu;
+               for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
+                    idx++) {
+                       if (cpup->phys_id == i) {
+                               /*
+                                * Save any IO channels that are
+                                * already mapped to this phys_id.
+                                */
+                               if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
+                                       chann[saved_chann] =
+                                               cpup->channel_id;
+                                       saved_chann++;
+                                       goto out;
+                               }
+
+                               /* See if we are using round-robin */
+                               if (saved_chann == 0)
+                                       saved_chann =
+                                               phba->cfg_fcp_io_channel;
+
+                               /* Associate next IO channel with CPU */
+                               cpup->channel_id = chann[used_chann];
+                               num_io_channel++;
+                               used_chann++;
+                               if (used_chann == saved_chann)
+                                       used_chann = 0;
+
+                               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                                               "3331 Set IO_CHANN "
+                                               "CPU %d channel %d\n",
+                                               idx, cpup->channel_id);
+                       }
+out:
+                       cpu++;
+                       if (cpu >= phba->sli4_hba.num_present_cpu) {
+                               cpup = phba->sli4_hba.cpu_map;
+                               cpu = 0;
+                       } else {
+                               cpup++;
+                       }
+               }
+       }
+
+       if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
+               cpup = phba->sli4_hba.cpu_map;
+               for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+                       if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
+                               cpup->channel_id = 0;
+                               num_io_channel++;
+
+                               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                                               "3332 Assign IO_CHANN "
+                                               "CPU %d channel %d\n",
+                                               idx, cpup->channel_id);
+                       }
+                       cpup++;
+               }
+       }
+
+       /* Sanity check */
+       if (num_io_channel != phba->sli4_hba.num_present_cpu)
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3333 Set affinity mismatch: "
+                               "%d chann != %d cpus: %d vectors\n",
+                               num_io_channel, phba->sli4_hba.num_present_cpu,
+                               vectors);
+
+       phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
+       return 1;
+}
+
+
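
lpfc_find_next_cpu() above looks for an online CPU on the requested physical package that has not yet been handed a vector, and recycles the package's "used" marks once every CPU on it has been consumed. A stand-alone sketch of that selection logic, with plain arrays standing in for the driver's cpu_map entries and the online/irq bookkeeping folded into a single "used" mark:

    #include <stdio.h>

    #define EMPTY (-1)

    /* Return the next unused CPU on package phys_id; once every CPU on
     * that package has been used, clear the marks and hand them out again.
     * Returns EMPTY only if the package has no CPUs at all. */
    static int find_next_cpu(int ncpu, const int *cpu_pkg, int *used, int phys_id)
    {
            int cpu, pass;

            for (pass = 0; pass < 2; pass++) {
                    for (cpu = 0; cpu < ncpu; cpu++)
                            if (cpu_pkg[cpu] == phys_id && used[cpu] == EMPTY)
                                    return cpu;
                    for (cpu = 0; cpu < ncpu; cpu++)
                            if (used[cpu] == phys_id)
                                    used[cpu] = EMPTY;
            }
            return EMPTY;
    }

    int main(void)
    {
            int cpu_pkg[6] = { 0, 0, 0, 1, 1, 1 };  /* CPU -> physical package */
            int used[6] = { EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY };
            int vec, pkg = 0;

            for (vec = 0; vec < 4; vec++) {         /* four MSI-X vectors */
                    int cpu = find_next_cpu(6, cpu_pkg, used, pkg);

                    if (cpu == EMPTY)
                            break;
                    used[cpu] = pkg;
                    printf("vector %d -> cpu %d (package %d)\n", vec, cpu, pkg);
                    pkg = (pkg + 1) % 2;            /* alternate packages */
            }
            return 0;
    }

The driver additionally distinguishes CPUs whose IRQ slot is already taken from CPUs marked in lpfc_used_cpu; the sketch collapses both conditions for brevity.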
 /**
  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
  * @phba: pointer to lpfc hba data structure.
@@ -8259,9 +8662,7 @@ enable_msix_vectors:
                                phba->sli4_hba.msix_entries[index].vector,
                                phba->sli4_hba.msix_entries[index].entry);
 
-       /*
-        * Assign MSI-X vectors to interrupt handlers
-        */
+       /* Assign MSI-X vectors to interrupt handlers */
        for (index = 0; index < vectors; index++) {
                memset(&phba->sli4_hba.handler_name[index], 0, 16);
                sprintf((char *)&phba->sli4_hba.handler_name[index],
@@ -8289,6 +8690,8 @@ enable_msix_vectors:
                                phba->cfg_fcp_io_channel, vectors);
                phba->cfg_fcp_io_channel = vectors;
        }
+
+       lpfc_sli4_set_affinity(phba, vectors);
        return rc;
 
 cfg_fail_out:
@@ -9213,15 +9616,15 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);
 
+       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
+       lpfc_sli_flush_fcp_rings(phba);
+
        /* stop all timers */
        lpfc_stop_hba_timers(phba);
 
        /* Disable interrupt and pci device */
        lpfc_sli_disable_intr(phba);
        pci_disable_device(phba->pcidev);
-
-       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
-       lpfc_sli_flush_fcp_rings(phba);
 }
 
 /**
@@ -9966,6 +10369,9 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);
 
+       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
+       lpfc_sli_flush_fcp_rings(phba);
+
        /* stop all timers */
        lpfc_stop_hba_timers(phba);
 
@@ -9973,9 +10379,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
        lpfc_sli4_disable_intr(phba);
        lpfc_sli4_queue_destroy(phba);
        pci_disable_device(phba->pcidev);
-
-       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
-       lpfc_sli_flush_fcp_rings(phba);
 }
 
 /**
@@ -10535,6 +10938,7 @@ static struct miscdevice lpfc_mgmt_dev = {
 static int __init
 lpfc_init(void)
 {
+       int cpu;
        int error = 0;
 
        printk(LPFC_MODULE_DESC "\n");
@@ -10561,6 +10965,11 @@ lpfc_init(void)
                        return -ENOMEM;
                }
        }
+
+       /* Initialize in case vector mapping is needed */
+       for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++)
+               lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
+
        error = pci_register_driver(&lpfc_driver);
        if (error) {
                fc_release_transport(lpfc_transport_template);
index baf53e6c2bd15bfbbf8c25af5face63f1c9fe006..2a4e5d21eab2ad3c10f45dd7bece5c5a7ae02e06 100644 (file)
@@ -37,6 +37,7 @@
 #define LOG_EVENT      0x00010000      /* CT,TEMP,DUMP, logging */
 #define LOG_FIP                0x00020000      /* FIP events */
 #define LOG_FCP_UNDER  0x00040000      /* FCP underruns errors */
+#define LOG_SCSI_CMD   0x00080000      /* ALL SCSI commands */
 #define LOG_ALL_MSG    0xffffffff      /* LOG all messages */
 
 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
index a7a9fa468308bda8ddaa97d196fa23ce75133824..41363db7d42628b9af9ec5e1a1ab7f927ab6aff9 100644 (file)
@@ -2149,18 +2149,21 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
 
        /* Only FC supports upd bit */
        if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
-           (vport->fc_flag & FC_VFI_REGISTERED)) {
+           (vport->fc_flag & FC_VFI_REGISTERED) &&
+           (!phba->fc_topology_changed)) {
                bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
                bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
        }
        lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
                        "3134 Register VFI, mydid:x%x, fcfi:%d, "
-                       " vfi:%d, vpi:%d, fc_pname:%x%x\n",
+                       " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
+                       " port_state:x%x topology chg:%d\n",
                        vport->fc_myDID,
                        phba->fcf.fcfi,
                        phba->sli4_hba.vfi_ids[vport->vfi],
                        phba->vpi_ids[vport->vpi],
-                       reg_vfi->wwn[0], reg_vfi->wwn[1]);
+                       reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
+                       vport->port_state, phba->fc_topology_changed);
 }
 
 /**
index cd86069a0ba82a2954238139e8626283fac56d07..812d0cd7c86dcc8c9c3fe86f3ff6b8079ba850ab 100644 (file)
@@ -64,18 +64,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
        int i;
 
-       if (phba->sli_rev == LPFC_SLI_REV4)
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               /* Calculate alignment */
+               if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
+                       i = phba->cfg_sg_dma_buf_size;
+               else
+                       i = SLI4_PAGE_SIZE;
+
                phba->lpfc_scsi_dma_buf_pool =
                        pci_pool_create("lpfc_scsi_dma_buf_pool",
                                phba->pcidev,
                                phba->cfg_sg_dma_buf_size,
-                               phba->cfg_sg_dma_buf_size,
+                               i,
                                0);
-       else
+       } else {
                phba->lpfc_scsi_dma_buf_pool =
                        pci_pool_create("lpfc_scsi_dma_buf_pool",
                                phba->pcidev, phba->cfg_sg_dma_buf_size,
                                align, 0);
+       }
+
        if (!phba->lpfc_scsi_dma_buf_pool)
                goto fail;
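
The lpfc_mem_alloc() change above picks the SLI4 DMA pool alignment as the smaller of the buffer size and one SLI4 page. The choice reduced to a stand-alone helper; the 4096-byte SLI4_PAGE_SIZE below is an assumption for illustration:

    #include <stdio.h>
    #include <stddef.h>

    #define SLI4_PAGE_SIZE 4096     /* assumed value, for illustration only */

    static size_t pool_align(size_t buf_size)
    {
            return buf_size < SLI4_PAGE_SIZE ? buf_size : SLI4_PAGE_SIZE;
    }

    int main(void)
    {
            printf("align for 2048-byte buffers: %zu\n", pool_align(2048));
            printf("align for 8192-byte buffers: %zu\n", pool_align(8192));
            return 0;
    }

This pairs with the later page-alignment check in lpfc_new_scsi_buf_s4(), which frees any buffer that still comes back misaligned.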
 
index 82f4d3542289f6a8fab4cc4f99a5b5d3c2604cf1..31e9b92f5a9bebc181e0fffec874d95c94dac6e9 100644 (file)
@@ -332,9 +332,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
        /* PLOGI chkparm OK */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                        "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+                        "0114 PLOGI chkparm OK Data: x%x x%x x%x "
+                        "x%x x%x x%x\n",
                         ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
-                        ndlp->nlp_rpi);
+                        ndlp->nlp_rpi, vport->port_state,
+                        vport->fc_flag);
 
        if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
                ndlp->nlp_fcp_info |= CLASS2;
@@ -574,7 +576,7 @@ out:
        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 
        /* 1 sec timeout */
-       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
 
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -631,7 +633,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                         * If there are other active VLinks present,
                         * re-instantiate the Vlink using FDISC.
                         */
-                       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+                       mod_timer(&ndlp->nlp_delayfunc,
+                                 jiffies + msecs_to_jiffies(1000));
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_DELAY_TMO;
                        spin_unlock_irq(shost->host_lock);
@@ -648,7 +651,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
                (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
                /* Only try to re-login if this is NOT a Fabric Node */
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000 * 1));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                spin_unlock_irq(shost->host_lock);
@@ -969,7 +973,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        }
 
        /* Put ndlp in npr state set plogi timer for 1 sec */
-       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);
@@ -1303,7 +1307,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
        if ((irsp->ulpStatus) ||
            (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
                /* 1 sec timeout */
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                spin_unlock_irq(shost->host_lock);
@@ -1509,7 +1514,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
                }
 
                /* Put ndlp in npr state set plogi timer for 1 sec */
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000 * 1));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                spin_unlock_irq(shost->host_lock);
@@ -2145,7 +2151,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 
        if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000 * 1));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
index 74b8710e1e90499eadcbfc72580b12a4c223c18e..8523b278ec9daf1c2d37ffedf08335b32701cb53 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/export.h>
 #include <linux/delay.h>
 #include <asm/unaligned.h>
+#include <linux/crc-t10dif.h>
+#include <net/checksum.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -48,7 +50,7 @@
 #define LPFC_RESET_WAIT  2
 #define LPFC_ABORT_WAIT  2
 
-int _dump_buf_done;
+int _dump_buf_done = 1;
 
 static char *dif_op_str[] = {
        "PROT_NORMAL",
@@ -66,6 +68,10 @@ struct scsi_dif_tuple {
        __be32 ref_tag;         /* Target LBA or indirect LBA */
 };
 
+#if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK)
+#define scsi_prot_flagged(sc, flg)     sc
+#endif
+
 static void
 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
 static void
@@ -534,7 +540,16 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
        dma_addr_t pdma_phys_fcp_rsp;
        dma_addr_t pdma_phys_bpl;
        uint16_t iotag;
-       int bcnt;
+       int bcnt, bpl_size;
+
+       bpl_size = phba->cfg_sg_dma_buf_size -
+               (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                        "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+                        num_to_alloc, phba->cfg_sg_dma_buf_size,
+                        (int)sizeof(struct fcp_cmnd),
+                        (int)sizeof(struct fcp_rsp), bpl_size);
 
        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
                psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@@ -759,7 +774,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
                             struct list_head *post_sblist, int sb_count)
 {
        struct lpfc_scsi_buf *psb, *psb_next;
-       int status;
+       int status, sgl_size;
        int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
        dma_addr_t pdma_phys_bpl1;
        int last_xritag = NO_XRI;
@@ -771,6 +786,9 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
        if (sb_count <= 0)
                return -EINVAL;
 
+       sgl_size = phba->cfg_sg_dma_buf_size -
+               (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
        list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
                list_del_init(&psb->list);
                block_cnt++;
@@ -803,7 +821,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
                                post_cnt = block_cnt;
                        } else if (block_cnt == 1) {
                                /* last single sgl with non-contiguous xri */
-                               if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+                               if (sgl_size > SGL_PAGE_SIZE)
                                        pdma_phys_bpl1 = psb->dma_phys_bpl +
                                                                SGL_PAGE_SIZE;
                                else
@@ -885,9 +903,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
        int num_posted, rc = 0;
 
        /* get all SCSI buffers need to repost to a local list */
-       spin_lock_irq(&phba->scsi_buf_list_lock);
-       list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
-       spin_unlock_irq(&phba->scsi_buf_list_lock);
+       spin_lock_irq(&phba->scsi_buf_list_get_lock);
+       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
+       list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
+       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        /* post the list of scsi buffer sgls to port if available */
        if (!list_empty(&post_sblist)) {
@@ -923,13 +944,22 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
        IOCB_t *iocb;
        dma_addr_t pdma_phys_fcp_cmd;
        dma_addr_t pdma_phys_fcp_rsp;
-       dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
+       dma_addr_t pdma_phys_bpl;
        uint16_t iotag, lxri = 0;
-       int bcnt, num_posted;
+       int bcnt, num_posted, sgl_size;
        LIST_HEAD(prep_sblist);
        LIST_HEAD(post_sblist);
        LIST_HEAD(scsi_sblist);
 
+       sgl_size = phba->cfg_sg_dma_buf_size -
+               (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                        "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+                        num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
+                        (int)sizeof(struct fcp_cmnd),
+                        (int)sizeof(struct fcp_rsp));
+
        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
                psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
                if (!psb)
@@ -948,6 +978,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                }
                memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
+               /* Page alignment is CRITICAL, double check to be sure */
+               if (((unsigned long)(psb->data) &
+                   (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
+                       pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+                                     psb->data, psb->dma_handle);
+                       kfree(psb);
+                       break;
+               }
+
                /* Allocate iotag for psb->cur_iocbq. */
                iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
                if (iotag == 0) {
@@ -968,17 +1007,14 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
                psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
                psb->fcp_bpl = psb->data;
-               psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
-                       - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+               psb->fcp_cmnd = (psb->data + sgl_size);
                psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
                                        sizeof(struct fcp_cmnd));
 
                /* Initialize local short-hand pointers. */
                sgl = (struct sli4_sge *)psb->fcp_bpl;
                pdma_phys_bpl = psb->dma_handle;
-               pdma_phys_fcp_cmd =
-                       (psb->dma_handle + phba->cfg_sg_dma_buf_size)
-                        - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+               pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
                pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
 
                /*
@@ -1020,17 +1056,13 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                iocb->ulpLe = 1;
                iocb->ulpClass = CLASS3;
                psb->cur_iocbq.context1 = psb;
-               if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
-                       pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
-               else
-                       pdma_phys_bpl1 = 0;
                psb->dma_phys_bpl = pdma_phys_bpl;
 
                /* add the scsi buffer to a post list */
                list_add_tail(&psb->list, &post_sblist);
-               spin_lock_irq(&phba->scsi_buf_list_lock);
+               spin_lock_irq(&phba->scsi_buf_list_get_lock);
                phba->sli4_hba.scsi_xri_cnt++;
-               spin_unlock_irq(&phba->scsi_buf_list_lock);
+               spin_unlock_irq(&phba->scsi_buf_list_get_lock);
        }
        lpfc_printf_log(phba, KERN_INFO, LOG_BG,
                        "3021 Allocate %d out of %d requested new SCSI "
@@ -1079,17 +1111,23 @@ static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        struct  lpfc_scsi_buf * lpfc_cmd = NULL;
-       struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
-       unsigned long iflag = 0;
-
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-       list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
-       if (lpfc_cmd) {
-               lpfc_cmd->seg_cnt = 0;
-               lpfc_cmd->nonsg_phys = 0;
-               lpfc_cmd->prot_seg_cnt = 0;
+       struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
+       unsigned long gflag = 0;
+       unsigned long pflag = 0;
+
+       spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+       list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
+                        list);
+       if (!lpfc_cmd) {
+               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+               list_splice(&phba->lpfc_scsi_buf_list_put,
+                           &phba->lpfc_scsi_buf_list_get);
+               INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+               list_remove_head(scsi_buf_list_get, lpfc_cmd,
+                                struct lpfc_scsi_buf, list);
+               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
        }
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
        return  lpfc_cmd;
 }
 /**
@@ -1107,28 +1145,39 @@ static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        struct lpfc_scsi_buf *lpfc_cmd ;
-       unsigned long iflag = 0;
+       unsigned long gflag = 0;
+       unsigned long pflag = 0;
        int found = 0;
 
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-       list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
-                                                       list) {
+       spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+       list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) {
                if (lpfc_test_rrq_active(phba, ndlp,
                                         lpfc_cmd->cur_iocbq.sli4_lxritag))
                        continue;
                list_del(&lpfc_cmd->list);
                found = 1;
-               lpfc_cmd->seg_cnt = 0;
-               lpfc_cmd->nonsg_phys = 0;
-               lpfc_cmd->prot_seg_cnt = 0;
                break;
        }
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
-                                                iflag);
+       if (!found) {
+               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+               list_splice(&phba->lpfc_scsi_buf_list_put,
+                           &phba->lpfc_scsi_buf_list_get);
+               INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+               list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get,
+                                   list) {
+                       if (lpfc_test_rrq_active(
+                               phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
+                               continue;
+                       list_del(&lpfc_cmd->list);
+                       found = 1;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
        if (!found)
                return NULL;
-       else
-               return  lpfc_cmd;
+       return  lpfc_cmd;
 }
 /**
  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
@@ -1160,10 +1209,15 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
        unsigned long iflag = 0;
 
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+       psb->seg_cnt = 0;
+       psb->nonsg_phys = 0;
+       psb->prot_seg_cnt = 0;
+
+       spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
        psb->pCmd = NULL;
-       list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+       list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
 }
 
 /**
@@ -1181,6 +1235,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
        unsigned long iflag = 0;
 
+       psb->seg_cnt = 0;
+       psb->nonsg_phys = 0;
+       psb->prot_seg_cnt = 0;
+
        if (psb->exch_busy) {
                spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
                                        iflag);
@@ -1190,11 +1248,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
                spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
                                        iflag);
        } else {
-
-               spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
                psb->pCmd = NULL;
-               list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
-               spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+               psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+               list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
        }
 }
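
Taken together, the rewritten get and release paths form a two-list pool: allocation pops from the get list under the get lock, release pushes onto the put list under the put lock, and the put list is spliced over only when the get list runs dry. A stand-alone sketch of that fast-path pattern, reusing the illustrative buf_pool layout from the drain sketch above:

    #include <pthread.h>
    #include <stddef.h>

    struct buf {
            struct buf *next;
    };

    struct buf_pool {
            pthread_mutex_t get_lock, put_lock;
            struct buf *get_list, *put_list;
    };

    static struct buf *pool_get(struct buf_pool *p)
    {
            struct buf *b;

            pthread_mutex_lock(&p->get_lock);
            b = p->get_list;
            if (!b) {
                    /* get list empty: splice the put list over and retry */
                    pthread_mutex_lock(&p->put_lock);
                    p->get_list = p->put_list;
                    p->put_list = NULL;
                    pthread_mutex_unlock(&p->put_lock);
                    b = p->get_list;
            }
            if (b)
                    p->get_list = b->next;
            pthread_mutex_unlock(&p->get_lock);
            return b;
    }

    static void pool_put(struct buf_pool *p, struct buf *b)
    {
            pthread_mutex_lock(&p->put_lock);
            b->next = p->put_list;
            p->put_list = b;
            pthread_mutex_unlock(&p->put_lock);
    }

In steady state the allocating and releasing sides contend on different locks, which is the point of splitting the original scsi_buf_list_lock.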
 
@@ -1268,6 +1326,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
                               "dma_map_sg.  Config %d, seg_cnt %d\n",
                               __func__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
+                       lpfc_cmd->seg_cnt = 0;
                        scsi_dma_unmap(scsi_cmnd);
                        return 1;
                }
@@ -2013,9 +2072,21 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
        bf_set(pde6_optx, pde6, txop);
        bf_set(pde6_oprx, pde6, rxop);
+
+       /*
+        * We only need to check the data on READs, for WRITEs
+        * protection data is automatically generated, not checked.
+        */
        if (datadir == DMA_FROM_DEVICE) {
-               bf_set(pde6_ce, pde6, checking);
-               bf_set(pde6_re, pde6, checking);
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+                       bf_set(pde6_ce, pde6, checking);
+               else
+                       bf_set(pde6_ce, pde6, 0);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(pde6_re, pde6, checking);
+               else
+                       bf_set(pde6_re, pde6, 0);
        }
        bf_set(pde6_ai, pde6, 1);
        bf_set(pde6_ae, pde6, 0);
@@ -2145,6 +2216,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
        split_offset = 0;
        do {
+               /* Check to see if we ran out of space */
+               if (num_bde >= (phba->cfg_total_seg_cnt - 2))
+                       return num_bde + 3;
+
                /* setup PDE5 with what we have */
                pde5 = (struct lpfc_pde5 *) bpl;
                memset(pde5, 0, sizeof(struct lpfc_pde5));
@@ -2164,8 +2239,17 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
                bf_set(pde6_optx, pde6, txop);
                bf_set(pde6_oprx, pde6, rxop);
-               bf_set(pde6_ce, pde6, checking);
-               bf_set(pde6_re, pde6, checking);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+                       bf_set(pde6_ce, pde6, checking);
+               else
+                       bf_set(pde6_ce, pde6, 0);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(pde6_re, pde6, checking);
+               else
+                       bf_set(pde6_re, pde6, 0);
+
                bf_set(pde6_ai, pde6, 1);
                bf_set(pde6_ae, pde6, 0);
                bf_set(pde6_apptagval, pde6, 0);
@@ -2213,6 +2297,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                pgdone = 0;
                subtotal = 0; /* total bytes processed for current prot grp */
                while (!pgdone) {
+                       /* Check to see if we ran out of space */
+                       if (num_bde >= phba->cfg_total_seg_cnt)
+                               return num_bde + 1;
+
                        if (!sgde) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                                        "9065 BLKGRD:%s Invalid data segment\n",
@@ -2324,7 +2412,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        struct sli4_sge_diseed *diseed = NULL;
        dma_addr_t physaddr;
        int i = 0, num_sge = 0, status;
-       int datadir = sc->sc_data_direction;
        uint32_t reftag;
        unsigned blksize;
        uint8_t txop, rxop;
@@ -2362,13 +2449,26 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        diseed->ref_tag = cpu_to_le32(reftag);
        diseed->ref_tag_tran = diseed->ref_tag;
 
+       /*
+        * We only need to check the data on READs, for WRITEs
+        * protection data is automatically generated, not checked.
+        */
+       if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+               else
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+               else
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+       }
+
        /* setup DISEED with the rest of the info */
        bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
        bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
-       if (datadir == DMA_FROM_DEVICE) {
-               bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
-               bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
-       }
+
        bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
        bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
 
@@ -2497,6 +2597,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
        split_offset = 0;
        do {
+               /* Check to see if we ran out of space */
+               if (num_sge >= (phba->cfg_total_seg_cnt - 2))
+                       return num_sge + 3;
+
                /* setup DISEED with what we have */
                diseed = (struct sli4_sge_diseed *) sgl;
                memset(diseed, 0, sizeof(struct sli4_sge_diseed));
@@ -2506,11 +2610,34 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                diseed->ref_tag = cpu_to_le32(reftag);
                diseed->ref_tag_tran = diseed->ref_tag;
 
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) {
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+
+               } else {
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+                       /*
+                        * When in this mode, the hardware will replace
+                        * the guard tag from the host with a
+                        * newly generated good CRC for the wire.
+                        * Switch to raw mode here to avoid this
+                        * behavior. What the host sends gets put on the wire.
+                        */
+                       if (txop == BG_OP_IN_CRC_OUT_CRC) {
+                               txop = BG_OP_RAW_MODE;
+                               rxop = BG_OP_RAW_MODE;
+                       }
+               }
+
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+               else
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+
                /* setup DISEED with the rest of the info */
                bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
                bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
-               bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
-               bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+
                bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
                bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
 
@@ -2556,6 +2683,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                pgdone = 0;
                subtotal = 0; /* total bytes processed for current prot grp */
                while (!pgdone) {
+                       /* Check to see if we ran out of space */
+                       if (num_sge >= phba->cfg_total_seg_cnt)
+                               return num_sge + 1;
+
                        if (!sgde) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                                        "9086 BLKGRD:%s Invalid data segment\n",
@@ -2669,6 +2800,47 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
        return ret;
 }
 
+/**
+ * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be adjusted.
+ *
+ * Adjust the data length to account for how much data
+ * is actually on the wire.
+ *
+ * returns the adjusted data length
+ **/
+static int
+lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
+                      struct lpfc_scsi_buf *lpfc_cmd)
+{
+       struct scsi_cmnd *sc = lpfc_cmd->pCmd;
+       int fcpdl;
+
+       fcpdl = scsi_bufflen(sc);
+
+       /* Check if there is protection data on the wire */
+       if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+               /* Read */
+               if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
+                       return fcpdl;
+
+       } else {
+               /* Write */
+               if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
+                       return fcpdl;
+       }
+
+       /*
+        * If we are in DIF Type 1 mode, every data block has an 8 byte
+        * DIF (trailer) attached to it. Must adjust the FCP data length.
+        */
+       if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI))
+               fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
+
+       return fcpdl;
+}
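+
+/*
+ * Worked example with hypothetical numbers: for a 4096-byte buffer of
+ * 512-byte blocks with protection data on the wire, the adjustment above
+ * yields 4096 + (4096 / 512) * 8 = 4160, i.e. one 8-byte DIF tuple per
+ * block is added to the FCP data length.
+ */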
+
 /**
  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
  * @phba: The Hba for which this call is being executed.
@@ -2689,8 +2861,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
        uint32_t num_bde = 0;
        int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
        int prot_group_type = 0;
-       int diflen, fcpdl;
-       unsigned blksize;
+       int fcpdl;
 
        /*
         * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
@@ -2711,28 +2882,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
                        return 1;
 
                lpfc_cmd->seg_cnt = datasegcnt;
-               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9067 BLKGRD: %s: Too many sg segments"
-                                       " from dma_map_sg.  Config %d, seg_cnt"
-                                       " %d\n",
-                                       __func__, phba->cfg_sg_seg_cnt,
-                                       lpfc_cmd->seg_cnt);
-                       scsi_dma_unmap(scsi_cmnd);
-                       return 1;
-               }
+
+               /* First check if data segment count from SCSI Layer is good */
+               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+                       goto err;
 
                prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
 
                switch (prot_group_type) {
                case LPFC_PG_TYPE_NO_DIF:
+
+                       /* Here we need to add a PDE5 and PDE6 to the count */
+                       if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
+                               goto err;
+
                        num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
                                        datasegcnt);
                        /* we should have 2 or more entries in buffer list */
                        if (num_bde < 2)
                                goto err;
                        break;
-               case LPFC_PG_TYPE_DIF_BUF:{
+
+               case LPFC_PG_TYPE_DIF_BUF:
                        /*
                         * This type indicates that protection buffers are
                         * passed to the driver, so that needs to be prepared
@@ -2747,31 +2918,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
                        }
 
                        lpfc_cmd->prot_seg_cnt = protsegcnt;
-                       if (lpfc_cmd->prot_seg_cnt
-                           > phba->cfg_prot_sg_seg_cnt) {
-                               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9068 BLKGRD: %s: Too many prot sg "
-                                       "segments from dma_map_sg.  Config %d,"
-                                               "prot_seg_cnt %d\n", __func__,
-                                               phba->cfg_prot_sg_seg_cnt,
-                                               lpfc_cmd->prot_seg_cnt);
-                               dma_unmap_sg(&phba->pcidev->dev,
-                                            scsi_prot_sglist(scsi_cmnd),
-                                            scsi_prot_sg_count(scsi_cmnd),
-                                            datadir);
-                               scsi_dma_unmap(scsi_cmnd);
-                               return 1;
-                       }
+
+                       /*
+                        * There is a minimum of 4 BPLs used for every
+                        * protection data segment.
+                        */
+                       if ((lpfc_cmd->prot_seg_cnt * 4) >
+                           (phba->cfg_total_seg_cnt - 2))
+                               goto err;
 
                        num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
                                        datasegcnt, protsegcnt);
                        /* we should have 3 or more entries in buffer list */
-                       if (num_bde < 3)
+                       if ((num_bde < 3) ||
+                           (num_bde > phba->cfg_total_seg_cnt))
                                goto err;
                        break;
-               }
+
                case LPFC_PG_TYPE_INVALID:
                default:
+                       scsi_dma_unmap(scsi_cmnd);
+                       lpfc_cmd->seg_cnt = 0;
+
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "9022 Unexpected protection group %i\n",
                                        prot_group_type);
@@ -2790,18 +2958,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
        iocb_cmd->ulpBdeCount = 1;
        iocb_cmd->ulpLe = 1;
 
-       fcpdl = scsi_bufflen(scsi_cmnd);
-
-       if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
-               /*
-                * We are in DIF Type 1 mode
-                * Every data block has a 8 byte DIF (trailer)
-                * attached to it.  Must ajust FCP data length
-                */
-               blksize = lpfc_cmd_blksize(scsi_cmnd);
-               diflen = (fcpdl / blksize) * 8;
-               fcpdl += diflen;
-       }
+       fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
        fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
 
        /*
@@ -2812,13 +2969,233 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
 
        return 0;
 err:
+       if (lpfc_cmd->seg_cnt)
+               scsi_dma_unmap(scsi_cmnd);
+       if (lpfc_cmd->prot_seg_cnt)
+               dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+                            scsi_prot_sg_count(scsi_cmnd),
+                            scsi_cmnd->sc_data_direction);
+
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "9023 Could not setup all needed BDE's"
-                       "prot_group_type=%d, num_bde=%d\n",
+                       "9023 Cannot setup S/G List for HBA "
+                       "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
+                       lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+                       phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
                        prot_group_type, num_bde);
+
+       lpfc_cmd->seg_cnt = 0;
+       lpfc_cmd->prot_seg_cnt = 0;
        return 1;
 }
 
+/*
+ * This function calculates the T10 DIF guard tag
+ * on the specified data using the CRC algorithm
+ * provided by crc_t10dif.
+ */
+uint16_t
+lpfc_bg_crc(uint8_t *data, int count)
+{
+       uint16_t crc = 0;
+       uint16_t x;
+
+       crc = crc_t10dif(data, count);
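+       /*
+        * Convert to wire (big-endian) order so the result matches the
+        * guard_tag field of the DIF tuple and lpfc_calc_bg_err() can
+        * compare the two values directly.
+        */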
+       x = cpu_to_be16(crc);
+       return x;
+}
+
+/*
+ * This function calculates the T10 DIF guard tag
+ * on the specified data using the IP checksum algorithm
+ * provided by ip_compute_csum.
+ */
+uint16_t
+lpfc_bg_csum(uint8_t *data, int count)
+{
+       uint16_t ret;
+
+       ret = ip_compute_csum(data, count);
+       return ret;
+}
+
+/*
+ * This function examines the protection data to try to determine
+ * what type of T10-DIF error occurred.
+ */
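+/*
+ * The loop below walks 8-byte DIF tuples (struct scsi_dif_tuple): a
+ * 16-bit guard tag (CRC or IP checksum of the data block), a 16-bit
+ * application tag and a 32-bit reference tag, all in big-endian order.
+ */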
+void
+lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+       struct scatterlist *sgpe; /* s/g prot entry */
+       struct scatterlist *sgde; /* s/g data entry */
+       struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+       struct scsi_dif_tuple *src = NULL;
+       uint8_t *data_src = NULL;
+       uint16_t guard_tag, guard_type;
+       uint16_t start_app_tag, app_tag;
+       uint32_t start_ref_tag, ref_tag;
+       int prot, protsegcnt;
+       int err_type, len, data_len;
+       int chk_ref, chk_app, chk_guard;
+       uint16_t sum;
+       unsigned blksize;
+
+       err_type = BGS_GUARD_ERR_MASK;
+       sum = 0;
+       guard_tag = 0;
+
+       /* First check to see if there is protection data to examine */
+       prot = scsi_get_prot_op(cmd);
+       if ((prot == SCSI_PROT_READ_STRIP) ||
+           (prot == SCSI_PROT_WRITE_INSERT) ||
+           (prot == SCSI_PROT_NORMAL))
+               goto out;
+
+       /* Currently the driver just supports ref_tag and guard_tag checking */
+       chk_ref = 1;
+       chk_app = 0;
+       chk_guard = 0;
+
+       /* Setup a ptr to the protection data provided by the SCSI host */
+       sgpe = scsi_prot_sglist(cmd);
+       protsegcnt = lpfc_cmd->prot_seg_cnt;
+
+       if (sgpe && protsegcnt) {
+
+               /*
+                * We will only try to verify guard tag if the segment
+                * data length is a multiple of the blksize.
+                */
+               sgde = scsi_sglist(cmd);
+               blksize = lpfc_cmd_blksize(cmd);
+               data_src = (uint8_t *)sg_virt(sgde);
+               data_len = sgde->length;
+               if ((data_len & (blksize - 1)) == 0)
+                       chk_guard = 1;
+               guard_type = scsi_host_get_guard(cmd->device->host);
+
+               start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
+               src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+               start_app_tag = src->app_tag;
+               len = sgpe->length;
+               while (src && protsegcnt) {
+                       while (len) {
+
+                               /*
+                                * First check whether this block should be
+                                * checked at all: an all-ones app tag or
+                                * ref tag marks protection data that is to
+                                * be ignored.
+                                */
+                               if ((src->ref_tag == 0xffffffff) ||
+                                   (src->app_tag == 0xffff)) {
+                                       start_ref_tag++;
+                                       goto skipit;
+                               }
+
+                               /* App Tag checking */
+                               app_tag = src->app_tag;
+                               if (chk_app && (app_tag != start_app_tag)) {
+                                       err_type = BGS_APPTAG_ERR_MASK;
+                                       goto out;
+                               }
+
+                               /* Reference Tag checking */
+                               ref_tag = be32_to_cpu(src->ref_tag);
+                               if (chk_ref && (ref_tag != start_ref_tag)) {
+                                       err_type = BGS_REFTAG_ERR_MASK;
+                                       goto out;
+                               }
+                               start_ref_tag++;
+
+                               /* Guard Tag checking */
+                               if (chk_guard) {
+                                       guard_tag = src->guard_tag;
+                                       if (guard_type == SHOST_DIX_GUARD_IP)
+                                               sum = lpfc_bg_csum(data_src,
+                                                                  blksize);
+                                       else
+                                               sum = lpfc_bg_crc(data_src,
+                                                                 blksize);
+                                       if ((guard_tag != sum)) {
+                                               err_type = BGS_GUARD_ERR_MASK;
+                                               goto out;
+                                       }
+                               }
+skipit:
+                               len -= sizeof(struct scsi_dif_tuple);
+                               if (len < 0)
+                                       len = 0;
+                               src++;
+
+                               data_src += blksize;
+                               data_len -= blksize;
+
+                               /*
+                                * Are we at the end of the Data segment?
+                                * The data segment is only used for Guard
+                                * tag checking.
+                                */
+                               if (chk_guard && (data_len == 0)) {
+                                       chk_guard = 0;
+                                       sgde = sg_next(sgde);
+                                       if (!sgde)
+                                               goto out;
+
+                                       data_src = (uint8_t *)sg_virt(sgde);
+                                       data_len = sgde->length;
+                                       if ((data_len & (blksize - 1)) == 0)
+                                               chk_guard = 1;
+                               }
+                       }
+
+                       /* Go to the next protection data segment */
+                       sgpe = sg_next(sgpe);
+                       if (sgpe) {
+                               src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+                               len = sgpe->length;
+                       } else {
+                               src = NULL;
+                       }
+                       protsegcnt--;
+               }
+       }
+out:
+       if (err_type == BGS_GUARD_ERR_MASK) {
+               scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+                                       0x10, 0x1);
+               cmd->result = DRIVER_SENSE << 24
+                       | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+               phba->bg_guard_err_cnt++;
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
+                               (unsigned long)scsi_get_lba(cmd),
+                               sum, guard_tag);
+
+       } else if (err_type == BGS_REFTAG_ERR_MASK) {
+               scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+                                       0x10, 0x3);
+               cmd->result = DRIVER_SENSE << 24
+                       | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+               phba->bg_reftag_err_cnt++;
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
+                               (unsigned long)scsi_get_lba(cmd),
+                               ref_tag, start_ref_tag);
+
+       } else if (err_type == BGS_APPTAG_ERR_MASK) {
+               scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+                                       0x10, 0x2);
+               cmd->result = DRIVER_SENSE << 24
+                       | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+               phba->bg_apptag_err_cnt++;
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
+                               (unsigned long)scsi_get_lba(cmd),
+                               app_tag, start_app_tag);
+       }
+}
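+
+/*
+ * lpfc_calc_bg_err() is called from lpfc_parse_bg_err() below when the
+ * HBA flags a BlockGuard error but none of the specific guard/ref/app
+ * error bits are set (the "9057 Unknown error" path).
+ */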
+
 /*
  * This function checks for BlockGuard errors detected by
  * the HBA.  In case of errors, the ASC/ASCQ fields in the
@@ -2842,12 +3219,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
        uint32_t bgstat = bgf->bgstat;
        uint64_t failing_sector = 0;
 
-       lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
-                       " 0x%x lba 0x%llx blk cnt 0x%x "
-                       "bgstat=0x%x bghm=0x%x\n",
-                       cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
-                       blk_rq_sectors(cmd->request), bgstat, bghm);
-
        spin_lock(&_dump_buf_lock);
        if (!_dump_buf_done) {
                lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
@@ -2870,18 +3241,24 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 
        if (lpfc_bgs_get_invalid_prof(bgstat)) {
                cmd->result = ScsiResult(DID_ERROR, 0);
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
-                       " BlockGuard profile. bgstat:0x%x\n",
-                       bgstat);
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9072 BLKGRD: Invalid BG Profile in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
                ret = (-1);
                goto out;
        }
 
        if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
                cmd->result = ScsiResult(DID_ERROR, 0);
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
-                               "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
-                               bgstat);
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9073 BLKGRD: Invalid BG PDIF Block in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
                ret = (-1);
                goto out;
        }
@@ -2894,8 +3271,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
                cmd->result = DRIVER_SENSE << 24
                        | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
                phba->bg_guard_err_cnt++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9055 BLKGRD: guard_tag error\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9055 BLKGRD: Guard Tag error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -2907,8 +3288,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
                        | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 
                phba->bg_reftag_err_cnt++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9056 BLKGRD: ref_tag error\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9056 BLKGRD: Ref Tag error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -2920,8 +3305,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
                        | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 
                phba->bg_apptag_err_cnt++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9061 BLKGRD: app_tag error\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9061 BLKGRD: App Tag error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -2960,11 +3349,16 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 
        if (!ret) {
                /* No error was reported - problem in FW? */
-               cmd->result = ScsiResult(DID_ERROR, 0);
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9057 BLKGRD: Unknown error reported!\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9057 BLKGRD: Unknown error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
+
+               /* Calculate what type of error it was */
+               lpfc_calc_bg_err(phba, lpfc_cmd);
        }
-
 out:
        return ret;
 }
@@ -3028,6 +3422,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
                                "dma_map_sg.  Config %d, seg_cnt %d\n",
                                __func__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
+                       lpfc_cmd->seg_cnt = 0;
                        scsi_dma_unmap(scsi_cmnd);
                        return 1;
                }
@@ -3093,45 +3488,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
        return 0;
 }
 
-/**
- * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
- * @phba: The Hba for which this call is being executed.
- * @lpfc_cmd: The scsi buffer which is going to be adjusted.
- *
- * Adjust the data length to account for how much data
- * is actually on the wire.
- *
- * returns the adjusted data length
- **/
-static int
-lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
-               struct lpfc_scsi_buf *lpfc_cmd)
-{
-       struct scsi_cmnd *sc = lpfc_cmd->pCmd;
-       int diflen, fcpdl;
-       unsigned blksize;
-
-       fcpdl = scsi_bufflen(sc);
-
-       /* Check if there is protection data on the wire */
-       if (sc->sc_data_direction == DMA_FROM_DEVICE) {
-               /* Read */
-               if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
-                       return fcpdl;
-
-       } else {
-               /* Write */
-               if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
-                       return fcpdl;
-       }
-
-       /* If protection data on the wire, adjust the count accordingly */
-       blksize = lpfc_cmd_blksize(sc);
-       diflen = (fcpdl / blksize) * 8;
-       fcpdl += diflen;
-       return fcpdl;
-}
-
 /**
  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
  * @phba: The Hba for which this call is being executed.
@@ -3149,14 +3505,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
-       uint32_t num_bde = 0;
+       uint32_t num_sge = 0;
        int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
        int prot_group_type = 0;
        int fcpdl;
 
        /*
         * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
-        *  fcp_rsp regions to the first data bde entry
+        *  fcp_rsp regions to the first data sge entry
         */
        if (scsi_sg_count(scsi_cmnd)) {
                /*
@@ -3179,28 +3535,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
 
                sgl += 1;
                lpfc_cmd->seg_cnt = datasegcnt;
-               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9087 BLKGRD: %s: Too many sg segments"
-                                       " from dma_map_sg.  Config %d, seg_cnt"
-                                       " %d\n",
-                                       __func__, phba->cfg_sg_seg_cnt,
-                                       lpfc_cmd->seg_cnt);
-                       scsi_dma_unmap(scsi_cmnd);
-                       return 1;
-               }
+
+               /* First check if data segment count from SCSI Layer is good */
+               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+                       goto err;
 
                prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
 
                switch (prot_group_type) {
                case LPFC_PG_TYPE_NO_DIF:
-                       num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
+                       /* Here we need to add a DISEED to the count */
+                       if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
+                               goto err;
+
+                       num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
                                        datasegcnt);
+
                        /* we should have 2 or more entries in buffer list */
-                       if (num_bde < 2)
+                       if (num_sge < 2)
                                goto err;
                        break;
-               case LPFC_PG_TYPE_DIF_BUF:{
+
+               case LPFC_PG_TYPE_DIF_BUF:
                        /*
                         * This type indicates that protection buffers are
                         * passed to the driver, so that needs to be prepared
@@ -3215,31 +3571,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
                        }
 
                        lpfc_cmd->prot_seg_cnt = protsegcnt;
-                       if (lpfc_cmd->prot_seg_cnt
-                           > phba->cfg_prot_sg_seg_cnt) {
-                               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9088 BLKGRD: %s: Too many prot sg "
-                                       "segments from dma_map_sg.  Config %d,"
-                                               "prot_seg_cnt %d\n", __func__,
-                                               phba->cfg_prot_sg_seg_cnt,
-                                               lpfc_cmd->prot_seg_cnt);
-                               dma_unmap_sg(&phba->pcidev->dev,
-                                            scsi_prot_sglist(scsi_cmnd),
-                                            scsi_prot_sg_count(scsi_cmnd),
-                                            datadir);
-                               scsi_dma_unmap(scsi_cmnd);
-                               return 1;
-                       }
+                       /*
+                        * There is a minimum of 3 SGEs used for every
+                        * protection data segment.
+                        */
+                       if ((lpfc_cmd->prot_seg_cnt * 3) >
+                           (phba->cfg_total_seg_cnt - 2))
+                               goto err;
 
-                       num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
+                       num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
                                        datasegcnt, protsegcnt);
+
                        /* we should have 3 or more entries in buffer list */
-                       if (num_bde < 3)
+                       if ((num_sge < 3) ||
+                           (num_sge > phba->cfg_total_seg_cnt))
                                goto err;
                        break;
-               }
+
                case LPFC_PG_TYPE_INVALID:
                default:
+                       scsi_dma_unmap(scsi_cmnd);
+                       lpfc_cmd->seg_cnt = 0;
+
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "9083 Unexpected protection group %i\n",
                                        prot_group_type);
@@ -3263,7 +3616,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
        }
 
        fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
-
        fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
 
        /*
@@ -3274,10 +3626,22 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
 
        return 0;
 err:
+       if (lpfc_cmd->seg_cnt)
+               scsi_dma_unmap(scsi_cmnd);
+       if (lpfc_cmd->prot_seg_cnt)
+               dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+                            scsi_prot_sg_count(scsi_cmnd),
+                            scsi_cmnd->sc_data_direction);
+
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "9084 Could not setup all needed BDE's"
-                       "prot_group_type=%d, num_bde=%d\n",
-                       prot_group_type, num_bde);
+                       "9084 Cannot setup S/G List for HBA "
+                       "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
+                       lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+                       phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
+                       prot_group_type, num_sge);
+
+       lpfc_cmd->seg_cnt = 0;
+       lpfc_cmd->prot_seg_cnt = 0;
        return 1;
 }
 
@@ -4357,7 +4721,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 
        if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
                if (vport->phba->cfg_enable_bg) {
-                       lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+                       lpfc_printf_vlog(vport,
+                                        KERN_INFO, LOG_SCSI_CMD,
                                         "9033 BLKGRD: rcvd %s cmd:x%x "
                                         "sector x%llx cnt %u pt %x\n",
                                         dif_op_str[scsi_get_prot_op(cmnd)],
@@ -4369,7 +4734,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
                err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
        } else {
                if (vport->phba->cfg_enable_bg) {
-                       lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+                       lpfc_printf_vlog(vport,
+                                        KERN_INFO, LOG_SCSI_CMD,
                                         "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
                                         "x%x sector x%llx cnt %u pt %x\n",
                                         cmnd->cmnd[0],
@@ -4542,7 +4908,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
        /* Wait for abort to complete */
        wait_event_timeout(waitq,
                          (lpfc_cmd->pCmd != cmnd),
-                          (2*vport->cfg_devloss_tmo*HZ));
+                          msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
        lpfc_cmd->waitq = NULL;
 
        if (lpfc_cmd->pCmd == cmnd) {
@@ -5012,16 +5378,24 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
        struct lpfc_hba *phba = vport->phba;
        int rc, ret = SUCCESS;
 
+       lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                        "3172 SCSI layer issued Host Reset Data:\n");
+
        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
        lpfc_offline(phba);
        rc = lpfc_sli_brdrestart(phba);
        if (rc)
                ret = FAILED;
-       lpfc_online(phba);
+       rc = lpfc_online(phba);
+       if (rc)
+               ret = FAILED;
        lpfc_unblock_mgmt_io(phba);
 
-       lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "3172 SCSI layer issued Host Reset Data: x%x\n", ret);
+       if (ret == FAILED) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                                "3323 Failed host reset, bring it offline\n");
+               lpfc_sli4_offline_eratt(phba);
+       }
        return ret;
 }
 
@@ -5088,11 +5462,11 @@ lpfc_slave_alloc(struct scsi_device *sdev)
        }
        num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
        if (num_to_alloc != num_allocated) {
-                       lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
-                                "0708 Allocation request of %d "
-                                "command buffers did not succeed.  "
-                                "Allocated %d buffers.\n",
-                                num_to_alloc, num_allocated);
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                                        "0708 Allocation request of %d "
+                                        "command buffers did not succeed.  "
+                                        "Allocated %d buffers.\n",
+                                        num_to_alloc, num_allocated);
        }
        if (num_allocated > 0)
                phba->total_scsi_bufs += num_allocated;
index 35dd17eb0f27907f67f4c50968a95452498b93be..572579f87de4fb1c3f74268da15ff8d226384103 100644 (file)
@@ -667,7 +667,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
 
        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-       next_time = jiffies + HZ * (phba->fc_ratov + 1);
+       next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
@@ -782,7 +782,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
                return;
        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-       next_time = jiffies + HZ * (phba->fc_ratov * 2);
+       next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
        list_splice_init(&phba->active_rrq_list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
 
@@ -878,7 +878,8 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
-       rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
+       rrq->rrq_stop_time = jiffies +
+                               msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
@@ -926,8 +927,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
        } else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                        !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
                ndlp = piocbq->context_un.ndlp;
-       else  if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
-                       (piocbq->iocb_flag & LPFC_IO_LIBDFC))
+       else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
                ndlp = piocbq->context_un.ndlp;
        else
                ndlp = piocbq->context1;
@@ -1339,7 +1339,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        BUG();
                else
                        mod_timer(&piocb->vport->els_tmofunc,
-                                 jiffies + HZ * (phba->fc_ratov << 1));
+                               jiffies +
+                               msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
        }
 
 
@@ -2340,7 +2341,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
                /* Mailbox cmd <cmd> Cmpl <cmpl> */
                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                                "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
-                               "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
+                               "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+                               "x%x x%x x%x\n",
                                pmb->vport ? pmb->vport->vpi : 0,
                                pmbox->mbxCommand,
                                lpfc_sli_config_mbox_subsys_get(phba, pmb),
@@ -2354,7 +2356,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
                                pmbox->un.varWords[4],
                                pmbox->un.varWords[5],
                                pmbox->un.varWords[6],
-                               pmbox->un.varWords[7]);
+                               pmbox->un.varWords[7],
+                               pmbox->un.varWords[8],
+                               pmbox->un.varWords[9],
+                               pmbox->un.varWords[10]);
 
                if (pmb->mbox_cmpl)
                        pmb->mbox_cmpl(phba,pmb);
@@ -2908,8 +2913,9 @@ void lpfc_poll_eratt(unsigned long ptr)
                lpfc_worker_wake_up(phba);
        else
                /* Restart the timer for next eratt poll */
-               mod_timer(&phba->eratt_poll, jiffies +
-                                       HZ * LPFC_ERATT_POLL_INTERVAL);
+               mod_timer(&phba->eratt_poll,
+                         jiffies +
+                         msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
        return;
 }
 
@@ -5511,6 +5517,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
                        list_del_init(&rsrc_blk->list);
                        kfree(rsrc_blk);
                }
+               phba->sli4_hba.max_cfg_param.vpi_used = 0;
                break;
        case LPFC_RSC_TYPE_FCOE_XRI:
                kfree(phba->sli4_hba.xri_bmask);
@@ -5811,6 +5818,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
                lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
        } else {
                kfree(phba->vpi_bmask);
+               phba->sli4_hba.max_cfg_param.vpi_used = 0;
                kfree(phba->vpi_ids);
                bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
                kfree(phba->sli4_hba.xri_bmask);
@@ -5992,7 +6000,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
        struct lpfc_sglq *sglq_entry = NULL;
        struct lpfc_sglq *sglq_entry_next = NULL;
        struct lpfc_sglq *sglq_entry_first = NULL;
-       int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
+       int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
        int last_xritag = NO_XRI;
        LIST_HEAD(prep_sgl_list);
        LIST_HEAD(blck_sgl_list);
@@ -6004,6 +6012,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
        list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
        spin_unlock_irq(&phba->hbalock);
 
+       total_cnt = phba->sli4_hba.els_xri_cnt;
        list_for_each_entry_safe(sglq_entry, sglq_entry_next,
                                 &allc_sgl_list, list) {
                list_del_init(&sglq_entry->list);
@@ -6055,9 +6064,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
                                                sglq_entry->sli4_xritag);
                                        list_add_tail(&sglq_entry->list,
                                                      &free_sgl_list);
-                                       spin_lock_irq(&phba->hbalock);
-                                       phba->sli4_hba.els_xri_cnt--;
-                                       spin_unlock_irq(&phba->hbalock);
+                                       total_cnt--;
                                }
                        }
                }
@@ -6085,9 +6092,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
                                        (sglq_entry_first->sli4_xritag +
                                         post_cnt - 1));
                        list_splice_init(&blck_sgl_list, &free_sgl_list);
-                       spin_lock_irq(&phba->hbalock);
-                       phba->sli4_hba.els_xri_cnt -= post_cnt;
-                       spin_unlock_irq(&phba->hbalock);
+                       total_cnt -= post_cnt;
                }
 
                /* don't reset xirtag due to hole in xri block */
@@ -6097,6 +6102,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
                /* reset els sgl post count for next round of posting */
                post_cnt = 0;
        }
+       /* update the number of XRIs posted for ELS */
+       phba->sli4_hba.els_xri_cnt = total_cnt;
 
        /* free the els sgls failed to post */
        lpfc_free_sgl_list(phba, &free_sgl_list);
@@ -6446,16 +6453,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 
        /* Start the ELS watchdog timer */
        mod_timer(&vport->els_tmofunc,
-                 jiffies + HZ * (phba->fc_ratov * 2));
+                 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
 
        /* Start heart beat timer */
        mod_timer(&phba->hb_tmofunc,
-                 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
 
        /* Start error attention (ERATT) polling timer */
-       mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+       mod_timer(&phba->eratt_poll,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
 
        /* Enable PCIe device Advanced Error Reporting (AER) if configured */
        if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
@@ -6822,8 +6830,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                        goto out_not_finished;
                }
                /* timeout active mbox command */
-               mod_timer(&psli->mbox_tmo, (jiffies +
-                              (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
+               timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
+                                          1000);
+               mod_timer(&psli->mbox_tmo, jiffies + timeout);
        }
 
        /* Mailbox cmd <cmd> issue */
@@ -7496,7 +7505,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
 
        /* Start timer for the mbox_tmo and log some mailbox post messages */
        mod_timer(&psli->mbox_tmo, (jiffies +
-                 (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
+                 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
 
        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                        "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -7914,15 +7923,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 static inline uint32_t
 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
 {
-       int i;
-
-       if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
-               i = smp_processor_id();
-       else
-               i = atomic_add_return(1, &phba->fcp_qidx);
+       struct lpfc_vector_map_info *cpup;
+       int chann, cpu;
 
-       i = (i % phba->cfg_fcp_io_channel);
-       return i;
+       if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
+               cpu = smp_processor_id();
+               if (cpu < phba->sli4_hba.num_present_cpu) {
+                       cpup = phba->sli4_hba.cpu_map;
+                       cpup += cpu;
+                       return cpup->channel_id;
+               }
+               chann = cpu;
+       }
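+       /* Otherwise fall back to round-robin across the FCP channels */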
+       chann = atomic_add_return(1, &phba->fcp_qidx);
+       chann = (chann % phba->cfg_fcp_io_channel);
+       return chann;
 }
 
 /**
@@ -8444,10 +8459,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 
        if ((piocb->iocb_flag & LPFC_IO_FCP) ||
                (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+               if (unlikely(!phba->sli4_hba.fcp_wq))
+                       return IOCB_ERROR;
                if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
                                     &wqe))
                        return IOCB_ERROR;
        } else {
+               if (unlikely(!phba->sli4_hba.els_wq))
+                       return IOCB_ERROR;
                if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
                        return IOCB_ERROR;
        }
@@ -10003,7 +10022,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
        retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
                                     SLI_IOCB_RET_IOCB);
        if (retval == IOCB_SUCCESS) {
-               timeout_req = timeout * HZ;
+               timeout_req = msecs_to_jiffies(timeout * 1000);
                timeleft = wait_event_timeout(done_q,
                                lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
                                timeout_req);
@@ -10108,7 +10127,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
        if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
                wait_event_interruptible_timeout(done_q,
                                pmboxq->mbox_flag & LPFC_MBX_WAKE,
-                               timeout * HZ);
+                               msecs_to_jiffies(timeout * 1000));
 
                spin_lock_irqsave(&phba->hbalock, flag);
                pmboxq->context1 = NULL;
@@ -12899,8 +12918,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
                }
                wq->db_regaddr = bar_memmap_p + db_offset;
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "3264 WQ[%d]: barset:x%x, offset:x%x\n",
-                               wq->queue_id, pci_barset, db_offset);
+                               "3264 WQ[%d]: barset:x%x, offset:x%x, "
+                               "format:x%x\n", wq->queue_id, pci_barset,
+                               db_offset, wq->db_format);
        } else {
                wq->db_format = LPFC_DB_LIST_FORMAT;
                wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
@@ -13120,8 +13140,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
                }
                hrq->db_regaddr = bar_memmap_p + db_offset;
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
-                               hrq->queue_id, pci_barset, db_offset);
+                               "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
+                               "format:x%x\n", hrq->queue_id, pci_barset,
+                               db_offset, hrq->db_format);
        } else {
                hrq->db_format = LPFC_DB_RING_FORMAT;
                hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -13971,13 +13992,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
        }
 
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-                       "2538 Received frame rctl:%s type:%s "
-                       "Frame Data:%08x %08x %08x %08x %08x %08x\n",
-                       rctl_names[fc_hdr->fh_r_ctl],
-                       type_names[fc_hdr->fh_type],
+                       "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
+                       "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
+                       rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
+                       type_names[fc_hdr->fh_type], fc_hdr->fh_type,
                        be32_to_cpu(header[0]), be32_to_cpu(header[1]),
                        be32_to_cpu(header[2]), be32_to_cpu(header[3]),
-                       be32_to_cpu(header[4]), be32_to_cpu(header[5]));
+                       be32_to_cpu(header[4]), be32_to_cpu(header[5]),
+                       be32_to_cpu(header[6]));
        return 0;
 drop:
        lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
index be02b59ea2797a53972a1795dcd4b0b1c156a846..67af460184badc9a2104cd477e099aeb8112a573 100644 (file)
@@ -346,11 +346,6 @@ struct lpfc_bmbx {
 #define SLI4_CT_VFI 2
 #define SLI4_CT_FCFI 3
 
-#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000
-#define LPFC_SLI4_FL1_MAX_BUF_SIZE     0X2000
-#define LPFC_SLI4_MIN_BUF_SIZE         0x400
-#define LPFC_SLI4_MAX_BUF_SIZE         0x20000
-
 /*
  * SLI4 specific data structures
  */
@@ -440,6 +435,17 @@ struct lpfc_sli4_lnk_info {
 
 #define LPFC_SLI4_HANDLER_NAME_SZ      16
 
+/* Used for IRQ vector to CPU mapping */
+struct lpfc_vector_map_info {
+       uint16_t        phys_id;
+       uint16_t        core_id;
+       uint16_t        irq;
+       uint16_t        channel_id;
+       struct cpumask  maskbits;
+};
+#define LPFC_VECTOR_MAP_EMPTY  0xffff
+#define LPFC_MAX_CPU           256
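+
+/*
+ * Each lpfc_vector_map_info entry records the physical id, core id, IRQ
+ * and FCP channel for one CPU; lpfc_sli4_scmd_to_wqidx_distr() uses
+ * channel_id to steer FCP I/O issued on that CPU to a fixed work queue
+ * when LPFC_FCP_SCHED_BY_CPU scheduling is configured.
+ */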
+
 /* SLI4 HBA data structure entries */
 struct lpfc_sli4_hba {
        void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -573,6 +579,11 @@ struct lpfc_sli4_hba {
        struct lpfc_iov iov;
        spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
        spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
+
+       /* CPU to vector mapping information */
+       struct lpfc_vector_map_info *cpu_map;
+       uint16_t num_online_cpu;
+       uint16_t num_present_cpu;
 };
 
 enum lpfc_sge_type {
index 664cd04f7cd8091dcaf0396f889b0d0852a4e6f8..a38dc3b169697ee2915cee01158334a240eb51b1 100644 (file)
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.38"
+#define LPFC_DRIVER_VERSION "8.3.39"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index 0fe188e66000da063c01feec76b557a075545258..e28e431564b08f5b90693c3dc49c5d218e67a262 100644 (file)
@@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
        }
 }
 
-static int
+int
 lpfc_alloc_vpi(struct lpfc_hba *phba)
 {
        unsigned long vpi;
@@ -568,6 +568,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba   *phba = vport->phba;
        long timeout;
+       bool ns_ndlp_referenced = false;
 
        if (vport->port_type == LPFC_PHYSICAL_PORT) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
@@ -628,6 +629,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
 
        lpfc_debugfs_terminate(vport);
 
+       /*
+        * The call to fc_remove_host might release the NameServer ndlp. Since
+        * we might need to use the ndlp to send the DA_ID CT command,
+        * increment the reference for the NameServer ndlp to prevent it from
+        * being released.
+        */
+       ndlp = lpfc_findnode_did(vport, NameServer_DID);
+       if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+               lpfc_nlp_get(ndlp);
+               ns_ndlp_referenced = true;
+       }
+
        /* Remove FC host and then SCSI host with the vport */
        fc_remove_host(lpfc_shost_from_vport(vport));
        scsi_remove_host(lpfc_shost_from_vport(vport));
@@ -734,6 +747,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
                lpfc_discovery_wait(vport);
 
 skip_logo:
+
+       /*
+        * If the NameServer ndlp has been incremented to allow the DA_ID CT
+        * command to be sent, decrement the ndlp now.
+        */
+       if (ns_ndlp_referenced) {
+               ndlp = lpfc_findnode_did(vport, NameServer_DID);
+               lpfc_nlp_put(ndlp);
+       }
+
        lpfc_cleanup(vport);
        lpfc_sli_host_down(vport);
 
index 90828340aceadbd275c7184b8ad465582e894e2a..6b2c94eb8134300e72b4d06571994c0c2fe5d32b 100644 (file)
@@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
 int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
 struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
 void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
+int lpfc_alloc_vpi(struct lpfc_hba *phba);
 
 /*
  *  queuecommand  VPORT-specific return codes. Specified in  the host byte code.
index 7c90d57b867e2c332e1a953a3c01892f971bd779..3a9ddae86f1f8e2d5e33474a7f0f3510ec413c65 100644 (file)
@@ -4931,11 +4931,12 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
                printk(KERN_ERR "megaraid_sas: timed out while"
                        "waiting for HBA to recover\n");
                error = -ENODEV;
-               goto out_kfree_ioc;
+               goto out_up;
        }
        spin_unlock_irqrestore(&instance->hba_lock, flags);
 
        error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
+      out_up:
        up(&instance->ioctl_sem);
 
       out_kfree_ioc:
index 74550922ad55e418eb7e37753e0983023728943b..7b7381d7671fdc9cfeecdf3c256984b0261d3116 100644 (file)
@@ -254,7 +254,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
        }
        for (i = 0; i < MVS_MAX_DEVICES; i++) {
                mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
-               mvi->devices[i].dev_type = NO_DEVICE;
+               mvi->devices[i].dev_type = SAS_PHY_UNUSED;
                mvi->devices[i].device_id = i;
                mvi->devices[i].dev_status = MVS_DEV_NORMAL;
                init_timer(&mvi->devices[i].timer);
index 532110f4562af58b7517b7b72bfaf0d0f5e25653..c9e244984e30ec1dfed39c7b574b71caffeb0794 100644 (file)
@@ -706,7 +706,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
        return 0;
 }
 
-#define        DEV_IS_GONE(mvi_dev)    ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
+#define        DEV_IS_GONE(mvi_dev)    ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
 static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
                                struct mvs_tmf_task *tmf, int *pass)
 {
@@ -726,7 +726,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
                 * libsas will use dev->port, should
                 * not call task_done for sata
                 */
-               if (dev->dev_type != SATA_DEV)
+               if (dev->dev_type != SAS_SATA_DEV)
                        task->task_done(task);
                return rc;
        }
@@ -1159,10 +1159,10 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
                        phy->identify.device_type =
                                phy->att_dev_info & PORT_DEV_TYPE_MASK;
 
-                       if (phy->identify.device_type == SAS_END_DEV)
+                       if (phy->identify.device_type == SAS_END_DEVICE)
                                phy->identify.target_port_protocols =
                                                        SAS_PROTOCOL_SSP;
-                       else if (phy->identify.device_type != NO_DEVICE)
+                       else if (phy->identify.device_type != SAS_PHY_UNUSED)
                                phy->identify.target_port_protocols =
                                                        SAS_PROTOCOL_SMP;
                        if (oob_done)
@@ -1260,7 +1260,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
 {
        u32 dev;
        for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
-               if (mvi->devices[dev].dev_type == NO_DEVICE) {
+               if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) {
                        mvi->devices[dev].device_id = dev;
                        return &mvi->devices[dev];
                }
@@ -1278,7 +1278,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev)
        u32 id = mvi_dev->device_id;
        memset(mvi_dev, 0, sizeof(*mvi_dev));
        mvi_dev->device_id = id;
-       mvi_dev->dev_type = NO_DEVICE;
+       mvi_dev->dev_type = SAS_PHY_UNUSED;
        mvi_dev->dev_status = MVS_DEV_NORMAL;
        mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
 }
@@ -1480,7 +1480,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
 {
        int rc;
        struct sas_phy *phy = sas_get_local_phy(dev);
-       int reset_type = (dev->dev_type == SATA_DEV ||
+       int reset_type = (dev->dev_type == SAS_SATA_DEV ||
                        (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
        rc = sas_phy_reset(phy, reset_type);
        sas_put_local_phy(phy);
@@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task)
 
        } else if (task->task_proto & SAS_PROTOCOL_SATA ||
                task->task_proto & SAS_PROTOCOL_STP) {
-               if (SATA_DEV == dev->dev_type) {
+               if (SAS_SATA_DEV == dev->dev_type) {
                        struct mvs_slot_info *slot = task->lldd_task;
                        u32 slot_idx = (u32)(slot - mvi->slot_info);
                        mv_dprintk("mvs_abort_task() mvi=%p task=%p "
index 9f3cc13a5ce7ef70ca9c59c29530a8481fd679fc..60e2fb7f2dca7e37512b128bea9b59fd1c9cd579 100644 (file)
@@ -67,7 +67,7 @@ extern const struct mvs_dispatch mvs_94xx_dispatch;
 extern struct kmem_cache *mvs_task_list_cache;
 
 #define DEV_IS_EXPANDER(type)  \
-       ((type == EDGE_DEV) || (type == FANOUT_DEV))
+       ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
 
 #define bit(n) ((u64)1 << n)
 
@@ -241,7 +241,7 @@ struct mvs_phy {
 
 struct mvs_device {
        struct list_head                dev_entry;
-       enum sas_dev_type dev_type;
+       enum sas_device_type dev_type;
        struct mvs_info *mvi_info;
        struct domain_device *sas_device;
        struct timer_list timer;
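The mvsas hunks above are a mechanical part of a tree-wide switch from the old libsas device-type spellings to the consolidated enum sas_device_type. As a reading aid, the rename map implied by these hunks is summarized below; the enum's explicit numeric values live in include/scsi/sas.h and are not reproduced here.

    /*
     * enum sas_dev_type  ->  enum sas_device_type
     *
     * NO_DEVICE          ->  SAS_PHY_UNUSED
     * SAS_END_DEV        ->  SAS_END_DEVICE
     * EDGE_DEV           ->  SAS_EDGE_EXPANDER_DEVICE
     * FANOUT_DEV         ->  SAS_FANOUT_EXPANDER_DEVICE
     * SATA_DEV           ->  SAS_SATA_DEV
     */
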
index 52f04296171c3e1c3dc0963f6e5e8a6b3037f92f..ce4cd87c7c662a6de92f71331e1e49052f4c8e4f 100644 (file)
@@ -4,9 +4,10 @@
 # Copyright (C) 2008-2009  USI Co., Ltd.
 
 
-obj-$(CONFIG_SCSI_PM8001) += pm8001.o
-pm8001-y += pm8001_init.o \
+obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
+pm80xx-y += pm8001_init.o \
                pm8001_sas.o  \
                pm8001_ctl.o  \
-               pm8001_hwi.o
+               pm8001_hwi.o  \
+               pm80xx_hwi.o
 
index 45bc197bc22f2a45c46f5176cabefddc69e077f2..d99f41c2ca13b927c66d34036937d3443b29f862 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -58,8 +58,13 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-               pm8001_ha->main_cfg_tbl.interface_rev);
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev);
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev);
+       }
 }
 static
 DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
@@ -78,11 +83,19 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24),
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16),
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8),
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev));
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24),
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16),
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8),
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev));
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24),
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16),
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8),
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
+       }
 }
 static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
 /**
@@ -99,8 +112,13 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-                       pm8001_ha->main_cfg_tbl.max_out_io);
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io);
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io);
+       }
 }
 static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
 /**
@@ -117,8 +135,15 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%04d\n",
-                       (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16));
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)
+                       );
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)
+                       );
+       }
 }
 static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
 /**
@@ -136,8 +161,15 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%04d\n",
-                       pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF);
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF
+                       );
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF
+                       );
+       }
 }
 static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
 
@@ -173,7 +205,14 @@ static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev,
        struct Scsi_Host *shost = class_to_shost(cdev);
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
-       mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25;
+       /* fe000000 means supports SAS2.1 */
+       if (pm8001_ha->chip_id == chip_8001)
+               mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag &
+                                                       0xfe000000)>>25;
+       else
+               /* fe000000 means supports SAS2.1 */
+               mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag &
+                                                       0xfe000000)>>25;
        return show_sas_spec_support_status(mode, buf);
 }
 static DEVICE_ATTR(sas_spec_support, S_IRUGO,
@@ -361,10 +400,11 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
                goto out;
        }
        payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
-       memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
+       memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
                                pm8001_ha->fw_image->size);
        payload->length = pm8001_ha->fw_image->size;
        payload->id = 0;
+       payload->minor_function = 0x1;
        pm8001_ha->nvmd_completion = &completion;
        ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
        wait_for_completion(&completion);
@@ -411,7 +451,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
                        payload->length = 1024*16;
                        payload->id = 0;
                        fwControl =
-                             (struct fw_control_info *)payload->func_specific;
+                             (struct fw_control_info *)&payload->func_specific;
                        fwControl->len = IOCTL_BUF_SIZE;   /* IN */
                        fwControl->size = partitionSize + HEADER_LEN;/* IN */
                        fwControl->retcode = 0;/* OUT */
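The pm8001_ctl.c hunks above all follow one pattern: main_cfg_tbl now carries both the SPC (pm8001_tbl) and SPCv/VE (pm80xx_tbl) layouts of the MPI main configuration table, and chip_id picks which member is valid. A standalone sketch of that pattern, using simplified stand-in types rather than the driver's real (much larger) structures:

    #include <stdio.h>

    enum chip_flavors { chip_8001, chip_8008, chip_8009, chip_8018, chip_8019 };

    struct pm8001_cfg { unsigned int interface_rev; };  /* stand-in for the SPC table   */
    struct pm80xx_cfg { unsigned int interface_rev; };  /* stand-in for the SPCv table  */

    union main_cfg_table {
            struct pm8001_cfg pm8001_tbl;
            struct pm80xx_cfg pm80xx_tbl;
    };

    /* chip_id decides which view of the shared table is meaningful */
    static unsigned int interface_rev(enum chip_flavors chip_id,
                                      const union main_cfg_table *tbl)
    {
            if (chip_id == chip_8001)
                    return tbl->pm8001_tbl.interface_rev;
            return tbl->pm80xx_tbl.interface_rev;
    }

    int main(void)
    {
            union main_cfg_table tbl = { .pm8001_tbl = { .interface_rev = 2 } };

            printf("%u\n", interface_rev(chip_8001, &tbl));
            return 0;
    }
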
index c3d20c8d4abe3724308182697a558dac7440a07a..479c5a7a863a8aa8fded8e4164b8377d3658089d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
 
 enum chip_flavors {
        chip_8001,
+       chip_8008,
+       chip_8009,
+       chip_8018,
+       chip_8019
 };
-#define USI_MAX_MEMCNT                 9
-#define PM8001_MAX_DMA_SG              SG_ALL
+
 enum phy_speed {
        PHY_SPEED_15 = 0x01,
        PHY_SPEED_30 = 0x02,
@@ -69,23 +72,34 @@ enum port_type {
 #define PM8001_MPI_QUEUE         1024   /* maximum mpi queue entries */
 #define        PM8001_MAX_INB_NUM       1
 #define        PM8001_MAX_OUTB_NUM      1
+#define        PM8001_MAX_SPCV_INB_NUM         1
+#define        PM8001_MAX_SPCV_OUTB_NUM        4
 #define        PM8001_CAN_QUEUE         508    /* SCSI Queue depth */
 
+/* Inbound/Outbound queue size */
+#define IOMB_SIZE_SPC          64
+#define IOMB_SIZE_SPCV         128
+
 /* unchangeable hardware details */
-#define        PM8001_MAX_PHYS          8      /* max. possible phys */
-#define        PM8001_MAX_PORTS         8      /* max. possible ports */
-#define        PM8001_MAX_DEVICES       1024   /* max supported device */
+#define        PM8001_MAX_PHYS          16     /* max. possible phys */
+#define        PM8001_MAX_PORTS         16     /* max. possible ports */
+#define        PM8001_MAX_DEVICES       2048   /* max supported device */
+#define        PM8001_MAX_MSIX_VEC      64     /* max msi-x int for spcv/ve */
 
+#define USI_MAX_MEMCNT_BASE    5
+#define IB                     (USI_MAX_MEMCNT_BASE + 1)
+#define CI                     (IB + PM8001_MAX_SPCV_INB_NUM)
+#define OB                     (CI + PM8001_MAX_SPCV_INB_NUM)
+#define PI                     (OB + PM8001_MAX_SPCV_OUTB_NUM)
+#define USI_MAX_MEMCNT         (PI + PM8001_MAX_SPCV_OUTB_NUM)
+#define PM8001_MAX_DMA_SG      SG_ALL
 enum memory_region_num {
        AAP1 = 0x0, /* application acceleration processor */
        IOP,        /* IO processor */
-       CI,         /* consumer index */
-       PI,         /* producer index */
-       IB,         /* inbound queue */
-       OB,         /* outbound queue */
        NVMD,       /* NVM device */
        DEV_MEM,    /* memory for devices */
        CCB_MEM,    /* memory for command control block */
+       FW_FLASH    /* memory for fw flash update */
 };
 #define        PM8001_EVENT_LOG_SIZE    (128 * 1024)
 
index b8dd05074abb4f61faebbe0af12bd3074fd8b930..69dd49c05f1e1069b2aea0544734f52ea5126abb 100644 (file)
 static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
 {
        void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
-       pm8001_ha->main_cfg_tbl.signature       = pm8001_mr32(address, 0x00);
-       pm8001_ha->main_cfg_tbl.interface_rev   = pm8001_mr32(address, 0x04);
-       pm8001_ha->main_cfg_tbl.firmware_rev    = pm8001_mr32(address, 0x08);
-       pm8001_ha->main_cfg_tbl.max_out_io      = pm8001_mr32(address, 0x0C);
-       pm8001_ha->main_cfg_tbl.max_sgl         = pm8001_mr32(address, 0x10);
-       pm8001_ha->main_cfg_tbl.ctrl_cap_flag   = pm8001_mr32(address, 0x14);
-       pm8001_ha->main_cfg_tbl.gst_offset      = pm8001_mr32(address, 0x18);
-       pm8001_ha->main_cfg_tbl.inbound_queue_offset =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.signature    =
+                               pm8001_mr32(address, 0x00);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev =
+                               pm8001_mr32(address, 0x04);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev =
+                               pm8001_mr32(address, 0x08);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io   =
+                               pm8001_mr32(address, 0x0C);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl      =
+                               pm8001_mr32(address, 0x10);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag =
+                               pm8001_mr32(address, 0x14);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset   =
+                               pm8001_mr32(address, 0x18);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset =
                pm8001_mr32(address, MAIN_IBQ_OFFSET);
-       pm8001_ha->main_cfg_tbl.outbound_queue_offset =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset =
                pm8001_mr32(address, MAIN_OBQ_OFFSET);
-       pm8001_ha->main_cfg_tbl.hda_mode_flag   =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag        =
                pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);
 
        /* read analog Setting offset from the configuration table */
-       pm8001_ha->main_cfg_tbl.anolog_setup_table_offset =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset =
                pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
 
        /* read Error Dump Offset and Length */
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
 }
 
@@ -86,31 +93,56 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
 static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
 {
        void __iomem *address = pm8001_ha->general_stat_tbl_addr;
-       pm8001_ha->gs_tbl.gst_len_mpistate      = pm8001_mr32(address, 0x00);
-       pm8001_ha->gs_tbl.iq_freeze_state0      = pm8001_mr32(address, 0x04);
-       pm8001_ha->gs_tbl.iq_freeze_state1      = pm8001_mr32(address, 0x08);
-       pm8001_ha->gs_tbl.msgu_tcnt             = pm8001_mr32(address, 0x0C);
-       pm8001_ha->gs_tbl.iop_tcnt              = pm8001_mr32(address, 0x10);
-       pm8001_ha->gs_tbl.reserved              = pm8001_mr32(address, 0x14);
-       pm8001_ha->gs_tbl.phy_state[0]  = pm8001_mr32(address, 0x18);
-       pm8001_ha->gs_tbl.phy_state[1]  = pm8001_mr32(address, 0x1C);
-       pm8001_ha->gs_tbl.phy_state[2]  = pm8001_mr32(address, 0x20);
-       pm8001_ha->gs_tbl.phy_state[3]  = pm8001_mr32(address, 0x24);
-       pm8001_ha->gs_tbl.phy_state[4]  = pm8001_mr32(address, 0x28);
-       pm8001_ha->gs_tbl.phy_state[5]  = pm8001_mr32(address, 0x2C);
-       pm8001_ha->gs_tbl.phy_state[6]  = pm8001_mr32(address, 0x30);
-       pm8001_ha->gs_tbl.phy_state[7]  = pm8001_mr32(address, 0x34);
-       pm8001_ha->gs_tbl.reserved1             = pm8001_mr32(address, 0x38);
-       pm8001_ha->gs_tbl.reserved2             = pm8001_mr32(address, 0x3C);
-       pm8001_ha->gs_tbl.reserved3             = pm8001_mr32(address, 0x40);
-       pm8001_ha->gs_tbl.recover_err_info[0]   = pm8001_mr32(address, 0x44);
-       pm8001_ha->gs_tbl.recover_err_info[1]   = pm8001_mr32(address, 0x48);
-       pm8001_ha->gs_tbl.recover_err_info[2]   = pm8001_mr32(address, 0x4C);
-       pm8001_ha->gs_tbl.recover_err_info[3]   = pm8001_mr32(address, 0x50);
-       pm8001_ha->gs_tbl.recover_err_info[4]   = pm8001_mr32(address, 0x54);
-       pm8001_ha->gs_tbl.recover_err_info[5]   = pm8001_mr32(address, 0x58);
-       pm8001_ha->gs_tbl.recover_err_info[6]   = pm8001_mr32(address, 0x5C);
-       pm8001_ha->gs_tbl.recover_err_info[7]   = pm8001_mr32(address, 0x60);
+       pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate   =
+                               pm8001_mr32(address, 0x00);
+       pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0   =
+                               pm8001_mr32(address, 0x04);
+       pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1   =
+                               pm8001_mr32(address, 0x08);
+       pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt          =
+                               pm8001_mr32(address, 0x0C);
+       pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt           =
+                               pm8001_mr32(address, 0x10);
+       pm8001_ha->gs_tbl.pm8001_tbl.rsvd               =
+                               pm8001_mr32(address, 0x14);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0]       =
+                               pm8001_mr32(address, 0x18);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1]       =
+                               pm8001_mr32(address, 0x1C);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2]       =
+                               pm8001_mr32(address, 0x20);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3]       =
+                               pm8001_mr32(address, 0x24);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4]       =
+                               pm8001_mr32(address, 0x28);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5]       =
+                               pm8001_mr32(address, 0x2C);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6]       =
+                               pm8001_mr32(address, 0x30);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7]       =
+                               pm8001_mr32(address, 0x34);
+       pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val     =
+                               pm8001_mr32(address, 0x38);
+       pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0]           =
+                               pm8001_mr32(address, 0x3C);
+       pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1]           =
+                               pm8001_mr32(address, 0x40);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0]        =
+                               pm8001_mr32(address, 0x44);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1]        =
+                               pm8001_mr32(address, 0x48);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2]        =
+                               pm8001_mr32(address, 0x4C);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3]        =
+                               pm8001_mr32(address, 0x50);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4]        =
+                               pm8001_mr32(address, 0x54);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5]        =
+                               pm8001_mr32(address, 0x58);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6]        =
+                               pm8001_mr32(address, 0x5C);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7]        =
+                               pm8001_mr32(address, 0x60);
 }
 
 /**
@@ -119,10 +151,9 @@ static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
  */
 static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
 {
-       int inbQ_num = 1;
        int i;
        void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
-       for (i = 0; i < inbQ_num; i++) {
+       for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
                u32 offset = i * 0x20;
                pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
                      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -137,10 +168,9 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
  */
 static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
 {
-       int outbQ_num = 1;
        int i;
        void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
-       for (i = 0; i < outbQ_num; i++) {
+       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
                u32 offset = i * 0x24;
                pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
                      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -155,54 +185,57 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
  */
 static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 {
-       int qn = 1;
        int i;
        u32 offsetib, offsetob;
        void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
        void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
 
-       pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd                     = 0;
-       pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3                = 0;
-       pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7                = 0;
-       pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3               = 0;
-       pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7               = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3       = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7       = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3   = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7   = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3   = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7   = 0;
-
-       pm8001_ha->main_cfg_tbl.upper_event_log_addr            =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd          = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3     = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7     = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3    = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7    = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 =
+                                                                        0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 =
+                                                                        0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0;
+
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr         =
                pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
-       pm8001_ha->main_cfg_tbl.lower_event_log_addr            =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr         =
                pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
-       pm8001_ha->main_cfg_tbl.event_log_size  = PM8001_EVENT_LOG_SIZE;
-       pm8001_ha->main_cfg_tbl.event_log_option                = 0x01;
-       pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr        =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size               =
+               PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option             = 0x01;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr     =
                pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
-       pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr        =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr     =
                pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
-       pm8001_ha->main_cfg_tbl.iop_event_log_size      = PM8001_EVENT_LOG_SIZE;
-       pm8001_ha->main_cfg_tbl.iop_event_log_option            = 0x01;
-       pm8001_ha->main_cfg_tbl.fatal_err_interrupt             = 0x01;
-       for (i = 0; i < qn; i++) {
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size           =
+               PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option         = 0x01;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt          = 0x01;
+       for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
                pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt  =
                        PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
                pm8001_ha->inbnd_q_tbl[i].upper_base_addr       =
-                       pm8001_ha->memoryMap.region[IB].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
                pm8001_ha->inbnd_q_tbl[i].lower_base_addr       =
-               pm8001_ha->memoryMap.region[IB].phys_addr_lo;
+               pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
                pm8001_ha->inbnd_q_tbl[i].base_virt             =
-                       (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr;
+                       (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
                pm8001_ha->inbnd_q_tbl[i].total_length          =
-                       pm8001_ha->memoryMap.region[IB].total_len;
+                       pm8001_ha->memoryMap.region[IB + i].total_len;
                pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr    =
-                       pm8001_ha->memoryMap.region[CI].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
                pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr    =
-                       pm8001_ha->memoryMap.region[CI].phys_addr_lo;
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
                pm8001_ha->inbnd_q_tbl[i].ci_virt               =
-                       pm8001_ha->memoryMap.region[CI].virt_ptr;
+                       pm8001_ha->memoryMap.region[CI + i].virt_ptr;
                offsetib = i * 0x20;
                pm8001_ha->inbnd_q_tbl[i].pi_pci_bar            =
                        get_pci_bar_index(pm8001_mr32(addressib,
@@ -212,25 +245,25 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
                pm8001_ha->inbnd_q_tbl[i].producer_idx          = 0;
                pm8001_ha->inbnd_q_tbl[i].consumer_index        = 0;
        }
-       for (i = 0; i < qn; i++) {
+       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
                pm8001_ha->outbnd_q_tbl[i].element_size_cnt     =
                        PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
                pm8001_ha->outbnd_q_tbl[i].upper_base_addr      =
-                       pm8001_ha->memoryMap.region[OB].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
                pm8001_ha->outbnd_q_tbl[i].lower_base_addr      =
-                       pm8001_ha->memoryMap.region[OB].phys_addr_lo;
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
                pm8001_ha->outbnd_q_tbl[i].base_virt            =
-                       (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr;
+                       (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
                pm8001_ha->outbnd_q_tbl[i].total_length         =
-                       pm8001_ha->memoryMap.region[OB].total_len;
+                       pm8001_ha->memoryMap.region[OB + i].total_len;
                pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr   =
-                       pm8001_ha->memoryMap.region[PI].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
                pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr   =
-                       pm8001_ha->memoryMap.region[PI].phys_addr_lo;
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
                pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay       =
-                       0 | (10 << 16) | (0 << 24);
+                       0 | (10 << 16) | (i << 24);
                pm8001_ha->outbnd_q_tbl[i].pi_virt              =
-                       pm8001_ha->memoryMap.region[PI].virt_ptr;
+                       pm8001_ha->memoryMap.region[PI + i].virt_ptr;
                offsetob = i * 0x24;
                pm8001_ha->outbnd_q_tbl[i].ci_pci_bar           =
                        get_pci_bar_index(pm8001_mr32(addressob,
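One detail in the outbound-queue loop above: interrup_vec_cnt_delay used to hard-code vector 0 and now packs the queue index i into the top byte, so each outbound queue can be steered to its own interrupt vector. The field split assumed here (count in the low bits, a delay of 10 in bits 16-23, the vector from bit 24 up) is inferred from the field name and the shifts in the hunk, not from MPI documentation:

    #include <stdio.h>

    /* Pack the value the same way the hunk above does. */
    static unsigned int pack_vec_cnt_delay(unsigned int vec, unsigned int delay,
                                           unsigned int count)
    {
            return count | (delay << 16) | (vec << 24);
    }

    int main(void)
    {
            unsigned int i;

            for (i = 0; i < 4; i++)   /* e.g. four SPCv outbound queues */
                    printf("queue %u -> 0x%08x\n", i, pack_vec_cnt_delay(i, 10, 0));
            return 0;
    }
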
@@ -250,42 +283,51 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
 {
        void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
        pm8001_mw32(address, 0x24,
-               pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd);
        pm8001_mw32(address, 0x28,
-               pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3);
        pm8001_mw32(address, 0x2C,
-               pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7);
        pm8001_mw32(address, 0x30,
-               pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3);
        pm8001_mw32(address, 0x34,
-               pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7);
        pm8001_mw32(address, 0x38,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ITNexus_event_pid0_3);
        pm8001_mw32(address, 0x3C,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ITNexus_event_pid4_7);
        pm8001_mw32(address, 0x40,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ssp_event_pid0_3);
        pm8001_mw32(address, 0x44,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ssp_event_pid4_7);
        pm8001_mw32(address, 0x48,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_smp_event_pid0_3);
        pm8001_mw32(address, 0x4C,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_smp_event_pid4_7);
        pm8001_mw32(address, 0x50,
-               pm8001_ha->main_cfg_tbl.upper_event_log_addr);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr);
        pm8001_mw32(address, 0x54,
-               pm8001_ha->main_cfg_tbl.lower_event_log_addr);
-       pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size);
-       pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr);
+       pm8001_mw32(address, 0x58,
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size);
+       pm8001_mw32(address, 0x5C,
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option);
        pm8001_mw32(address, 0x60,
-               pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr);
        pm8001_mw32(address, 0x64,
-               pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr);
-       pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr);
+       pm8001_mw32(address, 0x68,
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size);
        pm8001_mw32(address, 0x6C,
-               pm8001_ha->main_cfg_tbl.iop_event_log_option);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option);
        pm8001_mw32(address, 0x70,
-               pm8001_ha->main_cfg_tbl.fatal_err_interrupt);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt);
 }
 
 /**
@@ -597,6 +639,19 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
  */
 static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
 {
+       u8 i = 0;
+       u16 deviceid;
+       pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+       /* 8081 controllers need BAR shift to access MPI space
+       * as this is shared with BIOS data */
+       if (deviceid == 0x8081) {
+               if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
+                       PM8001_FAIL_DBG(pm8001_ha,
+                               pm8001_printk("Shift Bar4 to 0x%x failed\n",
+                                       GSM_SM_BASE));
+                       return -1;
+               }
+       }
        /* check the firmware status */
        if (-1 == check_fw_ready(pm8001_ha)) {
                PM8001_FAIL_DBG(pm8001_ha,
@@ -613,11 +668,16 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
        read_outbnd_queue_table(pm8001_ha);
        /* update main config table ,inbound table and outbound table */
        update_main_config_table(pm8001_ha);
-       update_inbnd_queue_table(pm8001_ha, 0);
-       update_outbnd_queue_table(pm8001_ha, 0);
-       mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
-       /* 7->130ms, 34->500ms, 119->1.5s */
-       mpi_set_open_retry_interval_reg(pm8001_ha, 119);
+       for (i = 0; i < PM8001_MAX_INB_NUM; i++)
+               update_inbnd_queue_table(pm8001_ha, i);
+       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
+               update_outbnd_queue_table(pm8001_ha, i);
+       /* 8081 controllers do not require these operations */
+       if (deviceid != 0x8081) {
+               mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
+               /* 7->130ms, 34->500ms, 119->1.5s */
+               mpi_set_open_retry_interval_reg(pm8001_ha, 119);
+       }
        /* notify firmware update finished and check initialization status */
        if (0 == mpi_init_check(pm8001_ha)) {
                PM8001_INIT_DBG(pm8001_ha,
@@ -639,6 +699,16 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
        u32 max_wait_count;
        u32 value;
        u32 gst_len_mpistate;
+       u16 deviceid;
+       pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+       if (deviceid == 0x8081) {
+               if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
+                       PM8001_FAIL_DBG(pm8001_ha,
+                               pm8001_printk("Shift Bar4 to 0x%x failed\n",
+                                       GSM_SM_BASE));
+                       return -1;
+               }
+       }
        init_pci_device_addresses(pm8001_ha);
        /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
        table is stop */
@@ -740,14 +810,14 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
  * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
  * the FW register status to the originated status.
  * @pm8001_ha: our hba card information
- * @signature: signature in host scratch pad0 register.
  */
 static int
-pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
+pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
 {
        u32     regVal, toggleVal;
        u32     max_wait_count;
        u32     regVal1, regVal2, regVal3;
+       u32     signature = 0x252acbcd; /* for host scratch pad0 */
        unsigned long flags;
 
        /* step1: Check FW is ready for soft reset */
@@ -1113,7 +1183,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
  * pm8001_chip_iounmap - which maped when initialized.
  * @pm8001_ha: our hba card information
  */
-static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
+void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
 {
        s8 bar, logical = 0;
        for (bar = 0; bar < 6; bar++) {
@@ -1192,7 +1262,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
  * @pm8001_ha: our hba card information
  */
 static void
-pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
        pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
@@ -1207,7 +1277,7 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
  * @pm8001_ha: our hba card information
  */
 static void
-pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
        pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
@@ -1218,12 +1288,13 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * mpi_msg_free_get- get the free message buffer for transfer inbound queue.
+ * pm8001_mpi_msg_free_get - get the free message buffer for transfer
+ * inbound queue.
  * @circularQ: the inbound queue  we want to transfer to HBA.
  * @messageSize: the message size of this transfer, normally it is 64 bytes
  * @messagePtr: the pointer to message.
  */
-static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
+int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
                            u16 messageSize, void **messagePtr)
 {
        u32 offset, consumer_index;
@@ -1231,7 +1302,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
        u8 bcCount = 1; /* only support single buffer */
 
        /* Checks is the requested message size can be allocated in this queue*/
-       if (messageSize > 64) {
+       if (messageSize > IOMB_SIZE_SPCV) {
                *messagePtr = NULL;
                return -1;
        }
@@ -1245,7 +1316,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
                return -1;
        }
        /* get memory IOMB buffer address */
-       offset = circularQ->producer_idx * 64;
+       offset = circularQ->producer_idx * messageSize;
        /* increment to next bcCount element */
        circularQ->producer_idx = (circularQ->producer_idx + bcCount)
                                % PM8001_MPI_QUEUE;
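pm8001_mpi_msg_free_get() now sizes inbound IOMB slots by the per-chip IOMB size (64 bytes on the SPC, 128 on the SPCv/VE) instead of a hard-coded 64, while the producer index still wraps modulo the queue depth. A standalone sketch of just that slot math, leaving out the free-space check against the consumer index that the real function also performs:

    #include <stdio.h>

    #define PM8001_MPI_QUEUE  1024   /* queue entries, from the header hunk above */
    #define IOMB_SIZE_SPC       64
    #define IOMB_SIZE_SPCV     128

    /* Byte offset of the next free IOMB: producer index times the per-chip
     * IOMB size, with the index wrapping at the queue depth. */
    static unsigned int next_slot_offset(unsigned int *producer_idx,
                                         unsigned int iomb_size)
    {
            unsigned int offset = *producer_idx * iomb_size;

            *producer_idx = (*producer_idx + 1) % PM8001_MPI_QUEUE;
            return offset;
    }

    int main(void)
    {
            unsigned int pi = 1022;   /* start near the end to show the wrap */
            int i;

            for (i = 0; i < 4; i++) {
                    unsigned int offset = next_slot_offset(&pi, IOMB_SIZE_SPCV);

                    printf("offset=%u next_pi=%u\n", offset, pi);
            }
            return 0;
    }
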
@@ -1257,29 +1328,30 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
 }
 
 /**
- * mpi_build_cmd- build the message queue for transfer, update the PI to FW
- * to tell the fw to get this message from IOMB.
+ * pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to
+ * FW to tell the fw to get this message from IOMB.
  * @pm8001_ha: our hba card information
  * @circularQ: the inbound queue we want to transfer to HBA.
  * @opCode: the operation code represents commands which LLDD and fw recognized.
  * @payload: the command payload of each operation command.
  */
-static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
                         struct inbound_queue_table *circularQ,
-                        u32 opCode, void *payload)
+                        u32 opCode, void *payload, u32 responseQueue)
 {
        u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
-       u32 responseQueue = 0;
        void *pMessage;
 
-       if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) {
+       if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
+               &pMessage) < 0) {
                PM8001_IO_DBG(pm8001_ha,
                        pm8001_printk("No free mpi buffer\n"));
                return -1;
        }
        BUG_ON(!payload);
        /*Copy to the payload*/
-       memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr)));
+       memcpy(pMessage, payload, (pm8001_ha->iomb_size -
+                               sizeof(struct mpi_msg_hdr)));
 
        /*Build the header*/
        Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
@@ -1291,12 +1363,13 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
        pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
                circularQ->pi_offset, circularQ->producer_idx);
        PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx,
-               circularQ->consumer_index));
+               pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
+                       responseQueue, opCode, circularQ->producer_idx,
+                       circularQ->consumer_index));
        return 0;
 }
 
-static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
                            struct outbound_queue_table *circularQ, u8 bc)
 {
        u32 producer_index;
@@ -1305,7 +1378,7 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
 
        msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
        pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
-                               circularQ->consumer_idx * 64);
+                               circularQ->consumer_idx * pm8001_ha->iomb_size);
        if (pOutBoundMsgHeader != msgHeader) {
                PM8001_FAIL_DBG(pm8001_ha,
                        pm8001_printk("consumer_idx = %d msgHeader = %p\n",
@@ -1336,13 +1409,14 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
 }
 
 /**
- * mpi_msg_consume- get the MPI message from  outbound queue message table.
+ * pm8001_mpi_msg_consume- get the MPI message from outbound queue
+ * message table.
  * @pm8001_ha: our hba card information
  * @circularQ: the outbound queue  table.
  * @messagePtr1: the message contents of this outbound message.
  * @pBC: the message size.
  */
-static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
                           struct outbound_queue_table *circularQ,
                           void **messagePtr1, u8 *pBC)
 {
@@ -1356,7 +1430,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
                        /*Get the pointer to the circular queue buffer element*/
                        msgHeader = (struct mpi_msg_hdr *)
                                (circularQ->base_virt +
-                               circularQ->consumer_idx * 64);
+                               circularQ->consumer_idx * pm8001_ha->iomb_size);
                        /* read header */
                        header_tmp = pm8001_read_32(msgHeader);
                        msgHeader_tmp = cpu_to_le32(header_tmp);
@@ -1416,7 +1490,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
        return MPI_IO_STATUS_BUSY;
 }
 
-static void pm8001_work_fn(struct work_struct *work)
+void pm8001_work_fn(struct work_struct *work)
 {
        struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
        struct pm8001_device *pm8001_dev;
@@ -1431,7 +1505,7 @@ static void pm8001_work_fn(struct work_struct *work)
        pm8001_dev = pw->data; /* Most stash device structure */
        if ((pm8001_dev == NULL)
         || ((pw->handler != IO_XFER_ERROR_BREAK)
-         && (pm8001_dev->dev_type == NO_DEVICE))) {
+         && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
                kfree(pw);
                return;
        }
@@ -1596,7 +1670,7 @@ static void pm8001_work_fn(struct work_struct *work)
        }       break;
        case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
                dev = pm8001_dev->sas_device;
-               pm8001_I_T_nexus_reset(dev);
+               pm8001_I_T_nexus_event_handler(dev);
                break;
        case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
                dev = pm8001_dev->sas_device;
@@ -1614,7 +1688,7 @@ static void pm8001_work_fn(struct work_struct *work)
        kfree(pw);
 }
 
-static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
+int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
                               int handler)
 {
        struct pm8001_work *pw;
@@ -1633,6 +1707,123 @@ static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
        return ret;
 }
 
+static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct task_abort_req task_abort;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_ABORT;
+       int ret;
+
+       if (!pm8001_ha_dev) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+               return;
+       }
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
+                                               "allocate task\n"));
+               return;
+       }
+
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res)
+               return;
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       memset(&task_abort, 0, sizeof(task_abort));
+       task_abort.abort_all = cpu_to_le32(1);
+       task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       task_abort.tag = cpu_to_le32(ccb_tag);
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+
+}
+
+static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       struct sata_start_req sata_cmd;
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct host_to_dev_fis fis;
+       struct domain_device *dev;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate task !!!\n"));
+               return;
+       }
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate tag !!!\n"));
+               return;
+       }
+
+       /* allocate the domain device ourselves, as libsas
+        * is not going to provide one
+        */
+       dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
+       if (!dev) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Domain device cannot be allocated\n"));
+               sas_free_task(task);
+               return;
+       } else {
+               task->dev = dev;
+               task->dev->lldd_dev = pm8001_ha_dev;
+       }
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
+       pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
+       pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
+
+       memset(&sata_cmd, 0, sizeof(sata_cmd));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       /* construct read log FIS */
+       memset(&fis, 0, sizeof(struct host_to_dev_fis));
+       fis.fis_type = 0x27;
+       fis.flags = 0x80;
+       fis.command = ATA_CMD_READ_LOG_EXT;
+       fis.lbal = 0x10;
+       fis.sector_count = 0x1;
+
+       sata_cmd.tag = cpu_to_le32(ccb_tag);
+       sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
+       memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
+
+       res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+
+}
+
 /**
  * mpi_ssp_completion- process the event that FW response to the SSP request.
  * @pm8001_ha: our hba card information
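pm8001_send_read_log(), added above, issues a READ LOG EXT for log page 0x10 after an NCQ error; that is the standard SATA way to learn which queued command failed before aborting the rest. Below is a standalone sketch of the FIS it fills in, with the field meanings spelled out. The struct is an abbreviated stand-in for the kernel's struct host_to_dev_fis, and 0x2f is my reading of ATA_CMD_READ_LOG_EXT:

    #include <stdio.h>
    #include <string.h>

    struct h2d_fis_sketch {
            unsigned char fis_type;      /* 0x27: register FIS, host to device  */
            unsigned char flags;         /* 0x80: C bit set, i.e. a new command */
            unsigned char command;       /* READ LOG EXT                        */
            unsigned char lbal;          /* log address 0x10: NCQ error log     */
            unsigned char sector_count;  /* one 512-byte log page               */
    };

    int main(void)
    {
            struct h2d_fis_sketch fis;

            memset(&fis, 0, sizeof(fis));
            fis.fis_type = 0x27;
            fis.flags = 0x80;
            fis.command = 0x2f;          /* ATA_CMD_READ_LOG_EXT */
            fis.lbal = 0x10;
            fis.sector_count = 0x1;

            printf("cmd=0x%02x log=0x%02x count=%u\n",
                   fis.command, fis.lbal, fis.sector_count);
            return 0;
    }
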
@@ -1867,7 +2058,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
                break;
        }
        PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk("scsi_status = %x \n ",
+               pm8001_printk("scsi_status = %x\n ",
                psspPayload->ssp_resp_iu.status));
        spin_lock_irqsave(&t->task_state_lock, flags);
        t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
@@ -2096,16 +2287,44 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        status = le32_to_cpu(psataPayload->status);
        tag = le32_to_cpu(psataPayload->tag);
 
+       if (!tag) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("tag null\n"));
+               return;
+       }
        ccb = &pm8001_ha->ccb_info[tag];
        param = le32_to_cpu(psataPayload->param);
-       t = ccb->task;
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("ccb null\n"));
+               return;
+       }
+
+       if (t) {
+               if (t->dev && (t->dev->lldd_dev))
+                       pm8001_dev = t->dev->lldd_dev;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task null\n"));
+               return;
+       }
+
+       if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
+               && unlikely(!t || !t->lldd_task || !t->dev)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task or dev null\n"));
+               return;
+       }
+
        ts = &t->task_status;
-       pm8001_dev = ccb->device;
-       if (status)
+       if (!ts) {
                PM8001_FAIL_DBG(pm8001_ha,
-                       pm8001_printk("sata IO status 0x%x\n", status));
-       if (unlikely(!t || !t->lldd_task || !t->dev))
+                       pm8001_printk("ts null\n"));
                return;
+       }
 
        switch (status) {
        case IO_SUCCESS:
@@ -2113,6 +2332,19 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
                if (param == 0) {
                        ts->resp = SAS_TASK_COMPLETE;
                        ts->stat = SAM_STAT_GOOD;
+                       /* check if response is for SEND READ LOG */
+                       if (pm8001_dev &&
+                               (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
+                               /* set new bit for abort_all */
+                               pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
+                               /* clear bit for read log */
+                               pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+                               pm8001_send_abort_all(pm8001_ha, pm8001_dev);
+                               /* Free the tag */
+                               pm8001_tag_free(pm8001_ha, tag);
+                               sas_free_task(t);
+                               return;
+                       }
                } else {
                        u8 len;
                        ts->resp = SAS_TASK_COMPLETE;
@@ -2423,6 +2655,29 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
        u32 dev_id = le32_to_cpu(psataPayload->device_id);
        unsigned long flags;
 
+       ccb = &pm8001_ha->ccb_info[tag];
+
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("No CCB !!!. returning\n"));
+       }
+       if (event)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("SATA EVENT 0x%x\n", event));
+
+       /* Check if this is NCQ error */
+       if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+               /* find device using device id */
+               pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
+               /* send read log extension */
+               if (pm8001_dev)
+                       pm8001_send_read_log(pm8001_ha, pm8001_dev);
+               return;
+       }
+
        ccb = &pm8001_ha->ccb_info[tag];
        t = ccb->task;
        pm8001_dev = ccb->device;
@@ -2432,9 +2687,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
        if (unlikely(!t || !t->lldd_task || !t->dev))
                return;
        ts = &t->task_status;
-       PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk("port_id = %x,device_id = %x\n",
-               port_id, dev_id));
+       PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+               "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
+               port_id, dev_id, tag, event));
        switch (event) {
        case IO_OVERFLOW:
                PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
@@ -2822,8 +3077,8 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        }
 }
 
-static void
-mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
+               void *piomb)
 {
        struct set_dev_state_resp *pPayload =
                (struct set_dev_state_resp *)(piomb + 4);
@@ -2843,8 +3098,7 @@ mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static void
-mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct get_nvm_data_resp *pPayload =
                (struct get_nvm_data_resp *)(piomb + 4);
@@ -2863,8 +3117,8 @@ mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static void
-mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void
+pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct fw_control_ex    *fw_control_context;
        struct get_nvm_data_resp *pPayload =
@@ -2925,7 +3179,7 @@ mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct local_phy_ctl_resp *pPayload =
                (struct local_phy_ctl_resp *)(piomb + 4);
@@ -2954,7 +3208,7 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
  * while receive a broadcast(change) primitive just tell the sas
  * layer to discover the changed domain rather than the whole domain.
  */
-static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
+void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
 {
        struct pm8001_phy *phy = &pm8001_ha->phy[i];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -2988,7 +3242,7 @@ static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
 }
 
 /* Get the link rate speed  */
-static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
+void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
 {
        struct sas_phy *sas_phy = phy->sas_phy.phy;
 
@@ -3025,7 +3279,7 @@ static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
  * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
  * buffer.
  */
-static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
+void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
        u8 *sas_addr)
 {
        if (phy->sas_phy.frame_rcvd[0] == 0x34
@@ -3067,7 +3321,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
                ((phyId & 0x0F) << 4) | (port_id & 0x0F));
        payload.param0 = cpu_to_le32(param0);
        payload.param1 = cpu_to_le32(param1);
-       mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
 }
 
 static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3112,19 +3366,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
                pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
                        PHY_NOTIFY_ENABLE_SPINUP);
                port->port_attached = 1;
-               get_lrate_mode(phy, link_rate);
+               pm8001_get_lrate_mode(phy, link_rate);
                break;
        case SAS_EDGE_EXPANDER_DEVICE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("expander device.\n"));
                port->port_attached = 1;
-               get_lrate_mode(phy, link_rate);
+               pm8001_get_lrate_mode(phy, link_rate);
                break;
        case SAS_FANOUT_EXPANDER_DEVICE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("fanout expander device.\n"));
                port->port_attached = 1;
-               get_lrate_mode(phy, link_rate);
+               pm8001_get_lrate_mode(phy, link_rate);
                break;
        default:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3179,7 +3433,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
                " phy id = %d\n", port_id, phy_id));
        port->port_state =  portstate;
        port->port_attached = 1;
-       get_lrate_mode(phy, link_rate);
+       pm8001_get_lrate_mode(phy, link_rate);
        phy->phy_type |= PORT_TYPE_SATA;
        phy->phy_attached = 1;
        phy->sas_phy.oob_mode = SATA_OOB_MODE;
@@ -3189,7 +3443,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
                sizeof(struct dev_to_host_fis));
        phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
        phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
-       phy->identify.device_type = SATA_DEV;
+       phy->identify.device_type = SAS_SATA_DEV;
        pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
        spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
        pm8001_bytes_dmaed(pm8001_ha, phy_id);
@@ -3260,7 +3514,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
 }
 
 /**
- * mpi_reg_resp -process register device ID response.
+ * pm8001_mpi_reg_resp - process register device ID response.
  * @pm8001_ha: our hba card information
  * @piomb: IO message buffer
  *
@@ -3269,7 +3523,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
  * has assigned, from now,inter-communication with FW is no longer using the
  * SAS address, use device ID which FW assigned.
  */
-static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        u32 status;
        u32 device_id;
@@ -3331,7 +3585,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        return 0;
 }
 
-static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        u32 status;
        u32 device_id;
@@ -3347,8 +3601,13 @@ static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        return 0;
 }
 
-static int
-mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+/**
+ * pm8001_mpi_fw_flash_update_resp - Response from FW for flash update command.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
+               void *piomb)
 {
        u32 status;
        struct fw_control_ex    fw_control_context;
@@ -3403,10 +3662,6 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
                break;
        }
        ccb->fw_control_context->fw_control->retcode = status;
-       pci_free_consistent(pm8001_ha->pdev,
-                       fw_control_context.len,
-                       fw_control_context.virtAddr,
-                       fw_control_context.phys_addr);
        complete(pm8001_ha->nvmd_completion);
        ccb->task = NULL;
        ccb->ccb_tag = 0xFFFFFFFF;
@@ -3414,8 +3669,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        return 0;
 }
 
-static int
-mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 {
        u32 status;
        int i;
@@ -3431,8 +3685,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
        return 0;
 }
 
-static int
-mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct sas_task *t;
        struct pm8001_ccb_info *ccb;
@@ -3440,19 +3693,29 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        u32 status ;
        u32 tag, scp;
        struct task_status_struct *ts;
+       struct pm8001_device *pm8001_dev;
 
        struct task_abort_resp *pPayload =
                (struct task_abort_resp *)(piomb + 4);
 
        status = le32_to_cpu(pPayload->status);
        tag = le32_to_cpu(pPayload->tag);
+       if (!tag) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk(" TAG NULL. RETURNING !!!"));
+               return -1;
+       }
+
        scp = le32_to_cpu(pPayload->scp);
        ccb = &pm8001_ha->ccb_info[tag];
        t = ccb->task;
-       PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk(" status = 0x%x\n", status));
-       if (t == NULL)
+       pm8001_dev = ccb->device; /* retrieve device */
+
+       if (!t) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk(" TASK NULL. RETURNING !!!"));
                return -1;
+       }
        ts = &t->task_status;
        if (status != 0)
                PM8001_FAIL_DBG(pm8001_ha,
@@ -3476,7 +3739,15 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        spin_unlock_irqrestore(&t->task_state_lock, flags);
        pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
        mb();
-       t->task_done(t);
+
+       if ((pm8001_dev->id & NCQ_ABORT_ALL_FLAG) && t) {
+               pm8001_tag_free(pm8001_ha, tag);
+               sas_free_task(t);
+               /* clear the flag */
+               pm8001_dev->id &= 0xBFFFFFFF;
+       } else
+               t->task_done(t);
+
        return 0;
 }
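
The abort-response hunk above piggybacks transient NCQ state on the high bits
of pm8001_dev->id (NCQ_ABORT_ALL_FLAG is cleared with "id &= 0xBFFFFFFF").
Below is a minimal standalone sketch of that flag-in-an-id idiom; the bit
values are assumptions inferred from the clear masks in this patch, not taken
from the driver headers.

    #include <stdint.h>
    #include <stdio.h>

    /* assumed bit positions, inferred from the 0xDFFFFFFF/0xBFFFFFFF masks */
    #define NCQ_READ_LOG_FLAG   0x20000000u   /* cleared by id &= 0xDFFFFFFF */
    #define NCQ_ABORT_ALL_FLAG  0x40000000u   /* cleared by id &= 0xBFFFFFFF */

    int main(void)
    {
        uint32_t id = 42;                     /* low bits: device index */

        id |= NCQ_ABORT_ALL_FLAG;             /* mark: abort-all in flight */
        printf("abort pending: %s\n",
               (id & NCQ_ABORT_ALL_FLAG) ? "yes" : "no");

        id &= ~NCQ_ABORT_ALL_FLAG;            /* same effect as &= 0xBFFFFFFF */
        printf("index %u, abort pending: %s\n",
               (unsigned)(id & 0x0FFFFFFFu),
               (id & NCQ_ABORT_ALL_FLAG) ? "yes" : "no");
        return 0;
    }
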
 
@@ -3727,17 +3998,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_LOCAL_PHY_CNTRL:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
-               mpi_local_phy_ctl(pm8001_ha, piomb);
+               pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
                break;
        case OPC_OUB_DEV_REGIST:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_DEV_REGIST\n"));
-               mpi_reg_resp(pm8001_ha, piomb);
+               pm8001_mpi_reg_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_DEREG_DEV:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("unregister the device\n"));
-               mpi_dereg_resp(pm8001_ha, piomb);
+               pm8001_mpi_dereg_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GET_DEV_HANDLE:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3775,7 +4046,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_FW_FLASH_UPDATE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
-               mpi_fw_flash_update_resp(pm8001_ha, piomb);
+               pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GPIO_RESPONSE:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3788,17 +4059,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_GENERAL_EVENT:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
-               mpi_general_event(pm8001_ha, piomb);
+               pm8001_mpi_general_event(pm8001_ha, piomb);
                break;
        case OPC_OUB_SSP_ABORT_RSP:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
-               mpi_task_abort_resp(pm8001_ha, piomb);
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_SATA_ABORT_RSP:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
-               mpi_task_abort_resp(pm8001_ha, piomb);
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_SAS_DIAG_MODE_START_END:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3823,17 +4094,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_SMP_ABORT_RSP:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
-               mpi_task_abort_resp(pm8001_ha, piomb);
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GET_NVMD_DATA:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
-               mpi_get_nvmd_resp(pm8001_ha, piomb);
+               pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_SET_NVMD_DATA:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
-               mpi_set_nvmd_resp(pm8001_ha, piomb);
+               pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_DEVICE_HANDLE_REMOVAL:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3842,7 +4113,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_SET_DEVICE_STATE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
-               mpi_set_dev_state_resp(pm8001_ha, piomb);
+               pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GET_DEVICE_STATE:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3864,7 +4135,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        }
 }
 
-static int process_oq(struct pm8001_hba_info *pm8001_ha)
+static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
        struct outbound_queue_table *circularQ;
        void *pMsg1 = NULL;
@@ -3873,14 +4144,15 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
        unsigned long flags;
 
        spin_lock_irqsave(&pm8001_ha->lock, flags);
-       circularQ = &pm8001_ha->outbnd_q_tbl[0];
+       circularQ = &pm8001_ha->outbnd_q_tbl[vec];
        do {
-               ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+               ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
                if (MPI_IO_STATUS_SUCCESS == ret) {
                        /* process the outbound message */
                        process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
                        /* free the message from the outbound circular buffer */
-                       mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
+                       pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
+                                                       circularQ, bc);
                }
                if (MPI_IO_STATUS_BUSY == ret) {
                        /* Update the producer index from SPC */
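
process_oq() now takes the interrupt vector and drains the matching outbound
ring: consume one message, dispatch it, then free the slot so the firmware
can reuse it, repeating until the queue reports empty/busy. A simplified
single-ring consumer in plain C follows; the queue layout and helper names
are invented for the sketch (the real MPI queues keep producer/consumer
indices in shared memory and chip registers).

    #include <stdint.h>
    #include <stdio.h>

    #define QDEPTH 8

    struct out_queue {
        uint32_t msgs[QDEPTH];     /* one word per "IOMB" in this sketch */
        uint32_t producer_idx;     /* advanced by the producer (firmware) */
        uint32_t consumer_idx;     /* advanced by us after freeing a slot */
    };

    static int consume(struct out_queue *q, uint32_t *msg)
    {
        if (q->consumer_idx == q->producer_idx)
            return -1;                          /* queue empty */
        *msg = q->msgs[q->consumer_idx % QDEPTH];
        return 0;
    }

    static void free_slot(struct out_queue *q)
    {
        q->consumer_idx++;    /* real driver also writes CI back to the chip */
    }

    static void process_oq(struct out_queue *q)
    {
        uint32_t msg;

        while (consume(q, &msg) == 0) {
            printf("processing iomb 0x%08x\n", (unsigned)msg); /* dispatcher */
            free_slot(q);
        }
    }

    int main(void)
    {
        struct out_queue q = { .msgs = { 0x11, 0x22, 0x33 }, .producer_idx = 3 };

        process_oq(&q);
        return 0;
    }
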
@@ -3903,7 +4175,7 @@ static const u8 data_dir_flags[] = {
        [PCI_DMA_FROMDEVICE]    = DATA_DIR_IN,/* INBOUND */
        [PCI_DMA_NONE]          = DATA_DIR_NONE,/* NO TRANSFER */
 };
-static void
+void
 pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
 {
        int i;
@@ -3978,7 +4250,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
        smp_cmd.long_smp_req.long_resp_size =
                cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
        build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
-       mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
        return 0;
 
 err_out_2:
@@ -4042,7 +4314,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
                ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
                ssp_cmd.esgl = 0;
        }
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0);
        return ret;
 }
 
@@ -4060,6 +4332,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
        u32 ATAP = 0x0;
        u32 dir;
        struct inbound_queue_table *circularQ;
+       unsigned long flags;
        u32  opc = OPC_INB_SATA_HOST_OPSTART;
        memset(&sata_cmd, 0, sizeof(sata_cmd));
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
@@ -4080,8 +4353,10 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
                        PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
                }
        }
-       if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
+       if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
+               task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
                ncg_tag = hdr_tag;
+       }
        dir = data_dir_flags[task->data_dir] << 8;
        sata_cmd.tag = cpu_to_le32(tag);
        sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
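
The added line stores the NCQ tag in bits 7:3 of the host-to-device FIS
sector count field, which is where FPDMA READ/WRITE commands carry the queue
tag. A rough standalone sketch of that encoding follows; the struct below is
a toy fragment, not the full FIS layout.

    #include <stdint.h>
    #include <stdio.h>

    /* toy H2D FIS fragment: only the field relevant to NCQ tagging */
    struct h2d_fis {
        uint8_t command;       /* e.g. 0x60 READ FPDMA QUEUED */
        uint8_t sector_count;  /* bits 7:3 carry the NCQ tag */
    };

    static void set_ncq_tag(struct h2d_fis *fis, uint8_t tag)
    {
        fis->sector_count &= 0x07;                 /* keep low bits untouched */
        fis->sector_count |= (uint8_t)(tag << 3);  /* tag goes in bits 7:3 */
    }

    int main(void)
    {
        struct h2d_fis fis = { .command = 0x60, .sector_count = 0 };

        set_ncq_tag(&fis, 17);   /* NCQ tags range 0..31 */
        printf("sector_count = 0x%02x\n", (unsigned)fis.sector_count); /* 0x88 */
        return 0;
    }
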
@@ -4112,7 +4387,55 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
                sata_cmd.len = cpu_to_le32(task->total_xfer_len);
                sata_cmd.esgl = 0;
        }
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd);
+
+       /* Check for read log for failed drive and return */
+       if (sata_cmd.sata_fis.command == 0x2f) {
+               if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
+                       struct task_status_struct *ts;
+
+                       pm8001_ha_dev->id &= 0xDFFFFFFF;
+                       ts = &task->task_status;
+
+                       spin_lock_irqsave(&task->task_state_lock, flags);
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+                       task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+                       task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+                       task->task_state_flags |= SAS_TASK_STATE_DONE;
+                       if (unlikely((task->task_state_flags &
+                                       SAS_TASK_STATE_ABORTED))) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("task 0x%p resp 0x%x "
+                                       " stat 0x%x but aborted by upper layer "
+                                       "\n", task, ts->resp, ts->stat));
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                       } else if (task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/* ditto */
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       } else if (!task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/*ditto*/
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       }
+               }
+       }
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
        return ret;
 }
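
When a READ LOG EXT (command 0x2f) is issued to a device the driver has
flagged, the hunk above completes the task locally, and task_done() is only
invoked after pm8001_ha->lock has been dropped, since the completion callback
may re-enter the driver and take the same lock. A generic userspace sketch of
that drop-the-lock-before-calling-back pattern is shown here (pthread-based,
helper names invented).

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ha_lock = PTHREAD_MUTEX_INITIALIZER;

    /* completion callback that re-enters the "driver" and takes ha_lock */
    static void task_done(void)
    {
        pthread_mutex_lock(&ha_lock);
        printf("callback ran, lock re-taken safely\n");
        pthread_mutex_unlock(&ha_lock);
    }

    static void complete_locally(void)
    {
        pthread_mutex_lock(&ha_lock);
        /* ... free the ccb, update bookkeeping under the lock ... */

        pthread_mutex_unlock(&ha_lock);   /* drop before calling back */
        task_done();                      /* would deadlock if still held */
        pthread_mutex_lock(&ha_lock);     /* re-take, as the driver does */

        pthread_mutex_unlock(&ha_lock);
    }

    int main(void)
    {
        complete_locally();               /* build with -pthread */
        return 0;
    }
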
 
@@ -4142,12 +4465,12 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
        payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
                LINKMODE_AUTO | LINKRATE_15 |
                LINKRATE_30 | LINKRATE_60 | phy_id);
-       payload.sas_identify.dev_type = SAS_END_DEV;
+       payload.sas_identify.dev_type = SAS_END_DEVICE;
        payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
        memcpy(payload.sas_identify.sas_addr,
                pm8001_ha->sas_addr, SAS_ADDR_SIZE);
        payload.sas_identify.phy_id = phy_id;
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
        return ret;
 }
 
@@ -4157,7 +4480,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
  * @num: the inbound queue number
  * @phy_id: the phy id which we wanted to start up.
  */
-static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
        u8 phy_id)
 {
        struct phy_stop_req payload;
@@ -4169,12 +4492,12 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
        memset(&payload, 0, sizeof(payload));
        payload.tag = cpu_to_le32(tag);
        payload.phy_id = cpu_to_le32(phy_id);
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
        return ret;
 }
 
 /**
- * see comments on mpi_reg_resp.
+ * see comments on pm8001_mpi_reg_resp.
  */
 static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev, u32 flag)
@@ -4204,11 +4527,11 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
        if (flag == 1)
                stp_sspsmp_sata = 0x02; /*direct attached sata */
        else {
-               if (pm8001_dev->dev_type == SATA_DEV)
+               if (pm8001_dev->dev_type == SAS_SATA_DEV)
                        stp_sspsmp_sata = 0x00; /* stp*/
-               else if (pm8001_dev->dev_type == SAS_END_DEV ||
-                       pm8001_dev->dev_type == EDGE_DEV ||
-                       pm8001_dev->dev_type == FANOUT_DEV)
+               else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
+                       pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                       pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                        stp_sspsmp_sata = 0x01; /*ssp or smp*/
        }
        if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
@@ -4228,14 +4551,14 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
                cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
        memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
                SAS_ADDR_SIZE);
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return rc;
 }
 
 /**
- * see comments on mpi_reg_resp.
+ * see comments on pm8001_mpi_reg_resp.
  */
-static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
        u32 device_id)
 {
        struct dereg_dev_req payload;
@@ -4249,7 +4572,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
        payload.device_id = cpu_to_le32(device_id);
        PM8001_MSG_DBG(pm8001_ha,
                pm8001_printk("unregister device device_id = %d\n", device_id));
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return ret;
 }
 
@@ -4272,7 +4595,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
        payload.tag = cpu_to_le32(1);
        payload.phyop_phyid =
                cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return ret;
 }
 
@@ -4296,11 +4619,11 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
  * @stat: stat.
  */
 static irqreturn_t
-pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
-       pm8001_chip_interrupt_disable(pm8001_ha);
-       process_oq(pm8001_ha);
-       pm8001_chip_interrupt_enable(pm8001_ha);
+       pm8001_chip_interrupt_disable(pm8001_ha, vec);
+       process_oq(pm8001_ha, vec);
+       pm8001_chip_interrupt_enable(pm8001_ha, vec);
        return IRQ_HANDLED;
 }
 
@@ -4322,7 +4645,7 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
                task_abort.device_id = cpu_to_le32(dev_id);
                task_abort.tag = cpu_to_le32(cmd_tag);
        }
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
        return ret;
 }
 
@@ -4331,16 +4654,17 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
  * @task: the task we wanted to aborted.
  * @flag: the abort flag.
  */
-static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
 {
        u32 opc, device_id;
        int rc = TMF_RESP_FUNC_FAILED;
-       PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag"
-               " = %x", cmd_tag, task_tag));
-       if (pm8001_dev->dev_type == SAS_END_DEV)
+       PM8001_EH_DBG(pm8001_ha,
+               pm8001_printk("cmd_tag = %x, abort task tag = 0x%x",
+                       cmd_tag, task_tag));
+       if (pm8001_dev->dev_type == SAS_END_DEVICE)
                opc = OPC_INB_SSP_ABORT;
-       else if (pm8001_dev->dev_type == SATA_DEV)
+       else if (pm8001_dev->dev_type == SAS_SATA_DEV)
                opc = OPC_INB_SATA_ABORT;
        else
                opc = OPC_INB_SMP_ABORT;/* SMP */
@@ -4358,7 +4682,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
  * @ccb: the ccb information.
  * @tmf: task management function.
  */
-static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
 {
        struct sas_task *task = ccb->task;
@@ -4376,11 +4700,11 @@ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
        memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
        sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
        return ret;
 }
 
-static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        void *payload)
 {
        u32 opc = OPC_INB_GET_NVMD_DATA;
@@ -4397,7 +4721,7 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
        if (!fw_control_context)
                return -ENOMEM;
-       fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0];
+       fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
        fw_control_context->len = ioctl_payload->length;
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
        memset(&nvmd_req, 0, sizeof(nvmd_req));
@@ -4456,11 +4780,11 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        default:
                break;
        }
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
        return rc;
 }
 
-static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        void *payload)
 {
        u32 opc = OPC_INB_SET_NVMD_DATA;
@@ -4479,7 +4803,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
                return -ENOMEM;
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
        memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
-               ioctl_payload->func_specific,
+               &ioctl_payload->func_specific,
                ioctl_payload->length);
        memset(&nvmd_req, 0, sizeof(nvmd_req));
        rc = pm8001_tag_alloc(pm8001_ha, &tag);
@@ -4536,7 +4860,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        default:
                break;
        }
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
        return rc;
 }
 
@@ -4545,7 +4869,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
  * @pm8001_ha: our hba card information.
  * @fw_flash_updata_info: firmware flash update param
  */
-static int
+int
 pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
        void *fw_flash_updata_info, u32 tag)
 {
@@ -4567,11 +4891,11 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
                cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
        payload.sgl_addr_hi =
                cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return ret;
 }
 
-static int
+int
 pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        void *payload)
 {
@@ -4581,29 +4905,14 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        int rc;
        u32 tag;
        struct pm8001_ccb_info *ccb;
-       void *buffer = NULL;
-       dma_addr_t phys_addr;
-       u32 phys_addr_hi;
-       u32 phys_addr_lo;
+       void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr;
+       dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr;
        struct pm8001_ioctl_payload *ioctl_payload = payload;
 
        fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
        if (!fw_control_context)
                return -ENOMEM;
-       fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0];
-       if (fw_control->len != 0) {
-               if (pm8001_mem_alloc(pm8001_ha->pdev,
-                       (void **)&buffer,
-                       &phys_addr,
-                       &phys_addr_hi,
-                       &phys_addr_lo,
-                       fw_control->len, 0) != 0) {
-                               PM8001_FAIL_DBG(pm8001_ha,
-                                       pm8001_printk("Mem alloc failure\n"));
-                               kfree(fw_control_context);
-                               return -ENOMEM;
-               }
-       }
+       fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
        memcpy(buffer, fw_control->buffer, fw_control->len);
        flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
        flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
@@ -4613,6 +4922,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        flash_update_info.total_image_len = fw_control->size;
        fw_control_context->fw_control = fw_control;
        fw_control_context->virtAddr = buffer;
+       fw_control_context->phys_addr = phys_addr;
        fw_control_context->len = fw_control->len;
        rc = pm8001_tag_alloc(pm8001_ha, &tag);
        if (rc) {
@@ -4627,7 +4937,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        return rc;
 }
 
-static int
+int
 pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev, u32 state)
 {
@@ -4648,7 +4958,7 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
        payload.tag = cpu_to_le32(tag);
        payload.device_id = cpu_to_le32(pm8001_dev->device_id);
        payload.nds = cpu_to_le32(state);
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return rc;
 
 }
@@ -4673,7 +4983,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
        payload.SSAHOLT = cpu_to_le32(0xd << 25);
        payload.sata_hol_tmo = cpu_to_le32(80);
        payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return rc;
 
 }
@@ -4706,4 +5016,3 @@ const struct pm8001_dispatch pm8001_8001_dispatch = {
        .set_dev_state_req      = pm8001_chip_set_dev_state_req,
        .sas_re_init_req        = pm8001_chip_sas_re_initialization,
 };
-
index d437309cb1e1c30de4f75aaafc7fb8dd0884a724..d7c1e2034226ea7667201a2a1474400f52994b5d 100644
 #define LINKRATE_30                    (0x02 << 8)
 #define LINKRATE_60                    (0x04 << 8)
 
+/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
+#define GSM_SM_BASE                    0x4F0000
 struct mpi_msg_hdr{
        __le32  header; /* Bits [11:0]  - Message operation code */
        /* Bits [15:12] - Message Category */
@@ -298,7 +300,7 @@ struct local_phy_ctl_resp {
 
 
 #define OP_BITS 0x0000FF00
-#define ID_BITS 0x0000000F
+#define ID_BITS 0x000000FF
 
 /*
  * brief the data structure of PORT Control Command
index 3d5e522e00fc8ae93a4c9344731f960fa42c0006..e4b9bc7f5410fa5e38a9df530cba183652f1962a 100644
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
 
 static struct scsi_transport_template *pm8001_stt;
 
+/**
+ * Chip info structure identifying key chip capabilities: whether
+ * encryption is available, the number of phys, and the HW-specific dispatch
+ */
 static const struct pm8001_chip_info pm8001_chips[] = {
-       [chip_8001] = {  8, &pm8001_8001_dispatch,},
+       [chip_8001] = {0,  8, &pm8001_8001_dispatch,},
+       [chip_8008] = {0,  8, &pm8001_80xx_dispatch,},
+       [chip_8009] = {1,  8, &pm8001_80xx_dispatch,},
+       [chip_8018] = {0,  16, &pm8001_80xx_dispatch,},
+       [chip_8019] = {1,  16, &pm8001_80xx_dispatch,},
 };
 static int pm8001_id;
 
@@ -155,37 +163,75 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
 }
 
 #ifdef PM8001_USE_TASKLET
+
+/**
+ * pm8001_tasklet - tasklet for the 64 MSI-X interrupt handlers
+ * @opaque: the passed general host adapter struct
+ * Note: pm8001_tasklet is common for pm8001 & pm80xx
+ */
 static void pm8001_tasklet(unsigned long opaque)
 {
        struct pm8001_hba_info *pm8001_ha;
+       u32 vec;
        pm8001_ha = (struct pm8001_hba_info *)opaque;
        if (unlikely(!pm8001_ha))
                BUG_ON(1);
-       PM8001_CHIP_DISP->isr(pm8001_ha);
+       vec = pm8001_ha->int_vector;
+       PM8001_CHIP_DISP->isr(pm8001_ha, vec);
+}
+#endif
+
+static struct  pm8001_hba_info *outq_to_hba(u8 *outq)
+{
+       return container_of((outq - *outq), struct pm8001_hba_info, outq[0]);
 }
+
+/**
+ * pm8001_interrupt_handler_msix - main MSIX interrupt handler.
+ * It obtains the vector number and either schedules the tasklet
+ * (bottom half) or services the interrupt directly.
+ * @opaque: pointer to the per-HBA outbound queue/vector entry; the
+ * host structure is recovered from it.
+ */
+static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
+{
+       struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque);
+       u8 outq = *(u8 *)opaque;
+       irqreturn_t ret = IRQ_HANDLED;
+       if (unlikely(!pm8001_ha))
+               return IRQ_NONE;
+       if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+               return IRQ_NONE;
+       pm8001_ha->int_vector = outq;
+#ifdef PM8001_USE_TASKLET
+       tasklet_schedule(&pm8001_ha->tasklet);
+#else
+       ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq);
 #endif
+       return ret;
+}
 
+/**
+ * pm8001_interrupt_handler_intx - main INTx interrupt handler.
+ * @dev_id: sas_ha structure. The HBA is retrieved from the sas_ha structure.
+ */
 
- /**
-  * pm8001_interrupt - when HBA originate a interrupt,we should invoke this
-  * dispatcher to handle each case.
-  * @irq: irq number.
-  * @opaque: the passed general host adapter struct
-  */
-static irqreturn_t pm8001_interrupt(int irq, void *opaque)
+static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
 {
        struct pm8001_hba_info *pm8001_ha;
        irqreturn_t ret = IRQ_HANDLED;
-       struct sas_ha_struct *sha = opaque;
+       struct sas_ha_struct *sha = dev_id;
        pm8001_ha = sha->lldd_ha;
        if (unlikely(!pm8001_ha))
                return IRQ_NONE;
        if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
                return IRQ_NONE;
+
+       pm8001_ha->int_vector = 0;
 #ifdef PM8001_USE_TASKLET
        tasklet_schedule(&pm8001_ha->tasklet);
 #else
-       ret = PM8001_CHIP_DISP->isr(pm8001_ha);
+       ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
 #endif
        return ret;
 }
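
The new outq_to_hba() helper recovers the HBA structure from the per-vector
cookie passed to the MSI-X handler: each outq[i] slot stores its own index i,
so subtracting *outq bytes from the pointer lands on outq[0], and
container_of() walks back to the enclosing structure. A minimal userspace
sketch of the same pattern follows; the struct layout and field names here
are illustrative, not the driver's.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* same idea as the kernel macro: member pointer -> enclosing struct */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct hba {                 /* stand-in for struct pm8001_hba_info */
        int id;
        uint8_t outq[4];         /* outq[i] == i, one slot per MSI-X vector */
    };

    static struct hba *outq_to_hba(uint8_t *outq)
    {
        /* step back *outq bytes to reach outq[0], then back to the parent */
        return container_of(outq - *outq, struct hba, outq);
    }

    int main(void)
    {
        struct hba h = { .id = 7, .outq = { 0, 1, 2, 3 } };
        uint8_t *cookie = &h.outq[2];        /* what the IRQ handler receives */

        printf("recovered id=%d, vector=%u\n",
               outq_to_hba(cookie)->id, (unsigned)*cookie);
        return 0;
    }
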
@@ -195,10 +241,14 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque)
  * @pm8001_ha:our hba structure.
  *
  */
-static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
+                       const struct pci_device_id *ent)
 {
        int i;
        spin_lock_init(&pm8001_ha->lock);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("pm8001_alloc: PHY:%x\n",
+                               pm8001_ha->chip->n_phy));
        for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
                pm8001_phy_init(pm8001_ha, i);
                pm8001_ha->port[i].wide_port_phymap = 0;
@@ -222,30 +272,57 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
        pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
        pm8001_ha->memoryMap.region[IOP].alignment = 32;
 
-       /* MPI Memory region 3 for consumer Index of inbound queues */
-       pm8001_ha->memoryMap.region[CI].num_elements = 1;
-       pm8001_ha->memoryMap.region[CI].element_size = 4;
-       pm8001_ha->memoryMap.region[CI].total_len = 4;
-       pm8001_ha->memoryMap.region[CI].alignment = 4;
-
-       /* MPI Memory region 4 for producer Index of outbound queues */
-       pm8001_ha->memoryMap.region[PI].num_elements = 1;
-       pm8001_ha->memoryMap.region[PI].element_size = 4;
-       pm8001_ha->memoryMap.region[PI].total_len = 4;
-       pm8001_ha->memoryMap.region[PI].alignment = 4;
-
-       /* MPI Memory region 5 inbound queues */
-       pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE;
-       pm8001_ha->memoryMap.region[IB].element_size = 64;
-       pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64;
-       pm8001_ha->memoryMap.region[IB].alignment = 64;
-
-       /* MPI Memory region 6 outbound queues */
-       pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE;
-       pm8001_ha->memoryMap.region[OB].element_size = 64;
-       pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64;
-       pm8001_ha->memoryMap.region[OB].alignment = 64;
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+               /* MPI Memory region 3 for consumer Index of inbound queues */
+               pm8001_ha->memoryMap.region[CI+i].num_elements = 1;
+               pm8001_ha->memoryMap.region[CI+i].element_size = 4;
+               pm8001_ha->memoryMap.region[CI+i].total_len = 4;
+               pm8001_ha->memoryMap.region[CI+i].alignment = 4;
+
+               if ((ent->driver_data) != chip_8001) {
+                       /* MPI Memory region 5 inbound queues */
+                       pm8001_ha->memoryMap.region[IB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[IB+i].element_size = 128;
+                       pm8001_ha->memoryMap.region[IB+i].total_len =
+                                               PM8001_MPI_QUEUE * 128;
+                       pm8001_ha->memoryMap.region[IB+i].alignment = 128;
+               } else {
+                       pm8001_ha->memoryMap.region[IB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[IB+i].element_size = 64;
+                       pm8001_ha->memoryMap.region[IB+i].total_len =
+                                               PM8001_MPI_QUEUE * 64;
+                       pm8001_ha->memoryMap.region[IB+i].alignment = 64;
+               }
+       }
+
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+               /* MPI Memory region 4 for producer Index of outbound queues */
+               pm8001_ha->memoryMap.region[PI+i].num_elements = 1;
+               pm8001_ha->memoryMap.region[PI+i].element_size = 4;
+               pm8001_ha->memoryMap.region[PI+i].total_len = 4;
+               pm8001_ha->memoryMap.region[PI+i].alignment = 4;
+
+               if (ent->driver_data != chip_8001) {
+                       /* MPI Memory region 6 Outbound queues */
+                       pm8001_ha->memoryMap.region[OB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[OB+i].element_size = 128;
+                       pm8001_ha->memoryMap.region[OB+i].total_len =
+                                               PM8001_MPI_QUEUE * 128;
+                       pm8001_ha->memoryMap.region[OB+i].alignment = 128;
+               } else {
+                       /* MPI Memory region 6 Outbound queues */
+                       pm8001_ha->memoryMap.region[OB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[OB+i].element_size = 64;
+                       pm8001_ha->memoryMap.region[OB+i].total_len =
+                                               PM8001_MPI_QUEUE * 64;
+                       pm8001_ha->memoryMap.region[OB+i].alignment = 64;
+               }
 
+       }
        /* Memory region write DMA*/
        pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
        pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
@@ -264,6 +341,9 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
        pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
                sizeof(struct pm8001_ccb_info);
 
+       /* Memory region for fw flash */
+       pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
+
        for (i = 0; i < USI_MAX_MEMCNT; i++) {
                if (pm8001_mem_alloc(pm8001_ha->pdev,
                        &pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -281,7 +361,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
 
        pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
        for (i = 0; i < PM8001_MAX_DEVICES; i++) {
-               pm8001_ha->devices[i].dev_type = NO_DEVICE;
+               pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
                pm8001_ha->devices[i].id = i;
                pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
                pm8001_ha->devices[i].running_req = 0;
@@ -339,10 +419,12 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
                                ioremap(pm8001_ha->io_mem[logicalBar].membase,
                                pm8001_ha->io_mem[logicalBar].memsize);
                        PM8001_INIT_DBG(pm8001_ha,
-                               pm8001_printk("PCI: bar %d, logicalBar %d "
-                               "virt_addr=%lx,len=%d\n", bar, logicalBar,
-                               (unsigned long)
-                               pm8001_ha->io_mem[logicalBar].memvirtaddr,
+                               pm8001_printk("PCI: bar %d, logicalBar %d ",
+                               bar, logicalBar));
+                       PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                               "base addr %llx virt_addr=%llx len=%d\n",
+                               (u64)pm8001_ha->io_mem[logicalBar].membase,
+                               (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr,
                                pm8001_ha->io_mem[logicalBar].memsize));
                } else {
                        pm8001_ha->io_mem[logicalBar].membase   = 0;
@@ -361,8 +443,9 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
  * @shost: scsi host struct which has been initialized before.
  */
 static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
-                                               u32 chip_id,
-                                               struct Scsi_Host *shost)
+                                const struct pci_device_id *ent,
+                               struct Scsi_Host *shost)
+
 {
        struct pm8001_hba_info *pm8001_ha;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -374,7 +457,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
 
        pm8001_ha->pdev = pdev;
        pm8001_ha->dev = &pdev->dev;
-       pm8001_ha->chip_id = chip_id;
+       pm8001_ha->chip_id = ent->driver_data;
        pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
        pm8001_ha->irq = pdev->irq;
        pm8001_ha->sas = sha;
@@ -382,12 +465,22 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
        pm8001_ha->id = pm8001_id++;
        pm8001_ha->logging_level = 0x01;
        sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
+       /* IOMB size is 128 for 8088/89 controllers */
+       if (pm8001_ha->chip_id != chip_8001)
+               pm8001_ha->iomb_size = IOMB_SIZE_SPCV;
+       else
+               pm8001_ha->iomb_size = IOMB_SIZE_SPC;
+
 #ifdef PM8001_USE_TASKLET
+       /*
+        * Default tasklet, shared by the INTx handler and the
+        * first MSI-X vector.
+        */
        tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
-               (unsigned long)pm8001_ha);
+                       (unsigned long)pm8001_ha);
 #endif
        pm8001_ioremap(pm8001_ha);
-       if (!pm8001_alloc(pm8001_ha))
+       if (!pm8001_alloc(pm8001_ha, ent))
                return pm8001_ha;
        pm8001_free(pm8001_ha);
        return NULL;
@@ -512,21 +605,50 @@ static void  pm8001_post_sas_ha_init(struct Scsi_Host *shost,
  */
 static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
 {
-       u8 i;
+       u8 i, j;
 #ifdef PM8001_READ_VPD
+       /* For new SPC controllers WWN is stored in flash vpd
+       *  For SPC/SPCve controllers WWN is stored in EEPROM
+       *  For Older SPC WWN is stored in NVMD
+       */
        DECLARE_COMPLETION_ONSTACK(completion);
        struct pm8001_ioctl_payload payload;
+       u16 deviceid;
+       pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
        pm8001_ha->nvmd_completion = &completion;
-       payload.minor_function = 0;
-       payload.length = 128;
-       payload.func_specific = kzalloc(128, GFP_KERNEL);
+
+       if (pm8001_ha->chip_id == chip_8001) {
+               if (deviceid == 0x8081) {
+                       payload.minor_function = 4;
+                       payload.length = 4096;
+               } else {
+                       payload.minor_function = 0;
+                       payload.length = 128;
+               }
+       } else {
+               payload.minor_function = 1;
+               payload.length = 4096;
+       }
+       payload.offset = 0;
+       payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
        PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
        wait_for_completion(&completion);
+
+       for (i = 0, j = 0; i <= 7; i++, j++) {
+               if (pm8001_ha->chip_id == chip_8001) {
+                       if (deviceid == 0x8081)
+                               pm8001_ha->sas_addr[j] =
+                                       payload.func_specific[0x704 + i];
+               } else
+                       pm8001_ha->sas_addr[j] =
+                                       payload.func_specific[0x804 + i];
+       }
+
        for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
-               memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
-                       SAS_ADDR_SIZE);
+               memcpy(&pm8001_ha->phy[i].dev_sas_addr,
+                       pm8001_ha->sas_addr, SAS_ADDR_SIZE);
                PM8001_INIT_DBG(pm8001_ha,
-                       pm8001_printk("phy %d sas_addr = %016llx \n", i,
+                       pm8001_printk("phy %d sas_addr = %016llx\n", i,
                        pm8001_ha->phy[i].dev_sas_addr));
        }
 #else
@@ -547,31 +669,50 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
  * @chip_info: our ha struct.
  * @irq_handler: irq_handler
  */
-static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
-       irq_handler_t irq_handler)
+static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
 {
        u32 i = 0, j = 0;
-       u32 number_of_intr = 1;
+       u32 number_of_intr;
        int flag = 0;
        u32 max_entry;
        int rc;
+       static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
+
+       /* SPCv controllers support 64 MSI-X vectors */
+       if (pm8001_ha->chip_id == chip_8001) {
+               number_of_intr = 1;
+               flag |= IRQF_DISABLED;
+       } else {
+               number_of_intr = PM8001_MAX_MSIX_VEC;
+               flag &= ~IRQF_SHARED;
+               flag |= IRQF_DISABLED;
+       }
+
        max_entry = sizeof(pm8001_ha->msix_entries) /
                sizeof(pm8001_ha->msix_entries[0]);
-       flag |= IRQF_DISABLED;
        for (i = 0; i < max_entry ; i++)
                pm8001_ha->msix_entries[i].entry = i;
        rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
                number_of_intr);
        pm8001_ha->number_of_intr = number_of_intr;
        if (!rc) {
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "pci_enable_msix request ret:%d no of intr %d\n",
+                                       rc, pm8001_ha->number_of_intr));
+
+               for (i = 0; i < number_of_intr; i++)
+                       pm8001_ha->outq[i] = i;
+
                for (i = 0; i < number_of_intr; i++) {
+                       snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
+                                       DRV_NAME"%d", i);
                        if (request_irq(pm8001_ha->msix_entries[i].vector,
-                               irq_handler, flag, DRV_NAME,
-                               SHOST_TO_SAS_HA(pm8001_ha->shost))) {
+                               pm8001_interrupt_handler_msix, flag,
+                               intr_drvname[i], &pm8001_ha->outq[i])) {
                                for (j = 0; j < i; j++)
                                        free_irq(
                                        pm8001_ha->msix_entries[j].vector,
-                                       SHOST_TO_SAS_HA(pm8001_ha->shost));
+                                       &pm8001_ha->outq[j]);
                                pci_disable_msix(pm8001_ha->pdev);
                                break;
                        }
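
pm8001_setup_msix() registers one handler per vector, each with its own name
string and a per-vector cookie (&pm8001_ha->outq[i]), and if any request_irq()
fails it releases the vectors registered so far before disabling MSI-X. The
register-or-unwind shape of that loop, as a standalone sketch
(register_vec()/unregister_vec() are placeholders, not kernel APIs):

    #include <stdio.h>

    #define NVEC 4

    /* placeholder for request_irq(); vector 2 fails for the demo */
    static int register_vec(int i) { return (i == 2) ? -1 : 0; }
    static void unregister_vec(int i) { printf("unregistered vector %d\n", i); }

    static int setup_vectors(void)
    {
        int i, j;

        for (i = 0; i < NVEC; i++) {
            if (register_vec(i)) {
                /* unwind everything registered so far */
                for (j = 0; j < i; j++)
                    unregister_vec(j);
                return -1;
            }
            printf("registered vector %d\n", i);
        }
        return 0;
    }

    int main(void)
    {
        if (setup_vectors())
            printf("setup failed, all vectors released\n");
        return 0;
    }
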
@@ -588,22 +729,24 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
 static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
 {
        struct pci_dev *pdev;
-       irq_handler_t irq_handler = pm8001_interrupt;
        int rc;
 
        pdev = pm8001_ha->pdev;
 
 #ifdef PM8001_USE_MSIX
        if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
-               return pm8001_setup_msix(pm8001_ha, irq_handler);
-       else
+               return pm8001_setup_msix(pm8001_ha);
+       else {
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("MSIX not supported!!!\n"));
                goto intx;
+       }
 #endif
 
 intx:
        /* initialize the INT-X interrupt */
-       rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
-               SHOST_TO_SAS_HA(pm8001_ha->shost));
+       rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
+               DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
        return rc;
 }
 
@@ -621,12 +764,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
 {
        unsigned int rc;
        u32     pci_reg;
+       u8      i = 0;
        struct pm8001_hba_info *pm8001_ha;
        struct Scsi_Host *shost = NULL;
        const struct pm8001_chip_info *chip;
 
        dev_printk(KERN_INFO, &pdev->dev,
-               "pm8001: driver version %s\n", DRV_VERSION);
+               "pm80xx: driver version %s\n", DRV_VERSION);
        rc = pci_enable_device(pdev);
        if (rc)
                goto err_out_enable;
@@ -665,25 +809,39 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
                goto err_out_free;
        }
        pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
-       pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost);
+       /* ent->driver_data is used to differentiate between controllers */
+       pm8001_ha = pm8001_pci_alloc(pdev, ent, shost);
        if (!pm8001_ha) {
                rc = -ENOMEM;
                goto err_out_free;
        }
        list_add_tail(&pm8001_ha->list, &hba_list);
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
        rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
-       if (rc)
+       if (rc) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "chip_init failed [ret: %d]\n", rc));
                goto err_out_ha_free;
+       }
 
        rc = scsi_add_host(shost, &pdev->dev);
        if (rc)
                goto err_out_ha_free;
        rc = pm8001_request_irq(pm8001_ha);
-       if (rc)
+       if (rc) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "pm8001_request_irq failed [ret: %d]\n", rc));
                goto err_out_shost;
+       }
+
+       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
+       if (pm8001_ha->chip_id != chip_8001) {
+               for (i = 1; i < pm8001_ha->number_of_intr; i++)
+                       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
+               /* setup thermal configuration. */
+               pm80xx_set_thermal_config(pm8001_ha);
+       }
 
-       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
        pm8001_init_sas_add(pm8001_ha);
        pm8001_post_sas_ha_init(shost, chip);
        rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
@@ -719,14 +877,15 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
        sas_remove_host(pm8001_ha->shost);
        list_del(&pm8001_ha->list);
        scsi_remove_host(pm8001_ha->shost);
-       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
 
 #ifdef PM8001_USE_MSIX
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
                synchronize_irq(pm8001_ha->msix_entries[i].vector);
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               free_irq(pm8001_ha->msix_entries[i].vector, sha);
+               free_irq(pm8001_ha->msix_entries[i].vector,
+                               &pm8001_ha->outq[i]);
        pci_disable_msix(pdev);
 #else
        free_irq(pm8001_ha->irq, sha);
@@ -763,13 +922,14 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                printk(KERN_ERR " PCI PM not supported\n");
                return -ENODEV;
        }
-       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
 #ifdef PM8001_USE_MSIX
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
                synchronize_irq(pm8001_ha->msix_entries[i].vector);
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               free_irq(pm8001_ha->msix_entries[i].vector, sha);
+               free_irq(pm8001_ha->msix_entries[i].vector,
+                               &pm8001_ha->outq[i]);
        pci_disable_msix(pdev);
 #else
        free_irq(pm8001_ha->irq, sha);
@@ -798,6 +958,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
        struct pm8001_hba_info *pm8001_ha;
        int rc;
+       u8 i = 0;
        u32 device_state;
        pm8001_ha = sha->lldd_ha;
        device_state = pdev->current_state;
@@ -820,19 +981,33 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
        if (rc)
                goto err_out_disable;
 
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       /* chip soft rst only for spc */
+       if (pm8001_ha->chip_id == chip_8001) {
+               PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("chip soft reset successful\n"));
+       }
        rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
        if (rc)
                goto err_out_disable;
-       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
+
+       /* disable all the interrupt bits */
+       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+
        rc = pm8001_request_irq(pm8001_ha);
        if (rc)
                goto err_out_disable;
-       #ifdef PM8001_USE_TASKLET
+#ifdef PM8001_USE_TASKLET
+       /* default tasklet, shared by the INTx handler and the
+        * first MSI-X vector */
        tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
-                   (unsigned long)pm8001_ha);
-       #endif
-       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
+                       (unsigned long)pm8001_ha);
+#endif
+       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
+       if (pm8001_ha->chip_id != chip_8001) {
+               for (i = 1; i < pm8001_ha->number_of_intr; i++)
+                       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
+       }
        scsi_unblock_requests(pm8001_ha->shost);
        return 0;
 
@@ -843,14 +1018,45 @@ err_out_enable:
        return rc;
 }
 
+/* PCI device ID table: vendor/device (and optional subsystem) IDs
+ * mapped to the driver_data value identifying each controller type
+ */
 static struct pci_device_id pm8001_pci_table[] = {
-       {
-               PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
-       },
+       { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
        {
                PCI_DEVICE(0x117c, 0x0042),
                .driver_data = chip_8001
        },
+       /* Support for SPC/SPCv/SPCve controllers */
+       { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
        {} /* terminate list */
 };
 
@@ -870,7 +1076,7 @@ static int __init pm8001_init(void)
 {
        int rc = -ENOMEM;
 
-       pm8001_wq = alloc_workqueue("pm8001", 0, 0);
+       pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
        if (!pm8001_wq)
                goto err;
 
@@ -902,7 +1108,8 @@ module_init(pm8001_init);
 module_exit(pm8001_exit);
 
 MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
-MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver");
+MODULE_DESCRIPTION(
+               "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver");
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
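
The expanded pm8001_pci_table above keys every PCI (and subsystem) ID match to a chip enum through .driver_data, so probe-time code can select per-controller behaviour without re-inspecting the IDs. The stand-alone sketch below (plain user-space C, hypothetical type and capability values, not the driver's actual tables) illustrates that driver_data-as-index pattern; the 8/16-phy split follows the "16 port controllers" comment further down in this commit, and the SPC/SPCv/SPCve naming follows the comments in the table itself.

#include <stdio.h>

enum chip_flavour { CHIP_8001, CHIP_8008, CHIP_8009, CHIP_8018, CHIP_8019 };

struct chip_caps {
	const char *name;       /* illustrative label only */
	unsigned int n_phy;     /* 8-port vs 16-port parts */
	int multi_vector;       /* SPCv/ve parts use several MSI-X vectors */
};

static const struct chip_caps caps[] = {
	[CHIP_8001] = { "SPC 8001",    8, 0 },
	[CHIP_8008] = { "SPCv 8008",   8, 1 },
	[CHIP_8009] = { "SPCve 8009",  8, 1 },
	[CHIP_8018] = { "SPCv 8018",  16, 1 },
	[CHIP_8019] = { "SPCve 8019", 16, 1 },
};

int main(void)
{
	enum chip_flavour id = CHIP_8019;	/* the value .driver_data would carry */

	printf("probe: %s, %u phys, multi-vector=%d\n",
	       caps[id].name, caps[id].n_phy, caps[id].multi_vector);
	return 0;
}
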
index b961112395d5e053145ba6f83ed4bf7e5058c5c5..a85d73de7c80c730260362ae56a7688189459ee6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -68,7 +68,7 @@ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag)
        clear_bit(tag, bitmap);
 }
 
-static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
+void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
 {
        pm8001_tag_clear(pm8001_ha, tag);
 }
@@ -212,10 +212,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
                break;
        case PHY_FUNC_GET_EVENTS:
                spin_lock_irqsave(&pm8001_ha->lock, flags);
-               if (-1 == pm8001_bar4_shift(pm8001_ha,
+               if (pm8001_ha->chip_id == chip_8001) {
+                       if (-1 == pm8001_bar4_shift(pm8001_ha,
                                        (phy_id < 4) ? 0x30000 : 0x40000)) {
-                       spin_unlock_irqrestore(&pm8001_ha->lock, flags);
-                       return -EINVAL;
+                               spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+                               return -EINVAL;
+                       }
                }
                {
                        struct sas_phy *phy = sas_phy->phy;
@@ -228,7 +230,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
                        phy->loss_of_dword_sync_count = qp[3];
                        phy->phy_reset_problem_count = qp[4];
                }
-               pm8001_bar4_shift(pm8001_ha, 0);
+               if (pm8001_ha->chip_id == chip_8001)
+                       pm8001_bar4_shift(pm8001_ha, 0);
                spin_unlock_irqrestore(&pm8001_ha->lock, flags);
                return 0;
        default:
@@ -249,7 +252,9 @@ void pm8001_scan_start(struct Scsi_Host *shost)
        struct pm8001_hba_info *pm8001_ha;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        pm8001_ha = sha->lldd_ha;
-       PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
+       /* SAS_RE_INITIALIZATION not available in SPCv/ve */
+       if (pm8001_ha->chip_id == chip_8001)
+               PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
        for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
                PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
 }
@@ -352,7 +357,7 @@ static int sas_find_local_port_id(struct domain_device *dev)
   * @tmf: the task management IU
   */
 #define DEV_IS_GONE(pm8001_dev)        \
-       ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)))
+       ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
 static int pm8001_task_exec(struct sas_task *task, const int num,
        gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
 {
@@ -370,7 +375,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
                struct task_status_struct *tsm = &t->task_status;
                tsm->resp = SAS_TASK_UNDELIVERED;
                tsm->stat = SAS_PHY_DOWN;
-               if (dev->dev_type != SATA_DEV)
+               if (dev->dev_type != SAS_SATA_DEV)
                        t->task_done(t);
                return 0;
        }
@@ -548,7 +553,7 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
 {
        u32 dev;
        for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
-               if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) {
+               if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
                        pm8001_ha->devices[dev].id = dev;
                        return &pm8001_ha->devices[dev];
                }
@@ -560,13 +565,31 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
        }
        return NULL;
 }
+/**
+  * pm8001_find_dev - find a matching pm8001_device
+  * @pm8001_ha: our hba card information
+  */
+struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
+                                       u32 device_id)
+{
+       u32 dev;
+       for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
+               if (pm8001_ha->devices[dev].device_id == device_id)
+                       return &pm8001_ha->devices[dev];
+       }
+       if (dev == PM8001_MAX_DEVICES) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
+                               "DEVICE FOUND !!!\n"));
+       }
+       return NULL;
+}
 
 static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
 {
        u32 id = pm8001_dev->id;
        memset(pm8001_dev, 0, sizeof(*pm8001_dev));
        pm8001_dev->id = id;
-       pm8001_dev->dev_type = NO_DEVICE;
+       pm8001_dev->dev_type = SAS_PHY_UNUSED;
        pm8001_dev->device_id = PM8001_MAX_DEVICES;
        pm8001_dev->sas_device = NULL;
 }
@@ -624,7 +647,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
                        res = -1;
                }
        } else {
-               if (dev->dev_type == SATA_DEV) {
+               if (dev->dev_type == SAS_SATA_DEV) {
                        pm8001_device->attached_phy =
                                dev->rphy->identify.phy_identifier;
                                flag = 1; /* directly sata*/
@@ -634,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
        PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
        spin_unlock_irqrestore(&pm8001_ha->lock, flags);
        wait_for_completion(&completion);
-       if (dev->dev_type == SAS_END_DEV)
+       if (dev->dev_type == SAS_END_DEVICE)
                msleep(50);
        pm8001_ha->flags = PM8001F_RUN_TIME;
        return 0;
@@ -648,7 +671,7 @@ int pm8001_dev_found(struct domain_device *dev)
        return pm8001_dev_found_notify(dev);
 }
 
-static void pm8001_task_done(struct sas_task *task)
+void pm8001_task_done(struct sas_task *task)
 {
        if (!del_timer(&task->slow_task->timer))
                return;
@@ -904,7 +927,7 @@ void pm8001_open_reject_retry(
                struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
 
                pm8001_dev = ccb->device;
-               if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))
+               if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
                        continue;
                if (!device_to_close) {
                        uintptr_t d = (uintptr_t)pm8001_dev
@@ -995,6 +1018,72 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
        return rc;
 }
 
+/*
+* This function handle the IT_NEXUS_XXX event or completion
+* status code for SSP/SATA/SMP I/O request.
+*/
+int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
+{
+       int rc = TMF_RESP_FUNC_FAILED;
+       struct pm8001_device *pm8001_dev;
+       struct pm8001_hba_info *pm8001_ha;
+       struct sas_phy *phy;
+       u32 device_id = 0;
+
+       if (!dev || !dev->lldd_dev)
+               return -1;
+
+       pm8001_dev = dev->lldd_dev;
+       device_id = pm8001_dev->device_id;
+       pm8001_ha = pm8001_find_ha_by_dev(dev);
+
+       PM8001_EH_DBG(pm8001_ha,
+                       pm8001_printk("I_T_Nexus handler invoked !!"));
+
+       phy = sas_get_local_phy(dev);
+
+       if (dev_is_sata(dev)) {
+               DECLARE_COMPLETION_ONSTACK(completion_setstate);
+               if (scsi_is_sas_phy_local(phy)) {
+                       rc = 0;
+                       goto out;
+               }
+               /* send internal ssp/sata/smp abort command to FW */
+               rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
+                                                       dev, 1, 0);
+               msleep(100);
+
+               /* deregister the target device */
+               pm8001_dev_gone_notify(dev);
+               msleep(200);
+
+               /*send phy reset to hard reset target */
+               rc = sas_phy_reset(phy, 1);
+               msleep(2000);
+               pm8001_dev->setds_completion = &completion_setstate;
+
+               wait_for_completion(&completion_setstate);
+       } else {
+               /* send internal ssp/sata/smp abort command to FW */
+               rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
+                                                       dev, 1, 0);
+               msleep(100);
+
+               /* deregister the target device */
+               pm8001_dev_gone_notify(dev);
+               msleep(200);
+
+               /*send phy reset to hard reset target */
+               rc = sas_phy_reset(phy, 1);
+               msleep(2000);
+       }
+       PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
+               pm8001_dev->device_id, rc));
+out:
+       sas_put_local_phy(phy);
+
+       return rc;
+}
 /* mandatory SAM-3, the task reset the specified LUN*/
 int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
 {
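
pm8001_I_T_nexus_event_handler(), added above, runs the same recovery ladder for the SATA and non-SATA cases: abort the device's outstanding firmware commands, deregister the device, then hard-reset the phy, with the SATA path additionally waiting on the set-device-state completion. The following compile-able outline (hypothetical stub helpers; only the delays are taken from the hunk) is a sketch of that ladder, not the driver code itself.

#include <stdio.h>

/* stand-ins for the driver/libsas primitives used in the hunk above */
static void abort_all_fw_io(void)          { puts("internal ssp/sata/smp abort to FW"); }
static void deregister_device(void)        { puts("pm8001_dev_gone_notify()"); }
static void hard_reset_phy(void)           { puts("sas_phy_reset(phy, 1)"); }
static void wait_setstate_completion(void) { puts("wait_for_completion()"); }
static void msleep(unsigned int ms)        { (void)ms; /* delay elided in the sketch */ }

static int it_nexus_recover(int is_sata)
{
	abort_all_fw_io();               /* flush outstanding I/O for this device */
	msleep(100);
	deregister_device();             /* drop the firmware device registration */
	msleep(200);
	hard_reset_phy();                /* link reset forces rediscovery of the target */
	msleep(2000);
	if (is_sata)
		wait_setstate_completion(); /* SATA path re-arms the device state first */
	return 0;
}

int main(void)
{
	return it_nexus_recover(1);
}
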
index 11008205aeb331c9b86be09deb8c26733cd133b2..570819464d90b40568477eac1e4c283ae10c3169 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -57,8 +57,8 @@
 #include <linux/atomic.h>
 #include "pm8001_defs.h"
 
-#define DRV_NAME               "pm8001"
-#define DRV_VERSION            "0.1.36"
+#define DRV_NAME               "pm80xx"
+#define DRV_VERSION            "0.1.37"
 #define PM8001_FAIL_LOGGING    0x01 /* Error message logging */
 #define PM8001_INIT_LOGGING    0x02 /* driver init logging */
 #define PM8001_DISC_LOGGING    0x04 /* discovery layer logging */
@@ -66,8 +66,8 @@
 #define PM8001_EH_LOGGING      0x10 /* libsas EH function logging*/
 #define PM8001_IOCTL_LOGGING   0x20 /* IOCTL message logging */
 #define PM8001_MSG_LOGGING     0x40 /* misc message logging */
-#define pm8001_printk(format, arg...)  printk(KERN_INFO "%s %d:" format,\
-                               __func__, __LINE__, ## arg)
+#define pm8001_printk(format, arg...)  printk(KERN_INFO "pm80xx %s %d:" \
+                       format, __func__, __LINE__, ## arg)
 #define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD)  \
 do {                                           \
        if (unlikely(HBA->logging_level & LEVEL))       \
@@ -103,11 +103,12 @@ do {                                              \
 #define PM8001_READ_VPD
 
 
-#define DEV_IS_EXPANDER(type)  ((type == EDGE_DEV) || (type == FANOUT_DEV))
+#define DEV_IS_EXPANDER(type)  ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
 
 #define PM8001_NAME_LENGTH             32/* generic length of strings */
 extern struct list_head hba_list;
 extern const struct pm8001_dispatch pm8001_8001_dispatch;
+extern const struct pm8001_dispatch pm8001_80xx_dispatch;
 
 struct pm8001_hba_info;
 struct pm8001_ccb_info;
@@ -131,15 +132,15 @@ struct pm8001_ioctl_payload {
 struct pm8001_dispatch {
        char *name;
        int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
-       int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature);
+       int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
        void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
        int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
        void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
-       irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha);
+       irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec);
        u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
-       int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha);
-       void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha);
-       void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha);
+       int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+       void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+       void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
        void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
        int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
                struct pm8001_ccb_info *ccb);
@@ -173,6 +174,7 @@ struct pm8001_dispatch {
 };
 
 struct pm8001_chip_info {
+       u32     encrypt;
        u32     n_phy;
        const struct pm8001_dispatch    *dispatch;
 };
@@ -204,7 +206,7 @@ struct pm8001_phy {
 };
 
 struct pm8001_device {
-       enum sas_dev_type       dev_type;
+       enum sas_device_type    dev_type;
        struct domain_device    *sas_device;
        u32                     attached_phy;
        u32                     id;
@@ -256,7 +258,20 @@ struct mpi_mem_req {
        struct mpi_mem          region[USI_MAX_MEMCNT];
 };
 
-struct main_cfg_table {
+struct encrypt {
+       u32     cipher_mode;
+       u32     sec_mode;
+       u32     status;
+       u32     flag;
+};
+
+struct sas_phy_attribute_table {
+       u32     phystart1_16[16];
+       u32     outbound_hw_event_pid1_16[16];
+};
+
+union main_cfg_table {
+       struct {
        u32                     signature;
        u32                     interface_rev;
        u32                     firmware_rev;
@@ -292,19 +307,69 @@ struct main_cfg_table {
        u32                     fatal_err_dump_length1;
        u32                     hda_mode_flag;
        u32                     anolog_setup_table_offset;
+       u32                     rsvd[4];
+       } pm8001_tbl;
+
+       struct {
+       u32                     signature;
+       u32                     interface_rev;
+       u32                     firmware_rev;
+       u32                     max_out_io;
+       u32                     max_sgl;
+       u32                     ctrl_cap_flag;
+       u32                     gst_offset;
+       u32                     inbound_queue_offset;
+       u32                     outbound_queue_offset;
+       u32                     inbound_q_nppd_hppd;
+       u32                     rsvd[8];
+       u32                     crc_core_dump;
+       u32                     rsvd1;
+       u32                     upper_event_log_addr;
+       u32                     lower_event_log_addr;
+       u32                     event_log_size;
+       u32                     event_log_severity;
+       u32                     upper_pcs_event_log_addr;
+       u32                     lower_pcs_event_log_addr;
+       u32                     pcs_event_log_size;
+       u32                     pcs_event_log_severity;
+       u32                     fatal_err_interrupt;
+       u32                     fatal_err_dump_offset0;
+       u32                     fatal_err_dump_length0;
+       u32                     fatal_err_dump_offset1;
+       u32                     fatal_err_dump_length1;
+       u32                     gpio_led_mapping;
+       u32                     analog_setup_table_offset;
+       u32                     int_vec_table_offset;
+       u32                     phy_attr_table_offset;
+       u32                     port_recovery_timer;
+       u32                     interrupt_reassertion_delay;
+       } pm80xx_tbl;
 };
-struct general_status_table {
+
+union general_status_table {
+       struct {
        u32                     gst_len_mpistate;
        u32                     iq_freeze_state0;
        u32                     iq_freeze_state1;
        u32                     msgu_tcnt;
        u32                     iop_tcnt;
-       u32                     reserved;
+       u32                     rsvd;
        u32                     phy_state[8];
-       u32                     reserved1;
-       u32                     reserved2;
-       u32                     reserved3;
+       u32                     gpio_input_val;
+       u32                     rsvd1[2];
+       u32                     recover_err_info[8];
+       } pm8001_tbl;
+       struct {
+       u32                     gst_len_mpistate;
+       u32                     iq_freeze_state0;
+       u32                     iq_freeze_state1;
+       u32                     msgu_tcnt;
+       u32                     iop_tcnt;
+       u32                     rsvd[9];
+       u32                     gpio_input_val;
+       u32                     rsvd1[2];
        u32                     recover_err_info[8];
+       } pm80xx_tbl;
 };
 struct inbound_queue_table {
        u32                     element_pri_size_cnt;
@@ -351,15 +416,21 @@ struct pm8001_hba_info {
        struct device           *dev;
        struct pm8001_hba_memspace io_mem[6];
        struct mpi_mem_req      memoryMap;
+       struct encrypt          encrypt_info; /* support encryption */
        void __iomem    *msg_unit_tbl_addr;/*Message Unit Table Addr*/
        void __iomem    *main_cfg_tbl_addr;/*Main Config Table Addr*/
        void __iomem    *general_stat_tbl_addr;/*General Status Table Addr*/
        void __iomem    *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/
        void __iomem    *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/
-       struct main_cfg_table   main_cfg_tbl;
-       struct general_status_table     gs_tbl;
-       struct inbound_queue_table      inbnd_q_tbl[PM8001_MAX_INB_NUM];
-       struct outbound_queue_table     outbnd_q_tbl[PM8001_MAX_OUTB_NUM];
+       void __iomem    *pspa_q_tbl_addr;
+                       /*MPI SAS PHY attributes Queue Config Table Addr*/
+       void __iomem    *ivt_tbl_addr; /*MPI IVT Table Addr */
+       union main_cfg_table    main_cfg_tbl;
+       union general_status_table      gs_tbl;
+       struct inbound_queue_table      inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
+       struct outbound_queue_table     outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM];
+       struct sas_phy_attribute_table  phy_attr_table;
+                                       /* MPI SAS PHY attributes */
        u8                      sas_addr[SAS_ADDR_SIZE];
        struct sas_ha_struct    *sas;/* SCSI/SAS glue */
        struct Scsi_Host        *shost;
@@ -372,10 +443,12 @@ struct pm8001_hba_info {
        struct pm8001_port      port[PM8001_MAX_PHYS];
        u32                     id;
        u32                     irq;
+       u32                     iomb_size; /* SPC and SPCV IOMB size */
        struct pm8001_device    *devices;
        struct pm8001_ccb_info  *ccb_info;
 #ifdef PM8001_USE_MSIX
-       struct msix_entry       msix_entries[16];/*for msi-x interrupt*/
+       struct msix_entry       msix_entries[PM8001_MAX_MSIX_VEC];
+                                       /*for msi-x interrupt*/
        int                     number_of_intr;/*will be used in remove()*/
 #endif
 #ifdef PM8001_USE_TASKLET
@@ -383,7 +456,10 @@ struct pm8001_hba_info {
 #endif
        u32                     logging_level;
        u32                     fw_status;
+       u32                     smp_exp_mode;
+       u32                     int_vector;
        const struct firmware   *fw_image;
+       u8                      outq[PM8001_MAX_MSIX_VEC];
 };
 
 struct pm8001_work {
@@ -419,6 +495,9 @@ struct pm8001_fw_image_header {
 #define FLASH_UPDATE_DNLD_NOT_SUPPORTED                0x10
 #define FLASH_UPDATE_DISABLED                  0x11
 
+#define        NCQ_READ_LOG_FLAG                       0x80000000
+#define        NCQ_ABORT_ALL_FLAG                      0x40000000
+#define        NCQ_2ND_RLE_FLAG                        0x20000000
 /**
  * brief param structure for firmware flash update.
  */
@@ -484,6 +563,7 @@ int pm8001_dev_found(struct domain_device *dev);
 void pm8001_dev_gone(struct domain_device *dev);
 int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
 int pm8001_I_T_nexus_reset(struct domain_device *dev);
+int pm8001_I_T_nexus_event_handler(struct domain_device *dev);
 int pm8001_query_task(struct sas_task *task);
 void pm8001_open_reject_retry(
        struct pm8001_hba_info *pm8001_ha,
@@ -493,6 +573,61 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
        dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
        u32 mem_size, u32 align);
 
+void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
+int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+                       struct inbound_queue_table *circularQ,
+                       u32 opCode, void *payload, u32 responseQueue);
+int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
+                               u16 messageSize, void **messagePtr);
+u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+                       struct outbound_queue_table *circularQ, u8 bc);
+u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+                       struct outbound_queue_table *circularQ,
+                       void **messagePtr1, u8 *pBC);
+int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
+                       struct pm8001_device *pm8001_dev, u32 state);
+int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+                                       void *payload);
+int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
+                                       void *fw_flash_updata_info, u32 tag);
+int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
+int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
+int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+                               struct pm8001_ccb_info *ccb,
+                               struct pm8001_tmf_task *tmf);
+int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+                               struct pm8001_device *pm8001_dev,
+                               u8 flag, u32 task_tag, u32 cmd_tag);
+int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id);
+void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd);
+void pm8001_work_fn(struct work_struct *work);
+int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha,
+                                       void *data, int handler);
+void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate);
+void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr);
+void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i);
+int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb);
+int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+struct sas_task *pm8001_alloc_task(void);
+void pm8001_task_done(struct sas_task *task);
+void pm8001_free_task(struct sas_task *task);
+void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag);
+struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
+                                       u32 device_id);
+int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
+
 int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
 
 /* ctl shared API */
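
The dispatch-table changes above (isr, isr_process_oq, interrupt_enable and interrupt_disable now taking a u8 vec, plus the outq[PM8001_MAX_MSIX_VEC] cookie array) let each MSI-X vector service its own outbound queue, matching the free_irq(..., &pm8001_ha->outq[i]) and per-vector interrupt_enable() loop earlier in this commit. A minimal sketch of that per-vector routing, in plain C with hypothetical names and a made-up vector count:

#include <stdint.h>
#include <stdio.h>

#define MAX_VEC 8	/* stands in for PM8001_MAX_MSIX_VEC */

struct hba {
	uint8_t outq[MAX_VEC];	/* per-vector cookie, like pm8001_ha->outq[i] */
	int number_of_intr;
};

/* what an (*isr)(ha, vec)-style handler does with its vector argument */
static void service_vector(struct hba *ha, uint8_t vec)
{
	printf("vector %u -> outbound queue %u\n", vec, ha->outq[vec]);
}

int main(void)
{
	struct hba ha = { .number_of_intr = 4 };

	for (int i = 0; i < ha.number_of_intr; i++)
		ha.outq[i] = (uint8_t)i;	/* queue i paired with vector i */

	for (int i = 0; i < ha.number_of_intr; i++)
		service_vector(&ha, (uint8_t)i);
	return 0;
}
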
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
new file mode 100644 (file)
index 0000000..302514d
--- /dev/null
@@ -0,0 +1,4130 @@
+/*
+ * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 PMC-Sierra, Inc.,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+ #include <linux/slab.h>
+ #include "pm8001_sas.h"
+ #include "pm80xx_hwi.h"
+ #include "pm8001_chips.h"
+ #include "pm8001_ctl.h"
+
+#define SMP_DIRECT 1
+#define SMP_INDIRECT 2
+/**
+ * read_main_config_table - read the configure table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature    =
+               pm8001_mr32(address, MAIN_SIGNATURE_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev =
+               pm8001_mr32(address, MAIN_INTERFACE_REVISION);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev =
+               pm8001_mr32(address, MAIN_FW_REVISION);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io   =
+               pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl      =
+               pm8001_mr32(address, MAIN_MAX_SGL_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag =
+               pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset   =
+               pm8001_mr32(address, MAIN_GST_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset =
+               pm8001_mr32(address, MAIN_IBQ_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset =
+               pm8001_mr32(address, MAIN_OBQ_OFFSET);
+
+       /* read Error Dump Offset and Length */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
+
+       /* read GPIO LED settings from the configuration table */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping =
+               pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET);
+
+       /* read analog Setting offset from the configuration table */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset =
+               pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
+
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset =
+               pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
+               pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
+}
+
+/**
+ * read_general_status_table - read the general status table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->general_stat_tbl_addr;
+       pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate   =
+                       pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0   =
+                       pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1   =
+                       pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt          =
+                       pm8001_mr32(address, GST_MSGUTCNT_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt           =
+                       pm8001_mr32(address, GST_IOPTCNT_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val     =
+                       pm8001_mr32(address, GST_GPIO_INPUT_VAL);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET0);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET1);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET2);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET3);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET4);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET5);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET6);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] =
+                        pm8001_mr32(address, GST_RERRINFO_OFFSET7);
+}
+/**
+ * read_phy_attr_table - read the phy attribute table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->pspa_q_tbl_addr;
+       pm8001_ha->phy_attr_table.phystart1_16[0] =
+                       pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[1] =
+                       pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[2] =
+                       pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[3] =
+                       pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[4] =
+                       pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[5] =
+                       pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[6] =
+                       pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[7] =
+                       pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[8] =
+                       pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[9] =
+                       pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[10] =
+                       pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[11] =
+                       pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[12] =
+                       pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[13] =
+                       pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[14] =
+                       pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[15] =
+                       pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET);
+
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET);
+
+}
+
+/**
+ * read_inbnd_queue_table - read the inbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+       void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+               u32 offset = i * 0x20;
+               pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
+                       get_pci_bar_index(pm8001_mr32(address,
+                               (offset + IB_PIPCI_BAR)));
+               pm8001_ha->inbnd_q_tbl[i].pi_offset =
+                       pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET));
+       }
+}
+
+/**
+ * read_outbnd_queue_table - read the outbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+       void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+               u32 offset = i * 0x24;
+               pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
+                       get_pci_bar_index(pm8001_mr32(address,
+                               (offset + OB_CIPCI_BAR)));
+               pm8001_ha->outbnd_q_tbl[i].ci_offset =
+                       pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET));
+       }
+}
+
+/**
+ * init_default_table_values - init the default table.
+ * @pm8001_ha: our hba card information
+ */
+static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+       u32 offsetib, offsetob;
+       void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
+       void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
+
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr         =
+               pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr         =
+               pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size               =
+                                                       PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity           = 0x01;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr     =
+               pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr     =
+               pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size           =
+                                                       PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity       = 0x01;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt          = 0x01;
+
+       /* Disable end to end CRC checking */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
+
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+               pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt  =
+                       PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
+               pm8001_ha->inbnd_q_tbl[i].upper_base_addr       =
+                       pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
+               pm8001_ha->inbnd_q_tbl[i].lower_base_addr       =
+               pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
+               pm8001_ha->inbnd_q_tbl[i].base_virt             =
+                       (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
+               pm8001_ha->inbnd_q_tbl[i].total_length          =
+                       pm8001_ha->memoryMap.region[IB + i].total_len;
+               pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr    =
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
+               pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr    =
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
+               pm8001_ha->inbnd_q_tbl[i].ci_virt               =
+                       pm8001_ha->memoryMap.region[CI + i].virt_ptr;
+               offsetib = i * 0x20;
+               pm8001_ha->inbnd_q_tbl[i].pi_pci_bar            =
+                       get_pci_bar_index(pm8001_mr32(addressib,
+                               (offsetib + 0x14)));
+               pm8001_ha->inbnd_q_tbl[i].pi_offset             =
+                       pm8001_mr32(addressib, (offsetib + 0x18));
+               pm8001_ha->inbnd_q_tbl[i].producer_idx          = 0;
+               pm8001_ha->inbnd_q_tbl[i].consumer_index        = 0;
+       }
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+               pm8001_ha->outbnd_q_tbl[i].element_size_cnt     =
+                       PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
+               pm8001_ha->outbnd_q_tbl[i].upper_base_addr      =
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
+               pm8001_ha->outbnd_q_tbl[i].lower_base_addr      =
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
+               pm8001_ha->outbnd_q_tbl[i].base_virt            =
+                       (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
+               pm8001_ha->outbnd_q_tbl[i].total_length         =
+                       pm8001_ha->memoryMap.region[OB + i].total_len;
+               pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr   =
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
+               pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr   =
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
+               /* interrupt vector based on oq */
+               pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24);
+               pm8001_ha->outbnd_q_tbl[i].pi_virt              =
+                       pm8001_ha->memoryMap.region[PI + i].virt_ptr;
+               offsetob = i * 0x24;
+               pm8001_ha->outbnd_q_tbl[i].ci_pci_bar           =
+                       get_pci_bar_index(pm8001_mr32(addressob,
+                       offsetob + 0x14));
+               pm8001_ha->outbnd_q_tbl[i].ci_offset            =
+                       pm8001_mr32(addressob, (offsetob + 0x18));
+               pm8001_ha->outbnd_q_tbl[i].consumer_idx         = 0;
+               pm8001_ha->outbnd_q_tbl[i].producer_index       = 0;
+       }
+}
+
+/**
+ * update_main_config_table - update the main default table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+       pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd);
+       pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr);
+       pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr);
+       pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
+       pm8001_mw32(address, MAIN_EVENT_LOG_OPTION,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
+       pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
+       pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
+
+       /* SPCv specific */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF;
+       /* Set GPIOLED to 0x2 for LED indicator */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
+       pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
+
+       pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
+       pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);
+}
+
+/**
+ * update_inbnd_queue_table - update the inbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+                                        int number)
+{
+       void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+       u16 offset = number * 0x20;
+       pm8001_mw32(address, offset + IB_PROPERITY_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
+       pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
+       pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
+       pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
+       pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
+}
+
+/**
+ * update_outbnd_queue_table - update the outbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+                                                int number)
+{
+       void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+       u16 offset = number * 0x24;
+       pm8001_mw32(address, offset + OB_PROPERITY_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
+       pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
+       pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
+       pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
+       pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
+       pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
+}
+
+/**
+ * mpi_init_check - check firmware initialization status.
+ * @pm8001_ha: our hba card information
+ */
+static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 max_wait_count;
+       u32 value;
+       u32 gst_len_mpistate;
+
+       /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the
+       table is updated */
+       pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
+       /* wait until Inbound DoorBell Clear Register toggled */
+       max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+               value &= SPCv_MSGU_CFG_TABLE_UPDATE;
+       } while ((value != 0) && (--max_wait_count));
+
+       if (!max_wait_count)
+               return -1;
+       /* check the MPI-State for initialization upto 100ms*/
+       max_wait_count = 100 * 1000;/* 100 msec */
+       do {
+               udelay(1);
+               gst_len_mpistate =
+                       pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+                                       GST_GSTLEN_MPIS_OFFSET);
+       } while ((GST_MPI_STATE_INIT !=
+               (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count));
+       if (!max_wait_count)
+               return -1;
+
+       /* check MPI Initialization error */
+       gst_len_mpistate = gst_len_mpistate >> 16;
+       if (0x0000 != gst_len_mpistate)
+               return -1;
+
+       return 0;
+}
+
+/**
+ * check_fw_ready - The LLDD check if the FW is ready, if not, return error.
+ * @pm8001_ha: our hba card information
+ */
+static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 value;
+       u32 max_wait_count;
+       u32 max_wait_time;
+       int ret = 0;
+
+       /* reset / PCIe ready */
+       max_wait_time = max_wait_count = 100 * 1000;    /* 100 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while ((value == 0xFFFFFFFF) && (--max_wait_count));
+
+       /* check ila status */
+       max_wait_time = max_wait_count = 1000 * 1000;   /* 1000 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while (((value & SCRATCH_PAD_ILA_READY) !=
+                       SCRATCH_PAD_ILA_READY) && (--max_wait_count));
+       if (!max_wait_count)
+               ret = -1;
+       else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" ila ready status in %d millisec\n",
+                               (max_wait_time - max_wait_count)));
+       }
+
+       /* check RAAE status */
+       max_wait_time = max_wait_count = 1800 * 1000;   /* 1800 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while (((value & SCRATCH_PAD_RAAE_READY) !=
+                               SCRATCH_PAD_RAAE_READY) && (--max_wait_count));
+       if (!max_wait_count)
+               ret = -1;
+       else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" raae ready status in %d millisec\n",
+                                       (max_wait_time - max_wait_count)));
+       }
+
+       /* check iop0 status */
+       max_wait_time = max_wait_count = 600 * 1000;    /* 600 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) &&
+                       (--max_wait_count));
+       if (!max_wait_count)
+               ret = -1;
+       else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" iop0 ready status in %d millisec\n",
+                               (max_wait_time - max_wait_count)));
+       }
+
+       /* check iop1 status only for 16 port controllers */
+       if ((pm8001_ha->chip_id != chip_8008) &&
+                       (pm8001_ha->chip_id != chip_8009)) {
+               /* 200 milli sec */
+               max_wait_time = max_wait_count = 200 * 1000;
+               do {
+                       udelay(1);
+                       value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+               } while (((value & SCRATCH_PAD_IOP1_READY) !=
+                               SCRATCH_PAD_IOP1_READY) && (--max_wait_count));
+               if (!max_wait_count)
+                       ret = -1;
+               else {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "iop1 ready status in %d millisec\n",
+                               (max_wait_time - max_wait_count)));
+               }
+       }
+
+       return ret;
+}
+
+static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *base_addr;
+       u32     value;
+       u32     offset;
+       u32     pcibar;
+       u32     pcilogic;
+
+       value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+       offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
+                               offset, value));
+       pcilogic = (value & 0xFC000000) >> 26;
+       pcibar = get_pci_bar_index(pcilogic);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
+       pm8001_ha->main_cfg_tbl_addr = base_addr =
+               pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
+       pm8001_ha->general_stat_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) &
+                                       0xFFFFFF);
+       pm8001_ha->inbnd_q_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) &
+                                       0xFFFFFF);
+       pm8001_ha->outbnd_q_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) &
+                                       0xFFFFFF);
+       pm8001_ha->ivt_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) &
+                                       0xFFFFFF);
+       pm8001_ha->pspa_q_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
+                                       0xFFFFFF);
+
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("GST OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x18)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("INBND OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("OBND OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x20)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("IVT OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("PSPA OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x90)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("addr - main cfg %p general status %p\n",
+                       pm8001_ha->main_cfg_tbl_addr,
+                       pm8001_ha->general_stat_tbl_addr));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("addr - inbnd %p obnd %p\n",
+                       pm8001_ha->inbnd_q_tbl_addr,
+                       pm8001_ha->outbnd_q_tbl_addr));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("addr - pspa %p ivt %p\n",
+                       pm8001_ha->pspa_q_tbl_addr,
+                       pm8001_ha->ivt_tbl_addr));
+}
+
+/**
+ * pm80xx_set_thermal_config - support the thermal configuration
+ * @pm8001_ha: our hba card information.
+ */
+int
+pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
+{
+       struct set_ctrl_cfg_req payload;
+       struct inbound_queue_table *circularQ;
+       int rc;
+       u32 tag;
+       u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+
+       memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+       if (rc)
+               return -1;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(tag);
+       payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
+                       (THERMAL_ENABLE << 8) | THERMAL_OP_CODE;
+       payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+       return rc;
+
+}
+
+/**
+* pm80xx_set_sas_protocol_timer_config - support the SAS Protocol
+* Timer configuration page
+* @pm8001_ha: our hba card information.
+*/
+static int
+pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
+{
+       struct set_ctrl_cfg_req payload;
+       struct inbound_queue_table *circularQ;
+       SASProtocolTimerConfig_t SASConfigPage;
+       int rc;
+       u32 tag;
+       u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+
+       memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
+       memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t));
+
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+
+       if (rc)
+               return -1;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(tag);
+
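+       /*
+        * Build the SAS protocol timer configuration page: the timeout and
+        * retry values below are packed into 32-bit fields and the whole
+        * page is then copied into the request payload.
+        */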
+       SASConfigPage.pageCode        =  SAS_PROTOCOL_TIMER_CONFIG_PAGE;
+       SASConfigPage.MST_MSI         =  3 << 15;
+       SASConfigPage.STP_SSP_MCT_TMO =  (STP_MCT_TMO << 16) | SSP_MCT_TMO;
+       SASConfigPage.STP_FRM_TMO     = (SAS_MAX_OPEN_TIME << 24) |
+                               (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
+       SASConfigPage.STP_IDLE_TMO    =  STP_IDLE_TIME;
+
+       if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
+               SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
+
+       SASConfigPage.OPNRJT_RTRY_INTVL =         (SAS_MFD << 16) |
+                                               SAS_OPNRJT_RTRY_INTVL;
+       SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO =  (SAS_DOPNRJT_RTRY_TMO << 16)
+                                               | SAS_COPNRJT_RTRY_TMO;
+       SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR =  (SAS_DOPNRJT_RTRY_THR << 16)
+                                               | SAS_COPNRJT_RTRY_THR;
+       SASConfigPage.MAX_AIP =  SAS_MAX_AIP;
+
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.pageCode "
+                       "0x%08x\n", SASConfigPage.pageCode));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.MST_MSI "
+                       " 0x%08x\n", SASConfigPage.MST_MSI));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO "
+                       " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.STP_FRM_TMO "
+                       " 0x%08x\n", SASConfigPage.STP_FRM_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.STP_IDLE_TMO "
+                       " 0x%08x\n", SASConfigPage.STP_IDLE_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL "
+                       " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO "
+                       " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR "
+                       " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
+       PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP "
+                       " 0x%08x\n", SASConfigPage.MAX_AIP));
+
+       memcpy(&payload.cfg_pg, &SASConfigPage,
+                        sizeof(SASProtocolTimerConfig_t));
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+       return rc;
+}
+
+/**
+ * pm80xx_get_encrypt_info - Check for encryption
+ * @pm8001_ha: our hba card information.
+ */
+static int
+pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 scratch3_value;
+       int ret = 0;
+
+       /* Read encryption status from SCRATCH PAD 3 */
+       scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
+
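+       /*
+        * The low bits of scratch pad 3 report the encryption engine state
+        * (ready, disabled, or disabled/enabled with an error); the cipher
+        * mode and security mode bits are decoded for each case below.
+        */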
+       if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+                                       SCRATCH_PAD3_ENC_READY) {
+               if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+                       pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                               SCRATCH_PAD3_SMF_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                               SCRATCH_PAD3_SMA_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                               SCRATCH_PAD3_SMB_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+               pm8001_ha->encrypt_info.status = 0;
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X."
+                       "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
+                       scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+                       pm8001_ha->encrypt_info.sec_mode,
+                       pm8001_ha->encrypt_info.status));
+               ret = 0;
+       } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) ==
+                                       SCRATCH_PAD3_ENC_DISABLED) {
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
+                       scratch3_value));
+               pm8001_ha->encrypt_info.status = 0xFFFFFFFF;
+               pm8001_ha->encrypt_info.cipher_mode = 0;
+               pm8001_ha->encrypt_info.sec_mode = 0;
+               return 0;
+       } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+                               SCRATCH_PAD3_ENC_DIS_ERR) {
+               pm8001_ha->encrypt_info.status =
+                       (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
+               if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+                       pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMF_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMA_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMB_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X."
+                       "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+                       scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+                       pm8001_ha->encrypt_info.sec_mode,
+                       pm8001_ha->encrypt_info.status));
+               ret = -1;
+       } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+                                SCRATCH_PAD3_ENC_ENA_ERR) {
+
+               pm8001_ha->encrypt_info.status =
+                       (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
+               if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+                       pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMF_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMA_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMB_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X."
+                       "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+                       scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+                       pm8001_ha->encrypt_info.sec_mode,
+                       pm8001_ha->encrypt_info.status));
+               ret = -1;
+       }
+       return ret;
+}
+
+/**
+ * pm80xx_encrypt_update - update flash with encryption information
+ * @pm8001_ha: our hba card information.
+ */
+static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
+{
+       struct kek_mgmt_req payload;
+       struct inbound_queue_table *circularQ;
+       int rc;
+       u32 tag;
+       u32 opc = OPC_INB_KEK_MANAGEMENT;
+
+       memset(&payload, 0, sizeof(struct kek_mgmt_req));
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+       if (rc)
+               return -1;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(tag);
+       /* Currently only one key is used. New KEK index is 1.
+        * Current KEK index is 1. Store KEK to NVRAM is 1.
+        */
+       payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
+                                       KEK_MGMT_SUBOP_KEYCARDUPDATE);
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+       return rc;
+}
+
+/**
+ * pm80xx_chip_init - the main init function that initializes the whole
+ * PM8001 chip.
+ * @pm8001_ha: our hba card information
+ */
+static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
+{
+       int ret;
+       u8 i = 0;
+
+       /* check the firmware status */
+       if (-1 == check_fw_ready(pm8001_ha)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Firmware is not ready!\n"));
+               return -EBUSY;
+       }
+
+       /* Initialize PCI space addresses, e.g. the MPI offset */
+       init_pci_device_addresses(pm8001_ha);
+       init_default_table_values(pm8001_ha);
+       read_main_config_table(pm8001_ha);
+       read_general_status_table(pm8001_ha);
+       read_inbnd_queue_table(pm8001_ha);
+       read_outbnd_queue_table(pm8001_ha);
+       read_phy_attr_table(pm8001_ha);
+
+       /* update main config table, inbound table and outbound table */
+       update_main_config_table(pm8001_ha);
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++)
+               update_inbnd_queue_table(pm8001_ha, i);
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++)
+               update_outbnd_queue_table(pm8001_ha, i);
+
+       /* notify firmware update finished and check initialization status */
+       if (0 == mpi_init_check(pm8001_ha)) {
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("MPI initialize successful!\n"));
+       } else
+               return -EBUSY;
+
+       /* send SAS protocol timer configuration page to FW */
+       ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
+
+       /* Check for encryption */
+       if (pm8001_ha->chip->encrypt) {
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("Checking for encryption\n"));
+               ret = pm80xx_get_encrypt_info(pm8001_ha);
+               if (ret == -1) {
+                       PM8001_INIT_DBG(pm8001_ha,
+                               pm8001_printk("Encryption error !!\n"));
+                       if (pm8001_ha->encrypt_info.status == 0x81) {
+                               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                                       "Encryption enabled with error."
+                                       "Saving encryption key to flash\n"));
+                               pm80xx_encrypt_update(pm8001_ha);
+                       }
+               }
+       }
+       return 0;
+}
+
+static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 max_wait_count;
+       u32 value;
+       u32 gst_len_mpistate;
+       init_pci_device_addresses(pm8001_ha);
+       /* Write bit 1 = 1 to the Inbound DoorBell Register to tell the SPC FW
+        * to reset the configuration table.
+        */
+       pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
+
+       /* wait until Inbound DoorBell Clear Register toggled */
+       max_wait_count = 2 * 1000 * 1000;       /* 2 sec for spcv/ve */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+               value &= SPCv_MSGU_CFG_TABLE_RESET;
+       } while ((value != 0) && (--max_wait_count));
+
+       if (!max_wait_count) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("TIMEOUT:IBDB value/=%x\n", value));
+               return -1;
+       }
+
+       /* check the MPI-State for termination in progress */
+       /* wait until Inbound DoorBell Clear Register toggled */
+       max_wait_count = 2 * 1000 * 1000;       /* 2 sec for spcv/ve */
+       do {
+               udelay(1);
+               gst_len_mpistate =
+                       pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+                       GST_GSTLEN_MPIS_OFFSET);
+               if (GST_MPI_STATE_UNINIT ==
+                       (gst_len_mpistate & GST_MPI_STATE_MASK))
+                       break;
+       } while (--max_wait_count);
+       if (!max_wait_count) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk(" TIME OUT MPI State = 0x%x\n",
+                               gst_len_mpistate & GST_MPI_STATE_MASK));
+               return -1;
+       }
+
+       return 0;
+}
+
+/**
+ * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that all FW register
+ * state is cleared back to its original status.
+ * @pm8001_ha: our hba card information
+ */
+
+static int
+pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 regval;
+       u32 bootloader_state;
+
+       /* Check if MPI is in ready state to reset */
+       if (mpi_uninit_check(pm8001_ha) != 0) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("MPI state is not ready\n"));
+               return -1;
+       }
+
+       /* check that the reset register is in its normal state (0x0) */
+       regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("reset register before write : 0x%x\n", regval));
+
+       pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
+       mdelay(500);
+
+       regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
+       PM8001_INIT_DBG(pm8001_ha,
+       pm8001_printk("reset register after write 0x%x\n", regval));
+
+       if ((regval & SPCv_SOFT_RESET_READ_MASK) ==
+                       SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" soft reset successful [regval: 0x%x]\n",
+                                       regval));
+       } else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" soft reset failed [regval: 0x%x]\n",
+                                       regval));
+
+               /* check whether the bootloader executed successfully or is
+                * in HDA mode
+                */
+               bootloader_state =
+                       pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
+                       SCRATCH_PAD1_BOOTSTATE_MASK;
+
+               if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state - HDA mode SEEPROM\n"));
+               } else if (bootloader_state ==
+                               SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state - HDA mode Bootstrap Pin\n"));
+               } else if (bootloader_state ==
+                               SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state - HDA mode soft reset\n"));
+               } else if (bootloader_state ==
+                                       SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state-HDA mode critical error\n"));
+               }
+               return -EBUSY;
+       }
+
+       /* check the firmware status after reset */
+       if (-1 == check_fw_ready(pm8001_ha)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Firmware is not ready!\n"));
+               return -EBUSY;
+       }
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("SPCv soft reset Complete\n"));
+       return 0;
+}
+
+static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 i;
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("chip reset start\n"));
+
+       /* do SPCv chip reset. */
+       pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("SPC soft reset Complete\n"));
+
+       /* TODO: check whether this delay is required or not */
+       /* delay 10 usec */
+       udelay(10);
+
+       /* wait for 20 msec until the firmware gets reloaded */
+       i = 20;
+       do {
+               mdelay(1);
+       } while ((--i) != 0);
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("chip reset finished\n"));
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_enable - enable PM8001 chip INTx interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+{
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_disable - disable PM8001 chip INTx interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+{
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
+}
+
+/**
+ * pm80xx_chip_interrupt_enable - enable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ * @vec: interrupt vector to enable
+ */
+static void
+pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+       u32 mask;
+       mask = (u32)(1 << vec);
+
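+       /* Unmask this vector's outbound interrupt by clearing its bit in the
+        * outbound doorbell mask register.
+        */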
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+       return;
+#endif
+       pm80xx_chip_intx_interrupt_enable(pm8001_ha);
+
+}
+
+/**
+ * pm80xx_chip_interrupt_disable - disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ * @vec: interrupt vector to disable (0xFF masks all vectors)
+ */
+static void
+pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+       u32 mask;
+       if (vec == 0xFF)
+               mask = 0xFFFFFFFF;
+       else
+               mask = (u32)(1 << vec);
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+       return;
+#endif
+       pm80xx_chip_intx_interrupt_disable(pm8001_ha);
+}
+
+static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct task_abort_req task_abort;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_ABORT;
+       int ret;
+
+       if (!pm8001_ha_dev) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+               return;
+       }
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
+                                               "allocate task\n"));
+               return;
+       }
+
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res) {
+               sas_free_task(task);
+               return;
+       }
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
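+       /* Build a SATA abort request with the abort-all flag set for this
+        * device and post it on inbound queue 0.
+        */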
+       memset(&task_abort, 0, sizeof(task_abort));
+       task_abort.abort_all = cpu_to_le32(1);
+       task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       task_abort.tag = cpu_to_le32(ccb_tag);
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+
+}
+
+static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       struct sata_start_req sata_cmd;
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct host_to_dev_fis fis;
+       struct domain_device *dev;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate task !!!\n"));
+               return;
+       }
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate tag !!!\n"));
+               return;
+       }
+
+       /* allocate the domain device ourselves as libsas
+        * is not going to provide one
+        */
+       dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
+       if (!dev) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Domain device cannot be allocated\n"));
+               sas_free_task(task);
+               return;
+       } else {
+               task->dev = dev;
+               task->dev->lldd_dev = pm8001_ha_dev;
+       }
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
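+       /* Flag the device so that the SATA completion path recognises the
+        * READ LOG response and follows it up with an abort-all request.
+        */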
+       pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
+       pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
+
+       memset(&sata_cmd, 0, sizeof(sata_cmd));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       /* construct read log FIS */
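+       /*
+        * Host-to-device register FIS (type 0x27) with the command bit set,
+        * issuing READ LOG EXT for log page 0x10 (the NCQ command error log),
+        * one sector, typically as part of NCQ error recovery.
+        */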
+       memset(&fis, 0, sizeof(struct host_to_dev_fis));
+       fis.fis_type = 0x27;
+       fis.flags = 0x80;
+       fis.command = ATA_CMD_READ_LOG_EXT;
+       fis.lbal = 0x10;
+       fis.sector_count = 0x1;
+
+       sata_cmd.tag = cpu_to_le32(ccb_tag);
+       sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
+       memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
+
+       res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+
+}
+
+/**
+ * mpi_ssp_completion - process the event the FW sends in response to an SSP
+ * request.
+ * @pm8001_ha: our hba card information
+ * @piomb: the message contents of this outbound message.
+ *
+ * When the FW has completed an SSP request (for example an IO request) and
+ * has filled the SG buffers with data, it triggers this event to indicate
+ * that it has finished the job; the corresponding buffer can then be checked.
+ * We then tell the caller, which may be waiting for the result, to notify the
+ * upper layer that the task has finished.
+ */
+static void
+mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       struct pm8001_ccb_info *ccb;
+       unsigned long flags;
+       u32 status;
+       u32 param;
+       u32 tag;
+       struct ssp_completion_resp *psspPayload;
+       struct task_status_struct *ts;
+       struct ssp_response_iu *iu;
+       struct pm8001_device *pm8001_dev;
+       psspPayload = (struct ssp_completion_resp *)(piomb + 4);
+       status = le32_to_cpu(psspPayload->status);
+       tag = le32_to_cpu(psspPayload->tag);
+       ccb = &pm8001_ha->ccb_info[tag];
+       if ((status == IO_ABORTED) && ccb->open_retry) {
+               /* Being completed by another */
+               ccb->open_retry = 0;
+               return;
+       }
+       pm8001_dev = ccb->device;
+       param = le32_to_cpu(psspPayload->param);
+       t = ccb->task;
+
+       if (status && status != IO_UNDERFLOW)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("sas IO status 0x%x\n", status));
+       if (unlikely(!t || !t->lldd_task || !t->dev))
+               return;
+       ts = &t->task_status;
+       switch (status) {
+       case IO_SUCCESS:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_SUCCESS ,param = 0x%x\n",
+                               param));
+               if (param == 0) {
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+               } else {
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAS_PROTO_RESPONSE;
+                       ts->residual = param;
+                       iu = &psspPayload->ssp_resp_iu;
+                       sas_ssp_task_response(pm8001_ha->dev, t, iu);
+               }
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ABORTED IOMB Tag\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               break;
+       case IO_UNDERFLOW:
+               /* SSP Completion with error */
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_UNDERFLOW ,param = 0x%x\n",
+                               param));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               ts->residual = param;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_NO_DEVICE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_NO_DEVICE\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_PHY_DOWN;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               /* Force the midlayer to retry */
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               if (!t->uldd_task)
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_DMA:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_XFER_ERROR_DMA\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_OFFSET_MISMATCH:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_PORT_IN_RESET:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_PORT_IN_RESET\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_DS_NON_OPERATIONAL:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               if (!t->uldd_task)
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_DS_NON_OPERATIONAL);
+               break;
+       case IO_DS_IN_RECOVERY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_RECOVERY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_TM_TAG_NOT_FOUND:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", status));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       }
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("scsi_status = 0x%x\n ",
+               psspPayload->ssp_resp_iu.status));
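+
+       /* Mark the task done; if the upper layer has already aborted it, just
+        * free the ccb, otherwise complete the task back to libsas.
+        */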
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "task 0x%p done with io_status 0x%x resp 0x%x "
+                       "stat 0x%x but aborted by upper layer!\n",
+                       t, status, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* in order to force CPU ordering */
+               t->task_done(t);
+       }
+}
+
+/* See the comments for mpi_ssp_completion */
+static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       unsigned long flags;
+       struct task_status_struct *ts;
+       struct pm8001_ccb_info *ccb;
+       struct pm8001_device *pm8001_dev;
+       struct ssp_event_resp *psspPayload =
+               (struct ssp_event_resp *)(piomb + 4);
+       u32 event = le32_to_cpu(psspPayload->event);
+       u32 tag = le32_to_cpu(psspPayload->tag);
+       u32 port_id = le32_to_cpu(psspPayload->port_id);
+
+       ccb = &pm8001_ha->ccb_info[tag];
+       t = ccb->task;
+       pm8001_dev = ccb->device;
+       if (event)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("sas IO status 0x%x\n", event));
+       if (unlikely(!t || !t->lldd_task || !t->dev))
+               return;
+       ts = &t->task_status;
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
+                               port_id, tag, event));
+       switch (event) {
+       case IO_OVERFLOW:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");)
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               ts->residual = 0;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
+               return;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               if (!t->uldd_task)
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
+               return;
+       case IO_XFER_ERROR_UNEXPECTED_PHASE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_OFFSET_MISMATCH:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
+               /* TBC: used default set values */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_CMD_FRAME_ISSUED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+               return;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", event));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "task 0x%p done with event 0x%x resp 0x%x "
+                       "stat 0x%x but aborted by upper layer!\n",
+                       t, event, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* in order to force CPU ordering */
+               t->task_done(t);
+       }
+}
+
+/* See the comments for mpi_ssp_completion */
+static void
+mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       struct pm8001_ccb_info *ccb;
+       u32 param;
+       u32 status;
+       u32 tag;
+       struct sata_completion_resp *psataPayload;
+       struct task_status_struct *ts;
+       struct ata_task_resp *resp;
+       u32 *sata_resp;
+       struct pm8001_device *pm8001_dev;
+       unsigned long flags;
+
+       psataPayload = (struct sata_completion_resp *)(piomb + 4);
+       status = le32_to_cpu(psataPayload->status);
+       tag = le32_to_cpu(psataPayload->tag);
+
+       if (!tag) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("tag null\n"));
+               return;
+       }
+       ccb = &pm8001_ha->ccb_info[tag];
+       param = le32_to_cpu(psataPayload->param);
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("ccb null\n"));
+               return;
+       }
+
+       if (t) {
+               if (t->dev && (t->dev->lldd_dev))
+                       pm8001_dev = t->dev->lldd_dev;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task null\n"));
+               return;
+       }
+
+       if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
+               && unlikely(!t || !t->lldd_task || !t->dev)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task or dev null\n"));
+               return;
+       }
+
+       ts = &t->task_status;
+       if (!ts) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("ts null\n"));
+               return;
+       }
+
+       switch (status) {
+       case IO_SUCCESS:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+               if (param == 0) {
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+                       /* check if response is for SEND READ LOG */
+                       if (pm8001_dev &&
+                               (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
+                               /* set new bit for abort_all */
+                               pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
+                               /* clear bit for read log */
+                               pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+                               pm80xx_send_abort_all(pm8001_ha, pm8001_dev);
+                               /* Free the tag */
+                               pm8001_tag_free(pm8001_ha, tag);
+                               sas_free_task(t);
+                               return;
+                       }
+               } else {
+                       u8 len;
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAS_PROTO_RESPONSE;
+                       ts->residual = param;
+                       PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
+                               param));
+                       sata_resp = &psataPayload->sata_resp[0];
+                       resp = (struct ata_task_resp *)ts->buf;
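+                       /*
+                        * Pick the response FIS length from the transfer
+                        * type: PIO setup FIS for PIO reads, set device bits
+                        * FIS for NCQ, otherwise a plain D2H register FIS.
+                        */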
+                       if (t->ata_task.dma_xfer == 0 &&
+                       t->data_dir == PCI_DMA_FROMDEVICE) {
+                               len = sizeof(struct pio_setup_fis);
+                               PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("PIO read len = %d\n", len));
+                       } else if (t->ata_task.use_ncq) {
+                               len = sizeof(struct set_dev_bits_fis);
+                               PM8001_IO_DBG(pm8001_ha,
+                                       pm8001_printk("FPDMA len = %d\n", len));
+                       } else {
+                               len = sizeof(struct dev_to_host_fis);
+                               PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("other len = %d\n", len));
+                       }
+                       if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
+                               resp->frame_len = len;
+                               memcpy(&resp->ending_fis[0], sata_resp, len);
+                               ts->buf_valid_size = sizeof(*resp);
+                       } else
+                               PM8001_IO_DBG(pm8001_ha,
+                                       pm8001_printk("response to large\n"));
+               }
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ABORTED IOMB Tag\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+               /* the following cases are still TODO */
+       case IO_UNDERFLOW:
+               /* SATA Completion with error */
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_UNDERFLOW param = %d\n", param));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               ts->residual = param;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_NO_DEVICE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_NO_DEVICE\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_PHY_DOWN;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_INTERRUPTED;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*in order to force CPU ordering*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/* ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_DMA:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_DMA\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               break;
+       case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_PORT_IN_RESET:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_PORT_IN_RESET\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_DS_NON_OPERATIONAL:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha, pm8001_dev,
+                                       IO_DS_NON_OPERATIONAL);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_DS_IN_RECOVERY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_RECOVERY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_DS_IN_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_ERROR\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha, pm8001_dev,
+                                       IO_DS_IN_ERROR);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", status));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       }
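+       /* Mark the task done.  Unless the upper layer already aborted it,
+        * free the ccb and call task_done() with the HA lock dropped. */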
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task 0x%p done with io_status 0x%x"
+                       " resp 0x%x stat 0x%x but aborted by upper layer!\n",
+                       t, status, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else if (t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* ditto */
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       } else if (!t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/*ditto*/
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       }
+}
+
+/* See the comments for mpi_ssp_completion */
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+{
+       struct sas_task *t;
+       struct task_status_struct *ts;
+       struct pm8001_ccb_info *ccb;
+       struct pm8001_device *pm8001_dev;
+       struct sata_event_resp *psataPayload =
+               (struct sata_event_resp *)(piomb + 4);
+       u32 event = le32_to_cpu(psataPayload->event);
+       u32 tag = le32_to_cpu(psataPayload->tag);
+       u32 port_id = le32_to_cpu(psataPayload->port_id);
+       u32 dev_id = le32_to_cpu(psataPayload->device_id);
+       unsigned long flags;
+
+       ccb = &pm8001_ha->ccb_info[tag];
+
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("No CCB !!!. returning\n"));
+               return;
+       }
+       if (event)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("SATA EVENT 0x%x\n", event));
+
+       /* Check if this is NCQ error */
+       if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+               /* find device using device id */
+               pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
+               /* send read log extension */
+               if (pm8001_dev)
+                       pm80xx_send_read_log(pm8001_ha, pm8001_dev);
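+               /* Return without completing the task here; further NCQ error
+                * handling follows the READ LOG EXT response. */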
+               return;
+       }
+
+       if (unlikely(!t || !t->lldd_task || !t->dev)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task or dev null\n"));
+               return;
+       }
+
+       ts = &t->task_status;
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
+                               port_id, tag, event));
+       switch (event) {
+       case IO_OVERFLOW:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               ts->residual = 0;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_INTERRUPTED;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_PEER_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_UNEXPECTED_PHASE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_OFFSET_MISMATCH:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_CMD_FRAME_ISSUED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+               break;
+       case IO_XFER_PIO_SETUP_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_INTERNAL_CRC_ERROR\n"));
+               /* TBC: used default set values */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_DMA_ACTIVATE_TIMEOUT:
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_DMA_ACTIVATE_TIMEOUT\n"));
+               /* TBC: used default set values */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", event));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task 0x%p done with io_status 0x%x"
+                       " resp 0x%x stat 0x%x but aborted by upper layer!\n",
+                       t, event, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else if (t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* ditto */
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       } else if (!t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/*ditto*/
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       }
+}
+
+/* See the comments for mpi_ssp_completion */
+static void
+mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       u32 param, i;
+       struct sas_task *t;
+       struct pm8001_ccb_info *ccb;
+       unsigned long flags;
+       u32 status;
+       u32 tag;
+       struct smp_completion_resp *psmpPayload;
+       struct task_status_struct *ts;
+       struct pm8001_device *pm8001_dev;
+       char *pdma_respaddr = NULL;
+
+       psmpPayload = (struct smp_completion_resp *)(piomb + 4);
+       status = le32_to_cpu(psmpPayload->status);
+       tag = le32_to_cpu(psmpPayload->tag);
+
+       ccb = &pm8001_ha->ccb_info[tag];
+       param = le32_to_cpu(psmpPayload->param);
+       t = ccb->task;
+       pm8001_dev = ccb->device;
+       if (status)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("smp IO status 0x%x\n", status));
+       if (unlikely(!t || !t->lldd_task || !t->dev))
+               return;
+       ts = &t->task_status;
+
+       switch (status) {
+
+       case IO_SUCCESS:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_GOOD;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
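+               /* In direct SMP mode the response bytes are embedded in the
+                * IOMB, so copy them into the mapped response buffer. */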
+               if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+                       PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("DIRECT RESPONSE Length:%d\n",
+                                               param));
+                       pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
+                                               ((u64)sg_dma_address
+                                               (&t->smp_task.smp_resp))));
+                       for (i = 0; i < param; i++) {
+                               *(pdma_respaddr+i) = psmpPayload->_r_a[i];
+                               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                                       "SMP Byte%d DMA data 0x%x psmp 0x%x\n",
+                                       i, *(pdma_respaddr+i),
+                                       psmpPayload->_r_a[i]));
+                       }
+               }
+               break;
+       case IO_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ABORTED IOMB\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_OVERFLOW:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               ts->residual = 0;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_NO_DEVICE:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_PHY_DOWN;
+               break;
+       case IO_ERROR_HW_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_BUSY;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_BUSY;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_BUSY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_RX_FRAME:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_ERROR_INTERNAL_SMP_RESOURCE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_QUEUE_FULL;
+               break;
+       case IO_PORT_IN_RESET:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_PORT_IN_RESET\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_DS_NON_OPERATIONAL:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_DS_IN_RECOVERY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_RECOVERY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", status));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               /* not allowed case. Therefore, return failed status */
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "task 0x%p done with io_status 0x%x resp 0x%x"
+                       "stat 0x%x but aborted by upper layer!\n",
+                       t, status, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* in order to force CPU ordering */
+               t->task_done(t);
+       }
+}
+
+/**
+ * pm80xx_hw_event_ack_req - For PM8001, some events need to be acknowledged
+ * to the FW.
+ * @pm8001_ha: our hba card information
+ * @Qnum: the inbound queue number the ack is posted to.
+ * @SEA: source of event to ack
+ * @port_id: port id.
+ * @phyId: phy id.
+ * @param0: parameter 0.
+ * @param1: parameter 1.
+ */
+static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
+       u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
+{
+       struct hw_event_ack_req  payload;
+       u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
+
+       struct inbound_queue_table *circularQ;
+
+       memset((u8 *)&payload, 0, sizeof(payload));
+       circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
+       payload.tag = cpu_to_le32(1);
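+       /* Pack the ack: port id in bits 0..7, SEA in bits 8..23,
+        * phy id in bits 24..31. */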
+       payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
+               ((phyId & 0xFF) << 24) | (port_id & 0xFF));
+       payload.param0 = cpu_to_le32(param0);
+       payload.param1 = cpu_to_le32(param1);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+}
+
+static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+       u32 phyId, u32 phy_op);
+
+/**
+ * hw_event_sas_phy_up - FW tells us a SAS phy up event has occurred.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+
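+       /* Decode the packed IOMB fields: link rate, port id, phy id and
+        * port state. */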
+       u8 link_rate =
+               (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+       u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+       struct pm8001_port *port = &pm8001_ha->port[port_id];
+       struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       unsigned long flags;
+       u8 deviceType = pPayload->sas_identify.dev_type;
+       port->port_state = portstate;
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+               "portid:%d; phyid:%d; linkrate:%d; "
+               "portstate:%x; devicetype:%x\n",
+               port_id, phy_id, link_rate, portstate, deviceType));
+
+       switch (deviceType) {
+       case SAS_PHY_UNUSED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("device type no device.\n"));
+               break;
+       case SAS_END_DEVICE:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+               pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
+                       PHY_NOTIFY_ENABLE_SPINUP);
+               port->port_attached = 1;
+               pm8001_get_lrate_mode(phy, link_rate);
+               break;
+       case SAS_EDGE_EXPANDER_DEVICE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("expander device.\n"));
+               port->port_attached = 1;
+               pm8001_get_lrate_mode(phy, link_rate);
+               break;
+       case SAS_FANOUT_EXPANDER_DEVICE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("fanout expander device.\n"));
+               port->port_attached = 1;
+               pm8001_get_lrate_mode(phy, link_rate);
+               break;
+       default:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("unknown device type(%x)\n", deviceType));
+               break;
+       }
+       phy->phy_type |= PORT_TYPE_SAS;
+       phy->identify.device_type = deviceType;
+       phy->phy_attached = 1;
+       if (phy->identify.device_type == SAS_END_DEVICE)
+               phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
+       else if (phy->identify.device_type != SAS_PHY_UNUSED)
+               phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
+       phy->sas_phy.oob_mode = SAS_OOB_MODE;
+       sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+       spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+       memcpy(phy->frame_rcvd, &pPayload->sas_identify,
+               sizeof(struct sas_identify_frame)-4);
+       phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
+       pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+       spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+       if (pm8001_ha->flags == PM8001F_RUN_TIME)
+               mdelay(200);/* delay a moment to wait for the disk to spin up */
+       pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_sata_phy_up - FW tells us a SATA phy up event has occurred.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u8 link_rate =
+               (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+
+       u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+       struct pm8001_port *port = &pm8001_ha->port[port_id];
+       struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       unsigned long flags;
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+               "port id %d, phy id %d link_rate %d portstate 0x%x\n",
+                               port_id, phy_id, link_rate, portstate));
+
+       port->port_state = portstate;
+       port->port_attached = 1;
+       pm8001_get_lrate_mode(phy, link_rate);
+       phy->phy_type |= PORT_TYPE_SATA;
+       phy->phy_attached = 1;
+       phy->sas_phy.oob_mode = SATA_OOB_MODE;
+       sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+       spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
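+       /* The received D2H FIS begins 4 bytes before the sata_fis member
+        * of the IOMB payload. */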
+       memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+               sizeof(struct dev_to_host_fis));
+       phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+       phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+       phy->identify.device_type = SAS_SATA_DEV;
+       pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+       spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+       pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_phy_down - we should notify libsas that the phy is down.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+       u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+       struct pm8001_port *port = &pm8001_ha->port[port_id];
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       port->port_state = portstate;
+       phy->phy_type = 0;
+       phy->identify.device_type = 0;
+       phy->phy_attached = 0;
+       memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
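+       /* Only PORT_INVALID and PORT_LOSTCOMM send an explicit
+        * HW_EVENT_PHY_DOWN ack back to the firmware. */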
+       switch (portstate) {
+       case PORT_VALID:
+               break;
+       case PORT_INVALID:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" PortInvalid portID %d\n", port_id));
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" Last phy Down and port invalid\n"));
+               port->port_attached = 0;
+               pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+                       port_id, phy_id, 0, 0);
+               break;
+       case PORT_IN_RESET:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" Port In Reset portID %d\n", port_id));
+               break;
+       case PORT_NOT_ESTABLISHED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+               port->port_attached = 0;
+               break;
+       case PORT_LOSTCOMM:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" Last phy Down and port invalid\n"));
+               port->port_attached = 0;
+               pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+                       port_id, phy_id, 0, 0);
+               break;
+       default:
+               port->port_attached = 0;
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" phy Down and(default) = 0x%x\n",
+                       portstate));
+               break;
+
+       }
+}
+
+static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct phy_start_resp *pPayload =
+               (struct phy_start_resp *)(piomb + 4);
+       u32 status =
+               le32_to_cpu(pPayload->status);
+       u32 phy_id =
+               le32_to_cpu(pPayload->phyid);
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
+                               status, phy_id));
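+       /* A zero status means the phy started; at run time wake up the
+        * waiter on the enable completion. */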
+       if (status == 0) {
+               phy->phy_state = 1;
+               if (pm8001_ha->flags == PM8001F_RUN_TIME)
+                       complete(phy->enable_completion);
+       }
+       return 0;
+
+}
+
+/**
+ * mpi_thermal_hw_event - a thermal hw event has come.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct thermal_hw_event *pPayload =
+               (struct thermal_hw_event *)(piomb + 4);
+
+       u32 thermal_event = le32_to_cpu(pPayload->thermal_event);
+       u32 rht_lht = le32_to_cpu(pPayload->rht_lht);
+
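+       /* Bit 6 of thermal_event flags a local high-temperature violation,
+        * bit 4 a remote one; rht_lht carries the measured local temperature
+        * in bits 8..15 and the remote temperature in bits 24..31. */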
+       if (thermal_event & 0x40) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Local high temperature violated!\n"));
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Measured local high temperature %d\n",
+                               ((rht_lht & 0xFF00) >> 8)));
+       }
+       if (thermal_event & 0x10) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Remote high temperature violated!\n"));
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Measured remote high temperature %d\n",
+                               ((rht_lht & 0xFF000000) >> 24)));
+       }
+       return 0;
+}
+
+/**
+ * mpi_hw_event - a hw event has come.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       unsigned long flags;
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
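+       /* Decode port id, event type and status from lr_status_evt_portid
+        * and the phy id from phyid_npip_portstate. */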
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+       u16 eventType =
+               (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
+       u8 status =
+               (u8)((lr_status_evt_portid & 0x0F000000) >> 24);
+
+       struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+       PM8001_MSG_DBG(pm8001_ha,
+               pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
+                               port_id, phy_id, eventType, status));
+
+       switch (eventType) {
+
+       case HW_EVENT_SAS_PHY_UP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_SAS_PHY_UP\n"));
+               hw_event_sas_phy_up(pm8001_ha, piomb);
+               break;
+       case HW_EVENT_SATA_PHY_UP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
+               hw_event_sata_phy_up(pm8001_ha, piomb);
+               break;
+       case HW_EVENT_SATA_SPINUP_HOLD:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
+               sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+               break;
+       case HW_EVENT_PHY_DOWN:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PHY_DOWN\n"));
+               sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+               phy->phy_attached = 0;
+               phy->phy_state = 0;
+               hw_event_phy_down(pm8001_ha, piomb);
+               break;
+       case HW_EVENT_PORT_INVALID:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_INVALID\n"));
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       /* The broadcast change primitive was received; tell libsas about
+          this event so that it revalidates the SAS domain */
+       case HW_EVENT_BROADCAST_CHANGE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
+                       port_id, phy_id, 1, 0);
+               spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+               sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
+               spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+               break;
+       case HW_EVENT_PHY_ERROR:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PHY_ERROR\n"));
+               sas_phy_disconnected(&phy->sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+               break;
+       case HW_EVENT_BROADCAST_EXP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
+               spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+               sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
+               spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+               break;
+       case HW_EVENT_LINK_ERR_INVALID_DWORD:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_DISPARITY_ERROR,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_CODE_VIOLATION:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_CODE_VIOLATION,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_MALFUNCTION:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_MALFUNCTION\n"));
+               break;
+       case HW_EVENT_BROADCAST_SES:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
+               spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+               sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
+               spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+               break;
+       case HW_EVENT_INBOUND_CRC_ERROR:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_INBOUND_CRC_ERROR,
+                       port_id, phy_id, 0, 0);
+               break;
+       case HW_EVENT_HARD_RESET_RECEIVED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
+               sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+               break;
+       case HW_EVENT_ID_FRAME_TIMEOUT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_PORT_RESET_TIMER_TMO:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_PORT_RECOVERY_TIMER_TMO,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_PORT_RECOVER:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+               break;
+       case HW_EVENT_PORT_RESET_COMPLETE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
+               break;
+       case EVENT_BROADCAST_ASYNCH_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
+               break;
+       default:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("Unknown event type 0x%x\n", eventType));
+               break;
+       }
+       return 0;
+}
+
+/**
+ * mpi_phy_stop_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct phy_stop_resp *pPayload =
+               (struct phy_stop_resp *)(piomb + 4);
+       u32 status =
+               le32_to_cpu(pPayload->status);
+       u32 phyid =
+               le32_to_cpu(pPayload->phyid);
+       struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("phy:0x%x status:0x%x\n",
+                                       phyid, status));
+       if (status == 0)
+               phy->phy_state = 0;
+       return 0;
+}
+
+/**
+ * mpi_set_controller_config_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       struct set_ctrl_cfg_resp *pPayload =
+                       (struct set_ctrl_cfg_resp *)(piomb + 4);
+       u32 status = le32_to_cpu(pPayload->status);
+       u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
+
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
+                       status, err_qlfr_pgcd));
+
+       return 0;
+}
+
+/**
+ * mpi_get_controller_config_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_get_phy_profile_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_flash_op_ext_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_set_phy_profile_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_kek_management_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4);
+
+       u32 status = le32_to_cpu(pPayload->status);
+       u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop);
+       u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr);
+
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+               "KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n",
+               status, kidx_new_curr_ksop, err_qlfr));
+
+       return 0;
+}
+
+/**
+ * mpi_dek_management_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * ssp_coalesced_comp_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * process_one_iomb - process one outbound Queue memory block
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       __le32 pHeader = *(__le32 *)piomb;
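+       /* The opcode is carried in the low 12 bits of the IOMB header. */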
+       u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF);
+
+       switch (opc) {
+       case OPC_OUB_ECHO:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
+               break;
+       case OPC_OUB_HW_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_HW_EVENT\n"));
+               mpi_hw_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_THERM_HW_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_THERM_HW_EVENT\n"));
+               mpi_thermal_hw_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_COMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_COMP\n"));
+               mpi_ssp_completion(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SMP_COMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SMP_COMP\n"));
+               mpi_smp_completion(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_LOCAL_PHY_CNTRL:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
+               pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEV_REGIST:
+               PM8001_MSG_DBG(pm8001_ha,
+               pm8001_printk("OPC_OUB_DEV_REGIST\n"));
+               pm8001_mpi_reg_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEREG_DEV:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("unregister the device\n"));
+               pm8001_mpi_dereg_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_DEV_HANDLE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
+               break;
+       case OPC_OUB_SATA_COMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SATA_COMP\n"));
+               mpi_sata_completion(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SATA_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SATA_EVENT\n"));
+               mpi_sata_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_EVENT\n"));
+               mpi_ssp_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEV_HANDLE_ARRIV:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
+               /*This is for target*/
+               break;
+       case OPC_OUB_SSP_RECV_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
+               /*This is for target*/
+               break;
+       case OPC_OUB_FW_FLASH_UPDATE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
+               pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GPIO_RESPONSE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
+               break;
+       case OPC_OUB_GPIO_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
+               break;
+       case OPC_OUB_GENERAL_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
+               pm8001_mpi_general_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_ABORT_RSP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SATA_ABORT_RSP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SAS_DIAG_MODE_START_END:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
+               break;
+       case OPC_OUB_SAS_DIAG_EXECUTE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
+               break;
+       case OPC_OUB_GET_TIME_STAMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
+               break;
+       case OPC_OUB_SAS_HW_EVENT_ACK:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
+               break;
+       case OPC_OUB_PORT_CONTROL:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
+               break;
+       case OPC_OUB_SMP_ABORT_RSP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_NVMD_DATA:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
+               pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SET_NVMD_DATA:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
+               pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEVICE_HANDLE_REMOVAL:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
+               break;
+       case OPC_OUB_SET_DEVICE_STATE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
+               pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_DEVICE_STATE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
+               break;
+       case OPC_OUB_SET_DEV_INFO:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
+               break;
+       /* spcv specific commands */
+       case OPC_OUB_PHY_START_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_PHY_START_RESP opcode:%x\n", opc));
+               mpi_phy_start_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_PHY_STOP_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc));
+               mpi_phy_stop_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SET_CONTROLLER_CONFIG:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc));
+               mpi_set_controller_config_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_CONTROLLER_CONFIG:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc));
+               mpi_get_controller_config_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_PHY_PROFILE:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc));
+               mpi_get_phy_profile_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_FLASH_OP_EXT:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc));
+               mpi_flash_op_ext_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SET_PHY_PROFILE:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc));
+               mpi_set_phy_profile_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_KEK_MANAGEMENT_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc));
+               mpi_kek_management_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEK_MANAGEMENT_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc));
+               mpi_dek_management_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_COALESCED_COMP_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc));
+               ssp_coalesced_comp_resp(pm8001_ha, piomb);
+               break;
+       default:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
+               break;
+       }
+}
+
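+/*
+ * process_oq - drain one outbound queue.
+ *
+ * Holding the HBA lock, keep consuming IOMBs from outbound queue 'vec',
+ * dispatch each through process_one_iomb() and release its slot in the
+ * circular buffer.  On MPI_IO_STATUS_BUSY re-read the producer index
+ * written by the controller and stop once it matches the consumer index
+ * (the queue is empty).
+ */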
+static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+       struct outbound_queue_table *circularQ;
+       void *pMsg1 = NULL;
+       u8 uninitialized_var(bc);
+       u32 ret = MPI_IO_STATUS_FAIL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pm8001_ha->lock, flags);
+       circularQ = &pm8001_ha->outbnd_q_tbl[vec];
+       do {
+               ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+               if (MPI_IO_STATUS_SUCCESS == ret) {
+                       /* process the outbound message */
+                       process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
+                       /* free the message from the outbound circular buffer */
+                       pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
+                                                       circularQ, bc);
+               }
+               if (MPI_IO_STATUS_BUSY == ret) {
+                       /* Update the producer index from SPC */
+                       circularQ->producer_index =
+                               cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
+                       if (le32_to_cpu(circularQ->producer_index) ==
+                               circularQ->consumer_idx)
+                               /* OQ is empty */
+                               break;
+               }
+       } while (1);
+       spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+       return ret;
+}
+
+/* PCI_DMA_... to our direction translation. */
+static const u8 data_dir_flags[] = {
+       [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
+       [PCI_DMA_TODEVICE]      = DATA_DIR_OUT,/* OUTBOUND */
+       [PCI_DMA_FROMDEVICE]    = DATA_DIR_IN,/* INBOUND */
+       [PCI_DMA_NONE]          = DATA_DIR_NONE,/* NO TRANSFER */
+};
+
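+/*
+ * build_smp_cmd - fill the common part of an SMP request IOMB.
+ *
+ * In SMP_DIRECT mode the request length minus the 4-byte CRC is encoded
+ * in bits [23:16] of len_ip_ir; in indirect mode bits 0 (indirect
+ * response) and 1 (indirect payload) are set instead, and the request/
+ * response addresses and sizes travel in the long_smp_req descriptor
+ * filled by the caller.
+ */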
+static void build_smp_cmd(u32 deviceID, __le32 hTag,
+                       struct smp_req *psmp_cmd, int mode, int length)
+{
+       psmp_cmd->tag = hTag;
+       psmp_cmd->device_id = cpu_to_le32(deviceID);
+       if (mode == SMP_DIRECT) {
+               length = length - 4; /* subtract crc */
+               psmp_cmd->len_ip_ir = cpu_to_le32(length << 16);
+       } else {
+               psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
+       }
+}
+
+/**
+ * pm80xx_chip_smp_req - send a SMP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_ccb_info *ccb)
+{
+       int elem, rc;
+       struct sas_task *task = ccb->task;
+       struct domain_device *dev = task->dev;
+       struct pm8001_device *pm8001_dev = dev->lldd_dev;
+       struct scatterlist *sg_req, *sg_resp;
+       u32 req_len, resp_len;
+       struct smp_req smp_cmd;
+       u32 opc;
+       struct inbound_queue_table *circularQ;
+       char *preq_dma_addr = NULL;
+       __le64 tmp_addr;
+       u32 i, length;
+
+       memset(&smp_cmd, 0, sizeof(smp_cmd));
+       /*
+        * DMA-map SMP request, response buffers
+        */
+       sg_req = &task->smp_task.smp_req;
+       elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+       if (!elem)
+               return -ENOMEM;
+       req_len = sg_dma_len(sg_req);
+
+       sg_resp = &task->smp_task.smp_resp;
+       elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+       if (!elem) {
+               rc = -ENOMEM;
+               goto err_out;
+       }
+       resp_len = sg_dma_len(sg_resp);
+       /* must be in dwords */
+       if ((req_len & 0x3) || (resp_len & 0x3)) {
+               rc = -EINVAL;
+               goto err_out_2;
+       }
+
+       opc = OPC_INB_SMP_REQUEST;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
+
+       length = sg_req->length;
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("SMP Frame Length %d\n", sg_req->length));
+       if (!(length - 8))
+               pm8001_ha->smp_exp_mode = SMP_DIRECT;
+       else
+               pm8001_ha->smp_exp_mode = SMP_INDIRECT;
+
+       /* DIRECT MODE support only in spcv/ve */
+       pm8001_ha->smp_exp_mode = SMP_DIRECT;
+
+       tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
+       preq_dma_addr = (char *)phys_to_virt(tmp_addr);
+
+       /* INDIRECT MODE command settings. Use DMA */
+       if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("SMP REQUEST INDIRECT MODE\n"));
+               /* for SPCv indirect mode. Place the top 4 bytes of
+                * SMP Request header here. */
+               for (i = 0; i < 4; i++)
+                       smp_cmd.smp_req16[i] = *(preq_dma_addr + i);
+               /* exclude top 4 bytes for SMP req header */
+               smp_cmd.long_smp_req.long_req_addr =
+                       cpu_to_le64((u64)sg_dma_address
+                               (&task->smp_task.smp_req) - 4);
+               /* exclude 4 bytes for SMP req header and CRC */
+               smp_cmd.long_smp_req.long_req_size =
+                       cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
+               smp_cmd.long_smp_req.long_resp_addr =
+                               cpu_to_le64((u64)sg_dma_address
+                                       (&task->smp_task.smp_resp));
+               smp_cmd.long_smp_req.long_resp_size =
+                               cpu_to_le32((u32)sg_dma_len
+                                       (&task->smp_task.smp_resp)-4);
+       } else { /* DIRECT MODE */
+               smp_cmd.long_smp_req.long_req_addr =
+                       cpu_to_le64((u64)sg_dma_address
+                                       (&task->smp_task.smp_req));
+               smp_cmd.long_smp_req.long_req_size =
+                       cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
+               smp_cmd.long_smp_req.long_resp_addr =
+                       cpu_to_le64((u64)sg_dma_address
+                               (&task->smp_task.smp_resp));
+               smp_cmd.long_smp_req.long_resp_size =
+                       cpu_to_le32
+                       ((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
+       }
+       if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("SMP REQUEST DIRECT MODE\n"));
+               for (i = 0; i < length; i++)
+                       if (i < 16) {
+                               smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
+                               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                                       "Byte[%d]:%x (DMA data:%x)\n",
+                                       i, smp_cmd.smp_req16[i],
+                                       *(preq_dma_addr)));
+                       } else {
+                               smp_cmd.smp_req[i] = *(preq_dma_addr+i);
+                               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                                       "Byte[%d]:%x (DMA data:%x)\n",
+                                       i, smp_cmd.smp_req[i],
+                                       *(preq_dma_addr)));
+                       }
+       }
+
+       build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
+                               &smp_cmd, pm8001_ha->smp_exp_mode, length);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
+       return 0;
+
+err_out_2:
+       dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
+                       PCI_DMA_FROMDEVICE);
+err_out:
+       dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
+                       PCI_DMA_TODEVICE);
+       return rc;
+}
+
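+/*
+ * check_enc_sas_cmd/check_enc_sat_cmd - decide whether a command may be
+ * sent as an encrypted IO.  Only data read/write CDBs (SAS) and ATA
+ * read/write commands (SATA) return 1; everything else is issued as a
+ * normal, unencrypted request.
+ */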
+static int check_enc_sas_cmd(struct sas_task *task)
+{
+       if ((task->ssp_task.cdb[0] == READ_10)
+               || (task->ssp_task.cdb[0] == WRITE_10)
+               || (task->ssp_task.cdb[0] == WRITE_VERIFY))
+               return 1;
+       else
+               return 0;
+}
+
+static int check_enc_sat_cmd(struct sas_task *task)
+{
+       int ret = 0;
+       switch (task->ata_task.fis.command) {
+       case ATA_CMD_FPDMA_READ:
+       case ATA_CMD_READ_EXT:
+       case ATA_CMD_READ:
+       case ATA_CMD_FPDMA_WRITE:
+       case ATA_CMD_WRITE_EXT:
+       case ATA_CMD_WRITE:
+       case ATA_CMD_PIO_READ:
+       case ATA_CMD_PIO_READ_EXT:
+       case ATA_CMD_PIO_WRITE:
+       case ATA_CMD_PIO_WRITE_EXT:
+               ret = 1;
+               break;
+       default:
+               ret = 0;
+               break;
+       }
+       return ret;
+}
+
+/**
+ * pm80xx_chip_ssp_io_req - send a SSP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_ccb_info *ccb)
+{
+       struct sas_task *task = ccb->task;
+       struct domain_device *dev = task->dev;
+       struct pm8001_device *pm8001_dev = dev->lldd_dev;
+       struct ssp_ini_io_start_req ssp_cmd;
+       u32 tag = ccb->ccb_tag;
+       int ret;
+       u64 phys_addr;
+       struct inbound_queue_table *circularQ;
+       static u32 inb;
+       static u32 outb;
+       u32 opc = OPC_INB_SSPINIIOSTART;
+       memset(&ssp_cmd, 0, sizeof(ssp_cmd));
+       memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
+       /* data address domain added for spcv; set to 0 by host,
+        * used internally by controller
+        * 0 for SAS 1.1 and SAS 2.0 compatible TLR
+        */
+       ssp_cmd.dad_dir_m_tlr =
+               cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);
+       ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+       ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
+       ssp_cmd.tag = cpu_to_le32(tag);
+       if (task->ssp_task.enable_first_burst)
+               ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
+       ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
+       ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
+       memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16);
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       /* Check if encryption is set */
+       if (pm8001_ha->chip->encrypt &&
+               !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption enabled. Sending Encrypt SAS command 0x%x\n",
+                       task->ssp_task.cdb[0]));
+               opc = OPC_INB_SSP_INI_DIF_ENC_IO;
+               /* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/
+               ssp_cmd.dad_dir_m_tlr = cpu_to_le32
+                       ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0);
+
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter,
+                                               ccb->n_elem, ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       ssp_cmd.enc_addr_low =
+                               cpu_to_le32(lower_32_bits(phys_addr));
+                       ssp_cmd.enc_addr_high =
+                               cpu_to_le32(upper_32_bits(phys_addr));
+                       ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       ssp_cmd.enc_addr_low =
+                               cpu_to_le32(lower_32_bits(dma_addr));
+                       ssp_cmd.enc_addr_high =
+                               cpu_to_le32(upper_32_bits(dma_addr));
+                       ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.enc_esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       ssp_cmd.enc_addr_low = 0;
+                       ssp_cmd.enc_addr_high = 0;
+                       ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.enc_esgl = 0;
+               }
+               /* XTS mode. All other fields are 0 */
+               ssp_cmd.key_cmode = 0x6 << 4;
+               /* set tweak values. Should be the start lba */
+               ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cdb[2] << 24) |
+                                               (task->ssp_task.cdb[3] << 16) |
+                                               (task->ssp_task.cdb[4] << 8) |
+                                               (task->ssp_task.cdb[5]));
+       } else {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Sending Normal SAS command 0x%x inb q %x\n",
+                       task->ssp_task.cdb[0], inb));
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter, ccb->n_elem,
+                                       ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       ssp_cmd.addr_low =
+                               cpu_to_le32(lower_32_bits(phys_addr));
+                       ssp_cmd.addr_high =
+                               cpu_to_le32(upper_32_bits(phys_addr));
+                       ssp_cmd.esgl = cpu_to_le32(1<<31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+                       ssp_cmd.addr_high =
+                               cpu_to_le32(upper_32_bits(dma_addr));
+                       ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       ssp_cmd.addr_low = 0;
+                       ssp_cmd.addr_high = 0;
+                       ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.esgl = 0;
+               }
+       }
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++);
+
+       /* rotate the outb queue */
+       outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
+
+       return ret;
+}
+
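+/**
+ * pm80xx_chip_sata_req - send a SATA task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */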
+static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_ccb_info *ccb)
+{
+       struct sas_task *task = ccb->task;
+       struct domain_device *dev = task->dev;
+       struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
+       u32 tag = ccb->ccb_tag;
+       int ret;
+       static u32 inb;
+       static u32 outb;
+       struct sata_start_req sata_cmd;
+       u32 hdr_tag, ncg_tag = 0;
+       u64 phys_addr;
+       u32 ATAP = 0x0;
+       u32 dir;
+       struct inbound_queue_table *circularQ;
+       unsigned long flags;
+       u32 opc = OPC_INB_SATA_HOST_OPSTART;
+       memset(&sata_cmd, 0, sizeof(sata_cmd));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       if (task->data_dir == PCI_DMA_NONE) {
+               ATAP = 0x04; /* no data*/
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
+       } else if (likely(!task->ata_task.device_control_reg_update)) {
+               if (task->ata_task.dma_xfer) {
+                       ATAP = 0x06; /* DMA */
+                       PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
+               } else {
+                       ATAP = 0x05; /* PIO*/
+                       PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
+               }
+               if (task->ata_task.use_ncq &&
+                       dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
+                       ATAP = 0x07; /* FPDMA */
+                       PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
+               }
+       }
+       if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
+               task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+               ncg_tag = hdr_tag;
+       }
+       dir = data_dir_flags[task->data_dir] << 8;
+       sata_cmd.tag = cpu_to_le32(tag);
+       sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+
+       sata_cmd.sata_fis = task->ata_task.fis;
+       if (likely(!task->ata_task.device_control_reg_update))
+               sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
+       sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */
+
+       /* Check if encryption is set */
+       if (pm8001_ha->chip->encrypt &&
+               !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption enabled. Sending Encrypt SATA cmd 0x%x\n",
+                       sata_cmd.sata_fis.command));
+               opc = OPC_INB_SATA_DIF_ENC_IO;
+
+               /* set encryption bit */
+               sata_cmd.ncqtag_atap_dir_m_dad =
+                       cpu_to_le32(((ncg_tag & 0xff)<<16)|
+                               ((ATAP & 0x3f) << 10) | 0x20 | dir);
+                                                       /* dad (bit 0-1) is 0 */
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter,
+                                               ccb->n_elem, ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
+                       sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
+                       sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       sata_cmd.enc_addr_low = lower_32_bits(dma_addr);
+                       sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
+                       sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.enc_esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       sata_cmd.enc_addr_low = 0;
+                       sata_cmd.enc_addr_high = 0;
+                       sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.enc_esgl = 0;
+               }
+               /* XTS mode. All other fields are 0 */
+               sata_cmd.key_index_mode = 0x6 << 4;
+               /* set tweak values. Should be the start lba */
+               sata_cmd.twk_val0 =
+                       cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) |
+                                       (sata_cmd.sata_fis.lbah << 16) |
+                                       (sata_cmd.sata_fis.lbam << 8) |
+                                       (sata_cmd.sata_fis.lbal));
+               sata_cmd.twk_val1 =
+                       cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) |
+                                        (sata_cmd.sata_fis.lbam_exp));
+       } else {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Sending Normal SATA command 0x%x inb %x\n",
+                       sata_cmd.sata_fis.command, inb));
+               /* dad (bit 0-1) is 0 */
+               sata_cmd.ncqtag_atap_dir_m_dad =
+                       cpu_to_le32(((ncg_tag & 0xff)<<16) |
+                                       ((ATAP & 0x3f) << 10) | dir);
+
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter,
+                                       ccb->n_elem, ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       sata_cmd.addr_low = lower_32_bits(phys_addr);
+                       sata_cmd.addr_high = upper_32_bits(phys_addr);
+                       sata_cmd.esgl = cpu_to_le32(1 << 31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       sata_cmd.addr_low = lower_32_bits(dma_addr);
+                       sata_cmd.addr_high = upper_32_bits(dma_addr);
+                       sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       sata_cmd.addr_low = 0;
+                       sata_cmd.addr_high = 0;
+                       sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.esgl = 0;
+               }
+               /* scsi cdb */
+               sata_cmd.atapi_scsi_cdb[0] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[0]) |
+                       (task->ata_task.atapi_packet[1] << 8) |
+                       (task->ata_task.atapi_packet[2] << 16) |
+                       (task->ata_task.atapi_packet[3] << 24)));
+               sata_cmd.atapi_scsi_cdb[1] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[4]) |
+                       (task->ata_task.atapi_packet[5] << 8) |
+                       (task->ata_task.atapi_packet[6] << 16) |
+                       (task->ata_task.atapi_packet[7] << 24)));
+               sata_cmd.atapi_scsi_cdb[2] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[8]) |
+                       (task->ata_task.atapi_packet[9] << 8) |
+                       (task->ata_task.atapi_packet[10] << 16) |
+                       (task->ata_task.atapi_packet[11] << 24)));
+               sata_cmd.atapi_scsi_cdb[3] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[12]) |
+                       (task->ata_task.atapi_packet[13] << 8) |
+                       (task->ata_task.atapi_packet[14] << 16) |
+                       (task->ata_task.atapi_packet[15] << 24)));
+       }
+
+       /* Check for read log for failed drive and return */
+       if (sata_cmd.sata_fis.command == 0x2f) {
+               if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
+                       struct task_status_struct *ts;
+
+                       pm8001_ha_dev->id &= 0xDFFFFFFF;
+                       ts = &task->task_status;
+
+                       spin_lock_irqsave(&task->task_state_lock, flags);
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+                       task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+                       task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+                       task->task_state_flags |= SAS_TASK_STATE_DONE;
+                       if (unlikely((task->task_state_flags &
+                                       SAS_TASK_STATE_ABORTED))) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("task 0x%p resp 0x%x "
+                                       " stat 0x%x but aborted by upper layer "
+                                       "\n", task, ts->resp, ts->stat));
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               return 0;
+                       } else if (task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/* ditto */
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       } else if (!task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/*ditto*/
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       }
+               }
+       }
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+                                               &sata_cmd, outb++);
+
+       /* rotate the outb queue */
+       outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
+       return ret;
+}
+
+/**
+ * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we want to start up.
+ */
+static int
+pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+{
+       struct phy_start_req payload;
+       struct inbound_queue_table *circularQ;
+       int ret;
+       u32 tag = 0x01;
+       u32 opcode = OPC_INB_PHYSTART;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       memset(&payload, 0, sizeof(payload));
+       payload.tag = cpu_to_le32(tag);
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
+       /*
+        ** [0:7]       PHY Identifier
+        ** [8:11]      link rate 1.5G, 3G, 6G
+        ** [12:13]     link mode 01b SAS mode; 10b SATA mode; 11b Auto mode
+        ** [14]        0b disable spin up hold; 1b enable spin up hold
+        ** [15]        0b no change in current PHY analog setup; 1b enable using SPASTI
+        */
+       payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+                       LINKMODE_AUTO | LINKRATE_15 |
+                       LINKRATE_30 | LINKRATE_60 | phy_id);
+       /* SSC Disable and SAS Analog ST configuration */
+       /**
+       payload.ase_sh_lm_slr_phyid =
+               cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE |
+               LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
+               phy_id);
+       Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need
+       **/
+
+       payload.sas_identify.dev_type = SAS_END_DEVICE;
+       payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+       memcpy(payload.sas_identify.sas_addr,
+               pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+       payload.sas_identify.phy_id = phy_id;
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+       return ret;
+}
+
+/**
+ * pm80xx_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we want to stop.
+ */
+static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+       u8 phy_id)
+{
+       struct phy_stop_req payload;
+       struct inbound_queue_table *circularQ;
+       int ret;
+       u32 tag = 0x01;
+       u32 opcode = OPC_INB_PHYSTOP;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       memset(&payload, 0, sizeof(payload));
+       payload.tag = cpu_to_le32(tag);
+       payload.phy_id = cpu_to_le32(phy_id);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+       return ret;
+}
+
+/**
+ * see comments on pm8001_mpi_reg_resp.
+ */
+static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_device *pm8001_dev, u32 flag)
+{
+       struct reg_dev_req payload;
+       u32     opc;
+       u32 stp_sspsmp_sata = 0x4;
+       struct inbound_queue_table *circularQ;
+       u32 linkrate, phy_id;
+       int rc, tag = 0xdeadbeef;
+       struct pm8001_ccb_info *ccb;
+       u8 retryFlag = 0x1;
+       u16 firstBurstSize = 0;
+       u16 ITNT = 2000;
+       struct domain_device *dev = pm8001_dev->sas_device;
+       struct domain_device *parent_dev = dev->parent;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       memset(&payload, 0, sizeof(payload));
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+       if (rc)
+               return rc;
+       ccb = &pm8001_ha->ccb_info[tag];
+       ccb->device = pm8001_dev;
+       ccb->ccb_tag = tag;
+       payload.tag = cpu_to_le32(tag);
+
+       if (flag == 1) {
+               stp_sspsmp_sata = 0x02; /*direct attached sata */
+       } else {
+               if (pm8001_dev->dev_type == SAS_SATA_DEV)
+                       stp_sspsmp_sata = 0x00; /* stp*/
+               else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
+                       pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                       pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+                       stp_sspsmp_sata = 0x01; /*ssp or smp*/
+       }
+       if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+               phy_id = parent_dev->ex_dev.ex_phy->phy_id;
+       else
+               phy_id = pm8001_dev->attached_phy;
+
+       opc = OPC_INB_REG_DEV;
+
+       linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
+                       pm8001_dev->sas_device->linkrate : dev->port->linkrate;
+
+       payload.phyid_portid =
+               cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) |
+               ((phy_id & 0xFF) << 8));
+
+       payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) |
+               ((linkrate & 0x0F) << 24) |
+               ((stp_sspsmp_sata & 0x03) << 28));
+       payload.firstburstsize_ITNexustimeout =
+               cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
+
+       memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
+               SAS_ADDR_SIZE);
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+       return rc;
+}
+
+/**
+ * pm80xx_chip_phy_ctl_req - support the local phy operation
+ * @pm8001_ha: our hba card information.
+ * @phyId: the phy id on which we want to operate
+ * @phy_op: the local phy operation to carry out
+ */
+static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+       u32 phyId, u32 phy_op)
+{
+       struct local_phy_ctl_req payload;
+       struct inbound_queue_table *circularQ;
+       int ret;
+       u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+       memset(&payload, 0, sizeof(payload));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(1);
+       payload.phyop_phyid =
+               cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+       return ret;
+}
+
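+/*
+ * With MSI-X every vector raised is ours, so simply claim the interrupt.
+ * In legacy interrupt mode read the outbound doorbell register and claim
+ * the interrupt only if the controller has set a bit in it.
+ */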
+static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 value;
+#ifdef PM8001_USE_MSIX
+       return 1;
+#endif
+       value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
+       if (value)
+               return 1;
+       return 0;
+
+}
+
+/**
+ * pm80xx_chip_isr - PM80xx isr handler.
+ * @pm8001_ha: our hba card information.
+ * @vec: the interrupt vector (outbound queue) to service.
+ */
+static irqreturn_t
+pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+       pm80xx_chip_interrupt_disable(pm8001_ha, vec);
+       process_oq(pm8001_ha, vec);
+       pm80xx_chip_interrupt_enable(pm8001_ha, vec);
+       return IRQ_HANDLED;
+}
+
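+/*
+ * Dispatch table for SPCv/SPCve (pm80xx) controllers.  Entries with a
+ * pm80xx_ prefix are implemented in this file; entries that keep the
+ * pm8001_ prefix reuse helpers shared with the existing SPC support.
+ */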
+const struct pm8001_dispatch pm8001_80xx_dispatch = {
+       .name                   = "pmc80xx",
+       .chip_init              = pm80xx_chip_init,
+       .chip_soft_rst          = pm80xx_chip_soft_rst,
+       .chip_rst               = pm80xx_hw_chip_rst,
+       .chip_iounmap           = pm8001_chip_iounmap,
+       .isr                    = pm80xx_chip_isr,
+       .is_our_interupt        = pm80xx_chip_is_our_interupt,
+       .isr_process_oq         = process_oq,
+       .interrupt_enable       = pm80xx_chip_interrupt_enable,
+       .interrupt_disable      = pm80xx_chip_interrupt_disable,
+       .make_prd               = pm8001_chip_make_sg,
+       .smp_req                = pm80xx_chip_smp_req,
+       .ssp_io_req             = pm80xx_chip_ssp_io_req,
+       .sata_req               = pm80xx_chip_sata_req,
+       .phy_start_req          = pm80xx_chip_phy_start_req,
+       .phy_stop_req           = pm80xx_chip_phy_stop_req,
+       .reg_dev_req            = pm80xx_chip_reg_dev_req,
+       .dereg_dev_req          = pm8001_chip_dereg_dev_req,
+       .phy_ctl_req            = pm80xx_chip_phy_ctl_req,
+       .task_abort             = pm8001_chip_abort_task,
+       .ssp_tm_req             = pm8001_chip_ssp_tm_req,
+       .get_nvmd_req           = pm8001_chip_get_nvmd_req,
+       .set_nvmd_req           = pm8001_chip_set_nvmd_req,
+       .fw_flash_update_req    = pm8001_chip_fw_flash_update_req,
+       .set_dev_state_req      = pm8001_chip_set_dev_state_req,
+};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
new file mode 100644 (file)
index 0000000..2b760ba
--- /dev/null
@@ -0,0 +1,1523 @@
+/*
+ * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions, and the following disclaimer,
+ *     without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *     substantially similar to the "NO WARRANTY" disclaimer below
+ *     ("Disclaimer") and any redistribution must be conditioned upon
+ *     including a substantially similar Disclaimer requirement for further
+ *     binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *     of any contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#ifndef _PMC8001_REG_H_
+#define _PMC8001_REG_H_
+
+#include <linux/types.h>
+#include <scsi/libsas.h>
+
+/* for Request Opcode of IOMB */
+#define OPC_INB_ECHO                           1       /* 0x000 */
+#define OPC_INB_PHYSTART                       4       /* 0x004 */
+#define OPC_INB_PHYSTOP                                5       /* 0x005 */
+#define OPC_INB_SSPINIIOSTART                  6       /* 0x006 */
+#define OPC_INB_SSPINITMSTART                  7       /* 0x007 */
+/* 0x8 RESV IN SPCv */
+#define OPC_INB_RSVD                           8       /* 0x008 */
+#define OPC_INB_DEV_HANDLE_ACCEPT              9       /* 0x009 */
+#define OPC_INB_SSPTGTIOSTART                  10      /* 0x00A */
+#define OPC_INB_SSPTGTRSPSTART                 11      /* 0x00B */
+/* 0xC, 0xD, 0xE removed in SPCv */
+#define OPC_INB_SSP_ABORT                      15      /* 0x00F */
+#define OPC_INB_DEREG_DEV_HANDLE               16      /* 0x010 */
+#define OPC_INB_GET_DEV_HANDLE                 17      /* 0x011 */
+#define OPC_INB_SMP_REQUEST                    18      /* 0x012 */
+/* 0x13 SMP_RESPONSE is removed in SPCv */
+#define OPC_INB_SMP_ABORT                      20      /* 0x014 */
+/* 0x16 RESV IN SPCv */
+#define OPC_INB_RSVD1                          22      /* 0x016 */
+#define OPC_INB_SATA_HOST_OPSTART              23      /* 0x017 */
+#define OPC_INB_SATA_ABORT                     24      /* 0x018 */
+#define OPC_INB_LOCAL_PHY_CONTROL              25      /* 0x019 */
+/* 0x1A RESV IN SPCv */
+#define OPC_INB_RSVD2                          26      /* 0x01A */
+#define OPC_INB_FW_FLASH_UPDATE                        32      /* 0x020 */
+#define OPC_INB_GPIO                           34      /* 0x022 */
+#define OPC_INB_SAS_DIAG_MODE_START_END                35      /* 0x023 */
+#define OPC_INB_SAS_DIAG_EXECUTE               36      /* 0x024 */
+/* 0x25 RESV IN SPCv */
+#define OPC_INB_RSVD3                          37      /* 0x025 */
+#define OPC_INB_GET_TIME_STAMP                 38      /* 0x026 */
+#define OPC_INB_PORT_CONTROL                   39      /* 0x027 */
+#define OPC_INB_GET_NVMD_DATA                  40      /* 0x028 */
+#define OPC_INB_SET_NVMD_DATA                  41      /* 0x029 */
+#define OPC_INB_SET_DEVICE_STATE               42      /* 0x02A */
+#define OPC_INB_GET_DEVICE_STATE               43      /* 0x02B */
+#define OPC_INB_SET_DEV_INFO                   44      /* 0x02C */
+/* 0x2D RESV IN SPCv */
+#define OPC_INB_RSVD4                          45      /* 0x02D */
+#define OPC_INB_SGPIO_REGISTER                 46      /* 0x02E */
+#define OPC_INB_PCIE_DIAG_EXEC                 47      /* 0x02F */
+#define OPC_INB_SET_CONTROLLER_CONFIG          48      /* 0x030 */
+#define OPC_INB_GET_CONTROLLER_CONFIG          49      /* 0x031 */
+#define OPC_INB_REG_DEV                                50      /* 0x032 */
+#define OPC_INB_SAS_HW_EVENT_ACK               51      /* 0x033 */
+#define OPC_INB_GET_DEVICE_INFO                        52      /* 0x034 */
+#define OPC_INB_GET_PHY_PROFILE                        53      /* 0x035 */
+#define OPC_INB_FLASH_OP_EXT                   54      /* 0x036 */
+#define OPC_INB_SET_PHY_PROFILE                        55      /* 0x037 */
+#define OPC_INB_KEK_MANAGEMENT                 256     /* 0x100 */
+#define OPC_INB_DEK_MANAGEMENT                 257     /* 0x101 */
+#define OPC_INB_SSP_INI_DIF_ENC_IO             258     /* 0x102 */
+#define OPC_INB_SATA_DIF_ENC_IO                        259     /* 0x103 */
+
+/* for Response Opcode of IOMB */
+#define OPC_OUB_ECHO                                   1       /* 0x001 */
+#define OPC_OUB_RSVD                                   4       /* 0x004 */
+#define OPC_OUB_SSP_COMP                               5       /* 0x005 */
+#define OPC_OUB_SMP_COMP                               6       /* 0x006 */
+#define OPC_OUB_LOCAL_PHY_CNTRL                                7       /* 0x007 */
+#define OPC_OUB_RSVD1                                  10      /* 0x00A */
+#define OPC_OUB_DEREG_DEV                              11      /* 0x00B */
+#define OPC_OUB_GET_DEV_HANDLE                         12      /* 0x00C */
+#define OPC_OUB_SATA_COMP                              13      /* 0x00D */
+#define OPC_OUB_SATA_EVENT                             14      /* 0x00E */
+#define OPC_OUB_SSP_EVENT                              15      /* 0x00F */
+#define OPC_OUB_RSVD2                                  16      /* 0x010 */
+/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/
+#define OPC_OUB_SSP_RECV_EVENT                         18      /* 0x012 */
+#define OPC_OUB_RSVD3                                  19      /* 0x013 */
+#define OPC_OUB_FW_FLASH_UPDATE                                20      /* 0x014 */
+#define OPC_OUB_GPIO_RESPONSE                          22      /* 0x016 */
+#define OPC_OUB_GPIO_EVENT                             23      /* 0x017 */
+#define OPC_OUB_GENERAL_EVENT                          24      /* 0x018 */
+#define OPC_OUB_SSP_ABORT_RSP                          26      /* 0x01A */
+#define OPC_OUB_SATA_ABORT_RSP                         27      /* 0x01B */
+#define OPC_OUB_SAS_DIAG_MODE_START_END                        28      /* 0x01C */
+#define OPC_OUB_SAS_DIAG_EXECUTE                       29      /* 0x01D */
+#define OPC_OUB_GET_TIME_STAMP                         30      /* 0x01E */
+#define OPC_OUB_RSVD4                                  31      /* 0x01F */
+#define OPC_OUB_PORT_CONTROL                           32      /* 0x020 */
+#define OPC_OUB_SKIP_ENTRY                             33      /* 0x021 */
+#define OPC_OUB_SMP_ABORT_RSP                          34      /* 0x022 */
+#define OPC_OUB_GET_NVMD_DATA                          35      /* 0x023 */
+#define OPC_OUB_SET_NVMD_DATA                          36      /* 0x024 */
+#define OPC_OUB_DEVICE_HANDLE_REMOVAL                  37      /* 0x025 */
+#define OPC_OUB_SET_DEVICE_STATE                       38      /* 0x026 */
+#define OPC_OUB_GET_DEVICE_STATE                       39      /* 0x027 */
+#define OPC_OUB_SET_DEV_INFO                           40      /* 0x028 */
+#define OPC_OUB_RSVD5                                  41      /* 0x029 */
+#define OPC_OUB_HW_EVENT                               1792    /* 0x700 */
+#define OPC_OUB_DEV_HANDLE_ARRIV                       1824    /* 0x720 */
+#define OPC_OUB_THERM_HW_EVENT                         1840    /* 0x730 */
+#define OPC_OUB_SGPIO_RESP                             2094    /* 0x82E */
+#define OPC_OUB_PCIE_DIAG_EXECUTE                      2095    /* 0x82F */
+#define OPC_OUB_DEV_REGIST                             2098    /* 0x832 */
+#define OPC_OUB_SAS_HW_EVENT_ACK                       2099    /* 0x833 */
+#define OPC_OUB_GET_DEVICE_INFO                                2100    /* 0x834 */
+/* spcv specific commands */
+#define OPC_OUB_PHY_START_RESP                         2052    /* 0x804 */
+#define OPC_OUB_PHY_STOP_RESP                          2053    /* 0x805 */
+#define OPC_OUB_SET_CONTROLLER_CONFIG                  2096    /* 0x830 */
+#define OPC_OUB_GET_CONTROLLER_CONFIG                  2097    /* 0x831 */
+#define OPC_OUB_GET_PHY_PROFILE                                2101    /* 0x835 */
+#define OPC_OUB_FLASH_OP_EXT                           2102    /* 0x836 */
+#define OPC_OUB_SET_PHY_PROFILE                                2103    /* 0x837 */
+#define OPC_OUB_KEK_MANAGEMENT_RESP                    2304    /* 0x900 */
+#define OPC_OUB_DEK_MANAGEMENT_RESP                    2305    /* 0x901 */
+#define OPC_OUB_SSP_COALESCED_COMP_RESP                        2306    /* 0x902 */
+
+/* for phy start*/
+#define SSC_DISABLE_15                 (0x01 << 16)
+#define SSC_DISABLE_30                 (0x02 << 16)
+#define SSC_DISABLE_60                 (0x04 << 16)
+#define SAS_ASE                                (0x01 << 15)
+#define SPINHOLD_DISABLE               (0x00 << 14)
+#define SPINHOLD_ENABLE                        (0x01 << 14)
+#define LINKMODE_SAS                   (0x01 << 12)
+#define LINKMODE_DSATA                 (0x02 << 12)
+#define LINKMODE_AUTO                  (0x03 << 12)
+#define LINKRATE_15                    (0x01 << 8)
+#define LINKRATE_30                    (0x02 << 8)
+#define LINKRATE_60                    (0x06 << 8)
+
+/* Thermal related */
+#define        THERMAL_ENABLE                  0x1
+#define        THERMAL_LOG_ENABLE              0x1
+#define THERMAL_OP_CODE                        0x6
+#define LTEMPHIL                        70
+#define RTEMPHIL                       100
+
+/* Encryption info */
+#define SCRATCH_PAD3_ENC_DISABLED      0x00000000
+#define SCRATCH_PAD3_ENC_DIS_ERR       0x00000001
+#define SCRATCH_PAD3_ENC_ENA_ERR       0x00000002
+#define SCRATCH_PAD3_ENC_READY         0x00000003
+#define SCRATCH_PAD3_ENC_MASK          SCRATCH_PAD3_ENC_READY
+
+#define SCRATCH_PAD3_XTS_ENABLED               (1 << 14)
+#define SCRATCH_PAD3_SMA_ENABLED               (1 << 4)
+#define SCRATCH_PAD3_SMB_ENABLED               (1 << 5)
+#define SCRATCH_PAD3_SMF_ENABLED               0
+#define SCRATCH_PAD3_SM_MASK                   0x000000F0
+#define SCRATCH_PAD3_ERR_CODE                  0x00FF0000
+
+#define SEC_MODE_SMF                           0x0
+#define SEC_MODE_SMA                           0x100
+#define SEC_MODE_SMB                           0x200
+#define CIPHER_MODE_ECB                                0x00000001
+#define CIPHER_MODE_XTS                                0x00000002
+#define KEK_MGMT_SUBOP_KEYCARDUPDATE           0x4
+
+/* SAS protocol timer configuration page */
+#define SAS_PROTOCOL_TIMER_CONFIG_PAGE  0x04
+#define STP_MCT_TMO                     32
+#define SSP_MCT_TMO                     32
+#define SAS_MAX_OPEN_TIME                              5
+#define SMP_MAX_CONN_TIMER              0xFF
+#define STP_FRM_TIMER                   0
+#define STP_IDLE_TIME                   5 /* 5 us; controller default */
+#define SAS_MFD                         0
+#define SAS_OPNRJT_RTRY_INTVL           2
+#define SAS_DOPNRJT_RTRY_TMO            128
+#define SAS_COPNRJT_RTRY_TMO            128
+
+/*
+  Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
+  Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
+  is DOPNRJT_RTRY_TMO
+*/
+#define SAS_DOPNRJT_RTRY_THR            23438
+#define SAS_COPNRJT_RTRY_THR            23438
+#define SAS_MAX_AIP                     0x200000
+#define IT_NEXUS_TIMEOUT       0x7D0
+#define PORT_RECOVERY_TIMEOUT  ((IT_NEXUS_TIMEOUT/100) + 30)
+
+struct mpi_msg_hdr {
+       __le32  header; /* Bits [11:0] - Message operation code */
+       /* Bits [15:12] - Message Category */
+       /* Bits [21:16] - Outboundqueue ID for the
+       operation completion message */
+       /* Bits [23:22] - Reserved */
+       /* Bits [28:24] - Buffer Count, indicates how
+       many buffers are allocated for the message */
+       /* Bits [30:29] - Reserved */
+       /* Bits [31] - Message Valid bit */
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY Start Command
+ * used to enable the phy (128 bytes)
+ */
+struct phy_start_req {
+       __le32  tag;
+       __le32  ase_sh_lm_slr_phyid;
+       struct sas_identify_frame sas_identify; /* 28 Bytes */
+       __le32 spasti;
+       u32     reserved[21];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY Stop Command
+ * used to disable the phy (128 bytes)
+ */
+struct phy_stop_req {
+       __le32  tag;
+       __le32  phy_id;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/* set device bits fis - device to host */
+struct set_dev_bits_fis {
+       u8      fis_type;       /* 0xA1*/
+       u8      n_i_pmport;
+       /* b7 : n Bit. Notification bit. If set device needs attention. */
+       /* b6 : i Bit. Interrupt Bit */
+       /* b5-b4: reserved2 */
+       /* b3-b0: PM Port */
+       u8      status;
+       u8      error;
+       u32     _r_a;
+} __attribute__ ((packed));
+/* PIO setup FIS - device to host */
+struct pio_setup_fis {
+       u8      fis_type;       /* 0x5f */
+       u8      i_d_pmPort;
+       /* b7 : reserved */
+       /* b6 : i bit. Interrupt bit */
+       /* b5 : d bit. data transfer direction. set to 1 for device to host
+       xfer */
+       /* b4 : reserved */
+       /* b3-b0: PM Port */
+       u8      status;
+       u8      error;
+       u8      lbal;
+       u8      lbam;
+       u8      lbah;
+       u8      device;
+       u8      lbal_exp;
+       u8      lbam_exp;
+       u8      lbah_exp;
+       u8      _r_a;
+       u8      sector_count;
+       u8      sector_count_exp;
+       u8      _r_b;
+       u8      e_status;
+       u8      _r_c[2];
+       u8      transfer_count;
+} __attribute__ ((packed));
+
+/*
+ * brief the data structure of SATA Completion Response
+ * use to describe the sata task response (64 bytes)
+ */
+struct sata_completion_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  param;
+       u32     sata_resp[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SAS HW Event Notification
+ * used to alert the host about the hardware event (64 bytes)
+ */
+/* updated outbound struct for spcv */
+
+struct hw_event_resp {
+       __le32  lr_status_evt_portid;
+       __le32  evt_param;
+       __le32  phyid_npip_portstate;
+       struct sas_identify_frame       sas_identify;
+       struct dev_to_host_fis  sata_fis;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure for thermal event notification
+ */
+
+struct thermal_hw_event {
+       __le32  thermal_event;
+       __le32  rht_lht;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of REGISTER DEVICE Command
+ * use to describe MPI REGISTER DEVICE Command (64 bytes)
+ */
+
+struct reg_dev_req {
+       __le32  tag;
+       __le32  phyid_portid;
+       __le32  dtype_dlr_mcn_ir_retry;
+       __le32  firstburstsize_ITNexustimeout;
+       u8      sas_addr[SAS_ADDR_SIZE];
+       __le32  upper_device_id;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of DEREGISTER DEVICE Command
+ * use to request spc to remove all internal resources associated
+ * with the device id (64 bytes)
+ */
+
+struct dereg_dev_req {
+       __le32  tag;
+       __le32  device_id;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of DEVICE_REGISTRATION Response
+ * use to notify the completion of the device registration (64 bytes)
+ */
+struct dev_reg_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  device_id;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of Local PHY Control Command
+ * use to issue PHY CONTROL to local phy (64 bytes)
+ */
+struct local_phy_ctl_req {
+       __le32  tag;
+       __le32  phyop_phyid;
+       u32     reserved1[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Local Phy Control Response
+ * use to describe MPI Local Phy Control Response (64 bytes)
+ */
+ struct local_phy_ctl_resp {
+       __le32  tag;
+       __le32  phyop_phyid;
+       __le32  status;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+#define OP_BITS 0x0000FF00
+#define ID_BITS 0x000000FF
+
+/*
+ * brief the data structure of PORT Control Command
+ * use to control port properties (64 bytes)
+ */
+
+struct port_ctl_req {
+       __le32  tag;
+       __le32  portop_portid;
+       __le32  param0;
+       __le32  param1;
+       u32     reserved1[27];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of HW Event Ack Command
+ * use to acknowledge receive HW event (64 bytes)
+ */
+struct hw_event_ack_req {
+       __le32  tag;
+       __le32  phyid_sea_portid;
+       __le32  param0;
+       __le32  param1;
+       u32     reserved1[27];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY_START Response Command
+ * indicates the completion of PHY_START command (64 bytes)
+ */
+struct phy_start_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  phyid;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY_STOP Response Command
+ * indicates the completion of PHY_STOP command (64 bytes)
+ */
+struct phy_stop_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  phyid;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP Completion Response
+ * use to indicate a SSP Completion (n bytes)
+ */
+struct ssp_completion_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  param;
+       __le32  ssptag_rescv_rescpad;
+       struct ssp_response_iu ssp_resp_iu;
+       __le32  residual_count;
+} __attribute__((packed, aligned(4)));
+
+#define SSP_RESCV_BIT  0x00010000
+
+/*
+ * brief the data structure of SATA EVENT response
+ * used to indicate a SATA event (64 bytes)
+ */
+struct sata_event_resp {
+       __le32 tag;
+       __le32 event;
+       __le32 port_id;
+       __le32 device_id;
+       u32 reserved;
+       __le32 event_param0;
+       __le32 event_param1;
+       __le32 sata_addr_h32;
+       __le32 sata_addr_l32;
+       __le32 e_udt1_udt0_crc;
+       __le32 e_udt5_udt4_udt3_udt2;
+       __le32 a_udt1_udt0_crc;
+       __le32 a_udt5_udt4_udt3_udt2;
+       __le32 hwdevid_diferr;
+       __le32 err_framelen_byteoffset;
+       __le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP EVENT response
+ * used to indicate an SSP event (64 bytes)
+ */
+struct ssp_event_resp {
+       __le32 tag;
+       __le32 event;
+       __le32 port_id;
+       __le32 device_id;
+       __le32 ssp_tag;
+       __le32 event_param0;
+       __le32 event_param1;
+       __le32 sas_addr_h32;
+       __le32 sas_addr_l32;
+       __le32 e_udt1_udt0_crc;
+       __le32 e_udt5_udt4_udt3_udt2;
+       __le32 a_udt1_udt0_crc;
+       __le32 a_udt5_udt4_udt3_udt2;
+       __le32 hwdevid_diferr;
+       __le32 err_framelen_byteoffset;
+       __le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of General Event Notification Response
+ * use to describe MPI General Event Notification Response (64 bytes)
+ */
+struct general_event_resp {
+       __le32  status;
+       __le32  inb_IOMB_payload[14];
+} __attribute__((packed, aligned(4)));
+
+#define GENERAL_EVENT_PAYLOAD  14
+#define OPCODE_BITS    0x00000fff
+
+/*
+ * brief the data structure of SMP Request Command
+ * use to describe MPI SMP REQUEST Command (64 bytes)
+ */
+struct smp_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  len_ip_ir;
+       /* Bits [0] - Indirect response */
+       /* Bits [1] - Indirect Payload */
+       /* Bits [15:2] - Reserved */
+       /* Bits [23:16] - direct payload Len */
+       /* Bits [31:24] - Reserved */
+       u8      smp_req16[16];
+       union {
+               u8      smp_req[32];
+               struct {
+                       __le64 long_req_addr;/* sg dma address, LE */
+                       __le32 long_req_size;/* LE */
+                       u32     _r_a;
+                       __le64 long_resp_addr;/* sg dma address, LE */
+                       __le32 long_resp_size;/* LE */
+                       u32     _r_b;
+                       } long_smp_req;/* sequencer extension */
+       };
+       __le32  rsvd[16];
+} __attribute__((packed, aligned(4)));
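
For illustration only (not part of the patch), a sketch of how the len_ip_ir word of smp_req might be composed from the bit layout documented in the comments above; the EX_* flag names and helpers are hypothetical.

#define EX_SMP_IND_RESP                0x01    /* bit 0: indirect response (assumed) */
#define EX_SMP_IND_PAYLOAD     0x02    /* bit 1: indirect payload (assumed) */

/* Direct request: only the direct payload length (bits 23:16) is set. */
static inline void example_fill_direct_smp(struct smp_req *req, u8 direct_len)
{
        req->len_ip_ir = cpu_to_le32((u32)direct_len << 16);
}

/* Indirect request: payload/response are described by long_smp_req instead. */
static inline void example_fill_indirect_smp(struct smp_req *req)
{
        req->len_ip_ir = cpu_to_le32(EX_SMP_IND_RESP | EX_SMP_IND_PAYLOAD);
}
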
+/*
+ * brief the data structure of SMP Completion Response
+ * use to describe MPI SMP Completion Response (64 bytes)
+ */
+struct smp_completion_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  param;
+       u8      _r_a[252];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP SMP SATA Abort Command
+ * use to describe MPI SSP SMP & SATA Abort Command (64 bytes)
+ */
+struct task_abort_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  tag_to_abort;
+       __le32  abort_all;
+       u32     reserved[27];
+} __attribute__((packed, aligned(4)));
+
+/* These flags used for SSP SMP & SATA Abort */
+#define ABORT_MASK             0x3
+#define ABORT_SINGLE           0x0
+#define ABORT_ALL              0x1
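
For illustration only (not part of the patch), a sketch of how the ABORT_* flags above might steer the fields of a task_abort_req; which field carries what for single versus all-task abort is an assumption, and tag/reserved handling is omitted.

static void example_fill_task_abort(struct task_abort_req *req, u32 flag,
                                    u32 device_id, u32 tag_to_abort)
{
        req->device_id = cpu_to_le32(device_id);
        if ((flag & ABORT_MASK) == ABORT_SINGLE)
                req->tag_to_abort = cpu_to_le32(tag_to_abort);
        else if ((flag & ABORT_MASK) == ABORT_ALL)
                req->abort_all = cpu_to_le32(1);
}
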
+
+/**
+ * brief the data structure of SSP SATA SMP Abort Response
+ * use to describe SSP SMP & SATA Abort Response ( 64 bytes)
+ */
+struct task_abort_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  scp;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Command
+ * use to describe MPI SAS Diagnostic Start/End Command (64 bytes)
+ */
+struct sas_diag_start_end_req {
+       __le32  tag;
+       __le32  operation_phyid;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Command
+ * use to describe MPI SAS Diagnostic Execute Command (64 bytes)
+ */
+struct sas_diag_execute_req {
+       __le32  tag;
+       __le32  cmdtype_cmddesc_phyid;
+       __le32  pat1_pat2;
+       __le32  threshold;
+       __le32  codepat_errmsk;
+       __le32  pmon;
+       __le32  pERF1CTL;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+#define SAS_DIAG_PARAM_BYTES 24
+
+/*
+ * brief the data structure of Set Device State Command
+ * use to describe MPI Set Device State Command (64 bytes)
+ */
+struct set_dev_state_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  nds;
+       u32     reserved[28];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SATA Start Command
+ * use to describe MPI SATA IO Start Command (64 bytes)
+ * Note: This structure is common for normal / encryption I/O
+ */
+
+struct sata_start_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  data_len;
+       __le32  ncqtag_atap_dir_m_dad;
+       struct host_to_dev_fis  sata_fis;
+       u32     reserved1;
+       u32     reserved2;      /* dword 11. rsvd for normal I/O. */
+                               /* EPLE Descl for enc I/O */
+       u32     addr_low;       /* dword 12. rsvd for enc I/O */
+       u32     addr_high;      /* dword 13. reserved for enc I/O */
+       __le32  len;            /* dword 14: length for normal I/O. */
+                               /* EPLE Desch for enc I/O */
+       __le32  esgl;           /* dword 15. rsvd for enc I/O */
+       __le32  atapi_scsi_cdb[4];      /* dword 16-19. rsvd for enc I/O */
+       /* The below fields are reserved for normal I/O */
+       __le32  key_index_mode; /* dword 20 */
+       __le32  sector_cnt_enss;/* dword 21 */
+       __le32  keytagl;        /* dword 22 */
+       __le32  keytagh;        /* dword 23 */
+       __le32  twk_val0;       /* dword 24 */
+       __le32  twk_val1;       /* dword 25 */
+       __le32  twk_val2;       /* dword 26 */
+       __le32  twk_val3;       /* dword 27 */
+       __le32  enc_addr_low;   /* dword 28. Encryption SGL address high */
+       __le32  enc_addr_high;  /* dword 29. Encryption SGL address low */
+       __le32  enc_len;        /* dword 30. Encryption length */
+       __le32  enc_esgl;       /* dword 31. Encryption esgl bit */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI TM Start Command
+ * use to describe MPI SSP INI TM Start Command (64 bytes)
+ */
+struct ssp_ini_tm_start_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  relate_tag;
+       __le32  tmf;
+       u8      lun[8];
+       __le32  ds_ads_m;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+struct ssp_info_unit {
+       u8      lun[8];/* SCSI Logical Unit Number */
+       u8      reserved1;/* reserved */
+       u8      efb_prio_attr;
+       /* B7 : enabledFirstBurst */
+       /* B6-3 : taskPriority */
+       /* B2-0 : taskAttribute */
+       u8      reserved2;      /* reserved */
+       u8      additional_cdb_len;
+       /* B7-2 : additional_cdb_len */
+       /* B1-0 : reserved */
+       u8      cdb[16];/* The SCSI CDB up to 16 bytes length */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI IO Start Command
+ * use to describe MPI SSP INI IO Start Command (64 bytes)
+ * Note: This structure is common for normal / encryption I/O
+ */
+struct ssp_ini_io_start_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  data_len;
+       __le32  dad_dir_m_tlr;
+       struct ssp_info_unit    ssp_iu;
+       __le32  addr_low;       /* dword 12: sgl low for normal I/O. */
+                               /* epl_descl for encryption I/O */
+       __le32  addr_high;      /* dword 13: sgl hi for normal I/O */
+                               /* dpl_descl for encryption I/O */
+       __le32  len;            /* dword 14: len for normal I/O. */
+                               /* edpl_desch for encryption I/O */
+       __le32  esgl;           /* dword 15: ESGL bit for normal I/O. */
+                               /* user defined tag mask for enc I/O */
+       /* The below fields are reserved for normal I/O */
+       u8      udt[12];        /* dword 16-18 */
+       __le32  sectcnt_ios;    /* dword 19 */
+       __le32  key_cmode;      /* dword 20 */
+       __le32  ks_enss;        /* dword 21 */
+       __le32  keytagl;        /* dword 22 */
+       __le32  keytagh;        /* dword 23 */
+       __le32  twk_val0;       /* dword 24 */
+       __le32  twk_val1;       /* dword 25 */
+       __le32  twk_val2;       /* dword 26 */
+       __le32  twk_val3;       /* dword 27 */
+       __le32  enc_addr_low;   /* dword 28: Encryption sgl addr low */
+       __le32  enc_addr_high;  /* dword 29: Encryption sgl addr hi */
+       __le32  enc_len;        /* dword 30: Encryption length */
+       __le32  enc_esgl;       /* dword 31: ESGL bit for encryption */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND
+ * use to initiate SSP I/O operation with optional DIF/ENC
+ */
+struct ssp_dif_enc_io_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  data_len;
+       __le32  dirMTlr;
+       __le32  sspiu0;
+       __le32  sspiu1;
+       __le32  sspiu2;
+       __le32  sspiu3;
+       __le32  sspiu4;
+       __le32  sspiu5;
+       __le32  sspiu6;
+       __le32  epl_des;
+       __le32  dpl_desl_ndplr;
+       __le32  dpl_desh;
+       __le32  uum_uuv_bss_difbits;
+       u8      udt[12];
+       __le32  sectcnt_ios;
+       __le32  key_cmode;
+       __le32  ks_enss;
+       __le32  keytagl;
+       __le32  keytagh;
+       __le32  twk_val0;
+       __le32  twk_val1;
+       __le32  twk_val2;
+       __le32  twk_val3;
+       __le32  addr_low;
+       __le32  addr_high;
+       __le32  len;
+       __le32  esgl;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Firmware download
+ * use to describe MPI FW DOWNLOAD Command (64 bytes)
+ */
+struct fw_flash_Update_req {
+       __le32  tag;
+       __le32  cur_image_offset;
+       __le32  cur_image_len;
+       __le32  total_image_len;
+       u32     reserved0[7];
+       __le32  sgl_addr_lo;
+       __le32  sgl_addr_hi;
+       __le32  len;
+       __le32  ext_reserved;
+       u32     reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+#define FWFLASH_IOMB_RESERVED_LEN 0x07
+/**
+ * brief the data structure of FW_FLASH_UPDATE Response
+ * use to describe MPI FW_FLASH_UPDATE Response (64 bytes)
+ *
+ */
+struct fw_flash_Update_resp {
+       __le32  tag;
+       __le32  status;
+       u32     reserved[13];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Get NVM Data Command
+ * use to get data from NVM in HBA(64 bytes)
+ */
+struct get_nvm_data_req {
+       __le32  tag;
+       __le32  len_ir_vpdd;
+       __le32  vpd_offset;
+       u32     reserved[8];
+       __le32  resp_addr_lo;
+       __le32  resp_addr_hi;
+       __le32  resp_len;
+       u32     reserved1[17];
+} __attribute__((packed, aligned(4)));
+
+struct set_nvm_data_req {
+       __le32  tag;
+       __le32  len_ir_vpdd;
+       __le32  vpd_offset;
+       u32     reserved[8];
+       __le32  resp_addr_lo;
+       __le32  resp_addr_hi;
+       __le32  resp_len;
+       u32     reserved1[17];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SET CONTROLLER CONFIG COMMAND
+ * use to modify controller configuration
+ */
+struct set_ctrl_cfg_req {
+       __le32  tag;
+       __le32  cfg_pg[14];
+       u32     reserved[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for GET CONTROLLER CONFIG COMMAND
+ * use to get controller configuration page
+ */
+struct get_ctrl_cfg_req {
+       __le32  tag;
+       __le32  pgcd;
+       __le32  int_vec;
+       u32     reserved[28];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for KEK_MANAGEMENT COMMAND
+ * use for KEK management
+ */
+struct kek_mgmt_req {
+       __le32  tag;
+       __le32  new_curidx_ksop;
+       u32     reserved;
+       __le32  kblob[12];
+       u32     reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for DEK_MANAGEMENT COMMAND
+ * use for DEK management
+ */
+struct dek_mgmt_req {
+       __le32  tag;
+       __le32  kidx_dsop;
+       __le32  dekidx;
+       __le32  addr_l;
+       __le32  addr_h;
+       __le32  nent;
+       __le32  dbf_tblsize;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SET PHY PROFILE COMMAND
+ * use to set phy specific information
+ */
+struct set_phy_profile_req {
+       __le32  tag;
+       __le32  ppc_phyid;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for GET PHY PROFILE COMMAND
+ * use to retrieve phy specific information
+ */
+struct get_phy_profile_req {
+       __le32  tag;
+       __le32  ppc_phyid;
+       __le32  profile[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for EXT FLASH PARTITION
+ * use to manage ext flash partition
+ */
+struct ext_flash_partition_req {
+       __le32  tag;
+       __le32  cmd;
+       __le32  offset;
+       __le32  len;
+       u32     reserved[7];
+       __le32  addr_low;
+       __le32  addr_high;
+       __le32  len1;
+       __le32  ext;
+       u32     reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+#define TWI_DEVICE     0x0
+#define C_SEEPROM      0x1
+#define VPD_FLASH      0x4
+#define AAP1_RDUMP     0x5
+#define IOP_RDUMP      0x6
+#define EXPAN_ROM      0x7
+
+#define IPMode         0x80000000
+#define NVMD_TYPE      0x0000000F
+#define NVMD_STAT      0x0000FFFF
+#define NVMD_LEN       0xFF000000
+/**
+ * brief the data structure of Get NVMD Data Response
+ * use to describe MPI Get NVMD Data Response (64 bytes)
+ */
+struct get_nvm_data_resp {
+       __le32          tag;
+       __le32          ir_tda_bn_dps_das_nvm;
+       __le32          dlen_status;
+       __le32          nvm_data[12];
+} __attribute__((packed, aligned(4)));
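
For illustration only (not part of the patch), a sketch of decoding the dlen_status word of get_nvm_data_resp with the NVMD_STAT and NVMD_LEN masks above; the 24-bit shift for the length byte is an assumption read off the mask value.

static inline u32 example_nvmd_status(const struct get_nvm_data_resp *resp)
{
        return le32_to_cpu(resp->dlen_status) & NVMD_STAT;
}

static inline u32 example_nvmd_len(const struct get_nvm_data_resp *resp)
{
        return (le32_to_cpu(resp->dlen_status) & NVMD_LEN) >> 24;
}
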
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Response
+ * use to describe MPI SAS Diagnostic Start/End Response (64 bytes)
+ *
+ */
+struct sas_diag_start_end_resp {
+       __le32          tag;
+       __le32          status;
+       u32             reserved[13];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Response
+ * use to describe MPI SAS Diagnostic Execute Response (64 bytes)
+ *
+ */
+struct sas_diag_execute_resp {
+       __le32          tag;
+       __le32          cmdtype_cmddesc_phyid;
+       __le32          Status;
+       __le32          ReportData;
+       u32             reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Set Device State Response
+ * use to describe MPI Set Device State Response (64 bytes)
+ *
+ */
+struct set_dev_state_resp {
+       __le32          tag;
+       __le32          status;
+       __le32          device_id;
+       __le32          pds_nds;
+       u32             reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/* new outbound structure for spcv - begins */
+/**
+ * brief the data structure for SET CONTROLLER CONFIG Response
+ * use to report the status of a SET CONTROLLER CONFIG command
+ */
+struct set_ctrl_cfg_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 err_qlfr_pgcd;
+       u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+struct get_ctrl_cfg_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 err_qlfr;
+       __le32 confg_page[12];
+} __attribute__((packed, aligned(4)));
+
+struct kek_mgmt_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 kidx_new_curr_ksop;
+       __le32 err_qlfr;
+       u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+struct dek_mgmt_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 kekidx_tbls_dsop;
+       __le32 dekidx;
+       __le32 err_qlfr;
+       u32 reserved[10];
+} __attribute__((packed, aligned(4)));
+
+struct get_phy_profile_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 ppc_phyid;
+       __le32 ppc_specific_rsp[12];
+} __attribute__((packed, aligned(4)));
+
+struct flash_op_ext_resp {
+       __le32 tag;
+       __le32 cmd;
+       __le32 status;
+       __le32 epart_size;
+       __le32 epart_sect_size;
+       u32 reserved[10];
+} __attribute__((packed, aligned(4)));
+
+struct set_phy_profile_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 ppc_phyid;
+       __le32 ppc_specific_rsp[12];
+} __attribute__((packed, aligned(4)));
+
+struct ssp_coalesced_comp_resp {
+       __le32 coal_cnt;
+       __le32 tag0;
+       __le32 ssp_tag0;
+       __le32 tag1;
+       __le32 ssp_tag1;
+       __le32 add_tag_ssp_tag[10];
+} __attribute__((packed, aligned(4)));
+
+/* new outbound structure for spcv - ends */
+
+/* brief data structure for SAS protocol timer configuration page.
+ *
+ */
+struct SASProtocolTimerConfig {
+       __le32 pageCode;                        /* 0 */
+       __le32 MST_MSI;                         /* 1 */
+       __le32 STP_SSP_MCT_TMO;                 /* 2 */
+       __le32 STP_FRM_TMO;                     /* 3 */
+       __le32 STP_IDLE_TMO;                    /* 4 */
+       __le32 OPNRJT_RTRY_INTVL;               /* 5 */
+       __le32 Data_Cmd_OPNRJT_RTRY_TMO;        /* 6 */
+       __le32 Data_Cmd_OPNRJT_RTRY_THR;        /* 7 */
+       __le32 MAX_AIP;                         /* 8 */
+} __attribute__((packed, aligned(4)));
+
+typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
+
+#define NDS_BITS 0x0F
+#define PDS_BITS 0xF0
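
For illustration only (not part of the patch), a sketch of splitting the pds_nds word of set_dev_state_resp into previous and new device state using the NDS_BITS/PDS_BITS masks above; the 4-bit shift is an assumption read off PDS_BITS.

static inline u32 example_prev_dev_state(const struct set_dev_state_resp *resp)
{
        return (le32_to_cpu(resp->pds_nds) & PDS_BITS) >> 4;
}

static inline u32 example_new_dev_state(const struct set_dev_state_resp *resp)
{
        return le32_to_cpu(resp->pds_nds) & NDS_BITS;
}
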
+
+/*
+ * HW Events type
+ */
+
+#define HW_EVENT_RESET_START                   0x01
+#define HW_EVENT_CHIP_RESET_COMPLETE           0x02
+#define HW_EVENT_PHY_STOP_STATUS               0x03
+#define HW_EVENT_SAS_PHY_UP                    0x04
+#define HW_EVENT_SATA_PHY_UP                   0x05
+#define HW_EVENT_SATA_SPINUP_HOLD              0x06
+#define HW_EVENT_PHY_DOWN                      0x07
+#define HW_EVENT_PORT_INVALID                  0x08
+#define HW_EVENT_BROADCAST_CHANGE              0x09
+#define HW_EVENT_PHY_ERROR                     0x0A
+#define HW_EVENT_BROADCAST_SES                 0x0B
+#define HW_EVENT_INBOUND_CRC_ERROR             0x0C
+#define HW_EVENT_HARD_RESET_RECEIVED           0x0D
+#define HW_EVENT_MALFUNCTION                   0x0E
+#define HW_EVENT_ID_FRAME_TIMEOUT              0x0F
+#define HW_EVENT_BROADCAST_EXP                 0x10
+#define HW_EVENT_PHY_START_STATUS              0x11
+#define HW_EVENT_LINK_ERR_INVALID_DWORD                0x12
+#define HW_EVENT_LINK_ERR_DISPARITY_ERROR      0x13
+#define HW_EVENT_LINK_ERR_CODE_VIOLATION       0x14
+#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH  0x15
+#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED     0x16
+#define HW_EVENT_PORT_RECOVERY_TIMER_TMO       0x17
+#define HW_EVENT_PORT_RECOVER                  0x18
+#define HW_EVENT_PORT_RESET_TIMER_TMO          0x19
+#define HW_EVENT_PORT_RESET_COMPLETE           0x20
+#define EVENT_BROADCAST_ASYNCH_EVENT           0x21
+
+/* port state */
+#define PORT_NOT_ESTABLISHED                   0x00
+#define PORT_VALID                             0x01
+#define PORT_LOSTCOMM                          0x02
+#define PORT_IN_RESET                          0x04
+#define PORT_3RD_PARTY_RESET                   0x07
+#define PORT_INVALID                           0x08
+
+/*
+ * SSP/SMP/SATA IO Completion Status values
+ */
+
+#define IO_SUCCESS                             0x00
+#define IO_ABORTED                             0x01
+#define IO_OVERFLOW                            0x02
+#define IO_UNDERFLOW                           0x03
+#define IO_FAILED                              0x04
+#define IO_ABORT_RESET                         0x05
+#define IO_NOT_VALID                           0x06
+#define IO_NO_DEVICE                           0x07
+#define IO_ILLEGAL_PARAMETER                   0x08
+#define IO_LINK_FAILURE                                0x09
+#define IO_PROG_ERROR                          0x0A
+
+#define IO_EDC_IN_ERROR                                0x0B
+#define IO_EDC_OUT_ERROR                       0x0C
+#define IO_ERROR_HW_TIMEOUT                    0x0D
+#define IO_XFER_ERROR_BREAK                    0x0E
+#define IO_XFER_ERROR_PHY_NOT_READY            0x0F
+#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED       0x10
+#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION               0x11
+#define IO_OPEN_CNX_ERROR_BREAK                                0x12
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS                        0x13
+#define IO_OPEN_CNX_ERROR_BAD_DESTINATION              0x14
+#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED        0x15
+#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY           0x16
+#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION            0x17
+/* This error code 0x18 is not used on SPCv */
+#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR                        0x18
+#define IO_XFER_ERROR_NAK_RECEIVED                     0x19
+#define IO_XFER_ERROR_ACK_NAK_TIMEOUT                  0x1A
+#define IO_XFER_ERROR_PEER_ABORTED                     0x1B
+#define IO_XFER_ERROR_RX_FRAME                         0x1C
+#define IO_XFER_ERROR_DMA                              0x1D
+#define IO_XFER_ERROR_CREDIT_TIMEOUT                   0x1E
+#define IO_XFER_ERROR_SATA_LINK_TIMEOUT                        0x1F
+#define IO_XFER_ERROR_SATA                             0x20
+
+/* This error code 0x22 is not used on SPCv */
+#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST              0x22
+#define IO_XFER_ERROR_REJECTED_NCQ_MODE                        0x21
+#define IO_XFER_ERROR_ABORTED_NCQ_MODE                 0x23
+#define IO_XFER_OPEN_RETRY_TIMEOUT                     0x24
+/* This error code 0x25 is not used on SPCv */
+#define IO_XFER_SMP_RESP_CONNECTION_ERROR              0x25
+#define IO_XFER_ERROR_UNEXPECTED_PHASE                 0x26
+#define IO_XFER_ERROR_XFER_RDY_OVERRUN                 0x27
+#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED            0x28
+#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT                0x30
+
+/* The following error codes 0x31 and 0x32 are not used (obsolete) */
+#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK   0x31
+#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK        0x32
+
+#define IO_XFER_ERROR_OFFSET_MISMATCH                  0x34
+#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN               0x35
+#define IO_XFER_CMD_FRAME_ISSUED                       0x36
+#define IO_ERROR_INTERNAL_SMP_RESOURCE                 0x37
+#define IO_PORT_IN_RESET                               0x38
+#define IO_DS_NON_OPERATIONAL                          0x39
+#define IO_DS_IN_RECOVERY                              0x3A
+#define IO_TM_TAG_NOT_FOUND                            0x3B
+#define IO_XFER_PIO_SETUP_ERROR                                0x3C
+#define IO_SSP_EXT_IU_ZERO_LEN_ERROR                   0x3D
+#define IO_DS_IN_ERROR                                 0x3E
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY             0x3F
+#define IO_ABORT_IN_PROGRESS                           0x40
+#define IO_ABORT_DELAYED                               0x41
+#define IO_INVALID_LENGTH                              0x42
+
+/********** additional response event values *****************/
+
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT         0x43
+#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED   0x44
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO       0x45
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST                0x46
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE   0x47
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED        0x48
+#define IO_DS_INVALID                                  0x49
+/* WARNING: the values are not contiguous from here */
+#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR    0x52
+#define IO_XFER_DMA_ACTIVATE_TIMEOUT           0x53
+#define IO_XFER_ERROR_INTERNAL_CRC_ERROR       0x54
+#define MPI_IO_RQE_BUSY_FULL                   0x55
+#define IO_XFER_ERR_EOB_DATA_OVERRUN           0x56
+#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME     0x57
+#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED       0x58
+
+#define MPI_ERR_IO_RESOURCE_UNAVAILABLE                0x1004
+#define MPI_ERR_ATAPI_DEVICE_BUSY              0x1024
+
+#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS                0x2040
+/*
+ * An encryption I/O request failed due to a DEK Key Tag mismatch:
+ * the key tag supplied in the encryption IOMB does not match
+ * the Key Tag in the referenced DEK Entry.
+ */
+#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH      0x2041
+#define IO_XFR_ERROR_CIPHER_MODE_INVALID       0x2042
+/*
+ * An encryption I/O request failed because the initial value (IV)
+ * in the unwrapped DEK blob didn't match the IV used to unwrap it.
+ */
+#define IO_XFR_ERROR_DEK_IV_MISMATCH           0x2043
+/* An encryption I/O request failed due to an internal RAM ECC or
+ * interface error while unwrapping the DEK. */
+#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR   0x2044
+/* An encryption I/O request failed due to an internal RAM ECC or
+ * interface error while unwrapping the DEK. */
+#define IO_XFR_ERROR_INTERNAL_RAM              0x2045
+/*
+ * An encryption I/O request failed
+ * because the DEK index specified in the I/O was outside the bounds of
+ * the total number of entries in the host DEK table.
+ */
+#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS   0x2046
+
+/* define DIF IO response error status code */
+#define IO_XFR_ERROR_DIF_MISMATCH                      0x3000
+#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH      0x3001
+#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH                0x3002
+#define IO_XFR_ERROR_DIF_CRC_MISMATCH                  0x3003
+
+/* define operator management response status and error qualifier code */
+#define OPR_MGMT_OP_NOT_SUPPORTED                      0x2060
+#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL         0x2061
+#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND          0x2062
+#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH                0x2063
+#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED      0x2064
+#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL           0x2022
+#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE   0x2023
+/***************** additional response event values ***************/
+
+/* WARNING: This error code must always be the last number.
+ * If you add an error code, update this value as well;
+ * it is used as an index.
+ */
+#define IO_ERROR_UNKNOWN_GENERIC                       0x2023
+
+/* MSGU CONFIGURATION TABLE*/
+
+#define SPCv_MSGU_CFG_TABLE_UPDATE             0x01
+#define SPCv_MSGU_CFG_TABLE_RESET              0x02
+#define SPCv_MSGU_CFG_TABLE_FREEZE             0x04
+#define SPCv_MSGU_CFG_TABLE_UNFREEZE           0x08
+#define MSGU_IBDB_SET                          0x00
+#define MSGU_HOST_INT_STATUS                   0x08
+#define MSGU_HOST_INT_MASK                     0x0C
+#define MSGU_IOPIB_INT_STATUS                  0x18
+#define MSGU_IOPIB_INT_MASK                    0x1C
+#define MSGU_IBDB_CLEAR                                0x20
+
+#define MSGU_MSGU_CONTROL                      0x24
+#define MSGU_ODR                               0x20
+#define MSGU_ODCR                              0x28
+
+#define MSGU_ODMR                              0x30
+#define MSGU_ODMR_U                            0x34
+#define MSGU_ODMR_CLR                          0x38
+#define MSGU_ODMR_CLR_U                                0x3C
+#define MSGU_OD_RSVD                           0x40
+
+#define MSGU_SCRATCH_PAD_0                     0x44
+#define MSGU_SCRATCH_PAD_1                     0x48
+#define MSGU_SCRATCH_PAD_2                     0x4C
+#define MSGU_SCRATCH_PAD_3                     0x50
+#define MSGU_HOST_SCRATCH_PAD_0                        0x54
+#define MSGU_HOST_SCRATCH_PAD_1                        0x58
+#define MSGU_HOST_SCRATCH_PAD_2                        0x5C
+#define MSGU_HOST_SCRATCH_PAD_3                        0x60
+#define MSGU_HOST_SCRATCH_PAD_4                        0x64
+#define MSGU_HOST_SCRATCH_PAD_5                        0x68
+#define MSGU_HOST_SCRATCH_PAD_6                        0x6C
+#define MSGU_HOST_SCRATCH_PAD_7                        0x70
+
+/* bit definition for ODMR register */
+#define ODMR_MASK_ALL                  0xFFFFFFFF /* mask all
+                                       interrupt vectors */
+#define ODMR_CLEAR_ALL                 0       /* clear all
+                                       interrupt vectors */
+/* bit definition for ODCR register */
+#define ODCR_CLEAR_ALL                 0xFFFFFFFF /* clear all
+                                       interrupt vectors */
+/* MSI-X Interrupts */
+#define MSIX_TABLE_OFFSET              0x2000
+#define MSIX_TABLE_ELEMENT_SIZE                0x10
+#define MSIX_INTERRUPT_CONTROL_OFFSET  0xC
+#define MSIX_TABLE_BASE                        (MSIX_TABLE_OFFSET + \
+                                       MSIX_INTERRUPT_CONTROL_OFFSET)
+#define MSIX_INTERRUPT_DISABLE         0x1
+#define MSIX_INTERRUPT_ENABLE          0x0
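
For illustration only (not part of the patch), a sketch of how the MSI-X defines above combine to address the per-vector interrupt control word; which PCI BAR the table lives behind is an assumption, and readl()/writel() use assumes <linux/io.h>.

static inline u32 example_msix_ctrl_offset(u32 vec)
{
        return MSIX_TABLE_BASE + vec * MSIX_TABLE_ELEMENT_SIZE;
}

/* Mask or unmask one MSI-X vector by writing its control word. */
static void example_msix_vector_mask(void __iomem *bar, u32 vec, bool mask)
{
        writel(mask ? MSIX_INTERRUPT_DISABLE : MSIX_INTERRUPT_ENABLE,
               bar + example_msix_ctrl_offset(vec));
}
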
+
+/* state definition for Scratch Pad1 register */
+#define SCRATCH_PAD_RAAE_READY         0x3
+#define SCRATCH_PAD_ILA_READY          0xC
+#define SCRATCH_PAD_BOOT_LOAD_SUCCESS  0x0
+#define SCRATCH_PAD_IOP0_READY         0xC00
+#define SCRATCH_PAD_IOP1_READY         0x3000
+
+/* boot loader state */
+#define SCRATCH_PAD1_BOOTSTATE_MASK            0x70    /* Bit 4-6 */
+#define SCRATCH_PAD1_BOOTSTATE_SUCESS          0x0     /* Load successful */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM     0x10    /* HDA SEEPROM */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP   0x20    /* HDA BootStrap Pins */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET   0x30    /* HDA Soft Reset */
+#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR      0x40    /* HDA critical error */
+#define SCRATCH_PAD1_BOOTSTATE_R1              0x50    /* Reserved */
+#define SCRATCH_PAD1_BOOTSTATE_R2              0x60    /* Reserved */
+#define SCRATCH_PAD1_BOOTSTATE_FATAL           0x70    /* Fatal Error */
+
+/* state definition for Scratch Pad2 register */
+#define SCRATCH_PAD2_POR               0x00    /* power on state */
+#define SCRATCH_PAD2_SFR               0x01    /* soft reset state */
+#define SCRATCH_PAD2_ERR               0x02    /* error state */
+#define SCRATCH_PAD2_RDY               0x03    /* ready state */
+#define SCRATCH_PAD2_FWRDY_RST         0x04    /* FW rdy for soft reset flag */
+#define SCRATCH_PAD2_IOPRDY_RST                0x08    /* IOP ready for soft reset */
+#define SCRATCH_PAD2_STATE_MASK                0xFFFFFFF4 /* Scratch Pad2
+ mask, bits 1-0 = state */
+#define SCRATCH_PAD2_RESERVED          0x000003FC /* Scratch Pad2
+ reserved, bits 2 to 9 */
+
+#define SCRATCH_PAD_ERROR_MASK         0xFFFFFC00 /* Error mask bits */
+#define SCRATCH_PAD_STATE_MASK         0x00000003 /* State Mask bits */
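
For illustration only (not part of the patch), a sketch of a firmware-readiness test built from the Scratch Pad 1 bits above; which units must be polled, and reading the pad through readl() on an MSGU __iomem base, are assumptions.

static bool example_fw_ready(void __iomem *msgu_base)
{
        u32 pad1 = readl(msgu_base + MSGU_SCRATCH_PAD_1);

        return (pad1 & SCRATCH_PAD_RAAE_READY) == SCRATCH_PAD_RAAE_READY &&
               (pad1 & SCRATCH_PAD_IOP0_READY) == SCRATCH_PAD_IOP0_READY;
}
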
+
+/* main configuration offset - byte offset */
+#define MAIN_SIGNATURE_OFFSET          0x00 /* DWORD 0x00 */
+#define MAIN_INTERFACE_REVISION                0x04 /* DWORD 0x01 */
+#define MAIN_FW_REVISION               0x08 /* DWORD 0x02 */
+#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */
+#define MAIN_MAX_SGL_OFFSET            0x10 /* DWORD 0x04 */
+#define MAIN_CNTRL_CAP_OFFSET          0x14 /* DWORD 0x05 */
+#define MAIN_GST_OFFSET                        0x18 /* DWORD 0x06 */
+#define MAIN_IBQ_OFFSET                        0x1C /* DWORD 0x07 */
+#define MAIN_OBQ_OFFSET                        0x20 /* DWORD 0x08 */
+#define MAIN_IQNPPD_HPPD_OFFSET                0x24 /* DWORD 0x09 */
+
+/* 0x28 - 0x4C - RSVD */
+#define MAIN_EVENT_CRC_CHECK           0x48 /* DWORD 0x12 */
+#define MAIN_EVENT_LOG_ADDR_HI         0x50 /* DWORD 0x14 */
+#define MAIN_EVENT_LOG_ADDR_LO         0x54 /* DWORD 0x15 */
+#define MAIN_EVENT_LOG_BUFF_SIZE       0x58 /* DWORD 0x16 */
+#define MAIN_EVENT_LOG_OPTION          0x5C /* DWORD 0x17 */
+#define MAIN_PCS_EVENT_LOG_ADDR_HI     0x60 /* DWORD 0x18 */
+#define MAIN_PCS_EVENT_LOG_ADDR_LO     0x64 /* DWORD 0x19 */
+#define MAIN_PCS_EVENT_LOG_BUFF_SIZE   0x68 /* DWORD 0x1A */
+#define MAIN_PCS_EVENT_LOG_OPTION      0x6C /* DWORD 0x1B */
+#define MAIN_FATAL_ERROR_INTERRUPT     0x70 /* DWORD 0x1C */
+#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */
+#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */
+#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */
+#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */
+#define MAIN_GPIO_LED_FLAGS_OFFSET     0x84 /* DWORD 0x21 */
+#define MAIN_ANALOG_SETUP_OFFSET       0x88 /* DWORD 0x22 */
+
+#define MAIN_INT_VECTOR_TABLE_OFFSET   0x8C /* DWORD 0x23 */
+#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */
+#define MAIN_PORT_RECOVERY_TIMER       0x94 /* DWORD 0x25 */
+#define MAIN_INT_REASSERTION_DELAY     0x98 /* DWORD 0x26 */
+
+/* General Status Table offset - byte offset */
+#define GST_GSTLEN_MPIS_OFFSET         0x00
+#define GST_IQ_FREEZE_STATE0_OFFSET    0x04
+#define GST_IQ_FREEZE_STATE1_OFFSET    0x08
+#define GST_MSGUTCNT_OFFSET            0x0C
+#define GST_IOPTCNT_OFFSET             0x10
+/* 0x14 - 0x34 - RSVD */
+#define GST_GPIO_INPUT_VAL             0x38
+/* 0x3c - 0x40 - RSVD */
+#define GST_RERRINFO_OFFSET0           0x44
+#define GST_RERRINFO_OFFSET1           0x48
+#define GST_RERRINFO_OFFSET2           0x4c
+#define GST_RERRINFO_OFFSET3           0x50
+#define GST_RERRINFO_OFFSET4           0x54
+#define GST_RERRINFO_OFFSET5           0x58
+#define GST_RERRINFO_OFFSET6           0x5c
+#define GST_RERRINFO_OFFSET7           0x60
+
+/* General Status Table - MPI state */
+#define GST_MPI_STATE_UNINIT           0x00
+#define GST_MPI_STATE_INIT             0x01
+#define GST_MPI_STATE_TERMINATION      0x02
+#define GST_MPI_STATE_ERROR            0x03
+#define GST_MPI_STATE_MASK             0x07
+
+/* Per SAS PHY Attributes */
+
+#define PSPA_PHYSTATE0_OFFSET          0x00 /* Dword V */
+#define PSPA_OB_HW_EVENT_PID0_OFFSET   0x04 /* DWORD V+1 */
+#define PSPA_PHYSTATE1_OFFSET          0x08 /* Dword V+2 */
+#define PSPA_OB_HW_EVENT_PID1_OFFSET   0x0C /* DWORD V+3 */
+#define PSPA_PHYSTATE2_OFFSET          0x10 /* Dword V+4 */
+#define PSPA_OB_HW_EVENT_PID2_OFFSET   0x14 /* DWORD V+5 */
+#define PSPA_PHYSTATE3_OFFSET          0x18 /* Dword V+6 */
+#define PSPA_OB_HW_EVENT_PID3_OFFSET   0x1C /* DWORD V+7 */
+#define PSPA_PHYSTATE4_OFFSET          0x20 /* Dword V+8 */
+#define PSPA_OB_HW_EVENT_PID4_OFFSET   0x24 /* DWORD V+9 */
+#define PSPA_PHYSTATE5_OFFSET          0x28 /* Dword V+10 */
+#define PSPA_OB_HW_EVENT_PID5_OFFSET   0x2C /* DWORD V+11 */
+#define PSPA_PHYSTATE6_OFFSET          0x30 /* Dword V+12 */
+#define PSPA_OB_HW_EVENT_PID6_OFFSET   0x34 /* DWORD V+13 */
+#define PSPA_PHYSTATE7_OFFSET          0x38 /* Dword V+14 */
+#define PSPA_OB_HW_EVENT_PID7_OFFSET   0x3C /* DWORD V+15 */
+#define PSPA_PHYSTATE8_OFFSET          0x40 /* DWORD V+16 */
+#define PSPA_OB_HW_EVENT_PID8_OFFSET   0x44 /* DWORD V+17 */
+#define PSPA_PHYSTATE9_OFFSET          0x48 /* DWORD V+18 */
+#define PSPA_OB_HW_EVENT_PID9_OFFSET   0x4C /* DWORD V+19 */
+#define PSPA_PHYSTATE10_OFFSET         0x50 /* DWORD V+20 */
+#define PSPA_OB_HW_EVENT_PID10_OFFSET  0x54 /* DWORD V+21 */
+#define PSPA_PHYSTATE11_OFFSET         0x58 /* DWORD V+22 */
+#define PSPA_OB_HW_EVENT_PID11_OFFSET  0x5C /* DWORD V+23 */
+#define PSPA_PHYSTATE12_OFFSET         0x60 /* DWORD V+24 */
+#define PSPA_OB_HW_EVENT_PID12_OFFSET  0x64 /* DWORD V+25 */
+#define PSPA_PHYSTATE13_OFFSET         0x68 /* DWORD V+26 */
+#define PSPA_OB_HW_EVENT_PID13_OFFSET  0x6c /* DWORD V+27 */
+#define PSPA_PHYSTATE14_OFFSET         0x70 /* DWORD V+28 */
+#define PSPA_OB_HW_EVENT_PID14_OFFSET  0x74 /* DWORD V+29 */
+#define PSPA_PHYSTATE15_OFFSET         0x78 /* DWORD V+30 */
+#define PSPA_OB_HW_EVENT_PID15_OFFSET  0x7c /* DWORD V+31 */
+/* end PSPA */
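
For illustration only (not part of the patch), hypothetical helpers derived from the regular 8-byte stride visible in the PSPA offsets above (phy state at V + 8*n, outbound HW event PID at V + 4 + 8*n).

static inline u32 example_pspa_phystate_offset(u32 phy)
{
        return PSPA_PHYSTATE0_OFFSET + phy * 0x08;
}

static inline u32 example_pspa_ob_hw_event_pid_offset(u32 phy)
{
        return PSPA_OB_HW_EVENT_PID0_OFFSET + phy * 0x08;
}
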
+
+/* inbound queue configuration offset - byte offset */
+#define IB_PROPERITY_OFFSET            0x00
+#define IB_BASE_ADDR_HI_OFFSET         0x04
+#define IB_BASE_ADDR_LO_OFFSET         0x08
+#define IB_CI_BASE_ADDR_HI_OFFSET      0x0C
+#define IB_CI_BASE_ADDR_LO_OFFSET      0x10
+#define IB_PIPCI_BAR                   0x14
+#define IB_PIPCI_BAR_OFFSET            0x18
+#define IB_RESERVED_OFFSET             0x1C
+
+/* outbound queue configuration offset - byte offset */
+#define OB_PROPERITY_OFFSET            0x00
+#define OB_BASE_ADDR_HI_OFFSET         0x04
+#define OB_BASE_ADDR_LO_OFFSET         0x08
+#define OB_PI_BASE_ADDR_HI_OFFSET      0x0C
+#define OB_PI_BASE_ADDR_LO_OFFSET      0x10
+#define OB_CIPCI_BAR                   0x14
+#define OB_CIPCI_BAR_OFFSET            0x18
+#define OB_INTERRUPT_COALES_OFFSET     0x1C
+#define OB_DYNAMIC_COALES_OFFSET       0x20
+#define OB_PROPERTY_INT_ENABLE         0x40000000
+
+#define MBIC_NMI_ENABLE_VPE0_IOP       0x000418
+#define MBIC_NMI_ENABLE_VPE0_AAP1      0x000418
+/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */
+#define PCIE_EVENT_INTERRUPT_ENABLE    0x003040
+#define PCIE_EVENT_INTERRUPT           0x003044
+#define PCIE_ERROR_INTERRUPT_ENABLE    0x003048
+#define PCIE_ERROR_INTERRUPT           0x00304C
+
+/* SPCV soft reset */
+#define SPC_REG_SOFT_RESET 0x00001000
+#define SPCv_NORMAL_RESET_VALUE                0x1
+
+#define SPCv_SOFT_RESET_READ_MASK              0xC0
+#define SPCv_SOFT_RESET_NO_RESET               0x0
+#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED   0x40
+#define SPCv_SOFT_RESET_HDA_MODE_OCCURED       0x80
+#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED     0xC0
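
For illustration only (not part of the patch), a sketch of decoding the soft-reset status bits defined above from a raw register value; how that value is read is left out.

static const char *example_soft_reset_state(u32 reg)
{
        switch (reg & SPCv_SOFT_RESET_READ_MASK) {
        case SPCv_SOFT_RESET_NORMAL_RESET_OCCURED:
                return "normal reset";
        case SPCv_SOFT_RESET_HDA_MODE_OCCURED:
                return "HDA mode";
        case SPCv_SOFT_RESET_CHIP_RESET_OCCURED:
                return "chip reset";
        default:
                return "no reset";
        }
}
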
+
+/* signature definition for host scratch pad0 register */
+#define SPC_SOFT_RESET_SIGNATURE       0x252acbcd
+/* Signature for Soft Reset */
+
+/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */
+#define SPC_REG_RESET                  0x000000/* reset register */
+
+/* bit definition for SPC_RESET register */
+#define SPC_REG_RESET_OSSP             0x00000001
+#define SPC_REG_RESET_RAAE             0x00000002
+#define SPC_REG_RESET_PCS_SPBC         0x00000004
+#define SPC_REG_RESET_PCS_IOP_SS       0x00000008
+#define SPC_REG_RESET_PCS_AAP1_SS      0x00000010
+#define SPC_REG_RESET_PCS_AAP2_SS      0x00000020
+#define SPC_REG_RESET_PCS_LM           0x00000040
+#define SPC_REG_RESET_PCS              0x00000080
+#define SPC_REG_RESET_GSM              0x00000100
+#define SPC_REG_RESET_DDR2             0x00010000
+#define SPC_REG_RESET_BDMA_CORE                0x00020000
+#define SPC_REG_RESET_BDMA_SXCBI       0x00040000
+#define SPC_REG_RESET_PCIE_AL_SXCBI    0x00080000
+#define SPC_REG_RESET_PCIE_PWR         0x00100000
+#define SPC_REG_RESET_PCIE_SFT         0x00200000
+#define SPC_REG_RESET_PCS_SXCBI                0x00400000
+#define SPC_REG_RESET_LMS_SXCBI                0x00800000
+#define SPC_REG_RESET_PMIC_SXCBI       0x01000000
+#define SPC_REG_RESET_PMIC_CORE                0x02000000
+#define SPC_REG_RESET_PCIE_PC_SXCBI    0x04000000
+#define SPC_REG_RESET_DEVICE           0x80000000
+
+/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */
+#define SPCV_IBW_AXI_TRANSLATION_LOW   0x001010
+
+#define MBIC_AAP1_ADDR_BASE            0x060000
+#define MBIC_IOP_ADDR_BASE             0x070000
+#define GSM_ADDR_BASE                  0x0700000
+/* Dynamic map through Bar4 - 0x00700000 */
+#define GSM_CONFIG_RESET               0x00000000
+#define RAM_ECC_DB_ERR                 0x00000018
+#define GSM_READ_ADDR_PARITY_INDIC     0x00000058
+#define GSM_WRITE_ADDR_PARITY_INDIC    0x00000060
+#define GSM_WRITE_DATA_PARITY_INDIC    0x00000068
+#define GSM_READ_ADDR_PARITY_CHECK     0x00000038
+#define GSM_WRITE_ADDR_PARITY_CHECK    0x00000040
+#define GSM_WRITE_DATA_PARITY_CHECK    0x00000048
+
+#define RB6_ACCESS_REG                 0x6A0000
+#define HDAC_EXEC_CMD                  0x0002
+#define HDA_C_PA                       0xcb
+#define HDA_SEQ_ID_BITS                        0x00ff0000
+#define HDA_GSM_OFFSET_BITS            0x00FFFFFF
+#define HDA_GSM_CMD_OFFSET_BITS                0x42C0
+#define HDA_GSM_RSP_OFFSET_BITS                0x42E0
+
+#define MBIC_AAP1_ADDR_BASE            0x060000
+#define MBIC_IOP_ADDR_BASE             0x070000
+#define GSM_ADDR_BASE                  0x0700000
+#define SPC_TOP_LEVEL_ADDR_BASE                0x000000
+#define GSM_CONFIG_RESET_VALUE         0x00003b00
+#define GPIO_ADDR_BASE                 0x00090000
+#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET  0x0000010c
+
+/* RB6 offset */
+#define SPC_RB6_OFFSET                 0x80C0
+/* Magic number of soft reset for RB6 */
+#define RB6_MAGIC_NUMBER_RST           0x1234
+
+/* Device Register status */
+#define DEVREG_SUCCESS                                 0x00
+#define DEVREG_FAILURE_OUT_OF_RESOURCE                 0x01
+#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED       0x02
+#define DEVREG_FAILURE_INVALID_PHY_ID                  0x03
+#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED       0x04
+#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE            0x05
+#define DEVREG_FAILURE_PORT_NOT_VALID_STATE            0x06
+#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID           0x07
+
+#endif
index 317a7fdc3b825064e4a5677f0a64bc5f3e43a8d6..23d607218ae8b8a27e07b51d3752bfae17bbba26 100644 (file)
@@ -24,7 +24,9 @@ config SCSI_QLA_FC
 
        Firmware images can be retrieved from:
 
-               ftp://ftp.qlogic.com/outgoing/linux/firmware/
+               http://ldriver.qlogic.com/firmware/
+
+       They are also included in the linux-firmware tree.
 
 config TCM_QLA2XXX
        tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
index 729b74389f83168544fb4ef95afe2e9464193ba6..937fed8cb0388550b619dfae667d452fa1c686a7 100644 (file)
@@ -3003,12 +3003,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
 
        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-               lcmd_pkt->cntrl_flags =
-                   __constant_cpu_to_le16(TMF_WRITE_DATA);
+               lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-               lcmd_pkt->cntrl_flags =
-                   __constant_cpu_to_le16(TMF_READ_DATA);
+               lcmd_pkt->cntrl_flags = TMF_READ_DATA;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }
 
index 5307bf86d5e08d453be2f8d3bd3ca781607b640c..ad72c1d8511162b9465b96265510bdad6dfd1e42 100644 (file)
@@ -644,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
        qla2x00_rel_sp(sp->fcport->vha, sp);
 }
 
-void
+static void
 qla2x00_sp_compl(void *data, void *ptr, int res)
 {
        struct qla_hw_data *ha = (struct qla_hw_data *)data;
index 14fec976f634e1c4804855617c65aae7fcd4a566..fad71ed067ec3525a742fe251634fa429bb781f8 100644 (file)
@@ -507,6 +507,7 @@ static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
        mrb->mbox_cmd = in_mbox[0];
        wmb();
 
+       ha->iocb_cnt += mrb->iocb_cnt;
        ha->isp_ops->queue_iocb(ha);
 exit_mbox_iocb:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
index a47f99957ba8eb9c37d6e4b46ec25cfe763c24a8..4d231c12463eb38ad29d5b3c4ca25f4af24d99fa 100644 (file)
@@ -2216,14 +2216,14 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
        fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
        fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
        fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
-       fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf);
-       fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf);
+       fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
+       fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
        fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
        fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
        fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
        fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
-       fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn);
-       fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn);
+       fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
+       fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
        fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
        fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
        fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
@@ -5504,9 +5504,9 @@ static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
  * If this is invoked as a result of a userspace call then the entry is marked
  * as nonpersistent using flash_state field.
  **/
-int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
-                                struct dev_db_entry *fw_ddb_entry,
-                                uint16_t *idx, int user)
+static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
+                                       struct dev_db_entry *fw_ddb_entry,
+                                       uint16_t *idx, int user)
 {
        struct iscsi_bus_flash_session *fnode_sess = NULL;
        struct iscsi_bus_flash_conn *fnode_conn = NULL;
@@ -5605,6 +5605,7 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
                ql4_printk(KERN_ERR, ha,
                           "%s: A non-persistent entry %s found\n",
                           __func__, dev->kobj.name);
+               put_device(dev);
                goto exit_ddb_add;
        }
 
@@ -6112,8 +6113,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
        int parent_type, parent_index = 0xffff;
        int rc = 0;
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev)
                return -EIO;
 
@@ -6276,8 +6276,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
                        rc = sprintf(buf, "\n");
                break;
        case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
-               if ((fnode_sess->discovery_parent_idx) >= 0  &&
-                   (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES))
+               if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)
                        parent_index = fnode_sess->discovery_parent_idx;
 
                rc = sprintf(buf, "%u\n", parent_index);
@@ -6287,8 +6286,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
                        parent_type = ISCSI_DISC_PARENT_ISNS;
                else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
                        parent_type = ISCSI_DISC_PARENT_UNKNOWN;
-               else if (fnode_sess->discovery_parent_type >= 0  &&
-                        fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
+               else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
                        parent_type = ISCSI_DISC_PARENT_SENDTGT;
                else
                        parent_type = ISCSI_DISC_PARENT_UNKNOWN;
@@ -6349,6 +6347,8 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
                rc = -ENOSYS;
                break;
        }
+
+       put_device(dev);
        return rc;
 }
 
@@ -6368,20 +6368,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
 {
        struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
        struct scsi_qla_host *ha = to_qla_host(shost);
-       struct dev_db_entry *fw_ddb_entry = NULL;
        struct iscsi_flashnode_param_info *fnode_param;
        struct nlattr *attr;
        int rc = QLA_ERROR;
        uint32_t rem = len;
 
-       fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL);
-       if (!fw_ddb_entry) {
-               DEBUG2(ql4_printk(KERN_ERR, ha,
-                                 "%s: Unable to allocate ddb buffer\n",
-                                 __func__));
-               return -ENOMEM;
-       }
-
        nla_for_each_attr(attr, data, len, rem) {
                fnode_param = nla_data(attr);
 
@@ -6590,16 +6581,11 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
        struct dev_db_entry *fw_ddb_entry = NULL;
        dma_addr_t fw_ddb_entry_dma;
        uint16_t *ddb_cookie = NULL;
-       size_t ddb_size;
+       size_t ddb_size = 0;
        void *pddb = NULL;
        int target_id;
        int rc = 0;
 
-       if (!fnode_sess) {
-               rc = -EINVAL;
-               goto exit_ddb_del;
-       }
-
        if (fnode_sess->is_boot_target) {
                rc = -EPERM;
                DEBUG2(ql4_printk(KERN_ERR, ha,
@@ -6631,8 +6617,7 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
 
                dev_db_start_offset += (fnode_sess->target_id *
                                       sizeof(*fw_ddb_entry));
-               dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) -
-                                      (void *)fw_ddb_entry;
+               dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
 
                ddb_size = sizeof(*ddb_cookie);
        }
index 83e0fec35d563262df3d82bbc404d0b61456353a..fe873cf7570d05abe96e71e4422c7744ee15c265 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.03.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.03.00-k9"
index 5add6f4e79281a252928519f9613cf9c92f275dc..0a537a0515ca6c6c37829fb3c45f1fe8aa20c4de 100644 (file)
@@ -1997,24 +1997,39 @@ out:
        return ret;
 }
 
-static unsigned int map_state(sector_t lba, unsigned int *num)
+static unsigned long lba_to_map_index(sector_t lba)
+{
+       if (scsi_debug_unmap_alignment) {
+               lba += scsi_debug_unmap_granularity -
+                       scsi_debug_unmap_alignment;
+       }
+       do_div(lba, scsi_debug_unmap_granularity);
+
+       return lba;
+}
+
+static sector_t map_index_to_lba(unsigned long index)
 {
-       unsigned int granularity, alignment, mapped;
-       sector_t block, next, end;
+       return index * scsi_debug_unmap_granularity -
+               scsi_debug_unmap_alignment;
+}
 
-       granularity = scsi_debug_unmap_granularity;
-       alignment = granularity - scsi_debug_unmap_alignment;
-       block = lba + alignment;
-       do_div(block, granularity);
+static unsigned int map_state(sector_t lba, unsigned int *num)
+{
+       sector_t end;
+       unsigned int mapped;
+       unsigned long index;
+       unsigned long next;
 
-       mapped = test_bit(block, map_storep);
+       index = lba_to_map_index(lba);
+       mapped = test_bit(index, map_storep);
 
        if (mapped)
-               next = find_next_zero_bit(map_storep, map_size, block);
+               next = find_next_zero_bit(map_storep, map_size, index);
        else
-               next = find_next_bit(map_storep, map_size, block);
+               next = find_next_bit(map_storep, map_size, index);
 
-       end = next * granularity - scsi_debug_unmap_alignment;
+       end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
        *num = end - lba;
 
        return mapped;
@@ -2022,47 +2037,37 @@ static unsigned int map_state(sector_t lba, unsigned int *num)
 
 static void map_region(sector_t lba, unsigned int len)
 {
-       unsigned int granularity, alignment;
        sector_t end = lba + len;
 
-       granularity = scsi_debug_unmap_granularity;
-       alignment = granularity - scsi_debug_unmap_alignment;
-
        while (lba < end) {
-               sector_t block, rem;
-
-               block = lba + alignment;
-               rem = do_div(block, granularity);
+               unsigned long index = lba_to_map_index(lba);
 
-               if (block < map_size)
-                       set_bit(block, map_storep);
+               if (index < map_size)
+                       set_bit(index, map_storep);
 
-               lba += granularity - rem;
+               lba = map_index_to_lba(index + 1);
        }
 }
 
 static void unmap_region(sector_t lba, unsigned int len)
 {
-       unsigned int granularity, alignment;
        sector_t end = lba + len;
 
-       granularity = scsi_debug_unmap_granularity;
-       alignment = granularity - scsi_debug_unmap_alignment;
-
        while (lba < end) {
-               sector_t block, rem;
-
-               block = lba + alignment;
-               rem = do_div(block, granularity);
+               unsigned long index = lba_to_map_index(lba);
 
-               if (rem == 0 && lba + granularity < end && block < map_size) {
-                       clear_bit(block, map_storep);
-                       if (scsi_debug_lbprz)
+               if (lba == map_index_to_lba(index) &&
+                   lba + scsi_debug_unmap_granularity <= end &&
+                   index < map_size) {
+                       clear_bit(index, map_storep);
+                       if (scsi_debug_lbprz) {
                                memset(fake_storep +
-                                      block * scsi_debug_sector_size, 0,
-                                      scsi_debug_sector_size);
+                                      lba * scsi_debug_sector_size, 0,
+                                      scsi_debug_sector_size *
+                                      scsi_debug_unmap_granularity);
+                       }
                }
-               lba += granularity - rem;
+               lba = map_index_to_lba(index + 1);
        }
 }
 
@@ -2089,7 +2094,7 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
 
        write_lock_irqsave(&atomic_rw, iflags);
        ret = do_device_access(SCpnt, devip, lba, num, 1);
-       if (scsi_debug_unmap_granularity)
+       if (scsi_debug_lbp())
                map_region(lba, num);
        write_unlock_irqrestore(&atomic_rw, iflags);
        if (-1 == ret)
@@ -2122,7 +2127,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
 
        write_lock_irqsave(&atomic_rw, iflags);
 
-       if (unmap && scsi_debug_unmap_granularity) {
+       if (unmap && scsi_debug_lbp()) {
                unmap_region(lba, num);
                goto out;
        }
@@ -2146,7 +2151,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
                       fake_storep + (lba * scsi_debug_sector_size),
                       scsi_debug_sector_size);
 
-       if (scsi_debug_unmap_granularity)
+       if (scsi_debug_lbp())
                map_region(lba, num);
 out:
        write_unlock_irqrestore(&atomic_rw, iflags);
@@ -3389,8 +3394,6 @@ static int __init scsi_debug_init(void)
 
        /* Logical Block Provisioning */
        if (scsi_debug_lbp()) {
-               unsigned int map_bytes;
-
                scsi_debug_unmap_max_blocks =
                        clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
 
@@ -3401,16 +3404,16 @@ static int __init scsi_debug_init(void)
                        clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
 
                if (scsi_debug_unmap_alignment &&
-                   scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
+                   scsi_debug_unmap_granularity <=
+                   scsi_debug_unmap_alignment) {
                        printk(KERN_ERR
-                              "%s: ERR: unmap_granularity < unmap_alignment\n",
+                              "%s: ERR: unmap_granularity <= unmap_alignment\n",
                               __func__);
                        return -EINVAL;
                }
 
-               map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
-               map_bytes = map_size >> 3;
-               map_storep = vmalloc(map_bytes);
+               map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
+               map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
 
                printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
                       map_size);
@@ -3421,7 +3424,7 @@ static int __init scsi_debug_init(void)
                        goto free_vm;
                }
 
-               memset(map_storep, 0x0, map_bytes);
+               bitmap_zero(map_storep, map_size);
 
                /* Map first 1KB for partition table */
                if (scsi_debug_num_parts)
index c1b05a83d403221912fb845e314d3a30656a89fc..f43de1e56420ac7916ca99c83281b81b417f4fb9 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
+#include <linux/jiffies.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -791,32 +792,48 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
        struct scsi_device *sdev = scmd->device;
        struct Scsi_Host *shost = sdev->host;
        DECLARE_COMPLETION_ONSTACK(done);
-       unsigned long timeleft;
+       unsigned long timeleft = timeout;
        struct scsi_eh_save ses;
+       const unsigned long stall_for = msecs_to_jiffies(100);
        int rtn;
 
+retry:
        scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
        shost->eh_action = &done;
 
        scsi_log_send(scmd);
        scmd->scsi_done = scsi_eh_done;
-       shost->hostt->queuecommand(shost, scmd);
-
-       timeleft = wait_for_completion_timeout(&done, timeout);
+       rtn = shost->hostt->queuecommand(shost, scmd);
+       if (rtn) {
+               if (timeleft > stall_for) {
+                       scsi_eh_restore_cmnd(scmd, &ses);
+                       timeleft -= stall_for;
+                       msleep(jiffies_to_msecs(stall_for));
+                       goto retry;
+               }
+               /* signal not to enter either branch of the if () below */
+               timeleft = 0;
+               rtn = NEEDS_RETRY;
+       } else {
+               timeleft = wait_for_completion_timeout(&done, timeout);
+       }
 
        shost->eh_action = NULL;
 
-       scsi_log_completion(scmd, SUCCESS);
+       scsi_log_completion(scmd, rtn);
 
        SCSI_LOG_ERROR_RECOVERY(3,
                printk("%s: scmd: %p, timeleft: %ld\n",
                        __func__, scmd, timeleft));
 
        /*
-        * If there is time left scsi_eh_done got called, and we will
-        * examine the actual status codes to see whether the command
-        * actually did complete normally, else tell the host to forget
-        * about this command.
+        * If there is time left scsi_eh_done got called, and we will examine
+        * the actual status codes to see whether the command actually did
+        * complete normally, else if we have a zero return and no time left,
+        * the command must still be pending, so abort it and return FAILED.
+        * If we never actually managed to issue the command, because
+        * ->queuecommand() kept returning non-zero, use the rtn = NEEDS_RETRY
+        * value set above (so neither branch of the if below is taken).
         */
        if (timeleft) {
                rtn = scsi_eh_completed_normally(scmd);
@@ -837,7 +854,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
                        rtn = FAILED;
                        break;
                }
-       } else {
+       } else if (!rtn) {
                scsi_abort_eh_cmnd(scmd);
                rtn = FAILED;
        }
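
The scsi_error.c hunk above retries ->queuecommand() with a 100 ms stall while the timeout budget lasts, and gives up once the budget is spent. Below is a small user-space sketch of that budgeted-retry loop; submit() and the timing constants are illustrative, not kernel APIs.

/* Budgeted-retry sketch mirroring the loop above, in user space. */
#include <stdio.h>
#include <unistd.h>

#define STALL_MS   100
#define TIMEOUT_MS 1000

static int submit(int attempt)
{
        return attempt < 3 ? -1 : 0;    /* pretend the host is busy three times */
}

int main(void)
{
        long timeleft = TIMEOUT_MS;
        int attempt = 0, rc;

        while ((rc = submit(attempt++)) != 0 && timeleft > STALL_MS) {
                timeleft -= STALL_MS;
                usleep(STALL_MS * 1000);        /* back off before retrying */
        }
        if (rc)
                printf("gave up, budget exhausted\n");
        else
                printf("submitted after %d attempt(s), %ld ms left\n",
                       attempt, timeleft);
        return 0;
}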
index c31187d79343a17af26404a2d6c547dc392ae151..86d522004a208255b17861b4c1cb556bbe7c973e 100644 (file)
@@ -276,11 +276,10 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 }
 EXPORT_SYMBOL(scsi_execute);
 
-
-int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
+int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries,
-                    int *resid)
+                    int *resid, int flags)
 {
        char *sense = NULL;
        int result;
@@ -291,14 +290,14 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
                        return DRIVER_ERROR << 24;
        }
        result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
-                             sense, timeout, retries, 0, resid);
+                             sense, timeout, retries, flags, resid);
        if (sshdr)
                scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
 
        kfree(sense);
        return result;
 }
-EXPORT_SYMBOL(scsi_execute_req);
+EXPORT_SYMBOL(scsi_execute_req_flags);
 
 /*
  * Function:    scsi_init_cmd_errh()
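
The scsi_lib.c hunk above turns scsi_execute_req() into scsi_execute_req_flags() with an extra flags argument, presumably leaving the old name available as a thin wrapper for callers that pass no flags. A hedged user-space sketch of that wrapper pattern; the names here are illustrative, not the kernel's actual declarations.

/* Old entry point kept as a thin wrapper over the flags-taking variant. */
#include <stdio.h>

static int do_request_flags(const char *cmd, int flags)
{
        printf("executing '%s' with flags 0x%x\n", cmd, flags);
        return 0;
}

static int do_request(const char *cmd)
{
        return do_request_flags(cmd, 0);        /* legacy callers unchanged */
}

int main(void)
{
        do_request("TEST UNIT READY");
        do_request_flags("SYNCHRONIZE CACHE", 0x1);     /* PM path passes a flag */
        return 0;
}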
index 8f6b12cbd224801e2828571beb6938808d87b26c..42539ee2cb111f0e10a6b152d9a0d60cbb2d895b 100644 (file)
@@ -144,33 +144,83 @@ static int scsi_bus_restore(struct device *dev)
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int sdev_blk_runtime_suspend(struct scsi_device *sdev,
+                                       int (*cb)(struct device *))
+{
+       int err;
+
+       err = blk_pre_runtime_suspend(sdev->request_queue);
+       if (err)
+               return err;
+       if (cb)
+               err = cb(&sdev->sdev_gendev);
+       blk_post_runtime_suspend(sdev->request_queue, err);
+
+       return err;
+}
+
+static int sdev_runtime_suspend(struct device *dev)
+{
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int (*cb)(struct device *) = pm ? pm->runtime_suspend : NULL;
+       struct scsi_device *sdev = to_scsi_device(dev);
+       int err;
+
+       if (sdev->request_queue->dev)
+               return sdev_blk_runtime_suspend(sdev, cb);
+
+       err = scsi_dev_type_suspend(dev, cb);
+       if (err == -EAGAIN)
+               pm_schedule_suspend(dev, jiffies_to_msecs(
+                                       round_jiffies_up_relative(HZ/10)));
+       return err;
+}
+
 static int scsi_runtime_suspend(struct device *dev)
 {
        int err = 0;
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
        dev_dbg(dev, "scsi_runtime_suspend\n");
-       if (scsi_is_sdev_device(dev)) {
-               err = scsi_dev_type_suspend(dev,
-                               pm ? pm->runtime_suspend : NULL);
-               if (err == -EAGAIN)
-                       pm_schedule_suspend(dev, jiffies_to_msecs(
-                               round_jiffies_up_relative(HZ/10)));
-       }
+       if (scsi_is_sdev_device(dev))
+               err = sdev_runtime_suspend(dev);
 
        /* Insert hooks here for targets, hosts, and transport classes */
 
        return err;
 }
 
-static int scsi_runtime_resume(struct device *dev)
+static int sdev_blk_runtime_resume(struct scsi_device *sdev,
+                                       int (*cb)(struct device *))
 {
        int err = 0;
+
+       blk_pre_runtime_resume(sdev->request_queue);
+       if (cb)
+               err = cb(&sdev->sdev_gendev);
+       blk_post_runtime_resume(sdev->request_queue, err);
+
+       return err;
+}
+
+static int sdev_runtime_resume(struct device *dev)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int (*cb)(struct device *) = pm ? pm->runtime_resume : NULL;
+
+       if (sdev->request_queue->dev)
+               return sdev_blk_runtime_resume(sdev, cb);
+       else
+               return scsi_dev_type_resume(dev, cb);
+}
+
+static int scsi_runtime_resume(struct device *dev)
+{
+       int err = 0;
 
        dev_dbg(dev, "scsi_runtime_resume\n");
        if (scsi_is_sdev_device(dev))
-               err = scsi_dev_type_resume(dev, pm ? pm->runtime_resume : NULL);
+               err = sdev_runtime_resume(dev);
 
        /* Insert hooks here for targets, hosts, and transport classes */
 
@@ -185,10 +235,18 @@ static int scsi_runtime_idle(struct device *dev)
 
        /* Insert hooks here for targets, hosts, and transport classes */
 
-       if (scsi_is_sdev_device(dev))
-               err = pm_schedule_suspend(dev, 100);
-       else
+       if (scsi_is_sdev_device(dev)) {
+               struct scsi_device *sdev = to_scsi_device(dev);
+
+               if (sdev->request_queue->dev) {
+                       pm_runtime_mark_last_busy(dev);
+                       err = pm_runtime_autosuspend(dev);
+               } else {
+                       err = pm_runtime_suspend(dev);
+               }
+       } else {
                err = pm_runtime_suspend(dev);
+       }
        return err;
 }
 
index 47799a33d6caacf7e5715e9c7760d121c24797b1..133926b1bb78bc39231260b5359dff3ff9a16bcb 100644 (file)
@@ -1019,8 +1019,7 @@ exit_match_index:
 /**
  * iscsi_get_flashnode_by_index -finds flashnode session entry by index
  * @shost: pointer to host data
- * @data: pointer to data containing value to use for comparison
- * @fn: function pointer that does actual comparison
+ * @idx: index to match
  *
  * Finds the flashnode session object for the passed index
  *
@@ -1029,13 +1028,13 @@ exit_match_index:
  *  %NULL on failure
  */
 static struct iscsi_bus_flash_session *
-iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data,
-                            int (*fn)(struct device *dev, void *data))
+iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
 {
        struct iscsi_bus_flash_session *fnode_sess = NULL;
        struct device *dev;
 
-       dev = device_find_child(&shost->shost_gendev, data, fn);
+       dev = device_find_child(&shost->shost_gendev, &idx,
+                               flashnode_match_index);
        if (dev)
                fnode_sess = iscsi_dev_to_flash_session(dev);
 
@@ -1059,18 +1058,13 @@ struct device *
 iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
                          int (*fn)(struct device *dev, void *data))
 {
-       struct device *dev;
-
-       dev = device_find_child(&shost->shost_gendev, data, fn);
-       return dev;
+       return device_find_child(&shost->shost_gendev, data, fn);
 }
 EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
 
 /**
  * iscsi_find_flashnode_conn - finds flashnode connection entry
  * @fnode_sess: pointer to parent flashnode session entry
- * @data: pointer to data containing value to use for comparison
- * @fn: function pointer that does actual comparison
  *
  * Finds the flashnode connection object comparing the data passed using logic
  * defined in passed function pointer
@@ -1080,14 +1074,10 @@ EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
  *  %NULL on failure
  */
 struct device *
-iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
-                         void *data,
-                         int (*fn)(struct device *dev, void *data))
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess)
 {
-       struct device *dev;
-
-       dev = device_find_child(&fnode_sess->dev, data, fn);
-       return dev;
+       return device_find_child(&fnode_sess->dev, NULL,
+                                iscsi_is_flashnode_conn_dev);
 }
 EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
 
@@ -2808,7 +2798,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
        struct iscsi_bus_flash_session *fnode_sess;
        struct iscsi_bus_flash_conn *fnode_conn;
        struct device *dev;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->set_flashnode_param) {
@@ -2824,25 +2814,27 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.set_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.set_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.set_flashnode.host_no);
+                      __func__, idx, ev->u.set_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev) {
                err = -ENODEV;
-               goto put_host;
+               goto put_sess;
        }
 
        fnode_conn = iscsi_dev_to_flash_conn(dev);
        err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
+       put_device(dev);
+
+put_sess:
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -2891,7 +2883,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
 {
        struct Scsi_Host *shost;
        struct iscsi_bus_flash_session *fnode_sess;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->del_flashnode) {
@@ -2907,17 +2899,17 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.del_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.del_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.del_flashnode.host_no);
+                      __func__, idx, ev->u.del_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
        err = transport->del_flashnode(fnode_sess);
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -2933,7 +2925,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
        struct iscsi_bus_flash_session *fnode_sess;
        struct iscsi_bus_flash_conn *fnode_conn;
        struct device *dev;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->login_flashnode) {
@@ -2949,25 +2941,27 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.login_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.login_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.login_flashnode.host_no);
+                      __func__, idx, ev->u.login_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev) {
                err = -ENODEV;
-               goto put_host;
+               goto put_sess;
        }
 
        fnode_conn = iscsi_dev_to_flash_conn(dev);
        err = transport->login_flashnode(fnode_sess, fnode_conn);
+       put_device(dev);
+
+put_sess:
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -2983,7 +2977,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
        struct iscsi_bus_flash_session *fnode_sess;
        struct iscsi_bus_flash_conn *fnode_conn;
        struct device *dev;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->logout_flashnode) {
@@ -2999,26 +2993,28 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.logout_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.logout_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.logout_flashnode.host_no);
+                      __func__, idx, ev->u.logout_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev) {
                err = -ENODEV;
-               goto put_host;
+               goto put_sess;
        }
 
        fnode_conn = iscsi_dev_to_flash_conn(dev);
 
        err = transport->logout_flashnode(fnode_sess, fnode_conn);
+       put_device(dev);
+
+put_sess:
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -3985,8 +3981,10 @@ static __init int iscsi_transport_init(void)
        }
 
        iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
-       if (!iscsi_eh_timer_workq)
+       if (!iscsi_eh_timer_workq) {
+               err = -ENOMEM;
                goto release_nls;
+       }
 
        return 0;
 
index e6689776b4f617ac55b1e9c4dac138cee85a9196..c1c555242d0d715d46c051955deacea4a819d634 100644 (file)
@@ -142,6 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
        char *buffer_data;
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
+       const char temp[] = "temporary ";
        int len;
 
        if (sdp->type != TYPE_DISK)
@@ -150,6 +151,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
                 * it's not worth the risk */
                return -EINVAL;
 
+       if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
+               buf += sizeof(temp) - 1;
+               sdkp->cache_override = 1;
+       } else {
+               sdkp->cache_override = 0;
+       }
+
        for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
                len = strlen(sd_cache_types[i]);
                if (strncmp(sd_cache_types[i], buf, len) == 0 &&
@@ -162,6 +170,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
        rcd = ct & 0x01 ? 1 : 0;
        wce = ct & 0x02 ? 1 : 0;
+
+       if (sdkp->cache_override) {
+               sdkp->WCE = wce;
+               sdkp->RCD = rcd;
+               return count;
+       }
+
        if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
                            SD_MAX_RETRIES, &data, NULL))
                return -EINVAL;
@@ -1121,10 +1136,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
 
        sdev = sdkp->device;
 
-       retval = scsi_autopm_get_device(sdev);
-       if (retval)
-               goto error_autopm;
-
        /*
         * If the device is in error recovery, wait until it is done.
         * If the device is offline, then disallow any access to it.
@@ -1169,8 +1180,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
        return 0;
 
 error_out:
-       scsi_autopm_put_device(sdev);
-error_autopm:
        scsi_disk_put(sdkp);
        return retval;  
 }
@@ -1205,7 +1214,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
         * XXX is followed by a "rmmod sd_mod"?
         */
 
-       scsi_autopm_put_device(sdev);
        scsi_disk_put(sdkp);
 }
 
@@ -1366,14 +1374,9 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
        retval = -ENODEV;
 
        if (scsi_block_when_processing_errors(sdp)) {
-               retval = scsi_autopm_get_device(sdp);
-               if (retval)
-                       goto out;
-
                sshdr  = kzalloc(sizeof(*sshdr), GFP_KERNEL);
                retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
                                              sshdr);
-               scsi_autopm_put_device(sdp);
        }
 
        /* failed to execute TUR, assume media not present */
@@ -1423,8 +1426,9 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
                 * Leave the rest of the command zero to indicate
                 * flush everything.
                 */
-               res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-                                      SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL);
+               res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
+                                            &sshdr, SD_FLUSH_TIMEOUT,
+                                            SD_MAX_RETRIES, NULL, REQ_PM);
                if (res == 0)
                        break;
        }
@@ -2318,6 +2322,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
        int old_rcd = sdkp->RCD;
        int old_dpofua = sdkp->DPOFUA;
 
+
+       if (sdkp->cache_override)
+               return;
+
        first_len = 4;
        if (sdp->skip_ms_page_8) {
                if (sdp->type == TYPE_RBC)
@@ -2811,6 +2819,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
        sdkp->capacity = 0;
        sdkp->media_present = 1;
        sdkp->write_prot = 0;
+       sdkp->cache_override = 0;
        sdkp->WCE = 0;
        sdkp->RCD = 0;
        sdkp->ATO = 0;
@@ -2837,6 +2846,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 
        sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
                  sdp->removable ? "removable " : "");
+       blk_pm_runtime_init(sdp->request_queue, dev);
        scsi_autopm_put_device(sdp);
        put_device(&sdkp->dev);
 }
@@ -3020,8 +3030,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
        if (!scsi_device_online(sdp))
                return -ENODEV;
 
-       res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-                              SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+       res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
+                              SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
        if (res) {
                sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
                sd_print_result(sdkp, res);
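
Earlier in this file's diff, sd_store_cache_type() learns to accept an optional "temporary " prefix that sets cache_override instead of touching the mode page. A short user-space sketch of that prefix parse; the function and its callers are illustrative only.

/* Prefix parse for the optional "temporary " override accepted above. */
#include <stdio.h>
#include <string.h>

static void parse_cache_type(const char *buf, int *override)
{
        static const char temp[] = "temporary ";

        if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
                buf += sizeof(temp) - 1;        /* array sizeof gives the full prefix length */
                *override = 1;
        } else {
                *override = 0;
        }
        printf("mode '%s', override=%d\n", buf, *override);
}

int main(void)
{
        int ov;

        parse_cache_type("write back", &ov);
        parse_cache_type("temporary write back", &ov);
        return 0;
}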
index 74a1e4ca5401f9a58e6e705bb2fc8a99fcd04a0f..2386aeb41fe8d74826908d3dca71eb3f97a59ac8 100644 (file)
@@ -73,6 +73,7 @@ struct scsi_disk {
        u8              protection_type;/* Data Integrity Field */
        u8              provisioning_mode;
        unsigned        ATO : 1;        /* state of disk ATO bit */
+       unsigned        cache_override : 1; /* temp override of WCE,RCD */
        unsigned        WCE : 1;        /* state of disk WCE bit */
        unsigned        RCD : 1;        /* state of disk RCD bit, unused */
        unsigned        DPOFUA : 1;     /* state of disk DPOFUA bit */
index 04998f36e5071bdda94ff225f39b0ca3616eeb7b..6174ca4ea27594487d7dc0828d9e21841742b8ed 100644 (file)
@@ -93,14 +93,6 @@ static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
                if (sdt->app_tag == 0xffff)
                        return 0;
 
-               /* Bad ref tag received from disk */
-               if (sdt->ref_tag == 0xffffffff) {
-                       printk(KERN_ERR
-                              "%s: bad phys ref tag on sector %lu\n",
-                              bix->disk_name, (unsigned long)sector);
-                       return -EIO;
-               }
-
                if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
                        printk(KERN_ERR
                               "%s: ref tag error on sector %lu (rcvd %u)\n",
index 0371047c59222251971d823219e3639ac2a53ff6..35faf24c604401eeb560472542cd2ed350289d10 100644 (file)
@@ -57,3 +57,14 @@ config SCSI_UFSHCD_PCI
          If you have a controller with this interface, say Y or M here.
 
          If unsure, say N.
+
+config SCSI_UFSHCD_PLATFORM
+       tristate "Platform bus based UFS Controller support"
+       depends on SCSI_UFSHCD
+       ---help---
+         This selects the UFS host controller support. Select this if
+         you have a UFS controller on the platform bus.
+
+         If you have a controller with this interface, say Y or M here.
+
+         If unsure, say N.
index 9eda0dfbd6df383304de10a0805868ee6485929c..1e5bd48457d636ef0aafd096f8a8038ac0dcbd5e 100644 (file)
@@ -1,3 +1,4 @@
 # UFSHCD makefile
 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
+obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
new file mode 100644 (file)
index 0000000..03319ac
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * Universal Flash Storage Host controller Platform bus based glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/platform_device.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pltfrm_suspend - suspend power management function
+ * @dev: pointer to device handle
+ *
+ *
+ * Returns 0
+ */
+static int ufshcd_pltfrm_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+       /*
+        * TODO:
+        * 1. Call ufshcd_suspend
+        * 2. Do bus specific power management
+        */
+
+       disable_irq(hba->irq);
+
+       return 0;
+}
+
+/**
+ * ufshcd_pltfrm_resume - resume power management function
+ * @dev: pointer to device handle
+ *
+ * Returns 0
+ */
+static int ufshcd_pltfrm_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+       /*
+        * TODO:
+        * 1. Call ufshcd_resume.
+        * 2. Do bus specific wake up
+        */
+
+       enable_irq(hba->irq);
+
+       return 0;
+}
+#else
+#define ufshcd_pltfrm_suspend  NULL
+#define ufshcd_pltfrm_resume   NULL
+#endif
+
+/**
+ * ufshcd_pltfrm_probe - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_probe(struct platform_device *pdev)
+{
+       struct ufs_hba *hba;
+       void __iomem *mmio_base;
+       struct resource *mem_res;
+       struct resource *irq_res;
+       resource_size_t mem_size;
+       int err;
+       struct device *dev = &pdev->dev;
+
+       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem_res) {
+               dev_err(&pdev->dev,
+                       "Memory resource not available\n");
+               err = -ENODEV;
+               goto out_error;
+       }
+
+       mem_size = resource_size(mem_res);
+       if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) {
+               dev_err(&pdev->dev,
+                       "Cannot reserve the memory resource\n");
+               err = -EBUSY;
+               goto out_error;
+       }
+
+       mmio_base = ioremap_nocache(mem_res->start, mem_size);
+       if (!mmio_base) {
+               dev_err(&pdev->dev, "memory map failed\n");
+               err = -ENOMEM;
+               goto out_release_regions;
+       }
+
+       irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!irq_res) {
+               dev_err(&pdev->dev, "IRQ resource not available\n");
+               err = -ENODEV;
+               goto out_iounmap;
+       }
+
+       err = dma_set_coherent_mask(dev, dev->coherent_dma_mask);
+       if (err) {
+               dev_err(&pdev->dev, "set dma mask failed\n");
+               goto out_iounmap;
+       }
+
+       err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start);
+       if (err) {
+               dev_err(&pdev->dev, "Intialization failed\n");
+               goto out_iounmap;
+       }
+
+       platform_set_drvdata(pdev, hba);
+
+       return 0;
+
+out_iounmap:
+       iounmap(mmio_base);
+out_release_regions:
+       release_mem_region(mem_res->start, mem_size);
+out_error:
+       return err;
+}
+
+/**
+ * ufshcd_pltfrm_remove - remove platform driver routine
+ * @pdev: pointer to platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_remove(struct platform_device *pdev)
+{
+       struct resource *mem_res;
+       resource_size_t mem_size;
+       struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+       disable_irq(hba->irq);
+
+       /* Some buggy controllers raise an interrupt after the resources
+        * are removed, so unregister the IRQ handler first and only then
+        * release the resources used by the driver.
+        */
+
+       free_irq(hba->irq, hba);
+       ufshcd_remove(hba);
+       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem_res)
+               dev_err(&pdev->dev, "ufshcd: Memory resource not available\n");
+       else {
+               mem_size = resource_size(mem_res);
+               release_mem_region(mem_res->start, mem_size);
+       }
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+static const struct of_device_id ufs_of_match[] = {
+       { .compatible = "jedec,ufs-1.1"},
+};
+
+static const struct dev_pm_ops ufshcd_dev_pm_ops = {
+       .suspend        = ufshcd_pltfrm_suspend,
+       .resume         = ufshcd_pltfrm_resume,
+};
+
+static struct platform_driver ufshcd_pltfrm_driver = {
+       .probe  = ufshcd_pltfrm_probe,
+       .remove = ufshcd_pltfrm_remove,
+       .driver = {
+               .name   = "ufshcd",
+               .owner  = THIS_MODULE,
+               .pm     = &ufshcd_dev_pm_ops,
+               .of_match_table = ufs_of_match,
+       },
+};
+
+module_platform_driver(ufshcd_pltfrm_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
index 60fd40c4e4c2ada48a268a997099b18f2fa4ef34..c32a478df81b83c75cdbbdac9206236021f9f863 100644 (file)
@@ -478,7 +478,7 @@ static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
                ucd_cmd_ptr->header.dword_2 = 0;
 
                ucd_cmd_ptr->exp_data_transfer_len =
-                       cpu_to_be32(lrbp->cmd->transfersize);
+                       cpu_to_be32(lrbp->cmd->sdb.length);
 
                memcpy(ucd_cmd_ptr->cdb,
                       lrbp->cmd->cmnd,
index 787bd2c22bca44043b615d8b3b4d70663a3d36a9..380387a47b1d86fe4e6a8a7ef22a537bfb85517a 100644 (file)
@@ -526,13 +526,17 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
        }
 
        if (xfer->tx_buf)
-               spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
+               if (xfer->bits_per_word > 8)
+                       spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
+               else
+                       spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
        else
                spi_writel(as, TDR, 0);
 
        dev_dbg(master->dev.parent,
-               "  start pio xfer %p: len %u tx %p rx %p\n",
-               xfer, xfer->len, xfer->tx_buf, xfer->rx_buf);
+               "  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
+               xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
+               xfer->bits_per_word);
 
        /* Enable relevant interrupts */
        spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
@@ -950,21 +954,39 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
 {
        u8              *txp;
        u8              *rxp;
+       u16             *txp16;
+       u16             *rxp16;
        unsigned long   xfer_pos = xfer->len - as->current_remaining_bytes;
 
        if (xfer->rx_buf) {
-               rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
-               *rxp = spi_readl(as, RDR);
+               if (xfer->bits_per_word > 8) {
+                       rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
+                       *rxp16 = spi_readl(as, RDR);
+               } else {
+                       rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
+                       *rxp = spi_readl(as, RDR);
+               }
        } else {
                spi_readl(as, RDR);
        }
-
-       as->current_remaining_bytes--;
+       if (xfer->bits_per_word > 8) {
+               as->current_remaining_bytes -= 2;
+               if (as->current_remaining_bytes < 0)
+                       as->current_remaining_bytes = 0;
+       } else {
+               as->current_remaining_bytes--;
+       }
 
        if (as->current_remaining_bytes) {
                if (xfer->tx_buf) {
-                       txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
-                       spi_writel(as, TDR, *txp);
+                       if (xfer->bits_per_word > 8) {
+                               txp16 = (u16 *)(((u8 *)xfer->tx_buf)
+                                                       + xfer_pos + 2);
+                               spi_writel(as, TDR, *txp16);
+                       } else {
+                               txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
+                               spi_writel(as, TDR, *txp);
+                       }
                } else {
                        spi_writel(as, TDR, 0);
                }
@@ -1378,9 +1400,16 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
                        }
                }
 
+               if (xfer->bits_per_word > 8) {
+                       if (xfer->len % 2) {
+                               dev_dbg(&spi->dev, "buffer length must be 16-bit aligned\n");
+                               return -EINVAL;
+                       }
+               }
+
                /* FIXME implement these protocol options!! */
-               if (xfer->speed_hz) {
-                       dev_dbg(&spi->dev, "no protocol options yet\n");
+               if (xfer->speed_hz < spi->max_speed_hz) {
+                       dev_dbg(&spi->dev, "can't change speed in transfer\n");
                        return -ENOPROTOOPT;
                }
 
index 2e8f24a1fb952cbfd86b161ad50ac3e315d0850b..50b13c9b1ab691fd5defcae44b98dc4bfccb5557 100644 (file)
@@ -784,7 +784,7 @@ static const struct of_device_id davinci_spi_of_match[] = {
        },
        { },
 };
-MODULE_DEVICE_TABLE(of, davini_spi_of_match);
+MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
 
 /**
  * spi_davinci_get_pdata - Get platform data from DTS binding
index 163fd802b7aced2217494397297de76862056ea5..32b7bb111eb6b53d9e96808f7bd5fd0982cffd9d 100644 (file)
@@ -334,7 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
        spi->dev.parent = &master->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
-       spi->cs_gpio = -EINVAL;
+       spi->cs_gpio = -ENOENT;
        device_initialize(&spi->dev);
        return spi;
 }
@@ -1067,8 +1067,11 @@ static int of_spi_register_master(struct spi_master *master)
        nb = of_gpio_named_count(np, "cs-gpios");
        master->num_chipselect = max(nb, (int)master->num_chipselect);
 
-       if (nb < 1)
+       /* Return error only for an incorrectly formed cs-gpios property */
+       if (nb == 0 || nb == -ENOENT)
                return 0;
+       else if (nb < 0)
+               return nb;
 
        cs = devm_kzalloc(&master->dev,
                          sizeof(int) * master->num_chipselect,
@@ -1079,7 +1082,7 @@ static int of_spi_register_master(struct spi_master *master)
                return -ENOMEM;
 
        for (i = 0; i < master->num_chipselect; i++)
-               cs[i] = -EINVAL;
+               cs[i] = -ENOENT;
 
        for (i = 0; i < nb; i++)
                cs[i] = of_get_named_gpio(np, "cs-gpios", i);
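
The spi.c hunk above distinguishes a missing cs-gpios property (0 or -ENOENT, which is not an error) from a malformed one (any other negative count), and initializes unused chip-select slots to -ENOENT. A small user-space sketch of that return-code triage; count_cs_gpios() is a stand-in, not a real API.

/* Return-code triage for an optional, possibly malformed DT property. */
#include <errno.h>
#include <stdio.h>

static int count_cs_gpios(int scenario)
{
        switch (scenario) {
        case 0:  return 3;              /* property present, three entries */
        case 1:  return -ENOENT;        /* property absent: not an error   */
        default: return -EINVAL;        /* property present but malformed  */
        }
}

static int setup(int scenario)
{
        int nb = count_cs_gpios(scenario);

        if (nb == 0 || nb == -ENOENT)
                return 0;               /* nothing to do, keep native chip selects */
        if (nb < 0)
                return nb;              /* only a malformed property is fatal */

        printf("allocating %d chip-select slots, defaulting each to -ENOENT\n", nb);
        return 0;
}

int main(void)
{
        for (int s = 0; s < 3; s++)
                printf("scenario %d -> %d\n", s, setup(s));
        return 0;
}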
index ef2e08e9b5901fdd9267a66896da10f829f931ac..5dc9c4bfa66e4686d5360ab33da61eff1109715a 100644 (file)
@@ -14,7 +14,6 @@
  * 2.4/2.5 port                 David McCullough
  */
 
-#include <asm/dbg.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/serial.h>
index 52a3ecd404219c7365850debe309d06b99a2d8c8..6fa2ae77fffde5a2de2fd14f8568da7fc55f8d1e 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/serial.h>
 #include <linux/serial_core.h>
 
-#include <bcm63xx_clk.h>
 #include <bcm63xx_irq.h>
 #include <bcm63xx_regs.h>
 #include <bcm63xx_io.h>
index 6953dc82850cb278fd99da209e560021d4d0c1e8..a4fdce74f883e4f5868f3efb975ec51347ef2c47 100644 (file)
@@ -60,24 +60,22 @@ static void tty_audit_buf_put(struct tty_audit_buf *buf)
                tty_audit_buf_free(buf);
 }
 
-static void tty_audit_log(const char *description, struct task_struct *tsk,
-                         kuid_t loginuid, unsigned sessionid, int major,
-                         int minor, unsigned char *data, size_t size)
+static void tty_audit_log(const char *description, int major, int minor,
+                         unsigned char *data, size_t size)
 {
        struct audit_buffer *ab;
+       struct task_struct *tsk = current;
+       uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
+       uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
+       u32 sessionid = audit_get_sessionid(tsk);
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
        if (ab) {
                char name[sizeof(tsk->comm)];
-               kuid_t uid = task_uid(tsk);
-
-               audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u "
-                                "major=%d minor=%d comm=", description,
-                                tsk->pid,
-                                from_kuid(&init_user_ns, uid),
-                                from_kuid(&init_user_ns, loginuid),
-                                sessionid,
-                                major, minor);
+
+               audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
+                                " minor=%d comm=", description, tsk->pid, uid,
+                                loginuid, sessionid, major, minor);
                get_task_comm(name, tsk);
                audit_log_untrustedstring(ab, name);
                audit_log_format(ab, " data=");
@@ -90,11 +88,9 @@ static void tty_audit_log(const char *description, struct task_struct *tsk,
  *     tty_audit_buf_push      -       Push buffered data out
  *
  *     Generate an audit message from the contents of @buf, which is owned by
- *     @tsk with @loginuid.  @buf->mutex must be locked.
+ *     the current task.  @buf->mutex must be locked.
  */
-static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid,
-                              unsigned int sessionid,
-                              struct tty_audit_buf *buf)
+static void tty_audit_buf_push(struct tty_audit_buf *buf)
 {
        if (buf->valid == 0)
                return;
@@ -102,24 +98,10 @@ static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid,
                buf->valid = 0;
                return;
        }
-       tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor,
-                     buf->data, buf->valid);
+       tty_audit_log("tty", buf->major, buf->minor, buf->data, buf->valid);
        buf->valid = 0;
 }
 
-/**
- *     tty_audit_buf_push_current      -       Push buffered data out
- *
- *     Generate an audit message from the contents of @buf, which is owned by
- *     the current task.  @buf->mutex must be locked.
- */
-static void tty_audit_buf_push_current(struct tty_audit_buf *buf)
-{
-       kuid_t auid = audit_get_loginuid(current);
-       unsigned int sessionid = audit_get_sessionid(current);
-       tty_audit_buf_push(current, auid, sessionid, buf);
-}
-
 /**
  *     tty_audit_exit  -       Handle a task exit
  *
@@ -130,15 +112,13 @@ void tty_audit_exit(void)
 {
        struct tty_audit_buf *buf;
 
-       spin_lock_irq(&current->sighand->siglock);
        buf = current->signal->tty_audit_buf;
        current->signal->tty_audit_buf = NULL;
-       spin_unlock_irq(&current->sighand->siglock);
        if (!buf)
                return;
 
        mutex_lock(&buf->mutex);
-       tty_audit_buf_push_current(buf);
+       tty_audit_buf_push(buf);
        mutex_unlock(&buf->mutex);
 
        tty_audit_buf_put(buf);
@@ -151,9 +131,8 @@ void tty_audit_exit(void)
  */
 void tty_audit_fork(struct signal_struct *sig)
 {
-       spin_lock_irq(&current->sighand->siglock);
        sig->audit_tty = current->signal->audit_tty;
-       spin_unlock_irq(&current->sighand->siglock);
+       sig->audit_tty_log_passwd = current->signal->audit_tty_log_passwd;
 }
 
 /**
@@ -163,20 +142,21 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch)
 {
        struct tty_audit_buf *buf;
        int major, minor, should_audit;
+       unsigned long flags;
 
-       spin_lock_irq(&current->sighand->siglock);
+       spin_lock_irqsave(&current->sighand->siglock, flags);
        should_audit = current->signal->audit_tty;
        buf = current->signal->tty_audit_buf;
        if (buf)
                atomic_inc(&buf->count);
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
        major = tty->driver->major;
        minor = tty->driver->minor_start + tty->index;
        if (buf) {
                mutex_lock(&buf->mutex);
                if (buf->major == major && buf->minor == minor)
-                       tty_audit_buf_push_current(buf);
+                       tty_audit_buf_push(buf);
                mutex_unlock(&buf->mutex);
                tty_audit_buf_put(buf);
        }
@@ -187,24 +167,20 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch)
 
                auid = audit_get_loginuid(current);
                sessionid = audit_get_sessionid(current);
-               tty_audit_log("ioctl=TIOCSTI", current, auid, sessionid, major,
-                             minor, &ch, 1);
+               tty_audit_log("ioctl=TIOCSTI", major, minor, &ch, 1);
        }
 }
 
 /**
- * tty_audit_push_task -       Flush task's pending audit data
- * @tsk:               task pointer
- * @loginuid:          sender login uid
- * @sessionid:         sender session id
+ * tty_audit_push_current -    Flush current's pending audit data
  *
- * Called with a ref on @tsk held. Try to lock sighand and get a
- * reference to the tty audit buffer if available.
+ * Try to lock sighand and get a reference to the tty audit buffer if available.
  * Flush the buffer or return an appropriate error code.
  */
-int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid)
+int tty_audit_push_current(void)
 {
        struct tty_audit_buf *buf = ERR_PTR(-EPERM);
+       struct task_struct *tsk = current;
        unsigned long flags;
 
        if (!lock_task_sighand(tsk, &flags))
@@ -225,7 +201,7 @@ int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid)
                return PTR_ERR(buf);
 
        mutex_lock(&buf->mutex);
-       tty_audit_buf_push(tsk, loginuid, sessionid, buf);
+       tty_audit_buf_push(buf);
        mutex_unlock(&buf->mutex);
 
        tty_audit_buf_put(buf);
@@ -243,10 +219,11 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
                unsigned icanon)
 {
        struct tty_audit_buf *buf, *buf2;
+       unsigned long flags;
 
        buf = NULL;
        buf2 = NULL;
-       spin_lock_irq(&current->sighand->siglock);
+       spin_lock_irqsave(&current->sighand->siglock, flags);
        if (likely(!current->signal->audit_tty))
                goto out;
        buf = current->signal->tty_audit_buf;
@@ -254,7 +231,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
                atomic_inc(&buf->count);
                goto out;
        }
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
        buf2 = tty_audit_buf_alloc(tty->driver->major,
                                   tty->driver->minor_start + tty->index,
@@ -264,7 +241,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
                return NULL;
        }
 
-       spin_lock_irq(&current->sighand->siglock);
+       spin_lock_irqsave(&current->sighand->siglock, flags);
        if (!current->signal->audit_tty)
                goto out;
        buf = current->signal->tty_audit_buf;
@@ -276,7 +253,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
        atomic_inc(&buf->count);
        /* Fall through */
  out:
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
        if (buf2)
                tty_audit_buf_free(buf2);
        return buf;
@@ -292,10 +269,18 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
 {
        struct tty_audit_buf *buf;
        int major, minor;
+       int audit_log_tty_passwd;
+       unsigned long flags;
 
        if (unlikely(size == 0))
                return;
 
+       spin_lock_irqsave(&current->sighand->siglock, flags);
+       audit_log_tty_passwd = current->signal->audit_tty_log_passwd;
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
+       if (!audit_log_tty_passwd && icanon && !L_ECHO(tty))
+               return;
+
        if (tty->driver->type == TTY_DRIVER_TYPE_PTY
            && tty->driver->subtype == PTY_TYPE_MASTER)
                return;
@@ -309,7 +294,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
        minor = tty->driver->minor_start + tty->index;
        if (buf->major != major || buf->minor != minor
            || buf->icanon != icanon) {
-               tty_audit_buf_push_current(buf);
+               tty_audit_buf_push(buf);
                buf->major = major;
                buf->minor = minor;
                buf->icanon = icanon;
@@ -325,7 +310,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
                data += run;
                size -= run;
                if (buf->valid == N_TTY_BUF_SIZE)
-                       tty_audit_buf_push_current(buf);
+                       tty_audit_buf_push(buf);
        } while (size != 0);
        mutex_unlock(&buf->mutex);
        tty_audit_buf_put(buf);
@@ -339,16 +324,17 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
 void tty_audit_push(struct tty_struct *tty)
 {
        struct tty_audit_buf *buf;
+       unsigned long flags;
 
-       spin_lock_irq(&current->sighand->siglock);
+       spin_lock_irqsave(&current->sighand->siglock, flags);
        if (likely(!current->signal->audit_tty)) {
-               spin_unlock_irq(&current->sighand->siglock);
+               spin_unlock_irqrestore(&current->sighand->siglock, flags);
                return;
        }
        buf = current->signal->tty_audit_buf;
        if (buf)
                atomic_inc(&buf->count);
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
        if (buf) {
                int major, minor;
@@ -357,7 +343,7 @@ void tty_audit_push(struct tty_struct *tty)
                minor = tty->driver->minor_start + tty->index;
                mutex_lock(&buf->mutex);
                if (buf->major == major && buf->minor == minor)
-                       tty_audit_buf_push_current(buf);
+                       tty_audit_buf_push(buf);
                mutex_unlock(&buf->mutex);
                tty_audit_buf_put(buf);
        }
index bff0775e258c9ce30b1615e38492fa123eb49f8a..5174ebac288d65e70f31c13867e44aa98cc18ff4 100644 (file)
@@ -3,6 +3,7 @@
  *
  * Since these may be in userspace, we use (inline) accessors.
  */
+#include <linux/module.h>
 #include <linux/vringh.h>
 #include <linux/virtio_ring.h>
 #include <linux/kernel.h>
@@ -1005,3 +1006,5 @@ int vringh_need_notify_kern(struct vringh *vrh)
        return __vringh_need_notify(vrh, getu16_kern);
 }
 EXPORT_SYMBOL(vringh_need_notify_kern);
+
+MODULE_LICENSE("GPL");
index ddabaa867b0dc38e24ab090d6e62108c88a02a2c..700cac067b4611891af50d90ee083bdbb0f94eb9 100644 (file)
@@ -111,30 +111,16 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
        switch (blank_mode) {
 
        case VESA_NO_BLANKING:
-                       /* Turn on panel */
-                       fbdev->regs->lcd_control |= LCD_CONTROL_GO;
-#ifdef CONFIG_MIPS_PB1100
-                       if (fbdev->panel_idx == 1) {
-                               au_writew(au_readw(PB1100_G_CONTROL)
-                                         | (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
-                       PB1100_G_CONTROL);
-                       }
-#endif
+               /* Turn on panel */
+               fbdev->regs->lcd_control |= LCD_CONTROL_GO;
                au_sync();
                break;
 
        case VESA_VSYNC_SUSPEND:
        case VESA_HSYNC_SUSPEND:
        case VESA_POWERDOWN:
-                       /* Turn off panel */
-                       fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
-#ifdef CONFIG_MIPS_PB1100
-                       if (fbdev->panel_idx == 1) {
-                               au_writew(au_readw(PB1100_G_CONTROL)
-                                         & ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
-                       PB1100_G_CONTROL);
-                       }
-#endif
+               /* Turn off panel */
+               fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
                au_sync();
                break;
        default:
index dd4d9cb862432b4ce6715aba3164a53ffaaa87a5..f03bf501527f64785a3fbbec39963f979d70ad8a 100644 (file)
@@ -141,7 +141,7 @@ config XEN_GRANT_DEV_ALLOC
 
 config SWIOTLB_XEN
        def_bool y
-       depends on PCI
+       depends on PCI && X86
        select SWIOTLB
 
 config XEN_TMEM
index d8cc8127f19c18f9b6a4ea9f937015363b912bd8..6a6bbe4ede92c67afe4c88efd105839f14b3a240 100644 (file)
@@ -167,6 +167,8 @@ static void xen_irq_info_common_init(struct irq_info *info,
        info->cpu = cpu;
 
        evtchn_to_irq[evtchn] = irq;
+
+       irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
 }
 
 static void xen_irq_info_evtchn_init(unsigned irq,
@@ -874,7 +876,6 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                struct irq_info *info = info_for_irq(irq);
                WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
        }
-       irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
 
 out:
        mutex_unlock(&irq_mapping_update_lock);
index d5c25db4398f0244f0f1d0c1f1e20a55f8f51aa9..f71ec125290db7da87355f444f7308826ee1c034 100644 (file)
@@ -243,7 +243,7 @@ void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
        struct ecryptfs_key_sig *key_sig, *key_sig_tmp;
 
        if (crypt_stat->tfm)
-               crypto_free_blkcipher(crypt_stat->tfm);
+               crypto_free_ablkcipher(crypt_stat->tfm);
        if (crypt_stat->hash_tfm)
                crypto_free_hash(crypt_stat->hash_tfm);
        list_for_each_entry_safe(key_sig, key_sig_tmp,
@@ -319,6 +319,22 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
        return i;
 }
 
+struct extent_crypt_result {
+       struct completion completion;
+       int rc;
+};
+
+static void extent_crypt_complete(struct crypto_async_request *req, int rc)
+{
+       struct extent_crypt_result *ecr = req->data;
+
+       if (rc == -EINPROGRESS)
+               return;
+
+       ecr->rc = rc;
+       complete(&ecr->completion);
+}
+
 /**
  * encrypt_scatterlist
  * @crypt_stat: Pointer to the crypt_stat struct to initialize.
@@ -334,11 +350,8 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                               struct scatterlist *src_sg, int size,
                               unsigned char *iv)
 {
-       struct blkcipher_desc desc = {
-               .tfm = crypt_stat->tfm,
-               .info = iv,
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP
-       };
+       struct ablkcipher_request *req = NULL;
+       struct extent_crypt_result ecr;
        int rc = 0;
 
        BUG_ON(!crypt_stat || !crypt_stat->tfm
@@ -349,24 +362,47 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                ecryptfs_dump_hex(crypt_stat->key,
                                  crypt_stat->key_size);
        }
-       /* Consider doing this once, when the file is opened */
+
+       init_completion(&ecr.completion);
+
        mutex_lock(&crypt_stat->cs_tfm_mutex);
-       if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
-               rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
-                                            crypt_stat->key_size);
-               crypt_stat->flags |= ECRYPTFS_KEY_SET;
-       }
-       if (rc) {
-               ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
-                               rc);
+       req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
+       if (!req) {
                mutex_unlock(&crypt_stat->cs_tfm_mutex);
-               rc = -EINVAL;
+               rc = -ENOMEM;
                goto out;
        }
-       ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size);
-       crypto_blkcipher_encrypt_iv(&desc, dest_sg, src_sg, size);
+
+       ablkcipher_request_set_callback(req,
+                       CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+                       extent_crypt_complete, &ecr);
+       /* Consider doing this once, when the file is opened */
+       if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
+               rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
+                                             crypt_stat->key_size);
+               if (rc) {
+                       ecryptfs_printk(KERN_ERR,
+                                       "Error setting key; rc = [%d]\n",
+                                       rc);
+                       mutex_unlock(&crypt_stat->cs_tfm_mutex);
+                       rc = -EINVAL;
+                       goto out;
+               }
+               crypt_stat->flags |= ECRYPTFS_KEY_SET;
+       }
        mutex_unlock(&crypt_stat->cs_tfm_mutex);
+       ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size);
+       ablkcipher_request_set_crypt(req, src_sg, dest_sg, size, iv);
+       rc = crypto_ablkcipher_encrypt(req);
+       if (rc == -EINPROGRESS || rc == -EBUSY) {
+               struct extent_crypt_result *ecr = req->base.data;
+
+               wait_for_completion(&ecr->completion);
+               rc = ecr->rc;
+               INIT_COMPLETION(ecr->completion);
+       }
 out:
+       ablkcipher_request_free(req);
        return rc;
 }
 
@@ -624,35 +660,61 @@ static int decrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                               struct scatterlist *src_sg, int size,
                               unsigned char *iv)
 {
-       struct blkcipher_desc desc = {
-               .tfm = crypt_stat->tfm,
-               .info = iv,
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP
-       };
+       struct ablkcipher_request *req = NULL;
+       struct extent_crypt_result ecr;
        int rc = 0;
 
-       /* Consider doing this once, when the file is opened */
+       BUG_ON(!crypt_stat || !crypt_stat->tfm
+              || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
+       if (unlikely(ecryptfs_verbosity > 0)) {
+               ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
+                               crypt_stat->key_size);
+               ecryptfs_dump_hex(crypt_stat->key,
+                                 crypt_stat->key_size);
+       }
+
+       init_completion(&ecr.completion);
+
        mutex_lock(&crypt_stat->cs_tfm_mutex);
-       rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
-                                    crypt_stat->key_size);
-       if (rc) {
-               ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
-                               rc);
+       req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
+       if (!req) {
                mutex_unlock(&crypt_stat->cs_tfm_mutex);
-               rc = -EINVAL;
+               rc = -ENOMEM;
                goto out;
        }
-       ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size);
-       rc = crypto_blkcipher_decrypt_iv(&desc, dest_sg, src_sg, size);
+
+       ablkcipher_request_set_callback(req,
+                       CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+                       extent_crypt_complete, &ecr);
+       /* Consider doing this once, when the file is opened */
+       if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
+               rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
+                                             crypt_stat->key_size);
+               if (rc) {
+                       ecryptfs_printk(KERN_ERR,
+                                       "Error setting key; rc = [%d]\n",
+                                       rc);
+                       mutex_unlock(&crypt_stat->cs_tfm_mutex);
+                       rc = -EINVAL;
+                       goto out;
+               }
+               crypt_stat->flags |= ECRYPTFS_KEY_SET;
+       }
        mutex_unlock(&crypt_stat->cs_tfm_mutex);
-       if (rc) {
-               ecryptfs_printk(KERN_ERR, "Error decrypting; rc = [%d]\n",
-                               rc);
-               goto out;
+       ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size);
+       ablkcipher_request_set_crypt(req, src_sg, dest_sg, size, iv);
+       rc = crypto_ablkcipher_decrypt(req);
+       if (rc == -EINPROGRESS || rc == -EBUSY) {
+               struct extent_crypt_result *ecr = req->base.data;
+
+               wait_for_completion(&ecr->completion);
+               rc = ecr->rc;
+               INIT_COMPLETION(ecr->completion);
        }
-       rc = size;
 out:
+       ablkcipher_request_free(req);
        return rc;
+
 }
 
 /**
@@ -746,8 +808,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
                                                    crypt_stat->cipher, "cbc");
        if (rc)
                goto out_unlock;
-       crypt_stat->tfm = crypto_alloc_blkcipher(full_alg_name, 0,
-                                                CRYPTO_ALG_ASYNC);
+       crypt_stat->tfm = crypto_alloc_ablkcipher(full_alg_name, 0, 0);
        kfree(full_alg_name);
        if (IS_ERR(crypt_stat->tfm)) {
                rc = PTR_ERR(crypt_stat->tfm);
@@ -757,7 +818,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
                                crypt_stat->cipher);
                goto out_unlock;
        }
-       crypto_blkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       crypto_ablkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
        rc = 0;
 out_unlock:
        mutex_unlock(&crypt_stat->cs_tfm_mutex);
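The eCryptfs hunks above switch from the synchronous blkcipher calls to the ablkcipher interface, so an encrypt or decrypt may now return -EINPROGRESS or -EBUSY and finish later from the driver's completion callback. Below is a minimal standalone sketch of that submit-and-wait pattern (editorial, not part of the patch; the sketch_* names are hypothetical, while the crypto calls are the 3.x-era ablkcipher API used in the hunks above).

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

struct sketch_crypt_result {
        struct completion completion;
        int rc;
};

/* callback run by the crypto driver when an async request finishes */
static void sketch_crypt_complete(struct crypto_async_request *req, int rc)
{
        struct sketch_crypt_result *res = req->data;

        if (rc == -EINPROGRESS)     /* backlogged request now queued, not done */
                return;
        res->rc = rc;
        complete(&res->completion);
}

/* submit one encrypt request and sleep until the callback reports a result */
static int sketch_encrypt(struct crypto_ablkcipher *tfm,
                          struct scatterlist *src, struct scatterlist *dst,
                          unsigned int len, u8 *iv)
{
        struct ablkcipher_request *req;
        struct sketch_crypt_result res;
        int rc;

        init_completion(&res.completion);
        req = ablkcipher_request_alloc(tfm, GFP_NOFS);
        if (!req)
                return -ENOMEM;
        ablkcipher_request_set_callback(req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        sketch_crypt_complete, &res);
        ablkcipher_request_set_crypt(req, src, dst, len, iv);
        rc = crypto_ablkcipher_encrypt(req);
        if (rc == -EINPROGRESS || rc == -EBUSY) {
                wait_for_completion(&res.completion);
                rc = res.rc;
        }
        ablkcipher_request_free(req);
        return rc;
}

The callback ignores -EINPROGRESS because, with CRYPTO_TFM_REQ_MAY_BACKLOG set, that value only signals that a backlogged request has been moved onto the queue, not that it has completed.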
index dd299b389d4e4fc36b7694dbf6acac0dcf9742f7..f622a733f7adc3ff1778e4f74790db507c61c824 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/nsproxy.h>
 #include <linux/backing-dev.h>
 #include <linux/ecryptfs.h>
+#include <linux/crypto.h>
 
 #define ECRYPTFS_DEFAULT_IV_BYTES 16
 #define ECRYPTFS_DEFAULT_EXTENT_SIZE 4096
@@ -233,7 +234,7 @@ struct ecryptfs_crypt_stat {
        size_t extent_shift;
        unsigned int extent_mask;
        struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
-       struct crypto_blkcipher *tfm;
+       struct crypto_ablkcipher *tfm;
        struct crypto_hash *hash_tfm; /* Crypto context for generating
                                       * the initialization vectors */
        unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE];
index 57ae9c8c66bfc6d98ae96643874710cdc3de321a..85e40d1c0a8fd64b358447ee09b26f906102ee1d 100644 (file)
@@ -2740,7 +2740,7 @@ static int do_last(struct nameidata *nd, struct path *path,
                if (error)
                        return error;
 
-               audit_inode(name, dir, 0);
+               audit_inode(name, dir, LOOKUP_PARENT);
                error = -EISDIR;
                /* trailing slashes? */
                if (nd->last.name[nd->last.len])
index 8ae5abfe6ba24fd699aed2e156a2b2f815695c04..27d74a2945151cfbdf76e41cd381b90a0efe4d09 100644 (file)
@@ -279,6 +279,7 @@ do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, str
 {
        struct svc_fh *current_fh = &cstate->current_fh;
        __be32 status;
+       int accmode = 0;
 
        /* We don't know the target directory, and therefore can not
        * set the change info
@@ -290,9 +291,19 @@ do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, str
 
        open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
                (open->op_iattr.ia_size == 0);
+       /*
+        * In the delegation case, the client is telling us about an
+        * open that it *already* performed locally, some time ago.  We
+        * should let it succeed now if possible.
+        *
+        * In the case of a CLAIM_FH open, on the other hand, the client
+        * may be counting on us to enforce permissions (the Linux 4.1
+        * client uses this for normal opens, for example).
+        */
+       if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
+               accmode = NFSD_MAY_OWNER_OVERRIDE;
 
-       status = do_open_permission(rqstp, current_fh, open,
-                                   NFSD_MAY_OWNER_OVERRIDE);
+       status = do_open_permission(rqstp, current_fh, open, accmode);
 
        return status;
 }
index 899ca26dd194d73f43234ba08447f1533428eff6..4e9a21db867ae60afcc14a6ca362e978495c4a5c 100644 (file)
@@ -146,7 +146,7 @@ out_no_tfm:
  * then disable recovery tracking.
  */
 static void
-legacy_recdir_name_error(int error)
+legacy_recdir_name_error(struct nfs4_client *clp, int error)
 {
        printk(KERN_ERR "NFSD: unable to generate recoverydir "
                        "name (%d).\n", error);
@@ -159,9 +159,7 @@ legacy_recdir_name_error(int error)
        if (error == -ENOENT) {
                printk(KERN_ERR "NFSD: disabling legacy clientid tracking. "
                        "Reboot recovery will not function correctly!\n");
-
-               /* the argument is ignored by the legacy exit function */
-               nfsd4_client_tracking_exit(NULL);
+               nfsd4_client_tracking_exit(clp->net);
        }
 }
 
@@ -184,7 +182,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
 
        status = nfs4_make_rec_clidname(dname, &clp->cl_name);
        if (status)
-               return legacy_recdir_name_error(status);
+               return legacy_recdir_name_error(clp, status);
 
        status = nfs4_save_creds(&original_cred);
        if (status < 0)
@@ -341,7 +339,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
 
        status = nfs4_make_rec_clidname(dname, &clp->cl_name);
        if (status)
-               return legacy_recdir_name_error(status);
+               return legacy_recdir_name_error(clp, status);
 
        status = mnt_want_write_file(nn->rec_file);
        if (status)
@@ -601,7 +599,7 @@ nfsd4_check_legacy_client(struct nfs4_client *clp)
 
        status = nfs4_make_rec_clidname(dname, &clp->cl_name);
        if (status) {
-               legacy_recdir_name_error(status);
+               legacy_recdir_name_error(clp, status);
                return status;
        }
 
index d0be29fa94cffba4e11355b20a270a11e608adef..6c80083a984fc192ebc73bad2a2edc86b4c25e11 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 
 #include <asm/ioctls.h>
 
@@ -857,6 +858,22 @@ fput_and_out:
        return ret;
 }
 
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE6(fanotify_mark,
+                               int, fanotify_fd, unsigned int, flags,
+                               __u32, mask0, __u32, mask1, int, dfd,
+                               const char  __user *, pathname)
+{
+       return sys_fanotify_mark(fanotify_fd, flags,
+#ifdef __BIG_ENDIAN
+                               ((__u64)mask1 << 32) | mask0,
+#else
+                               ((__u64)mask0 << 32) | mask1,
+#endif
+                                dfd, pathname);
+}
+#endif
+
 /*
  * fanotify_user_setup - Our initialization function.  Note that we cannot return
  * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
index e1a7779dd3cb18e72276569f8d2b0ce77e53a42c..f373bde8f545da481ba0a7caa873271dd599b30d 100644 (file)
@@ -49,8 +49,11 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
                return (unsigned long) -EINVAL;
 
        offset += ROMFS_I(inode)->i_dataoffset;
-       if (offset > mtd->size - len)
+       if (offset >= mtd->size)
                return (unsigned long) -EINVAL;
+       /* the mapping mustn't extend beyond the EOF */
+       if ((offset + len) > mtd->size)
+               len = mtd->size - offset;
 
        ret = mtd_get_unmapped_area(mtd, len, offset, flags);
        if (ret == -EOPNOTSUPP)
index 61196592152e09a3913d1989cd847a8f9457664b..63d17ee9eb488c336ad55521f346dc1c2703c4c4 100644 (file)
@@ -316,6 +316,7 @@ struct drm_ioctl_desc {
        int flags;
        drm_ioctl_t *func;
        unsigned int cmd_drv;
+       const char *name;
 };
 
 /**
@@ -324,7 +325,7 @@ struct drm_ioctl_desc {
  */
 
 #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)                        \
-       [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl}
+       [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
 
 struct drm_magic_entry {
        struct list_head head;
index 8230b46fdd73ff916324a9d4d0dd064a13207502..471f276ce8f741638ad6ced787aa7f943457e751 100644 (file)
@@ -50,13 +50,14 @@ struct drm_fb_helper_surface_size {
 
 /**
  * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
- * @gamma_set: - Set the given gamma lut register on the given crtc.
- * @gamma_get: - Read the given gamma lut register on the given crtc, used to
- *              save the current lut when force-restoring the fbdev for e.g.
- *              kdbg.
- * @fb_probe: - Driver callback to allocate and initialize the fbdev info
- *             structure. Futhermore it also needs to allocate the drm
- *             framebuffer used to back the fbdev.
+ * @gamma_set: Set the given gamma lut register on the given crtc.
+ * @gamma_get: Read the given gamma lut register on the given crtc, used to
+ *             save the current lut when force-restoring the fbdev for e.g.
+ *             kdbg.
+ * @fb_probe: Driver callback to allocate and initialize the fbdev info
+ *            structure. Furthermore it also needs to allocate the drm
+ *            framebuffer used to back the fbdev.
+ * @initial_config: Setup an initial fbdev display configuration
  *
  * Driver callbacks used by the fbdev emulation helper library.
  */
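As a reading aid for the callbacks documented above, a hypothetical driver-side wiring might look like the sketch below (editorial; the sketch_* names and empty bodies are placeholders, not taken from any real driver, and the optional @initial_config hook is left unset).

#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>

static void sketch_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                             u16 blue, int regno)
{
        /* program one gamma LUT entry on the CRTC here */
}

static void sketch_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
                             u16 *blue, int regno)
{
        /* hand back the currently programmed LUT entry */
        *red = *green = *blue = 0;
}

static int sketch_fb_probe(struct drm_fb_helper *helper,
                           struct drm_fb_helper_surface_size *sizes)
{
        /* allocate the fbdev info and the drm framebuffer backing it */
        return 0;
}

static struct drm_fb_helper_funcs sketch_fb_helper_funcs = {
        .gamma_set = sketch_gamma_set,
        .gamma_get = sketch_gamma_get,
        .fb_probe  = sketch_fb_probe,
};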
index 5a6d718adf34825eb11bc4dd20cbcb8f1dced0ad..b20b03852f21dd722e405c4685f2c9851588ed98 100644 (file)
@@ -84,8 +84,13 @@ extern int audit_classify_arch(int arch);
 #define        AUDIT_TYPE_CHILD_DELETE 3       /* a child being deleted */
 #define        AUDIT_TYPE_CHILD_CREATE 4       /* a child being created */
 
+/* maximum number of args that audit_socketcall can process */
+#define AUDITSC_ARGS           6
+
 struct filename;
 
+extern void audit_log_session_info(struct audit_buffer *ab);
+
 #ifdef CONFIG_AUDITSYSCALL
 /* These are defined in auditsc.c */
                                /* Public API */
@@ -120,7 +125,7 @@ static inline void audit_syscall_entry(int arch, int major, unsigned long a0,
                                       unsigned long a1, unsigned long a2,
                                       unsigned long a3)
 {
-       if (unlikely(!audit_dummy_context()))
+       if (unlikely(current->audit_context))
                __audit_syscall_entry(arch, major, a0, a1, a2, a3);
 }
 static inline void audit_syscall_exit(void *pt_regs)
@@ -185,12 +190,10 @@ static inline int audit_get_sessionid(struct task_struct *tsk)
        return tsk->sessionid;
 }
 
-extern void audit_log_task_context(struct audit_buffer *ab);
-extern void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk);
 extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
 extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
 extern int __audit_bprm(struct linux_binprm *bprm);
-extern void __audit_socketcall(int nargs, unsigned long *args);
+extern int __audit_socketcall(int nargs, unsigned long *args);
 extern int __audit_sockaddr(int len, void *addr);
 extern void __audit_fd_pair(int fd1, int fd2);
 extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
@@ -224,10 +227,11 @@ static inline int audit_bprm(struct linux_binprm *bprm)
                return __audit_bprm(bprm);
        return 0;
 }
-static inline void audit_socketcall(int nargs, unsigned long *args)
+static inline int audit_socketcall(int nargs, unsigned long *args)
 {
        if (unlikely(!audit_dummy_context()))
-               __audit_socketcall(nargs, args);
+               return __audit_socketcall(nargs, args);
+       return 0;
 }
 static inline int audit_sockaddr(int len, void *addr)
 {
@@ -340,11 +344,6 @@ static inline int audit_get_sessionid(struct task_struct *tsk)
 {
        return -1;
 }
-static inline void audit_log_task_context(struct audit_buffer *ab)
-{ }
-static inline void audit_log_task_info(struct audit_buffer *ab,
-                                      struct task_struct *tsk)
-{ }
 static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
 { }
 static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
@@ -354,8 +353,10 @@ static inline int audit_bprm(struct linux_binprm *bprm)
 {
        return 0;
 }
-static inline void audit_socketcall(int nargs, unsigned long *args)
-{ }
+static inline int audit_socketcall(int nargs, unsigned long *args)
+{
+       return 0;
+}
 static inline void audit_fd_pair(int fd1, int fd2)
 { }
 static inline int audit_sockaddr(int len, void *addr)
@@ -390,6 +391,11 @@ static inline void audit_ptrace(struct task_struct *t)
 #define audit_signals 0
 #endif /* CONFIG_AUDITSYSCALL */
 
+static inline bool audit_loginuid_set(struct task_struct *tsk)
+{
+       return uid_valid(audit_get_loginuid(tsk));
+}
+
 #ifdef CONFIG_AUDIT
 /* These are defined in audit.c */
                                /* Public API */
@@ -429,14 +435,17 @@ static inline void            audit_log_secctx(struct audit_buffer *ab, u32 secid)
 { }
 #endif
 
+extern int audit_log_task_context(struct audit_buffer *ab);
+extern void audit_log_task_info(struct audit_buffer *ab,
+                               struct task_struct *tsk);
+
 extern int                 audit_update_lsm_rules(void);
 
                                /* Private API (for audit.c only) */
-extern int audit_filter_user(void);
+extern int audit_filter_user(int type);
 extern int audit_filter_type(int type);
 extern int  audit_receive_filter(int type, int pid, int seq,
-                               void *data, size_t datasz, kuid_t loginuid,
-                               u32 sessionid, u32 sid);
+                               void *data, size_t datasz);
 extern int audit_enabled;
 #else /* CONFIG_AUDIT */
 static inline __printf(4, 5)
@@ -476,6 +485,13 @@ static inline void audit_log_link_denied(const char *string,
 { }
 static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
 { }
+static inline int audit_log_task_context(struct audit_buffer *ab)
+{
+       return 0;
+}
+static inline void audit_log_task_info(struct audit_buffer *ab,
+                                      struct task_struct *tsk)
+{ }
 #define audit_enabled 0
 #endif /* CONFIG_AUDIT */
 static inline void audit_log_string(struct audit_buffer *ab, const char *buf)
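Because audit_socketcall() now reports failure, a caller is expected to propagate the error rather than lose the record silently. A hypothetical caller (editorial sketch only, not the actual net/socket.c change, which is not shown in this hunk) could look like:

#include <linux/audit.h>

/* sketch_socketcall is a placeholder for the syscall entry point */
static long sketch_socketcall(int nargs, unsigned long *args)
{
        int err = audit_socketcall(nargs, args);

        if (err)
                return err;     /* e.g. -ENOMEM while recording the args */

        /* ... dispatch to the requested socket operation ... */
        return 0;
}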
index d53c35352ea9d14aa4a9c974fa559ab95f88e187..7f0c1dd0907904a831ab0f65fdcbb0d67a068b50 100644 (file)
@@ -673,6 +673,8 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long);
 asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
                                                 struct compat_timespec __user *interval);
 
+asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
+                                           int, const char __user *);
 #else
 
 #define is_compat_task() (0)
index 3c86faa597980fbad7e17d65d7668d1da5d40e60..8f0406230a0a4890b4247e60215795c11e0a30d3 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/completion.h>
 #include <linux/hrtimer.h>
 
-#define CPUIDLE_STATE_MAX      8
+#define CPUIDLE_STATE_MAX      10
 #define CPUIDLE_NAME_LEN       16
 #define CPUIDLE_DESC_LEN       32
 
index 1e483fa7afb41b681ba802061d4ae5ab4ad4676a..3cd32478f2fd095817c7fb0b9f6558d336020bde 100644 (file)
@@ -79,11 +79,26 @@ typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
 typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
                            struct bio_vec *biovec, int max_size);
 
+/*
+ * These iteration functions are typically used to check (and combine)
+ * properties of underlying devices.
+ * E.g. Does at least one underlying device support flush?
+ *      Does any underlying device not support WRITE_SAME?
+ *
+ * The callout function is called once for each contiguous section of
+ * an underlying device.  State can be maintained in *data.
+ * Return non-zero to stop iterating through any further devices.
+ */
 typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
                                           struct dm_dev *dev,
                                           sector_t start, sector_t len,
                                           void *data);
 
+/*
+ * This function must iterate through each section of device used by the
+ * target until it encounters a non-zero return code, which it then returns.
+ * Returns zero if no callout returned non-zero.
+ */
 typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
                                      iterate_devices_callout_fn fn,
                                      void *data);
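To make the callout contract described above concrete: a typical capability check supplies a callout that returns non-zero as soon as one underlying device lacks the property, then treats a zero result from iterate_devices as "all devices are capable". This is an editorial sketch; the sketch_* names are hypothetical and discard support is only an example property.

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/* non-zero => this slice of the table sits on a device without discard */
static int sketch_device_not_discard_capable(struct dm_target *ti,
                                             struct dm_dev *dev,
                                             sector_t start, sector_t len,
                                             void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && !blk_queue_discard(q);
}

static bool sketch_target_supports_discard(struct dm_target *ti)
{
        /* true only if no underlying device reported "not capable" */
        return ti->type->iterate_devices &&
               !ti->type->iterate_devices(ti,
                                          sketch_device_not_discard_capable,
                                          NULL);
}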
index f83e17a40e8b848185b649836274b57acdc332c1..99d0fbcbaf79eff04c8ab56531e02aa46e953977 100644 (file)
@@ -90,6 +90,8 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  *            not set this, then the ftrace infrastructure will add recursion
  *            protection for the caller.
  * STUB   - The ftrace_ops is just a place holder.
+ * INITIALIZED - The ftrace_ops has already been initialized (set the first
+ *            time register_ftrace_function() is called, which initializes the ops)
  */
 enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
@@ -100,6 +102,7 @@ enum {
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 5,
        FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 6,
        FTRACE_OPS_FL_STUB                      = 1 << 7,
+       FTRACE_OPS_FL_INITIALIZED               = 1 << 8,
 };
 
 struct ftrace_ops {
@@ -110,6 +113,7 @@ struct ftrace_ops {
 #ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_hash              *notrace_hash;
        struct ftrace_hash              *filter_hash;
+       struct mutex                    regex_lock;
 #endif
 };
 
index 34e00fb49becf506865c18ffafefe23c413ea5c4..4372658c73ae5eddd3dd6570d4dfac8c5d8825c8 100644 (file)
@@ -293,6 +293,7 @@ struct ftrace_event_file {
         * caching and such. Which is mostly OK ;-)
         */
        unsigned long           flags;
+       atomic_t                sm_ref; /* soft-mode reference counter */
 };
 
 #define __TRACE_EVENT_FLAGS(name, value)                               \
index af1b86d46f6e714e4feb5aeab303fb76f47a1234..0c48991b0402d0d93110b8a4fa9cd42d615279c2 100644 (file)
@@ -515,7 +515,7 @@ struct hid_device {                                                 /* device report descriptor */
        struct dentry *debug_rdesc;
        struct dentry *debug_events;
        struct list_head debug_list;
-       struct mutex debug_list_lock;
+       spinlock_t  debug_list_lock;
        wait_queue_head_t debug_wait;
 };
 
index 2b85c521f7375452ed4c0fd54d7cba37b60f4dde..c12916248469f577795d28e96c4f86be747fbf5b 100644 (file)
 #define PCI_DEVICE_ID_TIGON3_5705M_2   0x165e
 #define PCI_DEVICE_ID_NX2_57712                0x1662
 #define PCI_DEVICE_ID_NX2_57712E       0x1663
+#define PCI_DEVICE_ID_NX2_57712_MF     0x1663
 #define PCI_DEVICE_ID_TIGON3_5714      0x1668
 #define PCI_DEVICE_ID_TIGON3_5714S     0x1669
 #define PCI_DEVICE_ID_TIGON3_5780      0x166a
 #define PCI_DEVICE_ID_TIGON3_5780S     0x166b
 #define PCI_DEVICE_ID_TIGON3_5705F     0x166e
+#define PCI_DEVICE_ID_NX2_57712_VF     0x166f
 #define PCI_DEVICE_ID_TIGON3_5754M     0x1672
 #define PCI_DEVICE_ID_TIGON3_5755M     0x1673
 #define PCI_DEVICE_ID_TIGON3_5756      0x1674
 #define PCI_DEVICE_ID_TIGON3_5787      0x169b
 #define PCI_DEVICE_ID_TIGON3_5788      0x169c
 #define PCI_DEVICE_ID_TIGON3_5789      0x169d
+#define PCI_DEVICE_ID_NX2_57840_4_10   0x16a1
+#define PCI_DEVICE_ID_NX2_57840_2_20   0x16a2
+#define PCI_DEVICE_ID_NX2_57840_MF     0x16a4
 #define PCI_DEVICE_ID_NX2_57800_MF     0x16a5
 #define PCI_DEVICE_ID_TIGON3_5702X     0x16a6
 #define PCI_DEVICE_ID_TIGON3_5703X     0x16a7
 #define PCI_DEVICE_ID_TIGON3_5704S     0x16a8
 #define PCI_DEVICE_ID_NX2_57800_VF     0x16a9
 #define PCI_DEVICE_ID_NX2_5706S                0x16aa
-#define PCI_DEVICE_ID_NX2_57840_MF     0x16a4
 #define PCI_DEVICE_ID_NX2_5708S                0x16ac
 #define PCI_DEVICE_ID_NX2_57840_VF     0x16ad
 #define PCI_DEVICE_ID_NX2_57810_MF     0x16ae
index caa8f4d0186b742c3effc99c296ac99724f2f4d1..178a8d909f14a3dcdcbc0ce255572975c8b3b221 100644 (file)
@@ -593,6 +593,7 @@ struct signal_struct {
 #endif
 #ifdef CONFIG_AUDIT
        unsigned audit_tty;
+       unsigned audit_tty_log_passwd;
        struct tty_audit_buf *tty_audit_buf;
 #endif
 #ifdef CONFIG_CGROUPS
index 733eb5ee31c5446cbe6a11bb0b3603a6534edf49..6ff26c8db7b923853527cee3ee1ffff1ca55bdd6 100644 (file)
@@ -57,7 +57,7 @@ extern struct bus_type spi_bus_type;
  * @modalias: Name of the driver to use with this device, or an alias
  *     for that name.  This appears in the sysfs "modalias" attribute
  *     for driver coldplugging, and in uevents used for hotplugging
- * @cs_gpio: gpio number of the chipselect line (optional, -EINVAL when
+ * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT
  *     when not using a GPIO line)
  *
  * A @spi_device is used to interchange data between an SPI slave
@@ -266,7 +266,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  *     queue so the subsystem notifies the driver that it may relax the
  *     hardware by issuing this call
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
- *     number. Any individual value may be -EINVAL for CS lines that
+ *     number. Any individual value may be -ENOENT for CS lines that
  *     are not GPIOs (driven by the SPI controller itself).
  *
  * Each SPI master controller can communicate with one or more @spi_device
index 7e92bd86a808cfbdf31bf38f1165ab30fe93c2ff..8780bd2a272ab6672c8f6c32340b7d1ab2831acb 100644 (file)
@@ -575,8 +575,7 @@ extern void tty_audit_exit(void);
 extern void tty_audit_fork(struct signal_struct *sig);
 extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
 extern void tty_audit_push(struct tty_struct *tty);
-extern int tty_audit_push_task(struct task_struct *tsk,
-                              kuid_t loginuid, u32 sessionid);
+extern int tty_audit_push_current(void);
 #else
 static inline void tty_audit_add_data(struct tty_struct *tty,
                unsigned char *data, size_t size, unsigned icanon)
@@ -594,8 +593,7 @@ static inline void tty_audit_fork(struct signal_struct *sig)
 static inline void tty_audit_push(struct tty_struct *tty)
 {
 }
-static inline int tty_audit_push_task(struct task_struct *tsk,
-                                     kuid_t loginuid, u32 sessionid)
+static inline int tty_audit_push_current(void)
 {
        return 0;
 }
index ef937b56f9b54c44e4003d762eaa7105534077da..e2c1e66d58ae6c810e4ca18d4e62d9f78b2c3e4b 100644 (file)
@@ -118,7 +118,7 @@ struct ex_phy {
 
        enum ex_phy_state phy_state;
 
-       enum sas_dev_type attached_dev_type;
+       enum sas_device_type attached_dev_type;
        enum sas_linkrate linkrate;
 
        u8   attached_sata_host:1;
@@ -195,7 +195,7 @@ enum {
 
 struct domain_device {
        spinlock_t done_lock;
-        enum sas_dev_type dev_type;
+       enum sas_device_type dev_type;
 
         enum sas_linkrate linkrate;
         enum sas_linkrate min_linkrate;
index a6026da25f3e0bf5787d479e1511da85c1175299..25ac6283b9c753a6af74055d590a1ed613004be1 100644 (file)
@@ -107,7 +107,7 @@ enum osd_attributes_mode {
  *             int exponent: 04;
  *     }
  */
-typedef __be32 __bitwise osd_cdb_offset;
+typedef __be32 osd_cdb_offset;
 
 enum {
        OSD_OFFSET_UNUSED = 0xFFFFFFFF,
index be3eb0bf1ac0ef1213343a173787c645d16e21ef..0d2607d1238753f951e43e6b278735971413423c 100644 (file)
@@ -90,16 +90,18 @@ enum sas_oob_mode {
 };
 
 /* See sas_discover.c if you plan on changing these */
-enum sas_dev_type {
-       NO_DEVICE   = 0,          /* protocol */
-       SAS_END_DEV = 1,          /* protocol */
-       EDGE_DEV    = 2,          /* protocol */
-       FANOUT_DEV  = 3,          /* protocol */
-       SAS_HA      = 4,
-       SATA_DEV    = 5,
-       SATA_PM     = 7,
-       SATA_PM_PORT= 8,
-       SATA_PENDING  = 9,
+enum sas_device_type {
+       /* these are SAS protocol defined (attached device type field) */
+       SAS_PHY_UNUSED = 0,
+       SAS_END_DEVICE = 1,
+       SAS_EDGE_EXPANDER_DEVICE = 2,
+       SAS_FANOUT_EXPANDER_DEVICE = 3,
+       /* these are internal to libsas */
+       SAS_HA = 4,
+       SAS_SATA_DEV = 5,
+       SAS_SATA_PM = 7,
+       SAS_SATA_PM_PORT = 8,
+       SAS_SATA_PENDING = 9,
 };
 
 enum sas_protocol {
index ff71a56546845f2e29266ecf466ecd6d7174f38a..00f41aeeecf54f321b74599b99dc5f97a5af0bca 100644 (file)
@@ -32,8 +32,8 @@
 
 static inline int dev_is_sata(struct domain_device *dev)
 {
-       return dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
-              dev->dev_type == SATA_PM_PORT || dev->dev_type == SATA_PENDING;
+       return dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
+              dev->dev_type == SAS_SATA_PM_PORT || dev->dev_type == SAS_SATA_PENDING;
 }
 
 int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy);
index a7f9cba275e98405b7da60143361b87cc4fcc231..cc645876d14737db54ea12aeb9959b7a00dfe9c2 100644 (file)
@@ -394,10 +394,18 @@ extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                        int data_direction, void *buffer, unsigned bufflen,
                        unsigned char *sense, int timeout, int retries,
                        int flag, int *resid);
-extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
-                           int data_direction, void *buffer, unsigned bufflen,
-                           struct scsi_sense_hdr *, int timeout, int retries,
-                           int *resid);
+extern int scsi_execute_req_flags(struct scsi_device *sdev,
+       const unsigned char *cmd, int data_direction, void *buffer,
+       unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
+       int retries, int *resid, int flags);
+static inline int scsi_execute_req(struct scsi_device *sdev,
+       const unsigned char *cmd, int data_direction, void *buffer,
+       unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
+       int retries, int *resid)
+{
+       return scsi_execute_req_flags(sdev, cmd, data_direction, buffer,
+               bufflen, sshdr, timeout, retries, resid, 0);
+}
 extern void sdev_disable_disk_events(struct scsi_device *sdev);
 extern void sdev_enable_disk_events(struct scsi_device *sdev);
 
index 4a58cca2ecc182f1d44912297cc294b3ecce4760..d0f1602985e7efe6d6ab40dd67fbcc04e6a32276 100644 (file)
@@ -471,14 +471,10 @@ iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess);
 extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost);
 extern int iscsi_flashnode_bus_match(struct device *dev,
                                     struct device_driver *drv);
-extern int iscsi_is_flashnode_conn_dev(struct device *dev, void *data);
-
 extern struct device *
 iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
                          int (*fn)(struct device *dev, void *data));
-
 extern struct device *
-iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
-                         void *data,
-                         int (*fn)(struct device *dev, void *data));
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess);
+
 #endif
index 9b8e08879cfc9d7f7038245cf12e9b162f0f7f4e..0bd71e2702e3181e115ba8aa8e2b49a78ac21da7 100644 (file)
@@ -10,13 +10,6 @@ struct scsi_transport_template;
 struct sas_rphy;
 struct request;
 
-enum sas_device_type {
-       SAS_PHY_UNUSED = 0,
-       SAS_END_DEVICE = 1,
-       SAS_EDGE_EXPANDER_DEVICE = 2,
-       SAS_FANOUT_EXPANDER_DEVICE = 3,
-};
-
 static inline int sas_protocol_ata(enum sas_protocol proto)
 {
        return ((proto & SAS_PROTOCOL_SATA) ||
index 28c65e1ada21e8f632422055d70d66789d63a3d5..e11e179420a11fb07bce24e48e960bfdba9671a4 100644 (file)
 #define DECLARE_TLV_DB_LINEAR(name, min_dB, max_dB)    \
        unsigned int name[] = { TLV_DB_LINEAR_ITEM(min_dB, max_dB) }
 
-/* dB range container */
+/* dB range container:
+ * Items in a dB range container must be ordered by their values and by their
+ * dB values. This implies that larger values must correspond with larger
+ * dB values (which is also required for all other mixer controls).
+ */
 /* Each item is: <min> <max> <TLV> */
 #define TLV_DB_RANGE_ITEM(...) \
        TLV_ITEM(SNDRV_CTL_TLVT_DB_RANGE, __VA_ARGS__)
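An editorial example of a range container that satisfies the ordering rule stated above: the control values (0-7, then 8-15) and the corresponding dB spans both increase monotonically. The sketch_vol_tlv name is hypothetical.

#include <sound/tlv.h>

/* two sub-ranges, ordered by control value and by dB value */
static const unsigned int sketch_vol_tlv[] = {
        TLV_DB_RANGE_HEAD(2),
        0, 7,  TLV_DB_SCALE_ITEM(-5000, 500, 0),   /* -50.00 .. -15.00 dB */
        8, 15, TLV_DB_SCALE_ITEM(-1000, 100, 0),   /* -10.00 ..  -3.00 dB */
};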
index 9f096f1c0907b7056bcbe396783fdb2237724a65..75cef3fd97add201693b6bf21f4e2f1754c96f43 100644 (file)
 #define AUDIT_OBJ_TYPE 21
 #define AUDIT_OBJ_LEV_LOW      22
 #define AUDIT_OBJ_LEV_HIGH     23
+#define AUDIT_LOGINUID_SET     24
 
                                /* These are ONLY useful when checking
                                 * at syscall exit time (AUDIT_AT_EXIT). */
@@ -369,7 +370,8 @@ struct audit_status {
 };
 
 struct audit_tty_status {
-       __u32           enabled; /* 1 = enabled, 0 = disabled */
+       __u32           enabled;        /* 1 = enabled, 0 = disabled */
+       __u32           log_passwd;     /* 1 = enabled, 0 = disabled */
 };
 
 /* audit_rule_data supports filter rules with both integer and string
index 0b084fa44b1f21f3cc73508676e79d58b08f1896..21c7fa615bd3107b0c28a4da499ea3ee7361d695 100644 (file)
@@ -49,6 +49,8 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
 
 #include <linux/audit.h>
 
@@ -265,7 +267,6 @@ void audit_log_lost(const char *message)
 }
 
 static int audit_log_config_change(char *function_name, int new, int old,
-                                  kuid_t loginuid, u32 sessionid, u32 sid,
                                   int allow_changes)
 {
        struct audit_buffer *ab;
@@ -274,29 +275,17 @@ static int audit_log_config_change(char *function_name, int new, int old,
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (unlikely(!ab))
                return rc;
-       audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new,
-                        old, from_kuid(&init_user_ns, loginuid), sessionid);
-       if (sid) {
-               char *ctx = NULL;
-               u32 len;
-
-               rc = security_secid_to_secctx(sid, &ctx, &len);
-               if (rc) {
-                       audit_log_format(ab, " sid=%u", sid);
-                       allow_changes = 0; /* Something weird, deny request */
-               } else {
-                       audit_log_format(ab, " subj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
+       audit_log_format(ab, "%s=%d old=%d", function_name, new, old);
+       audit_log_session_info(ab);
+       rc = audit_log_task_context(ab);
+       if (rc)
+               allow_changes = 0; /* Something weird, deny request */
        audit_log_format(ab, " res=%d", allow_changes);
        audit_log_end(ab);
        return rc;
 }
 
-static int audit_do_config_change(char *function_name, int *to_change,
-                                 int new, kuid_t loginuid, u32 sessionid,
-                                 u32 sid)
+static int audit_do_config_change(char *function_name, int *to_change, int new)
 {
        int allow_changes, rc = 0, old = *to_change;
 
@@ -307,8 +296,7 @@ static int audit_do_config_change(char *function_name, int *to_change,
                allow_changes = 1;
 
        if (audit_enabled != AUDIT_OFF) {
-               rc = audit_log_config_change(function_name, new, old, loginuid,
-                                            sessionid, sid, allow_changes);
+               rc = audit_log_config_change(function_name, new, old, allow_changes);
                if (rc)
                        allow_changes = 0;
        }
@@ -322,44 +310,37 @@ static int audit_do_config_change(char *function_name, int *to_change,
        return rc;
 }
 
-static int audit_set_rate_limit(int limit, kuid_t loginuid, u32 sessionid,
-                               u32 sid)
+static int audit_set_rate_limit(int limit)
 {
-       return audit_do_config_change("audit_rate_limit", &audit_rate_limit,
-                                     limit, loginuid, sessionid, sid);
+       return audit_do_config_change("audit_rate_limit", &audit_rate_limit, limit);
 }
 
-static int audit_set_backlog_limit(int limit, kuid_t loginuid, u32 sessionid,
-                                  u32 sid)
+static int audit_set_backlog_limit(int limit)
 {
-       return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit,
-                                     limit, loginuid, sessionid, sid);
+       return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, limit);
 }
 
-static int audit_set_enabled(int state, kuid_t loginuid, u32 sessionid, u32 sid)
+static int audit_set_enabled(int state)
 {
        int rc;
        if (state < AUDIT_OFF || state > AUDIT_LOCKED)
                return -EINVAL;
 
-       rc =  audit_do_config_change("audit_enabled", &audit_enabled, state,
-                                    loginuid, sessionid, sid);
-
+       rc =  audit_do_config_change("audit_enabled", &audit_enabled, state);
        if (!rc)
                audit_ever_enabled |= !!state;
 
        return rc;
 }
 
-static int audit_set_failure(int state, kuid_t loginuid, u32 sessionid, u32 sid)
+static int audit_set_failure(int state)
 {
        if (state != AUDIT_FAIL_SILENT
            && state != AUDIT_FAIL_PRINTK
            && state != AUDIT_FAIL_PANIC)
                return -EINVAL;
 
-       return audit_do_config_change("audit_failure", &audit_failure, state,
-                                     loginuid, sessionid, sid);
+       return audit_do_config_change("audit_failure", &audit_failure, state);
 }
 
 /*
@@ -417,34 +398,53 @@ static void kauditd_send_skb(struct sk_buff *skb)
                consume_skb(skb);
 }
 
-static int kauditd_thread(void *dummy)
+/*
+ * flush_hold_queue - empty the hold queue if auditd appears
+ *
+ * If auditd just started, drain the queue of messages already
+ * sent to syslog/printk.  Remember loss here is ok.  We already
+ * called audit_log_lost() if it didn't go out normally.  So the
+ * race between the skb_dequeue and the next check for audit_pid
+ * doesn't matter.
+ *
+ * If you ever find kauditd to be too slow we can get a perf win
+ * by doing our own locking and keeping better track if there
+ * are messages in this queue.  I don't see the need now, but
+ * in 5 years when I want to play with this again I'll see this
+ * note and still have no friggin idea what i'm thinking today.
+ */
+static void flush_hold_queue(void)
 {
        struct sk_buff *skb;
 
+       if (!audit_default || !audit_pid)
+               return;
+
+       skb = skb_dequeue(&audit_skb_hold_queue);
+       if (likely(!skb))
+               return;
+
+       while (skb && audit_pid) {
+               kauditd_send_skb(skb);
+               skb = skb_dequeue(&audit_skb_hold_queue);
+       }
+
+       /*
+        * if auditd just disappeared but we
+        * dequeued an skb we need to drop ref
+        */
+       if (skb)
+               consume_skb(skb);
+}
+
+static int kauditd_thread(void *dummy)
+{
        set_freezable();
        while (!kthread_should_stop()) {
-               /*
-                * if auditd just started drain the queue of messages already
-                * sent to syslog/printk.  remember loss here is ok.  we already
-                * called audit_log_lost() if it didn't go out normally.  so the
-                * race between the skb_dequeue and the next check for audit_pid
-                * doesn't matter.
-                *
-                * if you ever find kauditd to be too slow we can get a perf win
-                * by doing our own locking and keeping better track if there
-                * are messages in this queue.  I don't see the need now, but
-                * in 5 years when I want to play with this again I'll see this
-                * note and still have no friggin idea what i'm thinking today.
-                */
-               if (audit_default && audit_pid) {
-                       skb = skb_dequeue(&audit_skb_hold_queue);
-                       if (unlikely(skb)) {
-                               while (skb && audit_pid) {
-                                       kauditd_send_skb(skb);
-                                       skb = skb_dequeue(&audit_skb_hold_queue);
-                               }
-                       }
-               }
+               struct sk_buff *skb;
+               DECLARE_WAITQUEUE(wait, current);
+
+               flush_hold_queue();
 
                skb = skb_dequeue(&audit_skb_queue);
                wake_up(&audit_backlog_wait);
@@ -453,19 +453,18 @@ static int kauditd_thread(void *dummy)
                                kauditd_send_skb(skb);
                        else
                                audit_printk_skb(skb);
-               } else {
-                       DECLARE_WAITQUEUE(wait, current);
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       add_wait_queue(&kauditd_wait, &wait);
-
-                       if (!skb_queue_len(&audit_skb_queue)) {
-                               try_to_freeze();
-                               schedule();
-                       }
+                       continue;
+               }
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue(&kauditd_wait, &wait);
 
-                       __set_current_state(TASK_RUNNING);
-                       remove_wait_queue(&kauditd_wait, &wait);
+               if (!skb_queue_len(&audit_skb_queue)) {
+                       try_to_freeze();
+                       schedule();
                }
+
+               __set_current_state(TASK_RUNNING);
+               remove_wait_queue(&kauditd_wait, &wait);
        }
        return 0;
 }
@@ -579,13 +578,14 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
                return -EPERM;
 
        switch (msg_type) {
-       case AUDIT_GET:
        case AUDIT_LIST:
-       case AUDIT_LIST_RULES:
-       case AUDIT_SET:
        case AUDIT_ADD:
-       case AUDIT_ADD_RULE:
        case AUDIT_DEL:
+               return -EOPNOTSUPP;
+       case AUDIT_GET:
+       case AUDIT_SET:
+       case AUDIT_LIST_RULES:
+       case AUDIT_ADD_RULE:
        case AUDIT_DEL_RULE:
        case AUDIT_SIGNAL_INFO:
        case AUDIT_TTY_GET:
@@ -608,12 +608,10 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
        return err;
 }
 
-static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
-                                    kuid_t auid, u32 ses, u32 sid)
+static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
 {
        int rc = 0;
-       char *ctx = NULL;
-       u32 len;
+       uid_t uid = from_kuid(&init_user_ns, current_uid());
 
        if (!audit_enabled) {
                *ab = NULL;
@@ -623,33 +621,21 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
        *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
        if (unlikely(!*ab))
                return rc;
-       audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u",
-                        task_tgid_vnr(current),
-                        from_kuid(&init_user_ns, current_uid()),
-                        from_kuid(&init_user_ns, auid), ses);
-       if (sid) {
-               rc = security_secid_to_secctx(sid, &ctx, &len);
-               if (rc)
-                       audit_log_format(*ab, " ssid=%u", sid);
-               else {
-                       audit_log_format(*ab, " subj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
+       audit_log_format(*ab, "pid=%d uid=%u", task_tgid_vnr(current), uid);
+       audit_log_session_info(*ab);
+       audit_log_task_context(*ab);
 
        return rc;
 }
 
 static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-       u32                     seq, sid;
+       u32                     seq;
        void                    *data;
        struct audit_status     *status_get, status_set;
        int                     err;
        struct audit_buffer     *ab;
        u16                     msg_type = nlh->nlmsg_type;
-       kuid_t                  loginuid; /* loginuid of sender */
-       u32                     sessionid;
        struct audit_sig_info   *sig_data;
        char                    *ctx = NULL;
        u32                     len;
@@ -668,9 +654,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        return err;
                }
        }
-       loginuid = audit_get_loginuid(current);
-       sessionid = audit_get_sessionid(current);
-       security_task_getsecid(current, &sid);
        seq  = nlh->nlmsg_seq;
        data = nlmsg_data(nlh);
 
@@ -691,14 +674,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        return -EINVAL;
                status_get   = (struct audit_status *)data;
                if (status_get->mask & AUDIT_STATUS_ENABLED) {
-                       err = audit_set_enabled(status_get->enabled,
-                                               loginuid, sessionid, sid);
+                       err = audit_set_enabled(status_get->enabled);
                        if (err < 0)
                                return err;
                }
                if (status_get->mask & AUDIT_STATUS_FAILURE) {
-                       err = audit_set_failure(status_get->failure,
-                                               loginuid, sessionid, sid);
+                       err = audit_set_failure(status_get->failure);
                        if (err < 0)
                                return err;
                }
@@ -706,22 +687,17 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        int new_pid = status_get->pid;
 
                        if (audit_enabled != AUDIT_OFF)
-                               audit_log_config_change("audit_pid", new_pid,
-                                                       audit_pid, loginuid,
-                                                       sessionid, sid, 1);
-
+                               audit_log_config_change("audit_pid", new_pid, audit_pid, 1);
                        audit_pid = new_pid;
                        audit_nlk_portid = NETLINK_CB(skb).portid;
                }
                if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) {
-                       err = audit_set_rate_limit(status_get->rate_limit,
-                                                  loginuid, sessionid, sid);
+                       err = audit_set_rate_limit(status_get->rate_limit);
                        if (err < 0)
                                return err;
                }
                if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
-                       err = audit_set_backlog_limit(status_get->backlog_limit,
-                                                     loginuid, sessionid, sid);
+                       err = audit_set_backlog_limit(status_get->backlog_limit);
                break;
        case AUDIT_USER:
        case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
@@ -729,25 +705,22 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (!audit_enabled && msg_type != AUDIT_USER_AVC)
                        return 0;
 
-               err = audit_filter_user();
+               err = audit_filter_user(msg_type);
                if (err == 1) {
                        err = 0;
                        if (msg_type == AUDIT_USER_TTY) {
-                               err = tty_audit_push_task(current, loginuid,
-                                                            sessionid);
+                               err = tty_audit_push_current();
                                if (err)
                                        break;
                        }
-                       audit_log_common_recv_msg(&ab, msg_type,
-                                                 loginuid, sessionid, sid);
-
+                       audit_log_common_recv_msg(&ab, msg_type);
                        if (msg_type != AUDIT_USER_TTY)
                                audit_log_format(ab, " msg='%.1024s'",
                                                 (char *)data);
                        else {
                                int size;
 
-                               audit_log_format(ab, " msg=");
+                               audit_log_format(ab, " data=");
                                size = nlmsg_len(nlh);
                                if (size > 0 &&
                                    ((unsigned char *)data)[size - 1] == '\0')
@@ -758,50 +731,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        audit_log_end(ab);
                }
                break;
-       case AUDIT_ADD:
-       case AUDIT_DEL:
-               if (nlmsg_len(nlh) < sizeof(struct audit_rule))
-                       return -EINVAL;
-               if (audit_enabled == AUDIT_LOCKED) {
-                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-                                                 loginuid, sessionid, sid);
-
-                       audit_log_format(ab, " audit_enabled=%d res=0",
-                                        audit_enabled);
-                       audit_log_end(ab);
-                       return -EPERM;
-               }
-               /* fallthrough */
-       case AUDIT_LIST:
-               err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
-                                          seq, data, nlmsg_len(nlh),
-                                          loginuid, sessionid, sid);
-               break;
        case AUDIT_ADD_RULE:
        case AUDIT_DEL_RULE:
                if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
                        return -EINVAL;
                if (audit_enabled == AUDIT_LOCKED) {
-                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-                                                 loginuid, sessionid, sid);
-
-                       audit_log_format(ab, " audit_enabled=%d res=0",
-                                        audit_enabled);
+                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
+                       audit_log_format(ab, " audit_enabled=%d res=0", audit_enabled);
                        audit_log_end(ab);
                        return -EPERM;
                }
                /* fallthrough */
        case AUDIT_LIST_RULES:
                err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
-                                          seq, data, nlmsg_len(nlh),
-                                          loginuid, sessionid, sid);
+                                          seq, data, nlmsg_len(nlh));
                break;
        case AUDIT_TRIM:
                audit_trim_trees();
-
-               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-                                         loginuid, sessionid, sid);
-
+               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
                audit_log_format(ab, " op=trim res=1");
                audit_log_end(ab);
                break;
@@ -831,8 +778,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                /* OK, here comes... */
                err = audit_tag_tree(old, new);
 
-               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-                                         loginuid, sessionid, sid);
+               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
 
                audit_log_format(ab, " op=make_equiv old=");
                audit_log_untrustedstring(ab, old);
@@ -871,27 +817,30 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                struct audit_tty_status s;
                struct task_struct *tsk = current;
 
-               spin_lock_irq(&tsk->sighand->siglock);
+               spin_lock(&tsk->sighand->siglock);
                s.enabled = tsk->signal->audit_tty != 0;
-               spin_unlock_irq(&tsk->sighand->siglock);
+               s.log_passwd = tsk->signal->audit_tty_log_passwd;
+               spin_unlock(&tsk->sighand->siglock);
 
                audit_send_reply(NETLINK_CB(skb).portid, seq,
                                 AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
                break;
        }
        case AUDIT_TTY_SET: {
-               struct audit_tty_status *s;
+               struct audit_tty_status s;
                struct task_struct *tsk = current;
 
-               if (nlh->nlmsg_len < sizeof(struct audit_tty_status))
-                       return -EINVAL;
-               s = data;
-               if (s->enabled != 0 && s->enabled != 1)
+               memset(&s, 0, sizeof(s));
+               /* guard against past and future API changes */
+               memcpy(&s, data, min(sizeof(s), (size_t)nlh->nlmsg_len));
+               if ((s.enabled != 0 && s.enabled != 1) ||
+                   (s.log_passwd != 0 && s.log_passwd != 1))
                        return -EINVAL;
 
-               spin_lock_irq(&tsk->sighand->siglock);
-               tsk->signal->audit_tty = s->enabled != 0;
-               spin_unlock_irq(&tsk->sighand->siglock);
+               spin_lock(&tsk->sighand->siglock);
+               tsk->signal->audit_tty = s.enabled;
+               tsk->signal->audit_tty_log_passwd = s.log_passwd;
+               spin_unlock(&tsk->sighand->siglock);
                break;
        }
        default:
@@ -1434,6 +1383,14 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
        kfree(pathname);
 }
 
+void audit_log_session_info(struct audit_buffer *ab)
+{
+       u32 sessionid = audit_get_sessionid(current);
+       uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current));
+
+       audit_log_format(ab, " auid=%u ses=%u\n", auid, sessionid);
+}
+
 void audit_log_key(struct audit_buffer *ab, char *key)
 {
        audit_log_format(ab, " key=");
@@ -1443,6 +1400,224 @@ void audit_log_key(struct audit_buffer *ab, char *key)
                audit_log_format(ab, "(null)");
 }
 
+void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
+{
+       int i;
+
+       audit_log_format(ab, " %s=", prefix);
+       CAP_FOR_EACH_U32(i) {
+               audit_log_format(ab, "%08x",
+                                cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
+       }
+}
+
+void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
+{
+       kernel_cap_t *perm = &name->fcap.permitted;
+       kernel_cap_t *inh = &name->fcap.inheritable;
+       int log = 0;
+
+       if (!cap_isclear(*perm)) {
+               audit_log_cap(ab, "cap_fp", perm);
+               log = 1;
+       }
+       if (!cap_isclear(*inh)) {
+               audit_log_cap(ab, "cap_fi", inh);
+               log = 1;
+       }
+
+       if (log)
+               audit_log_format(ab, " cap_fe=%d cap_fver=%x",
+                                name->fcap.fE, name->fcap_ver);
+}
+
+static inline int audit_copy_fcaps(struct audit_names *name,
+                                  const struct dentry *dentry)
+{
+       struct cpu_vfs_cap_data caps;
+       int rc;
+
+       if (!dentry)
+               return 0;
+
+       rc = get_vfs_caps_from_disk(dentry, &caps);
+       if (rc)
+               return rc;
+
+       name->fcap.permitted = caps.permitted;
+       name->fcap.inheritable = caps.inheritable;
+       name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
+       name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >>
+                               VFS_CAP_REVISION_SHIFT;
+
+       return 0;
+}
+
+/* Copy inode data into an audit_names. */
+void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
+                     const struct inode *inode)
+{
+       name->ino   = inode->i_ino;
+       name->dev   = inode->i_sb->s_dev;
+       name->mode  = inode->i_mode;
+       name->uid   = inode->i_uid;
+       name->gid   = inode->i_gid;
+       name->rdev  = inode->i_rdev;
+       security_inode_getsecid(inode, &name->osid);
+       audit_copy_fcaps(name, dentry);
+}
+
+/**
+ * audit_log_name - produce AUDIT_PATH record from struct audit_names
+ * @context: audit_context for the task
+ * @n: audit_names structure with reportable details
+ * @path: optional path to report instead of audit_names->name
+ * @record_num: record number to report when handling a list of names
+ * @call_panic: optional pointer to int that will be updated if secid fails
+ */
+void audit_log_name(struct audit_context *context, struct audit_names *n,
+                   struct path *path, int record_num, int *call_panic)
+{
+       struct audit_buffer *ab;
+       ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
+       if (!ab)
+               return;
+
+       audit_log_format(ab, "item=%d", record_num);
+
+       if (path)
+               audit_log_d_path(ab, " name=", path);
+       else if (n->name) {
+               switch (n->name_len) {
+               case AUDIT_NAME_FULL:
+                       /* log the full path */
+                       audit_log_format(ab, " name=");
+                       audit_log_untrustedstring(ab, n->name->name);
+                       break;
+               case 0:
+                       /* name was specified as a relative path and the
+                        * directory component is the cwd */
+                       audit_log_d_path(ab, " name=", &context->pwd);
+                       break;
+               default:
+                       /* log the name's directory component */
+                       audit_log_format(ab, " name=");
+                       audit_log_n_untrustedstring(ab, n->name->name,
+                                                   n->name_len);
+               }
+       } else
+               audit_log_format(ab, " name=(null)");
+
+       if (n->ino != (unsigned long)-1) {
+               audit_log_format(ab, " inode=%lu"
+                                " dev=%02x:%02x mode=%#ho"
+                                " ouid=%u ogid=%u rdev=%02x:%02x",
+                                n->ino,
+                                MAJOR(n->dev),
+                                MINOR(n->dev),
+                                n->mode,
+                                from_kuid(&init_user_ns, n->uid),
+                                from_kgid(&init_user_ns, n->gid),
+                                MAJOR(n->rdev),
+                                MINOR(n->rdev));
+       }
+       if (n->osid != 0) {
+               char *ctx = NULL;
+               u32 len;
+               if (security_secid_to_secctx(
+                       n->osid, &ctx, &len)) {
+                       audit_log_format(ab, " osid=%u", n->osid);
+                       if (call_panic)
+                               *call_panic = 2;
+               } else {
+                       audit_log_format(ab, " obj=%s", ctx);
+                       security_release_secctx(ctx, len);
+               }
+       }
+
+       audit_log_fcaps(ab, n);
+       audit_log_end(ab);
+}
+
+int audit_log_task_context(struct audit_buffer *ab)
+{
+       char *ctx = NULL;
+       unsigned len;
+       int error;
+       u32 sid;
+
+       security_task_getsecid(current, &sid);
+       if (!sid)
+               return 0;
+
+       error = security_secid_to_secctx(sid, &ctx, &len);
+       if (error) {
+               if (error != -EINVAL)
+                       goto error_path;
+               return 0;
+       }
+
+       audit_log_format(ab, " subj=%s", ctx);
+       security_release_secctx(ctx, len);
+       return 0;
+
+error_path:
+       audit_panic("error in audit_log_task_context");
+       return error;
+}
+EXPORT_SYMBOL(audit_log_task_context);
+
+void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
+{
+       const struct cred *cred;
+       char name[sizeof(tsk->comm)];
+       struct mm_struct *mm = tsk->mm;
+       char *tty;
+
+       if (!ab)
+               return;
+
+       /* tsk == current */
+       cred = current_cred();
+
+       spin_lock_irq(&tsk->sighand->siglock);
+       if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
+               tty = tsk->signal->tty->name;
+       else
+               tty = "(none)";
+       spin_unlock_irq(&tsk->sighand->siglock);
+
+       audit_log_format(ab,
+                        " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
+                        " euid=%u suid=%u fsuid=%u"
+                        " egid=%u sgid=%u fsgid=%u ses=%u tty=%s",
+                        sys_getppid(),
+                        tsk->pid,
+                        from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
+                        from_kuid(&init_user_ns, cred->uid),
+                        from_kgid(&init_user_ns, cred->gid),
+                        from_kuid(&init_user_ns, cred->euid),
+                        from_kuid(&init_user_ns, cred->suid),
+                        from_kuid(&init_user_ns, cred->fsuid),
+                        from_kgid(&init_user_ns, cred->egid),
+                        from_kgid(&init_user_ns, cred->sgid),
+                        from_kgid(&init_user_ns, cred->fsgid),
+                        audit_get_sessionid(tsk), tty);
+
+       get_task_comm(name, tsk);
+       audit_log_format(ab, " comm=");
+       audit_log_untrustedstring(ab, name);
+
+       if (mm) {
+               down_read(&mm->mmap_sem);
+               if (mm->exe_file)
+                       audit_log_d_path(ab, " exe=", &mm->exe_file->f_path);
+               up_read(&mm->mmap_sem);
+       }
+       audit_log_task_context(ab);
+}
+EXPORT_SYMBOL(audit_log_task_info);
+
 /**
  * audit_log_link_denied - report a link restriction denial
  * @operation: specific link operation
@@ -1451,19 +1626,28 @@ void audit_log_key(struct audit_buffer *ab, char *key)
 void audit_log_link_denied(const char *operation, struct path *link)
 {
        struct audit_buffer *ab;
+       struct audit_names *name;
+
+       name = kzalloc(sizeof(*name), GFP_NOFS);
+       if (!name)
+               return;
 
+       /* Generate AUDIT_ANOM_LINK with subject, operation, outcome. */
        ab = audit_log_start(current->audit_context, GFP_KERNEL,
                             AUDIT_ANOM_LINK);
        if (!ab)
-               return;
-       audit_log_format(ab, "op=%s action=denied", operation);
-       audit_log_format(ab, " pid=%d comm=", current->pid);
-       audit_log_untrustedstring(ab, current->comm);
-       audit_log_d_path(ab, " path=", link);
-       audit_log_format(ab, " dev=");
-       audit_log_untrustedstring(ab, link->dentry->d_inode->i_sb->s_id);
-       audit_log_format(ab, " ino=%lu", link->dentry->d_inode->i_ino);
+               goto out;
+       audit_log_format(ab, "op=%s", operation);
+       audit_log_task_info(ab, current);
+       audit_log_format(ab, " res=0");
        audit_log_end(ab);
+
+       /* Generate AUDIT_PATH record with object. */
+       name->type = AUDIT_TYPE_NORMAL;
+       audit_copy_inode(name, link->dentry, link->dentry->d_inode);
+       audit_log_name(current->audit_context, name, link, 0, NULL);
+out:
+       kfree(name);
 }
 
 /**
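The name_len switch in the relocated audit_log_name() above decides how much of a collected name is reported: AUDIT_NAME_FULL logs the whole string, 0 means the name was given as a relative path so the cwd is logged instead, and any other value logs only that many leading characters (the directory component). A minimal user-space sketch of that selection logic, with an illustrative constant and plain printf() in place of the audit buffer helpers:

/*
 * Standalone user-space sketch (not kernel code) of the name_len switch in
 * audit_log_name(): NAME_FULL stands in for AUDIT_NAME_FULL and printf()
 * for the audit buffer helpers.
 */
#include <stdio.h>

#define NAME_FULL -1                            /* illustrative AUDIT_NAME_FULL */

static void log_name(const char *name, int name_len, const char *cwd)
{
        if (!name) {
                printf(" name=(null)\n");
                return;
        }
        switch (name_len) {
        case NAME_FULL:
                printf(" name=%s\n", name);             /* log the full path */
                break;
        case 0:
                printf(" name=%s\n", cwd);              /* relative name: log the cwd */
                break;
        default:
                printf(" name=%.*s\n", name_len, name); /* directory component only */
        }
}

int main(void)
{
        log_name("/etc/passwd", NAME_FULL, "/root");    /* name=/etc/passwd */
        log_name("passwd", 0, "/etc");                  /* name=/etc */
        log_name("/etc/passwd", 4, "/root");            /* name=/etc */
        return 0;
}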
index 11468d99dad00b9f168071701c35965cf49ae585..1c95131ef760c2f45d140efdccfd2d08636b76d7 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/audit.h>
 #include <linux/skbuff.h>
+#include <uapi/linux/mqueue.h>
 
 /* 0 = no checking
    1 = put_count checking
 */
 #define AUDIT_DEBUG 0
 
+/* AUDIT_NAMES is the number of slots we reserve in the audit_context
+ * for saving names from getname().  If we get more names we will allocate
+ * a name dynamically and also add those to the list anchored by names_list. */
+#define AUDIT_NAMES    5
+
 /* At task start time, the audit_state is set in the audit_context using
    a per-task filter.  At syscall entry, the audit_state is augmented by
    the syscall filter. */
@@ -59,8 +65,158 @@ struct audit_entry {
        struct audit_krule      rule;
 };
 
+struct audit_cap_data {
+       kernel_cap_t            permitted;
+       kernel_cap_t            inheritable;
+       union {
+               unsigned int    fE;             /* effective bit of file cap */
+               kernel_cap_t    effective;      /* effective set of process */
+       };
+};
+
+/* When fs/namei.c:getname() is called, we store the pointer in name and
+ * we don't let putname() free it (instead we free all of the saved
+ * pointers at syscall exit time).
+ *
+ * Further, in fs/namei.c:path_lookup() we store the inode and device.
+ */
+struct audit_names {
+       struct list_head        list;           /* audit_context->names_list */
+
+       struct filename         *name;
+       int                     name_len;       /* number of chars to log */
+       bool                    name_put;       /* call __putname()? */
+
+       unsigned long           ino;
+       dev_t                   dev;
+       umode_t                 mode;
+       kuid_t                  uid;
+       kgid_t                  gid;
+       dev_t                   rdev;
+       u32                     osid;
+       struct audit_cap_data   fcap;
+       unsigned int            fcap_ver;
+       unsigned char           type;           /* record type */
+       /*
+        * This was an allocated audit_names and not from the array of
+        * names allocated in the task audit context.  Thus this name
+        * should be freed on syscall exit.
+        */
+       bool                    should_free;
+};
+
+/* The per-task audit context. */
+struct audit_context {
+       int                 dummy;      /* must be the first element */
+       int                 in_syscall; /* 1 if task is in a syscall */
+       enum audit_state    state, current_state;
+       unsigned int        serial;     /* serial number for record */
+       int                 major;      /* syscall number */
+       struct timespec     ctime;      /* time of syscall entry */
+       unsigned long       argv[4];    /* syscall arguments */
+       long                return_code;/* syscall return code */
+       u64                 prio;
+       int                 return_valid; /* return code is valid */
+       /*
+        * The names_list is the list of all audit_names collected during this
+        * syscall.  The first AUDIT_NAMES entries in the names_list will
+        * actually be from the preallocated_names array for performance
+        * reasons.  Except during allocation they should never be referenced
+        * through the preallocated_names array and should only be found/used
+        * by running the names_list.
+        */
+       struct audit_names  preallocated_names[AUDIT_NAMES];
+       int                 name_count; /* total records in names_list */
+       struct list_head    names_list; /* struct audit_names->list anchor */
+       char                *filterkey; /* key for rule that triggered record */
+       struct path         pwd;
+       struct audit_aux_data *aux;
+       struct audit_aux_data *aux_pids;
+       struct sockaddr_storage *sockaddr;
+       size_t sockaddr_len;
+                               /* Save things to print about task_struct */
+       pid_t               pid, ppid;
+       kuid_t              uid, euid, suid, fsuid;
+       kgid_t              gid, egid, sgid, fsgid;
+       unsigned long       personality;
+       int                 arch;
+
+       pid_t               target_pid;
+       kuid_t              target_auid;
+       kuid_t              target_uid;
+       unsigned int        target_sessionid;
+       u32                 target_sid;
+       char                target_comm[TASK_COMM_LEN];
+
+       struct audit_tree_refs *trees, *first_trees;
+       struct list_head killed_trees;
+       int tree_count;
+
+       int type;
+       union {
+               struct {
+                       int nargs;
+                       long args[6];
+               } socketcall;
+               struct {
+                       kuid_t                  uid;
+                       kgid_t                  gid;
+                       umode_t                 mode;
+                       u32                     osid;
+                       int                     has_perm;
+                       uid_t                   perm_uid;
+                       gid_t                   perm_gid;
+                       umode_t                 perm_mode;
+                       unsigned long           qbytes;
+               } ipc;
+               struct {
+                       mqd_t                   mqdes;
+                       struct mq_attr          mqstat;
+               } mq_getsetattr;
+               struct {
+                       mqd_t                   mqdes;
+                       int                     sigev_signo;
+               } mq_notify;
+               struct {
+                       mqd_t                   mqdes;
+                       size_t                  msg_len;
+                       unsigned int            msg_prio;
+                       struct timespec         abs_timeout;
+               } mq_sendrecv;
+               struct {
+                       int                     oflag;
+                       umode_t                 mode;
+                       struct mq_attr          attr;
+               } mq_open;
+               struct {
+                       pid_t                   pid;
+                       struct audit_cap_data   cap;
+               } capset;
+               struct {
+                       int                     fd;
+                       int                     flags;
+               } mmap;
+       };
+       int fds[2];
+
+#if AUDIT_DEBUG
+       int                 put_count;
+       int                 ino_count;
+#endif
+};
+
 extern int audit_ever_enabled;
 
+extern void audit_copy_inode(struct audit_names *name,
+                            const struct dentry *dentry,
+                            const struct inode *inode);
+extern void audit_log_cap(struct audit_buffer *ab, char *prefix,
+                         kernel_cap_t *cap);
+extern void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name);
+extern void audit_log_name(struct audit_context *context,
+                          struct audit_names *n, struct path *path,
+                          int record_num, int *call_panic);
+
 extern int audit_pid;
 
 #define AUDIT_INODE_BUCKETS    32
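The audit_names/audit_context definitions moved into audit.h above keep the first AUDIT_NAMES entries in the fixed preallocated_names[] array and fall back to dynamic allocation for the rest, with should_free marking which entries need to be freed at syscall exit. A standalone user-space sketch of that preallocate-then-overflow pattern (sizes and names are illustrative, not the kernel API):

/*
 * Standalone user-space sketch (not the kernel API) of the allocation
 * pattern behind preallocated_names[]/names_list: the first PREALLOC
 * entries come from a fixed array inside the context, later ones are
 * heap-allocated and marked should_free so only they are released.
 */
#include <stdio.h>
#include <stdlib.h>

#define PREALLOC 5                      /* plays the role of AUDIT_NAMES */

struct name {
        struct name *next;              /* stands in for the list_head */
        int should_free;                /* heap entry, free on teardown */
};

struct ctx {
        struct name prealloc[PREALLOC];
        struct name *head;
        int count;
};

static struct name *get_name(struct ctx *c)
{
        struct name *n;

        if (c->count < PREALLOC) {
                n = &c->prealloc[c->count];     /* cheap, no allocation */
                n->should_free = 0;
        } else {
                n = malloc(sizeof(*n));         /* overflow: allocate */
                if (!n)
                        return NULL;
                n->should_free = 1;
        }
        n->next = c->head;
        c->head = n;
        c->count++;
        return n;
}

static void free_names(struct ctx *c)
{
        struct name *n = c->head, *next;

        while (n) {
                next = n->next;
                if (n->should_free)             /* array slots are not freed */
                        free(n);
                n = next;
        }
        c->head = NULL;
        c->count = 0;
}

int main(void)
{
        struct ctx c = { .head = NULL, .count = 0 };
        int i;

        for (i = 0; i < 8; i++)
                get_name(&c);
        printf("%d names, %d allocated dynamically\n", c.count, c.count - PREALLOC);
        free_names(&c);
        return 0;
}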
index 267436826c3bc179678a57de72cf08220fa8faca..83a2970295d19ffdb1bd7417ed2b1b54a815cba1 100644 (file)
@@ -310,121 +310,83 @@ static u32 audit_to_op(u32 op)
        return n;
 }
 
-
-/* Translate struct audit_rule to kernel's rule respresentation.
- * Exists for backward compatibility with userspace. */
-static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
+/* check if an audit field is valid */
+static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
 {
-       struct audit_entry *entry;
-       int err = 0;
-       int i;
-
-       entry = audit_to_entry_common(rule);
-       if (IS_ERR(entry))
-               goto exit_nofree;
-
-       for (i = 0; i < rule->field_count; i++) {
-               struct audit_field *f = &entry->rule.fields[i];
-               u32 n;
-
-               n = rule->fields[i] & (AUDIT_NEGATE|AUDIT_OPERATORS);
-
-               /* Support for legacy operators where
-                * AUDIT_NEGATE bit signifies != and otherwise assumes == */
-               if (n & AUDIT_NEGATE)
-                       f->op = Audit_not_equal;
-               else if (!n)
-                       f->op = Audit_equal;
-               else
-                       f->op = audit_to_op(n);
-
-               entry->rule.vers_ops = (n & AUDIT_OPERATORS) ? 2 : 1;
-
-               f->type = rule->fields[i] & ~(AUDIT_NEGATE|AUDIT_OPERATORS);
-               f->val = rule->values[i];
-               f->uid = INVALID_UID;
-               f->gid = INVALID_GID;
-
-               err = -EINVAL;
-               if (f->op == Audit_bad)
-                       goto exit_free;
-
-               switch(f->type) {
-               default:
-                       goto exit_free;
-               case AUDIT_UID:
-               case AUDIT_EUID:
-               case AUDIT_SUID:
-               case AUDIT_FSUID:
-               case AUDIT_LOGINUID:
-                       /* bit ops not implemented for uid comparisons */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-
-                       f->uid = make_kuid(current_user_ns(), f->val);
-                       if (!uid_valid(f->uid))
-                               goto exit_free;
-                       break;
-               case AUDIT_GID:
-               case AUDIT_EGID:
-               case AUDIT_SGID:
-               case AUDIT_FSGID:
-                       /* bit ops not implemented for gid comparisons */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-
-                       f->gid = make_kgid(current_user_ns(), f->val);
-                       if (!gid_valid(f->gid))
-                               goto exit_free;
-                       break;
-               case AUDIT_PID:
-               case AUDIT_PERS:
-               case AUDIT_MSGTYPE:
-               case AUDIT_PPID:
-               case AUDIT_DEVMAJOR:
-               case AUDIT_DEVMINOR:
-               case AUDIT_EXIT:
-               case AUDIT_SUCCESS:
-                       /* bit ops are only useful on syscall args */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-                       break;
-               case AUDIT_ARG0:
-               case AUDIT_ARG1:
-               case AUDIT_ARG2:
-               case AUDIT_ARG3:
-                       break;
-               /* arch is only allowed to be = or != */
-               case AUDIT_ARCH:
-                       if (f->op != Audit_not_equal && f->op != Audit_equal)
-                               goto exit_free;
-                       entry->rule.arch_f = f;
-                       break;
-               case AUDIT_PERM:
-                       if (f->val & ~15)
-                               goto exit_free;
-                       break;
-               case AUDIT_FILETYPE:
-                       if (f->val & ~S_IFMT)
-                               goto exit_free;
-                       break;
-               case AUDIT_INODE:
-                       err = audit_to_inode(&entry->rule, f);
-                       if (err)
-                               goto exit_free;
-                       break;
-               }
-       }
-
-       if (entry->rule.inode_f && entry->rule.inode_f->op == Audit_not_equal)
-               entry->rule.inode_f = NULL;
-
-exit_nofree:
-       return entry;
+       switch(f->type) {
+       case AUDIT_MSGTYPE:
+               if (entry->rule.listnr != AUDIT_FILTER_TYPE &&
+                   entry->rule.listnr != AUDIT_FILTER_USER)
+                       return -EINVAL;
+               break;
+       };
 
-exit_free:
-       audit_free_rule(entry);
-       return ERR_PTR(err);
+       switch(f->type) {
+       default:
+               return -EINVAL;
+       case AUDIT_UID:
+       case AUDIT_EUID:
+       case AUDIT_SUID:
+       case AUDIT_FSUID:
+       case AUDIT_LOGINUID:
+       case AUDIT_OBJ_UID:
+       case AUDIT_GID:
+       case AUDIT_EGID:
+       case AUDIT_SGID:
+       case AUDIT_FSGID:
+       case AUDIT_OBJ_GID:
+       case AUDIT_PID:
+       case AUDIT_PERS:
+       case AUDIT_MSGTYPE:
+       case AUDIT_PPID:
+       case AUDIT_DEVMAJOR:
+       case AUDIT_DEVMINOR:
+       case AUDIT_EXIT:
+       case AUDIT_SUCCESS:
+               /* bit ops are only useful on syscall args */
+               if (f->op == Audit_bitmask || f->op == Audit_bittest)
+                       return -EINVAL;
+               break;
+       case AUDIT_ARG0:
+       case AUDIT_ARG1:
+       case AUDIT_ARG2:
+       case AUDIT_ARG3:
+       case AUDIT_SUBJ_USER:
+       case AUDIT_SUBJ_ROLE:
+       case AUDIT_SUBJ_TYPE:
+       case AUDIT_SUBJ_SEN:
+       case AUDIT_SUBJ_CLR:
+       case AUDIT_OBJ_USER:
+       case AUDIT_OBJ_ROLE:
+       case AUDIT_OBJ_TYPE:
+       case AUDIT_OBJ_LEV_LOW:
+       case AUDIT_OBJ_LEV_HIGH:
+       case AUDIT_WATCH:
+       case AUDIT_DIR:
+       case AUDIT_FILTERKEY:
+               break;
+       case AUDIT_LOGINUID_SET:
+               if ((f->val != 0) && (f->val != 1))
+                       return -EINVAL;
+       /* FALL THROUGH */
+       case AUDIT_ARCH:
+               if (f->op != Audit_not_equal && f->op != Audit_equal)
+                       return -EINVAL;
+               break;
+       case AUDIT_PERM:
+               if (f->val & ~15)
+                       return -EINVAL;
+               break;
+       case AUDIT_FILETYPE:
+               if (f->val & ~S_IFMT)
+                       return -EINVAL;
+               break;
+       case AUDIT_FIELD_COMPARE:
+               if (f->val > AUDIT_MAX_FIELD_COMPARE)
+                       return -EINVAL;
+               break;
+       };
+       return 0;
 }
 
 /* Translate struct audit_rule_data to kernel's rule representation. */
@@ -459,17 +421,25 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                f->gid = INVALID_GID;
                f->lsm_str = NULL;
                f->lsm_rule = NULL;
-               switch(f->type) {
+
+               /* Support legacy tests for a valid loginuid */
+               if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
+                       f->type = AUDIT_LOGINUID_SET;
+                       f->val = 0;
+               }
+
+               err = audit_field_valid(entry, f);
+               if (err)
+                       goto exit_free;
+
+               err = -EINVAL;
+               switch (f->type) {
+               case AUDIT_LOGINUID:
                case AUDIT_UID:
                case AUDIT_EUID:
                case AUDIT_SUID:
                case AUDIT_FSUID:
-               case AUDIT_LOGINUID:
                case AUDIT_OBJ_UID:
-                       /* bit ops not implemented for uid comparisons */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-
                        f->uid = make_kuid(current_user_ns(), f->val);
                        if (!uid_valid(f->uid))
                                goto exit_free;
@@ -479,27 +449,10 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                case AUDIT_SGID:
                case AUDIT_FSGID:
                case AUDIT_OBJ_GID:
-                       /* bit ops not implemented for gid comparisons */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-
                        f->gid = make_kgid(current_user_ns(), f->val);
                        if (!gid_valid(f->gid))
                                goto exit_free;
                        break;
-               case AUDIT_PID:
-               case AUDIT_PERS:
-               case AUDIT_MSGTYPE:
-               case AUDIT_PPID:
-               case AUDIT_DEVMAJOR:
-               case AUDIT_DEVMINOR:
-               case AUDIT_EXIT:
-               case AUDIT_SUCCESS:
-               case AUDIT_ARG0:
-               case AUDIT_ARG1:
-               case AUDIT_ARG2:
-               case AUDIT_ARG3:
-                       break;
                case AUDIT_ARCH:
                        entry->rule.arch_f = f;
                        break;
@@ -570,20 +523,6 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                        entry->rule.buflen += f->val;
                        entry->rule.filterkey = str;
                        break;
-               case AUDIT_PERM:
-                       if (f->val & ~15)
-                               goto exit_free;
-                       break;
-               case AUDIT_FILETYPE:
-                       if (f->val & ~S_IFMT)
-                               goto exit_free;
-                       break;
-               case AUDIT_FIELD_COMPARE:
-                       if (f->val > AUDIT_MAX_FIELD_COMPARE)
-                               goto exit_free;
-                       break;
-               default:
-                       goto exit_free;
                }
        }
 
@@ -613,36 +552,6 @@ static inline size_t audit_pack_string(void **bufp, const char *str)
        return len;
 }
 
-/* Translate kernel rule respresentation to struct audit_rule.
- * Exists for backward compatibility with userspace. */
-static struct audit_rule *audit_krule_to_rule(struct audit_krule *krule)
-{
-       struct audit_rule *rule;
-       int i;
-
-       rule = kzalloc(sizeof(*rule), GFP_KERNEL);
-       if (unlikely(!rule))
-               return NULL;
-
-       rule->flags = krule->flags | krule->listnr;
-       rule->action = krule->action;
-       rule->field_count = krule->field_count;
-       for (i = 0; i < rule->field_count; i++) {
-               rule->values[i] = krule->fields[i].val;
-               rule->fields[i] = krule->fields[i].type;
-
-               if (krule->vers_ops == 1) {
-                       if (krule->fields[i].op == Audit_not_equal)
-                               rule->fields[i] |= AUDIT_NEGATE;
-               } else {
-                       rule->fields[i] |= audit_ops[krule->fields[i].op];
-               }
-       }
-       for (i = 0; i < AUDIT_BITMASK_SIZE; i++) rule->mask[i] = krule->mask[i];
-
-       return rule;
-}
-
 /* Translate kernel rule representation to struct audit_rule_data. */
 static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
 {
@@ -1055,35 +964,6 @@ out:
        return ret;
 }
 
-/* List rules using struct audit_rule.  Exists for backward
- * compatibility with userspace. */
-static void audit_list(int pid, int seq, struct sk_buff_head *q)
-{
-       struct sk_buff *skb;
-       struct audit_krule *r;
-       int i;
-
-       /* This is a blocking read, so use audit_filter_mutex instead of rcu
-        * iterator to sync with list writers. */
-       for (i=0; i<AUDIT_NR_FILTERS; i++) {
-               list_for_each_entry(r, &audit_rules_list[i], list) {
-                       struct audit_rule *rule;
-
-                       rule = audit_krule_to_rule(r);
-                       if (unlikely(!rule))
-                               break;
-                       skb = audit_make_reply(pid, seq, AUDIT_LIST, 0, 1,
-                                        rule, sizeof(*rule));
-                       if (skb)
-                               skb_queue_tail(q, skb);
-                       kfree(rule);
-               }
-       }
-       skb = audit_make_reply(pid, seq, AUDIT_LIST, 1, 1, NULL, 0);
-       if (skb)
-               skb_queue_tail(q, skb);
-}
-
 /* List rules using struct audit_rule_data. */
 static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)
 {
@@ -1113,11 +993,11 @@ static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)
 }
 
 /* Log rule additions and removals */
-static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
-                                 char *action, struct audit_krule *rule,
-                                 int res)
+static void audit_log_rule_change(char *action, struct audit_krule *rule, int res)
 {
        struct audit_buffer *ab;
+       uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(current));
+       u32 sessionid = audit_get_sessionid(current);
 
        if (!audit_enabled)
                return;
@@ -1125,18 +1005,8 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (!ab)
                return;
-       audit_log_format(ab, "auid=%u ses=%u",
-                        from_kuid(&init_user_ns, loginuid), sessionid);
-       if (sid) {
-               char *ctx = NULL;
-               u32 len;
-               if (security_secid_to_secctx(sid, &ctx, &len))
-                       audit_log_format(ab, " ssid=%u", sid);
-               else {
-                       audit_log_format(ab, " subj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
+       audit_log_format(ab, "auid=%u ses=%u" ,loginuid, sessionid);
+       audit_log_task_context(ab);
        audit_log_format(ab, " op=");
        audit_log_string(ab, action);
        audit_log_key(ab, rule->filterkey);
@@ -1155,8 +1025,7 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
  * @sessionid: sessionid for netlink audit message
  * @sid: SE Linux Security ID of sender
  */
-int audit_receive_filter(int type, int pid, int seq, void *data,
-                        size_t datasz, kuid_t loginuid, u32 sessionid, u32 sid)
+int audit_receive_filter(int type, int pid, int seq, void *data, size_t datasz)
 {
        struct task_struct *tsk;
        struct audit_netlink_list *dest;
@@ -1164,7 +1033,6 @@ int audit_receive_filter(int type, int pid, int seq, void *data,
        struct audit_entry *entry;
 
        switch (type) {
-       case AUDIT_LIST:
        case AUDIT_LIST_RULES:
                /* We can't just spew out the rules here because we might fill
                 * the available socket buffer space and deadlock waiting for
@@ -1179,10 +1047,7 @@ int audit_receive_filter(int type, int pid, int seq, void *data,
                skb_queue_head_init(&dest->q);
 
                mutex_lock(&audit_filter_mutex);
-               if (type == AUDIT_LIST)
-                       audit_list(pid, seq, &dest->q);
-               else
-                       audit_list_rules(pid, seq, &dest->q);
+               audit_list_rules(pid, seq, &dest->q);
                mutex_unlock(&audit_filter_mutex);
 
                tsk = kthread_run(audit_send_list, dest, "audit_send_list");
@@ -1192,35 +1057,23 @@ int audit_receive_filter(int type, int pid, int seq, void *data,
                        err = PTR_ERR(tsk);
                }
                break;
-       case AUDIT_ADD:
        case AUDIT_ADD_RULE:
-               if (type == AUDIT_ADD)
-                       entry = audit_rule_to_entry(data);
-               else
-                       entry = audit_data_to_entry(data, datasz);
+               entry = audit_data_to_entry(data, datasz);
                if (IS_ERR(entry))
                        return PTR_ERR(entry);
 
                err = audit_add_rule(entry);
-               audit_log_rule_change(loginuid, sessionid, sid, "add rule",
-                                     &entry->rule, !err);
-
+               audit_log_rule_change("add rule", &entry->rule, !err);
                if (err)
                        audit_free_rule(entry);
                break;
-       case AUDIT_DEL:
        case AUDIT_DEL_RULE:
-               if (type == AUDIT_DEL)
-                       entry = audit_rule_to_entry(data);
-               else
-                       entry = audit_data_to_entry(data, datasz);
+               entry = audit_data_to_entry(data, datasz);
                if (IS_ERR(entry))
                        return PTR_ERR(entry);
 
                err = audit_del_rule(entry);
-               audit_log_rule_change(loginuid, sessionid, sid, "remove rule",
-                                     &entry->rule, !err);
-
+               audit_log_rule_change("remove rule", &entry->rule, !err);
                audit_free_rule(entry);
                break;
        default:
@@ -1358,7 +1211,7 @@ int audit_compare_dname_path(const char *dname, const char *path, int parentlen)
        return strncmp(p, dname, dlen);
 }
 
-static int audit_filter_user_rules(struct audit_krule *rule,
+static int audit_filter_user_rules(struct audit_krule *rule, int type,
                                   enum audit_state *state)
 {
        int i;
@@ -1382,6 +1235,13 @@ static int audit_filter_user_rules(struct audit_krule *rule,
                        result = audit_uid_comparator(audit_get_loginuid(current),
                                                  f->op, f->uid);
                        break;
+               case AUDIT_LOGINUID_SET:
+                       result = audit_comparator(audit_loginuid_set(current),
+                                                 f->op, f->val);
+                       break;
+               case AUDIT_MSGTYPE:
+                       result = audit_comparator(type, f->op, f->val);
+                       break;
                case AUDIT_SUBJ_USER:
                case AUDIT_SUBJ_ROLE:
                case AUDIT_SUBJ_TYPE:
@@ -1408,7 +1268,7 @@ static int audit_filter_user_rules(struct audit_krule *rule,
        return 1;
 }
 
-int audit_filter_user(void)
+int audit_filter_user(int type)
 {
        enum audit_state state = AUDIT_DISABLED;
        struct audit_entry *e;
@@ -1416,7 +1276,7 @@ int audit_filter_user(void)
 
        rcu_read_lock();
        list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_USER], list) {
-               if (audit_filter_user_rules(&e->rule, &state)) {
+               if (audit_filter_user_rules(&e->rule, type, &state)) {
                        if (state == AUDIT_DISABLED)
                                ret = 0;
                        break;
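audit_data_to_entry() above now rewrites the legacy userspace idiom "loginuid == 4294967295" (i.e. -1, meaning no loginuid has been set) into the new explicit AUDIT_LOGINUID_SET field with value 0, which audit_field_valid() then restricts to equal/not-equal tests against 0 or 1. A small sketch of that compatibility rewrite, using made-up field identifiers rather than the real AUDIT_* constants:

/*
 * Sketch of the legacy-loginuid rewrite, with made-up field identifiers in
 * place of the real AUDIT_* constants.
 */
#include <stdio.h>

enum { F_LOGINUID = 1, F_LOGINUID_SET = 2 };    /* illustrative ids only */

struct field {
        int type;
        unsigned int val;
};

static void fixup_legacy_loginuid(struct field *f)
{
        /* old userspace: "loginuid == -1" meant "no loginuid was ever set" */
        if (f->type == F_LOGINUID && f->val == 4294967295u) {
                f->type = F_LOGINUID_SET;
                f->val = 0;                     /* match unset loginuids */
        }
}

int main(void)
{
        struct field f = { .type = F_LOGINUID, .val = 4294967295u };

        fixup_legacy_loginuid(&f);
        printf("type=%d val=%u\n", f.type, f.val);      /* type=2 val=0 */
        return 0;
}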
index c68229411a7c20afbd6903f7272d23afbc0e9ac0..3c8a601324a280fe9224c872f24e2995243da830 100644 (file)
 #define AUDITSC_SUCCESS 1
 #define AUDITSC_FAILURE 2
 
-/* AUDIT_NAMES is the number of slots we reserve in the audit_context
- * for saving names from getname().  If we get more names we will allocate
- * a name dynamically and also add those to the list anchored by names_list. */
-#define AUDIT_NAMES    5
-
 /* no execve audit message should be longer than this (userspace limits) */
 #define MAX_EXECVE_AUDIT_LEN 7500
 
@@ -90,44 +85,6 @@ int audit_n_rules;
 /* determines whether we collect data for signals sent */
 int audit_signals;
 
-struct audit_cap_data {
-       kernel_cap_t            permitted;
-       kernel_cap_t            inheritable;
-       union {
-               unsigned int    fE;             /* effective bit of a file capability */
-               kernel_cap_t    effective;      /* effective set of a process */
-       };
-};
-
-/* When fs/namei.c:getname() is called, we store the pointer in name and
- * we don't let putname() free it (instead we free all of the saved
- * pointers at syscall exit time).
- *
- * Further, in fs/namei.c:path_lookup() we store the inode and device.
- */
-struct audit_names {
-       struct list_head        list;           /* audit_context->names_list */
-       struct filename *name;
-       unsigned long           ino;
-       dev_t                   dev;
-       umode_t                 mode;
-       kuid_t                  uid;
-       kgid_t                  gid;
-       dev_t                   rdev;
-       u32                     osid;
-       struct audit_cap_data    fcap;
-       unsigned int            fcap_ver;
-       int                     name_len;       /* number of name's characters to log */
-       unsigned char           type;           /* record type */
-       bool                    name_put;       /* call __putname() for this name */
-       /*
-        * This was an allocated audit_names and not from the array of
-        * names allocated in the task audit context.  Thus this name
-        * should be freed on syscall exit
-        */
-       bool                    should_free;
-};
-
 struct audit_aux_data {
        struct audit_aux_data   *next;
        int                     type;
@@ -175,106 +132,6 @@ struct audit_tree_refs {
        struct audit_chunk *c[31];
 };
 
-/* The per-task audit context. */
-struct audit_context {
-       int                 dummy;      /* must be the first element */
-       int                 in_syscall; /* 1 if task is in a syscall */
-       enum audit_state    state, current_state;
-       unsigned int        serial;     /* serial number for record */
-       int                 major;      /* syscall number */
-       struct timespec     ctime;      /* time of syscall entry */
-       unsigned long       argv[4];    /* syscall arguments */
-       long                return_code;/* syscall return code */
-       u64                 prio;
-       int                 return_valid; /* return code is valid */
-       /*
-        * The names_list is the list of all audit_names collected during this
-        * syscall.  The first AUDIT_NAMES entries in the names_list will
-        * actually be from the preallocated_names array for performance
-        * reasons.  Except during allocation they should never be referenced
-        * through the preallocated_names array and should only be found/used
-        * by running the names_list.
-        */
-       struct audit_names  preallocated_names[AUDIT_NAMES];
-       int                 name_count; /* total records in names_list */
-       struct list_head    names_list; /* anchor for struct audit_names->list */
-       char *              filterkey;  /* key for rule that triggered record */
-       struct path         pwd;
-       struct audit_aux_data *aux;
-       struct audit_aux_data *aux_pids;
-       struct sockaddr_storage *sockaddr;
-       size_t sockaddr_len;
-                               /* Save things to print about task_struct */
-       pid_t               pid, ppid;
-       kuid_t              uid, euid, suid, fsuid;
-       kgid_t              gid, egid, sgid, fsgid;
-       unsigned long       personality;
-       int                 arch;
-
-       pid_t               target_pid;
-       kuid_t              target_auid;
-       kuid_t              target_uid;
-       unsigned int        target_sessionid;
-       u32                 target_sid;
-       char                target_comm[TASK_COMM_LEN];
-
-       struct audit_tree_refs *trees, *first_trees;
-       struct list_head killed_trees;
-       int tree_count;
-
-       int type;
-       union {
-               struct {
-                       int nargs;
-                       long args[6];
-               } socketcall;
-               struct {
-                       kuid_t                  uid;
-                       kgid_t                  gid;
-                       umode_t                 mode;
-                       u32                     osid;
-                       int                     has_perm;
-                       uid_t                   perm_uid;
-                       gid_t                   perm_gid;
-                       umode_t                 perm_mode;
-                       unsigned long           qbytes;
-               } ipc;
-               struct {
-                       mqd_t                   mqdes;
-                       struct mq_attr          mqstat;
-               } mq_getsetattr;
-               struct {
-                       mqd_t                   mqdes;
-                       int                     sigev_signo;
-               } mq_notify;
-               struct {
-                       mqd_t                   mqdes;
-                       size_t                  msg_len;
-                       unsigned int            msg_prio;
-                       struct timespec         abs_timeout;
-               } mq_sendrecv;
-               struct {
-                       int                     oflag;
-                       umode_t                 mode;
-                       struct mq_attr          attr;
-               } mq_open;
-               struct {
-                       pid_t                   pid;
-                       struct audit_cap_data   cap;
-               } capset;
-               struct {
-                       int                     fd;
-                       int                     flags;
-               } mmap;
-       };
-       int fds[2];
-
-#if AUDIT_DEBUG
-       int                 put_count;
-       int                 ino_count;
-#endif
-};
-
 static inline int open_arg(int flags, int mask)
 {
        int n = ACC_MODE(flags);
@@ -633,9 +490,23 @@ static int audit_filter_rules(struct task_struct *tsk,
                        break;
                case AUDIT_GID:
                        result = audit_gid_comparator(cred->gid, f->op, f->gid);
+                       if (f->op == Audit_equal) {
+                               if (!result)
+                                       result = in_group_p(f->gid);
+                       } else if (f->op == Audit_not_equal) {
+                               if (result)
+                                       result = !in_group_p(f->gid);
+                       }
                        break;
                case AUDIT_EGID:
                        result = audit_gid_comparator(cred->egid, f->op, f->gid);
+                       if (f->op == Audit_equal) {
+                               if (!result)
+                                       result = in_egroup_p(f->gid);
+                       } else if (f->op == Audit_not_equal) {
+                               if (result)
+                                       result = !in_egroup_p(f->gid);
+                       }
                        break;
                case AUDIT_SGID:
                        result = audit_gid_comparator(cred->sgid, f->op, f->gid);
@@ -742,6 +613,9 @@ static int audit_filter_rules(struct task_struct *tsk,
                        if (ctx)
                                result = audit_uid_comparator(tsk->loginuid, f->op, f->uid);
                        break;
+               case AUDIT_LOGINUID_SET:
+                       result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
+                       break;
                case AUDIT_SUBJ_USER:
                case AUDIT_SUBJ_ROLE:
                case AUDIT_SUBJ_TYPE:
@@ -987,6 +861,8 @@ static inline void audit_free_names(struct audit_context *context)
 
 #if AUDIT_DEBUG == 2
        if (context->put_count + context->ino_count != context->name_count) {
+               int i = 0;
+
                printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d"
                       " name_count=%d put_count=%d"
                       " ino_count=%d [NOT freeing]\n",
@@ -995,7 +871,7 @@ static inline void audit_free_names(struct audit_context *context)
                       context->name_count, context->put_count,
                       context->ino_count);
                list_for_each_entry(n, &context->names_list, list) {
-                       printk(KERN_ERR "names[%d] = %p = %s\n", i,
+                       printk(KERN_ERR "names[%d] = %p = %s\n", i++,
                               n->name, n->name->name ?: "(null)");
                }
                dump_stack();
@@ -1010,7 +886,7 @@ static inline void audit_free_names(struct audit_context *context)
        list_for_each_entry_safe(n, next, &context->names_list, list) {
                list_del(&n->list);
                if (n->name && n->name_put)
-                       __putname(n->name);
+                       final_putname(n->name);
                if (n->should_free)
                        kfree(n);
        }
@@ -1093,88 +969,6 @@ static inline void audit_free_context(struct audit_context *context)
        kfree(context);
 }
 
-void audit_log_task_context(struct audit_buffer *ab)
-{
-       char *ctx = NULL;
-       unsigned len;
-       int error;
-       u32 sid;
-
-       security_task_getsecid(current, &sid);
-       if (!sid)
-               return;
-
-       error = security_secid_to_secctx(sid, &ctx, &len);
-       if (error) {
-               if (error != -EINVAL)
-                       goto error_path;
-               return;
-       }
-
-       audit_log_format(ab, " subj=%s", ctx);
-       security_release_secctx(ctx, len);
-       return;
-
-error_path:
-       audit_panic("error in audit_log_task_context");
-       return;
-}
-
-EXPORT_SYMBOL(audit_log_task_context);
-
-void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
-{
-       const struct cred *cred;
-       char name[sizeof(tsk->comm)];
-       struct mm_struct *mm = tsk->mm;
-       char *tty;
-
-       if (!ab)
-               return;
-
-       /* tsk == current */
-       cred = current_cred();
-
-       spin_lock_irq(&tsk->sighand->siglock);
-       if (tsk->signal && tsk->signal->tty)
-               tty = tsk->signal->tty->name;
-       else
-               tty = "(none)";
-       spin_unlock_irq(&tsk->sighand->siglock);
-
-
-       audit_log_format(ab,
-                        " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
-                        " euid=%u suid=%u fsuid=%u"
-                        " egid=%u sgid=%u fsgid=%u ses=%u tty=%s",
-                        sys_getppid(),
-                        tsk->pid,
-                        from_kuid(&init_user_ns, tsk->loginuid),
-                        from_kuid(&init_user_ns, cred->uid),
-                        from_kgid(&init_user_ns, cred->gid),
-                        from_kuid(&init_user_ns, cred->euid),
-                        from_kuid(&init_user_ns, cred->suid),
-                        from_kuid(&init_user_ns, cred->fsuid),
-                        from_kgid(&init_user_ns, cred->egid),
-                        from_kgid(&init_user_ns, cred->sgid),
-                        from_kgid(&init_user_ns, cred->fsgid),
-                        tsk->sessionid, tty);
-
-       get_task_comm(name, tsk);
-       audit_log_format(ab, " comm=");
-       audit_log_untrustedstring(ab, name);
-
-       if (mm) {
-               down_read(&mm->mmap_sem);
-               if (mm->exe_file)
-                       audit_log_d_path(ab, " exe=", &mm->exe_file->f_path);
-               up_read(&mm->mmap_sem);
-       }
-       audit_log_task_context(ab);
-}
-
-EXPORT_SYMBOL(audit_log_task_info);
-
 static int audit_log_pid_context(struct audit_context *context, pid_t pid,
                                 kuid_t auid, kuid_t uid, unsigned int sessionid,
                                 u32 sid, char *comm)
@@ -1191,12 +985,14 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
        audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid,
                         from_kuid(&init_user_ns, auid),
                         from_kuid(&init_user_ns, uid), sessionid);
-       if (security_secid_to_secctx(sid, &ctx, &len)) {
-               audit_log_format(ab, " obj=(none)");
-               rc = 1;
-       } else {
-               audit_log_format(ab, " obj=%s", ctx);
-               security_release_secctx(ctx, len);
+       if (sid) {
+               if (security_secid_to_secctx(sid, &ctx, &len)) {
+                       audit_log_format(ab, " obj=(none)");
+                       rc = 1;
+               } else {
+                       audit_log_format(ab, " obj=%s", ctx);
+                       security_release_secctx(ctx, len);
+               }
        }
        audit_log_format(ab, " ocomm=");
        audit_log_untrustedstring(ab, comm);
@@ -1390,35 +1186,6 @@ static void audit_log_execve_info(struct audit_context *context,
        kfree(buf);
 }
 
-static void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
-{
-       int i;
-
-       audit_log_format(ab, " %s=", prefix);
-       CAP_FOR_EACH_U32(i) {
-               audit_log_format(ab, "%08x", cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
-       }
-}
-
-static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
-{
-       kernel_cap_t *perm = &name->fcap.permitted;
-       kernel_cap_t *inh = &name->fcap.inheritable;
-       int log = 0;
-
-       if (!cap_isclear(*perm)) {
-               audit_log_cap(ab, "cap_fp", perm);
-               log = 1;
-       }
-       if (!cap_isclear(*inh)) {
-               audit_log_cap(ab, "cap_fi", inh);
-               log = 1;
-       }
-
-       if (log)
-               audit_log_format(ab, " cap_fe=%d cap_fver=%x", name->fcap.fE, name->fcap_ver);
-}
-
 static void show_special(struct audit_context *context, int *call_panic)
 {
        struct audit_buffer *ab;
@@ -1516,68 +1283,6 @@ static void show_special(struct audit_context *context, int *call_panic)
        audit_log_end(ab);
 }
 
-static void audit_log_name(struct audit_context *context, struct audit_names *n,
-                          int record_num, int *call_panic)
-{
-       struct audit_buffer *ab;
-       ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
-       if (!ab)
-               return; /* audit_panic has been called */
-
-       audit_log_format(ab, "item=%d", record_num);
-
-       if (n->name) {
-               switch (n->name_len) {
-               case AUDIT_NAME_FULL:
-                       /* log the full path */
-                       audit_log_format(ab, " name=");
-                       audit_log_untrustedstring(ab, n->name->name);
-                       break;
-               case 0:
-                       /* name was specified as a relative path and the
-                        * directory component is the cwd */
-                       audit_log_d_path(ab, " name=", &context->pwd);
-                       break;
-               default:
-                       /* log the name's directory component */
-                       audit_log_format(ab, " name=");
-                       audit_log_n_untrustedstring(ab, n->name->name,
-                                                   n->name_len);
-               }
-       } else
-               audit_log_format(ab, " name=(null)");
-
-       if (n->ino != (unsigned long)-1) {
-               audit_log_format(ab, " inode=%lu"
-                                " dev=%02x:%02x mode=%#ho"
-                                " ouid=%u ogid=%u rdev=%02x:%02x",
-                                n->ino,
-                                MAJOR(n->dev),
-                                MINOR(n->dev),
-                                n->mode,
-                                from_kuid(&init_user_ns, n->uid),
-                                from_kgid(&init_user_ns, n->gid),
-                                MAJOR(n->rdev),
-                                MINOR(n->rdev));
-       }
-       if (n->osid != 0) {
-               char *ctx = NULL;
-               u32 len;
-               if (security_secid_to_secctx(
-                       n->osid, &ctx, &len)) {
-                       audit_log_format(ab, " osid=%u", n->osid);
-                       *call_panic = 2;
-               } else {
-                       audit_log_format(ab, " obj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
-
-       audit_log_fcaps(ab, n);
-
-       audit_log_end(ab);
-}
-
 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
        int i, call_panic = 0;
@@ -1695,7 +1400,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
 
        i = 0;
        list_for_each_entry(n, &context->names_list, list)
-               audit_log_name(context, n, i++, &call_panic);
+               audit_log_name(context, n, NULL, i++, &call_panic);
 
        /* Send end of event record to help user space know we are finished */
        ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
@@ -2030,18 +1735,18 @@ void audit_putname(struct filename *name)
        BUG_ON(!context);
        if (!context->in_syscall) {
 #if AUDIT_DEBUG == 2
-               printk(KERN_ERR "%s:%d(:%d): __putname(%p)\n",
+               printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
                       __FILE__, __LINE__, context->serial, name);
                if (context->name_count) {
                        struct audit_names *n;
-                       int i;
+                       int i = 0;
 
                        list_for_each_entry(n, &context->names_list, list)
-                               printk(KERN_ERR "name[%d] = %p = %s\n", i,
+                               printk(KERN_ERR "name[%d] = %p = %s\n", i++,
                                       n->name, n->name->name ?: "(null)");
                        }
 #endif
-               __putname(name);
+               final_putname(name);
        }
 #if AUDIT_DEBUG
        else {
@@ -2060,41 +1765,6 @@ void audit_putname(struct filename *name)
 #endif
 }
 
-static inline int audit_copy_fcaps(struct audit_names *name, const struct dentry *dentry)
-{
-       struct cpu_vfs_cap_data caps;
-       int rc;
-
-       if (!dentry)
-               return 0;
-
-       rc = get_vfs_caps_from_disk(dentry, &caps);
-       if (rc)
-               return rc;
-
-       name->fcap.permitted = caps.permitted;
-       name->fcap.inheritable = caps.inheritable;
-       name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
-       name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT;
-
-       return 0;
-}
-
-
-/* Copy inode data into an audit_names. */
-static void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
-                            const struct inode *inode)
-{
-       name->ino   = inode->i_ino;
-       name->dev   = inode->i_sb->s_dev;
-       name->mode  = inode->i_mode;
-       name->uid   = inode->i_uid;
-       name->gid   = inode->i_gid;
-       name->rdev  = inode->i_rdev;
-       security_inode_getsecid(inode, &name->osid);
-       audit_copy_fcaps(name, dentry);
-}
-
 /**
  * __audit_inode - store the inode and device from a lookup
  * @name: name being audited
@@ -2303,7 +1973,7 @@ int audit_set_loginuid(kuid_t loginuid)
        unsigned int sessionid;
 
 #ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE
-       if (uid_valid(task->loginuid))
+       if (audit_loginuid_set(task))
                return -EPERM;
 #else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
        if (!capable(CAP_AUDIT_CONTROL))
@@ -2471,17 +2141,20 @@ int __audit_bprm(struct linux_binprm *bprm)
 
 /**
  * audit_socketcall - record audit data for sys_socketcall
- * @nargs: number of args
+ * @nargs: number of args, which should not be more than AUDITSC_ARGS.
  * @args: args array
  *
  */
-void __audit_socketcall(int nargs, unsigned long *args)
+int __audit_socketcall(int nargs, unsigned long *args)
 {
        struct audit_context *context = current->audit_context;
 
+       if (nargs <= 0 || nargs > AUDITSC_ARGS || !args)
+               return -EINVAL;
        context->type = AUDIT_SOCKETCALL;
        context->socketcall.nargs = nargs;
        memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long));
+       return 0;
 }
 
 /**
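__audit_socketcall() above now returns -EINVAL instead of copying unconditionally when nargs is out of range, since the arguments land in the fixed six-slot socketcall.args[] array of the audit_context shown earlier. A user-space sketch of that bounds check; AUDITSC_ARGS is assumed here to match that six-element array, and record_socketcall() is an illustrative stand-in rather than the kernel function:

/*
 * User-space sketch of the new bounds check; AUDITSC_ARGS is assumed to
 * match the six-slot args[] array, and record_socketcall() is an
 * illustrative stand-in for __audit_socketcall().
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define AUDITSC_ARGS 6                  /* assumed: size of socketcall.args[] */

static unsigned long saved_args[AUDITSC_ARGS];
static int saved_nargs;

static int record_socketcall(int nargs, const unsigned long *args)
{
        if (nargs <= 0 || nargs > AUDITSC_ARGS || !args)
                return -EINVAL;         /* refuse instead of overrunning saved_args[] */
        saved_nargs = nargs;
        memcpy(saved_args, args, nargs * sizeof(unsigned long));
        return 0;
}

int main(void)
{
        unsigned long a[3] = { 2, 1, 6 };       /* three example socketcall arguments */

        printf("valid:    %d\n", record_socketcall(3, a));      /* 0 */
        printf("too many: %d\n", record_socketcall(9, a));      /* -EINVAL */
        return 0;
}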
index ed35345be536eefb68f1cd585b7c0213f24419b4..53b958fcd639eb2d93d9bed73cffbb9250df7c06 100644 (file)
@@ -613,10 +613,13 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
                       sizeof(*mk->mp) + sizeof(mk->mp->attrs[0]) * (num+1),
                       GFP_KERNEL);
        if (!new) {
-               kfree(mk->mp);
+               kfree(attrs);
                err = -ENOMEM;
                goto fail;
        }
+       /* Despite looking like the typical realloc() bug, this is safe.
+        * We *want* the old 'attrs' to be freed either way, and we'll store
+        * the new one in the success case. */
        attrs = krealloc(attrs, sizeof(new->grp.attrs[0])*(num+2), GFP_KERNEL);
        if (!attrs) {
                err = -ENOMEM;
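The comment added to add_sysfs_param() above flags a pattern that is normally a bug: assigning realloc()'s result over the only copy of the old pointer loses the old block if the allocation fails. Here the old attrs block is meant to be released on failure and replaced on success, so overwriting it is deliberate. A user-space sketch contrasting the classic leak with the defensive variant used when the old block must survive a failed resize:

/*
 * User-space contrast between the classic realloc() leak and the defensive
 * variant; neither function is kernel code.
 */
#include <stdlib.h>

/* Classic bug: on failure *buf becomes NULL and the old block leaks. */
static int grow_buggy(int **buf, size_t n)
{
        *buf = realloc(*buf, n * sizeof(**buf));
        return *buf ? 0 : -1;
}

/* Defensive variant: keep the old pointer until realloc() has succeeded. */
static int grow_safe(int **buf, size_t n)
{
        int *tmp = realloc(*buf, n * sizeof(**buf));

        if (!tmp)
                return -1;      /* *buf still points at the old, valid block */
        *buf = tmp;
        return 0;
}

int main(void)
{
        int *a = calloc(4, sizeof(*a));

        if (a && grow_safe(&a, 8) == 0)
                a[7] = 1;
        free(a);
        (void)grow_buggy;       /* shown only for contrast */
        return 0;
}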
index bfd6787b355a13813cb0b19c018d47a46f2bba8a..7078052284fd9eda7bae8a86805054328af7bb4b 100644 (file)
@@ -200,6 +200,7 @@ cond_syscall(sys_perf_event_open);
 /* fanotify! */
 cond_syscall(sys_fanotify_init);
 cond_syscall(sys_fanotify_mark);
+cond_syscall(compat_sys_fanotify_mark);
 
 /* open by handle */
 cond_syscall(sys_name_to_handle_at);
index ebf72358e86aec33c270edd7e8167789fea59ff9..aea4a9ea6fc845b884f84a367589a5945703e761 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/netdevice.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/compat.h>
 
 #ifdef CONFIG_SYSCTL_SYSCALL
 
@@ -1447,7 +1448,6 @@ SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
 
 
 #ifdef CONFIG_COMPAT
-#include <asm/compat.h>
 
 struct compat_sysctl_args {
        compat_uptr_t   name;
@@ -1459,7 +1459,7 @@ struct compat_sysctl_args {
        compat_ulong_t  __unused[4];
 };
 
-asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args)
+COMPAT_SYSCALL_DEFINE1(sysctl, struct compat_sysctl_args __user *, args)
 {
        struct compat_sysctl_args tmp;
        compat_size_t __user *compat_oldlenp;
index 5e9efd4b83a47fda4e70825078baabbc090a9a72..015f85aaca08f5f5d6eb55af1f1aab46670bb03b 100644 (file)
@@ -71,6 +71,7 @@ config TRACE_CLOCK
 config RING_BUFFER
        bool
        select TRACE_CLOCK
+       select IRQ_WORK
 
 config FTRACE_NMI_ENTER
        bool
@@ -107,7 +108,6 @@ config TRACING
        select BINARY_PRINTF
        select EVENT_TRACING
        select TRACE_CLOCK
-       select IRQ_WORK
 
 config GENERIC_TRACER
        bool
index 8a5c017bb50c141bfca4d8206ac2a1805d224185..b549b0f5b9771624159d8dbbf1b6a103127a842f 100644 (file)
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define INIT_REGEX_LOCK(opsname)       \
+       .regex_lock     = __MUTEX_INITIALIZER(opsname.regex_lock),
+#else
+#define INIT_REGEX_LOCK(opsname)
+#endif
+
 static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
@@ -131,6 +138,16 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
        while (likely(op = rcu_dereference_raw((op)->next)) &&  \
               unlikely((op) != &ftrace_list_end))
 
+static inline void ftrace_ops_init(struct ftrace_ops *ops)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+       if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
+               mutex_init(&ops->regex_lock);
+               ops->flags |= FTRACE_OPS_FL_INITIALIZED;
+       }
+#endif
+}
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
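ftrace_ops_init() above lazily sets up the new per-ops regex_lock: statically defined ops get the mutex via INIT_REGEX_LOCK() and the FTRACE_OPS_FL_INITIALIZED flag at build time, while dynamically created ops are initialized the first time they reach the API, with the flag preventing a second mutex_init(). A user-space sketch of that flag-guarded one-time initialization, using pthread mutexes and invented names as stand-ins:

/*
 * User-space sketch of the flag-guarded one-time initialization, with
 * pthread mutexes standing in for the kernel mutex and invented names.
 */
#include <pthread.h>
#include <stdio.h>

#define OPS_FL_INITIALIZED 0x1          /* plays the role of FTRACE_OPS_FL_INITIALIZED */

struct ops {
        unsigned int flags;
        pthread_mutex_t regex_lock;
};

/* Statically defined ops initialize the lock and flag at build time... */
static struct ops static_ops = {
        .flags          = OPS_FL_INITIALIZED,
        .regex_lock     = PTHREAD_MUTEX_INITIALIZER,
};

/* ...dynamically created ops are set up on first use, exactly once. */
static void ops_init(struct ops *ops)
{
        if (!(ops->flags & OPS_FL_INITIALIZED)) {
                pthread_mutex_init(&ops->regex_lock, NULL);
                ops->flags |= OPS_FL_INITIALIZED;
        }
}

int main(void)
{
        struct ops dynamic_ops = { .flags = 0 };

        ops_init(&dynamic_ops);         /* initializes regex_lock */
        ops_init(&dynamic_ops);         /* no-op: flag already set */
        ops_init(&static_ops);          /* no-op: initialized statically */
        printf("flags: %#x %#x\n", dynamic_ops.flags, static_ops.flags);
        return 0;
}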
@@ -907,7 +924,8 @@ static void unregister_ftrace_profiler(void)
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
-       .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1103,11 +1121,10 @@ static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .notrace_hash           = EMPTY_HASH,
        .filter_hash            = EMPTY_HASH,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(global_ops)
 };
 
-static DEFINE_MUTEX(ftrace_regex_lock);
-
 struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
@@ -1247,6 +1264,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
+       ftrace_ops_init(ops);
        free_ftrace_hash(ops->filter_hash);
        free_ftrace_hash(ops->notrace_hash);
 }
@@ -2441,7 +2459,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
 
                    ((iter->flags & FTRACE_ITER_ENABLED) &&
-                    !(rec->flags & ~FTRACE_FL_MASK))) {
+                    !(rec->flags & FTRACE_FL_ENABLED))) {
 
                        rec = NULL;
                        goto retry;
@@ -2624,6 +2642,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
        struct ftrace_hash *hash;
        int ret = 0;
 
+       ftrace_ops_init(ops);
+
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
@@ -2636,28 +2656,26 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                return -ENOMEM;
        }
 
+       iter->ops = ops;
+       iter->flags = flag;
+
+       mutex_lock(&ops->regex_lock);
+
        if (flag & FTRACE_ITER_NOTRACE)
                hash = ops->notrace_hash;
        else
                hash = ops->filter_hash;
 
-       iter->ops = ops;
-       iter->flags = flag;
-
        if (file->f_mode & FMODE_WRITE) {
-               mutex_lock(&ftrace_lock);
                iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
-               mutex_unlock(&ftrace_lock);
-
                if (!iter->hash) {
                        trace_parser_put(&iter->parser);
                        kfree(iter);
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto out_unlock;
                }
        }
 
-       mutex_lock(&ftrace_regex_lock);
-
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                ftrace_filter_reset(iter->hash);
@@ -2677,7 +2695,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                }
        } else
                file->private_data = iter;
-       mutex_unlock(&ftrace_regex_lock);
+
+ out_unlock:
+       mutex_unlock(&ops->regex_lock);
 
        return ret;
 }
@@ -2910,6 +2930,8 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_probe_ops __read_mostly =
 {
        .func           = function_trace_probe_call,
+       .flags          = FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
@@ -2919,8 +2941,12 @@ static void __enable_ftrace_function_probe(void)
        int ret;
        int i;
 
-       if (ftrace_probe_registered)
+       if (ftrace_probe_registered) {
+               /* still need to update the function call sites */
+               if (ftrace_enabled)
+                       ftrace_run_update_code(FTRACE_UPDATE_CALLS);
                return;
+       }
 
        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                struct hlist_head *hhd = &ftrace_func_hash[i];
@@ -2990,19 +3016,21 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        if (WARN_ON(not))
                return -EINVAL;
 
-       mutex_lock(&ftrace_lock);
+       mutex_lock(&trace_probe_ops.regex_lock);
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash) {
                count = -ENOMEM;
-               goto out_unlock;
+               goto out;
        }
 
        if (unlikely(ftrace_disabled)) {
                count = -ENODEV;
-               goto out_unlock;
+               goto out;
        }
 
+       mutex_lock(&ftrace_lock);
+
        do_for_each_ftrace_rec(pg, rec) {
 
                if (!ftrace_match_record(rec, NULL, search, len, type))
@@ -3056,6 +3084,8 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
  out_unlock:
        mutex_unlock(&ftrace_lock);
+ out:
+       mutex_unlock(&trace_probe_ops.regex_lock);
        free_ftrace_hash(hash);
 
        return count;
@@ -3095,7 +3125,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                        return;
        }
 
-       mutex_lock(&ftrace_lock);
+       mutex_lock(&trace_probe_ops.regex_lock);
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash)
@@ -3133,6 +3163,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                        list_add(&entry->free_list, &free_list);
                }
        }
+       mutex_lock(&ftrace_lock);
        __disable_ftrace_function_probe();
        /*
         * Remove after the disable is called. Otherwise, if the last
@@ -3144,9 +3175,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                list_del(&entry->free_list);
                ftrace_free_entry(entry);
        }
+       mutex_unlock(&ftrace_lock);
                
  out_unlock:
-       mutex_unlock(&ftrace_lock);
+       mutex_unlock(&trace_probe_ops.regex_lock);
        free_ftrace_hash(hash);
 }
 
@@ -3256,18 +3288,17 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
        if (!cnt)
                return 0;
 
-       mutex_lock(&ftrace_regex_lock);
-
-       ret = -ENODEV;
-       if (unlikely(ftrace_disabled))
-               goto out_unlock;
-
        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;
 
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       /* iter->hash is a local copy, so we don't need regex_lock */
+
        parser = &iter->parser;
        read = trace_get_user(parser, ubuf, cnt, ppos);
 
@@ -3276,14 +3307,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
                ret = ftrace_process_regex(iter->hash, parser->buffer,
                                           parser->idx, enable);
                trace_parser_clear(parser);
-               if (ret)
-                       goto out_unlock;
+               if (ret < 0)
+                       goto out;
        }
 
        ret = read;
-out_unlock:
-       mutex_unlock(&ftrace_regex_lock);
-
+ out:
        return ret;
 }
 
@@ -3335,16 +3364,19 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
+       mutex_lock(&ops->regex_lock);
+
        if (enable)
                orig_hash = &ops->filter_hash;
        else
                orig_hash = &ops->notrace_hash;
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
-       if (!hash)
-               return -ENOMEM;
+       if (!hash) {
+               ret = -ENOMEM;
+               goto out_regex_unlock;
+       }
 
-       mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(hash);
        if (buf && !ftrace_match_records(hash, buf, len)) {
@@ -3366,7 +3398,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-       mutex_unlock(&ftrace_regex_lock);
+       mutex_unlock(&ops->regex_lock);
 
        free_ftrace_hash(hash);
        return ret;
@@ -3392,6 +3424,7 @@ ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_addr(ops, ip, remove, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
@@ -3416,6 +3449,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_regex(ops, buf, len, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter);
@@ -3434,6 +3468,7 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                        int len, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_regex(ops, buf, len, reset, 0);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
@@ -3524,6 +3559,8 @@ ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
 {
        char *func;
 
+       ftrace_ops_init(ops);
+
        while (buf) {
                func = strsep(&buf, ",");
                ftrace_set_regex(ops, func, strlen(func), 0, enable);
@@ -3551,10 +3588,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
        int filter_hash;
        int ret;
 
-       mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;
-
                seq_release(inode, file);
        } else
                iter = file->private_data;
@@ -3567,6 +3602,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        trace_parser_put(parser);
 
+       mutex_lock(&iter->ops->regex_lock);
+
        if (file->f_mode & FMODE_WRITE) {
                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
@@ -3584,10 +3621,11 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
                mutex_unlock(&ftrace_lock);
        }
+
+       mutex_unlock(&iter->ops->regex_lock);
        free_ftrace_hash(iter->hash);
        kfree(iter);
 
-       mutex_unlock(&ftrace_regex_lock);
        return 0;
 }
 
@@ -4126,7 +4164,8 @@ void __init ftrace_init(void)
 
 static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4180,8 +4219,9 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 }
 
 static struct ftrace_ops control_ops = {
-       .func = ftrace_ops_control_func,
-       .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+       .func   = ftrace_ops_control_func,
+       .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(control_ops)
 };
 
 static inline void
@@ -4539,6 +4579,8 @@ int register_ftrace_function(struct ftrace_ops *ops)
 {
        int ret = -1;
 
+       ftrace_ops_init(ops);
+
        mutex_lock(&ftrace_lock);
 
        ret = __register_ftrace_function(ops);
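
The ftrace.c hunks above replace the single global ftrace_regex_lock with a per-ftrace_ops regex_lock: statically defined ops are initialized at build time via INIT_REGEX_LOCK() plus FTRACE_OPS_FL_INITIALIZED, and the exported entry points (ftrace_set_filter(), ftrace_set_notrace(), ftrace_set_filter_ip(), ftrace_regex_open(), register_ftrace_function(), ...) call ftrace_ops_init() so a caller-allocated ops gets its mutex on first use. A minimal caller-side sketch, assuming made-up names my_ops/my_callback/my_setup; filter updates for different ops now serialize on their own lock rather than on one global mutex:

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/string.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct pt_regs *regs)
{
        /* callback body is irrelevant to the locking change */
}

static struct ftrace_ops my_ops = {
        .func = my_callback,
};

static int __init my_setup(void)
{
        /* ftrace_set_filter() calls ftrace_ops_init() internally, so
         * my_ops.regex_lock is created here on first use and the hash
         * update is serialized per-ops, not globally. */
        ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
        return register_ftrace_function(&my_ops);
}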
index 53582e982e51673e7294080097e9acc7b2bdf8f2..7a0cf68027ccf62118d4eb1da83ec00af3a0b49b 100644 (file)
@@ -251,7 +251,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
        switch (enable) {
        case 0:
                /*
-                * When soft_disable is set and enable is cleared, we want
+                * When soft_disable is set and enable is cleared, the sm_ref
+                * reference counter is decremented. If it reaches 0, we want
                 * to clear the SOFT_DISABLED flag but leave the event in the
                 * state that it was. That is, if the event was enabled and
                 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
@@ -263,6 +264,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
                 */
                if (soft_disable) {
+                       if (atomic_dec_return(&file->sm_ref) > 0)
+                               break;
                        disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
                        clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
                } else
@@ -291,8 +294,11 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                 */
                if (!soft_disable)
                        clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
-               else
+               else {
+                       if (atomic_inc_return(&file->sm_ref) > 1)
+                               break;
                        set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+               }
 
                if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
 
@@ -623,6 +629,8 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
        if (file->flags & FTRACE_EVENT_FL_ENABLED) {
                if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
                        buf = "0*\n";
+               else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+                       buf = "1*\n";
                else
                        buf = "1\n";
        } else
@@ -1521,6 +1529,24 @@ __register_event(struct ftrace_event_call *call, struct module *mod)
        return 0;
 }
 
+static struct ftrace_event_file *
+trace_create_new_event(struct ftrace_event_call *call,
+                      struct trace_array *tr)
+{
+       struct ftrace_event_file *file;
+
+       file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+       if (!file)
+               return NULL;
+
+       file->event_call = call;
+       file->tr = tr;
+       atomic_set(&file->sm_ref, 0);
+       list_add(&file->list, &tr->events);
+
+       return file;
+}
+
 /* Add an event to a trace directory */
 static int
 __trace_add_new_event(struct ftrace_event_call *call,
@@ -1532,14 +1558,10 @@ __trace_add_new_event(struct ftrace_event_call *call,
 {
        struct ftrace_event_file *file;
 
-       file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+       file = trace_create_new_event(call, tr);
        if (!file)
                return -ENOMEM;
 
-       file->event_call = call;
-       file->tr = tr;
-       list_add(&file->list, &tr->events);
-
        return event_create_dir(tr->event_dir, file, id, enable, filter, format);
 }
 
@@ -1554,14 +1576,10 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
 {
        struct ftrace_event_file *file;
 
-       file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+       file = trace_create_new_event(call, tr);
        if (!file)
                return -ENOMEM;
 
-       file->event_call = call;
-       file->tr = tr;
-       list_add(&file->list, &tr->events);
-
        return 0;
 }
 
@@ -2061,8 +2079,18 @@ event_enable_func(struct ftrace_hash *hash,
        if (ret < 0)
                goto out_put;
        ret = register_ftrace_function_probe(glob, ops, data);
-       if (!ret)
+       /*
+        * The above returns on success the # of functions enabled,
+        * but if it didn't find any functions it returns zero.
+        * Consider no functions a failure too.
+        */
+       if (!ret) {
+               ret = -ENOENT;
+               goto out_disable;
+       } else if (ret < 0)
                goto out_disable;
+       /* Just return zero, not the number of enabled functions */
+       ret = 0;
  out:
        mutex_unlock(&event_mutex);
        return ret;
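
The sm_ref counter added above makes soft mode reference counted, so two independent soft-enables of the same event (for example from different enable_event/disable_event triggers) keep SOFT_MODE set until the last user goes away, and event_enable_read() now reports that state as "1*". The two branches of __ftrace_event_enable_disable(), reduced to a standalone sketch (soft_mode_get/soft_mode_put are hypothetical helper names):

#include <linux/atomic.h>
#include <linux/ftrace_event.h>

static void soft_mode_get(struct ftrace_event_file *file)
{
        if (atomic_inc_return(&file->sm_ref) > 1)
                return;                         /* already in soft mode */
        set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
}

static void soft_mode_put(struct ftrace_event_file *file)
{
        if (atomic_dec_return(&file->sm_ref) > 0)
                return;                         /* other users remain */
        clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
}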
index 1865d5f765387f55600d063a633e6a7e79bcae3c..636d45fe69b37a80eefafe6b3aa6d46220f132dd 100644 (file)
@@ -27,7 +27,6 @@
 /**
  * Kprobe event core functions
  */
-
 struct trace_probe {
        struct list_head        list;
        struct kretprobe        rp;     /* Use rp.kp for kprobe use */
@@ -36,6 +35,7 @@ struct trace_probe {
        const char              *symbol;        /* symbol name */
        struct ftrace_event_class       class;
        struct ftrace_event_call        call;
+       struct ftrace_event_file        **files;
        ssize_t                 size;           /* trace entry size */
        unsigned int            nr_args;
        struct probe_arg        args[];
@@ -46,7 +46,7 @@ struct trace_probe {
        (sizeof(struct probe_arg) * (n)))
 
 
-static __kprobes int trace_probe_is_return(struct trace_probe *tp)
+static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
 {
        return tp->rp.handler != NULL;
 }
@@ -183,12 +183,57 @@ static struct trace_probe *find_trace_probe(const char *event,
        return NULL;
 }
 
-/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static int enable_trace_probe(struct trace_probe *tp, int flag)
+static int trace_probe_nr_files(struct trace_probe *tp)
+{
+       struct ftrace_event_file **file = tp->files;
+       int ret = 0;
+
+       if (file)
+               while (*(file++))
+                       ret++;
+
+       return ret;
+}
+
+static DEFINE_MUTEX(probe_enable_lock);
+
+/*
+ * Enable trace_probe
+ * if the file is NULL, enable "perf" handler, or enable "trace" handler.
+ */
+static int
+enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
        int ret = 0;
 
-       tp->flags |= flag;
+       mutex_lock(&probe_enable_lock);
+
+       if (file) {
+               struct ftrace_event_file **new, **old = tp->files;
+               int n = trace_probe_nr_files(tp);
+
+               /* 1 is for new one and 1 is for stopper */
+               new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
+                             GFP_KERNEL);
+               if (!new) {
+                       ret = -ENOMEM;
+                       goto out_unlock;
+               }
+               memcpy(new, old, n * sizeof(struct ftrace_event_file *));
+               new[n] = file;
+               /* The last one keeps a NULL */
+
+               rcu_assign_pointer(tp->files, new);
+               tp->flags |= TP_FLAG_TRACE;
+
+               if (old) {
+                       /* Make sure the probe is done with old files */
+                       synchronize_sched();
+                       kfree(old);
+               }
+       } else
+               tp->flags |= TP_FLAG_PROFILE;
+
        if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
            !trace_probe_has_gone(tp)) {
                if (trace_probe_is_return(tp))
@@ -197,19 +242,83 @@ static int enable_trace_probe(struct trace_probe *tp, int flag)
                        ret = enable_kprobe(&tp->rp.kp);
        }
 
+ out_unlock:
+       mutex_unlock(&probe_enable_lock);
+
        return ret;
 }
 
-/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static void disable_trace_probe(struct trace_probe *tp, int flag)
+static int
+trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
+{
+       int i;
+
+       if (tp->files) {
+               for (i = 0; tp->files[i]; i++)
+                       if (tp->files[i] == file)
+                               return i;
+       }
+
+       return -1;
+}
+
+/*
+ * Disable trace_probe
+ * if the file is NULL, disable "perf" handler, or disable "trace" handler.
+ */
+static int
+disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
-       tp->flags &= ~flag;
+       int ret = 0;
+
+       mutex_lock(&probe_enable_lock);
+
+       if (file) {
+               struct ftrace_event_file **new, **old = tp->files;
+               int n = trace_probe_nr_files(tp);
+               int i, j;
+
+               if (n == 0 || trace_probe_file_index(tp, file) < 0) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+
+               if (n == 1) {   /* Remove the last file */
+                       tp->flags &= ~TP_FLAG_TRACE;
+                       new = NULL;
+               } else {
+                       new = kzalloc(n * sizeof(struct ftrace_event_file *),
+                                     GFP_KERNEL);
+                       if (!new) {
+                               ret = -ENOMEM;
+                               goto out_unlock;
+                       }
+
+                       /* This copy & check loop copies the NULL stopper too */
+                       for (i = 0, j = 0; j < n && i < n + 1; i++)
+                               if (old[i] != file)
+                                       new[j++] = old[i];
+               }
+
+               rcu_assign_pointer(tp->files, new);
+
+               /* Make sure the probe is done with old files */
+               synchronize_sched();
+               kfree(old);
+       } else
+               tp->flags &= ~TP_FLAG_PROFILE;
+
        if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
                if (trace_probe_is_return(tp))
                        disable_kretprobe(&tp->rp);
                else
                        disable_kprobe(&tp->rp.kp);
        }
+
+ out_unlock:
+       mutex_unlock(&probe_enable_lock);
+
+       return ret;
 }
 
 /* Internal register function - just handle k*probes and flags */
@@ -723,9 +832,10 @@ static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
 }
 
 /* Kprobe handler */
-static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
+static __kprobes void
+__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
+                   struct ftrace_event_file *ftrace_file)
 {
-       struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
        struct kprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
@@ -733,7 +843,10 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
        unsigned long irq_flags;
        struct ftrace_event_call *call = &tp->call;
 
-       tp->nhit++;
+       WARN_ON(call != ftrace_file->event_call);
+
+       if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+               return;
 
        local_save_flags(irq_flags);
        pc = preempt_count();
@@ -741,13 +854,14 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
        dsize = __get_data_size(tp, regs);
        size = sizeof(*entry) + tp->size + dsize;
 
-       event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-                                                 size, irq_flags, pc);
+       event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+                                               call->event.type,
+                                               size, irq_flags, pc);
        if (!event)
                return;
 
        entry = ring_buffer_event_data(event);
-       entry->ip = (unsigned long)kp->addr;
+       entry->ip = (unsigned long)tp->rp.kp.addr;
        store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
        if (!filter_current_check_discard(buffer, call, entry, event))
@@ -755,11 +869,24 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
                                                irq_flags, pc, regs);
 }
 
+static __kprobes void
+kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
+{
+       struct ftrace_event_file **file = tp->files;
+
+       /* Note: preempt is already disabled around the kprobe handler */
+       while (*file) {
+               __kprobe_trace_func(tp, regs, *file);
+               file++;
+       }
+}
+
 /* Kretprobe handler */
-static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
-                                         struct pt_regs *regs)
+static __kprobes void
+__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                      struct pt_regs *regs,
+                      struct ftrace_event_file *ftrace_file)
 {
-       struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
        struct kretprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
@@ -767,14 +894,20 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
        unsigned long irq_flags;
        struct ftrace_event_call *call = &tp->call;
 
+       WARN_ON(call != ftrace_file->event_call);
+
+       if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+               return;
+
        local_save_flags(irq_flags);
        pc = preempt_count();
 
        dsize = __get_data_size(tp, regs);
        size = sizeof(*entry) + tp->size + dsize;
 
-       event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-                                                 size, irq_flags, pc);
+       event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+                                               call->event.type,
+                                               size, irq_flags, pc);
        if (!event)
                return;
 
@@ -788,6 +921,19 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
                                                irq_flags, pc, regs);
 }
 
+static __kprobes void
+kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                    struct pt_regs *regs)
+{
+       struct ftrace_event_file **file = tp->files;
+
+       /* Note: preempt is already disabled around the kprobe handler */
+       while (*file) {
+               __kretprobe_trace_func(tp, ri, regs, *file);
+               file++;
+       }
+}
+
 /* Event entry printers */
 enum print_line_t
 print_kprobe_event(struct trace_iterator *iter, int flags,
@@ -975,10 +1121,9 @@ static int set_print_fmt(struct trace_probe *tp)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_perf_func(struct kprobe *kp,
-                                        struct pt_regs *regs)
+static __kprobes void
+kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-       struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
        struct ftrace_event_call *call = &tp->call;
        struct kprobe_trace_entry_head *entry;
        struct hlist_head *head;
@@ -997,7 +1142,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
        if (!entry)
                return;
 
-       entry->ip = (unsigned long)kp->addr;
+       entry->ip = (unsigned long)tp->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
@@ -1007,10 +1152,10 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
-                                           struct pt_regs *regs)
+static __kprobes void
+kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                   struct pt_regs *regs)
 {
-       struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
        struct ftrace_event_call *call = &tp->call;
        struct kretprobe_trace_entry_head *entry;
        struct hlist_head *head;
@@ -1044,20 +1189,19 @@ int kprobe_register(struct ftrace_event_call *event,
                    enum trace_reg type, void *data)
 {
        struct trace_probe *tp = (struct trace_probe *)event->data;
+       struct ftrace_event_file *file = data;
 
        switch (type) {
        case TRACE_REG_REGISTER:
-               return enable_trace_probe(tp, TP_FLAG_TRACE);
+               return enable_trace_probe(tp, file);
        case TRACE_REG_UNREGISTER:
-               disable_trace_probe(tp, TP_FLAG_TRACE);
-               return 0;
+               return disable_trace_probe(tp, file);
 
 #ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
-               return enable_trace_probe(tp, TP_FLAG_PROFILE);
+               return enable_trace_probe(tp, NULL);
        case TRACE_REG_PERF_UNREGISTER:
-               disable_trace_probe(tp, TP_FLAG_PROFILE);
-               return 0;
+               return disable_trace_probe(tp, NULL);
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
@@ -1073,11 +1217,13 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 {
        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 
+       tp->nhit++;
+
        if (tp->flags & TP_FLAG_TRACE)
-               kprobe_trace_func(kp, regs);
+               kprobe_trace_func(tp, regs);
 #ifdef CONFIG_PERF_EVENTS
        if (tp->flags & TP_FLAG_PROFILE)
-               kprobe_perf_func(kp, regs);
+               kprobe_perf_func(tp, regs);
 #endif
        return 0;       /* We don't tweek kernel, so just return 0 */
 }
@@ -1087,11 +1233,13 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 
+       tp->nhit++;
+
        if (tp->flags & TP_FLAG_TRACE)
-               kretprobe_trace_func(ri, regs);
+               kretprobe_trace_func(tp, ri, regs);
 #ifdef CONFIG_PERF_EVENTS
        if (tp->flags & TP_FLAG_PROFILE)
-               kretprobe_perf_func(ri, regs);
+               kretprobe_perf_func(tp, ri, regs);
 #endif
        return 0;       /* We don't tweek kernel, so just return 0 */
 }
@@ -1189,11 +1337,24 @@ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
        return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
+static struct ftrace_event_file *
+find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
+{
+       struct ftrace_event_file *file;
+
+       list_for_each_entry(file, &tr->events, list)
+               if (file->event_call == &tp->call)
+                       return file;
+
+       return NULL;
+}
+
 static __init int kprobe_trace_self_tests_init(void)
 {
        int ret, warn = 0;
        int (*target)(int, int, int, int, int, int);
        struct trace_probe *tp;
+       struct ftrace_event_file *file;
 
        target = kprobe_trace_selftest_target;
 
@@ -1203,31 +1364,43 @@ static __init int kprobe_trace_self_tests_init(void)
                                  "$stack $stack0 +0($stack)",
                                  create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on probing function entry.\n");
+               pr_warn("error on probing function entry.\n");
                warn++;
        } else {
                /* Enable trace point */
                tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tp == NULL)) {
-                       pr_warning("error on getting new probe.\n");
+                       pr_warn("error on getting new probe.\n");
                        warn++;
-               } else
-                       enable_trace_probe(tp, TP_FLAG_TRACE);
+               } else {
+                       file = find_trace_probe_file(tp, top_trace_array());
+                       if (WARN_ON_ONCE(file == NULL)) {
+                               pr_warn("error on getting probe file.\n");
+                               warn++;
+                       } else
+                               enable_trace_probe(tp, file);
+               }
        }
 
        ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
                                  "$retval", create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on probing function return.\n");
+               pr_warn("error on probing function return.\n");
                warn++;
        } else {
                /* Enable trace point */
                tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tp == NULL)) {
-                       pr_warning("error on getting new probe.\n");
+                       pr_warn("error on getting 2nd new probe.\n");
                        warn++;
-               } else
-                       enable_trace_probe(tp, TP_FLAG_TRACE);
+               } else {
+                       file = find_trace_probe_file(tp, top_trace_array());
+                       if (WARN_ON_ONCE(file == NULL)) {
+                               pr_warn("error on getting probe file.\n");
+                               warn++;
+                       } else
+                               enable_trace_probe(tp, file);
+               }
        }
 
        if (warn)
@@ -1238,27 +1411,39 @@ static __init int kprobe_trace_self_tests_init(void)
        /* Disable trace points before removing it */
        tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tp == NULL)) {
-               pr_warning("error on getting test probe.\n");
+               pr_warn("error on getting test probe.\n");
                warn++;
-       } else
-               disable_trace_probe(tp, TP_FLAG_TRACE);
+       } else {
+               file = find_trace_probe_file(tp, top_trace_array());
+               if (WARN_ON_ONCE(file == NULL)) {
+                       pr_warn("error on getting probe file.\n");
+                       warn++;
+               } else
+                       disable_trace_probe(tp, file);
+       }
 
        tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tp == NULL)) {
-               pr_warning("error on getting 2nd test probe.\n");
+               pr_warn("error on getting 2nd test probe.\n");
                warn++;
-       } else
-               disable_trace_probe(tp, TP_FLAG_TRACE);
+       } else {
+               file = find_trace_probe_file(tp, top_trace_array());
+               if (WARN_ON_ONCE(file == NULL)) {
+                       pr_warn("error on getting probe file.\n");
+                       warn++;
+               } else
+                       disable_trace_probe(tp, file);
+       }
 
        ret = traceprobe_command("-:testprobe", create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on deleting a probe.\n");
+               pr_warn("error on deleting a probe.\n");
                warn++;
        }
 
        ret = traceprobe_command("-:testprobe2", create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on deleting a probe.\n");
+               pr_warn("error on deleting a probe.\n");
                warn++;
        }
 
index b416093997dafcf528204be9c99a9986dfe7965d..6b94633ca61d67e29faa58a7d42410e1ef678193 100644 (file)
@@ -2412,7 +2412,7 @@ static const unsigned char nargs[21] = {
 
 SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
 {
-       unsigned long a[6];
+       unsigned long a[AUDITSC_ARGS];
        unsigned long a0, a1;
        int err;
        unsigned int len;
@@ -2428,7 +2428,9 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
        if (copy_from_user(a, args, len))
                return -EFAULT;
 
-       audit_socketcall(nargs[call] / sizeof(unsigned long), a);
+       err = audit_socketcall(nargs[call] / sizeof(unsigned long), a);
+       if (err)
+               return err;
 
        a0 = a[0];
        a1 = a[1];
index 5c4c61d527e2dbc1752e1d2ddbfc04417e4fc286..357f613df7ff49d3460e37d3f338cb89c7029a69 100644 (file)
 #include <linux/sunrpc/svcauth.h>
 #include "gss_rpc_xdr.h"
 
-static bool gssx_check_pointer(struct xdr_stream *xdr)
-{
-       __be32 *p;
-
-       p = xdr_reserve_space(xdr, 4);
-       if (unlikely(p == NULL))
-               return -ENOSPC;
-       return *p?true:false;
-}
-
 static int gssx_enc_bool(struct xdr_stream *xdr, int v)
 {
        __be32 *p;
@@ -264,25 +254,27 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
        if (unlikely(p == NULL))
                return -ENOSPC;
        count = be32_to_cpup(p++);
-       if (count != 0) {
-               /* we recognize only 1 currently: CREDS_VALUE */
-               oa->count = 1;
+       if (!count)
+               return 0;
 
-               oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL);
-               if (!oa->data)
-                       return -ENOMEM;
+       /* we recognize only 1 currently: CREDS_VALUE */
+       oa->count = 1;
 
-               creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
-               if (!creds) {
-                       kfree(oa->data);
-                       return -ENOMEM;
-               }
+       oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL);
+       if (!oa->data)
+               return -ENOMEM;
 
-               oa->data[0].option.data = CREDS_VALUE;
-               oa->data[0].option.len = sizeof(CREDS_VALUE);
-               oa->data[0].value.data = (void *)creds;
-               oa->data[0].value.len = 0;
+       creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
+       if (!creds) {
+               kfree(oa->data);
+               return -ENOMEM;
        }
+
+       oa->data[0].option.data = CREDS_VALUE;
+       oa->data[0].option.len = sizeof(CREDS_VALUE);
+       oa->data[0].value.data = (void *)creds;
+       oa->data[0].value.len = 0;
+
        for (i = 0; i < count; i++) {
                gssx_buffer dummy = { 0, NULL };
                u32 length;
@@ -800,6 +792,7 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
                                struct xdr_stream *xdr,
                                struct gssx_res_accept_sec_context *res)
 {
+       u32 value_follows;
        int err;
 
        /* res->status */
@@ -808,7 +801,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
                return err;
 
        /* res->context_handle */
-       if (gssx_check_pointer(xdr)) {
+       err = gssx_dec_bool(xdr, &value_follows);
+       if (err)
+               return err;
+       if (value_follows) {
                err = gssx_dec_ctx(xdr, res->context_handle);
                if (err)
                        return err;
@@ -817,7 +813,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
        }
 
        /* res->output_token */
-       if (gssx_check_pointer(xdr)) {
+       err = gssx_dec_bool(xdr, &value_follows);
+       if (err)
+               return err;
+       if (value_follows) {
                err = gssx_dec_buffer(xdr, res->output_token);
                if (err)
                        return err;
@@ -826,7 +825,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
        }
 
        /* res->delegated_cred_handle */
-       if (gssx_check_pointer(xdr)) {
+       err = gssx_dec_bool(xdr, &value_follows);
+       if (err)
+               return err;
+       if (value_follows) {
                /* we do not support upcall servers sending this data. */
                return -EINVAL;
        }
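
The removed gssx_check_pointer() read the XDR "value follows" flag of an optional field with xdr_reserve_space(), which is the encode-side primitive, and its -ENOSPC error was silently truncated into the bool return value; the hunks above decode the flag explicitly and check the error before touching the optional value. A hedged sketch of such a decode helper using the generic sunrpc stream API (dec_value_follows() is a hypothetical name and not necessarily how gssx_dec_bool() is implemented):

#include <linux/sunrpc/xdr.h>

static int dec_value_follows(struct xdr_stream *xdr, u32 *flag)
{
        __be32 *p;

        p = xdr_inline_decode(xdr, 4);          /* decode side, not reserve */
        if (unlikely(p == NULL))
                return -ENOSPC;
        *flag = be32_to_cpup(p);
        return 0;
}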
index 071ce1b5f2b40bb7b6d1558ee28f42fc1a99dd96..872d59e35ee23583cc60a2be24afa5ba1bf3c88d 100644 (file)
@@ -583,8 +583,6 @@ static int atmel_abdac_remove(struct platform_device *pdev)
        free_irq(dac->irq, dac);
        snd_card_free(card);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 6b7e2b5a72dead0fe1e542fa47460e3184929a0d..ae63d22c0f883e48ddb0b37ee6aad40ac6f8943c 100644 (file)
@@ -1199,8 +1199,6 @@ static int atmel_ac97c_remove(struct platform_device *pdev)
        snd_card_set_dev(card, NULL);
        snd_card_free(card);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 7420c59444ab41c442cb774be05f869019c5fc86..2b7f6e8bdd24343bd41ce854bb93383815386d6b 100644 (file)
@@ -922,7 +922,6 @@ static int hal2_remove(struct platform_device *pdev)
        struct snd_card *card = platform_get_drvdata(pdev);
 
        snd_card_free(card);
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index 01a03efdc8b042059668e24274ac25799af63029..cfe99ae149fed9f55d70dc10f23e12e59fa4ee34 100644 (file)
@@ -963,7 +963,6 @@ static int snd_sgio2audio_remove(struct platform_device *pdev)
        struct snd_card *card = platform_get_drvdata(pdev);
 
        snd_card_free(card);
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index 5849b129e50d2c73802f53369bbae3776cf4b08d..51c4ba95a32d3961375a593d41afe538ab787760 100644 (file)
@@ -250,6 +250,7 @@ config MSND_FIFOSIZE
 menuconfig SOUND_OSS
        tristate "OSS sound modules"
        depends on ISA_DMA_API && VIRT_TO_BUS
+       depends on !ISA_DMA_SUPPORT_BROKEN
        help
          OSS is the Open Sound System suite of sound card drivers.  They make
          sound programming easier since they provide a common API.  Say Y or
index 6f9b64700f6e48ec859bb7153a5bc119812062b4..55108b5fb2919c597177d9dc5f28b56cb34d28a2 100644 (file)
@@ -681,6 +681,9 @@ int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
        struct hda_bus_unsolicited *unsol;
        unsigned int wp;
 
+       if (!bus || !bus->workq)
+               return 0;
+
        trace_hda_unsol_event(bus, res, res_ex);
        unsol = bus->unsol;
        if (!unsol)
@@ -1580,7 +1583,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
                    "NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n",
                    nid, stream_tag, channel_id, format);
        p = get_hda_cvt_setup(codec, nid);
-       if (!p || p->active)
+       if (!p)
                return;
 
        if (codec->pcm_format_first)
@@ -1627,7 +1630,7 @@ void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid,
 
        snd_printdd("hda_codec_cleanup_stream: NID=0x%x\n", nid);
        p = get_hda_cvt_setup(codec, nid);
-       if (p && p->active) {
+       if (p) {
                /* here we just clear the active flag when do_now isn't set;
                 * actual clean-ups will be done later in
                 * purify_inactive_streams() called from snd_hda_codec_prpapre()
index 7b213d589ef654ba27b7056a4dd77b5b091de49c..de18722c487346858783fa5d5e9f9fd574b70b39 100644 (file)
@@ -615,7 +615,7 @@ enum {
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
        (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\
-        AZX_DCAPS_ALIGN_BUFSIZE)
+        AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT)
 
 #define AZX_DCAPS_PRESET_CTHDA \
        (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
index 84b81c874a4a10413eaa807443b27330117463e5..b314d3e6d7fae5d0a576ccbe14eb45c54dfde2f9 100644 (file)
@@ -64,6 +64,7 @@ struct conexant_spec {
        /* extra EAPD pins */
        unsigned int num_eapds;
        hda_nid_t eapds[4];
+       bool dynamic_eapd;
 
 #ifdef ENABLE_CXT_STATIC_QUIRKS
        const struct snd_kcontrol_new *mixers[5];
@@ -3155,7 +3156,7 @@ static void cx_auto_parse_eapd(struct hda_codec *codec)
         * thus it might control over all pins.
         */
        if (spec->num_eapds > 2)
-               spec->gen.own_eapd_ctl = 1;
+               spec->dynamic_eapd = 1;
 }
 
 static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins,
@@ -3194,10 +3195,19 @@ static int cx_auto_build_controls(struct hda_codec *codec)
        return 0;
 }
 
+static int cx_auto_init(struct hda_codec *codec)
+{
+       struct conexant_spec *spec = codec->spec;
+       snd_hda_gen_init(codec);
+       if (!spec->dynamic_eapd)
+               cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true);
+       return 0;
+}
+
 static const struct hda_codec_ops cx_auto_patch_ops = {
        .build_controls = cx_auto_build_controls,
        .build_pcms = snd_hda_gen_build_pcms,
-       .init = snd_hda_gen_init,
+       .init = cx_auto_init,
        .free = snd_hda_gen_free,
        .unsol_event = snd_hda_jack_unsol_event,
 #ifdef CONFIG_PM
@@ -3348,7 +3358,8 @@ static int patch_conexant_auto(struct hda_codec *codec)
 
        cx_auto_parse_beep(codec);
        cx_auto_parse_eapd(codec);
-       if (spec->gen.own_eapd_ctl)
+       spec->gen.own_eapd_ctl = 1;
+       if (spec->dynamic_eapd)
                spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook;
 
        switch (codec->vendor_id) {
index 32930e66885452f0c32ad483842cb9f8b1a57701..e12f7a030c58efae2c5a93829ec6bfce1fb17ac7 100644 (file)
@@ -1832,12 +1832,10 @@ static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
 #define INTEL_EN_ALL_PIN_CVTS  0x01 /* enable 2nd & 3rd pins and convertors */
 
 static void intel_haswell_enable_all_pins(struct hda_codec *codec,
-                                       const struct hda_fixup *fix, int action)
+                                         bool update_tree)
 {
        unsigned int vendor_param;
 
-       if (action != HDA_FIXUP_ACT_PRE_PROBE)
-               return;
        vendor_param = snd_hda_codec_read(codec, INTEL_VENDOR_NID, 0,
                                INTEL_GET_VENDOR_VERB, 0);
        if (vendor_param == -1 || vendor_param & INTEL_EN_ALL_PIN_CVTS)
@@ -1849,8 +1847,8 @@ static void intel_haswell_enable_all_pins(struct hda_codec *codec,
        if (vendor_param == -1)
                return;
 
-       snd_hda_codec_update_widgets(codec);
-       return;
+       if (update_tree)
+               snd_hda_codec_update_widgets(codec);
 }
 
 static void intel_haswell_fixup_enable_dp12(struct hda_codec *codec)
@@ -1868,30 +1866,20 @@ static void intel_haswell_fixup_enable_dp12(struct hda_codec *codec)
                                INTEL_SET_VENDOR_VERB, vendor_param);
 }
 
+/* Haswell needs to re-issue the vendor-specific verbs before turning to D0.
+ * Otherwise you may get severe h/w communication errors.
+ */
+static void haswell_set_power_state(struct hda_codec *codec, hda_nid_t fg,
+                               unsigned int power_state)
+{
+       if (power_state == AC_PWRST_D0) {
+               intel_haswell_enable_all_pins(codec, false);
+               intel_haswell_fixup_enable_dp12(codec);
+       }
 
-
-/* available models for fixup */
-enum {
-       INTEL_HASWELL,
-};
-
-static const struct hda_model_fixup hdmi_models[] = {
-       {.id = INTEL_HASWELL, .name = "Haswell"},
-       {}
-};
-
-static const struct snd_pci_quirk hdmi_fixup_tbl[] = {
-       SND_PCI_QUIRK(0x8086, 0x2010, "Haswell", INTEL_HASWELL),
-       {} /* terminator */
-};
-
-static const struct hda_fixup hdmi_fixups[] = {
-       [INTEL_HASWELL] = {
-               .type = HDA_FIXUP_FUNC,
-               .v.func = intel_haswell_enable_all_pins,
-       },
-};
-
+       snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE, power_state);
+       snd_hda_codec_set_power_to_all(codec, fg, power_state);
+}
 
 static int patch_generic_hdmi(struct hda_codec *codec)
 {
@@ -1904,11 +1892,10 @@ static int patch_generic_hdmi(struct hda_codec *codec)
        codec->spec = spec;
        hdmi_array_init(spec, 4);
 
-       snd_hda_pick_fixup(codec, hdmi_models, hdmi_fixup_tbl, hdmi_fixups);
-       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
-
-       if (codec->vendor_id == 0x80862807)
+       if (codec->vendor_id == 0x80862807) {
+               intel_haswell_enable_all_pins(codec, true);
                intel_haswell_fixup_enable_dp12(codec);
+       }
 
        if (hdmi_parse_codec(codec) < 0) {
                codec->spec = NULL;
@@ -1916,6 +1903,9 @@ static int patch_generic_hdmi(struct hda_codec *codec)
                return -EINVAL;
        }
        codec->patch_ops = generic_hdmi_patch_ops;
+       if (codec->vendor_id == 0x80862807)
+               codec->patch_ops.set_power_state = haswell_set_power_state;
+
        generic_hdmi_init_per_pins(codec);
 
        init_channel_allocations();
index 14094f558e031999a9e791843d95fd415296f36c..1eb152cb10970d06a09b98995871581105e6fa51 100644 (file)
@@ -2882,6 +2882,7 @@ static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream,
                default:
                        return 0;
                }
+               break;
        default:
                return 0;
        }
index 8b85049daab08aa0592760cb3c4f67f2ab5bf7f0..56ecfc72f2e9500ebda81214d972b65f291cd518 100644 (file)
@@ -505,7 +505,10 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
                mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
                mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
 
-               mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, ACLKX | AFSX);
+               mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+                               ACLKX | ACLKR);
+               mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+                               AFSX | AFSR);
                break;
        case SND_SOC_DAIFMT_CBM_CFS:
                /* codec is clock master and frame slave */
@@ -565,7 +568,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
                mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
                mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
 
-               mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+               mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
                mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
                break;
 
index 21779a6a781a26ddf8a860588a468ac52ef12780..a80c883bb8be29eeba512d86e46af833d472c833 100644 (file)
@@ -1095,9 +1095,9 @@ int dapm_clock_event(struct snd_soc_dapm_widget *w,
 
 #ifdef CONFIG_HAVE_CLK
        if (SND_SOC_DAPM_EVENT_ON(event)) {
-               return clk_enable(w->clk);
+               return clk_prepare_enable(w->clk);
        } else {
-               clk_disable(w->clk);
+               clk_disable_unprepare(w->clk);
                return 0;
        }
 #endif
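
With the common clock framework, clk_enable() and clk_disable() may only be called on a clock that is already prepared; switching the DAPM clock widget to the combined helpers makes the event path do both halves. The pairing, as a minimal sketch (clock_on/clock_off are illustrative wrappers, not DAPM functions):

#include <linux/clk.h>

static int clock_on(struct clk *clk)
{
        return clk_prepare_enable(clk);         /* prepare (may sleep), then enable */
}

static void clock_off(struct clk *clk)
{
        clk_disable_unprepare(clk);             /* disable, then unprepare */
}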
index 321e066a07533bcf7a18e72c471e01cc1bef9527..9e9d348711953a44006b7c377a898e98c15b131d 100644 (file)
@@ -46,6 +46,7 @@ unsigned int skip_c0;
 unsigned int skip_c1;
 unsigned int do_nhm_cstates;
 unsigned int do_snb_cstates;
+unsigned int do_c8_c9_c10;
 unsigned int has_aperf;
 unsigned int has_epb;
 unsigned int units = 1000000000;       /* Ghz etc */
@@ -120,6 +121,9 @@ struct pkg_data {
        unsigned long long pc3;
        unsigned long long pc6;
        unsigned long long pc7;
+       unsigned long long pc8;
+       unsigned long long pc9;
+       unsigned long long pc10;
        unsigned int package_id;
        unsigned int energy_pkg;        /* MSR_PKG_ENERGY_STATUS */
        unsigned int energy_dram;       /* MSR_DRAM_ENERGY_STATUS */
@@ -282,6 +286,11 @@ void print_header(void)
                outp += sprintf(outp, "   %%pc6");
        if (do_snb_cstates)
                outp += sprintf(outp, "   %%pc7");
+       if (do_c8_c9_c10) {
+               outp += sprintf(outp, "   %%pc8");
+               outp += sprintf(outp, "   %%pc9");
+               outp += sprintf(outp, "  %%pc10");
+       }
 
        if (do_rapl & RAPL_PKG)
                outp += sprintf(outp, "  Pkg_W");
@@ -336,6 +345,9 @@ int dump_counters(struct thread_data *t, struct core_data *c,
                fprintf(stderr, "pc3: %016llX\n", p->pc3);
                fprintf(stderr, "pc6: %016llX\n", p->pc6);
                fprintf(stderr, "pc7: %016llX\n", p->pc7);
+               fprintf(stderr, "pc8: %016llX\n", p->pc8);
+               fprintf(stderr, "pc9: %016llX\n", p->pc9);
+               fprintf(stderr, "pc10: %016llX\n", p->pc10);
                fprintf(stderr, "Joules PKG: %0X\n", p->energy_pkg);
                fprintf(stderr, "Joules COR: %0X\n", p->energy_cores);
                fprintf(stderr, "Joules GFX: %0X\n", p->energy_gfx);
@@ -493,6 +505,11 @@ int format_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
        if (do_snb_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
+       if (do_c8_c9_c10) {
+               outp += sprintf(outp, " %6.2f", 100.0 * p->pc8/t->tsc);
+               outp += sprintf(outp, " %6.2f", 100.0 * p->pc9/t->tsc);
+               outp += sprintf(outp, " %6.2f", 100.0 * p->pc10/t->tsc);
+       }
 
        /*
         * If measurement interval exceeds minimum RAPL Joule Counter range,
@@ -569,6 +586,9 @@ delta_package(struct pkg_data *new, struct pkg_data *old)
        old->pc3 = new->pc3 - old->pc3;
        old->pc6 = new->pc6 - old->pc6;
        old->pc7 = new->pc7 - old->pc7;
+       old->pc8 = new->pc8 - old->pc8;
+       old->pc9 = new->pc9 - old->pc9;
+       old->pc10 = new->pc10 - old->pc10;
        old->pkg_temp_c = new->pkg_temp_c;
 
        DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
@@ -702,6 +722,9 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
        p->pc3 = 0;
        p->pc6 = 0;
        p->pc7 = 0;
+       p->pc8 = 0;
+       p->pc9 = 0;
+       p->pc10 = 0;
 
        p->energy_pkg = 0;
        p->energy_dram = 0;
@@ -740,6 +763,9 @@ int sum_counters(struct thread_data *t, struct core_data *c,
        average.packages.pc3 += p->pc3;
        average.packages.pc6 += p->pc6;
        average.packages.pc7 += p->pc7;
+       average.packages.pc8 += p->pc8;
+       average.packages.pc9 += p->pc9;
+       average.packages.pc10 += p->pc10;
 
        average.packages.energy_pkg += p->energy_pkg;
        average.packages.energy_dram += p->energy_dram;
@@ -781,6 +807,10 @@ void compute_average(struct thread_data *t, struct core_data *c,
        average.packages.pc3 /= topo.num_packages;
        average.packages.pc6 /= topo.num_packages;
        average.packages.pc7 /= topo.num_packages;
+
+       average.packages.pc8 /= topo.num_packages;
+       average.packages.pc9 /= topo.num_packages;
+       average.packages.pc10 /= topo.num_packages;
 }
 
 static unsigned long long rdtsc(void)
@@ -880,6 +910,14 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
                        return -12;
        }
+       if (do_c8_c9_c10) {
+               if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
+                       return -13;
+               if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
+                       return -13;
+               if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
+                       return -13;
+       }
        if (do_rapl & RAPL_PKG) {
                if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
                        return -13;
@@ -1762,6 +1800,19 @@ int is_snb(unsigned int family, unsigned int model)
        return 0;
 }
 
+int has_c8_c9_c10(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+
+       switch (model) {
+       case 0x45:
+               return 1;
+       }
+       return 0;
+}
+
+
 double discover_bclk(unsigned int family, unsigned int model)
 {
        if (is_snb(family, model))
@@ -1918,6 +1969,7 @@ void check_cpuid()
        do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */
        do_smi = do_nhm_cstates;
        do_snb_cstates = is_snb(family, model);
+       do_c8_c9_c10 = has_c8_c9_c10(family, model);
        bclk = discover_bclk(family, model);
 
        do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
@@ -2279,7 +2331,7 @@ int main(int argc, char **argv)
        cmdline(argc, argv);
 
        if (verbose)
-               fprintf(stderr, "turbostat v3.3 March 15, 2013"
+               fprintf(stderr, "turbostat v3.4 April 17, 2013"
                        " - Len Brown <lenb@kernel.org>\n");
 
        turbostat_init();
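
The new %pc8/%pc9/%pc10 columns follow the same scheme as the existing package C-state columns: each prints the per-interval delta of the corresponding residency MSR as a percentage of the TSC delta, and the MSRs are only read when has_c8_c9_c10() matches the CPU model. A worked example with made-up numbers, around the sprintf() line from the hunk above:

/* If over one interval delta(MSR_PKG_C8_RESIDENCY) = 1,200,000,000 and
 * delta(TSC) = 2,400,000,000, the column prints 100.0 * 1.2e9 / 2.4e9 = 50.00 */
outp += sprintf(outp, " %6.2f", 100.0 * p->pc8/t->tsc);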
index 45f09362ee7be02df67171efa3508fa225129499..302681c4aa4465bb21b69524d7c0d3a5341f4a4e 100644 (file)
@@ -1978,7 +1978,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
        if (vcpu->kvm->mm != current->mm)
                return -EIO;
 
-#if defined(CONFIG_S390) || defined(CONFIG_PPC)
+#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
        /*
         * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
         * so vcpu_load() would break it.
@@ -3105,13 +3105,21 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
        int r;
        int cpu;
 
-       r = kvm_irqfd_init();
-       if (r)
-               goto out_irqfd;
        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;
 
+       /*
+        * kvm_arch_init makes sure there's at most one caller
+        * for architectures that support multiple implementations,
+        * like intel and amd on x86.
+        * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
+        * conflicts in case kvm is already setup for another implementation.
+        */
+       r = kvm_irqfd_init();
+       if (r)
+               goto out_irqfd;
+
        if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
                r = -ENOMEM;
                goto out_free_0;
@@ -3186,10 +3194,10 @@ out_free_1:
 out_free_0a:
        free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
-       kvm_arch_exit();
-out_fail:
        kvm_irqfd_exit();
 out_irqfd:
+       kvm_arch_exit();
+out_fail:
        return r;
 }
 EXPORT_SYMBOL_GPL(kvm_init);
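
The kvm_init() change both moves kvm_arch_init() ahead of kvm_irqfd_init() (for the reason spelled out in the new comment) and keeps the error path a mirror image of the init order, so each label undoes exactly the steps that had succeeded. The general shape of that unwind pattern, reduced to two placeholder steps (step_a/step_b are not KVM functions):

static int step_a_init(void);
static void step_a_exit(void);
static int step_b_init(void);

static int subsystem_init(void)
{
        int r;

        r = step_a_init();              /* here: kvm_arch_init() */
        if (r)
                goto out;

        r = step_b_init();              /* here: kvm_irqfd_init() */
        if (r)
                goto out_a;

        return 0;

out_a:
        step_a_exit();                  /* undo in reverse order */
out:
        return r;
}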